blob_id
stringlengths
40
40
directory_id
stringlengths
40
40
path
stringlengths
2
327
content_id
stringlengths
40
40
detected_licenses
listlengths
0
91
license_type
stringclasses
2 values
repo_name
stringlengths
5
134
snapshot_id
stringlengths
40
40
revision_id
stringlengths
40
40
branch_name
stringclasses
46 values
visit_date
timestamp[us]date
2016-08-02 22:44:29
2023-09-06 08:39:28
revision_date
timestamp[us]date
1977-08-08 00:00:00
2023-09-05 12:13:49
committer_date
timestamp[us]date
1977-08-08 00:00:00
2023-09-05 12:13:49
github_id
int64
19.4k
671M
star_events_count
int64
0
40k
fork_events_count
int64
0
32.4k
gha_license_id
stringclasses
14 values
gha_event_created_at
timestamp[us]date
2012-06-21 16:39:19
2023-09-14 21:52:42
gha_created_at
timestamp[us]date
2008-05-25 01:21:32
2023-06-28 13:19:12
gha_language
stringclasses
60 values
src_encoding
stringclasses
24 values
language
stringclasses
1 value
is_vendor
bool
2 classes
is_generated
bool
2 classes
length_bytes
int64
7
9.18M
extension
stringclasses
20 values
filename
stringlengths
1
141
content
stringlengths
7
9.18M
1db38aeb71bf2c5be032cfcb16c873a87601e03b
bec8a34363b8da5cefee3a98fe98d339a8ec8eab
/R/op_kill_cursors.r
ccf74e8f3bfbb8fba84b3552126d75db7e232371
[]
no_license
strongh/mongor
24fa4fb6fad577562db97c4356e403c7cd74568d
7f1a95cb742e8163151f895f765be430a67dbe01
refs/heads/master
2016-09-05T09:16:14.667651
2010-11-15T01:42:36
2010-11-15T01:42:36
937,944
3
0
null
null
null
null
UTF-8
R
false
false
726
r
op_kill_cursors.r
##' OP_KILL_CURSORS ##' ##' One of the basic mongo messages. ##' ##' @param collection the name of the collection to query ##' @param cursor.ids a list of cursor ids ##' @return a raw vector encoding the query op_kill_cursors <- function(collection, cursor.ids, to_return = 10){ ## header fut_use <- numToRaw(0, nBytes = 4) # reserved for future use full_name <- encode_cstring(collection) # full collection name id.count <- encode_int32(length(cursor.ids)) cursor.id <- unlist(cursor.ids) rawl <- c(fut_use, full_name, id.count, cursor.id) header <- make_header(2007, length(rawl)) # make header last, so that it has the msgSize return(c(header, rawl)) }
3dc1858bf8baad4ff38d2f74b127aa740505c316
7d53d36cdb86afd193e75301e0318abaf21c0b3f
/server.R
c4d05152075a7d2502d25f6b1d2b27047113858f
[]
no_license
ZivaXu/uw-club-journey
c6d07d865aa18fb58823348f112b46061edfb502
d716a318b58e3903b287714ca2e02c79b0101b78
refs/heads/master
2022-12-02T10:37:56.488033
2020-08-16T18:49:21
2020-08-16T18:49:21
287,830,961
0
0
null
null
null
null
UTF-8
R
false
false
3,900
r
server.R
library(shiny) library(ggplot2) library(dplyr) library(R.utils) library(lubridate) library(scales) library(plotly) library(shinydashboard) library(gapminder) library(DT) shinyServer(function(input, output) { # Read task stamp task_list <- read.csv("tasks/task-list.csv") # Output stamp image output$teststamp <- renderImage({ return(list( src = "image/stamp/1.png", filetype = "image/png", width = "100%", alt = "Rising Star" )) }, deleteFile = FALSE) output$teststamp2 <- renderImage({ return(list( src = "image/stamp/3.png", filetype = "image/png", width = "100%", alt = "Sharing is Caring" )) }, deleteFile = FALSE) output$teststamp3 <- renderImage({ return(list( src = "image/stamp/5.png", filetype = "image/png", width = "100%", alt = "Exploring" )) }, deleteFile = FALSE) output$teststamp4 <- renderImage({ return(list( src = "image/stamp/9.png", filetype = "image/png", width = "100%", alt = "Founder" )) }, deleteFile = FALSE) output$teststamp5 <- renderImage({ return(list( src = "image/stamp/23.png", filetype = "image/png", width = "100%", alt = "HuskyLink" )) }, deleteFile = FALSE) output$teststamp6 <- renderImage({ return(list( src = "image/stamp/48.png", filetype = "image/png", width = "100%", alt = "Insta Guru" )) }, deleteFile = FALSE) output$teststamp7 <- renderImage({ return(list( src = "image/stamp/51.png", filetype = "image/png", width = "100%", alt = "Recruiter" )) }, deleteFile = FALSE) output$teststamp8 <- renderImage({ return(list( src = "image/stamp/67.png", filetype = "image/png", width = "100%", alt = "Club Baby I" )) }, deleteFile = FALSE) output$teststamp9 <- renderImage({ return(list( src = "image/stamp/84.png", filetype = "image/png", width = "100%", alt = "Social Good" )) }, deleteFile = FALSE) output$teststamp10 <- renderImage({ return(list( src = "image/stamp/92.png", filetype = "image/png", width = "100%", alt = "WOAH!" 
)) }, deleteFile = FALSE) output$teststamp11 <- renderImage({ return(list( src = "image/stamp/372.png", filetype = "image/png", width = "100%", alt = "Virtual Club Fair" )) }, deleteFile = FALSE) output$teststamp12 <- renderImage({ return(list( src = "image/stamp/258.png", filetype = "image/png", width = "100%", alt = "Virtual Club Fair" )) }, deleteFile = FALSE) output$teststamp13 <- renderImage({ return(list( src = "image/stamp/492.png", filetype = "image/png", width = "100%", alt = "Virtual Club Fair" )) }, deleteFile = FALSE) #Tasks tab output$rec_tasks <- DT::renderDataTable({ DT::datatable(rectask1, colnames=c("Task Name", "Description"), options = list(paging = FALSE)) }) output$my_tasks <- DT::renderDataTable({ DT::datatable(user_tasklist11, colnames=c("Task Name", "Description", "Completed?"), options = list(paging = FALSE)) }) #Club tab output$all_clubs <- DT::renderDataTable({ DT::datatable(all_clubs, colnames=c("Club Name", "Contact Email"), options = list(paging = FALSE)) }) output$starred_clubs <- renderTable({ colnames(starred_clubs) <- c("Club Name", "Contact Email") starred_clubs }) #Top chart tab output$top_20_club <- renderPlotly({ ggplotly( ggplot( data <- task_counts ) + geom_col(mapping = aes(x = Club_Name, y = Task_Counts)) + coord_flip() + ggtitle( "Top 20 Users" ) + labs(x = "Club Name", y = "Completed Task Counts") ) }) })
507b368a12edd6f729b409a8a9d38670d0b959ef
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
/data/genthat_extracted_code/spate/examples/propagate.spectral.Rd.R
2b1d8cd47a8259ec26b513ea81ea4f0fc446f61b
[]
no_license
surayaaramli/typeRrh
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
66e6996f31961bc8b9aafe1a6a6098327b66bf71
refs/heads/master
2023-05-05T04:05:31.617869
2019-04-25T22:10:06
2019-04-25T22:10:06
null
0
0
null
null
null
null
UTF-8
R
false
false
780
r
propagate.spectral.Rd.R
library(spate) ### Name: propagate.spectral ### Title: Function that propagates a state (spectral coefficients). ### Aliases: propagate.spectral ### ** Examples n <- 50 spec <- matern.spec(wave=spate.init(n=n,T=1)$wave,n=n,rho0=0.05,sigma2=1,norm=TRUE) alphat <- sqrt(spec)*rnorm(n*n) ##Propagate initial state wave <- wave.numbers(n) Gvec <- get.propagator.vec(wave=wave$wave,indCos=wave$indCos,zeta=0.1,rho1=0.02,gamma=2, alpha=pi/4,muX=0.2,muY=0.2,dt=1,ns=4) alphat1 <- propagate.spectral(alphat,n=n,Gvec=Gvec) par(mfrow=c(1,2)) image(1:n,1:n,matrix(real.fft(alphat,n=n,inv=FALSE),nrow=n),main="Whittle field",xlab="",ylab="",col=cols()) image(1:n,1:n,matrix(real.fft(alphat1,n=n,inv=FALSE),nrow=n),main="Propagated field",xlab="",ylab="",col=cols())
1ad9678d9a39c0c9ec78763e4de9e2d6ff2bf59f
fc7c616bda497d9b193df08ec09241b1486bacac
/MEAN,MEDIAN,MODE.R
1fc02eb6dd5c78f85094564108ef5c919e9d216e
[]
no_license
MinhajulAkib/R-Programming
5c38306ce7486e283e3c51aa9ce81fb34fe6c850
7bbd654e4c6b8917bd21a380ffb120665915773a
refs/heads/main
2023-02-02T02:39:48.166304
2020-12-11T11:26:12
2020-12-11T11:26:12
316,182,597
1
0
null
null
null
null
UTF-8
R
false
false
363
r
MEAN,MEDIAN,MODE.R
A=c(1:20) mean(A) sum(A) length(A) median(A) x<-c(8,3,4,5,2,1,3,5,6) mean(x) median(x) sort(x) sum(x) sum(x)/length(x) ## LungCapData<-read.table(file.choose(),header=T,sep = "\t") attach(LungCapData) names(LungCapData) summary(LungCapData) table(Smoke) table(Smoke,Gender) #standard deviation sd(LungCap) sd(LungCap)^2 sqrt(var(LungCap))
73b38b7bf69b592eb44bfa8003b32d3a59a3170a
2cd54a4365c128d94c120a204aaccf68c3607b49
/R/plsda.R
cc2d6fc12e94e44135d76f57cf0f82580564787d
[ "MIT" ]
permissive
tikunov/AlpsNMR
952a9e47a93cbdc22d7f11b4cb1640edd736a5c7
748d140d94f65b93cb67fd34753cc1ef9e450445
refs/heads/master
2021-01-13T17:35:29.827357
2020-02-23T02:35:30
2020-02-23T02:35:30
242,443,517
0
0
NOASSERTION
2020-02-23T02:27:15
2020-02-23T02:27:15
null
UTF-8
R
false
false
10,646
r
plsda.R
#' Build a PLSDA model, optionally with multilevel #' @param x the X training set #' @param y the y training class to predict #' @param identity the multilevel variable in [mixOmics::plsda] #' @param ncomp The number of components of the model #' @noRd plsda_build <- function(x, y, identity, ncomp) { plsda_model <- NULL tryCatch({ suppressMessages( utils::capture.output({ plsda_model <- mixOmics::plsda( X = x, Y = y, ncomp = ncomp, scale = TRUE, multilevel = identity ) }) ) }, error = function(e) { message("Error building PLSDA, continuing") }) plsda_model } #' Compute the area under the ROC curve of a PLS-DA model on a test subset #' @param plsda_model A mixOmics plsda model #' @param x_test the x test set #' @param y_test the y test class to predict #' @param identity_test the multilevel variable in [mixOmics::plsda] #' @return A list with two elements: #' - `aucs`: A data frame with two columns: `ncomp` (the number of components) and #' `auc` the area under roc curve for that number of components. For multiclass problems #' the AUC returned is the mean of all the one-vs-other AUCs. #' - `aucs_full`: A list of matrices, as returned by [mixOmics::auroc]. 
#' @noRd plsda_auroc <- function(plsda_model, x_test, y_test, identity_test) { aucs <- numeric(0L) aucs_full <- list() tryCatch({ suppressMessages( utils::capture.output({ roc <- mixOmics::auroc(plsda_model, newdata = x_test, outcome.test = y_test, multilevel = identity_test, plot = FALSE) }) ) aucs <- purrr::map_dbl(roc, function(x) mean(x[, "AUC"])) aucs_full <- roc }, error = function(e) { message("Error in auroc estimation, continuing") }) ncomps <- as.integer(gsub(pattern = "Comp(.*)", replacement = "\\1", x = names(aucs))) list(aucs = data.frame(ncomp = ncomps, auc = aucs, stringsAsFactors = FALSE), aucs_full = aucs_full) } #' Compute the variable importance in the projection #' @param plsda_model A mixOmics plsda model #' @return A matrix with the variable importance in the projection #' @noRd plsda_vip <- function(plsda_model) { vip <- NULL tryCatch({ suppressMessages( utils::capture.output({ vip <- mixOmics::vip(object = plsda_model) }) ) }, error = function(e) { message("Error in vip, continuing") }) vip } #' Callback for building a PLSDA model, computing the AUROC and extract the VIP #' #' @param x_train Training data for x #' @param y_train Training data for y #' @param identity_train Training data for the identities #' @param x_test Test data for x #' @param y_test Test data for y #' @param identity_test Test data for the identities #' @param ncomp Number of components to use in the model #' @param return_model A logical. #' @param return_auroc A logical. #' @param return_auroc_full A logical. #' @param return_vip A logical. #' #' #' For multiclass problems the AUC returned is the mean of all the one-vs-other AUCs. #' #' @return A list with the model, the area under the roc curve and the VIP items. 
#' @noRd callback_plsda_auroc_vip <- function(x_train, y_train, identity_train, x_test, y_test, identity_test, ncomp, return_model = FALSE, return_auroc = TRUE, return_auroc_full = FALSE, return_vip = FALSE) { plsda_model <- plsda_build(x_train, y_train, identity_train, ncomp = max(ncomp)) out <- list(model = NULL, auroc = NULL, auroc_full = NULL, vip = NULL) if (isTRUE(return_model)) { out$model <- plsda_model } if (isTRUE(return_auroc) || isTRUE(return_auroc_full)) { aurocs <- plsda_auroc(plsda_model, x_test, y_test, identity_test) if (isTRUE(return_auroc)) { out$auroc <- aurocs$aucs } if (isTRUE(return_auroc_full)) { out$auroc_full <- aurocs$aucs_full } } if (isTRUE(return_vip)) { vip <- plsda_vip(plsda_model) out$vip <- vip } out } #' Callback to choose the best number of latent variables based on the AUC threshold #' #' @param auc_threshold Threshold on the increment of AUC. Increasing the number of #' latent variables must increase the AUC at least by this threshold. #' #' @return The actual function to compute the best number of latent variables according to a threshold on the increment of AUC #' @noRd fun_choose_best_ncomp_auc_threshold <- function(auc_threshold = 0.05) { force(auc_threshold) # Choose best number of latent variables based on a threshold on the auc increment. 
#' @param inner_cv_results A list of elements returned by [callback_plsda_auroc_vip] #' @return A list with: #' - `train_evaluate_model_args`: A list wit one element named `ncomp` with the number of latent variables selected #' for each outer cross-validation #' - `num_latent_var`: A data frame with the number of latent variables chosen for each outer cross-validation #' - `diagnostic_plot`: A plot showing the evolution of the AUC vs the number of latent variables for each iteration #' - `model_performances`: A data frame with the AUC model performances function(inner_cv_results) { model_performances <- inner_cv_results %>% purrr::map("auroc") %>% purrr::map_dfr(~ ., .id = "outer_inner") %>% tidyr::separate("outer_inner", into = c("cv_outer_iteration", "cv_inner_iteration"), convert = TRUE) # There is a more elegant way to do this. nlv <- model_performances %>% dplyr::group_by(.data$cv_outer_iteration, .data$cv_inner_iteration) %>% dplyr::arrange(.data$cv_outer_iteration, .data$cv_inner_iteration, .data$ncomp) %>% dplyr::mutate(auc_diff = ifelse(is.na(dplyr::lag(.data$auc)), .data$auc, .data$auc - dplyr::lag(.data$auc))) %>% dplyr::mutate(auc_limit_cumany = dplyr::cumall(.data$auc_diff > !!auc_threshold)) %>% dplyr::mutate(auc_limit_cumanyd = .data$auc_limit_cumany == TRUE & dplyr::lead(.data$auc_limit_cumany) == FALSE) %>% dplyr::filter(.data$auc_limit_cumanyd == TRUE) %>% dplyr::select(-.data$auc_limit_cumany, -.data$auc_limit_cumanyd) %>% dplyr::ungroup() %>% dplyr::group_by(.data$cv_outer_iteration) %>% dplyr::summarise(ncomp = round(stats::median(.data$ncomp))) plot_to_choose_nlv <- ggplot2::ggplot(model_performances) + ggplot2::geom_jitter(ggplot2::aes(x = .data$ncomp, y = .data$auc, group = .data$ncomp, color = as.character(.data$cv_inner_iteration)), width = 0.25, height = 0) + ggplot2::geom_vline(data = nlv, mapping = ggplot2::aes(xintercept = .data$ncomp), color = "red") + ggplot2::scale_x_continuous(name = "Number of latent variables", breaks = 
function(limits) { seq(from = 1, to = max(limits)) }) + ggplot2::scale_y_continuous(name = "Area Under ROC") + ggplot2::facet_wrap(~cv_outer_iteration) + ggplot2::guides(colour = "none") list(train_evaluate_model_args = list(ncomp = nlv$ncomp), num_latent_var = nlv, diagnostic_plot = plot_to_choose_nlv, model_performances = model_performances) } } #################### Validation ####### #' Callback to digest the results of the outer cross validation #' @noRd callback_outer_cv_auroc_vip <- function(outer_cv_results) { auroc <- outer_cv_results %>% purrr::map("auroc") %>% purrr::map_dfr(~ ., .id = "cv_outer_iteration") %>% dplyr::mutate(cv_outer_iteration = as.integer(.data$cv_outer_iteration)) %>% dplyr::group_by(.data$cv_outer_iteration) %>% dplyr::filter(.data$ncomp == max(.data$ncomp)) %>% dplyr::ungroup() %>% dplyr::arrange(.data$cv_outer_iteration) vip_vectors <- outer_cv_results %>% purrr::map("vip") %>% purrr::map2(auroc$ncomp, function(vip_matrix, selected_ncomp) { vip_vec <- as.numeric(vip_matrix[, selected_ncomp, drop = TRUE]) names(vip_vec) <- rownames(vip_matrix) vip_vec }) vip_ranks <- do.call(cbind, purrr::map(vip_vectors, ~rank(-.))) vip_rp <- apply(vip_ranks, 1, function(x) exp(mean(log(x)))) # geom mean (RankProducts) list(auroc = auroc, vip_vectors = vip_vectors, vip_rankproducts = vip_rp) } #' Method for nmr_data_analysis (PLSDA model with AUROC and VIP outputs) #' @param ncomp Max. number of latent variables to explore in the PLSDA analysis #' @param auc_increment_threshold Choose the number of latent variables when the #' AUC does not increment more than this threshold. #' #' Returns an object to be used with [nmr_data_analysis] to perform a (optionally #' multilevel) PLS-DA model, using the area under the ROC curve as figure of #' merit to determine the optimum number of latent variables. 
#' #' #' @export plsda_auroc_vip_method <- function(ncomp, auc_increment_threshold = 0.05) { new_nmr_data_analysis_method( train_evaluate_model = callback_plsda_auroc_vip, train_evaluate_model_params_inner = list(ncomp = ncomp, return_model = FALSE, return_auroc = TRUE, return_auroc_full = FALSE, return_vip = FALSE), choose_best_inner = fun_choose_best_ncomp_auc_threshold(auc_threshold = auc_increment_threshold), train_evaluate_model_params_outer = list(return_model = TRUE, return_auroc = TRUE, return_auroc_full = TRUE, return_vip = TRUE), train_evaluate_model_digest_outer = callback_outer_cv_auroc_vip) } #' Compare PLSDA auroc VIP results #' #' @param ... Results of [nmr_data_analysis] to be combined. Give each result a name. #' #' @return A plot of the AUC for each method #' @export plsda_auroc_vip_compare <- function(...) { dots <- list(...) class_compare <- names(dots) if (is.null(class_compare) || any(nchar(class_compare) == 0)) { stop("All arguments should be named") } auroc_tables <- dots %>% purrr::map("outer_cv_results_digested") %>% purrr::map("auroc") %>% purrr::map2(class_compare, function(auroc, group_name) { auroc %>% dplyr::select(.data$auc) %>% dplyr::mutate(Group = !!group_name) }) toplot <- do.call(rbind, c(auroc_tables, list(stringsAsFactors = FALSE))) ggplot2::ggplot(toplot) + ggplot2::geom_boxplot(ggplot2::aes(x = .data$Group, y = .data$auc, fill = .data$Group), show.legend = FALSE) + ggplot2::scale_x_discrete(name = "Model") + ggplot2::scale_y_continuous(name = "Area under ROC") }
30cc4caeef11944775076a368af84a0b8eae10f2
237bcbdc6b09c57b251191471359eeefb8014410
/dbSNP_to_annovar_NEW.r
69e1169b41c4db74055a2b49fb4655588be1d82c
[]
no_license
achalneupane/rcodes
d2055b03ca70fcd687440e6262037507407ec7a5
98cbc1b65d85bbb6913eeffad62ad15ab9d2451a
refs/heads/master
2022-10-02T20:35:18.444003
2022-09-09T20:53:03
2022-09-09T20:53:03
106,714,514
0
0
null
null
null
null
UTF-8
R
false
false
9,215
r
dbSNP_to_annovar_NEW.r
#answer = which.isMatchingAt("N", seqs, at=6:1, follow.index=TRUE) code.dir<-"/media/UQCCG/Programming/VersionControl_GitRepository/UQCCG_Pipeline_Rscripts" setwd(code.dir) # load in the UCSC tables these use the db file names and not their lable-names source("annotate_SNPs_subroutines.r") options("width"=250,"max.print"=1000) vcf.files<-"00-All.vcf" output.name<-"hg19_snp137" vcf.files<-"clinvar_20120616_2.vcf" output.name<-"hg19_snp137_clinical" names(vcf.files)<-"snp" snp.dir<-"/media/scratch2/dbSNP" # .txt extension will be added location<-snp.dir unwanted.cols<-c("QUAL","FILTER") ########################BEGIN setwd(location) the.files<-dir(getwd()) if(paste(output.name,"_maf.txt",sep="") %in% the.files){print("WARNING output files exits they will be appended too!!")} ################ LARGE FILES ######## print(vcf.files) ######BELOW PROCESSING this for snp for indel varient types in vcf4.0 or vcf 3.0 format rm(start.data) start.data<-prepare.for.Vcf.file.read(vcf.files) for(i in 1:length(start.data)){assign( names(start.data)[i],value=start.data[[i]])} cbind(info.types,info.description) con <- file(vcf.files, open="r") # close(con) num.lines<-1 # so does while llop at least once reads<-1000000 #1.3M lines in snp file 50000 goes out to 24Gb without QC cgeck counter<- -1 while (num.lines >0 ){ counter<-counter+1 print(counter) if(counter==0){ indels<-try(scan(con,what=character(num.vars),skip=(reads*counter)+skip.lines,nlines=reads,sep="\t",fill=TRUE,na.strings="",quote="\"")) }else{ indels<-try(scan(con,what=character(num.vars),nlines=reads,sep="\t",fill=TRUE,na.strings="",quote="\"")) } ## indels1 <- read.table(con,col.names=column.labels,sep="\t",skip=skip.lines,fill=TRUE,stringsAsFactors=FALSE,colClasses="character",nrows=reads,comment.char="",quote="") num.lines<-length(indels)/(num.vars) print(num.lines) if(num.lines==0){next} dim(indels)<-c(num.vars,num.lines) indels<-t(indels) colnames(indels)<-column.labels #indels[1:5,] 
##PM,Number=0,Type=Flag,Description="Variant is Precious(Clinical,Pubmed Cited)"> ##INFO=<ID=TPA,Number=0,Type=Flag,Description="Provisional Third Party Annotation(TPA) (currently rs from PHARMGKB who will give phenotype data)"> ##INFO=<ID=CDA,Number=0,Type=Flag,Description="Variation is interrogated in a clinical diagnostic assay"> ####################################### FINISHED Read in data DO PROCESSIng below ################################################################################################### alt.list<-strsplit(indels[,"ALT"],split=",") has.g5.or.g5A<-grepl(";G5",indels[,"INFO"]) # catch G5 and G5A ## has.g5<-grepl(";G5;",indels[,"INFO"]) ## has.g5a<-grepl(";G5A;",indels[,"INFO"]) has.gmaf<-grepl(";GMAF=",indels[,"INFO"]) #########extract A quatity from INFO GMAF DONE HERE #indels[has.gmaf,"INFO"][1:20] the.gmaf<-extract.value.from.info(indels[has.gmaf,"INFO"],"GMAF=") ########################################### # make MAF the.af<-rep(0,times=dim(indels)[1]) the.af[!has.gmaf & has.g5.or.g5A]<-0.5 ### maf > 0.5% so set to 0.5 so it is excluded entirely the.af[has.gmaf]<-the.gmaf ################### fltten the data and previos tests number.of.alleles<-unlist(lapply(alt.list,length)) flat.index<-rep(1:length(number.of.alleles),times=number.of.alleles) indels<-indels[flat.index,] the.af<-the.af[flat.index] has.g5.or.g5A<-has.g5.or.g5A[flat.index] indels[,"ALT"]<-unlist(alt.list) # they are unlisted in the same order as they appear #################### FLAG interesting stuff has.OM<-grepl(";OM",indels[,"INFO"]) has.GNO<-grepl(";GNO",indels[,"INFO"]) has.CLN<-grepl(";CLN",indels[,"INFO"]) # variant is clinical (but of that is benighn - taste) has.CDA<-grepl(";CDA",indels[,"INFO"]) # "Variation is interrogated in a clinical diagnostic assay" has.PM<-grepl(";PM",indels[,"INFO"]) # ""Variant is Precious(Clinical,Pubmed Cited)"" has.MUT<-grepl(";MUT",indels[,"INFO"]) # Is mutation (journal citation, explicit fact): a low frequency variation that is cited 
in journal and other reputable sources"> has.SCS<-grepl(";SCS",indels[,"INFO"]) # variant is clinical significance #0 - unknown, 1 - untested, 2 - non-pathogenic, 3 - probable-non-pathogenic, 4 - probable-pathogenic, 5 - pathogenic, 6 - drug-response, 7 - histocompatibility, 255 - other"> ## has.SCS.prob.patho<-grepl(";SCS=4;",indels[,"INFO"]) ## has.SCS.patho<-grepl(";SCS=5;",indels[,"INFO"]) ## has.SCS.drug<-grepl(";SCS=6;",indels[,"INFO"]) ## has.SCS.histo<-grepl(";SCS=7;",indels[,"INFO"]) ## has.bad.path<-(has.SCS.prob.patho | has.SCS.patho | has.SCS.drug | has.SCS.histo) ## to.test<- has.g5.or.g5A & has.bad.path ## if(sum(to.test)>0){print("common pathogenic allele");print(indels[to.test,])} # these appear to be GWAS hits ## pathological<-vector(mode="character",length=dim(indels)[1]) ## clinical<-vector(mode="character",length=dim(indels)[1]) ## pathological[has.SCS.patho]<-"pathogenic" ## pathological[has.SCS.prob.patho]<-"probable-pathogenic" ## pathological[has.SCS.drug]<-"drug-response" ## pathological[has.SCS.histo]<-"histocompatibility" ## clinical.event<-extract.value.from.info(indels[has.CLN,"INFO"],"CLN=") ## clinical[has.CLN]<-clinical.event ########################### PUT ALLELES IN ANNOVAR FORMAT from convert2annovar.pl line 1083######## ref.length<-nchar(as.character(indels[,"REF"])) alt.length<-nchar(as.character(indels[,"ALT"])) is.snp<-(ref.length==1 & alt.length==1) POS.end<-as.numeric(indels[,"POS"]) del<-ref.length > alt.length ins<-(ref.length <= alt.length) & !is.snp #indels[del,][1:5,] #POS.end[del][1:5] ### deletion or block substitution head<-substr(as.character(indels[del,"REF"]),1,alt.length[del]) head.is.mut<-(head==as.character(indels[del,"ALT"])) indels[del,"REF"][head.is.mut]<-substr(as.character(indels[del,"REF"][head.is.mut]),(alt.length[del][head.is.mut]+1),ref.length[del][head.is.mut]) indels[del,"ALT"][head.is.mut]<-"-" indels[del,"POS"][head.is.mut]<-as.numeric(indels[del,"POS"][head.is.mut]) + 
nchar(as.character(head[head.is.mut])) POS.end[del]<-POS.end[del]+ref.length[del]-1 # same for both head is mut and not head is mut ## indels ## POS.end ### insertion or block substitution head<-substr(as.character(indels[ins,"ALT"]),1,ref.length[ins]) head.is.ref<-(head==as.character(indels[ins,"REF"])) indels[ins,"ALT"][head.is.ref]<-substr(as.character(indels[ins,"ALT"][head.is.ref]),(ref.length[ins][head.is.ref]+1),alt.length[ins][head.is.ref]) indels[ins,"REF"][head.is.ref]<-"-" indels[ins,"POS"][head.is.ref]<-as.numeric(indels[ins,"POS"][head.is.ref]) + ref.length[ins][head.is.ref]-1 POS.end[ins]<-POS.end[ins]+ref.length[ins]-1 ######################################################################################################## #indels[ins,] #POS.end[ins] ###wait here indels<-cbind(indels[,c("chr","POS")],POS.end,indels[,c("REF","ALT")],the.af,indels[,c("ID","INFO")]) #indels[1:5,] write.table(indels,file=paste(output.name,"_maf.txt",sep=""),col.names=FALSE,row.names=FALSE,sep="\t",quote=FALSE,append=TRUE) ## write.table(indels[has.bad.path,],file=paste(output.name,"_pathalogical_maf.txt",sep=""),col.names=FALSE,row.names=FALSE,sep="\t",quote=FALSE,append=TRUE) write.table(indels[has.OM,],file=paste(output.name,"_omim_maf.txt",sep=""),col.names=FALSE,row.names=FALSE,sep="\t",quote=FALSE,append=TRUE) write.table(indels[has.CDA,],file=paste(output.name,"_clinical_assay_maf.txt",sep=""),col.names=FALSE,row.names=FALSE,sep="\t",quote=FALSE,append=TRUE) write.table(indels[has.MUT,],file=paste(output.name,"_mutation_maf.txt",sep=""),col.names=FALSE,row.names=FALSE,sep="\t",quote=FALSE,append=TRUE) write.table(indels[has.PM,],file=paste(output.name,"_pubmed_maf.txt",sep=""),col.names=FALSE,row.names=FALSE,sep="\t",quote=FALSE,append=TRUE) } ## loop over data chunks ## Error in indels[del, "REF"][head.is.mut] <- substr(as.character(indels[del, : ## NAs are not allowed in subscripted assignments #This reads the vcf files stored as names in samples files an makes a 
corrending data object with the name provided ## > sample.files ## snp indel ## "SKDP-FAM-26_All_snps.raw.vcf" "SKDP-FAM-26_All_DINDEL.raw.vcf" ## http://www.ncbi.nlm.nih.gov/projects/SNP/docs/rs_attributes.html#gmaf ## Global minor allele frequency (MAF): dbSNP is reporting the minor allele frequency for each rs included in a default global population. Since this is being provided to distinguish common polymorphism from rare variants, the MAF is actually the second most frequent allele value. In other words, if there are 3 alleles, with frequencies of 0.50, 0.49, and 0.01, the MAF will be reported as 0.49. The current default global population is 1000Genome phase 1 genotype data from 1094 worldwide individuals, released in the May 2011 dataset. ## For example, refSNP page for rs222 reports: "MAF/MinorAlleleCount:G=0.262/330". This means that for rs222, minor allele is 'G' and has a frequency of 26.2% in the 1000Genome phase 1 population and that 'G' is observed 330 times in the sample population of 629 people (or 1258 chromosomes). ## ###################################to a read in a lrage file
01468fa815778ebf10a3569cc76ee4ced99b7802
abc1bb92a1052e5dbc7d7a19b1205c2a20a93867
/man/stirDistances.Rd
5ea14eaf0eac43d7a6a05007a103bb32bd7d4b9d
[]
no_license
tactless2004/STIR
ed1ba363f3ce8d7d3ed6f0a998fcdb2c3374526b
b7043e659ae3563a7815fb0b45667df0fd24043f
refs/heads/master
2020-04-13T13:22:38.841180
2018-12-26T18:50:51
2018-12-26T18:50:51
163,227,873
0
0
null
2018-12-27T00:14:36
2018-12-27T00:14:35
null
UTF-8
R
false
true
601
rd
stirDistances.Rd
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/reSTIR.R \name{stirDistances} \alias{stirDistances} \title{stirDistances} \usage{ stirDistances(attr.mat, metric = "manhattan") } \arguments{ \item{attr.mat}{m x p matrix of m instances and p attributes} \item{metric}{for distance matrix between instances (default: \code{"manhattan"}, \code{"euclidean"}, \code{"relief-scaled-manhattan"}, \code{"relief-scaled-euclidean"}, \code{"allele-sharing-manhattan"}).} } \description{ Note: Should probalby standardize data before manhattan and euclidean? } \examples{ Example }
4d4dd18d63b173980e0d197e3b572509904355ca
50a83dbdb80fae7fc386482d5720418ea892473e
/syntax/old/models_beat_longitudinal_010820.R
2d4bcc86027f63ed3dc121826d30becd0952e60a
[]
no_license
clanfear/CampsPolicing
f2f9fedefabee89e9b0e4829f71d463f45355b92
0b0289bdcae84c1785c161235e39c31740e3c4ba
refs/heads/master
2022-01-27T00:04:40.195375
2022-01-18T21:27:06
2022-01-18T21:27:06
234,187,988
1
0
null
null
null
null
UTF-8
R
false
false
645
r
models_beat_longitudinal_010820.R
# What do I need here? ## Beat-level models of fear of crime, police efficacy, SPU complaints, and offenses. ## Can theoretically do beat-quarter level if they're evenly spaced? # Fear / satisfaction covers 2007 through 2018 reliably load("./data/derived/beats_all_pe_fe_aw.RData") # Observations per beat per year beats_all_pe_fe_aw %>% group_by(year) %>% summarize(mean_n = mean(n)) beats_all_pe_fe_aw %>% ggplot(aes(fill = police_efficacy)) + geom_sf() + facet_wrap(~year) beats_all_pe_fe_aw %>% ggplot(aes(fill = fear_of_crime)) + geom_sf() + facet_wrap(~year) beats_all_pe_fe_aw %>% ggplot(aes(fill = n)) + geom_sf() + facet_wrap(~year)
70e0ef6184535314936856b7666d068dbcd0ca7a
9719ea69f693adfddc62b27eaf948fc7b16f6ad0
/man/wastd_parse.Rd
8a213c7a7d3c080bd5e5d1f7f0d13d5d2c531044
[]
no_license
dbca-wa/wastdr
49fe2fb1b8b1e518f6d38549ff12309de492a2ad
5afb22d221d6d62f6482798d9108cca4c7736040
refs/heads/master
2022-11-18T01:00:41.039300
2022-11-16T08:32:12
2022-11-16T08:32:12
86,165,655
2
0
null
null
null
null
UTF-8
R
false
true
1,594
rd
wastd_parse.Rd
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/wastd_parse.R \name{wastd_parse} \alias{wastd_parse} \title{Parse a \code{wastd_api_response} into a tibble} \usage{ wastd_parse(wastd_api_response, payload = "data") } \arguments{ \item{wastd_api_response}{A \code{wastd_api_response} from WAStD} \item{payload}{(chr) The name of the key containing the parsed \code{httr::content()} from the WAStD API call} } \value{ A tibble with one row per record and columns corresponding to each record's fields. } \description{ From a \code{wastd_api_response}, turn the key \code{payload} (default: "features") into a \code{tibble:tibble}, and \code{tidyr::unnest_wider} the tibble into columns equivalent to the fields of the WAStD API serializer. If GeoJSON is found, the keys \code{geometry} (including \code{coordinates}) will remain unchanged, but the key \code{properties} will be unnested. } \details{ \lifecycle{stable} } \seealso{ Other api: \code{\link{build_auth}()}, \code{\link{download_minimal_wastd_turtledata}()}, \code{\link{download_wastd_sites}()}, \code{\link{download_wastd_turtledata}()}, \code{\link{download_wastd_users}()}, \code{\link{export_wastd_turtledata}()}, \code{\link{filter_wastd_turtledata_area}()}, \code{\link{filter_wastd_turtledata_seasons}()}, \code{\link{filter_wastd_turtledata}()}, \code{\link{handle_http_status}()}, \code{\link{wastd_GET}()}, \code{\link{wastd_POST}()}, \code{\link{wastd_bulk_post}()}, \code{\link{wastd_chunk_post}()}, \code{\link{wastd_create_update_skip}()}, \code{\link{wastd_post_one}()} } \concept{api}
feaa22a2b7501da4eab33f95d462fb76d40acdf0
178fdf4459e4817988b60b69e93fbe079446b4a5
/Load.R
1575612b790a099249f1ab71d8faf46f8045b794
[]
no_license
KaiqueS/Eletiva_Analise_De_Dados_UFPE_2021.1
c4de6ba486db0e4bfd196a2bf4e984d384b45f40
258ddd3e06931a61fa0f1b201e7d00f4a16d187e
refs/heads/main
2023-06-11T15:42:09.368480
2021-07-05T15:23:18
2021-07-05T15:23:18
357,022,682
0
0
null
null
null
null
ISO-8859-1
R
false
false
1,415
r
Load.R
install.packages( "microbenchmark" ) install.packages( "readxl" ) library( microbenchmark ) library( tidyverse ) library( plyr ) library( readxl ) setwd( "D:\\Trabalho\\Eletiva_Analise_De_Dados_UFPE_2021.1" ) sinistrosRecife2020Raw <- read.csv2('http://dados.recife.pe.gov.br/dataset/44087d2d-73b5-4ab3-9bd8-78da7436eed1/resource/fc1c8460-0406-4fff-b51a-e79205d1f1ab/download/acidentes_2020-novo.csv', sep = ';', encoding = 'UTF-8') sinistrosRecife2021Raw <- read.csv2('http://dados.recife.pe.gov.br/dataset/44087d2d-73b5-4ab3-9bd8-78da7436eed1/resource/2caa8f41-ccd9-4ea5-906d-f66017d6e107/download/acidentes_2021-jan.csv', sep = ';', encoding = 'UTF-8') sinistrosRecifeRaw <- rbind(sinistrosRecife2020Raw, sinistrosRecife2021Raw) # exporta em formato nativo do R saveRDS( sinistrosRecifeRaw, "sinistrosRecife.rds" ) # exporta em formato tabular (.csv) - padrão para interoperabilidade write.csv2( sinistrosRecifeRaw, "sinistrosRecife.csv" ) # Exporta em formato excel csv2 write_excel_csv2( sinistrosRecifeRaw, "sinistrosRecife.xlsx" ) # compara os processos de exportação, usando a função microbenchmark microbenchmark( a <- readRDS( 'sinistrosRecife.rds'), b <- read.csv2( 'sinistrosRecife.csv', sep = ';' ), times = 10L ) microbenchmark( a <- readRDS( 'sinistrosRecife.rds'), b <- readxl::read_xlsx( 'sinistrosRecife.xlsx' ), times = 10L ) teste <- readxl::read_xlsx( "sinistrosRecife.xlsx" )
a1cf612f3542daeba6fbf1d3d7754b976cf56447
b8f69e2a1d3d706f2d9b767b99c0df95b23ad56f
/man/release_questions.Rd
fa153448c3936e5e014317fdb7116e61b6808d32
[ "MIT" ]
permissive
cran/wilson
b03932a828d284a6b8b8b29411721727c6268ec0
e2dec1181e01d212b545a6ebfb53beee6320cf2f
refs/heads/master
2021-06-08T21:43:25.793829
2021-04-19T08:40:02
2021-04-19T08:40:02
145,903,340
0
0
null
null
null
null
UTF-8
R
false
true
364
rd
release_questions.Rd
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/release_questions.R \name{release_questions} \alias{release_questions} \title{Defines additional questions asked before CRAN submission. DO NOT EXPORT!} \usage{ release_questions() } \description{ Defines additional questions asked before CRAN submission. DO NOT EXPORT! }
22d8cc5e6e7c5ccee95f3c36a590e871c280d3a2
e8ec37267ed9441e229c4774ac20621806f69a81
/Scripts/Annotate.R
ce14f4c96136aa1c74c7cf227e7f42193cee2a17
[]
no_license
sohumgala/Single-Cell-RNA-Sequencing-Analysis
150e23229ee546ac40eef4e32f011c8f71beaf72
712e917b121adc2f030442346d81bc482f37bbf4
refs/heads/master
2023-07-08T18:57:13.124369
2021-08-22T15:34:25
2021-08-22T15:34:25
398,834,735
0
0
null
null
null
null
UTF-8
R
false
false
600
r
Annotate.R
# Once cell types have been determined, relabel the clusters so the UMAP
# shows human-readable cell-type names instead of numeric cluster ids.
# INPUT: cell ids based on previously specified annotation techniques
new.cluster.ids <- setNames(
  c("Naive CD4 T", "Memory CD4 T", "CD14+ Mono", "B", "CD8 T",
    "FCGR3A+ Mono", "NK", "DC", "Platelet"),
  levels(cells.filt)
)
# Swap the current identities of each cluster for the labels above.
cells.filt <- RenameIdents(cells.filt, new.cluster.ids)
# Draw the labelled UMAP without a legend; further customization of the
# plot is possible. Save the plot by exporting from the plots window.
DimPlot(cells.filt, reduction = "umap", label = TRUE, pt.size = 0.5) + NoLegend()
f8e1a240ba7257ce577a172c86930f6d65bc6471
70e015e71ce31e129c141ddfbcdbf5b200c52df4
/Content/how_we_compare_mcmc_performance/how_we_compare_mcmc_performance.R
a29822c6072efe60f3afdb90c33b2e134bb65f89
[]
no_license
lponisio/Vogelwarte_NIMBLE_workshop
ec258a845381621c303b779bb72c5b72924bbdd6
323a8ab63ba5b0199b2e3b368dfe2f51bfa17e1f
refs/heads/master
2020-06-04T00:58:16.528340
2018-04-25T11:31:03
2018-04-25T11:31:03
null
0
0
null
null
null
null
UTF-8
R
false
false
1,543
r
how_we_compare_mcmc_performance.R
## ----setup, include=FALSE------------------------------------------------
## Purled from an R Markdown lesson: build and run NIMBLE's default MCMC for
## the dipper CJS (capture-recapture) example, then inspect the samples and
## compute effective sample size -- the metric used to compare samplers.
library(methods) ## needed only when building documents outside of R
library(nimble)
library(mcmcplots)
## Locate the course materials; set your own nimble_course_dir if needed.
if(!exists("nimble_course_dir"))
  nimble_course_dir <- file.path(getwd(),'..')
cur_dir <- getwd()
## dipper_basic.R defines dipper_code / dipper_constants / dipper_data /
## dipper_inits used below; the working directory is restored afterwards.
setwd(file.path(nimble_course_dir, 'examples_code', 'CJS_dipper'))
source('dipper_basic.R')
setwd(cur_dir)

## ---- default-mcmc, eval = TRUE------------------------------------------
## Build the model, then the default MCMC configuration and sampler for it.
dipper_model <- nimbleModel(dipper_code, constants = dipper_constants,
                            data = dipper_data, inits = dipper_inits)
defaultMCMCconf <- configureMCMC(dipper_model)
defaultMCMC <- buildMCMC(defaultMCMCconf)
## We can compile both in one step
dipper_compiled <- compileNimble(dipper_model, defaultMCMC)
CdefaultMCMC <- dipper_compiled$defaultMCMC
## Illustration of running MCMC "directly"
CdefaultMCMC$run(5000)
defaultSamples <- as.matrix(CdefaultMCMC$mvSamples)
## Do burn-in manually when running in this mode (drop the first 1000 draws).
defaultSamples <- defaultSamples[1001:5000,]
## Write diagnostic plots (trace, density, autocorrelation) for all params.
dir.create('default_samples_plots', showWarnings = FALSE)
mcmcplot(defaultSamples, dir = 'default_samples_plots')

## ---- zoomed-trace-plot, eval = TRUE-------------------------------------
## Zoom in on 50 consecutive draws of p to eyeball mixing behaviour.
plot(defaultSamples[2051:2100, 'p'], type = 'b')

## ---- ess, eval = TRUE---------------------------------------------------
## Effective sample size: how many independent draws the chain is worth.
library(coda)
effectiveSize(defaultSamples)
282727bb4fdf62c606688af4f5be87adb43edc89
254937c1395588e176e61a81ee84bdccefc8ddfd
/man/getJobs.Rd
c4badac5a661c8dac73242e910b2fc89d81734dc
[]
no_license
cran/antaresEditObject
3fd126458f8b9e98a18788f9cf212b5480f8d9ec
f13a7f09e8c32612f236f5b92b23cbb45d7b0e61
refs/heads/master
2023-04-17T19:17:10.925966
2023-04-06T10:00:06
2023-04-06T10:00:06
162,731,154
0
0
null
null
null
null
UTF-8
R
false
true
606
rd
getJobs.Rd
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/API.R \name{getJobs} \alias{getJobs} \title{Retrieve API jobs} \usage{ getJobs(job_id = NULL, opts = antaresRead::simOptions()) } \arguments{ \item{job_id}{The job identifier, if \code{NULL} (default), retrieve all jobs.} \item{opts}{List of simulation parameters returned by the function \code{\link[antaresRead:setSimulationPath]{antaresRead::setSimulationPath()}}} } \value{ A \code{data.table} with information about jobs. } \description{ Retrieve API jobs } \examples{ \dontrun{ getJobs() } }
2164925dda50fdf46c4d6532f4613bffaefab168
698cd11b38d16eff0c6fd0429ea4106e5517fcc1
/Chapter 03 (foreign)/03_05 foreign for SAS.R
d74aaa8d0b3c6d48ed1b109fbc8ff56280a124ae
[]
no_license
mnr/R-Data-Science-High-Variety
f0707f0eaad176f53716b45ef6f2c1778bd55c85
026bd6660545df6a0f17e2c35a325dfe8a628d6b
refs/heads/master
2020-03-29T07:52:52.385823
2018-10-19T21:18:51
2018-10-19T21:18:51
149,683,442
0
0
null
null
null
null
UTF-8
R
false
false
893
r
03_05 foreign for SAS.R
# Copyright Mark Niemann-Ross, 2018
# Author: Mark Niemann-Ross. mark.niemannross@gmail.com
# LinkedIn: https://www.linkedin.com/in/markniemannross/
# Github: https://github.com/mnr
# More Learning: http://niemannross.com/link/mnratlil
# Description: R Programming in Data Science: High Variety Data

# use foreign to import and export sas

# Install only when missing -- an unconditional install.packages() would
# re-download the package on every run.
if (!requireNamespace("foreign", quietly = TRUE)) install.packages("foreign")
library(foreign)

# Some data to write to an excel range
Smalldf <- data.frame(thisThing = 1:5,
                      thatThing = 7:11,
                      AnotThing = LETTERS[1:5])

# read from SAS ----
# BUGFIX: the original executed a bare read.ssd(), which errors immediately
# because read.ssd() has required arguments (the SAS library directory and
# the member names) and also requires a local SAS installation. Shown here
# as a non-executed example instead:
# read.ssd(libname = "path/to/sas/library", sectionnames = "mydata")
# If you have no copy of SAS, use a utility to convert to csv.
# Additionally, look at the haven package for read_sas().

# write to SAS ----
# Produces a data file plus a SAS code file that knows how to read it.
write.foreign(Smalldf,
              datafile = "sasData.csv",
              codefile = "sasCode.sas",
              package = "SAS")
99a2af4fe0a5b4ac07d916bd95c9bf0881beddf6
25dee6e43b0efbcb68856c11d0b2e39804eb4fb3
/u5m_media_request.R
572e8150c621e0c56185ec3197ea7158d19fc501
[]
no_license
rburstein-IDM/scratch
43e3f72fe37eaa17f12dcf23d3eb543f1af41141
6eaa6bfad15dd80cb0773f4e75d1a5761ac984f2
refs/heads/master
2020-05-02T23:33:09.047264
2019-12-08T22:00:36
2019-12-08T22:00:36
178,283,413
0
0
null
null
null
null
UTF-8
R
false
false
749
r
u5m_media_request.R
## Media request: what share of Indonesian districts/cities met the SDG 3.2
## mortality targets in 2017 (U5MR < 25 and NNMR < 12 per 1000 live births)?
library(data.table)
library(sf)
library(ggplot2)
library(maps)

setwd('C:/Users/rburstein/Dropbox (IDM)/IHME/data')

# load and trim data
u5m_all <- fread('U5MR_LMICS_admin2_data_full.csv')

# Do we have Indonesia data comparable to the article which will be published
# ie: % of district/cities that have achieved SDG 3.2 targets and performance/
# progress 2000 - 2017 in neonatal, infant and U5 mortality?
idn_2017 <- u5m_all[year == 2017 & ADM0_NAME == 'Indonesia']

# SDG 3.2 thresholds, expressed as deaths per live birth
u5mr_target <- .025
nnmr_target <- .012

# Under-5 mortality: proportion and count of districts below the target,
# for the mean estimate and both ends of the uncertainty interval.
mean(idn_2017$u5mr_mean < u5mr_target)
sum(idn_2017$u5mr_mean < u5mr_target)
mean(idn_2017$u5mr_upper < u5mr_target)
sum(idn_2017$u5mr_upper < u5mr_target)
mean(idn_2017$u5mr_lower < u5mr_target)
sum(idn_2017$u5mr_lower < u5mr_target)

# Neonatal mortality: the same summaries against the neonatal target.
mean(idn_2017$nnmr_mean < nnmr_target)
sum(idn_2017$nnmr_mean < nnmr_target)
mean(idn_2017$nnmr_upper < nnmr_target)
sum(idn_2017$nnmr_upper < nnmr_target)
mean(idn_2017$nnmr_lower < nnmr_target)
sum(idn_2017$nnmr_lower < nnmr_target)
bea21dd0b9b56b387ecbda7da305cd576a01216f
50066dae4216d17bd6f0dcb9a11d872e73246eb6
/man/checkProvenance.Rd
34e93b278c08937f19eef6bd53087f458ce52013
[]
no_license
cran/PKNCA
11de9db2cb98279c79d06022415b8772e7c1f5ea
8f580da3e3c594e4e1be747cb2d8e35216784ed2
refs/heads/master
2023-05-10T16:54:19.131987
2023-04-29T18:30:02
2023-04-29T18:30:02
48,085,829
0
0
null
null
null
null
UTF-8
R
false
true
600
rd
checkProvenance.Rd
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/provenance.R \name{checkProvenance} \alias{checkProvenance} \title{Check the hash of an object to confirm its provenance.} \usage{ checkProvenance(object) } \arguments{ \item{object}{The object to check provenance for} } \value{ \code{TRUE} if the provenance is confirmed to be consistent, \code{FALSE} if the provenance is not consistent, or \code{NA} if provenance is not present. } \description{ Check the hash of an object to confirm its provenance. } \seealso{ \code{\link{addProvenance}} }
a1f8ebc203c3d7b03589b5aabb616751695435b6
1ed571c85cff621d21b04071af55d28ef1231c44
/script_BIC.R
7ae543f18bfd74abb308fcd8c06949d838f605dc
[]
no_license
amiaty/RFLLat
33f3f62867bb5ee01ce61780e35db4da3d3d7e06
758d86937d85d3b7bed48555e92d27b65d0c0a0e
refs/heads/main
2023-03-04T06:26:49.803295
2021-02-17T12:01:49
2021-02-17T12:01:49
327,432,774
0
1
null
2021-02-03T13:56:00
2021-01-06T21:25:58
C++
UTF-8
R
false
false
2,261
r
script_BIC.R
## BIC stands for Bayesian information criterion.
## For every simulated data set under ./data/ and each of the three methods,
## choose the optimal FLLat penalty parameters with FLLat.BIC() -- using the
## feature count chosen earlier (global `opt_feat`) -- and write feature /
## weight-heatmap plots as EPS files under ./outputs/Bic/.

## Load data
num_files <- length(list.files("./data/"))
files_name <- list.files(path = "./data/")
methods_name <- list("FLLat", "SFLLat", "RFLLat")

## One flat result slot per (file, method) pair; the expression
## (i + k + odd_val) maps the pair onto that flat index.
empty_vec <- vector(mode = "character", length = num_files * 3)
opt_lam <- list(method = empty_vec,
                fname = empty_vec,
                lam0 = numeric(num_files * 3),
                lam1 = numeric(num_files * 3),
                lam2 = numeric(num_files * 3))
results <- list(bicinfo = numeric(num_files * 3))  # coerced to a list on first assignment

odd_val <- -3
for (i in seq_len(num_files)) {
  odd_val <- odd_val + 2
  fname <- unlist(strsplit(files_name[i], split = '.', fixed = TRUE))[1]
  sim_data <- get(fname)  # the data object must already be loaded by name

  ## Run FLLat.BIC to choose optimal hyper parameters for a specific J.
  for (k in seq_along(methods_name)) {
    if (opt_feat$opt_feat_num[i + k + odd_val] != 0) {
      ## The original dispatched on methods_name[k], but all three branches
      ## made the identical call, so the branching is collapsed here.
      ## NOTE(review): presumably SFLLat / RFLLat should call different
      ## fitters -- confirm with the authors.
      result.bic <- FLLat.BIC(sim_data, J = opt_feat$opt_feat_num[i + k + odd_val])

      ## NOTE(review): assumes FLLat.BIC() returns lam0/lam1/lam2 -- confirm.
      opt_lam$lam0[i + k + odd_val] <- result.bic$lam0
      opt_lam$lam1[i + k + odd_val] <- result.bic$lam1
      opt_lam$lam2[i + k + odd_val] <- result.bic$lam2
      opt_lam$fname[i + k + odd_val] <- fname
      opt_lam$method[i + k + odd_val] <- methods_name[k]
      results$bicinfo[i + k + odd_val] <- list(result.bic)

      ## Plot the features for the optimal FLLat model.
      setEPS()
      postscript(paste(paste(paste("./outputs/Bic/features_", fname), methods_name[k], sep = "_"), "eps", sep = "."))
      plot(result.bic$opt.FLLat)
      ## BUGFIX: the original never closed this first device, leaking one
      ## open (and truncated) EPS file per iteration.
      dev.off()

      ## Plot a heatmap of the weights for the optimal FLLat model.
      postscript(paste(paste(paste("./outputs/Bic/features_heatmap_", fname), methods_name[k], sep = "_"), "eps", sep = "."))
      plot(result.bic$opt.FLLat, type = "weights")
      dev.off()
    }
  }
}

save(opt_lam, file = "./outputs/opt_lam.RData")
ce38ac3cb3f2a6de1b7ea615dcef2d1ab139fcf7
d0d061329421401283a3db1f8e7aa016e61888d7
/man/spider-package.Rd
001c3cd9edfb109465baf855b9420961de9bcd1e
[ "MIT" ]
permissive
boopsboops/spider
87885b53570a98aece6e7ca1ce600330d9b95d25
e93c5b4bc7f50168b8a155a6dca7c87dfbdef134
refs/heads/master
2021-05-12T07:38:37.413486
2019-03-07T21:43:43
2019-03-07T21:43:43
117,250,046
2
0
null
null
null
null
UTF-8
R
false
true
2,166
rd
spider-package.Rd
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/spider-package.R \docType{package} \name{spider-package} \alias{spider-package} \alias{spider} \title{Species Identity and Evolution in R} \description{ Spider: SPecies IDentity and Evolution in R, is an R package implementing a number of useful analyses for DNA barcoding studies and associated research into species delimitation and speciation. Included are functions for generating summary statistics from DNA barcode data, assessing specimen identification efficacy, and for testing and optimising divergence threshold limits. In terms of investigating evolutionary and taxonomic questions, techniques for sliding window, population aggregate, and nucleotide diagnostic analyses are also provided. } \details{ The complete list of functions can be displayed with \code{library(help=spider)}. More information, including a tutorial on the use of spider can be found at \code{http://spider.r-forge.r-project.org}. \tabular{ll}{ Package: \tab spider\cr Type: \tab Package\cr Version: \tab 1.4-2\cr Date: \tab 2017-05-13\cr License: \tab GPL\cr LazyLoad: \tab yes\cr } A few of the key functions provided by spider: DNA barcoding: \code{\link{bestCloseMatch}}, \code{\link{nearNeighbour}}, \code{\link{threshID}}, \code{\link{threshOpt}}, \code{\link{heatmapSpp}}. Sliding window: \code{\link{slidingWindow}}, \code{\link{slideAnalyses}}, \code{\link{slideBoxplots}}. Nucleotide diagnostics: \code{\link{nucDiag}}, \code{\link{rnucDiag}}. Morphological techniques: \code{\link{paa}}. } \references{ Brown S. D. J., Collins R. A., Boyer S., Lefort M.-C., Malumbres-Olarte J., Vink C. J., & Cruickshank R. H. 2012. SPIDER: an R package for the analysis of species identity and evolution, with particular reference to DNA barcoding. _Molecular Ecology Resources_ 12:562-565. doi: 10.1111/j.1755-0998.2011.03108.x } \seealso{ \code{\link{ape-package}}, \code{\link{pegas-package}}. 
} \author{ Samuel Brown, Rupert Collins, Stephane Boyer, Marie-Caroline Lefort, Jagoba Malumbres-Olarte, Cor Vink, Rob Cruickshank Maintainer: Samuel Brown <s_d_j_brown@hotmail.com> } \keyword{package}
d5186e3abdef6eb5dffe0c98d700711469d76271
7e849b23af37f1b2921372e08bc896e085cf205a
/HW3/HW3_control_variate.r
5c908d5dc750a14c2f29c1767d793b90dd5923c2
[]
no_license
letsjdosth/statComputing2
d4759073c282aeac8b1cf85e85922ebbc79b2230
623c7df13c6ad2e0a644270675f0eafb9b7d0e0d
refs/heads/master
2020-07-15T08:48:45.192258
2019-12-23T17:37:34
2019-12-23T17:37:34
205,524,663
0
0
null
null
null
null
UTF-8
R
false
false
500
r
HW3_control_variate.r
# Example 2 (HW3): control-variate variance reduction for the Monte Carlo
# estimate of  integral_0^1 exp(-u) / (1 + u^2) du  (true value ~ 0.5248).

# BUGFIX: the original drew runif() without a seed, so the experiment was
# not reproducible; fix the RNG state before sampling.
set.seed(2019)
m <- 10000  # number of Monte Carlo draws

# Plain Monte Carlo estimator
u <- runif(m)
mc.sample <- (exp(-u) / (1 + u^2))
mean(mc.sample)
var(mc.sample)

# Control-variate Monte Carlo: control g(u) = exp(-0.5) / (1 + u^2),
# whose exact mean is exp(-0.5) * pi / 4.
con.var.sample <- (exp(-0.5) / (1 + u^2))  # must reuse the same u draws
# Optimal coefficient minimizing the variance of the combined estimator.
lambda <- -cov(mc.sample, con.var.sample) / var(con.var.sample)
print(lambda)  # roughly -2.45
con.sample <- mc.sample + lambda * (con.var.sample - exp(-0.5) * pi / 4)
mean(con.sample)
var(con.sample)

# Improvement? Compare the two estimators' means and the variance reduction.
c(mean(mc.sample), mean(con.sample))
c(var(mc.sample), var(con.sample))
(var(mc.sample) - var(con.sample)) / var(mc.sample)
7cfb38fdbab282794801497ec7f4a30b3e942dbc
ecc3f86ed2f437c34c817761799e1179f8bee275
/man/joinrate.Rd
751c8ba81b9d2b9e186fd73f191275c795d53644
[]
no_license
cran/relsurv
8e6b4821d99fda97e58ab1ac284cbd12e3c9cde1
20447b8198707f9cebc678ec567a02c57ea4101b
refs/heads/master
2023-01-10T22:20:36.163357
2022-12-22T12:30:02
2022-12-22T12:30:02
17,699,136
2
0
null
null
null
null
UTF-8
R
false
false
1,455
rd
joinrate.Rd
\name{joinrate} \alias{joinrate} \title{Join ratetables} \description{ The function joins two or more objects organized as \code{ratetable} by adding a new dimension. } \usage{ joinrate(tables,dim.name="country") } \arguments{ \item{tables}{ a list of ratetables. If names are given, they are included as \code{dimnames}. } \item{dim.name}{ the name of the added dimension. } } \details{ This function joins two or more \code{ratetable} objects by adding a new dimension. The cutpoints of all the rate tables are compared and only the common intervals kept. If the intervals defined by the cutpoints are not of the same length, a warning message is displayed. Each rate table must have 3 dimensions, i.e. age, sex and year (the order is not important). } \value{An object of class \code{ratetable}.} \references{ Package: Pohar M., Stare J. (2006) "Relative survival analysis in R." Computer Methods and Programs in Biomedicine, \bold{81}: 272-278. Relative survival: Pohar, M., Stare, J. (2007) "Making relative survival analysis relatively easy." Computers in biology and medicine, \bold{37}: 1741-1749. } \examples{ #newpop <- joinrate(list(Arizona=survexp.az,Florida=survexp.fl, # Minnesota=survexp.mn),dim.name="state") } \seealso{\code{\link{ratetable}}, \code{\link{transrate.hld}}, \code{\link{transrate.hmd}}, \code{\link{transrate}}.} \keyword{survival}
6b257c36aa81e6e7f37f3f571b526a86cbc0e3e6
1714a940b25d785c13d53425e323ffdf5d306475
/cachematrix.R
900d2ed853c87064e8f1cf9c5b6a8f814cfceed9
[]
no_license
robmill/ProgrammingAssignment2
84e2b2e445260d9a88d75a07ee4fb82294108064
d21bdd83c7b7492cd0c7343f5a2b8eac2f25d69d
refs/heads/master
2021-01-21T03:59:15.211191
2016-07-22T23:52:20
2016-07-22T23:52:20
63,987,964
0
0
null
2016-07-22T22:47:12
2016-07-22T22:47:11
null
UTF-8
R
false
false
1,343
r
cachematrix.R
## Rob Miller
## Programming Assignment 2
## July 22, 2016
##
## cacheMatrix.R wraps a matrix so that its inverse is memoised:
##   * makeCacheMatrix() builds the wrapper (closures over the matrix and
##     its lazily computed inverse)
##   * cacheSolve() returns the inverse, computing it at most once per matrix

## Create a "cache matrix": a list of four closures that get/set the wrapped
## matrix and get/set its cached inverse. Assigning a new matrix via set()
## invalidates any previously cached inverse.
makeCacheMatrix <- function(x = matrix()) {
  inv <- NULL
  list(
    set = function(y) {
      x <<- y
      inv <<- NULL  # a new matrix invalidates the old inverse
    },
    get = function() x,
    setinverse = function(solve) inv <<- solve,
    getinverse = function() inv
  )
}

## Return the inverse of the matrix wrapped by `x` (as created by
## makeCacheMatrix). Serves the cached inverse when one exists; otherwise
## computes it with solve() and stores it back into the cache.
cacheSolve <- function(x, ...) {
  cached <- x$getinverse()
  if (is.null(cached)) {
    cached <- solve(x$get(), ...)
    x$setinverse(cached)
  } else {
    message("getting cached matrix")
  }
  cached
}
599916ccf6ad2ac6fb816c1eb404386ba1ecc42e
936a42930c9d9e4fa5911aa847510f56edcd3d5e
/Scalper-BH.R
bbc2fd468fc53debfc3f051c8d1d1aae839a4c64
[]
no_license
maglavis138/DashboardDataUpdate
46ccdbd2efd19436b6706026b73531123d20f8e9
3f61e70dea5ddf12b2c9b107698dc82a4a9d71cf
refs/heads/master
2021-01-22T03:57:51.785779
2017-09-06T04:29:35
2017-09-06T04:29:35
102,261,023
0
0
null
null
null
null
UTF-8
R
false
false
26,906
r
Scalper-BH.R
## Scalper-BH.R -- scrape post ids, page-level fan metrics and post-level
## insights for one Facebook page through the Graph API v2.6.
library(curl)
library(jsonlite)
library(plyr)

## Reporting window and the post-type filter ('all' = keep every type).
since = as.Date("2017-08-21")
until = as.Date("2017-08-31")
post_type = 'all'

##############################################################################################
########################################## INSIGHTS ##########################################
##############################################################################################

## Request one page of posts (id, link, created_time, type, message,
## description) published by page 150424302108405 between `since` and
## `until`. Returns the parsed JSON response (value of the last expression).
GetPostIds = function(since, until){
  base = "https://graph.facebook.com/v2.6/150424302108405/posts"
  cross= paste("?fields=id,link,created_time,type,message,description&limit=10&since=", since, "T00:00:00-8:00&until=", until,"T00:00:00-8:00&access_token=", sep = "")
  ## NOTE(review): hard-coded access token -- should live in an env var, not in source.
  token= "EAAZAEuju9Pm4BAIi3rDJoAJPPw9Lmf37nnuitWyIrLQRBppTIk4fE3IaIjyIZBRA5CEdQredeKxt3TpD7rIuugS2H9QhMaC2SjrZCOSZCO3d5iGRV8h1xmBPNsUTlD3a6kUnZCUbPT73MVwZBCqOBpTaYxtqslKfwZD"
  url = paste(base, cross, token, sep="")
  id_data = fromJSON(url)
}

## Walk the paginated feed, collecting the ids of matching posts.
id_data = GetPostIds(since, until)
posts = id_data$data
has_next_page = TRUE
num_processed = 0
statuses = vector()
## NOTE(review): datetime.datetime.now() is Python, not R -- this line errors
## at runtime; Sys.time() was almost certainly intended.
scrape_starttime = datetime.datetime.now()
while (has_next_page == TRUE ){
  for (i in posts$id){
    if (post_type == "all"){
      statuses = append(statuses, i)
      num_processed = num_processed + 1
      ## Progress heartbeat every 100 posts.
      if(num_processed %% 100 == 0){ print(paste(num_processed, 'Kekerino!!!')) }
    } ## Missing the date (original comment: "Falta la fecha")
    else if(posts$type[match(i,posts$id)] == post_type) {
      statuses = append(statuses, i)
      num_processed = num_processed + 1
      if(num_processed %% 100 == 0){ print(paste(num_processed, 'Kekerino!!!')) }
    } ## Missing the date (original comment: "Falta la fecha")
  }
  ## Follow the paging.next cursor until the API stops returning one.
  if('paging' %in% names(id_data)){
    id_data = fromJSON(id_data$paging$'next')
    posts = id_data$data
  } else{
    has_next_page = FALSE
  }
}
print(paste('Severo perro...', num_processed, "Posts Procesados"))

## Fetch one post with engagement summaries (comments, shares, reactions).
GetPostData = function(status){
  base = paste("https://graph.facebook.com/v2.6/", status, sep="")
  cross= "?fields=id,created_time,message,link,name,type,comments.limit(1).summary(true),shares,reactions.limit(1).summary(true),full_picture&limit=100&access_token="
  token= "EAAZAEuju9Pm4BAIi3rDJoAJPPw9Lmf37nnuitWyIrLQRBppTIk4fE3IaIjyIZBRA5CEdQredeKxt3TpD7rIuugS2H9QhMaC2SjrZCOSZCO3d5iGRV8h1xmBPNsUTlD3a6kUnZCUbPT73MVwZBCqOBpTaYxtqslKfwZD"
  url = paste(base, cross, token, sep="")
  post_data = fromJSON(url)
  return(post_data)
}

## Fetch the lifetime insights metrics for one post.
getFacebookPageFeedData = function(status){
  base = paste("https://graph.facebook.com/v2.6/", status, "/insights/", sep = '')
  cross= "?limit=100&period=lifetime&access_token="
  token= "EAAZAEuju9Pm4BAIi3rDJoAJPPw9Lmf37nnuitWyIrLQRBppTIk4fE3IaIjyIZBRA5CEdQredeKxt3TpD7rIuugS2H9QhMaC2SjrZCOSZCO3d5iGRV8h1xmBPNsUTlD3a6kUnZCUbPT73MVwZBCqOBpTaYxtqslKfwZD"
  url = paste(base, cross, token, sep = '')
  data = fromJSON(url)
  return(data)
}

## Page-level fan metrics are fetched in 30-day chunks.
endate = since + 30
if(endate > until){ endate = until}

## Fetch page_fans / page_fan_adds between `since` and the chunk end.
## NOTE(review): the request uses the *global* `endate`, not the `until`
## parameter -- verify this is intended.
GetPageData = function(since, until){
  base = "https://graph.facebook.com/v2.6/150424302108405/insights"
  cross= paste("/page_fans,page_fan_adds?since=", since, "T00:00:00&until=", endate,"T00:00:00&access_token=", sep = "")
  token= "EAAZAEuju9Pm4BAIi3rDJoAJPPw9Lmf37nnuitWyIrLQRBppTIk4fE3IaIjyIZBRA5CEdQredeKxt3TpD7rIuugS2H9QhMaC2SjrZCOSZCO3d5iGRV8h1xmBPNsUTlD3a6kUnZCUbPT73MVwZBCqOBpTaYxtqslKfwZD"
  url = paste(base, cross, token, sep="")
  id_data = fromJSON(url)
}

## Accumulate daily fan counts across the 30-day chunks.
## NOTE(review): `pi` shadows R's built-in constant pi within this script.
pi = data.frame()
finish = FALSE
while(finish == FALSE){
  if(endate == until){ finish = TRUE}
  pagedata = GetPageData(since, until)
  ## values[[1]][2] selects the end_time column as a one-column data.frame,
  ## so the bound column apparently keeps the name "end_time" (it is renamed
  ## to `date` just below) -- presumably intended; verify.
  pi = rbind(pi, data.frame('daily_new_likes' = pagedata$data$values[[2]]$value, 'total_likes' = pagedata$data$values[[1]]$value, 'date' = pagedata$data$values[[1]][2]))
  since = endate
  if(endate + 30 >= until){ endate = until} else{endate = endate + 30}
}
pi$date = format(as.Date(pi$end_time), "%Y-%m-%d")
pi = within(pi, rm(end_time))

################
## Assemble one row of post-level metrics for a single status id.
## (This function's definition continues beyond this excerpt.)
processFacebookPageFeedStatus = function(status){
  post_data = GetPostData(status)
  data = getFacebookPageFeedData(status)
  status_id = post_data['id'][[1]]
  permalink = paste('https://www.facebook.com/', post_data['id'][[1]], sep = '')
  post_type = post_data['type'][[1]]
  full_picture = post_data['full_picture'][[1]]
if(is.null(post_data['shares']$shares$count[[1]])){shares_on_post = 0} else {shares_on_post = post_data['shares']$shares$count[[1]]} if(is.null(post_data['comments']$comments$summary$total_count[[1]])){comments_on_post = 0} else {comments_on_post = post_data['comments']$comments$summary$total_count[[1]]} if(is.null(post_data['reactions']$reactions$summary$total_count[[1]])){likes_on_post = 0} else {likes_on_post = post_data['reactions']$reactions$summary$total_count[[1]]} if(is.null(post_data['message'][[1]])){sharetext = ''} else {sharetext = post_data['message'][[1]]} if(is.null(post_data['name'][[1]])){headline = ''} else {headline = post_data['name'][[1]]} if(is.null(post_data['link'][[1]])){link = ''} else {link = post_data['link'][[1]]} status_published = format(as.POSIXct(strptime(post_data['created_time'][[1]], "%Y-%m-%dT%H:%M:%OS", tz="UTC")), tz="America/Los_Angeles",usetz=TRUE) status_published = as.POSIXct(status_published) created_time = format(status_published, '%Y-%m-%d %H:%M:%S') date = format(status_published, '%Y-%m-%d') hour = format(status_published, "%H:%M") post_consumptions_by_type = data$data$values[[match("post_consumptions_by_type", data$data$name)]][[1]] colnames(post_consumptions_by_type) = gsub('\\s+', '_', colnames(post_consumptions_by_type)) post_story_adds_by_action_type = data$data$values[[match("post_story_adds_by_action_type", data$data$name)]][[1]] colnames(post_story_adds_by_action_type) = gsub('\\s+', '_', colnames(post_story_adds_by_action_type)) if(is.null(data$data$values[[match('post_impressions',data$data$name)]][[1]])){post_impressions = 0} else {post_impressions =data$data$values[[match('post_impressions',data$data$name)]][[1]]} if(is.null(data$data$values[[match('post_impressions_unique',data$data$name)]][[1]])){post_reach = 0} else {post_reach =data$data$values[[match('post_impressions_unique',data$data$name)]][[1]]} 
if(is.null(data$data$values[[match('post_impressions_fan_unique',data$data$name)]][[1]])){post_reach_fan_unique = 0} else {post_reach_fan_unique =data$data$values[[match('post_impressions_fan_unique',data$data$name)]][[1]]} if(is.null(data$data$values[[match('post_impressions_viral_unique',data$data$name)]][[1]])){post_reach_viral_unique = 0} else {post_reach_viral_unique =data$data$values[[match('post_impressions_viral_unique',data$data$name)]][[1]]} if(is.null(post_story_adds_by_action_type$comment)){total_comments = 0} else {total_comments = post_story_adds_by_action_type$comment} if(is.null(post_story_adds_by_action_type$like)){total_likes = 0} else {total_likes = post_story_adds_by_action_type$like} if(is.null(post_story_adds_by_action_type$share)){total_shares = 0} else {total_shares = post_story_adds_by_action_type$share} likes_on_shares = total_likes - likes_on_post comments_on_shares = total_comments - comments_on_post shares_on_shares = total_shares - shares_on_post if(likes_on_shares<0){likes_on_shares = 0} if(comments_on_shares<0){comments_on_shares = 0} if(shares_on_shares<0){shares_on_shares = 0} if(is.null(post_consumptions_by_type$link_clicks)){link_clicks = 0} else {link_clicks = post_consumptions_by_type$link_clicks} if(is.null(post_consumptions_by_type$photo_view)){photo_view = 0} else {photo_view = post_consumptions_by_type$photo_view} if(is.null(post_consumptions_by_type$video_play)){video_play = 0} else {video_play = post_consumptions_by_type$video_play} if(is.null(post_consumptions_by_type$other_clicks)){other_clicks = 0} else {other_clicks = post_consumptions_by_type$other_clicks} if(is.null(data$data$values[[match('post_negative_feedback',data$data$name)]][[1]])){post_negative_feedback = 0} else {post_negative_feedback =data$data$values[[match('post_negative_feedback',data$data$name)]][[1]]} if(is.null(data$data$values[[match('post_story_adds',data$data$name)]][[1]])){post_story_adds = 0} else {post_story_adds 
=data$data$values[[match('post_story_adds',data$data$name)]][[1]]} if(is.null(data$data$values[[match('post_stories',data$data$name)]][[1]])){post_stories = 0} else {post_stories =data$data$values[[match('post_stories',data$data$name)]][[1]]} if(is.null(data$data$values[[match('post_storytellers',data$data$name)]][[1]])){post_storytellers = 0} else {post_storytellers =data$data$values[[match('post_storytellers',data$data$name)]][[1]]} if(is.null(data$data$values[[match('post_consumptions',data$data$name)]][[1]])){post_consumptions = 0} else {post_consumptions =data$data$values[[match('post_consumptions',data$data$name)]][[1]]} if(is.null(data$data$values[[match('post_engaged_users',data$data$name)]][[1]])){post_engaged_users = 0} else {post_engaged_users =data$data$values[[match('post_engaged_users',data$data$name)]][[1]]} if(is.null(data$data$values[[match('post_engaged_fan',data$data$name)]][[1]])){post_engaged_fan = 0} else {post_engaged_fan =data$data$values[[match('post_engaged_fan',data$data$name)]][[1]]} if(is.null(data$data$values[[match('post_video_complete_views_30s_autoplayed',data$data$name)]][[1]])){post_video_complete_views_30s_autoplayed = 0} else {post_video_complete_views_30s_autoplayed =data$data$values[[match('post_video_complete_views_30s_autoplayed',data$data$name)]][[1]]} if(is.null(data$data$values[[match('post_video_complete_views_30s_clicked_to_play',data$data$name)]][[1]])){post_video_complete_views_30s_clicked_to_play = 0} else {post_video_complete_views_30s_clicked_to_play =data$data$values[[match('post_video_complete_views_30s_clicked_to_play',data$data$name)]][[1]]} if(is.null(data$data$values[[match('post_video_complete_views_30s_organic',data$data$name)]][[1]])){post_video_complete_views_30s_organic = 0} else {post_video_complete_views_30s_organic =data$data$values[[match('post_video_complete_views_30s_organic',data$data$name)]][[1]]} 
if(is.null(data$data$values[[match('post_video_complete_views_30s_paid',data$data$name)]][[1]])){post_video_complete_views_30s_paid = 0} else {post_video_complete_views_30s_paid =data$data$values[[match('post_video_complete_views_30s_paid',data$data$name)]][[1]]} if(is.null(data$data$values[[match('post_video_complete_views_30s_unique',data$data$name)]][[1]])){post_video_complete_views_30s_unique = 0} else {post_video_complete_views_30s_unique =data$data$values[[match('post_video_complete_views_30s_unique',data$data$name)]][[1]]} if(is.null(data$data$values[[match('post_video_avg_time_watched',data$data$name)]][[1]])){post_video_avg_time_watched = 0} else {post_video_avg_time_watched =data$data$values[[match('post_video_avg_time_watched',data$data$name)]][[1]]} if(is.null(data$data$values[[match('post_video_complete_views_organic_unique',data$data$name)]][[1]])){post_video_complete_views_organic_unique = 0} else {post_video_complete_views_organic_unique =data$data$values[[match('post_video_complete_views_organic_unique',data$data$name)]][[1]]} if(is.null(data$data$values[[match('post_video_length',data$data$name)]][[1]])){post_video_length = 0} else {post_video_length =data$data$values[[match('post_video_length',data$data$name)]][[1]]} if(is.null(data$data$values[[match('post_video_views',data$data$name)]][[1]])){post_video_views = 0} else {post_video_views =data$data$values[[match('post_video_views',data$data$name)]][[1]]} if(is.null(data$data$values[[match('post_video_views_autoplayed',data$data$name)]][[1]])){post_video_views_autoplayed = 0} else {post_video_views_autoplayed =data$data$values[[match('post_video_views_autoplayed',data$data$name)]][[1]]} if(is.null(data$data$values[[match('post_video_views_clicked_to_play',data$data$name)]][[1]])){post_video_views_clicked_to_play = 0} else {post_video_views_clicked_to_play =data$data$values[[match('post_video_views_clicked_to_play',data$data$name)]][[1]]} 
if(is.null(data$data$values[[match('post_video_views_10s_unique',data$data$name)]][[1]])){post_video_views_10s_unique = 0} else {post_video_views_10s_unique =data$data$values[[match('post_video_views_10s_unique',data$data$name)]][[1]]} if(is.null(data$data$values[[match('post_video_views_10s_autoplayed',data$data$name)]][[1]])){post_video_views_10s_autoplayed = 0} else {post_video_views_10s_autoplayed =data$data$values[[match('post_video_views_10s_autoplayed',data$data$name)]][[1]]} if(is.null(data$data$values[[match('post_video_views_10s_clicked_to_play',data$data$name)]][[1]])){post_video_views_10s_clicked_to_play = 0} else {post_video_views_10s_clicked_to_play =data$data$values[[match('post_video_views_10s_clicked_to_play',data$data$name)]][[1]]} if(is.null(data$data$values[[match('post_video_views_10s_sound_on',data$data$name)]][[1]])){post_video_views_10s_sound_on = 0} else {post_video_views_10s_sound_on =data$data$values[[match('post_video_views_10s_sound_on',data$data$name)]][[1]]} if(is.null(data$data$values[[match('post_video_views_sound_on',data$data$name)]][[1]])){post_video_views_sound_on = 0} else {post_video_views_sound_on =data$data$values[[match('post_video_views_sound_on',data$data$name)]][[1]]} if(is.null(data$data$values[match('post_video_retention_graph',data$data$name)][[1]][,1]$'0')){s0 = ''} else {s0 = data$data$values[match('post_video_retention_graph',data$data$name)][[1]][,1]$'0'} if(is.null(data$data$values[match('post_video_retention_graph',data$data$name)][[1]][,1]$'1')){s1 = ''} else {s1 = data$data$values[match('post_video_retention_graph',data$data$name)][[1]][,1]$'1'} if(is.null(data$data$values[match('post_video_retention_graph',data$data$name)][[1]][,1]$'2')){s2 = ''} else {s2 = data$data$values[match('post_video_retention_graph',data$data$name)][[1]][,1]$'2'} if(is.null(data$data$values[match('post_video_retention_graph',data$data$name)][[1]][,1]$'3')){s3 = ''} else {s3 = 
data$data$values[match('post_video_retention_graph',data$data$name)][[1]][,1]$'3'} if(is.null(data$data$values[match('post_video_retention_graph',data$data$name)][[1]][,1]$'4')){s4 = ''} else {s4 = data$data$values[match('post_video_retention_graph',data$data$name)][[1]][,1]$'4'} if(is.null(data$data$values[match('post_video_retention_graph',data$data$name)][[1]][,1]$'5')){s5 = ''} else {s5 = data$data$values[match('post_video_retention_graph',data$data$name)][[1]][,1]$'5'} if(is.null(data$data$values[match('post_video_retention_graph',data$data$name)][[1]][,1]$'6')){s6 = ''} else {s6 = data$data$values[match('post_video_retention_graph',data$data$name)][[1]][,1]$'6'} if(is.null(data$data$values[match('post_video_retention_graph',data$data$name)][[1]][,1]$'7')){s7 = ''} else {s7 = data$data$values[match('post_video_retention_graph',data$data$name)][[1]][,1]$'7'} if(is.null(data$data$values[match('post_video_retention_graph',data$data$name)][[1]][,1]$'8')){s8 = ''} else {s8 = data$data$values[match('post_video_retention_graph',data$data$name)][[1]][,1]$'8'} if(is.null(data$data$values[match('post_video_retention_graph',data$data$name)][[1]][,1]$'9')){s9 = ''} else {s9 = data$data$values[match('post_video_retention_graph',data$data$name)][[1]][,1]$'9'} if(is.null(data$data$values[match('post_video_retention_graph',data$data$name)][[1]][,1]$'10')){s10 = ''} else {s10 = data$data$values[match('post_video_retention_graph',data$data$name)][[1]][,1]$'10'} if(is.null(data$data$values[match('post_video_retention_graph',data$data$name)][[1]][,1]$'11')){s11 = ''} else {s11 = data$data$values[match('post_video_retention_graph',data$data$name)][[1]][,1]$'11'} if(is.null(data$data$values[match('post_video_retention_graph',data$data$name)][[1]][,1]$'12')){s12 = ''} else {s12 = data$data$values[match('post_video_retention_graph',data$data$name)][[1]][,1]$'12'} if(is.null(data$data$values[match('post_video_retention_graph',data$data$name)][[1]][,1]$'13')){s13 = ''} else {s13 = 
data$data$values[match('post_video_retention_graph',data$data$name)][[1]][,1]$'13'} if(is.null(data$data$values[match('post_video_retention_graph',data$data$name)][[1]][,1]$'14')){s14 = ''} else {s14 = data$data$values[match('post_video_retention_graph',data$data$name)][[1]][,1]$'14'} if(is.null(data$data$values[match('post_video_retention_graph',data$data$name)][[1]][,1]$'15')){s15 = ''} else {s15 = data$data$values[match('post_video_retention_graph',data$data$name)][[1]][,1]$'15'} if(is.null(data$data$values[match('post_video_retention_graph',data$data$name)][[1]][,1]$'16')){s16 = ''} else {s16 = data$data$values[match('post_video_retention_graph',data$data$name)][[1]][,1]$'16'} if(is.null(data$data$values[match('post_video_retention_graph',data$data$name)][[1]][,1]$'17')){s17 = ''} else {s17 = data$data$values[match('post_video_retention_graph',data$data$name)][[1]][,1]$'17'} if(is.null(data$data$values[match('post_video_retention_graph',data$data$name)][[1]][,1]$'18')){s18 = ''} else {s18 = data$data$values[match('post_video_retention_graph',data$data$name)][[1]][,1]$'18'} if(is.null(data$data$values[match('post_video_retention_graph',data$data$name)][[1]][,1]$'19')){s19 = ''} else {s19 = data$data$values[match('post_video_retention_graph',data$data$name)][[1]][,1]$'19'} if(is.null(data$data$values[match('post_video_retention_graph',data$data$name)][[1]][,1]$'20')){s20 = ''} else {s20 = data$data$values[match('post_video_retention_graph',data$data$name)][[1]][,1]$'20'} if(is.null(data$data$values[match('post_video_retention_graph',data$data$name)][[1]][,1]$'21')){s21 = ''} else {s21 = data$data$values[match('post_video_retention_graph',data$data$name)][[1]][,1]$'21'} if(is.null(data$data$values[match('post_video_retention_graph',data$data$name)][[1]][,1]$'22')){s22 = ''} else {s22 = data$data$values[match('post_video_retention_graph',data$data$name)][[1]][,1]$'22'} 
if(is.null(data$data$values[match('post_video_retention_graph',data$data$name)][[1]][,1]$'23')){s23 = ''} else {s23 = data$data$values[match('post_video_retention_graph',data$data$name)][[1]][,1]$'23'} if(is.null(data$data$values[match('post_video_retention_graph',data$data$name)][[1]][,1]$'24')){s24 = ''} else {s24 = data$data$values[match('post_video_retention_graph',data$data$name)][[1]][,1]$'24'} if(is.null(data$data$values[match('post_video_retention_graph',data$data$name)][[1]][,1]$'25')){s25 = ''} else {s25 = data$data$values[match('post_video_retention_graph',data$data$name)][[1]][,1]$'25'} if(is.null(data$data$values[match('post_video_retention_graph',data$data$name)][[1]][,1]$'26')){s26 = ''} else {s26 = data$data$values[match('post_video_retention_graph',data$data$name)][[1]][,1]$'26'} if(is.null(data$data$values[match('post_video_retention_graph',data$data$name)][[1]][,1]$'27')){s27 = ''} else {s27 = data$data$values[match('post_video_retention_graph',data$data$name)][[1]][,1]$'27'} if(is.null(data$data$values[match('post_video_retention_graph',data$data$name)][[1]][,1]$'28')){s28 = ''} else {s28 = data$data$values[match('post_video_retention_graph',data$data$name)][[1]][,1]$'28'} if(is.null(data$data$values[match('post_video_retention_graph',data$data$name)][[1]][,1]$'29')){s29 = ''} else {s29 = data$data$values[match('post_video_retention_graph',data$data$name)][[1]][,1]$'29'} if(is.null(data$data$values[match('post_video_retention_graph',data$data$name)][[1]][,1]$'30')){s30 = ''} else {s30 = data$data$values[match('post_video_retention_graph',data$data$name)][[1]][,1]$'30'} if(is.null(data$data$values[match('post_video_retention_graph',data$data$name)][[1]][,1]$'31')){s31 = ''} else {s31 = data$data$values[match('post_video_retention_graph',data$data$name)][[1]][,1]$'31'} if(is.null(data$data$values[match('post_video_retention_graph',data$data$name)][[1]][,1]$'32')){s32 = ''} else {s32 = 
data$data$values[match('post_video_retention_graph',data$data$name)][[1]][,1]$'32'} if(is.null(data$data$values[match('post_video_retention_graph',data$data$name)][[1]][,1]$'33')){s33 = ''} else {s33 = data$data$values[match('post_video_retention_graph',data$data$name)][[1]][,1]$'33'} if(is.null(data$data$values[match('post_video_retention_graph',data$data$name)][[1]][,1]$'34')){s34 = ''} else {s34 = data$data$values[match('post_video_retention_graph',data$data$name)][[1]][,1]$'34'} if(is.null(data$data$values[match('post_video_retention_graph',data$data$name)][[1]][,1]$'35')){s35 = ''} else {s35 = data$data$values[match('post_video_retention_graph',data$data$name)][[1]][,1]$'35'} if(is.null(data$data$values[match('post_video_retention_graph',data$data$name)][[1]][,1]$'36')){s36 = ''} else {s36 = data$data$values[match('post_video_retention_graph',data$data$name)][[1]][,1]$'36'} if(is.null(data$data$values[match('post_video_retention_graph',data$data$name)][[1]][,1]$'37')){s37 = ''} else {s37 = data$data$values[match('post_video_retention_graph',data$data$name)][[1]][,1]$'37'} if(is.null(data$data$values[match('post_video_retention_graph',data$data$name)][[1]][,1]$'38')){s38 = ''} else {s38 = data$data$values[match('post_video_retention_graph',data$data$name)][[1]][,1]$'38'} if(is.null(data$data$values[match('post_video_retention_graph',data$data$name)][[1]][,1]$'39')){s39 = ''} else {s39 = data$data$values[match('post_video_retention_graph',data$data$name)][[1]][,1]$'39'} if(is.null(data$data$values[match('post_video_retention_graph',data$data$name)][[1]][,1]$'40')){s40 = ''} else {s40 = data$data$values[match('post_video_retention_graph',data$data$name)][[1]][,1]$'40'} page_total_likes = pi$total_likes[match(date, pi$date)] page_new_likes = pi$daily_new_likes[match(date, pi$date)] result = 
as.data.frame(t(as.matrix(c(status_id,permalink,post_type,sharetext,headline,link,full_picture,created_time,date,hour,page_total_likes,page_new_likes,post_impressions,post_reach,post_reach_fan_unique,post_reach_viral_unique,comments_on_post,likes_on_post,shares_on_post,total_comments,total_likes,total_shares,comments_on_shares,likes_on_shares,shares_on_shares,link_clicks,photo_view,video_play,other_clicks,post_negative_feedback,post_story_adds,post_stories,post_storytellers,post_consumptions,post_engaged_users,post_engaged_fan,post_video_complete_views_30s_autoplayed,post_video_complete_views_30s_clicked_to_play,post_video_complete_views_30s_organic,post_video_complete_views_30s_paid,post_video_complete_views_30s_unique,post_video_avg_time_watched,post_video_complete_views_organic_unique,post_video_length,post_video_views,post_video_views_autoplayed,post_video_views_clicked_to_play,post_video_views_10s_unique,post_video_views_10s_autoplayed,post_video_views_10s_clicked_to_play,post_video_views_10s_sound_on,post_video_views_sound_on, s0, s1, s2, s3, s4, s5, s6, s7, s8, s9, s10, s11, s12, s13, s14, s15, s16, s17, s18, s19, s20, s21, s22, s23, s24, s25, s26, s27, s28, s29, s30, s31, s32, s33, s34, s35, s36, s37, s38, s39, s40)))) return(result) } # statuses = 
c('1405630409737397_1510785635888540','1405630409737397_1511436792490091','1405630409737397_1511436792490091','1405630409737397_1512136232420147','1405630409737397_1532264077074029','1405630409737397_1532392773727826','1405630409737397_1560033190963784','1405630409737397_1647037648930004','1405630409737397_1653348084965627','1405630409737397_1653363801630722','1405630409737397_1653349724965463','1405630409737397_1653346828299086','1405630409737397_1653375138296255','1405630409737397_1653423528291416','1405630409737397_1653337191633383','1405630409737397_1653411344959301','1405630409737397_1653338324966603','1405630409737397_1653295401637562','1405630409737397_1653423781624724','1405630409737397_1653339828299786','1405630409737397_1653285528305216','1405630409737397_1653425824957853','1405630409737397_1653715424928893','1405630409737397_1653298638303905','1405630409737397_1653754348258334','1405630409737397_1653786898255079','1405630409737397_1653773454923090','1405630409737397_1653837734916662','1405630409737397_1653864711580631','1405630409737397_1653826244917811','1405630409737397_1653823791584723','1405630409737397_1653860541581048','1405630409737397_1653841741582928','1405630409737397_1653883098245459','1405630409737397_1653829834917452','1405630409737397_1654051918228577','1405630409737397_1653345221632580','1405630409737397_1654052261561876','1405630409737397_1650206555279780','1405630409737397_1653850838248685','1405630409737397_1654052101561892','1405630409737397_1653253214975114','1405630409737397_1654294671537635','1405630409737397_1653851061581996','1405630409737397_1653773421589760','1405630409737397_1654295988204170','1405630409737397_1653304694969966','1405630409737397_1653302434970192','1405630409737397_1650666325233803','1405630409737397_1654435468190222','1405630409737397_1654435698190199','1405630409737397_1659396867694082','1405630409737397_1671326359834466','1405630409737397_1673861356247633','1405630409737397_1673848899582212','1405630409737397_
1682996722000763') np = 0 final = data.frame() for(status in statuses){ final = rbind.fill(final, processFacebookPageFeedStatus(status)) np = np +1 if(np %% 20 == 0){ print(paste(np, 'Posts Procesados (Woooooot!!?!!!?)'))} } colnames(final) = c('status_id','permalink','post_type','sharetext','headline','link','full_picture','created_time','date','hour','page_total_likes','page_new_likes','post_impressions','post_reach','post_reach_fan_unique','post_reach_viral_unique','comments_on_post','likes_on_post','shares_on_post','total_comments','total_likes','total_shares','comments_on_shares','likes_on_shares','shares_on_shares','link_clicks','photo_view','video_play','other_clicks','post_negative_feedback','post_story_adds','post_stories','post_storytellers','post_consumptions','post_engaged_users','post_engaged_fan','post_video_complete_views_30s_autoplayed','post_video_complete_views_30s_clicked_to_play','post_video_complete_views_30s_organic','post_video_complete_views_30s_paid','post_video_complete_views_30s_unique','post_video_avg_time_watched','post_video_complete_views_organic_unique','post_video_length','post_video_views','post_video_views_autoplayed','post_video_views_clicked_to_play','post_video_views_10s_unique','post_video_views_10s_autoplayed','post_video_views_10s_clicked_to_play','post_video_views_10s_sound_on','post_video_views_sound_on','s0', 's1', 's2', 's3', 's4', 's5', 's6', 's7', 's8', 's9', 's10', 's11', 's12', 's13', 's14', 's15', 's16', 's17', 's18', 's19', 's20', 's21', 's22', 's23', 's24', 's25', 's26', 's27', 's28', 's29', 's30', 's31', 's32', 's33', 's34', 's35', 's36', 's37', 's38', 's39', 's40') write.csv(final, paste("Bad Hombres - ", as.character(Sys.Date()), ".csv"), row.names = FALSE)
b67a153efd99da5c1025fc1f55488056a3e447c1
818dd3954e873a4dcb8251d8f5f896591942ead7
/Mouse/RNASequencing/SNPcalling/snps.R
c8abc041632f3997303dc2b5d9e4d3e19ee2e511
[]
no_license
DannyArends/HU-Berlin
92cefa16dcaa1fe16e58620b92e41805ebef11b5
16394f34583e3ef13a460d339c9543cd0e7223b1
refs/heads/master
2023-04-28T07:19:38.039132
2023-04-27T15:29:29
2023-04-27T15:29:29
20,514,898
3
1
null
null
null
null
UTF-8
R
false
false
245
r
snps.R
# SNP calling using the population VCF # # copyright (c) 2014-2020 - Brockmann group - HU Berlin, Danny Arends # last modified Dec, 2014 # first written Dec, 2014 setwd("E:/Mouse/RNA/Sequencing/Reciprocal Cross B6 BFMI by MPI/ReAnalysisSNPs")
b750b57961a192f95602feb7855bf687529d2a75
da907c6cf0c26266ecf364a0fafb3f3a2615d58f
/day05 - slope/day05_slope.R
eae31ac9381e5598607564d5ad7b6f08e3aab467
[]
no_license
MaiaPelletier/30DayChartChallenge
08a1ff56b4aca5f7694025bbfd34e926e9aa1824
23d9c67a18a06e03fd5c0c21f71a230d28c5ff35
refs/heads/main
2023-04-10T07:09:01.669797
2021-04-19T00:53:15
2021-04-19T00:53:15
353,796,540
5
2
null
null
null
null
UTF-8
R
false
false
3,530
r
day05_slope.R
# day 05 - slope --------------------------------------------------- library(tidyverse) library(here) library(janitor) library(jsonlite) library(lubridate) # data import ------------------------------------------------------------- # Function to clean the JSON files that my spotify data is stored in read_my_streaming_data <- function(file) { read_json(file, simplifyVector = TRUE) %>% as_tibble() %>% clean_names() %>% mutate( end_time = ymd_hm(end_time), date = date(end_time) ) } # List of the streaming data files my_streaming_files <- list.files(here("day05 - slope", "raw data"), full.names = TRUE) # Map cleaning function to files and bind rows to a tibble my_streaming_data <- map_dfr(my_streaming_files, read_my_streaming_data) # data transformation ----------------------------------------------------- # Podcasts to filter out of data podcasts <- c("Dungeons and Daddies", "You're Wrong About", "Revolutions", "The Daily Zeitgeist", "The Blasting Company") # Data of my top artists of 2020 top_artists <- my_streaming_data %>% filter(!artist_name %in% podcasts) %>% count(artist_name, sort = T) %>% top_n(5) # Data from the beginning of 2020 / end of 2020 data <- my_streaming_data %>% filter(date <= min(date) + 60 | date >= max(date) - 60) %>% filter(artist_name %in% top_artists$artist_name) %>% mutate( time_period = ifelse(date <= min(date) + 30, "Beginning of 2020", "End of 2020") ) %>% group_by(time_period) %>% count(artist_name, sort = TRUE) %>% mutate(time_period_num = ifelse(time_period == "Beginning of 2020", 0, 2)) # Write data to csv to link to in alt text on Twitter write_csv(data, here("day05 - slope", "data", "my_top_artists.csv")) # build plot -------------------------------------------------------------- # Load fonts extrafont::loadfonts(device = "win") # Colour scale palette morbid_stuff_pal <- c( "#ECF2CD", "#DAE57B", "#779169", "#8DC4C1", "#B2DDBF" ) data %>% mutate( align = ifelse(time_period_num == 0, "right", "left"), axis_nudge = 
ifelse(time_period_num == 0, -0.25, 2.25) ) %>% ggplot(aes(time_period_num, n)) + geom_segment( aes(x = time_period_num, xend = time_period_num, y = 0, yend = 130), color = "#3C4153" ) + geom_line( aes(color = artist_name, group = artist_name), size = 2 ) + geom_point( aes(color = artist_name), size = 4 ) + geom_text( aes(x = axis_nudge, label = artist_name, hjust = align), size = 3.5, family = "Montserrat SemiBold" ) + geom_text( data = data.frame(), aes(x = -2.25, y = 70, label = "MY\nSPOTIFY\nTOP\nARTISTS\nOF 2020"), hjust = "right", size = 20, color = "#352862", family = "Compacta BT" ) + geom_text( data = data %>% distinct(time_period_num, time_period), aes(x = time_period_num, y = 135, label = time_period), size = 4, color = "#352862", family = "Montserrat ExtraBold" ) + labs( caption = "Viz - @MaiaPelletier" ) + xlim(c(-4.5, 3.5)) + ylim(c(-3, 140)) + scale_color_manual(values = morbid_stuff_pal) + theme_void(base_family = "Montserrat SemiBold") + theme( legend.position = "none", plot.background = element_rect(fill = "#F7CDC1", color = NA), plot.caption = element_text(hjust = 0.5, size = 6, color = "#352862"), plot.margin = margin(10, 15, 10, 15) ) + ggsave(here("day05 - slope", "day05_slope.png"), type = "cairo", dpi = 500, height = 6.5, width = 8)
9e91371f0b2a32ed8ecc89ccb32b686eb7004184
6b87119889f0e6645411d0029549ca296111c54c
/man/n_orgs_100k.Rd
10a8ae9d6778919068f1881bc4b32f8850a9b4ae
[]
no_license
edson-github/newsatlasbr
006a1d0cd50dc7a19be61141708827eb5bf2a5cd
1f54bec2ba369102a79a12dffaad21a1cd29da18
refs/heads/master
2023-03-17T12:51:18.408828
2020-09-18T01:45:15
2020-09-18T01:45:15
null
0
0
null
null
null
null
UTF-8
R
false
true
1,779
rd
n_orgs_100k.Rd
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/n_orgs_100k.R \name{n_orgs_100k} \alias{n_orgs_100k} \title{Retrieve data on the number of news organizations per 100k/inhabitants} \usage{ n_orgs_100k() } \value{ A \code{data.frame} with the following variables: \itemize{ \item municipio: municipality name. \item uf: abbreviation of the state's name. \item regiao: name of the country's region where the municipality is located at. \item qtd_veiculos: number of news organizations in the municipality. \item codmun: IBGE's (Brazilian Institute of Geography and Statistics) code for the municipality (7-digit). \item populacao: municipality's population. \item ano: year from IBGE's population records (note that data on news organizations collected by Atlas' team were last updated on Nov. 30th, 2019.) \item veiculos_por_100k_hab: number of news organizations per 100,000 inhabitants. \item IDHM: Human Development Index for the municipality (Census 2010). \item IDHM_R: Human Development Index - \emph{per capita} income for the municipality (Census 2010). \item IDHM_E: Human Development Index - education for the municipality (Census 2010).} } \description{ Retrieve data on the number of news organizations per 100,000 inhabitants in Brazilian municipalities. } \details{ \code{n_orgs_100k} returns a dataset containing information on municipalities with at least one news outlet recorded by Atlas da Noticia's research team up to November, 2019. It includes a variable on the number of organizations per 100,000 inhabitants. The function returns only those municipalities. } \examples{ # Extract data on all the municipalities with at least one news outlet. municipalities_with_media <- n_orgs_100k() }
815118002298abbb99ac1d6553836582d2ae87b5
b06b16ab56edbd8d63384c849aa65a044029a3f5
/script.R
89ff7912118f38f6868d48391bbf5d0691cfb9d3
[]
no_license
yobero/ter
6472d6ea4d485a77c4bf1b32488906d6230e6de9
3ec36290587a0f8ead952dd379c3e15d2ed392f1
refs/heads/master
2021-04-12T04:41:00.552438
2018-05-20T21:28:34
2018-05-20T21:28:34
125,828,819
0
0
null
null
null
null
UTF-8
R
false
false
289
r
script.R
# Read the benchmark results and render a comparison plot to plot.jpg.
timings <- read.table("result")

jpeg("plot.jpg")
# Open an empty frame whose y-range is wide enough for both timing series.
plot(
  timings$V1, timings$V2,
  type = "n",
  ylim = range(c(timings$V2, timings$V3)),
  xlab = "Nombre de chiffre",
  ylab = "temps d'execution",
  main = "Temps d'execution en fonction du nombre de chiffres"
)
# Overlay the two series: second column in blue, third in red.
lines(timings$V1, timings$V2, col = "blue")
lines(timings$V1, timings$V3, col = "red")
dev.off()
45131979670a6977ae0efafbd903f57ce1b3d8f5
0309d6e6296f4faccddfc6d4ecc5a9161564c1eb
/archive/kht/norsyss_purpose.R
e77356c7b3b334d99b0e37ad0ecafd8abe2c2370
[]
no_license
ybkamaleri/shiny
4bf864e9ea445d9ba6714c87ff2b98f977f5b542
1c185b6ffd069460260f570eeb341c18a20f62db
refs/heads/master
2023-06-03T19:34:37.831626
2021-06-17T12:21:08
2021-06-17T12:21:08
255,894,191
0
0
null
2020-04-15T11:21:57
2020-04-15T11:21:56
null
UTF-8
R
false
false
4,246
r
norsyss_purpose.R
norsyss_purpose_ui <- function(id, config) { ns <- NS(id) tagList( fluidRow( column( width=2, p("") ), column( width=8, align="left", p( "Vi får data til denne overvåkingen via Sykdomspulsen. ", "Diagnosekoder som registreres hos lege eller legevakt sendes ", "til Helsedirektoratet som en del av legenes refusjonskrav (KUHR-systemet). ", "Folkehelseinstituttet mottar daglig oppdatert KUHR-data til Sykdomspulsen. ", "Dataene er anonyme når vi mottar dem, uten pasientidentifikasjon, men med ", "informasjon om kjønn, aldersgruppe, konsultasjonsdato og sted for konsultasjon.", br(), br(), strong("Informasjon om dataene vi bruker i NorSySS:"), br(), "- Både telefon og legekontakt er inkludert", br(), "- Legekontor og legevakt er inkludert", br(), "- Geografisk område basert på stedet for legekonsultasjon, ikke pasientens bosted", br(), "- De kommunene som ikke har legevakt eller legekontor er ikke med i listen ", "der man velger geografisk område da vi ikke har noe data om disse. ", "Personene som bor i kommuner uten lege og legevakt benytter legekontor ", "og legevakt i andre kommuner", br(), "- Antallet konsultasjoner er vanligvis lavere i ferier og på helligdager. ", "Dette er spesielt tydelig rundt jul/nyttår og påske, men også i ", "sommerferieukene.", br(), "- Det kan være 14 dager forsinkelse i dataene da de kommer fra KUHR ", "systemet. 
Dersom det for noen datoer ikke er registrert noen ", "konsultasjoner fra et geografisk område vil dette vises som røde ", "stiplede linjer i grafene.", br(), br(), strong("Fargekoder i grafene:"), br(), "- Bakgrunnsfargen er laget ut fra beregninger fra de foregående 5 ", "årene i samme geografiske område og samme sykdom/syndrom og ", "aldersgruppe (for årene 2006-2010 er 5 fremtidige år brukt).", br(), "- Blå bakgrunnsfarge viser at antallet konsultasjoner er som forventet", br(), "- Gul bakgrunnsfarge viser at antallet konsultasjoner er høyere enn forventet", br(), "- Rød bakgrunnsfarge viser at antall konsultasjoner er betydelig høyere enn forventet", br(), "- I grafer der det er en svart strek viser det antallet faktiske konsultasjoner. ", "Dersom denne streken er i det blå feltet er antallet konsultasjoner er som forventet, ", "om den er i det gule feltet er antallet konsultasjoner høyere enn forventet og om ", "den er i det røde feltet er antallet konsultasjoner betydelig høyere enn forventet ", "for gitt tidsrom, alder og geografisk område.", br(), br(), strong("Kommunereformen: "), "Kommuner som har blitt slått sammen og fått ", "et nytt navn vil ikke finnes i oversiktene. Kommuner som har blitt slått ", "sammen med en annen kommune men beholdt navnet vil vises i oversiktene, ", "og beregningene tar hensyn til sammenslåingen. Det samme gjelder ", "sammenslåtte kommuner som får nytt kommunenavn.", br(), br(), strong("Små kommuner: "), "Kommuner med under 500 innbyggere vil ikke ", "kunne se grafer for aldersgrupperinger, men bare 'totalt antall'. ", "Dette er av hensyn til personvern.", br(), br(), strong("Interkommunalt samarbeid om legekontor/legevakt: "), "I Sykdomspulsen er geografisk område basert på stedet for legekonsultasjon, ", "ikke pasientens bosted. Derfor vil legekontorets/legevaktens postadresse ", "si hvilken kommune som vises i Sykdomspulsen. 
De andre kommunene som er ", "med på det interkommunale samarbeidet vil ikke vises i Sykdomspulsen.", br(), br(), strong("Ved tekniske feil, spørsmål eller tilbakemeldinger "), "vennligst send en mail til sykdomspulsen@fhi.no" ), br() ), column( width=2, p("") ) ) ) } norsyss_purpose_server <- function(input, output, session, config) { }
836e407b5de957a53f04f9be6fbe02be03de07cd
784bbd690a54af0d941cf0891af957ccb5e6a44d
/tests/make_mi_histograms.R
2524c79338bdbe522c15af916660b662cf29ec8d
[]
no_license
gditzler/MicrobiomeInformation
4c15706ca1242c9d37234a444f2f3c1de329aa22
e22dbdaa6975de338c871720525fc3287b5ec449
refs/heads/master
2020-06-05T23:46:43.452195
2015-06-03T19:09:09
2015-06-03T19:09:09
26,067,779
0
0
null
null
null
null
UTF-8
R
false
false
2,780
r
make_mi_histograms.R
# generate heatmaps of the mutual and conditional mutual information # after the features have been filtered. # build the documentation and load the package into the environment library("devtools") library("ggplot2") library("reshape2") library("plyr") library("fields") #document() # set up program constants lvl = 0.75 # filter level for OTUs nbins = 50 # number of bins for estimating the pdfs bin_w = 0.0007 # set the paths of the biom & map files then load them biom_fps <- c("~/Git/DataCollections/AmericanGut/AmericanGut-Gut-Diet.biom", #"~/Git/DataCollections/AmericanGut/AmericanGut-Gut-Sex.biom", "~/Git/DataCollections/Caporaso/caporaso-gut.biom" ) map_fps <- c("~/Git/DataCollections/AmericanGut/AmericanGut-Gut-Diet-OV.txt", #"~/Git/DataCollections/AmericanGut/AmericanGut-Gut-Sex.txt", "~/Git/DataCollections/Caporaso/caporaso-gut.txt" ) d_names <- c("ag-diet-ov", #"ag-gut-sex", "cap-gut-sex" ) col_names <- c("DIET_TYPE", #"SEX", "SEX" ) for (n in 1:length(biom_fps)) { # get the latest files for plotting biom_fp <- biom_fps[n] map_fp <- map_fps[n] d_name <- d_names[n] col_name <- col_names[n] print(paste("Running", d_name, col_name)) # load the biom & map files biom_df <- load_biom_matrix(biom_fp) map_df <- load_metadata(map_fp) # scale the matrix data <- scale_matrix(biom_df, rescale=FALSE) biom_df$data_dense <- data # extract the labels from the map file data struture labels <- retrieve_labels(map_df, biom_df$sample_ids, col_name) # filter the otus to remove the low ranking otus that are very low in terms of # the abunance lst <- filter_otus(biom_df, lvl=lvl) data_filter <- lst$data otus_filter <- lst$otu_names # compute mi & cmi matrices mi_vec_f <- measure_otu_mi(data_filter, labels, discrete=TRUE, disc="equalwidth", nbins=nbins, method="emp") cmi_vec_f <- measure_otu_cmi(data_filter, labels, discrete=TRUE, disc="equalwidth", nbins=nbins, method="emp") bin_w <- (max(mi_vec_f)-min(mi_vec_f))/25 ggplot(data.frame(x=1:length(mi_vec_f), mi=mi_vec_f), aes(x=mi))+ 
geom_histogram(aes(y=..density.., fill=..count..), colour='black', binwidth=bin_w)#+ #geom_line(stat="density", colour='blue',size=2) ggsave(file=paste("data/plots/",d_name,"-density-mi-partial.pdf",sep="")) bin_w <- (max(mi_vec_f)-min(mi_vec_f))/25 ggplot(data.frame(x=1:length(cmi_vec_f), cmi=cmi_vec_f), aes(x=cmi))+ geom_histogram(aes(y=..density.., fill=..count..), colour='black', binwidth=bin_w)#+ #geom_line(stat="density", colour='blue',size=2) ggsave(file=paste("data/plots/",d_name,"-density-cmi-partial.pdf",sep="")) }
9a5abb474159e6c8584e3c5b722395692f604bec
12922577d8cbfed96add26bd2d5357e4e1806519
/grimoirelibR/man/plotTimeSerieWeekN.Rd
912094083731dd7a2e516256d7c523b3066b3caf
[]
no_license
VizGrimoire/VizGrimoireUtils
dd535efa2408109f3b71ef0c4f823b52def5b227
28ce8be6d01222ed86c8ebd2c48847e759a13aaa
refs/heads/master
2020-12-21T22:58:32.034270
2017-05-09T10:26:20
2017-05-09T10:26:20
14,416,400
0
8
null
2016-05-25T12:09:10
2013-11-15T05:59:56
Python
UTF-8
R
false
false
2,488
rd
plotTimeSerieWeekN.Rd
\name{plotTimeSerieWeekN} \alias{plotTimeSerieWeekN} %- Also NEED an '\alias' for EACH other topic documented here. \title{ %% ~~function to do ... ~~ Plot weekly time serie. } \description{ %% ~~ A concise (1-5 lines) description of what the function does. ~~ } \usage{ plotTimeSerieWeekN(data, columns, filename, labels = columns) } %- maybe also 'usage' for other objects documented here. \arguments{ \item{data}{ %% ~~Describe \code{data} here~~ } \item{columns}{ %% ~~Describe \code{columns} here~~ } \item{filename}{ %% ~~Describe \code{filename} here~~ } \item{labels}{ %% ~~Describe \code{labels} here~~ } } \details{ %% ~~ If necessary, more details than the description above ~~ } \value{ %% ~Describe the value returned %% If it is a LIST, use %% \item{comp1 }{Description of 'comp1'} %% \item{comp2 }{Description of 'comp2'} %% ... } \references{ %% ~put references to the literature/web site here ~ } \author{ %% ~~who you are~~ } \note{ %% ~~further notes~~ } %% ~Make other sections like Warning with \section{Warning }{....} ~ \seealso{ %% ~~objects to See Also as \code{\link{help}}, ~~~ } \examples{ ##---- Should be DIRECTLY executable !! ---- ##-- ==> Define data, use random, ##-- or do help(data=index) for the standard data sets. 
## The function is currently defined as function (data, columns, filename, labels = columns) { pdffilename <- paste(c(filename, ".pdf"), collapse = "") pdffilenamediff <- paste(c(filename, "-diff.pdf"), collapse = "") pdffilenamecum <- paste(c(filename, "-cumsum.pdf"), collapse = "") label <- "" for (col in 1:length(columns)) { if (col != 1) { label <- paste(c(label, " / "), collapse = "") } label = paste(c(label, labels[col], " (", colors[col], ")"), collapse = "") } pdf(file = pdffilename, height = 3.5, width = 5) timeserie <- ts(data[columns[1]], start = c(data$year[1], data$week[1]), frequency = 52) ts.plot(timeserie, col = colors[1], ylab = label) if (length(columns) > 1) { for (col in 2:length(columns)) { timeserie <- ts(data[columns[col]], start = c(data$year[1], data$week[1]), frequency = 52) lines(timeserie, col = colors[col]) } } dev.off() } } % Add one or more standard keywords, see file 'KEYWORDS' in the % R documentation directory. \keyword{ ~kwd1 } \keyword{ ~kwd2 }% __ONLY ONE__ keyword per line
86ef53e706c9006f2d7fb71705e5e3b91158c545
ee4e6398420bb34c94c1ede5508064c0623f4e54
/cachematrix.R
6561a8b25e93b01f6fe0410097d69285bbc9204a
[]
no_license
jbgonzalez81/datasciencecoursera
86626e25aaea8058a47844b32ed388947c851b5f
ad14751ea737de43fb200a5b56ebb591e00bd756
refs/heads/master
2020-04-29T09:17:52.772205
2015-04-01T20:47:55
2015-04-01T20:47:55
25,180,532
0
0
null
null
null
null
UTF-8
R
false
false
689
r
cachematrix.R
## Cache a square matrix together with its inverse, so the inverse is
## computed at most once and reused on subsequent requests.

## Build a "cache matrix": a list of closures sharing the matrix `x`
## and its lazily computed inverse.
makeCacheMatrix <- function(x = matrix()) {
  inv <- NULL
  set <- function(y) {
    x <<- y
    # Replacing the matrix invalidates any cached inverse.
    inv <<- NULL
  }
  get <- function() {
    x
  }
  setmatrix <- function(solve) {
    inv <<- solve
  }
  getmatrix <- function() {
    inv
  }
  list(
    set = set,
    get = get,
    setmatrix = setmatrix,
    getmatrix = getmatrix
  )
}

## Return the inverse of a cache matrix created by makeCacheMatrix(),
## calling solve() only when no cached inverse is available yet.
cacheSolve <- function(x = matrix(), ...) {
  cached <- x$getmatrix()
  if (!is.null(cached)) {
    message("getting cached data")
    return(cached)
  }
  inverse <- solve(x$get(), ...)
  x$setmatrix(inverse)
  inverse
}
25137f73a116c4340495fd8755f77dc028892d1f
59b641615cc5e1cf60adb64de99e8ab2c609350d
/R/hauction.R
1368ad75992d17031e3df7772de855df1628dc3a
[]
no_license
nullsatz/dauc
1181778e0d781d72e3b71c76f40a1195997ee077
746f705835786555965867283f856ab309c40fa3
refs/heads/master
2016-09-06T01:29:31.959937
2013-02-19T16:36:15
2013-02-19T16:36:15
null
0
0
null
null
null
null
UTF-8
R
false
false
498
r
hauction.R
# Solve the assignment problem with the compiled auction routine and
# return, for each bidder, the item it won (NA when unassigned).
hauction <- function(weights) {
  bidder_ids <- sort(unique(weights$bidder))
  item_ids <- sort(unique(weights$item))
  # The C routine expects the weight vector ordered by bidder, then item.
  ordered <- weights[order(weights$bidder, weights$item), ]
  assignment <- .Call('host_auction', length(bidder_ids), length(item_ids),
                      ordered$weight, PACKAGE='dauc')
  # -1 flags an unassigned bidder; the remaining indices come back
  # 0-based, hence the +1 before indexing into item_ids.
  assignment <- ifelse(assignment == -1, NA, assignment) + 1
  data.frame(bidder = bidder_ids, item = item_ids[assignment])
}
9b76730e0d7e21b561601247a52f170d64e9aeb7
f75fdbd39e642ea2065ba4e5502e7e2aac44754d
/test/test_gg.r
e5adde53303b1cf7ec2a6deb8b613e1304a3805c
[ "Apache-2.0" ]
permissive
sinanshi/visotmed
2b7787671e175d2ae7f715a8e06a8b8262ec5996
cdea8bf0428145d310e77963d237343c554204d1
refs/heads/master
2016-09-05T19:53:53.075416
2014-10-30T16:56:47
2014-10-30T16:56:47
16,539,917
3
0
null
null
null
null
UTF-8
R
false
false
11,642
r
test_gg.r
library(tikzDevice) library(raster) library(rgdal) library(ggplot2) library(RColorBrewer) library("grid") load("../data/clim.R") # map<-cru_raster_10min_window$val[,,2] # Longitude<<-cru_raster_10min_window$lon # Latitude<<-cru_raster_10min_window$lat # x<-vector() # y<-vector() # z<-vector() # k<-1 # for(i in 1:length(Longitude)){ # for(j in 1: length(Latitude)){ # z[k]<-map[i,j] # x[k]<-Longitude[i] # y[k]<-Latitude[j] # k<-k+1 # } # } # map<-data.frame(lon=x,lat=y,val=z) # map<-map[which(!is.na(map$val)),] # read shapefile wmap <- readOGR(dsn="ne_110m_land", layer="ne_110m_land") # convert to dataframe wmap_df <- fortify(wmap) # # create a blank ggplot theme theme_opts <- list(theme( panel.grid.minor = element_blank(), panel.grid.major = element_blank(), panel.background = element_rect(color = "black",fill=NA, size = 0.4, linetype = "solid"), #panel.border=element_rect(color = "black",fill=NA, size = 0.4, linetype = "solid"), plot.background = element_blank(), #plot.background=element_blank(), #panel.border = element_blank(), axis.line = element_blank(), axis.text.x = element_blank(), axis.text.y = element_blank(), axis.ticks = element_blank(), panel.margin = unit(5,"lines"), #axis.title.x = element_blank(), #axis.title.y = element_blank(), plot.title = element_text())) # # # plot map a<-ggplot(wmap_df, aes(long,lat, group=group)) + geom_polygon() + labs(title="World map (longlat)") + coord_equal() + theme_opts ggsave("map1.png", width=12.5, height=8.25, dpi=72) wmap_robin <- spTransform(wmap, CRS("+proj=robin")) wmap_df_robin <- fortify(wmap_robin) ggplot(wmap_df_robin, aes(long,lat, group=group)) + geom_polygon() + labs(title="World map (robinson)") + coord_equal() + theme_opts ggsave("map2.png", width=12.5, height=8.25, dpi=72) ggplot(wmap_df_robin, aes(long,lat, group=group, fill=hole)) + geom_polygon() + labs(title="World map (robin)") + coord_equal() + theme_opts ggsave("map3.png", width=12.5, height=8.25, dpi=72) # # ggplot(wmap_df_robin, aes(long,lat, 
group=group, fill=hole)) + # geom_polygon() + # labs(title="World map (Robinson)") + # coord_equal() + # theme_opts + # scale_fill_manual(values=c("#262626", "#e6e8ed"), guide="none") # change colors & remove legend # # ggsave("map4.png", width=12.5, height=8.25, dpi=72) # # # # # # # # # # # # # # grat <- readOGR("ne_110m_graticules_all", layer="ne_110m_graticules_15") grat_df <- fortify(grat) bbox <- readOGR("ne_110m_graticules_all", layer="ne_110m_wgs84_bounding_box") bbox_df<- fortify(bbox) # # a<-ggplot(bbox_df, aes(long,lat, group=group)) + # geom_polygon(fill="white") + # geom_polygon(data=wmap_df, aes(long,lat, group=group, fill=hole)) + # geom_path(data=grat_df, aes(long, lat, group=group, fill=NULL), linetype="dashed", color="grey50") + # labs(title="World map + graticule (longlat)") + # coord_equal() + # theme_opts + # scale_fill_manual(values=c("black", "white"), guide="none") # change colors & remove legend # # #ggsave("map5.png", width=12.5, height=8.25, dpi=72) # # # # graticule (Robin) grat_robin <- spTransform(grat, CRS("+proj=robin")) # reproject graticule grat_df_robin <- fortify(grat_robin) bbox_robin <- spTransform(bbox, CRS("+proj=robin")) # reproject bounding box bbox_robin_df <- fortify(bbox_robin) # # ggplot(bbox_robin_df, aes(long,lat, group=group)) + # geom_polygon(fill="white") + # geom_polygon(data=wmap_df_robin, aes(long,lat, group=group, fill=hole)) + # geom_path(data=grat_df_robin, aes(long, lat, group=group, fill=NULL), linetype="dashed", color="grey50") + # labs(title="World map (Robinson)") + # coord_equal() + # theme_opts + # scale_fill_manual(values=c("black", "white"), guide="none") # change colors & remove legend # # ggsave("map6.png", width=12.5, height=8.25, dpi=72) # # # # # # add country borders # countries <- readOGR("ne_110m_admin_0_countries", layer="ne_110m_admin_0_countries") # countries_robin <- spTransform(countries, CRS("+proj=robin")) # countries_robin_df <- fortify(countries_robin) # # ggplot(bbox_robin_df, 
aes(long,lat, group=group)) + # geom_polygon(fill="white") + # theme_opts + # geom_polygon(data=countries_robin_df, aes(long,lat, group=group, fill=hole)) + # geom_path(data=countries_robin_df, aes(long,lat, group=group, fill=hole), color="white", size=0.3) + # geom_path(data=grat_df_robin, aes(long, lat, group=group, fill=NULL), linetype="dashed", color="grey50") + # labs(title="World map (Robinson)") + # coord_equal() + xlim(-2e6,5e6) + ylim(2e6,7e6)+ # scale_fill_manual(values=c("black", "white"), guide="none") # change colors & remove legend # myPalette <- colorRampPalette(rev(brewer.pal(11, "Spectral")), space="Lab") # # # # zp1 <- ggplot(longData, # # aes(x = Var2, y = Var1, fill = value)) # # zp1 <- zp1 + geom_tile() # # zp1 <- zp1 + scale_fill_gradientn(colours = myPalette(100)) #=============== #scale bottom (a) #=============== # plot.title = "General Circulation Models Climate Data Output" # plot.subtitle = 'created by GCM runs' # # a<-ggplot(map,aes(lon,lat,fill=val))+ # geom_raster(hjust = 0, vjust = 0)+ # scale_fill_gradientn(colours = myPalette(100),breaks=c(-15,-10, -5,10,5, 0,5,15), # guide= guide_colorbar(title=expression(Surface~Temperature~degree~C), title.position="top", # barwidth = 25, barheight = 1,nbin=100, # draw.ulim = FALSE, draw.llim = FALSE ))+ # #geom_path(data=wmap_df,aes(long,lat,group=group,fill=NULL))+ # #coord_cartesian()+coord_map()+ # geom_path(data=grat_df,aes(long,lat,group=group,fill=NULL),linetype="dashed", color="grey50")+ # geom_path(data=countries,aes(long,lat,goup=group,fill=NULL))+ # xlim(min(map$lon),max(map$lon))+ylim(min(map$lat),max(map$lat))+ # theme_opts+theme(legend.position="bottom", legend.background = element_rect(color = "black", # fill = "grey90", size = 0.4, linetype = "solid"))+ # coord_equal()+ # ggtitle(bquote(atop(.(plot.title), atop(italic(.(plot.subtitle)), "")))) + # labs( x = "", y="") # # #labs(title="Mean Surface Temperature ",x="GCM")#+ # # #scale_x_discrete(grat@data$display) # # 
#expression(Depth[mm]) # # ggsave("map10.png",width=12.5,height=6,dpi=72) # #=============== # #scale right (b) # #=============== # b<-ggplot(map,aes(lon,lat,fill=val))+ # geom_raster(hjust = 0, vjust = 0)+ # scale_fill_gradientn(colours = myPalette(100),breaks=c(-15,-10, -5,10,5, 0,5,15), # guide= guide_colorbar(title=expression(degree~C), title.position="top", # barwidth = 1, barheight = 15,#nbin=100, # draw.ulim = FALSE, draw.llim = FALSE ))+ # #geom_path(data=wmap_df,aes(long,lat,group=group,fill=NULL))+ # #coord_cartesian()+coord_map()+ # geom_path(data=grat_df,aes(long,lat,group=group,fill=NULL),linetype="dashed", color="grey50")+ # geom_path(data=countries,aes(long,lat,goup=group,fill=NULL))+ # xlim(min(map$lon),max(map$lon))+ylim(min(map$lat),max(map$lat))+ # theme_opts+theme(legend.position="right", legend.background = element_rect(color = "black", # fill = "white", size = 0.4, linetype = "solid"))+ # coord_equal()+ # ggtitle(bquote(atop(.(plot.title), atop(italic(.(plot.subtitle)), "")))) + # labs( x = "", y="") # #labs(title="Mean Surface Temperature ",x="GCM")#+ # #scale_x_discrete(grat@data$display) # #expression(Depth[mm]) # # ggsave("map10.png",width=12.5,height=6,dpi=72) # #=============== # #scale inside (c) # #=============== # plot.title = "General Circulation Models Climate Data Output" # plot.subtitle = 'created by GCM runs' # # c<-ggplot(map,aes(lon,lat,fill=val))+ # geom_raster(hjust = 0, vjust = 0)+ # scale_fill_gradientn(colours = myPalette(100),breaks=c(-15,-10, -5,10,5, 0,5,15), # guide= guide_colorbar(title=expression(degree~C), title.position="top", # # barwidth = 25, barheight = 1,nbin=100, # draw.ulim = FALSE, draw.llim = FALSE ))+ # #geom_path(data=wmap_df,aes(long,lat,group=group,fill=NULL))+ # #coord_cartesian()+coord_map()+ # geom_path(data=grat_df,aes(long,lat,group=group,fill=NULL),linetype="dashed", color="grey50")+ # geom_path(data=countries,aes(long,lat,goup=group,fill=NULL))+ # 
xlim(min(map$lon),max(map$lon))+ylim(min(map$lat),max(map$lat))+ # theme_opts+theme(legend.position=c(0.9,0.3), legend.background = element_rect(color = "black", # fill = "grey90", size = 0.4, linetype = "solid"))+ # coord_equal()+ # ggtitle(bquote(atop(.(plot.title), atop(italic(.(plot.subtitle)), "")))) + # labs( x = "", y="") #=============== #scale inside (d) #=============== # plot.title = "GCM Surface Temperature" # plot.subtitle = 'created by GCM runs' # # plot.title = "" # # plot.subtitle = '' # # mr<-fortify(map_robin) # d<-ggplot(map,aes(long,lat,fill=group))+ # geom_tile(hjust = 0, vjust = 0)+ # scale_fill_gradientn(colours= myPalette(10),breaks=c(15,5,0,-5,-15), # guide= guide_legend(title=expression(degree~C), title.position="top", # # barwidth = 25, barheight = 1,nbin=100, # draw.ulim = FALSE, draw.llim = FALSE ))+ # # #scale_color_manual(values=myPalette(1000))+ # # scale_fill_brewer(palette="Spectral")+ # #geom_path(data=wmap_df,aes(long,lat,group=group,fill=NULL))+ # #coord_cartesian()+coord_map()+ # #geom_path(data=grat_df,aes(long,lat,group=group,fill=NULL),linetype="dashed", color="grey50")+ # #geom_path(data=countries,aes(long,lat,goup=group,fill=NULL))+ # xlim(min(map$lon),max(map$lon))+ylim(min(map$lat),max(map$lat))+ # theme_opts+theme(legend.position=c(0.9,0.3), legend.background = element_rect(color = "black", # fill = "grey90", size = 0.4, linetype = "solid"))+ # coord_equal()+ # ggtitle(bquote(atop(.(plot.title), atop(italic(.(plot.subtitle)), "")))) + # labs( x = "", y="") # ggsave("map10.png",width=12.5,height=6,dpi=72) # names(map)<-c("long","lat","val") # grid<-expand.grid(lon=unique(map$lon),lat=unique(map$lat)) # val<-array(NA,dim(grid)[1] ) # for(i in 1:dim(map)[1]){ # val[i]<-map$val[which(grid$lon==map$lon[i]&grid$lat==map$lat[i])] # } ## coordinates (map)= ~long+lat projection(map)<-CRS("+proj=longlat +datum=WGS84 +no_defs +ellps=WGS84 +towgs84=0,0,0") # k<-spTransform(map,CRS("+proj=robin")) # # cor<-as.data.frame(k@coords) # 
pp<-data.frame("long"=cor$x,"lat"=cor$y,"val"=k@data) # # ggplot(pp,aes(long,lat,fill=z))+geom_tile()#+scale_fill_gradientn(colours= myPalette(10)) # coordinates (map)= ~long+lat projection(map)<-CRS("+proj=longlat +datum=WGS84 +no_defs +ellps=WGS84 +towgs84=0,0,0") pp_robin<-spTransform(pp,CRS("+proj=robin")) a<-data.frame(pp_robin@coords,pp_robin@data) map_pj_c$lat<-round(map_pj$lat/10000) # s100 <- matrix(c(267573.9, 2633781, 213.29545, 262224.4, 2633781, 69.78261, 263742.7, 2633781, 51.21951, 259328.4, 2633781, 301.98413, 264109.8, 2633781, 141.72414, 255094.8, 2633781, 88.90244), ncol=3, byrow=TRUE) # colnames(s100) <- c('X', 'Y', 'Z') # # library(raster) # # set up an 'empty' raster, here via an extent object derived from your data # e <- extent(s100[,1:2]) # e <- e + 1000 # add this as all y's are the same # # r <- raster(e, ncol=10, nrow=2) # # or r <- raster(xmn=, xmx=, ... # # # you need to provide a function 'fun' for when there are multiple points per cell # x <- rasterize(s100[, 1:2], r, s100[,3], fun=mean) # plot(x)
a21fad56139d9812565a1630a0c75229cd8914e4
cd34ddfa6a89237c9ed41ff98f611c93abddb55f
/herring_rv_survey_analyses.r
bfe1b16c32f87f3e7b03a5f55aeaac3499718dfa
[]
no_license
danielgboyce/Herring_state_2018
02babecb67bd23640d921c1b7f950bac89e01662
d7b10f55765d844d7fc547f7c404502c0258f6da
refs/heads/master
2020-03-11T15:52:45.340441
2018-10-03T14:21:46
2018-10-03T14:21:46
130,098,781
0
0
null
null
null
null
UTF-8
R
false
false
104,749
r
herring_rv_survey_analyses.r
n<-number of sets k<-outcome interested in testing p<-probability of outcome (catchability) d<-rbinom(n,k,prob=p) hist(d,breaks=100) p<-.02#probability of capture 1-dbinom(0,size=30,prob=p)#probability of detecting a herring if it is present p<-0.02 1-dbinom(0,size=230,prob=p)#probability of detecting a herring if it is present library(rgdal) library(maptools) library(segmented) library(tidyr) library(DataCombine) library(RColorBrewer) library(segmented) library(splines) library(strucchange) library(data.table) library(psych) library(reshape2) library(gplots) library(forecast) library(cluster) library(vegan) library(ggplot2) library(hybridHclust) library(raster) library(fields) library(gridExtra) library(colorRamps) library(mapdata) library(scales) library(MASS) library(mgcv) library(maps) library(plyr) library(plotrix) library(lubridate) library(fossil) datadir1<-'N://cluster_2017//scratch//spera//data//stagingdat' datadir<-'N://cluster_2017//scratch//spera//data//finaldat_v2' figsdir<-'C://Users//copepod//Documents//aalldocuments//literature//research//active//SPERA//Figures' figsdir<-'C://Users//sailfish//Documents//aalldocuments//literature//research//active//SPERA//Figures' setwd('C:/Users/copepod/Documents/aalldocuments/literature/research/active/ESS_trophic_control/data') setwd('C:/Users/sailfish/Documents/aalldocuments/literature/research/active/ESS_trophic_control/data') plg<-readShapePoly('polygons_ecnasap.shp')#COMMAND TO READ BACK IN plg<-subset(plg,region=='NS') plot(plg) text(plg,plg$stratum) map('world',add=TRUE,col='gray',fill=TRUE) mcrt<-"+proj=longlat +datum=WGS84 +no_defs +ellps=WGS84 +towgs84=0,0,0" coast<-readOGR('N://data/shapefiles/naturalearthdata_ne_10m_land_poly',layer='ne_10m_land')#works for Alfcoast<-fortify(coast) coast.mc<-crop(coast,extent(-180,180,-90,90),proj4string = CRS("+proj=longlat +datum=WGS84 +no_defs +ellps=WGS84 +towgs84=0,0,0")) ggplot <- function(...) { ggplot2::ggplot(...) 
+ theme_bw() } theme_opts <- list(theme(panel.grid.minor = element_blank(), panel.grid.major = element_blank(), panel.background = element_blank(), plot.background = element_blank(), panel.border = element_blank(), axis.line = element_blank(), axis.text.x = element_blank(), axis.text.y = element_blank(), axis.ticks = element_blank(), axis.title.x = element_blank(), axis.title.y = element_blank(), legend.position="right", plot.title = element_text(size=16))) ############################################################## ############################################################## setwd(datadir) rvl<-read.csv("herring_lengths_RV_survey_spera_spawnar.csv",header=TRUE) rvl$flen<-ifelse(rvl$mission=='NED2016016',rvl$flen/10,rvl$flen) rvl$flen<-ifelse(rvl$mission!='NED2016016',(rvl$flen*1.0866)+0.95632,rvl$flen) #plot(log10(rvl$flen),log10(rvl$fwt)) rvl<-subset(rvl,log10(rvl$flen)>=.5 & month %in% c(6,7,8))#REMOVE OUTLIERS rvl$lonc<-round(rvl$lon,digits=0) rvl$lonc<-ifelse(rvl$lon<=rvl$lonc,rvl$lonc-.25,rvl$lonc+.25) rvl$latc<-round(rvl$lat,digits=0) rvl$latc<-ifelse(rvl$lat>=rvl$latc,rvl$latc+.25,rvl$latc-.25) rvl$cell<-gsub(' ','',paste(rvl$lonc,'_',rvl$latc)) plot(log10(rvl$flen),log10(rvl$fwt)) a<-rvl a$lfwt<-log10(a$fwt) a$lflen<-log10(a$flen) mod<-lm(lfwt ~ lflen,data=a) abline(mod,col='red') rvl$fwtest<-10^predict(mod,newdata=data.frame(lflen=a$lflen)) rvl$fwtest<-ifelse(is.na(rvl$fwt)==TRUE,rvl$fwtest,rvl$fwt) rvl2<-rvl rvl2$jcat<-ifelse(rvl$flen<=24.75,'J','A') rvl2$id<-gsub(' ','',paste(rvl2$mission,'_',rvl2$setno)) rvl2$id1<-gsub(' ','',paste(rvl2$mission,'_',rvl2$setno,'_',rvl2$fshno)) a<-subset(rvl2,jcat=='J') b<-subset(rvl2,jcat=='A') sum(a$fwtest,na.rm=TRUE)/sum(rvl2$fwtest) sum(b$fwtest,na.rm=TRUE)/sum(rvl2$fwtest) f<-function(d){ return(data.frame(year=unique(d$year), lon=unique(d$lon), lat=unique(d$lat), tbin5=unique(d$tbin5), strat=unique(d$strat), time=unique(d$time), no=sum(d$clen,na.rm=TRUE), wt=sum(d$fwtest/1000,na.rm=TRUE))) } 
rvll<-ddply(rvl2,.(id,jcat),.fun=f,.progress='text') #ADDS 0'S FOR EACH LENGTH CATEGORY dt<-unique(subset(rvll,select=c('jcat'))) f<-function(d){ d2<-merge(dt,d,by=c('jcat'),all.x=TRUE,all.y=FALSE) #d2<-rbind.fill(dt,d) d2$id<-unique(d$id) d2$tbin5<-unique(d$tbin5) d2$year<-unique(d$year) d2$lon<-unique(d$lon) d2$lat<-unique(d$lat) d2$strat<-unique(d$strat) d2$time<-unique(d$time) d2$no<-ifelse(is.na(d2$no)==TRUE,0,d2$no) d2$wt<-ifelse(is.na(d2$wt)==TRUE,0,d2$wt) return(d2) } rvll2<-ddply(rvll,.(id),.fun=f,.progress='text') f<-function(d){ gblon<--66.3 gblat<-43.3 if(length(unique(d$year))>=5 & length(unique(d$lat))>10 & length(unique(d$time))>10 & length(unique(d$lat))>10){ #PREDICT ANNUAL AVERAGE VALUES modw<-gam(wt ~ as.factor(year) + s(lon,lat,k=30) + s(time,bs='cc',k=5),data=d,family='nb'(link='log')) modn<-gam(no ~ as.factor(year) + s(lon,lat,k=30) + s(time,bs='cc',k=5),data=d,family='nb'(link='log')) #modw<-gam(wt ~ as.factor(year)+s(time,bs='cc',k=5),data=d,family='nb'(link='log')) #modn<-gam(no ~ as.factor(year)+s(time,bs='cc',k=5),data=d,family='nb'(link='log')) pdat<-data.frame(year=sort(unique(d$year)), lon=gblon, lat=gblat, time=1200) pdat$pn<-predict(modn,newdata=pdat,type='response') pdat$pw<-predict(modw,newdata=pdat,type='response') return(pdat) } else NULL } ot<-ddply(rvll2,.(jcat),.fun=f,.progress='text') a<-subset(ot,jcat=='A') plot(a$year,a$pw,pch=15,las=1,xlab='Year',ylab='Predicted weight per tow [Adults]') plot(a$year,a$pn,pch=15,las=1,xlab='Year',ylab='Predicted numbers per tow [Adults]') b<-subset(ot,jcat=='J') plot(b$year,b$pw,pch=15,las=1,xlab='Year',ylab='Predicted weight per tow [Juveniles]') plot(b$year,b$pn,pch=15,las=1,xlab='Year',ylab='Predicted numbers per tow [Juveniles]') plot(a$year,a$pw,pch=15,las=1,xlab='Year',ylab='Predicted weight per tow [Adults]') par(new=TRUE) plot(b$year,b$pw,pch=15,col='red',xlab='',ylab='',yaxt='n') plot(a$pw,b$pw) plot(log10(a$pw+1),log10(b$pw+1)) abline(a=0,b=1) xx<-merge(a,b,by=c('year')) 
ccf(as.ts(log10(xx$pn.x+1)),as.ts(log10(xx$pn.y+1))) acf(as.ts(xx$pn.x)) xyplot(pw~year|as.factor(jcat),data=ot,pch=15) xyplot(pn~year|as.factor(jcat),data=ot,pch=15) a<-subset(ot,jcat=='J') plot(a$year,a$pw,pch=15) ############################################################# ############ ESIMATE TRENDS IN HERRING BY INFERRED AGE ################################################################## #LOOK AT LENGTH-AGE DATA TO PREDICT LENGTH OF JUVENILES V ADULTS setwd(datadir) adat<-read.csv("herring_assess_2016_len_wt_atage.csv",header=TRUE,na.strings=c('- ',' - ')) names(adat)<-tolower(names(adat)) names(adat)<-gsub('\\.','',names(adat)) adat<- adat %>% gather(age, y, age1:age11) adat$agen<-as.numeric(gsub('age','',adat$age)) a<-unique(subset(adat,var=='no.x1000',select=c('age','agen','db','y'))) names(a)[4]<-c('no.x1000') adat<-subset(adat,!(var=='no.x1000')) adat<-merge(adat,a,by=c('db','age','agen'),all.x=TRUE,all.y=FALSE) setwd(figsdir) pdf('length_age_assess2016.pdf',height=7, width=8) a<-subset(adat,var=='len.cm') plot(a$agen,a$y,xlim=c(0,11),ylim=c(0,35),las=1,pch=15,col=alpha('black',.3),xaxt='n',xlab='Age',ylab='Length [cm]') axis(1,seq(0,11,1)) mod <- nls(y ~ b0*(1-exp(-b1 * agen)), a, start=c(b0=1,b1=1),control=nls.control(maxiter=500),algorithm='port') pdat<-data.frame(agen=seq(0,11,.01)) pdat$p<-predict(mod,newdata=pdat) lines(pdat$agen,pdat$p) pdat<-data.frame(agen=seq(0,11,.5)) pdat$p<-predict(mod,newdata=pdat) aa<-subset(pdat,agen==3.5) points(aa$agen,aa$p,pch=16,col='firebrick3',cex=2) lines(c(-1,aa$agen),c(aa$p,aa$p),col='red',lty=2) lines(c(aa$agen,aa$agen),c(-5,aa$p),col='red',lty=2) #PREDICTED CUTOFF FOR JUVENILE/ADULT=24.8CM agedat<-data.frame(agen=seq(0,11,.00001)) agedat$flen<-predict(mod,newdata=agedat) a<-subset(adat,var=='len.cm') plot(a$y,a$agen,las=1,pch=15,col=alpha('black',.3),ylab='Age',xlab='Length [cm]',ylim=c(0,11),xlim=c(0,40)) axis(1,seq(0,40,5)) md<-lm(agen~y,data=a) mds<-summary(md) k.strt<-mds$coef[2,1] modage <- nls(agen ~ 
.1*exp(k * y), a, start=c(k=k.strt),control=nls.control(maxiter=500),algorithm='port') pdat<-data.frame(y=seq(0,max(a$y,na.rm=TRUE),length.out=1000)) pdat$p<-predict(modage,newdata=pdat) lines(pdat$y,pdat$p) dev.off() setwd(datadir) #rvl<-read.csv("herring_lengths_RV_survey_spera_spawnar.csv",header=TRUE) rvl<-read.csv("herring_lengths_RV_survey_spera_allar.csv",header=TRUE) rvl$flen<-ifelse(rvl$mission=='NED2016016',rvl$flen/10,rvl$flen) rvl$flen<-ifelse(rvl$mission!='NED2016016',(rvl$flen*1.0866)+0.95632,rvl$flen) #plot(log10(rvl$flen),log10(rvl$fwt)) rvl<-subset(rvl,log10(rvl$flen)>=.5 & month %in% c(6,7,8) & lat<46)#REMOVE OUTLIERS rvl$strat<-as.character(rvl$strat) rvl<-subset(rvl,!(strat %in% c("5Z1","5Z2","5Z9",'','558','559','440','441','442','445','446','447','443','444','559','447','449','448','450','496','451'))) rvl$lonc<-round(rvl$lon,digits=0) rvl$lonc<-ifelse(rvl$lon<=rvl$lonc,rvl$lonc-.25,rvl$lonc+.25) rvl$latc<-round(rvl$lat,digits=0) rvl$latc<-ifelse(rvl$lat>=rvl$latc,rvl$latc+.25,rvl$latc-.25) rvl$cell<-gsub(' ','',paste(rvl$lonc,'_',rvl$latc)) rvl$flen<-round(rvl$flen,digits=2) #plot(log10(rvl$flen),log10(rvl$fwt)) a<-rvl a$lfwt<-log10(a$fwt) a$lflen<-log10(a$flen) mod<-lm(lfwt ~ lflen,data=a) abline(mod,col='red') rvl$fwtest<-10^predict(mod,newdata=data.frame(lflen=a$lflen)) rvl$fwtest<-ifelse(is.na(rvl$fwt)==TRUE,rvl$fwtest,rvl$fwt) agedat$flen<-round(agedat$flen,digits=2) agedat<-unique(agedat) agedat<-data.frame(flen=sort(unique(agedat$flen)), agen=tapply(agedat$agen,agedat$flen,mean)) rvl<-merge(rvl,agedat,by=c('flen'),all.x=TRUE,all.y=FALSE) rvl$agen<-ifelse(is.na(rvl$agen)==TRUE,10,rvl$agen) rvl$agen<-floor(rvl$agen)+1 rvl$agen<-ifelse(rvl$agen>=8,8,rvl$agen) rvl2<-rvl rvl2$id<-gsub(' ','',paste(rvl2$mission,'_',rvl2$setno)) rvl2$id1<-gsub(' ','',paste(rvl2$mission,'_',rvl2$setno,'_',rvl2$fshno)) f<-function(d){ return(data.frame(year=unique(d$year), lon=unique(d$lon), lat=unique(d$lat), tbin5=unique(d$tbin5), strat=unique(d$strat), 
time=unique(d$time), no=sum(d$clen,na.rm=TRUE), wt=sum(d$fwtest/1000,na.rm=TRUE))) } rvll<-ddply(rvl2,.(id,agen),.fun=f,.progress='text') #ADDS 0'S FOR EACH LENGTH CATEGORY dt<-unique(subset(rvll,select=c('agen'))) f<-function(d){ d2<-merge(dt,d,by=c('agen'),all.x=TRUE,all.y=FALSE) #d2<-rbind.fill(dt,d) d2$id<-unique(d$id) d2$tbin5<-unique(d$tbin5) d2$year<-unique(d$year) d2$lon<-unique(d$lon) d2$lat<-unique(d$lat) d2$strat<-unique(d$strat) d2$time<-unique(d$time) d2$no<-ifelse(is.na(d2$no)==TRUE,0,d2$no) d2$wt<-ifelse(is.na(d2$wt)==TRUE,0,d2$wt) return(d2) } rvll2<-ddply(rvll,.(id),.fun=f,.progress='text') #sdata<-rvl2[,!c('flen','fwt')] f<-function(d){ gblon<--66.3 gblat<-43.3 if(length(unique(d$year))>=5 & length(unique(d$lat))>10 & length(unique(d$time))>10 & length(unique(d$lat))>10){ #PREDICT ANNUAL AVERAGE VALUES modw<-gam(wt ~ as.factor(year) + s(lon,lat,k=10) + s(time,bs='cc',k=5),data=d,family='nb'(link='log')) modn<-gam(no ~ as.factor(year) + s(lon,lat,k=10) + s(time,bs='cc',k=5),data=d,family='nb'(link='log')) pdat<-data.frame(year=sort(unique(d$year)), lon=gblon, lat=gblat, time=1200) pdat$pn<-predict(modn,newdata=pdat,type='response') pdat$pw<-predict(modw,newdata=pdat,type='response') pdat$lpn<-log10(pdat$pn+1) pdat$lpw<-log10(pdat$pw+.01) pdat$pnz<-(pdat$lpn-mean(pdat$lpn))/sd(pdat$lpn) pdat$pwz<-(pdat$lpw-mean(pdat$lpw))/sd(pdat$lpw) return(pdat) } else NULL } #ot<-ddply(subset(rvll,lat<=44),.(lcat),.fun=f)#exclude smallest #ot<-ddply(subset(rvll,lat<=44 & lon< -60),.(lcat),.fun=f) ot<-ddply(rvll2,.(agen),.fun=f,.progress='text') f<-function(d){ return(data.frame(totno=sum(d$no), totwgt=sum(d$wt), lon=mean(d$lon), lat=mean(d$lat))) } dd<-ddply(rvll2,.(strat,agen),.fun=f) a<-subset(dd,agen==1) a<-subset(dd,agen==4) plot(a$lon,a$lat,pch=16,cex=rescale(a$totno,newrange=c(.5,7))) map('world',add=TRUE,col='gray',fill=TRUE) d<-subset(rvll2,year==1970) f<-function(d){ print(unique(d$year)) j<-subset(d,agen<=4) a<-subset(d,agen>4) if(dim(j)[1]>0){ 
return(data.frame(padult=mean(a$no)/mean(d$no), pjuv=mean(j$no)/mean(d$no), rt=mean(a$no)/mean(j$no))) } else NULL } dd<-ddply(rvll2,.(year),.fun=f) plot(dd$year,dd$padult) plot(dd$year,dd$pjuv) plot(dd$year,dd$rt,log='y',las=1,type='b') plot(rvll2$lon,rvll2$lat,pch=16) plot(plg,add=TRUE) xyplot(pnz~year|as.factor(agen),data=ot,pch=15) plot(ot$agen,ot$year,pch=16,cex=rescale(ot$lpn,newrange=c(.2,5)),col=alpha('darkred',.5),las=1) pltfun<-function(ott,ttl){ names(ott)[1]<-'y' return(ggplot()+ geom_tile(data=ott, aes(x=agen, y=year,fill=y),col='gray80',size=.0001)+ scale_fill_distiller(palette='Spectral')+ theme(panel.grid.major=element_blank(),panel.grid.minor=element_blank(),legend.position=c(.1,.2),plot.background=element_blank(),axis.line = element_line(color = 'black'), legend.key.size = unit(0.15, "in"),legend.text=element_text(size=6))+ scale_x_continuous(expand=c(0,0),breaks=sort(dt$agen),labels=sort(dt$agen),limits=NA)+ scale_y_continuous(expand=c(0,0),breaks=seq(1970,2015,5),labels=as.character(seq(1970,2015,5)),limits=NA)+ coord_equal()+ coord_cartesian(ylim=c(1969.5,2016.5),xlim=c(.5,8.5))+ xlab('')+ ylab('')+ labs(title = ttl, subtitle = "", caption = '') ) } pznum<-pltfun(subset(ot,select=c('pnz','agen','year')),'Log numbers') pzwgt<-pltfun(subset(ot,select=c('pwz','agen','year')),'Log weight') plnum<-pltfun(subset(ot,select=c('lpn','agen','year')),'Log numbers') plwgt<-pltfun(subset(ot,select=c('lpw','agen','year')),'Log weight') pnum<-pltfun(subset(ot,select=c('pn','agen','year')),'Numbers') pwgt<-pltfun(subset(ot,select=c('pw','agen','year')),'Weight') setwd(figsdir) pdf('herring_rv_trends_bylength.pdf',height=8,width=10) grid.arrange(plnum,plwgt,ncol=2) grid.arrange(pnum,pwgt,ncol=2) xyplot(lpn~year|as.factor(lcat),data=ot,type=c('p','spline'),pch=16,main='Log numbers') xyplot(lpw~year|as.factor(lcat),data=ot,type=c('p','spline'),pch=16,main='Log weight') f<-function(d){ gblon<--66.3 gblat<-43.3 if(length(unique(d$year))>=5 & 
length(unique(d$lat))>10 & length(unique(d$time))>10 & length(unique(d$lat))>10){ #PREDICT ANNUAL AVERAGE VALUES mod<-gam(wt ~ year + s(lon,lat,k=10) + s(time,bs='cc',k=5),data=d,family='nb') s<-summary(mod) return(data.frame(b=s$p.table[2,1], se=s$p.table[2,2])) } else NULL } ot2<-ddply(rvll2,.(lcat),.fun=f,.progress='text') ot2$lcat<-as.numeric(as.character(ot2$lcat)) f<-function(d){ return(data.frame(lcat=sort(unique(d$lcat)), no=sum(d$no), wt=sum(d$wt))) } o<-ddply(rvll2,.(lcat),.fun=f) ot2<-merge(ot2,o,by=c('lcat'),all=TRUE) par(mfrow=c(2,2),mar=c(4,4,1,1)) plot(ot2$lcat,ot2$b,las=1,pch=16,xlab='Length',ylab='Rate of change over time',col='white',ylim=c(-.15,.15),xaxt='n') axis(1,at=seq(5,45,1)) abline(h=0,lty=2) f<-function(d){lines(c(d$lcat,d$lcat),c(d$b+(1.96*d$se),d$b-(1.96*d$se)),col=alpha('dodgerblue3',.3),lwd=2) } zz<-dlply(ot2,.(lcat),.fun=f) points(ot2$lcat,ot2$b,las=1,pch=16,cex=rescale(ot2$wt,newrange=c(1,4)),col=alpha('darkblue',1)) points(ot2$lcat,ot2$b,las=1,pch=1,cex=rescale(ot2$wt,newrange=c(1,4)),col=alpha('lightgray',1),lwd=.5) dev.off() plot(ot2$wt,ot2$b,pch=15,las=1) abline(h=0,lty=2) f<-function(d){lines(c(d$wt,d$wt),c(d$b+(1.96*d$se),d$b-(1.96*d$se)))} zz<-dlply(ot2,.(lcat),.fun=f) o<-ddply(rvll2,.(lcat,tbin5),.fun=f) o$lcat<-as.numeric(as.character(o$lcat)) xyplot(wt~lcat |as.factor(tbin5),data=o,type=c('p','spline'),pch=15) xyplot(no~lcat |as.factor(tbin5),data=o,type=c('p','spline'),pch=15) plot(o$lcat,o$no,pch=15) plot(o$lcat,o$wt,pch=15) plot(o$lcat,log10(o$wt),pch=15) plot(rvl2$flen,rvl2$fwt) mod<-gam(fwt ~ s(flen,k=4),data=rvl2) pdat<-data.frame(flen=seq(min(rvl2$flen),max(rvl2$flen),length.out=100)) pdat$p<-predict(mod,newdata=pdat) lines(pdat$flen,pdat$p,col='red') rvll$lwgt<-predict(mod,newdata=data.frame(flen=as.numeric(as.character(rvll$lcat)))) d<-subset(rvll,lcat==7.5) #CHECKS TO SEE IF GENERIC EARLY WARNINGS INDICATORS ARE RELEVANT ewfun<-function{ library(earlywarnings) mod<-gam(totwgt ~ as.factor(year) + 
s(lon,lat,k=50),data=rvw) pdat<-data.frame(year=sort(unique(rvw$year)), lon=gblon, lat=gblat) p<-predict(mod,newdata=pdat,se.fit=TRUE,type='response') pdat$p<-10^p$fit pdat$se<-10^p$se.fit ew<-generic_ews(subset(pdat,select=c('year','p')),winsize=10,detrending='gaussian',interpolate=TRUE) } ############################################################## ##ESTIMATE HERRING TRENDS FROM RV DATA AT STRATUM LEVEL setwd(datadir) #rvw<-read.csv("herring_weights_RV_survey_spera_spawnar.csv",header=TRUE) rvw<-read.csv("herring_weights_RV_survey_spera_allar.csv",header=TRUE) rvw<-subset(rvw,month %in% c(6,7,8) & is.na(dmin)==FALSE) rvw$bank<-ifelse(rvw$strat %in% c(447,448),'banq','no') rvw$bank<-ifelse(rvw$strat %in% c(443),'mis',rvw$bank) rvw$bank<-ifelse(rvw$strat %in% c(458),'mid',rvw$bank) rvw$bank<-ifelse(rvw$strat %in% c(455,456),'sab',rvw$bank) rvw$bank<-ifelse(rvw$strat %in% c(464),'west',rvw$bank) rvw$bank<-ifelse(rvw$strat %in% c(463),'em',rvw$bank) rvw$bank<-ifelse(rvw$strat %in% c(473),'lh',rvw$bank) rvw$bank<-ifelse(rvw$strat %in% c(474),'rw',rvw$bank) rvw$bank<-ifelse(rvw$strat %in% c(475),'bac',rvw$bank) rvw$bank<-ifelse(rvw$strat %in% c(480),'bn',rvw$bank) rvw$no<-log10(rvw$totno+1) rvw$wgt<-log10(rvw$totwgt+1) rvw$sz<-rvw$totwgt/rvw$totno rvw$sz<-ifelse(rvw$sz==Inf,rvw$totwgt,rvw$sz) rvw$id<-gsub(' ','',paste(rvw$mission,'_',rvw$setno)) rvw$pres<-ifelse(rvw$totno>0,1,0) rvw$tbin20<-ifelse(rvw$year<=1990,1980,2010) rvw$lonc<-round(rvw$lon,digits=0) rvw$lonc<-ifelse(rvw$lon<=rvw$lonc,rvw$lonc-.25,rvw$lonc+.25) rvw$latc<-round(rvw$lat,digits=0) rvw$latc<-ifelse(rvw$lat>=rvw$latc,rvw$latc+.25,rvw$latc-.25) rvw$cell<-gsub(' ','',paste(rvw$lonc,'_',rvw$latc)) rvw$cell.1<-gsub(' ','',paste(round(rvw$lon,digits=1),'_',round(rvw$lat,digits=1))) lonc.1<-seq(min(rvw$lon),max(rvw$lon),.1) latc.1<-seq(min(rvw$lat),max(rvw$lat),.1) crds<-expand.grid(lonc.1=lonc.1,latc.1=latc.1) crds$cell.1<-gsub(' ','',paste(round(crds$lonc.1,digits=1),'_',round(crds$latc.1,digits=1))) 
rvw<-merge(rvw,crds,by=c('cell.1'),all.x=TRUE,all.y=FALSE) f<-function(d){ return(data.frame(mnno=mean(d$totno), mdno=median(d$totno), mnwt=mean(d$totwgt), mdwt=median(d$totwgt), lon=median(d$lonc), lat=median(d$latc))) } dd<-ddply(rvw,.(strat),.fun=f) dd<-ddply(subset(rvw,month==7 & year>=2000),.(cell),.fun=f) dd<-ddply(subset(rvw,month==8 & year>=2000),.(cell),.fun=f) plot(dd$lon,dd$lat,pch=16,cex=rescale(log10(dd$mdwt+1),newrange=c(.5,7)),col='dodgerblue') map('world',add=TRUE,col='gray',fill=TRUE) plot(plg,add=TRUE,fill=FALSE) plot(plg,col=alpha('darkblue',.4)) points(rvw$lon,rvw$lat,pch=16,col=alpha('red3',.3),cex=.5) map('world',add=TRUE,col='gray',fill=TRUE) dm<-data.frame(year=sort(unique(rvw$year)), n=tapply(rvw$totno,rvw$year,sum), ntow=tapply(rvw$id,rvw$year,length), ncell=tapply(rvw$cell.1,rvw$year,function(x) length(unique(x))), ntime=tapply(rvw$time,rvw$year,function(x) length(unique(x))), ndepth=tapply(round(rvw$dmax,digits=0),rvw$year,function(x) length(unique(x))), nday=tapply(rvw$day,rvw$year,function(x) length(unique(x))), dayrng=tapply(rvw$day,rvw$year,function(x) max(x)-min(x))) a<-subset(dm,year<=1990) b<-subset(dm,year>1990) mean(a$ncell) mean(b$ncell) mean(a$dayrng) mean(b$dayrng) par(mfrow=c(2,2)) dm$n<-log10(dm$n) plot(dm$year,dm$ncell) plot(dm$year,dm$ntow) plot(dm$year,dm$ntow) plot(dm,pch=15) setwd(figsdir) pdf('rv_sample_effort_overtime.pdf',height=10,width=8) par(mfrow=c(3,2),mar=c(4,4,1,1)) f<-function(d,lg){ nm<-names(d)[2] names(d)<-c('year','y') if(lg==TRUE){d$y<-log10(d$y) } else NULL plot(d$year,d$y,las=1,xlab='Year',ylab=nm,pch=16,col=alpha('gold3',1),cex=2,type='l',lwd=2) points(d$year,d$y,pch=16,col=alpha('gold3',.3),cex=2) points(d$year,d$y,pch=1,col='lightgray',lwd=.01,cex=2) } f(subset(dm,select=c('year','n')),TRUE) f(subset(dm,select=c('year','n')),F) f(subset(dm,select=c('year','ntow')),F) f(subset(dm,select=c('year','ncell')),F) f(subset(dm,select=c('year','nday')),F) f(subset(dm,select=c('year','dayrng')),F) dev.off() 
d<-subset(rvw,bank=='sab') fsm<-function(d){ d$y<-d$totwgt+.1 mod<-gam(y~as.factor(year) + s(time,bs='cc',k=5) + s(lon,lat,k=4),data=d,gamma=.5,family=Gamma('log')) pdat<-data.frame(year=sort(unique(d$year)), time=1200, lon=median(d$lon), lat=median(d$lat)) pdat$pwt<-predict(mod,newdata=pdat,type='response') d$y<-ifelse(d$totwgt>0,1,0) mod2<-gam(y~as.factor(year) + s(time,bs='cc',k=5) + s(lon,lat,k=4),data=d,gamma=.5,family=binomial) pdat$pps<-predict(mod2,newdata=pdat,type='response') pdat<-subset(pdat,select=c('year','pwt','pps')) pdat$bank<-unique(d$bank) pdat$lon<-mean(d$lon) #pdat$p<-(pdat$p-mean(pdat$p))/sd(pdat$p) #names(pdat)[2]<-unique(as.character(d$cell)) return(pdat) } qq<-ddply(rvw,.(bank),.fun=fsm) xyplot(log(pwt)~year | bank,data=qq,type=c('p','l'),pch=15) xyplot(pps~year | bank,data=qq,pch=15) f<-function(d){ return(data.frame(no=mean(d$totno), ntow=length(unique(d$id)))) } ot<-ddply(rvw,.(bank),.fun=f) ot<-ot[order(ot$no,decreasing=TRUE),] ott<-subset(qq,bank!='no',select=c('pwt','bank','year','lon')) ttl<-'Presence' lg<-TRUE pltfun<-function(ott,ttl,lg){ names(ott)[1]<-'y' names(ott)[3]<-'year' if(lg==TRUE){ lms<-c(max(ott$y)*-1,0) } else { lms<-c(-1,0)} ott$bank<-as.factor(ott$bank) dm<-unique(subset(ott,select=c('lon','bank'))) dm<-dm[order(dm$lon),] dm$id<-seq(1,dim(dm)[1],1) ott<-merge(ott,dm,by=c('bank'),all.x=TRUE,all.y=FALSE) ott$y<-ott$y*-1 return(ggplot()+ geom_tile(data=ott, aes(x=id, y=year,fill=y),col='gray',size=.0001) + scale_fill_distiller(palette='YlOrRd',limits=lms) + theme(panel.grid.major=element_blank(),panel.grid.minor=element_blank(),legend.position=c(.1,.2),plot.background=element_blank(),axis.line = element_line(color = 'black'), legend.key.size = unit(0.15, "in"),legend.text=element_text(size=6))+ scale_x_continuous(expand=c(0,0),breaks=seq(1,10,1),labels=dm$bank,limits=NA)+ scale_y_continuous(expand=c(0,0),breaks=seq(1970,2015,5),labels=as.character(seq(1970,2015,5)),limits=NA)+ coord_equal()+ 
coord_cartesian(ylim=c(1969.5,2016.5),xlim=c(.5,10.5))+ xlab('')+ ylab('')+ labs(title = ttl, subtitle = "", caption = '') ) } p1<-pltfun(subset(qq,bank!='no',select=c('pps','bank','year','lon')),'Presence',FALSE) p2<-pltfun(subset(qq,bank!='no',select=c('pwt','bank','year','lon')),'Presence',TRUE) p2 p1 f<-function(d){return(data.frame(nyear=length(unique(d$year)), myear=min(d$year))) } dm<-ddply(rvw,.(strat),.fun=f) dmm<-dm[order(dm$nyear),] dmm<-subset(dmm,nyear>=15 & myear<1990) fsm<-function(d){ d$y<-d$totwgt+.1 mod<-gam(y~s(year) + s(time,bs='cc',k=5) + s(lon,lat,k=4),data=d,gamma=.5,family=Gamma('log')) pdat<-data.frame(year=seq(min(d$year),max(d$year),1), time=1200, lon=median(d$lon), lat=median(d$lat)) pdat$pwt<-predict(mod,newdata=pdat,type='response') d$y<-ifelse(d$totwgt>0,1,0) mod2<-gam(y~as.factor(year) + s(time,bs='cc',k=5) + s(lon,lat,k=4),data=d,gamma=.5,family=binomial) pdat$pps<-predict(mod2,newdata=pdat,type='response') pdat<-subset(pdat,select=c('year','pwt','pps')) pdat$bank<-unique(d$bank) pdat$lon<-mean(d$lon) pdat$lat<-mean(d$lat) return(pdat) } qt<-ddply(subset(rvw,strat %in% dmm$strat),.(strat),.fun=fsm) #devtools::install_github('dgrtwo/gganimate',force=TRUE) install.packages('magick') library(magick) library(gganimate) qt2<-subset(qt,select=c('strat','year','pwt','pps','bank')) qt2$id<-qt2$strat a<-plg a$id<-a$stratum am<-fortify(a,region='id') am<-subset(am,id%in% qt2$id) mydat<-merge(am,qt2,by=c('id')) mydat$lpwt<-log10(mydat$pwt+.1) p<-ggplot(mydat,aes(x=long,y=lat,group = group, fill=lpwt,frame=year)) + theme_opts + coord_equal() + geom_polygon(color = 'grey',size=.0001) + # geom_polygon(aes(long,lat,group=group),fill=NA,colour='black',data=a) + labs(title = "Herring presence between 1970 and 2015", subtitle = "Dynamic Map", caption = 'Data source: Statistics Canada') + scale_fill_distiller(palette='Spectral') setwd(figsdir) gganimate(p) gganimate(p,'output.gif') gganimate(p,'output.log.gif') gganimate(p,'output.mp4') 
gganimate(p, 'output.smooth.mp4')

###################################################################
#ESTIMATES SMOOTH TREND IN DIFFERENT QUANTITIES OVER TIME FOR CLUSTER
###################################################################
## Keep grid cells sampled in >= 15 years, first sampled before 1990.
f <- function(d){
    return(data.frame(nyear = length(unique(d$year)),
                      myear = min(d$year)))
}
dm <- ddply(rvw, .(cell), .fun = f)
dmm <- dm[order(dm$nyear), ]
dmm <- subset(dmm, nyear >= 15 & myear < 1990)
dm2 <- ddply(subset(rvw, is.na(sz) == FALSE), .(cell), .fun = f)
dmm2 <- subset(dm2, nyear >= 15 & myear < 1990)

## Per-cell z-scored smooth trend of the first column of d; returns a
## one-column data frame named by the cell id (plus year).
## NOTE(review): the original calls carried gamily='nb' -- a typo for
## 'family' that gam() silently swallows via '...', so these models were in
## fact fit with the default gaussian family.  The dead argument is dropped
## here to make that explicit; switching to family=nb would only be valid
## for the count response (totno), not the continuous totwgt/sz.
fsm <- function(d){
    nm <- names(d)[1]
    names(d)[1] <- 'y'
    if (nm == 'sz') {
        d <- subset(d, is.na(y) == FALSE)
        mod <- gam(y ~ s(year) + s(lon, lat, k = 4), data = d, gamma = .5)
    } else {
        mod <- gam(y ~ s(year) + s(time, bs = 'cc', k = 5) + s(lon, lat, k = 4),
                   data = d, gamma = .5)
    }
    pdat <- data.frame(year = seq(min(d$year), max(d$year), .25),
                       time = 1200,
                       lon  = median(d$lon),
                       lat  = median(d$lat))
    pdat$p <- predict(mod, newdata = pdat, type = 'response')
    pdat <- subset(pdat, select = c('year', 'p'))
    pdat$p <- (pdat$p - mean(pdat$p)) / sd(pdat$p)  # z-score the trend
    names(pdat)[2] <- unique(as.character(d$cell))
    return(pdat)
}
#TOTAL WEIGHT
qq <- dlply(subset(rvw, cell %in% dmm$cell,
                   select = c('totwgt', 'strat', 'year', 'cell', 'time', 'lon', 'lat')),
            .(cell), .fun = fsm)
qsm <- Reduce(function(x, y) merge(x, y, by = c('year'), all = TRUE), qq)  #COMBINE
qsm <- qsm[, colSums(is.na(qsm)) != nrow(qsm)]  #REMOVES COLUMNS THAT ARE ALL MISSING
#TOTAL NUMBERS
qq <- dlply(subset(rvw, cell %in% dmm$cell,
                   select = c('totno', 'cell', 'year', 'time', 'lon', 'lat')),
            .(cell), .fun = fsm)
qsm2 <- Reduce(function(x, y) merge(x, y, by = c('year'), all = TRUE), qq)  #COMBINE
qsm2 <- qsm2[, colSums(is.na(qsm2)) != nrow(qsm2)]  #REMOVES COLUMNS THAT ARE ALL MISSING
#AVERAGE SIZE
qq <- dlply(subset(rvw, sz != Inf & is.na(sz) == FALSE & cell %in% dmm2$cell,
                   select = c('sz', 'cell', 'year', 'time', 'lon', 'lat')),
            .(cell), .fun = fsm)
qsm3 <- Reduce(function(x, y) merge(x, y, by = c('year'), all = TRUE), qq)  #COMBINE
qsm3 <- qsm3[, colSums(is.na(qsm3)) != nrow(qsm3)]  #REMOVES COLUMNS THAT ARE ALL MISSING

q <- qsm
q2 <- q %>% gather(cell, value, -year)
## BUG FIX: condition on 'cell' (created by gather above); 'strata' is not a
## column of q2, so the original panels were not driven by these data.
xyplot(value ~ year | cell, data = q2, pch = 15,
       type = c('spline'), col = 'black')
xyplot(value ~ year | cell, data = q2, pch = 15,
       type = c('p', 'spline'), col = 'black')

## Fuzzy clustering (cluster::fanny) of the per-cell trend series using a
## correlation distance.  Draws a silhouette plot, a PCoA star plot with
## cluster hulls, per-cluster trend time series, and returns a ggplot map of
## cluster membership (alpha = membership probability).
## df: year x cell matrix-like data frame (first column = year);
## k: number of clusters (2 or 3); lbl: map title.
clfun <- function(df, k, lbl){
    rownames(df) <- df$year
    df <- df[, -1]  # drop year column; rows = years, cols = cells
    #k<-3#ER OF CLUSTERS
    dmat <- 1 - cor(df, use = 'pairwise.complete.obs')  # correlation distance
    dst <- as.dist(dmat)
    ff <- fanny(dst, k, maxit = 5000, diss = TRUE)
    par(mfrow = c(3, 1), mar = c(4, 12, 1, 12))
    dum <- c('red3', 'darkblue', 'gold3')
    plot(silhouette(ff), col = dum[1:k], main = '')  #silhouette plot
    dc.pcoa <- cmdscale(dst)
    dc.scores <- scores(dc.pcoa, choices = c(1, 2))
    spefuz.g <- ff$clustering
    ## NOTE(review): cell names are sorted here while ff$clustering keeps
    ## fanny's own order -- verify that the cell/cluster alignment holds
    ## (cor() preserves column order, which need not be sorted).
    a <- data.frame(cell = as.character(sort(unique(names(df)))),
                    clusters = ff$clustering)
    aa <- data.frame(ff$membership)
    aa$cell <- rownames(a)
    plot(scores(dc.pcoa), asp = 1, type = 'n', xlim = c(-1, 1.5), ylim = c(-1, .5),
         las = 1, axes = TRUE, xlab = '', ylab = '')
    stars(ff$membership, location = scores(dc.pcoa), draw.segments = TRUE,
          add = TRUE, scale = FALSE, len = .1,
          col.segments = alpha(c(dum[1:k]), .25), byt = 'n', labels = NULL,
          xlim = c(-1.1, 1.4), ylim = c(-1, .5), lwd = .0001, xpd = TRUE,
          border = NULL, radius = FALSE, col.radius = alpha('white', .1))
    ## convex hull around each cluster in ordination space
    for (i in 1:k) {
        cl <- dum[i]
        gg <- dc.scores[spefuz.g == i, ]
        hpts <- chull(gg)
        hpts <- c(hpts, hpts[1])
        lines(gg[hpts, ], col = cl, lwd = 3, xlim = c(-1.1, 1.4), ylim = c(-1, .5))
    }
    ## max membership probability per cell; rescaled copy drives line alpha
    cx <- data.frame(cell = aa$cell, cx = apply(aa[, 1:k], 1, max))
    cx$cxx <- rescale(cx$cx, newrange = c(.05, .5))
    #PLOTS THE TIMESERIES OF R FOR EACH CLUSTER
    par(mfrow = c(3, 1), mar = c(2, 12, 1, 12), oma = c(1, 1, 1, 1))
    par(mar = c(4, 8, 4, 8))
    mb <- seq(1, k, 1)
    l <- list()
    for (i in seq_along(mb)) {
        print(mb[i])
        one <- subset(a, clusters == mb[i], select = c('cell'))  # cells in this cluster
        cx2 <- subset(cx, cx >= 0.5)  #GETS INSTANCES WHERE CLUSTER PROBABILITY>0.5
        data5 <- subset(df, select = c(as.character(one$cell)))  #TS FOR CLUSTER
        cx2 <- subset(cx2, cell %in% names(data5))
        ## restrict to confidently-assigned cells only
        data5 <- subset(df, select = c(as.character(cx2$cell)))  #TS FOR CLUSTER
        x <- rownames(data5)
        t <- data.frame(year = as.numeric(x), mn = rowMeans(data5, na.rm = FALSE))
        t2 <- data.frame(year = as.numeric(x), mn = rowMeans(data5, na.rm = TRUE))
        cl <- dum[i]
        plot(0, 0, pch = 16, cex = .01, xlim = c(1970, 2020), ylim = c(-2, 3),
             main = '', xaxt = 'n', las = 1, axes = FALSE, xlab = '', ylab = '')
        axis(side = 2, at = seq(-2, 3, 1), las = 1, lwd = .001, cex.axis = .75)
        axis(side = 1, at = seq(1970, 2020, 10), cex.axis = .75)
        ## one line per cell; transparency tracks membership probability
        for (j in 1:length(data5[1, ])) {
            try(dat <- data5[, j])
            try(trnsp <- subset(cx, cell == as.character(names(data5[j])))$cxx)
            try(lines(x, dat, cex = .5, ylim = c(0, 1), lwd = 2,
                      col = alpha(cl, rescale(trnsp, newrange = c(.1, .5)))))
        }
        lines(as.numeric(as.character(t$year)), t$mn, col = 'black', cex = .7, lwd = 4)
        dm <- subset(t2, mn == max(t2$mn, na.rm = TRUE), select = c('year'))
        dm$cluster = mb[i]
        names(dm) <- c('clusterday', 'clusters')
        l[[i]] <- dm  # collected but not returned; kept for interactive use
    }
    a2 <- a
    if (k == 3) {
        a2$cx <- apply(aa[, c('X1', 'X2', 'X3')], 1, function(x) max(x))
    } else {
        a2$cx <- apply(aa[, c('X1', 'X2')], 1, function(x) max(x))
    }
    a2$id <- a2$cell
    a2$cl <- ifelse(a2$clusters == 1, dum[1], dum[3])
    a2$cl <- ifelse(a2$clusters == 2, dum[2], a2$cl)
    crds <- unique(subset(rvw, select = c('cell', 'lonc', 'latc')))
    a2 <- merge(a2, crds, by = c('cell'), all.x = TRUE, all.y = FALSE)
    dum <- unique(subset(a2, select = c('clusters', 'cl')))
    return(ggplot() +
        geom_tile(data = a2, aes(x = lonc, y = latc, fill = as.factor(clusters),
                                 alpha = cx), col = 'gray80', size = .0001) +
        scale_fill_manual(breaks = as.character(dum$clusters), values = dum$cl,
                          na.value = "transparent",
                          guide = guide_legend(title = '')) +
        scale_alpha(guide = 'none') +
        geom_polygon(aes(long, lat, group = group), fill = "grey65", data = coast.mc) +
        theme(panel.grid.major = element_blank(),
              panel.grid.minor = element_blank(),
              legend.position = c(.9, .2),
              plot.background = element_blank(),
              axis.line = element_line(color = 'black'),
              legend.key.size = unit(0.1, "in"),
              legend.text = element_text(size = 6)) +
        scale_x_continuous(expand = c(0, 0), breaks = seq(-68, -56, 1),
                           labels = as.character(seq(-68, -56, 1)), limits = NA) +
        scale_y_continuous(expand = c(0, 0), breaks = seq(41, 48, 1),
                           labels = as.character(seq(41, 48, 1)), limits = NA) +
        coord_equal() +
        coord_cartesian(ylim = c(41, 48), xlim = c(-68, -56)) +
        xlab('') +
        ylab('') +
        labs(title = lbl, subtitle = "", caption = ''))
}
setwd(figsdir)
pdf('herring_rvsurvey_cluster_all_grid_k3.pdf', height = 10, width = 6.5)
mp1 <- clfun(qsm, 3, "Total observed herring (weight)")
mp2 <- clfun(qsm2, 3, "Total observed
herring (numb)")
mp3 <- clfun(qsm3, 3, "Average weight")
dev.off()
pdf('herring_rvsurvey_cluster_map_grid_k3.pdf', height = 14, width = 7)
grid.arrange(mp1, mp2, mp3, ncol = 1)
dev.off()

#MAP OF TRENDS IN ABUNDANCE AND AVERAGE WEIGHT
#ESTIMATES SMOOTH TREND IN DIFFERENT QUANTITIES OVER TIME FOR CLUSTER
## Cells sampled >= 15 years, first sampled pre-1990, with at least one catch.
f <- function(d){
    return(data.frame(nyear = length(unique(d$year)),
                      myear = min(d$year),
                      mx    = max(d$totwgt)))
}
dm <- ddply(rvw, .(cell), .fun = f)
dmm <- dm[order(dm$nyear), ]
dmm <- subset(dmm, nyear >= 15 & myear < 1990 & mx > 0)

## Per-cell trend summary: start/end of the smooth trend (chng), linear
## slope + p-value, and change in the z-scored trend (chngz).
## NOTE(review): the dead gamily='nb' arguments (typo for 'family', silently
## ignored by gam via '...') are dropped; these models fit gaussian, which is
## the only family compatible with the later z-scored response.
fsm <- function(d){
    names(d)[1] <- 'y'
    #d$y<-(d$y-mean(d$y))/sd(d$y)
    mod <- gam(y ~ s(year) + s(time, bs = 'cc', k = 5) + s(lon, lat, k = 4),
               data = d, gamma = .5)
    mod2 <- gam(y ~ year + s(time, bs = 'cc', k = 5) + s(lon, lat, k = 4),
                data = d, gamma = .5)
    s <- summary(mod2)
    pdat <- data.frame(year = seq(min(d$year), max(d$year), .25),
                       time = 1200,
                       lon  = median(d$lon),
                       lat  = median(d$lat))
    pdat$p <- predict(mod, newdata = pdat, type = 'response')
    ot <- data.frame(pstart = pdat$p[1],
                     pend   = pdat$p[dim(pdat)[1]],
                     lonc   = unique(d$lonc),
                     latc   = unique(d$latc),
                     span   = max(pdat$year) - min(pdat$year),
                     year1  = min(pdat$year),
                     beta   = s$p.table[2, 1],
                     pv     = s$p.table[2, 4])
    ot$chng <- ot$pend - ot$pstart
    ## same change measured on the z-scored response
    d$y <- (d$y - mean(d$y)) / sd(d$y)
    mod <- gam(y ~ s(year, k = 4) + s(lon, lat, k = 4), data = d, gamma = .5)
    p <- predict(mod, newdata = pdat, type = 'response', se.fit = FALSE)
    ot$chngz <- p[length(p)] - p[1]
    return(ot)
}
#TOTAL WEIGHT
mdat <- ddply(subset(rvw, cell %in% dmm$cell,
                     select = c('totwgt', 'strat', 'year', 'cell', 'time',
                                'lon', 'lat', 'lonc', 'latc')),
              .(cell), .fun = fsm)

###SAME BUT FOR AVERAGE WEIGHT
rvws <- subset(rvw, is.na(sz) == FALSE)
f <- function(d){
    return(data.frame(nyear = length(unique(d$year)),
                      myear = min(d$year),
                      mx    = max(d$totwgt)))
}
dm <- ddply(rvws, .(cell), .fun = f)
dmm <- dm[order(dm$nyear), ]
dmm <- subset(dmm, nyear >= 15 & myear < 1990 & mx > 0)
## single-cell subset kept for interactive debugging of fsm
d <- subset(rvws, cell == "-58.25_45.75",
            select = c('sz', 'strat', 'year', 'cell', 'time', 'lon', 'lat',
                       'lonc', 'latc'))

## Same per-cell trend summary, applied to average size (sz).
fsm <- function(d){
    print(unique(d$cell))  # progress marker while ddply runs
    names(d)[1] <- 'y'
    ## NOTE(review): gamily='nb' (typo for 'family') dropped -- gaussian fit
    mod <- gam(y ~ s(year, k = 4) + s(lon, lat, k = 4), data = d, gamma = .5)
mod2<-gam(y~year + s(lon,lat,k=4),data=d,gamma=.5,gamily='nb') s<-summary(mod2) pdat<-data.frame(year=seq(min(d$year),max(d$year),.25), lon=median(d$lon), lat=median(d$lat)) pdat$p<-predict(mod,newdata=pdat,type='response') ot<-data.frame(pstart=pdat$p[1], pend=pdat$p[dim(pdat)[1]], lonc=unique(d$lonc), latc=unique(d$latc), span=max(pdat$year)-min(pdat$year), year1=min(pdat$year), beta=s$p.table[2,1], pv=s$p.table[2,4]) ot$chng<-ot$pend-ot$pstart d$y<-(d$y-mean(d$y))/sd(d$y) mod<-gam(y~s(year,k=4) + s(lon,lat,k=4),data=d,gamma=.5,gamily='nb') p<-predict(mod,newdata=pdat,type='response',se.fit=FALSE) ot$chngz<-p[length(p)]-p[1] return(ot) } #AVERAGE WEIGHT mdat2<-ddply(subset(rvws,cell %in% dmm$cell,select=c('sz','strat','year','cell','time','lon','lat','lonc','latc')),.(cell),.fun=fsm) pfun<-function(a,mx,dg,lbl){ names(a)[1]<-'y' a$y<-ifelse(a$y>mx,mx,a$y) a$y<-ifelse(a$y< -mx,-mx,a$y) aa<-data.frame(y=seq((-mx)-.001,max(abs(a$y)+.001,na.rm=TRUE),length.out=100)) a<-rbind.fill(a,aa) n<-21 mxx<-max(abs(a$y)) brks<-seq((-mx)-0.001,mxx+.001,length.out=n) brks2<-round(seq((-mx)-0.001,mxx+.001,length.out=n),digits=dg) a$ycat<-cut(a$y,breaks=brks) lbls<-sort(unique(a$ycat)) lbls2<-sort(unique(cut(a$y,breaks=brks2))) cls<-matlab.like(length(lbls)) ggplot()+ geom_tile(data=a, aes(x=lonc, y=latc,fill=ycat),col='gray80',size=.0001)+ scale_fill_manual(breaks=as.character(lbls),values=cls,labels=lbls2,na.value="transparent")+ scale_alpha(guide = 'none')+ geom_polygon(aes(long,lat, group=group), fill="grey65", data=coast.mc)+ theme(panel.grid.major=element_blank(),panel.grid.minor=element_blank(),legend.position=c(.9,.2),plot.background=element_blank(),axis.line = element_line(color = 'black'), legend.key.size = unit(0.1, "in"),legend.text=element_text(size=6))+ scale_x_continuous(expand=c(0,0),breaks=seq(-68,-56,1),labels=as.character(seq(-68,-56,1)),limits=NA)+ scale_y_continuous(expand=c(0,0),breaks=seq(41,48,1),labels=as.character(seq(41,48,1)),limits=NA)+ coord_equal()+ 
coord_cartesian(ylim=c(41,48),xlim=c(-68,-56))+ xlab('')+ ylab('')+ labs(title = lbl, subtitle = "", caption = '') } pfun(subset(mdat2,select=c('chngz','lonc','latc')),4,2,'Average size (Z)') pfun(subset(mdat2,select=c('chng','lonc','latc')),.4,2,'Average size') pfun(subset(mdat,select=c('chngz','lonc','latc')),1.5,2,'Biomass trend (Z)') pfun(subset(mdat,select=c('chng','lonc','latc')),85,2,'Biomass trend') ####################################################### #GETS CELLS WHERE AT LEAST ONE HERRING WAS CAPTURED DURING ALL YEARS OF SURVEY zz<-data.frame(cell.1=sort(unique(rvw$cell.1)), n=tapply(rvw$totno,rvw$cell.1,sum)) zzz<-subset(zz,n>0) #GETS SUM OF ALL FISH CAPTURED BY CELL AND YEAR f<-function(d){ return(data.frame(n=sum(unique(d$totno)), lon=unique(d$lonc.1), lat=unique(d$latc.1), ntows=length(unique(d$id)))) } dt<-ddply(rvw,.(cell.1,year),.fun=f,.progress='text') f<-function(d){ pres<-subset(d,n>0) return(data.frame(rng=(dim(pres)[1]/dim(d)[1])*100, ncells=length(unique(d$cell.1)), ntows=sum(d$ntows))) } oo<-ddply(subset(dt,cell.1 %in% zzz$cell.1),.(year),.fun=f) plot(oo$year,oo$rng,pch=15) plot(oo$year,oo$ncells,pch=15) plot(oo$year,oo$ntows,pch=15) plot(oo$year,oo$rng/oo$ncells,pch=15) ########################################################## ################ DIURNAL CHANGES ########################################################## pltfun<-function(ott,ttl){ names(ott)[1]<-'y' names(ott)[3]<-'year' ott<-na.omit(ott) return(ggplot()+ geom_tile(data=ott, aes(x=time, y=year,fill=y),size=.0001) + scale_fill_distiller(palette='Spectral') + theme(panel.grid.major=element_blank(),panel.grid.minor=element_blank(),legend.position=c(.1,.2),plot.background=element_blank(),axis.line = element_line(color = 'black'), legend.key.size = unit(0.15, "in"),legend.text=element_text(size=6))+ scale_x_continuous(expand=c(0,0),breaks=seq(0,2400,300),labels=seq(0,2400,300),limits=NA)+ 
scale_y_continuous(expand=c(0,0),breaks=seq(1970,2015,5),labels=as.character(seq(1970,2015,5)),limits=NA)+ coord_equal()+ coord_cartesian(ylim=c(1969.5,2016.5),xlim=c(0,2350))+ xlab('')+ ylab('')+ labs(title = ttl, subtitle = "", caption = '') ) } setwd(figsdir) pdf('herring_rv_diurnal.pdf',height=8,width=10) par(mfrow=c(2,2),mar=c(4,4,1,1)) f<-function(d){ if((max(d$time)-min(d$time))>15){ mod<-gam(totwgt~s(time,bs='cc',k=6)+s(lon,lat,k=20),data=d,gamma=1,family=nb) s<-summary(mod) pdat<-data.frame(time=seq(min(d$time),max(d$time),1), lon=median(d$lon), lat=median(d$lat)) pdat$p<-predict(mod,newdata=pdat,type='response') pdat$pz<-(pdat$p-mean(pdat$p,na.rm=TRUE))/sd(pdat$p,na.rm=TRUE) pdat$rng<-max(pdat$pz,na.rm=TRUE)-min(pdat$pz,na.rm=TRUE) pdat$pmx<-subset(pdat,p==max(pdat$p)[1])$time[1] pdat$pv<-s$s.table[1,4] pdat$b<-s$s.table[1,1] return(pdat) } else NULL } ot<-ddply(rvw,.(year),.fun=f,.progress='text') ot2<-acast(ot,year~time,value.var="pz") image(x=sort(unique(ot$year)),y=sort(unique(ot$time)),ot2,col=palette(rich.colors(500)),xlab='Exploitation rate',ylab='Consumer trophic level',las=1,cex.axis=.8) plot(ot$year,ot$rng,pch=15,las=1,xlab='Year',ylab='Daily range of catch') plot(ot$year,ot$pmx,pch=15,las=1,xlab='Year',ylab='Time of max catch') p1<-pltfun(subset(ot,select=c('pz','time','year')),'Z-score') #SAME BUT BINNED TO 3 EYAR INTERVALS f<-function(d){ mod<-gam(totwgt~s(time,bs='cc',k=6)+s(lon,lat,k=30) + as.factor(year),data=d,gamma=1,family=nb) pdat<-data.frame(time=seq(min(d$time),max(d$time),1), lon=median(d$lon), lat=median(d$lat), year=median(d$year)) pdat$p<-predict(mod,newdata=pdat,type='response') pdat$pz<-(pdat$p-mean(pdat$p))/sd(pdat$p) pdat$rng<-max(pdat$pz)-min(pdat$pz) pdat$pmx<-subset(pdat,p==max(pdat$p)[1])$time return(pdat) } ot<-ddply(rvw,.(tbin3),.fun=f,.progress='text') ot2<-acast(ot,tbin3~time,value.var="pz") image(x=sort(unique(ot$tbin3)),y=sort(unique(ot$time)),ot2,col=palette(rich.colors(500)),xlab='Exploitation rate',ylab='Consumer 
trophic level',las=1,cex.axis=.8) plot(ot$tbin3,ot$rng,pch=15,las=1,xlab='Year',ylab='Daily range of catch') plot(ot$tbin3,ot$pmx,pch=15,las=1,xlab='Year',ylab='Time of max catch') p2<-pltfun(subset(ot,select=c('pz','time','tbin3')),'Z-score') cls<-colorRampPalette(c('black','darkmagenta','hotpink','darkblue','lightskyblue','forestgreen','lawngreen','gold','orange','firebrick1','firebrick4')) n<-length(unique(ot$tbin3)) dum3<-data.frame(tbin3=sort(unique(ot$tbin3)), cls=cls(n+2)[3:(n+2)]) ot<-merge(ot,dum3,by=c('tbin3'),all.x=TRUE,all.y=FALSE) f<-function(d){ lines(d$time,d$pz,col=alpha(as.character(unique(d$cl)),.5),lwd=3) } plot(0,0,xlim=c(0,2400),ylim=c(-1.5,2.5),las=1,xlab='Time',ylab='Herring',col='white') zz<-dlply(ot,.(tbin3),.fun=f) xyplot(pz~time | tbin3,data=ot) grid.arrange(p1,p2,ncol=2) dev.off() dt$lcat<-as.numeric(as.character(dt$lcat)) sfun<-function(d){ return(d[sample(nrow(d),100,replace=FALSE),]) } rvw2<-ddply(rvw,.(year),.fun=sfun,.progress='text') a<-subset(rvw,id==sort(unique(rvw$id))[2]) par(mfrow=c(3,4),mar=c(1,1,1,1)) yrs<-seq(1975,2015,5) for(i in 1:length(yrs)){ d<-subset(dt,year==yrs[i] & n>0) plot(d$lon,d$lat,pch=16,col='red',las=1) map('world',add=TRUE,fill=TRUE,col='lightgray') } oo$rng<-log10(oo$rng) plot(oo,pch=16) a<-subset(rvw,year<1990) b<-subset(rvw,year>=1990) plot(a$lonc.1,a$latc.1,pch=15) plot(b$lonc.1,b$latc.1,pch=15) par(mfrow=c(3,4),mar=c(1,1,1,1)) f<-function(d){ mod<-gam(totwgt~s(time,bs='cc',k=6) + s(lon,lat,k=10),data=d,family='nb',gamma=1) pdat<-data.frame(time=seq(min(d$time),max(d$time),length.out=100), lon=-55, lat=42.5) p<-predict(mod,newdata=pdat,type='response') pdat$p<-p # plot(pdat$time,pdat$p,type='l',main=unique(d$tbin10)) return(data.frame(time=subset(pdat,p==max(pdat$p))$time[1])) } zz<-ddply(rvw,.(year),.fun=f,.progress='text') plot(zz$year,zz$time,pch=15,type='b') plot(rvw$lon,rvw$lat,pch='.') points(rvw$lonc,rvw$latc,pch=15,col='purple') f<-function(d){ return(data.frame(n=length(unique(d$id)), 
lon=unique(d$lonc.1), lat=unique(d$latc.1), totno=sum(d$totno)))} dtt<-ddply(rvw,.(cell.1,year),.fun=f,.progress='text') dtt$p<-1-dbinom(0,dtt$n,.02) 1-dbinom(0,1,.02) 1-dbinom(0,30,.02) 1-dbinom(0,1,.02) 1-dbinom(0,30,.02) d<-subset(rvw,cell=="-57.25_44.25") f<-function(d){ if(length(unique(d$year))>=5 & length(unique(d$time))>5){ mod<-gam(pres~as.factor(year),data=d,gamma=1.4,family='binomial') s<-summary(mod) pdat<-data.frame(year=sort(unique(d$year)), time=1200, ntows=tapply(d$id,d$year,function(x) length(unique(x)))) pdat$pabs<-1-dbinom(0,pdat$ntows,.02)#probability that 0 is real length(c(1,s$p.table[,4])) pdat$pprs<-s$p.table[,4] pdat$pprs[1]<-1 p<-predict(mod,newdata=pdat,type='response',se.fit=TRUE) pdat$p<-p$fit return(pdat) } else NULL } ot<-ddply(rvw,.(cell),.fun=f,.progress='text') a<-subset(ot,p==0) hist(ot$p,breaks=100,col='black') plot(log(ot$p+.01),(ot$pb)) plot(log(ot$p+.01),log(ot$pb)) f<-function(d){ return(data.frame(n=length(unique(d$id)), lon=unique(d$lonc.1), lat=unique(d$latc.1), totno=sum(d$totno)))} dt<-ddply(rvw,.(cell.1),.fun=f,.progress='text') dt$p<-dbinom(1,dt$n,.02) #dt<-ddply(rvw,.(cell,tbin20),.fun=f,.progress='text') plot(log10(dt$n),log10(dt$totno+1),pch=16) cor(log10(dt$n),log10(dt$totno+1)) hist(dt$n,breaks=100,col='black') adat<-subset(dt,select=c('p','lon','lat')) adat<-subset(dt,select=c('n','lon','lat')) ttl<-'dan' ct<-30 dg<-2 nm<-names(adat)[1] names(adat)[1]<-'y' adat$y<-ifelse(adat$y>ct,ct,adat$y) adat$y<-ifelse(adat$y< -ct,-ct,adat$y) adat$y<-round(adat$y,digits=2) a<-adat aa<-data.frame(y=seq(0,max(abs(adat$y),na.rm=TRUE),length.out=100)) a<-rbind.fill(a,aa) n<-21 mxx<-max(abs(adat$y)) brks<-seq(0,mxx+.01,length.out=n) brks2<-round(seq(0,mxx+.01,length.out=n),digits=dg) a$ycat<-cut(a$y,breaks=brks) lbls<-sort(unique(a$ycat)) lbls2<-sort(unique(cut(a$y,breaks=brks2))) cls<-matlab.like(length(lbls)) #cls<-colorRampPalette(c('magenta4','blue3','green','yellow','red3')) #cls<-(cls(length(lbls))) 
#cls<-colorRampPalette(c('dodgerblue4','white','firebrick4')) #cls<-cls(length(lbls)) return( ggplot()+ geom_tile(data=a, aes(x=lon, y=lat,fill=ycat),col='gray80',size=.0001) + scale_fill_manual(breaks=as.character(lbls),values=cls,labels=lbls2,na.value="transparent",guide=guide_legend(title=paste(ttl)))+ scale_alpha(guide = 'none')+ geom_polygon(aes(long,lat, group=group), fill="grey65", data=coast.mc)+ theme(panel.grid.major=element_blank(),panel.grid.minor=element_blank(),legend.position='right',plot.background=element_blank(),axis.line = element_line(color = 'black'), legend.key.size = unit(0.1, "in"),legend.text=element_text(size=6))+ scale_x_continuous(expand=c(0,0),breaks=seq(-70,-56,1),labels=as.character(seq(-70,-56,1)),limits=NA)+ scale_y_continuous(expand=c(0,0),breaks=seq(41,48,1),labels=as.character(seq(41,48,1)),limits=NA)+ coord_equal()+ coord_cartesian(ylim=c(min(adat$lat)-.3,max(adat$lat)+.3),xlim=c(min(adat$lon)-.3,max(adat$lon)+.3))+ xlab('')+ ylab('') ) pltfun<-function(adat,ttl,ct,dg){ nm<-names(adat)[1] names(adat)[1]<-'y' adat$y<-ifelse(adat$y>ct,ct,adat$y) adat$y<-ifelse(adat$y< -ct,-ct,adat$y) adat$y<-round(adat$y,digits=2) a<-adat aa<-data.frame(y=seq(-(max(abs(adat$y),na.rm=TRUE)),max(abs(adat$y),na.rm=TRUE),length.out=100)) a<-rbind.fill(a,aa) n<-21 mxx<-max(abs(adat$y)) brks<-seq(-mxx-.01,mxx+.01,length.out=n) brks2<-round(seq(-mxx-.01,mxx+.01,length.out=n),digits=dg) a$ycat<-cut(a$y,breaks=brks) lbls<-sort(unique(a$ycat)) lbls2<-sort(unique(cut(a$y,breaks=brks2))) #cls<-matlab.like(length(lbls)) #cls<-colorRampPalette(c('magenta4','blue3','green','yellow','red3')) #cls<-(cls(length(lbls))) cls<-colorRampPalette(c('dodgerblue4','white','firebrick4')) cls<-cls(length(lbls)) return( ggplot()+ geom_tile(data=a, aes(x=lon, y=lat,fill=ycat),col='gray80',size=.0001) + scale_fill_manual(breaks=as.character(lbls),values=cls,labels=lbls2,na.value="transparent",guide=guide_legend(title=paste(ttl)))+ scale_alpha(guide = 'none')+ 
geom_polygon(aes(long,lat, group=group), fill="grey65", data=coast.mc)+ theme(panel.grid.major=element_blank(),panel.grid.minor=element_blank(),legend.position='right',plot.background=element_blank(),axis.line = element_line(color = 'black'), legend.key.size = unit(0.1, "in"),legend.text=element_text(size=6))+ scale_x_continuous(expand=c(0,0),breaks=seq(-70,-56,1),labels=as.character(seq(-70,-56,1)),limits=NA)+ scale_y_continuous(expand=c(0,0),breaks=seq(41,48,1),labels=as.character(seq(41,48,1)),limits=NA)+ coord_equal()+ coord_cartesian(ylim=c(min(adat$lat)-.3,max(adat$lat)+.3),xlim=c(min(adat$lon)-.3,max(adat$lon)+.3))+ xlab('')+ ylab('') ) } p1<-pltfun(subset(phen2,select=c('delta.mxfall','lon','lat')),'mxfall',2.8,3) return(ggplot()+ geom_polygon(aes(long,lat,group=group,fill=as.factor(clusters),alpha=cx),data=mydat,col='black',size=.0001) + coord_equal()+ scale_fill_manual(values=c(dum[1],dum[2],dum[3]))+ geom_polygon(aes(long,lat,group=group),fill="grey65", data=coast.mc)+ theme(panel.grid.major=element_blank(),panel.grid.minor=element_blank(),legend.position='bottomright',plot.background=element_blank(),axis.line = element_line(color = 'black'), legend.key.size = unit(0.15, "in"),legend.text=element_text(size=6))+ scale_x_continuous(expand=c(0,0),breaks=seq(-68,-57,1),labels=as.character(seq(-68,-57,1)),limits=NA)+ scale_y_continuous(expand=c(0,0),breaks=seq(42,47,1),labels=as.character(seq(42,47,1)),limits=NA)+ coord_equal()+ coord_cartesian(ylim=c(42,47),xlim=c(-68,-57))+ labs(title = lbl, subtitle = "", caption = 'Data source: Summer RV survey') + xlab('')+ ylab('')) } setwd(figsdir) pdf('herring_rvsurvey_cluster_all_k3.pdf',height=10,width=8) mp1<-clfun(qsm,3,"Total observed herring (weight)") summary(rvw$time) sort(unique(rvw$time)) plot(rvw$time,log10(rvw$no+.1)) mod<-gam(totwgt ~ s(time,bs='cc'),data=rvw,family='nb') pdat<-data.frame(time=seq(min(rvw$time),max(rvw$time),length.out=100)) p<-predict(mod,newdata=pdat,type='response') pdat$p<-p 
## Diagnostic plot of the prediction produced by the preceding gam fit
## (pdat carries time/p from the block above).
plot(pdat$time, pdat$p)

## Per-stratum survey coverage: number of distinct sampled years and the
## first sampled year.
f <- function(d){
  return(data.frame(nyear = length(unique(d$year)),
                    myear = min(d$year)))
}
dm <- ddply(rvw, .(strat), .fun = f)
dmm <- dm[order(dm$nyear), ]
## Keep only well-sampled strata: >=20 survey years, series starting pre-1990.
dmm <- subset(dmm, nyear >= 20 & myear < 1990)

## Annual index of the first column (here: total weight) per stratum, with
## year as a factor; also reports tows per year as an effort covariate.
## FIX: original had 'gamily=negbin' (typo for 'family='); the misspelled
## argument was silently absorbed by gam()'s '...', so the model fit as
## gaussian.  Corrected to mgcv's negative binomial with estimated theta.
f <- function(d){
  names(d)[1] <- 'y'
  mod <- gam(y ~ as.factor(year), data = d, gamma = .5, family = nb())
  pdat <- data.frame(year = sort(unique(d$year)),
                     ntows = tapply(d$id, d$year, function(x) length(unique(x))))
  pdat$p <- predict(mod, newdata = pdat, type = 'response')
  return(pdat)
}
#TOTAL NUMBERS
qq <- ddply(subset(rvw, strat %in% dmm$strat, select = c('totwgt', 'strat', 'year', 'id')), .(strat), .fun = f)
## Index vs. effort diagnostics (weight-based index).
par(mfrow = c(2, 2))
plot(qq$ntows, qq$p, col = alpha('darkred', .4), pch = 16, cex = 2)
plot(log10(qq$ntows), log10(qq$p + 1), col = alpha('darkred', .4), pch = 16, cex = 2)
cor(log10(qq$ntows), log10(qq$p + 1))#.019

## Annual presence/absence index per stratum.
## FIX: original had 'gamily=binomial' (same typo), so presence/absence was
## not actually modelled as binomial; corrected.
f <- function(d){
  names(d)[1] <- 'y'
  mod <- gam(y ~ as.factor(year), data = d, gamma = .5, family = binomial)
  pdat <- data.frame(year = sort(unique(d$year)),
                     ntows = tapply(d$id, d$year, function(x) length(unique(x))))
  pdat$p <- predict(mod, newdata = pdat, type = 'response')
  return(pdat)
}
#TOTAL NUMBERS
qq <- ddply(subset(rvw, strat %in% dmm$strat, select = c('pres', 'strat', 'year', 'id')), .(strat), .fun = f)
## Index vs. effort diagnostics (presence-based index).
par(mfrow = c(2, 2))
plot(qq$ntows, qq$p, col = alpha('darkred', .4), pch = 16, cex = 2)
plot(log10(qq$ntows), log10(qq$p + .1), col = alpha('darkred', .4), pch = 16, cex = 2)
cor(log10(qq$ntows), log10(qq$p + .1))#-.1
plot(qq$year, log10(qq$p + .1), col = alpha('darkred', .4), pch = 16, cex = 2)
xyplot(log10(p + 1) ~ year | strat, data = qq, pch = 15, col = alpha('darkred', .5))

#ESTIMATES SMOOTH TREND IN DIFFERENT QUANTITIES OVER TIME FOR CLUSTER
## Annual abundance index (negative binomial) plus per-year effort (ntows),
## raw totals (n) and mean fish size (sz) for the diagnostics that follow.
f <- function(d){
  names(d)[1] <- 'y'
  #mod<-gam(y~as.factor(year),data=d,gamma=.5,family=Gamma('log'))
  mod <- gam(y ~ as.factor(year), data = d, gamma = .5, family = nb)
  pdat <- data.frame(year = sort(unique(d$year)),
                     ntows = tapply(d$id, d$year, function(x) length(unique(x))),
                     n = tapply(d$y, d$year, sum),
                     sz = tapply(d$sz, d$year, function(x) mean(x, na.rm = TRUE)))
  pdat$p <- predict(mod, newdata = pdat, type = 'response')
  #pdat$p<-(pdat$p-mean(pdat$p))/sd(pdat$p)
  return(pdat)
}
#TOTAL NUMBERS
qq<-ddply(subset(rvw,strat %in% dmm$strat,select=c('totno','strat','year','id','sz')),.(strat),.fun=f) plot(log10(qq$sz+.1),log10(qq$p+.01),pch=15) cor(log10(qq$sz+.1),log10(qq$p+.01),use='pairwise.complete.obs') plot(log10(qq$ntows),log10(qq$n+1),col=alpha('darkred',.3),pch=16,cex=2) plot(log10(qq$ntows),log10(qq$p+.01),col=alpha('darkred',.3),pch=16,cex=2) cor(log10(qq$ntows),log10(qq$p+.01))#.10 plot((qq$ntows),log10(qq$n+.01),col=alpha('darkred',.4),pch=16,cex=2) plot(log10(qq$ntows),log10(qq$n+.01),col=alpha('darkred',.4),pch=16,cex=2) cor(log10(qq$ntows),log10(qq$n+.01))#.40 f<-function(d){ return(data.frame(r=cor(log10(d$ntows),log10(d$p+.01),use='pairwise.complete.obs'), r2=cor(log10(d$ntows),log10(d$n+.01),use='pairwise.complete.obs'))) } dt<-ddply(qq,.(strat),.fun=f) dt$dir<-ifelse(dt$r>0,1,-1) dt$dir2<-ifelse(dt$r2>0,1,-1) cor(log10(qq$ntows),log10(qq$p+1)) xyplot(log10(p+.1)~log10(ntows) |strat,data=qq,col=alpha('dodgerblue3',.7),pch=15, type=c('p','r')) xyplot(log10(p+.01)~log10(ntows) |strat,data=qq,col=alpha('dodgerblue3',.7),pch=15, type=c('p','r')) xyplot(log10(p+1)~log10(ntows) |strat,data=qq,col=alpha('dodgerblue3',.7),pch=15, type=c('p','r')) #ESTIMATES SMOOTH TREND IN DIFFERENT QUANTITIES OVER TIME FOR CLUSTER fsm<-function(d){ names(d)[1]<-'y' mod<-gam(y~s(year),data=d,gamma=.5,gamily=Gamma('log')) pdat<-data.frame(year=seq(min(d$year),max(d$year),.25)) pdat$p<-predict(mod,newdata=pdat,type='response') pdat$p<-(pdat$p-mean(pdat$p))/sd(pdat$p) names(pdat)[2]<-unique(as.character(d$strat)) return(pdat) } #TOTAL WEIGHT qq<-dlply(subset(rvw,strat %in% dmm$strat,select=c('totwgt','strat','year')),.(strat),.fun=fsm) qsm<-Reduce(function(x, y) merge(x, y, by=c('year'),all=TRUE), qq)#COMBINE qsm<-qsm[,colSums(is.na(qsm)) != nrow(qsm)]#REMOVES COLUMNS THAT ARE ALL MISSING #TOTAL NUMBERS qq<-dlply(subset(rvw,strat %in% dmm$strat,select=c('totno','strat','year')),.(strat),.fun=fsm) qsm2<-Reduce(function(x, y) merge(x, y, by=c('year'),all=TRUE), 
qq)#COMBINE qsm2<-qsm2[,colSums(is.na(qsm2)) != nrow(qsm2)]#REMOVES COLUMNS THAT ARE ALL MISSING #AVERAGE SIZE qq<-dlply(subset(rvw,strat %in% dmm$strat,select=c('sz','strat','year')),.(strat),.fun=fsm) qsm3<-Reduce(function(x, y) merge(x, y, by=c('year'),all=TRUE), qq)#COMBINE qsm3<-qsm3[,colSums(is.na(qsm3)) != nrow(qsm3)]#REMOVES COLUMNS THAT ARE ALL MISSING q<-qsm3 q2<- q %>% gather(strata, value, -year) xyplot(value ~ year | strata,data=q2, pch=15, type=c('spline'),col='black') xyplot(value ~ year | strata,data=q2, pch=15, type=c('p','spline'),col='black') clfun<-function(df,k,lbl){ rownames(df)<-df$year df<-df[,-1] #k<-3#ER OF CLUSTERS dmat<-1-cor(df,use='pairwise.complete.obs') dst<-as.dist(dmat) ff<-fanny(dst,k,maxit=5000,diss=T) par(mfrow=c(2,1),mar=c(4,4,1,1)) dum<-c('red3','forestgreen','darkblue','cornflowerblue','darkblue') plot(silhouette(ff),col=dum[1:k],main='')#silhouette plot dc.pcoa<-cmdscale(dst) dc.scores<-scores(dc.pcoa,choices=c(1,2)) spefuz.g<-ff$clustering a<-data.frame(strat=as.character(sort(unique(names(df)))), clusters=ff$clustering) aa<-data.frame(ff$membership) aa$strat<-rownames(a) #par(mar=c(1,1,1,8),oma=c(1,1,1,1)) plot(scores(dc.pcoa),asp=1,type='n',xlim=c(-1.5,1),ylim=c(-1,1.2),las=1,axes=TRUE,xlab='',ylab='') stars(ff$membership,location=scores(dc.pcoa),draw.segments=T,add=T,scale=F,len=.1,col.segments=alpha(c(dum[1:k]),.25),byt='n',labels=NULL,xlim=c(-1.1,1.4),ylim=c(-1,.5),lwd=.0001,xpd=TRUE,border=NULL,radius=FALSE,col.radius=alpha('white',.1)) for(i in 1:k){ cl<-dum[i] gg<-dc.scores[spefuz.g==i,] hpts<-chull(gg) hpts<-c(hpts,hpts[1]) lines(gg[hpts,],col=cl,lwd=3,xlim=c(-1.1,1.4),ylim=c(-1,.5)) } cx <- data.frame(strat=aa$strat, cx=apply(aa[, 1:k], 1, max)) cx$cxx <- rescale(cx$cx,newrange=c(.05,.5)) #PLOTS THE TIMESERIES OF R FOR EACH CLUSTER par(mfrow=c(3,1),mar=c(2,12,1,12),oma=c(1,1,1,1)) mb<-seq(1,k,1) l<-list() for(i in 1:length(mb)){ print(mb[i]) one<-subset(a,clusters==mb[i],select=c('strat'))# IN THIS CLUSTER 
cx2<-subset(cx,cx>=0.5)#GETS INSTANCES WHERE CLUSTER PROBABILITY>0.5 data5<-subset(df,select=c(as.character(one$strat)))#TS FOR CLUSTER cx2<-subset(cx2,strat %in% names(data5)) data5<-subset(df,select=c(as.character(cx2$strat)))#TS FOR CLUSTER x<-rownames(data5) t<-data.frame(year=as.numeric(x),mn=rowMeans(data5,na.rm=F)) t2<-data.frame(year=as.numeric(x),mn=rowMeans(data5,na.rm=T)) cl<-dum[i] plot(0,0,pch=16,cex=.01,xlim=c(1970,2015),ylim=c(-2,3),main='',xaxt='n',las=1,axes=FALSE,xlab='',ylab='') axis(side=2,at=seq(-2,3,1),las=1,lwd=.001,cex.axis=.75) axis(side=1,at=seq(1970,2020,10),cex.axis=.75) for(j in 1:length(data5[1,])){ try(dat<-data5[,j]) try(trnsp<-subset(cx,strat==as.character(names(data5[j])))$cxx) try(lines(x,dat,cex=.5,ylim=c(0,1),lwd=2,col=alpha(cl,rescale(trnsp,newrange=c(.1,.5))))) } lines(as.numeric(as.character(t$year)),t$mn,col='gold3',cex=.7,lwd=3) dm<-subset(t2,mn==max(t2$mn,na.rm=TRUE),select=c('year')) dm$cluster=mb[i] names(dm)<-c('clusterday','clusters') l[[i]]<-dm } a2<-a if(k==3){a2$cx<- apply(aa[,c('X1','X2','X3')], 1, function(x) max(x) ) } else {a2$cx<- apply(aa[,c('X1','X2')], 1, function(x) max(x) ) } a2$id<-a2$strat am<-fortify(plg,region='stratum') am<-subset(am,id%in% a2$id) mydat<-merge(am,a2,by=c('id')) mydat$cx<-rescale(mydat$cx,newrange=c(.4,1)) par(mfrow=c(1,1),mar=c(3,3,3,3)) return(ggplot()+ geom_polygon(aes(long,lat,group=group,fill=as.factor(clusters),alpha=cx),data=mydat,col='black',size=.0001) + coord_equal()+ scale_fill_manual(values=c(dum[1],dum[2],dum[3]))+ geom_polygon(aes(long,lat,group=group),fill="grey65", data=coast.mc)+ theme(panel.grid.major=element_blank(),panel.grid.minor=element_blank(),legend.position='bottomright',plot.background=element_blank(),axis.line = element_line(color = 'black'), legend.key.size = unit(0.15, "in"),legend.text=element_text(size=6))+ scale_x_continuous(expand=c(0,0),breaks=seq(-68,-57,1),labels=as.character(seq(-68,-57,1)),limits=NA)+ 
scale_y_continuous(expand=c(0,0),breaks=seq(42,47,1),labels=as.character(seq(42,47,1)),limits=NA)+ coord_equal()+ coord_cartesian(ylim=c(42,47),xlim=c(-68,-57))+ labs(title = lbl, subtitle = "", caption = 'Data source: Summer RV survey') + xlab('')+ ylab('')) } setwd(figsdir) pdf('herring_rvsurvey_cluster_all_k3.pdf',height=10,width=8) mp1<-clfun(qsm,3,"Total observed herring (weight)") mp2<-clfun(qsm2,3,"Total observed herring (numb)") mp3<-clfun(qsm3,3,"Average weight") dev.off() pdf('herring_rvsurvey_cluster_map_k3.pdf',height=14,width=8) grid.arrange(mp1,mp2,mp3,ncol=1) dev.off() setwd(figsdir) pdf('herring_rvsurvey_cluster_all_k2.pdf',height=10,width=8) mp1<-clfun(qsm,2,"Total observed herring (weight)") mp2<-clfun(qsm2,2,"Total observed herring (numb)") mp3<-clfun(qsm3,2,"Average weight") dev.off() pdf('herring_rvsurvey_cluster_map_k2.pdf',height=14,width=8) grid.arrange(mp1,mp2,mp3,ncol=1) dev.off() ################################################### #TOTAL NUMBER OF HERRING RECORDED FROM ALL TRAWLS ALL YEARS f<-function(d){ return(data.frame(wt=mean(d$totwgt), no=mean(d$totno), dep=mean(d$depth,na.rm=TRUE), sz=mean(d$sz))) } d1<-ddply(rvw,.(strat),.fun=f) d1<-d1[order(d1$sz,decreasing=TRUE),] cor(subset(d1,select=c('wt','no','dep','sz')),use='pairwise.complete.obs') plot(subset(d1,select=c('wt','no','dep','sz')),pch=15) a2<-d1 a2$id<-a2$strat am<-fortify(plg,region='stratum') am<-subset(am,id%in% a2$id) mydat<-merge(am,a2,by=c('id')) pwt<-ggplot()+ geom_polygon(aes(long,lat,group=group,fill=wt),data=mydat,col='black',size=.0001) + coord_equal()+ scale_fill_distiller(palette='Spectral')+ geom_polygon(aes(long,lat,group=group),fill="grey65", data=coast.mc)+ theme(panel.grid.major=element_blank(),panel.grid.minor=element_blank(),legend.position='bottomright',plot.background=element_blank(),axis.line = element_line(color = 'black'), legend.key.size = unit(0.15, "in"),legend.text=element_text(size=6))+ 
scale_x_continuous(expand=c(0,0),breaks=seq(-68,-57,1),labels=as.character(seq(-68,-57,1)),limits=NA)+ scale_y_continuous(expand=c(0,0),breaks=seq(41,47,1),labels=as.character(seq(41,47,1)),limits=NA)+ coord_equal()+ coord_cartesian(ylim=c(41,47),xlim=c(-68,-57))+ labs(title = "Total observed herring (weight)", subtitle = "", caption = 'Data source: Summer RV survey') + xlab('')+ ylab('') pno<-ggplot()+ geom_polygon(aes(long,lat,group=group,fill=no),data=mydat,col='black',size=.0001) + coord_equal()+ scale_fill_distiller(palette='Spectral')+ geom_polygon(aes(long,lat,group=group),fill="grey65", data=coast.mc)+ theme(panel.grid.major=element_blank(),panel.grid.minor=element_blank(),legend.position='bottomright',plot.background=element_blank(),axis.line = element_line(color = 'black'), legend.key.size = unit(0.15, "in"),legend.text=element_text(size=6))+ scale_x_continuous(expand=c(0,0),breaks=seq(-68,-57,1),labels=as.character(seq(-68,-57,1)),limits=NA)+ scale_y_continuous(expand=c(0,0),breaks=seq(41,47,1),labels=as.character(seq(41,47,1)),limits=NA)+ coord_equal()+ coord_cartesian(ylim=c(41,47),xlim=c(-68,-57))+ labs(title = "Total observed herring (numbers)", subtitle = "", caption = 'Data source: Summer RV survey') + xlab('')+ ylab('') psz<-ggplot()+ geom_polygon(aes(long,lat,group=group,fill=sz),data=mydat,col='black',size=.0001) + coord_equal()+ scale_fill_distiller(palette='Spectral')+ geom_polygon(aes(long,lat,group=group),fill="grey65", data=coast.mc)+ theme(panel.grid.major=element_blank(),panel.grid.minor=element_blank(),legend.position='bottomright',plot.background=element_blank(),axis.line = element_line(color = 'black'), legend.key.size = unit(0.15, "in"),legend.text=element_text(size=6))+ scale_x_continuous(expand=c(0,0),breaks=seq(-68,-57,1),labels=as.character(seq(-68,-57,1)),limits=NA)+ scale_y_continuous(expand=c(0,0),breaks=seq(41,47,1),labels=as.character(seq(41,47,1)),limits=NA)+ coord_equal()+ coord_cartesian(ylim=c(41,47),xlim=c(-68,-57))+ 
labs(title = "Average weight of all observed herring", subtitle = "", caption = 'Data source: Summer RV survey') + xlab('')+ ylab('') setwd(figsdir) pdf('herring_rvsurvey_allobs_map2.pdf',height=14,width=8) grid.arrange(pwt,pno,psz,ncol=1) dev.off() rvw$dayc<-cut(rvw$day,breaks=seq(170,230,10),labels=seq(175,225,10)) f<-function(d){ if(length(unique(d$dayc))>1){ mod<-gam(totwgt~as.factor(dayc),data=d,gamma=1) pdat<-data.frame(dayc=sort(unique(d$dayc))) pdat$p<-predict(mod,newdata=pdat) return(pdat) } else NULL } phdat<-ddply(rvw,.(strat),.fun=f) a2<-phdat a2$id<-a2$strat am<-fortify(plg,region='stratum') am<-subset(am,id%in% a2$id) mydat<-merge(am,a2,by=c('id')) mydat$p<-(mydat$p-mean(mydat$p))/sd(mydat$p) dys<-sort(unique(phdat$dayc)) l<-list() for(i in 1:length(dys)){ d<-subset(mydat,dayc==dys[i]) d$p<-log10(d$p+1) print(dim(d)) l[[i]]<-ggplot()+ geom_polygon(aes(long,lat,group=group,fill=p),data=d,col='black',size=.0001) + coord_equal()+ scale_fill_distiller(palette='Spectral')+ geom_polygon(aes(long,lat,group=group),fill="grey65", data=coast.mc)+ theme(panel.grid.major=element_blank(),panel.grid.minor=element_blank(),legend.position=c(.8,.2),plot.background=element_blank(),axis.line = element_line(color = 'black'), legend.key.size = unit(0.15, "in"),legend.text=element_text(size=6))+ scale_x_continuous(expand=c(0,0),breaks=seq(-68,-57,1),labels=as.character(seq(-68,-57,1)),limits=NA)+ scale_y_continuous(expand=c(0,0),breaks=seq(41,47,1),labels=as.character(seq(41,47,1)),limits=NA)+ coord_equal()+ coord_cartesian(ylim=c(41,47),xlim=c(-68,-57))+ labs(title = paste("Total observed herring (day=",dys[i]), subtitle = "", caption = 'Data source: Summer RV survey') + xlab('')+ ylab('') } setwd(figsdir) pdf('herring_rvsurvey_seasonal_daysbin.pdf',height=14,width=10) grid.arrange(l[[1]],l[[2]],l[[3]],l[[4]],l[[5]],l[[6]],ncol=2) dev.off() a<-subset(mydat,dayc==205) hist(a$p,breaks=50) hist(log(a$p),breaks=50) d<-subset(rvw,strat==462) 
a2<-a2[order(a2$wt,decreasing=TRUE),] f<-function(d){ if(length(unique(d$month))>1){ mod<-gam(totwgt~as.factor(month),data=d,gamma=1) pdat<-data.frame(month=sort(unique(d$month))) pdat$p<-predict(mod,newdata=pdat) return(pdat) } else NULL } phdat<-ddply(rvw,.(strat),.fun=f) plot(rvw$day,rvw$totno) a2<-phdat a2$id<-a2$strat am<-fortify(plg,region='stratum') am<-subset(am,id%in% a2$id) mydat<-merge(am,a2,by=c('id')) mydat1<-subset(mydat,month==6) p1<-ggplot()+ geom_polygon(aes(long,lat,group=group,fill=p),data=mydat1,col='black',size=.0001) + coord_equal()+ scale_fill_distiller(palette='Spectral')+ geom_polygon(aes(long,lat,group=group),fill="grey65", data=coast.mc)+ theme(panel.grid.major=element_blank(),panel.grid.minor=element_blank(),legend.position='bottomright',plot.background=element_blank(),axis.line = element_line(color = 'black'), legend.key.size = unit(0.15, "in"),legend.text=element_text(size=6))+ scale_x_continuous(expand=c(0,0),breaks=seq(-68,-57,1),labels=as.character(seq(-68,-57,1)),limits=NA)+ scale_y_continuous(expand=c(0,0),breaks=seq(41,47,1),labels=as.character(seq(41,47,1)),limits=NA)+ coord_equal()+ coord_cartesian(ylim=c(41,47),xlim=c(-68,-57))+ labs(title = "Total observed herring (June)", subtitle = "", caption = 'Data source: Summer RV survey') + xlab('')+ ylab('') mydat2<-subset(mydat,month==7) p2<-ggplot()+ geom_polygon(aes(long,lat,group=group,fill=p),data=mydat2,col='black',size=.0001) + coord_equal()+ scale_fill_distiller(palette='Spectral')+ geom_polygon(aes(long,lat,group=group),fill="grey65", data=coast.mc)+ theme(panel.grid.major=element_blank(),panel.grid.minor=element_blank(),legend.position='bottomright',plot.background=element_blank(),axis.line = element_line(color = 'black'), legend.key.size = unit(0.15, "in"),legend.text=element_text(size=6))+ scale_x_continuous(expand=c(0,0),breaks=seq(-68,-57,1),labels=as.character(seq(-68,-57,1)),limits=NA)+ 
scale_y_continuous(expand=c(0,0),breaks=seq(41,47,1),labels=as.character(seq(41,47,1)),limits=NA)+ coord_equal()+ coord_cartesian(ylim=c(41,47),xlim=c(-68,-57))+ labs(title = "Total observed herring (July)", subtitle = "", caption = 'Data source: Summer RV survey') + xlab('')+ ylab('') mydat3<-subset(mydat,month==8) p3<-ggplot()+ geom_polygon(aes(long,lat,group=group,fill=p),data=mydat3,col='black',size=.0001) + coord_equal()+ scale_fill_distiller(palette='Spectral')+ geom_polygon(aes(long,lat,group=group),fill="grey65", data=coast.mc)+ theme(panel.grid.major=element_blank(),panel.grid.minor=element_blank(),legend.position='bottomright',plot.background=element_blank(),axis.line = element_line(color = 'black'), legend.key.size = unit(0.15, "in"),legend.text=element_text(size=6))+ scale_x_continuous(expand=c(0,0),breaks=seq(-68,-57,1),labels=as.character(seq(-68,-57,1)),limits=NA)+ scale_y_continuous(expand=c(0,0),breaks=seq(41,47,1),labels=as.character(seq(41,47,1)),limits=NA)+ coord_equal()+ coord_cartesian(ylim=c(41,47),xlim=c(-68,-57))+ labs(title = "Total observed herring (August)", subtitle = "", caption = 'Data source: Summer RV survey') + xlab('')+ ylab('') setwd(figsdir) pdf('herring_rvsurvey_bymonth_map.pdf',height=14,width=8) grid.arrange(p1,p2,p3,ncol=1) dev.off() rvw$yearc<-cut(rvw$year,breaks=seq(1970,2020,5),labels=seq(1972.5,2017.5,5),include.lowest=TRUE) f<-function(d){ return(data.frame(no=median(d$totno), wt=median(d$totwgt), sz=median(d$sz))) } d1<-ddply(rvw,.(strat,yearc),.fun=f) a2<-d1 a2$id<-a2$strat am<-fortify(plg,region='stratum') am<-subset(am,id%in% a2$id) mydat<-merge(am,a2,by=c('id')) #mydat$p<-(mydat$p-mean(mydat$p))/sd(mydat$p) yrs<-sort(unique(mydat$yearc)) l<-list() for(i in 1:length(yrs)){ d<-subset(mydat,yearc==yrs[i]) d$wt<-log10(d$wt) print(dim(d)) l[[i]]<-ggplot()+ geom_polygon(aes(long,lat,group=group,fill=wt),data=d,col='black',size=.0001) + coord_equal()+ scale_fill_distiller(palette='Spectral')+ 
geom_polygon(aes(long,lat,group=group),fill="grey65", data=coast.mc)+ theme(panel.grid.major=element_blank(),panel.grid.minor=element_blank(),legend.position=c(.8,.2),plot.background=element_blank(),axis.line = element_line(color = 'black'), legend.key.size = unit(0.11, "in"),legend.text=element_text(size=6))+ scale_x_continuous(expand=c(0,0),breaks=seq(-68,-57,1),labels=as.character(seq(-68,-57,1)),limits=NA)+ scale_y_continuous(expand=c(0,0),breaks=seq(41,47,1),labels=as.character(seq(41,47,1)),limits=NA)+ coord_equal()+ coord_cartesian(ylim=c(41,47),xlim=c(-68,-57))+ labs(title = paste("Total observed herring (year=",yrs[i]), subtitle = "", caption = 'Data source: Summer RV survey') + xlab('')+ ylab('') } setwd(figsdir) pdf('herring_rvsurvey_wt_byyear.pdf',height=16,width=16) grid.arrange(l[[1]],l[[2]],l[[3]],l[[4]],l[[5]],l[[6]],l[[7]],l[[8]],l[[9]],l[[10]],ncol=3) dev.off() f<-function(d){ if(length(unique(d$year))>=20){ print(unique(d$strat)) print(length(unique(d$year))) mod<-gam(no~year,data=d,gamma=1) s<-summary(mod) no.pv<-s$p.table[2,4] no.b<-(10^s$p.table[2,1])-1 mod<-gam(wgt~year,data=d,gamma=1) s<-summary(mod) wt.pv<-s$p.table[2,4] wt.b<-(10^s$p.table[2,1])-1 d$depth<-(d$dmin+d$dmax)/2 d$totno<-ifelse(d$totno==0,1,d$totno) d$M<-d$totwgt/d$totno d$temperature<-(d$surface_temperature+d$bottom_temperature)/2 d<-subset(d,is.na(temperature)==FALSE) d$kelvins<-d$temperature+273.15 k<-0.00008617#BOLTZMANN CONSTANT d$metai<-(d$M^.25)*(exp(-1*(1/(k*d$kelvins)))) d$metai<-d$metai*10^18 mod<-gam(metai~as.factor(year),data=d,gamma=1) s<-summary(mod) met.pv<-s$p.table[2,4] met.b<-(10^s$p.table[2,1])-1 return(data.frame(lon=mean(d$lon), lat=mean(d$lat), no.b=no.b, no.pv=no.pv, wt.b=wt.b, wt.pv=wt.pv, met.b=met.b, met.pv=met.pv)) } else NULL } sdat1<-ddply(rvw,.(strat),.fun=f) map('world',xlim=c(-70,-60),ylim=c(42,46),fill=TRUE,col='gray') map.axes() 
points(sdat1$lon,sdat1$lat,pch=16,cex=rescale(abs(sdat1$no.b),newrange=c(3,14)),col=ifelse(sdat1$no.b>0,'firebrick3','dodgerblue3')) map('world',xlim=c(-70,-60),ylim=c(42,46),fill=TRUE,col='gray') map.axes() points(sdat1$lon,sdat1$lat,pch=16,cex=rescale(abs(sdat1$wt.b),newrange=c(3,14)),col=ifelse(sdat1$wt.b>0,'firebrick3','dodgerblue3')) map('world',xlim=c(-70,-60),ylim=c(42,46),fill=TRUE,col='gray') map.axes() points(sdat1$lon,sdat1$lat,pch=16,cex=rescale(abs(sdat1$met.b),newrange=c(3,14)),col=ifelse(sdat1$met.b>0,'firebrick3','dodgerblue3')) cor(sdat1$no.b,sdat1$wt.b) cor(sdat1$no.b,sdat1$met.b) plot(sdat1$met.b,sdat1$no.b,pch=15) setwd(datadir) rvl<-read.csv("herring_lengths_RV_survey_spera_spawnar.csv",header=TRUE) rvl$flen<-ifelse(rvl$mission=='NED2016016',rvl$flen/10,rvl$flen) rvl$flen<-ifelse(rvl$mission!='NED2016016',(rvl$flen*1.0866)+0.95632,rvl$flen) #plot(log10(rvl$flen),log10(rvl$fwt)) rvl<-subset(rvl,log10(rvl$flen)>=.5 & month %in% c(6,7,8))#REMOVE OUTLIERS rvl2<-rvl rvl2$lcat<-cut(rvl$flen,breaks=seq(5,45,10),labels=seq(10,40,10)) rvl2$id<-gsub(' ','',paste(rvl2$mission,'_',rvl2$setno)) f<-function(d){ return(data.frame(year=unique(d$year), lon=unique(d$lon), lat=unique(d$lat), strat=unique(d$strat), no=sum(d$clen,na.rm=TRUE), wt=sum(d$fwt/1000,na.rm=TRUE))) } rvll<-ddply(rvl2,.(id,lcat),.fun=f,.progress='text') d<-subset(rvll,strat==sort(unique(rvll$strat))[1] & lcat==27.5) f<-function(d){ if(length(unique(d$year))>=25){ print(unique(d$strat)) print(unique(d$lcat)) mod<-gam(no~year,data=d,gamma=1.4) s<-summary(mod) no.pv<-s$p.table[2,4] no.b<-s$p.table[2,1] return(data.frame(lon=mean(d$lon), lat=mean(d$lat), no.b=no.b, no.pv=no.pv)) } else NULL } sdat2<-ddply(rvll,.(strat,lcat),.fun=f,.progress='text') a<-subset(sdat2,lcat==20) a<-subset(sdat2,lcat==30) a<-subset(sdat2,lcat==40) map('world',xlim=c(-70,-60),ylim=c(42,46),fill=TRUE,col='gray') map.axes() 
## Map the per-stratum length-class trends computed above: symbol size scales
## with |slope|, red = increasing, blue = decreasing.
points(a$lon,a$lat,pch=16,cex=rescale(abs(a$no.b),newrange=c(3,14)),col=alpha(ifelse(a$no.b>0,'firebrick3','dodgerblue3'),.75))

###################################################################
################# PHENOLOGY
###################################################################
##################################################################
## Reload the set-level RV survey weights (all areas) and rebuild the derived
## columns used by the phenology analyses below.
setwd(datadir)
rvw<-read.csv("herring_weights_RV_survey_spera_allar.csv",header=TRUE)
rvw$strat<-as.character(rvw$strat)
#rvw<-subset(rvw,!(strat %in% c('493','494','')))
## Label strata that overlie named banks ('no' = none of the listed banks).
rvw$bank<-ifelse(rvw$strat %in% c(447,448),'banq','no')
rvw$bank<-ifelse(rvw$strat %in% c(443),'mis',rvw$bank)
rvw$bank<-ifelse(rvw$strat %in% c(458),'mid',rvw$bank)
rvw$bank<-ifelse(rvw$strat %in% c(455,456),'sab',rvw$bank)
rvw$bank<-ifelse(rvw$strat %in% c(464),'west',rvw$bank)
rvw$bank<-ifelse(rvw$strat %in% c(463),'em',rvw$bank)
rvw$bank<-ifelse(rvw$strat %in% c(473),'lh',rvw$bank)
rvw$bank<-ifelse(rvw$strat %in% c(474),'rw',rvw$bank)
rvw$bank<-ifelse(rvw$strat %in% c(475),'bac',rvw$bank)
rvw$bank<-ifelse(rvw$strat %in% c(480),'bn',rvw$bank)
## Log10-scaled catch (numbers / weight) and mean weight per individual.
rvw$no<-log10(rvw$totno+1)
rvw$wgt<-log10(rvw$totwgt+1)
rvw$sz<-rvw$totwgt/rvw$totno
## Guard the totno==0 division: Inf means a weight was recorded with zero
## count, in which case fall back to the raw weight.
rvw$sz<-ifelse(rvw$sz==Inf,rvw$totwgt,rvw$sz)
## Unique tow id (mission_setno) and a presence/absence flag.
rvw$id<-gsub(' ','',paste(rvw$mission,'_',rvw$setno))
rvw$pres<-ifelse(rvw$totno>0,1,0)
## Coarse two-level era bin: <=1990 -> 1980, otherwise 2010.
rvw$tbin20<-ifelse(rvw$year<=1990,1980,2010)
## Snap each tow to a half-degree cell centre (x.25/x.75 offsets from the
## rounded degree) -> 'cell'; 'cell.1' is a finer 0.1-degree cell label.
rvw$lonc<-round(rvw$lon,digits=0)
rvw$lonc<-ifelse(rvw$lon<=rvw$lonc,rvw$lonc-.25,rvw$lonc+.25)
rvw$latc<-round(rvw$lat,digits=0)
rvw$latc<-ifelse(rvw$lat>=rvw$latc,rvw$latc+.25,rvw$latc-.25)
rvw$cell<-gsub(' ','',paste(rvw$lonc,'_',rvw$latc))
rvw$cell.1<-gsub(' ','',paste(round(rvw$lon,digits=1),'_',round(rvw$lat,digits=1)))
## Build the full 0.1-degree grid over the survey domain and merge it on so
## every tow carries its fine-cell centre coordinates (lonc.1/latc.1).
lonc.1<-seq(min(rvw$lon),max(rvw$lon),.1)
latc.1<-seq(min(rvw$lat),max(rvw$lat),.1)
crds<-expand.grid(lonc.1=lonc.1,latc.1=latc.1)
crds$cell.1<-gsub(' ','',paste(round(crds$lonc.1,digits=1),'_',round(crds$latc.1,digits=1)))
rvw<-merge(rvw,crds,by=c('cell.1'),all.x=TRUE,all.y=FALSE)
#ESTIMATES SMOOTH TREND IN PHENOLOGY BY
STRATA - SHORT FORMAT rvw2<-subset(rvw,!(geardesc %in% c('Newston net','Campelen 1800 survey trawl'))) f<-function(d){ if(dim(subset(d,month %in% c(1,2,3)))[1]>10){wint<-1 } else { wint<-0 } if(dim(subset(d,month %in% c(6,7,8)))[1]>10){summ<-1 } else { summ<-0 } if(dim(subset(d,month %in% c(9,10,11)))[1]>10){fall<-1 } else { fall<-0 } return(data.frame(nmo=length(unique(d$month)), ndy=length(unique(d$day)), nyr=length(unique(d$year)), wint=wint, summ=summ, fall=fall, tot=wint+summ+fall, lon=mean(d$lon), lat=mean(d$lat))) } dt<-ddply(rvw2,.(strat),.fun=f) dt<-dt[order(dt$nmo,decreasing=TRUE),] dmm<-subset(dt,nmo>= 4 & ndy>=30 & nyr>=3 & tot>=3) f<-function(d){ if(dim(subset(d,month %in% c(1,2,3)))[1]>10){wint<-1 } else { wint<-0 } if(dim(subset(d,month %in% c(6,7,8)))[1]>10){summ<-1 } else { summ<-0 } if(dim(subset(d,month %in% c(9,10,11)))[1]>10){fall<-1 } else { fall<-0 } return(data.frame(nmo=length(unique(d$month)), ndy=length(unique(d$day)), nyr=length(unique(d$year)), wint=wint, summ=summ, fall=fall, tot=wint+summ+fall, lon=mean(d$lon), lat=mean(d$lat))) } dt<-ddply(rvw2,.(cell),.fun=f) dt<-dt[order(dt$nmo,decreasing=TRUE),] dmm<-subset(dt,nmo>= 4 & ndy>=30 & nyr>=3 & tot>=3) f1<-function(d0){ f<-function(d){ day<-unique(d$day) d2<-subset(d0,select=c('day')) d2$day2<-d2$day-(day-1) d2$day2<-ifelse(d2$day2<=0,d2$day2+(365),d2$day2) return(data.frame(day=day, span=diff(range(min(d2$day2),max(d2$day2))))) } z<-ddply(d0,.(day),.fun=f) z<-subset(z,span==max(z$span)) return(subset(z,day==min(z$day))) } #dayshift<-ddply(unique(subset(rvw2,strat %in% dmm$strat,select=c('strat','day'))),.(strat),.fun=f1,.progress='text') dayshift<-ddply(unique(subset(rvw2,cell %in% dmm$cell,select=c('strat','day','cell'))),.(cell),.fun=f1,.progress='text') ############################################## #ADJUSTS DAY VALUES TO MAXIMIZE PHENOLOGY SPAN dayshiftfun<-function(d,shf){ d$day2<-d$day-(shf) d$day2<-ifelse(d$day2<=0,d$day2+(365),d$day2) return(d) } #BACK-CALCULATE ORIGINAL DAY OF 
## (continuation of the split comment above:) ...THE YEAR
## Undo dayshiftfun(): map the shifted day axis (day2) back to calendar
## day-of-year (day3), wrapping values past 365.
revdayshiftfun<-function(d,shf){
  d$day3<-d$day2+shf
  d$day3<-ifelse(d$day3>365,d$day3-365,d$day3)
  return(d)
}
## Per-cell seasonal profile of total catch weight: shift day-of-year so the
## sampled season is contiguous, fit a GAM with a year trend plus cyclic diel
## (time) and seasonal (day2) smooths, predict the seasonal cycle at a fixed
## year/time-of-day, z-score it, and map days back to the calendar.
ff<-function(d){
  shf<-subset(dayshift,cell==unique(d$cell))$day
  print(shf)
  names(d)[1]<-'y'
  #SHIFT DAYS TO CENTER ON JULY AND MAKE CONTINUOUS
  d<-dayshiftfun(d,shf)
  ## 'nb'(link='log') looks up and calls mgcv's nb() family by name.
  mod<-gam(y~s(year) + s(time,k=4,bs='cc') + s(day2,k=4,bs='cc'),data=d,gamma=1.4,family='nb'(link='log'))
  pdat<-data.frame(year=max(d$year),
    time=1200,
    day2=seq(min(d$day2),max(d$day2),1))
  p<-predict(mod,newdata=pdat,type='response')
  pdat$p<-p
  pdat$pz<-(pdat$p-mean(pdat$p))/sd(pdat$p)
  pdat<-revdayshiftfun(pdat,shf)
  pdat<-subset(pdat,select=c('day3','p','pz'))
  pdat$lon<-median(d$lon)
  pdat$lat<-median(d$lat)
  ## NOTE(review): 'pdat$day' relies on $ partial matching to the 'day3'
  ## column here (it is renamed to 'day' only on the next line) -- confirm
  ## this is intended; 'did' flags days that were actually sampled.
  pdat$did<-ifelse(pdat$day %in% d$day,1,0)
  names(pdat)[1]<-'day'
  return(data.frame(pdat))
}
#TOTAL WEIGHT
#out<-ddply(subset(rvw2, strat %in% dmm$strat,select=c('totwgt','strat','year','day','time','lon','lat')),.(strat),.fun=ff,.progress='text')
out<-ddply(subset(rvw2, cell %in% dmm$cell,select=c('totwgt','strat','year','day','time','lon','lat','cell')),.(cell),.fun=ff,.progress='text')
## NOTE(review): 'out' contains 'cell' plus ff's outputs but no 'strat' or
## 'pcx' column at this point; these two xyplot calls likely fail as written
## unless those names exist in the global environment -- confirm.
xyplot(pz~day|strat,data=out,col=ifelse(out$did==1,'red','blue'))
xyplot(pcx~day|strat,data=out,col=ifelse(out$did==1,'red','blue'))

###########################################################
#ESTIMATE PHENOLOGY FOR EACH 1/4 GRID CELL AND ANIMATE
## Drop non-comparable gears before the cell-level phenology fits.
rvw2<-subset(rvw,!(geardesc %in% c('Newston net','Campelen 1800 survey trawl')))
## Seasonal sampling coverage per cell: counts of distinct months/days/years
## sampled and flags for >10 tows in winter/summer/fall.
f<-function(d){
  if(dim(subset(d,month %in% c(1,2,3)))[1]>10){wint<-1 } else { wint<-0 }
  if(dim(subset(d,month %in% c(6,7,8)))[1]>10){summ<-1 } else { summ<-0 }
  if(dim(subset(d,month %in% c(9,10,11)))[1]>10){fall<-1 } else { fall<-0 }
  return(data.frame(nmo=length(unique(d$month)),
    ndy=length(unique(d$day)),
    nyr=length(unique(d$year)),
    wint=wint,
    summ=summ,
    fall=fall,
    tot=wint+summ+fall,
    lon=mean(d$lon),
    lat=mean(d$lat)))
}
dt<-ddply(rvw2,.(cell),.fun=f)
dt<-dt[order(dt$nmo,decreasing=TRUE),]
## Keep well-covered cells: >=4 months, >=30 distinct days, >=3 years, and
## all three flagged seasons sampled.
dmm<-subset(dt,nmo>= 4 & ndy>=30 & nyr>=3 & tot>=3)
## For each cell, find the day-of-year offset that maximises the contiguous
## span of sampled (shifted) days -- used to unwrap the circular day axis.
f1<-function(d0){
  f<-function(d){
    day<-unique(d$day)
    d2<-subset(d0,select=c('day'))
    d2$day2<-d2$day-(day-1)
    d2$day2<-ifelse(d2$day2<=0,d2$day2+(365),d2$day2)
    return(data.frame(day=day,
      span=diff(range(min(d2$day2),max(d2$day2)))))
  }
  z<-ddply(d0,.(day),.fun=f)
  z<-subset(z,span==max(z$span))
  return(subset(z,day==min(z$day)))
}
#dayshift<-ddply(unique(subset(rvw2,strat %in% dmm$strat,select=c('strat','day'))),.(strat),.fun=f1,.progress='text')
dayshift<-ddply(unique(subset(rvw2,cell %in% dmm$cell,select=c('strat','day','cell'))),.(cell),.fun=f1,.progress='text')

##############################################
#ADJUSTS DAY VALUES TO MAXIMIZE PHENOLOGY SPAN
## Shift day-of-year by the per-cell offset so the sampled season is
## contiguous on the 'day2' axis (wraps values <=0 forward by 365).
dayshiftfun<-function(d,shf){
  d$day2<-d$day-(shf)
  d$day2<-ifelse(d$day2<=0,d$day2+(365),d$day2)
  return(d)
}
#BACK-CALCULATE ORIGINAL DAY OF THE YEAR
revdayshiftfun<-function(d,shf){
  d$day3<-d$day2+shf
  d$day3<-ifelse(d$day3>365,d$day3-365,d$day3)
  return(d)
}
library(akima)
## Same per-cell seasonal GAM as above, but predictions are interpolated to
## all 365 calendar days with akima::aspline and tagged with the cell-centre
## coordinates (lonc/latc) and mean maximum depth.
ff<-function(d){
  shf<-subset(dayshift,cell==unique(d$cell))$day
  print(shf)
  names(d)[1]<-'y'
  #SHIFT DAYS TO CENTER ON JULY AND MAKE CONTINUOUS
  d<-dayshiftfun(d,shf)
  mod<-gam(y~s(year) + s(time,k=4,bs='cc') + s(day2,k=4,bs='cc'),data=d,gamma=1.4,family='nb'(link='log'))
  pdat0<-data.frame(year=2000,
    time=1200,
    day2=seq(min(d$day2),max(d$day2),1))
  p<-predict(mod,newdata=pdat0,type='response')
  pdat0$p<-p
  pdat<-data.frame(day2=seq(1,365,1))
  ## Interpolate the prediction to every day; 'pasp' is an unused copy of
  ## the same vector left over from the double assignment.
  pdat$p<-pasp<-aspline(pdat0$day2,pdat0$p,xout=pdat$day2)$y
  pdat$pz<-(pdat$p-mean(pdat$p))/sd(pdat$p)
  pdat<-revdayshiftfun(pdat,shf)
  pdat<-subset(pdat,select=c('day3','p','pz'))
  pdat$lon<-unique(d$lonc)
  pdat$lat<-unique(d$latc)
  pdat$depth<-mean(d$dmax,na.rm=TRUE)
  ## NOTE(review): 'pdat$day' partial-matches the 'day3' column here (it is
  ## renamed to 'day' on the next line) -- confirm intended.
  pdat$did<-ifelse(pdat$day %in% d$day,1,0)
  names(pdat)[1]<-'day'
  return(data.frame(pdat))
}
#TOTAL WEIGHT
out<-ddply(subset(rvw2, cell %in% dmm$cell,select=c('totwgt','strat','year','day','time','lon','lat','cell','lonc','latc','dmax')),.(cell),.fun=ff,.progress='text')
xyplot(pz~day|cell,data=out,col=ifelse(out$did==1,'red','blue'))
# Quick-look panels of fitted per-cell phenology curves
xyplot(log10(p+1)~day|cell,data=out,col=ifelse(out$did==1,'red','blue'))
xyplot(p~day|cell,data=out,col=ifelse(out$did==1,'red','blue'))

# Strata polygons for the offshore banks highlighted on the animation maps
bnk<-subset(plg,stratum %in% c(443,458,455,456,464,463,473,474,475,480))
# Map extent (x1..x2 longitude) and the y position of the day marker
x1<--68
x2<--57
y<-41.5
# Map day-of-year onto the longitude axis so it can be drawn as a moving marker
out$dy<-rescale(out$day,newrange=c(x1,x2))
frames <- length(unique(out$day))
# Build a zero-padded jpeg file name so frames sort in order.
# NOTE(review): the body pastes the global loop index 'i', not the argument
# 'x'; this only works because rename() is always called as rename(i).
rename <- function(x){
if (x < 10) {
return(name <- paste('000',i,'plot.jpg',sep=''))
}
if (x < 100 && i >= 10) {
return(name <- paste('00',i,'plot.jpg', sep=''))
}
if (x >= 100) {
return(name <- paste('0', i,'plot.jpg', sep=''))
}
}
# Symbol size from the standardized (z-score) phenology curve
out$pcx<-rescale(out$pz,newrange=c(.2,7))
setwd('C:/Users/sailfish/Documents/aalldocuments/literature/research/active/SPERA/Figures/prac')
#loop through plots
for(i in 1:frames){
name <- rename(i)
d<-subset(out,day==i)
#saves the plot as a file in the working directory
jpeg(name,width=6,height=5,units='in',quality=100,res=300)
map('worldHires',fill=TRUE,col='gray',border=NA,xlim=c(x1,x2),ylim=c(y,46))
#plot(plg,add=TRUE,lwd=.01,border=alpha('lightgray',.5))
plot(bnk,add=TRUE,lwd=.01,border=NA,col=alpha('firebrick3',.2))
points(d$lon,d$lat,pch=16,cex=d$pcx,col=alpha('dodgerblue3',.3),xlim=c(x1,x2),ylim=c(y,46))
# NOTE(review): xlim=c(-x1,x2) looks like a typo for c(x1,x2); harmless since
# points() ignores xlim/ylim
points(d$lon,d$lat,pch=1,lwd=.01,cex=d$pcx,col='dodgerblue3',xlim=c(-x1,x2),ylim=c(y,46))
legend('bottomright',paste('Day=',i),bty='n')
# Red triangle tracks the current day along the month axis
points(unique(d$dy),y,pch=17,col='red3',cex=2)
axis(1,at=seq(x1,x2,length.out=14),labels=c('','JAN','FEB','MAR','APR','MAY','JUN','JUL','AUG','SEP','OCT','NOV','DEC',''),cex.axis=.7)
dev.off()
}

#################################################################
# SAME BUT FOR NON-STANDARDIZED BIOMASS
out$dy<-rescale(out$day,newrange=c(x1,x2))
out$pcx<-rescale(log10(out$p+1),newrange=c(.2,7))
setwd('C:/Users/sailfish/Documents/aalldocuments/literature/research/active/SPERA/Figures/prac')
#loop through plots
for(i in 1:frames){
name <- rename(i)
d<-subset(out,day==i)
#saves the plot as a file in the working directory
jpeg(name,width=6,height=5,units='in',quality=100,res=300)
map('worldHires',fill=TRUE,col='gray',border=NA,xlim=c(x1,x2),ylim=c(y,46))
#plot(plg,add=TRUE,lwd=.01,border=alpha('lightgray',.5))
plot(bnk,add=TRUE,lwd=.01,border=NA,col=alpha('firebrick3',.2))
points(d$lon,d$lat,pch=16,cex=d$pcx,col=alpha('dodgerblue3',.3),xlim=c(x1,x2),ylim=c(y,46))
points(d$lon,d$lat,pch=1,lwd=.01,cex=d$pcx,col='dodgerblue3',xlim=c(-x1,x2),ylim=c(y,46))
legend('bottomright',paste('Day=',i),bty='n')
points(unique(d$dy),y,pch=17,col='red3',cex=2)
#axis(1,at=seq(x1,x2,length.out=10),labels=round(seq(0,365,length.out=10),digits=0))
axis(1,at=seq(x1,x2,length.out=14),labels=c('','JAN','FEB','MAR','APR','MAY','JUN','JUL','AUG','SEP','OCT','NOV','DEC',''),cex.axis=.7)
dev.off()
}

xyplot(p~day|cell,data=out,pch=16)
xyplot(log10(p+1)~day|cell,data=out,pch=16)
xyplot(pcx~day|cell,data=out,pch=16)

#FROM COMMAND LINE WITH IMAGEMAGICK INSTALLED RUN: CONVERT -DELAY 2 -QUALITY 100 *.JPG MOVIE.MP4
#run ImageMagick
#library(magick)
#my_command <- 'convert *.png -delay 2 -loop 0 animation.gif'
#system(my_command)

# Per-cell phenology summary: first day at the maximum pz (peak timing) and
# amplitude (range of the standardized curve)
f<-function(d){
return(data.frame(lon=unique(d$lon),
                  lat=unique(d$lat),
                  depth=unique(d$depth),
                  day=subset(d,pz==max(d$pz))$day[1],
                  amp=max(d$pz)[1]-min(d$pz)[1]))
}
tm<-ddply(out,.(cell),.fun=f)
plot(subset(tm,select=c('lon','lat','depth','day','amp')),pch=15)
plot(tm$lon,tm$lat,pch=16,cex=rescale(tm$day,newrange=c(.2,6)),col='purple')
plot(tm$lon,tm$lat,pch=16,cex=rescale(tm$amp,newrange=c(.2,6)),col='purple')

###########################################################
#ESTIMATE PHENOLOGY OF HERRING SIZE AND ANIMATE
# Drop inconsistent sets where weight and count disagree on presence
b<-subset(rvw2,(totwgt==0 & totno>0) | (totwgt>0 & totno==0))
rvw3<-subset(rvw2,!(id %in% b$id))
# Per-cell sampling coverage (same logic as the earlier coverage f, on rvw3)
f<-function(d){
if(dim(subset(d,month %in% c(1,2,3)))[1]>10){wint<-1
} else { wint<-0 }
if(dim(subset(d,month %in% c(6,7,8)))[1]>10){summ<-1
} else { summ<-0 }
if(dim(subset(d,month %in% c(9,10,11)))[1]>10){fall<-1
} else { fall<-0 }
return(data.frame(nmo=length(unique(d$month)),
                  ndy=length(unique(d$day)),
                  nyr=length(unique(d$year)),
                  wint=wint,
                  summ=summ,
                  fall=fall,
                  tot=wint+summ+fall,
                  lon=mean(d$lon),
                  lat=mean(d$lat)))
}
dt<-ddply(rvw3,.(cell),.fun=f)
dt<-dt[order(dt$nmo,decreasing=TRUE),]
dmm<-subset(dt,nmo>= 4 & ndy>=30 & nyr>=3 & tot>=3)
# Start day maximizing the continuous span of sampled days (as before)
f1<-function(d0){
f<-function(d){
day<-unique(d$day)
d2<-subset(d0,select=c('day'))
d2$day2<-d2$day-(day-1)
d2$day2<-ifelse(d2$day2<=0,d2$day2+(365),d2$day2)
return(data.frame(day=day,
                  span=diff(range(min(d2$day2),max(d2$day2)))))
}
z<-ddply(d0,.(day),.fun=f)
z<-subset(z,span==max(z$span))
return(subset(z,day==min(z$day)))
}
dayshift<-ddply(unique(subset(rvw3,cell %in% dmm$cell,select=c('strat','day','cell'))),.(cell),.fun=f1,.progress='text')

##############################################
#ADJUSTS DAY VALUES TO MAXIMIZE PHENOLOGY SPAN
dayshiftfun<-function(d,shf){
d$day2<-d$day-(shf)
d$day2<-ifelse(d$day2<=0,d$day2+(365),d$day2)
return(d)
}
#BACK-CALCULATE ORIGINAL DAY OF THE YEAR
revdayshiftfun<-function(d,shf){
d$day3<-d$day2+shf
d$day3<-ifelse(d$day3>365,d$day3-365,d$day3)
return(d)
}
library(akima)
# Per-cell seasonal curves of mean fish size: fit separate NB GAMs to total
# weight (modw) and total number (modn), interpolate both onto days 1..365,
# then take size = predicted weight / predicted number.
ff<-function(d){
shf<-subset(dayshift,cell==unique(d$cell))$day
print(unique(d$cell))
print(length(unique(d$day)))
#SHIFT DAYS TO CENTER ON JULY AND MAKE CONTINUOUS
d<-dayshiftfun(d,shf)
modw<-gam(totwgt~s(year) + s(day2,k=4,bs='cc') + s(time,bs='cc',k=4),data=d,gamma=1.4,family='nb'(link='log'))
pdat0<-data.frame(year=sort(unique(d$year),decreasing=TRUE)[1],
                  time=1200,
                  day2=seq(min(d$day2),max(d$day2),1))
p<-predict(modw,newdata=pdat0,type='response')
pdat0$p<-p
pdat<-data.frame(day2=seq(1,365,1))
pdat$p<-pasp<-aspline(pdat0$day2,pdat0$p,xout=pdat$day2)$y
pdat<-revdayshiftfun(pdat,shf)
pdatw<-subset(pdat,select=c('day3','p'))
names(pdatw)<-c('day','pwt')
# NOTE(review): the count model uses a factor year term unlike the smooth
# year term in modw -- confirm the asymmetry is intentional
modn<-gam(totno~as.factor(year)+s(day2,k=4,bs='cc') + s(time,bs='cc',k=4),data=d,gamma=1.4,family='nb'(link='log'))
pdat0<-data.frame(year=sort(unique(d$year),decreasing=TRUE)[1],
                  time=1200,
                  day2=seq(min(d$day2),max(d$day2),1))
p<-predict(modn,newdata=pdat0,type='response')
pdat0$p<-p
pdat<-data.frame(day2=seq(1,365,1))
pdat$p<-pasp<-aspline(pdat0$day2,pdat0$p,xout=pdat$day2)$y
pdat<-revdayshiftfun(pdat,shf)
pdatn<-subset(pdat,select=c('day3','p'))
names(pdatn)<-c('day','pno')
pdat<-merge(pdatw,pdatn,by=c('day'),all=FALSE)
# Mean weight per individual and its z-score
pdat$sz<-pdat$pwt/pdat$pno
pdat$szz<-(pdat$sz-mean(pdat$sz))/sd(pdat$sz)
pdat$lon<-unique(d$lonc)
pdat$lat<-unique(d$latc)
pdat$did<-ifelse(pdat$day %in% d$day,1,0)
return(data.frame(pdat))
}
#LENGTH
out<-ddply(subset(rvw3, cell %in% dmm$cell),.(cell),.fun=ff,.progress='text')
xyplot(log10(sz+1)~day|cell,data=out,col=ifelse(out$did==1,'red','blue'))
xyplot(sz~day|cell,data=out,col=ifelse(out$did==1,'red','blue'))
xyplot(szz~day|cell,data=out,col=ifelse(out$did==1,'red','blue'))

# Animation of mean-size phenology (same frame machinery as above)
bnk<-subset(plg,stratum %in% c(443,458,455,456,464,463,473,474,475,480))
x1<--68
x2<--57
y<-41.5
out$dy<-rescale(out$day,newrange=c(x1,x2))
frames <- length(unique(out$day))
# Zero-padded frame names (same global-'i' quirk as before)
rename <- function(x){
if (x < 10) {
return(name <- paste('000',i,'plot.jpg',sep=''))
}
if (x < 100 && i >= 10) {
return(name <- paste('00',i,'plot.jpg', sep=''))
}
if (x >= 100) {
return(name <- paste('0', i,'plot.jpg', sep=''))
}
}
# Symbol size from log mean size
out$pcx<-rescale(log10(out$sz+1),newrange=c(.2,7))
setwd('C:/Users/sailfish/Documents/aalldocuments/literature/research/active/SPERA/Figures/prac')
#loop through plots
for(i in 1:frames){
name <- rename(i)
d<-subset(out,day==i)
#saves the plot as a file in the working directory
jpeg(name,width=6,height=5,units='in',quality=100,res=300)
map('worldHires',fill=TRUE,col='gray',border=NA,xlim=c(x1,x2),ylim=c(y,46))
#plot(plg,add=TRUE,lwd=.01,border=alpha('lightgray',.5))
plot(bnk,add=TRUE,lwd=.01,border=NA,col=alpha('firebrick3',.2))
points(d$lon,d$lat,pch=16,cex=d$pcx,col=alpha('dodgerblue3',.3),xlim=c(x1,x2),ylim=c(y,46))
points(d$lon,d$lat,pch=1,lwd=.01,cex=d$pcx,col='dodgerblue3',xlim=c(-x1,x2),ylim=c(y,46))
legend('bottomright',paste('Day=',i),bty='n')
points(unique(d$dy),y,pch=17,col='red3',cex=2)
axis(1,at=seq(x1,x2,length.out=14),labels=c('','JAN','FEB','MAR','APR','MAY','JUN','JUL','AUG','SEP','OCT','NOV','DEC',''),cex.axis=.7)
dev.off()
}

# Same animation for the standardized (z-score) size
out$pcx<-rescale(out$szz,newrange=c(.2,7))
setwd('C:/Users/sailfish/Documents/aalldocuments/literature/research/active/SPERA/Figures/prac')
#loop through plots
for(i in 1:frames){
name <- rename(i)
d<-subset(out,day==i)
#saves the plot as a file in the working directory
jpeg(name,width=6,height=5,units='in',quality=100,res=300)
map('worldHires',fill=TRUE,col='gray',border=NA,xlim=c(x1,x2),ylim=c(y,46))
#plot(plg,add=TRUE,lwd=.01,border=alpha('lightgray',.5))
plot(bnk,add=TRUE,lwd=.01,border=NA,col=alpha('firebrick3',.2))
points(d$lon,d$lat,pch=16,cex=d$pcx,col=alpha('dodgerblue3',.3),xlim=c(x1,x2),ylim=c(y,46))
points(d$lon,d$lat,pch=1,lwd=.01,cex=d$pcx,col='dodgerblue3',xlim=c(-x1,x2),ylim=c(y,46))
legend('bottomright',paste('Day=',i),bty='n')
points(unique(d$dy),y,pch=17,col='red3',cex=2)
axis(1,at=seq(x1,x2,length.out=14),labels=c('','JAN','FEB','MAR','APR','MAY','JUN','JUL','AUG','SEP','OCT','NOV','DEC',''),cex.axis=.7)
dev.off()
}

# Where/when the Campelen trawl was used on the Scotian Shelf
a<-subset(rvw2,geardesc=='Campelen 1800 survey trawl')#2002 and 2005 during october
plot(a$lon,a$lat,pch=16,col='red')
setwd(figsdir)
pdf('campelen.survey.scotianshelf.pdf')
map('world',col='gray',fill=TRUE,xlim=c(-67,-58),ylim=c(43,45))
map.axes()
points(a$lon,a$lat,pch=16,col=alpha('red',.5))
dev.off()

#####################################################################
#####################################################################
#SMOOTH TREND IN PHENOLOGY BY 1/4 CELL FOR CLUSTER (LONG FORM)
rvw2<-subset(rvw,!(geardesc %in% c('Newston net','Campelen 1800 survey trawl')))
# Per-cell sampling coverage (third copy of the same helper)
f<-function(d){
if(dim(subset(d,month %in% c(1,2,3)))[1]>10){wint<-1
} else { wint<-0 }
if(dim(subset(d,month %in% c(6,7,8)))[1]>10){summ<-1
} else { summ<-0 }
if(dim(subset(d,month %in% c(9,10,11)))[1]>10){fall<-1
} else { fall<-0 }
return(data.frame(nmo=length(unique(d$month)),
                  ndy=length(unique(d$day)),
                  nyr=length(unique(d$year)),
                  wint=wint,
                  summ=summ,
                  fall=fall,
                  tot=wint+summ+fall,
                  lon=mean(d$lon),
                  lat=mean(d$lat)))
}
dt<-ddply(rvw2,.(cell),.fun=f)
dt<-dt[order(dt$nmo,decreasing=TRUE),]
dmm<-subset(dt,nmo>= 4 & ndy>=30 & nyr>=3 & tot>=3)
# Start day maximizing the continuous span of sampled days (as before)
f1<-function(d0){
f<-function(d){
day<-unique(d$day)
d2<-subset(d0,select=c('day'))
d2$day2<-d2$day-(day-1)
d2$day2<-ifelse(d2$day2<=0,d2$day2+(365),d2$day2)
return(data.frame(day=day,
                  span=diff(range(min(d2$day2),max(d2$day2)))))
}
z<-ddply(d0,.(day),.fun=f)
z<-subset(z,span==max(z$span))
return(subset(z,day==min(z$day)))
}
dayshift<-ddply(unique(subset(rvw2,cell %in% dmm$cell,select=c('strat','day','cell'))),.(cell),.fun=f1,.progress='text')

##############################################
#ADJUSTS DAY VALUES TO MAXIMIZE PHENOLOGY SPAN
dayshiftfun<-function(d,shf){
d$day2<-d$day-(shf)
d$day2<-ifelse(d$day2<=0,d$day2+(365),d$day2)
return(d)
}
#BACK-CALCULATE ORIGINAL DAY OF THE YEAR
revdayshiftfun<-function(d,shf){
d$day3<-d$day2+shf
d$day3<-ifelse(d$day3>365,d$day3-365,d$day3)
return(d)
}
# Example single-cell extraction (apparently interactive/testing leftover)
d<-subset(rvw2,cell=="-57.75_44.25",select=c('totwgt','cell','year','time','lon','lat','day'))
# Per-cell standardized phenology curve in "wide" form: one 365-day column per
# cell, named 'X_<cell>', for the correlation-based clustering below.
fsm<-function(d){
shf<-subset(dayshift,cell==unique(d$cell))$day
names(d)[1]<-'y'
#SHIFT DAYS TO CENTER ON JULY AND MAKE CONTINUOUS
d<-dayshiftfun(d,shf)
mod<-gam(y~s(year) + s(time,k=4,bs='cc') + s(day2,k=4,bs='cc'),data=d,gamma=1.4,family='nb'(link='log'))
pdat0<-data.frame(year=2000,
                  time=1200,
                  day2=seq(min(d$day2),max(d$day2),1))
p<-predict(mod,newdata=pdat0,type='response')
pdat0$p<-p
pdat<-data.frame(day2=seq(1,365,1))
pdat$p<-aspline(pdat0$day2,pdat0$p,xout=pdat$day2)$y
pdat$pz<-(pdat$p-mean(pdat$p))/sd(pdat$p)
pdat<-revdayshiftfun(pdat,shf)
pdat<-subset(pdat,select=c('day3','pz'))
names(pdat)<-c('day',gsub(' ','',paste('X_',unique(d$cell))))
pdat<-pdat[order(pdat$day),]
return(data.frame(pdat))
}
#TOTAL WEIGHT
qq<-dlply(subset(rvw2,cell %in% dmm$cell,select=c('totwgt','cell','year','time','lon','lat','day')),.(cell),.fun=fsm)
# Merge the per-cell curves into one wide day x cell matrix and drop all-NA columns
qsm<-Reduce(function(x, y) merge(x, y, by=c('day'),all=TRUE), qq)
qsm<-qsm[,colSums(is.na(qsm)) != nrow(qsm)]
q<-qsm
q2<- q %>% gather(cell, value, -day)
xyplot(value ~ day | cell,data=q2, pch=15, type=c('spline'),col='black')
xyplot(value ~ year | strata,data=q2, pch=15, type=c('p','spline'),col='black')

# Fuzzy-cluster the per-cell phenology curves and map cluster membership.
# df: wide day x cell matrix (column 1 = day); k: number of clusters;
# lbl: plot title. Draws silhouette/PCoA/time-series panels as a side effect
# and returns a ggplot map of cluster membership.
# NOTE(review): uses globals rvw, bnk, coast.mc; cor() distance + fanny()
# fuzzy clustering on 1-correlation dissimilarities.
clfun<-function(df,k,lbl){
rownames(df)<-df$year
df<-df[,-1]
#k<-3#ER OF CLUSTERS
# Dissimilarity = 1 - pairwise correlation between cell curves
dmat<-1-cor(df,use='pairwise.complete.obs')
dst<-as.dist(dmat)
ff<-fanny(dst,k,maxit=5000,diss=T)
par(mfrow=c(3,1),mar=c(4,12,1,12))
dum<-c('red3','darkblue','gold3')
plot(silhouette(ff),col=dum[1:k],main='')#silhouette plot
# Principal-coordinates ordination of the dissimilarities
dc.pcoa<-cmdscale(dst)
dc.scores<-scores(dc.pcoa,choices=c(1,2))
spefuz.g<-ff$clustering
a<-data.frame(cell=as.character(sort(unique(names(df)))),
              clusters=ff$clustering)
aa<-data.frame(ff$membership)
aa$cell<-rownames(a)
plot(scores(dc.pcoa),asp=1,type='n',xlim=c(-1,1.5),ylim=c(-1,1),las=1,axes=TRUE,xlab='',ylab='')
# Star glyphs show fuzzy membership of each cell in ordination space
stars(ff$membership,location=scores(dc.pcoa),draw.segments=T,add=T,scale=F,len=.1,col.segments=alpha(c(dum[1:k]),.25),byt='n',labels=NULL,xlim=c(-1.1,1.4),ylim=c(-1,.5),lwd=.0001,xpd=TRUE,border=NULL,radius=FALSE,col.radius=alpha('white',.1))
# Convex hull around each hard-assigned cluster
for(i in 1:k){
cl<-dum[i]
gg<-dc.scores[spefuz.g==i,]
hpts<-chull(gg)
hpts<-c(hpts,hpts[1])
lines(gg[hpts,],col=cl,lwd=3,xlim=c(-1.1,1.4),ylim=c(-1,.5))
}
# Max membership probability per cell, rescaled for line transparency
cx <- data.frame(cell=aa$cell,
                 cx=apply(aa[, 1:k], 1, max))
cx$cxx <- rescale(cx$cx,newrange=c(.05,.5))
#PLOTS THE TIMESERIES OF R FOR EACH CLUSTER
par(mfrow=c(3,1),mar=c(2,12,1,12),oma=c(1,1,1,1))
#par(mfrow=c(2,2),mar=c(2,12,1,12),oma=c(1,1,1,1))
mb<-seq(1,k,1)
l<-list()
for(i in 1:length(mb)){
print(mb[i])
one<-subset(a,clusters==mb[i],select=c('cell'))# IN THIS CLUSTER
cx2<-subset(cx,cx>=0.5)#GETS INSTANCES WHERE CLUSTER PROBABILITY>0.5
data5<-subset(df,select=c(as.character(one$cell)))#TS FOR CLUSTER
cx2<-subset(cx2,cell %in% names(data5))
data5<-subset(df,select=c(as.character(cx2$cell)))#TS FOR CLUSTER
x<-rownames(data5)
# Cluster mean curves: strict (na.rm=F) for plotting, lenient for peak day
t<-data.frame(day=as.numeric(x),mn=rowMeans(data5,na.rm=F))
t2<-data.frame(day=as.numeric(x),mn=rowMeans(data5,na.rm=T))
cl<-dum[i]
plot(0,0,pch=16,cex=.01,xlim=c(1,365),ylim=c(-2,3),main='',xaxt='n',las=1,axes=FALSE,xlab='',ylab='')
axis(side=2,at=seq(-2,3,1),las=1,lwd=.001,cex.axis=.75)
axis(side=1,at=seq(1,365,10),cex.axis=.75)
# Individual cell curves, transparency keyed to membership probability;
# try() guards against cells missing from cx
for(j in 1:length(data5[1,])){
try(dat<-data5[,j])
try(trnsp<-subset(cx,cell==as.character(names(data5[j])))$cxx)
try(lines(x,dat,cex=.5,ylim=c(0,1),lwd=2,col=alpha(cl,rescale(trnsp,newrange=c(.1,.5)))))
}
lines(as.numeric(as.character(t$day)),t$mn,col='gold3',cex=.7,lwd=3)
# Day of the cluster-mean peak
dm<-subset(t2,mn==max(t2$mn,na.rm=TRUE),select=c('day'))
dm$cluster=mb[i]
names(dm)<-c('clusterday','clusters')
l[[i]]<-dm
}
# Attach coordinates and colours to the hard cluster assignments for mapping
a2<-a
a2$cell<-gsub('X_\\.','',a2$cell)
if(k==3){a2$cx<- apply(aa[,c('X1','X2','X3')], 1, function(x) max(x) )
} else {a2$cx<- apply(aa[,c('X1','X2')], 1, function(x) max(x) ) }
a2$id<-a2$cell
a2$cl<-ifelse(a2$clusters==1,dum[1],dum[3])
a2$cl<-ifelse(a2$clusters==2,dum[2],a2$cl)
crds<-unique(subset(rvw,select=c('cell','lonc','latc')))
crds$cell<-gsub('-','',crds$cell)
a2<-merge(a2,crds,by=c('cell'),all.x=TRUE,all.y=FALSE)
dum<-unique(subset(a2,select=c('clusters','cl')))
# Map of cluster membership (tile alpha = membership probability)
return(
ggplot()+
geom_polygon(aes(long,lat, group=group),fill='black', data=bnk,size=1)+
geom_tile(data=a2, aes(x=lonc, y=latc,fill=as.factor(clusters),alpha=cx),col='gray80',size=.0001)+
scale_fill_manual(breaks=as.character(dum$clusters),values=dum$cl,na.value="transparent",guide=guide_legend(title=''))+
scale_alpha(guide = 'none')+
geom_polygon(aes(long,lat, group=group), fill="grey65", data=coast.mc)+
theme(panel.grid.major=element_blank(),panel.grid.minor=element_blank(),legend.position=c(.9,.2),plot.background=element_blank(),axis.line = element_line(color = 'black'), legend.key.size = unit(0.1, "in"),legend.text=element_text(size=6))+
scale_x_continuous(expand=c(0,0),breaks=seq(-68,-57,1),labels=as.character(seq(-68,-57,1)),limits=NA)+
scale_y_continuous(expand=c(0,0),breaks=seq(41,47,1),labels=as.character(seq(41,47,1)),limits=NA)+
coord_equal()+
coord_cartesian(ylim=c(41,47),xlim=c(-68,-57))+
xlab('')+
ylab('')+
labs(title = lbl,
     subtitle = "",
     caption = '')
)
}

setwd(figsdir)
pdf('herring_rvsurvey_cluster_phenology_grid_k3.pdf',height=10,width=6.5)
mp1<-clfun(qsm,3,"Average weight")
dev.off()
pdf('herring_rvsurvey_cluster_phenology_map_grid_k3.pdf',height=14,width=7)
grid.arrange(mp1,mp1,mp1,ncol=1)
dev.off()

#MAKES BARPLOT OF CLUSTER DISTRIBUTION BY LONGITUDE
# NOTE(review): a2 and dum below are locals of clfun, not returned by it --
# these lines only work if a2/dum exist in the global environment from a prior
# interactive run of clfun's body; verify before sourcing end-to-end.
setwd(figsdir)
pdf('herring_rvsurvey_cluster_phenology_extras.pdf',height=14,width=7)
# Proportion of cells in each cluster, per longitude bin
f<-function(d){
f2<-function(d2){
return(data.frame(prp=dim(d2)[1]/dim(d)[1]))}
return(ddply(d,.(clusters),.fun=f2))
}
dd<-ddply(a2,.(lonc),.fun=f)
p2<-ggplot(dd, aes(x=lonc, fill=as.factor(clusters),y=prp))+
geom_bar(data=dd,stat='identity',aes(width=.7,order=clusters),size=.0001,col='gray')+
theme(legend.position=c(.8,.8),text = element_text(size=8),axis.text.x = element_text(angle=0, vjust=1), panel.grid.major=element_blank(), panel.grid.minor=element_blank(),panel.background = element_blank(), legend.key.size = unit(0.2, "in"),legend.text=element_text(size=6),axis.line = element_line(color="black", size = .1))+
scale_fill_manual(values=as.character(dum$cl),breaks=as.factor(dum$clusters),labels=as.character(dum$clusters),name='Cluster')+
scale_x_continuous(expand=c(0,0),breaks=seq(min(dd$lonc),max(dd$lonc),10),labels=round(seq(min(dd$lonc),max(dd$lonc),10),digits=0))+
expand_limits(x=c(min(dd$lonc),max(dd$lonc)))+
xlab('Longitude') +
ylab('Proportion of cells')

#MODEL PROBABILITY OF CLUSTER ASSIGNMENT BY LONGITUDE
# Binomial GAM of membership in cluster 'cl' vs longitude, weighted by
# membership probability; returns predicted probability along a longitude grid.
f<-function(d,cl){
d$ps<-ifelse(d$clusters==cl,1,0)
mod<-gam(ps~s(lonc,k=6),data=d,family='binomial',weights=d$cx,gamma=1.5)
pdat<-data.frame(lonc=seq(min(d$lonc),max(d$lonc),length.out=100))
p<-predict(mod,newdata=pdat,se.fit=TRUE,type='response')
pdat$p<-p$fit
return(pdat)
}
c1<-f(a2,1)
c2<-f(a2,2)
c3<-f(a2,3)
par(mfrow=c(3,1),mar=c(4,4,1,1)) plot(c1$lonc,c1$p,ylim=c(0,1),col=dum$cl[1],las=1,pch=16,type='l',lwd=2,ylab='Proportion of all cells') points(c2$lonc,c2$p,col=dum$cl[2],pch=16,type='l',lwd=2) points(c3$lonc,c3$p,col=dum$cl[3],pch=16,type='l',lwd=2) cx<-.3 plot(0,0,col='white',ylim=c(0,1),las=1,pch=16,type='l',lwd=2,ylab='Proportion of all cells',xlim=c(min(c1$lonc),max(c1$lonc))) polygon(c(c2$lonc,c2$lonc[length(c2$lonc):1]),c(c2$p,rep(-5,dim(c2)[1])[length(c2$p):1]),col=alpha(dum$cl[2],cx),border=alpha(dum$cl[2],.8)) polygon(c(c1$lonc,c1$lonc[length(c1$lonc):1]),c(c1$p,rep(-5,dim(c1)[1])[length(c1$p):1]),col=alpha(dum$cl[1],cx),border=alpha(dum$cl[1],.8)) polygon(c(c3$lonc,c3$lonc[length(c3$lonc):1]),c(c3$p,rep(-5,dim(c3)[1])[length(c3$p):1]),col=alpha(dum$cl[3],cx),border=alpha(dum$cl[3],.8)) grid.arrange(p2,p2,p2,ncol=1) dev.off() #devtools::install_github('dgrtwo/gganimate',force=TRUE) library(magick) library(gganimate) plot(coast.mc) p<-bmap+ geom_point(aes(x=lon, y=lat,size=pz,frame=day),data=out,colour='purple',alpha=.5)+ theme(panel.grid.major=element_blank(),panel.grid.minor=element_blank(),legend.position=c(.1,.2),plot.background=element_blank(),axis.line = element_line(color = 'black')) out$strat<-as.character(out$strat) o<-data.frame(strat=sort(unique(out$strat)), n=tapply(out$day,out$strat,function(x) length(unique(x)))) library(ggthemes) library(animation) setwd(figsdir) ani.options(interval = 0.2) gganimate(p) gganimate(p,'output.phen.gif') gganimate(p,'output.phen.mp4') gganimate(p,'output.smooth.mp4') p a<-subset(rvw2,geardesc=='Campelen 1800 survey trawl')#2002 and 2005 during october plot(a$lon,a$lat,pch=16,col='red') setwd(figsdir) pdf('campelen.survey.scotianshelf.pdf') map('world',col='gray',fill=TRUE,xlim=c(-67,-58),ylim=c(43,45)) map.axes() points(a$lon,a$lat,pch=16,col=alpha('red',.5)) dev.off() #################################################################### #################################################################### 
# ANIMATE: estimate the time trend in average biomass for each grid cell and
# render one JPEG map per time step, for later assembly into a movie.
###########################################################
# ESTIMATE PHENOLOGY FOR EACH GRID CELL AND ANIMATE

# Summer (Jun-Aug) survey sets, excluding non-comparable gears
rvw2 <- subset(rvw, !(geardesc %in% c('Newston net', 'Campelen 1800 survey trawl')) &
                 month %in% c(6, 7, 8))

# Per-cell sampling coverage: counts of distinct months/days/years/positions/times
f <- function(d) {
  data.frame(nmo  = length(unique(d$month)),
             ndy  = length(unique(d$day)),
             nyr  = length(unique(d$year)),
             myr  = min(d$year),
             nlon = length(unique(d$lon)),
             nlat = length(unique(d$lat)),
             ntm  = length(unique(d$time)),
             lon  = unique(d$lonc),
             lat  = unique(d$latc))
}
dt <- ddply(rvw2, .(cell), .fun = f)
dt <- dt[order(dt$nmo, decreasing = TRUE), ]

# Keep cells with at least 10 sampled years, starting before 1990
dmm <- subset(dt, nyr >= 10 & myr < 1990)

library(akima)

# Fit a negative-binomial GAM of abundance on year (smooth) and time of day
# (cyclic smooth); predict a quarterly year series at a fixed time (12:00).
ff <- function(d) {
  names(d)[1] <- 'y'
  mod <- gam(y ~ s(year, k = 6) + s(time, k = 4, bs = 'cc'),
             data = d, gamma = 1.4, family = 'nb'(link = 'log'))
  pdat <- data.frame(year = seq(min(d$year), max(d$year), .25),
                     time = 1200,
                     lon  = unique(d$lonc),
                     lat  = unique(d$latc))
  p <- predict(mod, newdata = pdat, type = 'response')
  pdat$p <- p
  pdat$pz <- (pdat$p - mean(pdat$p)) / sd(pdat$p)  # standardized anomaly
  pdat <- subset(pdat, select = c('year', 'p', 'pz'))
  pdat$lon <- unique(d$lonc)
  pdat$lat <- unique(d$latc)
  pdat$depth <- mean(d$dmax, na.rm = TRUE)
  pdat$did <- ifelse(pdat$year %in% d$year, 1, 0)  # 1 = year actually sampled
  data.frame(pdat)
}

# TOTAL WEIGHT: fit the model cell by cell
out <- ddply(subset(rvw2, cell %in% dmm$cell,
                    select = c('totwgt', 'strat', 'year', 'day', 'time', 'lon',
                               'lat', 'cell', 'lonc', 'latc', 'dmax')),
             .(cell), .fun = ff, .progress = 'text')

xyplot(pz ~ year | cell, data = out, col = ifelse(out$did == 1, 'red', 'blue'))
xyplot(log10(p + 1) ~ year | cell, data = out, col = ifelse(out$did == 1, 'red', 'blue'))

# Highlighted bank strata
bnk <- subset(plg, stratum %in% c(443, 458, 455, 456, 464, 463, 473, 474, 475, 480))

# Map extent; dy maps year onto the x-axis for the timeline marker
x1 <- -68
x2 <- -57
y <- 41.5
out$dy <- rescale(out$year, newrange = c(x1, x2))

frames <- length(unique(out$year))

# Zero-padded frame file name so files sort in frame order.
# BUG FIX: the original built the name from the global loop index `i` instead
# of its argument `x`, which only worked by accident because it was always
# called as rename(i).
rename <- function(x) {
  sprintf('%04dplot.jpg', x)
}

out$pcx <- rescale(log10(out$p + 1), newrange = c(.2, 7))  # symbol size

setwd('C:/Users/copepod/Documents/aalldocuments/literature/research/active/SPERA/Figures/prac')

# Loop through plots, saving each frame as a JPEG in the working directory
yr <- sort(unique(out$year))
for (i in seq_len(frames)) {
  name <- rename(i)
  d <- subset(out, year == yr[i])
  jpeg(name, width = 6, height = 5, units = 'in', quality = 100, res = 300)
  map('worldHires', fill = TRUE, col = 'gray', border = NA,
      xlim = c(x1, x2), ylim = c(y, 46))
  #plot(plg,add=TRUE,lwd=.01,border=alpha('lightgray',.5))
  plot(bnk, add = TRUE, lwd = .01, border = NA, col = alpha('firebrick3', .2))
  # NOTE(review): the original passed xlim/ylim to points() (with a sign-flipped
  # -x1 in the second call); points() does not use those arguments, so they are
  # dropped here.
  points(d$lon, d$lat, pch = 16, cex = d$pcx, col = alpha('dodgerblue3', .3))
  points(d$lon, d$lat, pch = 1, lwd = .01, cex = d$pcx, col = 'dodgerblue3')
  legend('bottomright', paste('Year=', round(yr[i], digits = 0)), bty = 'n')
  points(unique(d$dy), y, pch = 17, col = 'red3', cex = 2)  # timeline marker
  axis(1, at = seq(x1, x2, length.out = 10), labels = seq(1970, 2015, 5), cex.axis = .7)
  dev.off()
}

# FROM COMMAND LINE WITH IMAGEMAGICK INSTALLED RUN:
#   convert -delay 2 -quality 100 *.jpg movie.mp4
#library(magick)
#my_command <- 'convert *.png -delay 2 -loop 0 animation.gif'
#system(my_command)
94542e13dc94707fad07a9fcf3adc3d96c3e16e0
b330b067d8dfc8a4ee93b3cdab342a366b374b10
/data/데이터전처리.R
ed0a46d4adbdd0ffa24ec27de772a9ce5cb1f020
[]
no_license
statKim/Da_Vinci_SW_Hackathon
84db39a8bfbc65ed1ce9b62b9606f3d9f22c4f0c
d2c8e786e297f2761a8926d03cd2667f3eb46346
refs/heads/master
2020-04-06T18:52:00.524845
2018-12-31T00:39:47
2018-12-31T00:39:47
157,715,878
3
0
null
null
null
null
UTF-8
R
false
false
17,241
r
데이터전처리.R
## Data preprocessing: build one modelling table per tourist site, combining
## daily de-facto population (at 15:00), holiday/weekday dummies, Naver
## DataLab search trend, and weather, for March-October 2018.

library(glue)
library(XML)
library(stringr)
library(dplyr)
library(xlsx)

## ---- Public holidays (data.go.kr REST API), year 2018 -----------------------
api.key <- 'GJdKU2SKJ6oYQgTSsQrkT9BH2hIF%2FG6qtztAeyJHv9Zp31YlWhl%2FCKMmz0fKJnmxtPyQT9TY49AQqtpEeFCw9A%3D%3D'
url.format <- 'http://apis.data.go.kr/B090041/openapi/service/SpcdeInfoService/getRestDeInfo?ServiceKey={key}&solYear={year}&solMonth={month}'
holiday.request <- function(key, year, month) glue(url.format)

## Collect every public holiday of 2018.
## BUG FIX: the original appended to `date` for every item because the
## statement after the `;` fell outside the un-braced `if`, so `days` and
## `date` ended up with different lengths; both appends are now conditional.
days <- c()
date <- c()
for (m in 1:12) {
  resp <- xmlToList(holiday.request(api.key, 2018, str_pad(m, 2, pad = 0)))
  items <- resp$body$items
  for (item in items) {
    if (item$isHoliday == 'Y') {
      days <- append(days, item$dateName)
      date <- append(date, item$locdate)
    }
  }
}
holi <- data.frame(days, date)
holi$date <- as.character(holi$date)

## ---- Monthly de-facto population CSVs (Mar-Oct 2018) ------------------------
months <- c("03", "04", "05", "06", "07", "08", "09", "10")
for (i in months) {
  assign(paste0("peo_2018", i, "_ori"),
         read.csv(paste0("C:/Users/Jiwan/Downloads/LOCAL_PEOPLE_DONG_2018", i,
                         "/LOCAL_PEOPLE_DONG_2018", i, ".csv"),
                  encoding = "UTF-8", stringsAsFactors = FALSE,
                  skip = 1, header = FALSE))
}

## Stack the monthly files for one administrative-dong code.
## NOTE: data for 18-21 Sep 2018 are missing at the source, so September has
## 624 hourly rows instead of 720.
extract_dong <- function(code) {
  out <- NULL
  for (i in months) {
    mon <- get(paste0("peo_2018", i, "_ori"))[1:4]  # V1 date, V2 hour, V3 dong code, V4 population
    mon <- mon %>% filter(V3 == code)
    print(dim(mon))
    out <- if (is.null(out)) mon else rbind(out, mon)
  }
  out
}

park   <- extract_dong(11215780)  # Seoul Children's Grand Park
lotte  <- extract_dong(11710680)  # Lotte World
nam    <- extract_dong(11140570)  # Namsan
gyeong <- extract_dong(11110515)  # Gyeongbokgung
duk    <- extract_dong(11140520)  # Deoksugung
buk    <- extract_dong(11110600)  # Bukchon Hanok Village

## ---- Calendar features: 15:00 population, holiday flag, weekday dummies -----
day_levels <- c("일요일", "월요일", "화요일", "수요일", "목요일", "금요일", "토요일")

## One row per day (15:00 snapshot) with holiday indicator and one-hot weekday
## columns; on public holidays the weekday dummies are zeroed out so the
## holiday flag supersedes them.
add_calendar <- function(df) {
  names(df) <- c("date", "time", "code", "people")
  df <- df %>% filter(time == 15)
  df$date <- as.character(df$date)
  ## weekday index: Sunday = 1 ... Saturday = 7
  df$weekday <- as.numeric(factor(weekdays(as.Date(df$date, "%Y%m%d")),
                                  levels = day_levels, ordered = TRUE))
  xf <- factor(df$weekday, levels = 1:7)
  dummies <- model.matrix(~ xf - 1)
  df <- df %>%
    mutate(holiday = as.numeric(date %in% holi$date)) %>%
    select(date, people, holiday)
  df <- data.frame(df, as.data.frame(dummies))
  for (i in seq_len(nrow(df))) {
    if (df[i, 3] == 1) {
      df[i, 4:10] <- rep(0, 7)
    }
  }
  df
}

park   <- add_calendar(park)
lotte  <- add_calendar(lotte)
nam    <- add_calendar(nam)
gyeong <- add_calendar(gyeong)
duk    <- add_calendar(duk)
buk    <- add_calendar(buk)

## ---- Naver DataLab search-trend files ---------------------------------------
## Returns date plus a 7-day lagged moving average of the search index; the
## first 7 rows have no full window and are NA (as in the original layout).
read_trend <- function(path) {
  tr <- read.xlsx2(path, sheetIndex = 1, startRow = 7, stringsAsFactors = FALSE)
  tr[, 2] <- as.numeric(tr[, 2])
  names(tr) <- c("date", "naver")
  trend <- c()
  for (i in 1:nrow(tr)) {
    trend <- append(trend, sum(tr[i:(i + 6), 2]) / 7)
  }
  trend1 <- c(rep(NA, 7), trend)
  n <- length(trend1)
  tr1 <- data.frame(rbind(tr, NA), trend = trend1[1:(n - 6)])
  tr1$date <- gsub("-", "", tr1$date)
  tr1[, -2]  # keep date + trend only
}

trend_lotte1  <- read_trend("C:/Users/Jiwan/Downloads/datalab_lotte.xlsx")
trend_park1   <- read_trend("C:/Users/Jiwan/Downloads/datalab_park.xlsx")
trend_nam1    <- read_trend("C:/Users/Jiwan/Downloads/datalab_nam.xlsx")
trend_gyeong1 <- read_trend("C:/Users/Jiwan/Downloads/datalab_gyeong.xlsx")
trend_duk1    <- read_trend("C:/Users/Jiwan/Downloads/datalab_duk.xlsx")
trend_buk1    <- read_trend("C:/Users/Jiwan/Downloads/datalab_buk.xlsx")

## ---- Weather ----------------------------------------------------------------
weather_data <- read.xlsx2("C:/Users/Jiwan/Downloads/data_weather.xlsx",
                           sheetIndex = 1, startRow = 1, stringsAsFactors = FALSE)
weather_data <- weather_data[, 1:5]
names(weather_data) <- c("date", "sunny", "cloudy", "rainy", "snowy")

## ---- Join everything and write one csv per site -----------------------------
build_final <- function(place_df, trend_df) {
  merge(merge(place_df, trend_df, all.x = TRUE), weather_data, all.x = TRUE)
}

lotte_final  <- build_final(lotte,  trend_lotte1)
park_final   <- build_final(park,   trend_park1)
nam_final    <- build_final(nam,    trend_nam1)
gyeong_final <- build_final(gyeong, trend_gyeong1)
duk_final    <- build_final(duk,    trend_duk1)
buk_final    <- build_final(buk,    trend_buk1)

write.csv(lotte_final,  "lotte.csv")
write.csv(park_final,   "park.csv")
write.csv(nam_final,    "nam.csv")
write.csv(gyeong_final, "gyeong.csv")
write.csv(duk_final,    "duk.csv")
write.csv(buk_final,    "buk.csv")

summary(lotte_final$people)
e1701f4faf06fcc27195817ffe17b42d4d824df2
287add902a548b978254b03f571f5e127d325e88
/man/lib_dist.Rd
05dc39c7038feafcbcedadbfa57e5b332784fde9
[]
no_license
Auburngrads/publicLibs
e36884552220fcf859d28ef5cc16d26baeb23f65
804efbb6bc80f5053712e375a09d2d46ce2f61a6
refs/heads/master
2021-01-17T02:44:58.943620
2020-07-20T00:32:03
2020-07-20T00:32:03
58,672,156
0
0
null
null
null
null
UTF-8
R
false
true
2,781
rd
lib_dist.Rd
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/libdistData.R \docType{data} \name{alts_Libs} \alias{alts_Libs} \alias{andr_Libs} \alias{andw_Libs} \alias{arnl_Libs} \alias{bckl_Libs} \alias{beal_Libs} \alias{blln_Libs} \alias{brkd_Libs} \alias{brks_Libs} \alias{chrl_Libs} \alias{clmb_Libs} \alias{cnnn_Libs} \alias{crch_Libs} \alias{dovr_Libs} \alias{dvsm_Libs} \alias{dyss_Libs} \alias{edwr_Libs} \alias{egln_Libs} \alias{ells_Libs} \alias{elmn_Libs} \alias{elsn_Libs} \alias{fewr_Libs} \alias{frch_Libs} \alias{gdfl_Libs} \alias{gntr_Libs} \alias{grnf_Libs} \alias{hckm_Libs} \alias{hill_Libs} \alias{hllm_Libs} \alias{hnsc_Libs} \alias{hrlf_Libs} \alias{krtl_Libs} \alias{kslr_Libs} \alias{lckl_Libs} \alias{lghl_Libs} \alias{lngl_Libs} \alias{lsan_Libs} \alias{lttr_Libs} \alias{luke_Libs} \alias{mcch_Libs} \alias{mccn_Libs} \alias{mcdl_Libs} \alias{mcgr_Libs} \alias{mint_Libs} \alias{mlms_Libs} \alias{mnth_Libs} \alias{mody_Libs} \alias{mxwl_Libs} \alias{nlls_Libs} \alias{offt_Libs} \alias{otis_Libs} \alias{pope_Libs} \alias{ptrc_Libs} \alias{ptrs_Libs} \alias{rbns_Libs} \alias{rndl_Libs} \alias{schr_Libs} \alias{sctt_Libs} \alias{shaw_Libs} \alias{shpp_Libs} \alias{symj_Libs} \alias{tnkr_Libs} \alias{trvs_Libs} \alias{tynd_Libs} \alias{usaf_Libs} \alias{vanc_Libs} \alias{vndn_Libs} \alias{whtm_Libs} \alias{wrgp_Libs} \title{Distances From Public Libraries to USAF Bases} \format{A \code{data.table} with 4 variables: \tabular{rlll}{ [, 1] \tab location \tab Character String of the Library name/address/city/state \tab \bold{Categoric}\cr [, 2] \tab miles \tab Ordered list distances (in miles) from the base \tab \bold{Numeric}\cr [, 3] \tab latitide \tab Latitude coordinate of the library \tab \bold{Numeric}\cr [, 4] \tab longitude \tab Longitude coordinate of the library \tab \bold{Numeric} }} \source{ http://www.publiclibraries.com Google Maps Geocode API } \usage{ alts_Libs andr_Libs andw_Libs arnl_Libs bckl_Libs beal_Libs blln_Libs 
brkd_Libs brks_Libs chrl_Libs clmb_Libs cnnn_Libs crch_Libs dovr_Libs dvsm_Libs dyss_Libs edwr_Libs egln_Libs ells_Libs elmn_Libs elsn_Libs fewr_Libs frch_Libs gdfl_Libs gntr_Libs grnf_Libs hckm_Libs hill_Libs hllm_Libs hnsc_Libs hrlf_Libs krtl_Libs kslr_Libs lckl_Libs lghl_Libs lngl_Libs lsan_Libs lttr_Libs luke_Libs mcch_Libs mccn_Libs mcdl_Libs mcgr_Libs mint_Libs mlms_Libs mnth_Libs mody_Libs mxwl_Libs nlls_Libs offt_Libs otis_Libs pope_Libs ptrc_Libs ptrs_Libs rbns_Libs rndl_Libs schr_Libs sctt_Libs shaw_Libs shpp_Libs symj_Libs tnkr_Libs trvs_Libs tynd_Libs usaf_Libs vanc_Libs vndn_Libs whtm_Libs wrgp_Libs } \description{ Information about public libraries in each state } \keyword{datasets}
db7d966e350429feda5397b8298df1b4caecb4f0
3b8fcf4e1fc1ed070e8e5b16f48c32f8e8ccec3a
/man/wtInfo.Rd
12ed944b1e73d986585da6d08550435ac9160769
[]
no_license
SwapanK/fracdet
67379391e3722864eeb78129bfd686beee4840d4
69360a3431d7b323d6bc8947c20aa2f58af7cfa1
refs/heads/master
2020-04-30T22:26:37.277441
2018-06-16T08:01:03
2018-06-16T08:01:03
null
0
0
null
null
null
null
UTF-8
R
false
true
551
rd
wtInfo.Rd
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/waveletVar.R
\name{wtInfo}
\alias{wtInfo}
\title{Get the family and the filter-number used in the wavelet transform
associated with the \code{waveletVar} object.}
\usage{
wtInfo(x)
}
\arguments{
\item{x}{A \code{waveletVar} object.}
}
\value{
An R list containing the family (\code{family} field) and the
filter-number (\code{filter_number}).
}
\description{
Get the family and the filter-number used in the wavelet transform
associated with the \code{waveletVar} object.
}
516b8a03f718cbd74815ff9e116fe18a3e7d21cc
47ae73e4c6f69138bf7082636366f579dc975409
/man/evaluate.DRWPClassGM.Rd
52047f3374b49cbc01d0f4d8d983c6a807b04072
[]
no_license
pikaqiu321/DRWPClass
ad0d4e4ad0df82dedae2a319cbf3e2bdff3705db
2cb01c8ad8fb77f3113bedbc2344c1b6636d1725
refs/heads/master
2022-11-17T00:01:22.797764
2015-03-01T00:21:37
2015-03-01T00:21:37
null
0
0
null
null
null
null
UTF-8
R
false
false
1,336
rd
evaluate.DRWPClassGM.Rd
\name{evaluate.DRWPClassGM}
\alias{evaluate.DRWPClassGM}
\title{
Evaluate predictions from a "DRWPClassGM" object.
}
\description{
This function evaluates the classification performance of a fitted
\code{"DRWPClassGM"} object.
}
\usage{
evaluate.DRWPClassGM(object, newx, newy.class1, newy.class2)
}
\arguments{
\item{object}{
Fitted \code{"DRWPClassGM"} model object.
}
\item{newx}{
A matrix with variables to predict.
}
\item{newy.class1}{
an integer vector comprising the indexes of class 1 samples in \code{newx}.
}
\item{newy.class2}{
an integer vector comprising the indexes of class 2 samples in \code{newx}.
}
}
\value{The classification performance of the predictions.}
\seealso{
\code{\link{predict.DRWPClassGM}}
}
\examples{
data(GProf8511)
data(GProf3325)
data(MProf)
data(pathSet)
data(dGMGraph)
fit <- fit.DRWPClassGM(xG=GProf8511$mRNA_matrix, yG.class1=GProf8511$normal,
yG.class2=GProf8511$PCA, xM=MProf$Meta_matrix, yM.class1=MProf$normal,
yM.class2=MProf$PCA, DEBUG=TRUE, pathSet=pathSet, globalGraph=dGMGraph,
testStatistic="t-test", classifier = "Logistic", normalize = TRUE,
nFolds = 5, numTops=50, iter = 1, Gamma=0.7, Alpha = 0.5)
evaluate.DRWPClassGM(object=fit, newx=GProf3325$mRNA_matrix,
newy.class1=GProf3325$normal, newy.class2=GProf3325$PCA)
}
03141cfedc48dc1ece04e93c98a472b0eede40fe
f0c85baaaf0b9d2d2c725327c759fdb9ff58aad1
/05e_Host_State_Variables_-_High_Unemployment.R
9370730180f6f5b961b46da3767813b6e57e3ecc
[]
no_license
hrdii/post_conflict_refugee_returns
4ead6d7ea2997cd8b88230fec76b8e4e9c3e0a9d
757cbde10d49890901fad28db4f590dc6ae9635d
refs/heads/main
2023-09-05T23:32:58.289994
2021-11-04T07:56:18
2021-11-04T07:56:18
424,493,243
0
0
null
null
null
null
UTF-8
R
false
false
4,420
r
05e_Host_State_Variables_-_High_Unemployment.R
##########----------##########----------##########----------##########----------
##########---------- HEADER
##### meta-information
## Author: Hardika Dayalani (dayalani@rand.org)
## Creation: 2020-02-09 for Post-Conflict Refugee Returns Project
## Description: Adds host-state variables to conflict cases.
##   Opportunity for employment: % of refugees at the end of conflict that are
##   in host countries with high unemployment. The global average unemployment
##   rate varied between 4.4 and 6% over 1991-2019; a host country is
##   arbitrarily defined as having high unemployment if the rate exceeds 10%.

##### environment set up
rm(list = ls())  # project convention: each numbered script starts from a clean workspace

## Load Libraries
library(data.table)
library(stringr)

## Load Functions
source(file = "00a_Custom_Functions.R")  # provides SubsetHosts() and NameCleaning()

## Load Conflict Cases
load("../Intermediate/Conflict case with refugee origin and host state information.RData")

## Load Refugee Information
load("../Intermediate/Refugee Population.RData")

## Load WB-UNHCR Country Name Lookup Dataframe
load("../Intermediate/WB-UNHCR Country Name Lookup.RData")

## Import Unemployment Rate Data
unemp_df <- fread("../Data/World Bank/Umemployment/API_SL.UEM.TOTL.ZS_DS2_en_csv_v2_672963.csv",
                  skip = 4, header = TRUE)

##### Clean Unemployment Rate Data

## Flag rows (row = TRUE) or columns (row = FALSE) of x that are missing in
## all but `n` cells. Returns a logical vector over the chosen margin.
MissingIndex <- function(x, row = TRUE, n = 1) {
  miss <- is.na(x)
  other <- ifelse(row, 2, 1)
  threshold <- dim(x)[other] - n
  margin <- ifelse(row, 1, 2)
  counts <- apply(miss, MARGIN = margin, FUN = sum)
  counts >= threshold
}

## Drop completely empty rows
temp <- MissingIndex(unemp_df, row = TRUE, n = 4)
unemp_df <- unemp_df[!temp]

## Drop completely empty columns
## (`-temp` is data.table's negated .SDcols selection: keep columns where temp is FALSE)
temp <- MissingIndex(unemp_df, row = FALSE, n = 0)
unemp_df <- unemp_df[, .SD, .SDcols = -temp]
rm(temp)

## Drop other unnecessary columns
unemp_df <- unemp_df[, !c("Country Code", "Indicator Name", "Indicator Code")]

## Clean names
setnames(unemp_df, "Country Name", "country")

##### Harmonize Country Names
unemp_df$country <- NameCleaning(unemp_df$country)

## Replace Country names using Lookup Dataframe
unemp_df[lookup_df, on = .(country = wb_names), country := i.unhcr_names]
rm(lookup_df)

##### Calculate % refugee

## Share of a conflict case's refugees hosted in high-unemployment countries.
## cas: "Country YYYY" case label; r_df: refugee populations by year/host;
## econ_df: unemployment rates by country (one column per year);
## ref_threshold: unemployment % above which a host counts as high-unemployment.
PropUnemp <- function(cas, r_df = refugee_df, econ_df = unemp_df, ref_threshold = 10) {
  ## Source Country
  s <- gsub('(.*) ([0-9]{4})', '\\1', cas)
  ## Year0
  y <- as.numeric(gsub('(.*) ([0-9]{4})', '\\2', cas))
  ## Subset Refugee Data to Year0
  temp_df <- r_df[year == y, ]
  ## Hosts of this source country's refugees
  hosts <- SubsetHosts(country = s, df = temp_df)
  ## Unemployment at Year0; the World Bank series starts in 1991, so earlier
  ## cases use the 1991-1993 average instead.
  if (y < 1991) {
    y <- 1991:1993
    econ_df <- econ_df[, .SD, .SDcols = c("country", as.character(y))]
    ## BUG FIX: the original used rowSums(.SD, na.rm = TRUE)/3, which divides
    ## by 3 even when some of the three years are NA, biasing the average
    ## downward; rowMeans(na.rm = TRUE) averages only the observed years.
    econ_df <- econ_df[, unemp := rowMeans(.SD, na.rm = TRUE), .SDcols = as.character(y)]
    econ_df <- econ_df[, .SD, .SDcols = c("country", "unemp")]
  } else {
    econ_df <- econ_df[, .SD, .SDcols = c("country", as.character(y))]
    names(econ_df) <- c("country", "unemp")
  }
  ## Add Unemployment rate for each host country
  hosts <- merge(x = hosts, y = econ_df, by = "country", all.x = TRUE)
  ## High unemployment if the rate exceeds ref_threshold (NA rate stays NA,
  ## matching the original cut()-based classification)
  hosts$high_unemp <- hosts$unemp > ref_threshold
  ## Proportion of source-country refugees living in high-unemployment hosts
  hosts$high_unemp <- hosts$high_unemp * hosts$pop / sum(hosts$pop, na.rm = TRUE)
  sum(hosts$high_unemp, na.rm = TRUE)
}

## Calculate % refugee at three thresholds
agg_df$punemp_5 <- sapply(agg_df$case, FUN = PropUnemp, ref_threshold = 5)
summary(agg_df$punemp_5)
agg_df$punemp_10 <- sapply(agg_df$case, FUN = PropUnemp, ref_threshold = 10)
summary(agg_df$punemp_10)
agg_df$punemp_15 <- sapply(agg_df$case, FUN = PropUnemp, ref_threshold = 15)
summary(agg_df$punemp_15)

## Save File
save(agg_df, file = "../Intermediate/Conflict case with refugee origin and host state information.RData")
print("05e")
849e51ea6f2a3c8b8319bf548c0bdc5858d81c14
c78d6c58e47ff01700e082dde12bd08523352cc2
/plot3.R
609fc11ad01058a7f4fd2c8023438c48494de2c6
[]
no_license
kaniapiatkowska/ExData_Plotting1
e5786f12120af6a2bb7d888e0813805e1a7236bf
4d705024285651831e7f605d01f17580496a5168
refs/heads/master
2020-12-28T19:34:02.917674
2019-11-04T11:22:47
2019-11-04T11:22:47
45,624,088
0
0
null
2015-11-05T16:30:07
2015-11-05T16:30:07
null
UTF-8
R
false
false
1,095
r
plot3.R
#Loading the data setwd("C:/Dane/MOJE/Zuzka/Coursera/DataScientistToolbox/Rscripts/Course4") file<-"https://d396qusza40orc.cloudfront.net/exdata%2Fdata%2Fhousehold_power_consumption.zip" download.file(file, destfile = "./exdata_data_household_power_consumption.zip") unzip("./exdata_data_household_power_consumption.zip", overwrite = TRUE) data<-subset(read.csv("./household_power_consumption.txt", header = TRUE, sep = ";", na.strings = "?"), Date=="1/2/2007" | Date=="2/2/2007") data$Date<-strptime(paste(data$Date, data$Time), "%e/%m/%Y %H:%M:%S") library(dplyr) data<-select(data, -Time) #plot3 png(filename = "plot3.png", width = 480, height = 480, units = "px") plot(data$Date, data$Sub_metering_1, type = "l", xlab = NA, ylab = "Energy sub metering") points(data$Date, data$Sub_metering_2, type = "l", col="red") points(data$Date, data$Sub_metering_3, type = "l", col = "blue") legend("topright", lty = c(1,1,1), col = c("black","red","blue"), legend = c("Sub_metering_1","Sub_metering_2","Sub_metering_3")) dev.off()
bfa3827d5f8e763dec4e8be3ed41b6d91f417ac3
c5fac476b276f2d1c65547ec4f89292d3abf8ba8
/man/confint.Rd
c286b68867edb01454680ec4508f1476fe3ee803
[]
no_license
dakep/complmrob
1f3090343de2cb6319b87fae289047ff60049b92
c904ac453cb501417acc1bb7ec41a3c80ecc4015
refs/heads/master
2020-05-09T12:11:05.675243
2019-09-17T18:25:06
2019-09-17T18:25:06
181,104,338
0
0
null
null
null
null
UTF-8
R
false
true
1,765
rd
confint.Rd
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/confint-methods.R \name{confint.bccomplmrob} \alias{confint.bccomplmrob} \alias{confint.bclmrob} \title{Calculate confidence intervals} \usage{ \method{confint}{bccomplmrob}(object, parm, level = 0.95, type = c("bca", "perc", "norm", "basic", "stud"), ...) \method{confint}{bclmrob}(object, parm, level = 0.95, type = c("bca", "perc", "norm", "basic", "stud"), ...) } \arguments{ \item{object}{an object returned from \code{\link{bootcoefs}}.} \item{parm}{a specification of which parameters are to be given confidence intervals, either a vector of numbers or a vector of names. If missing, all parameters are considered.} \item{level}{the confidence level required.} \item{type}{the type of interval required (see the type argument of \code{\link{boot.ci}}).} \item{...}{currently ignored.} } \description{ Calculate confidence intervals for bootstrapped robust linear regression estimates with or without compositional data } \section{Methods (by class)}{ \itemize{ \item \code{bccomplmrob}: for bootstrapped estimates of robust linear regression models for compositional data \item \code{bclmrob}: for bootstrapped estimates of robust linear regression models }} \examples{ data <- data.frame(lifeExp = state.x77[, "Life Exp"], USArrests[ , -3]) mUSArr <- complmrob(lifeExp ~ ., data = data) bc <- bootcoefs(mUSArr, R = 200) # the number of bootstrap replicates should # normally be higher! confint(bc, level = 0.95, type = "perc") ### For normal robust linear regression models ### require(robustbase) data(aircraft) mod <- lmrob(Y ~ ., data = aircraft) bootEst <- bootcoefs(mod, R = 200) confint(bootEst, level = 0.95, type = "perc") }
45e087a178fdb01523fbc8a01d411cf0684e8f8e
0471999fce7bfcba220ae361a843b9fc69af53e7
/tests/testthat/test-docker-client-networks.R
37401523942f4c88fad43f557324286bdfaa6867
[ "MIT" ]
permissive
karthik/stevedore
e25d0c1fb9073de4979a22c69e92acb54d7ab2d6
8d12d3a02a211557ff264780a17a4789604ee40e
refs/heads/master
2020-03-19T10:06:51.132342
2018-06-06T15:03:29
2018-06-06T15:03:29
136,344,185
0
0
null
2018-06-06T14:54:38
2018-06-06T14:54:37
null
UTF-8
R
false
false
2,186
r
test-docker-client-networks.R
context("docker client: networks") test_that("create", { d <- test_docker_client() nm <- rand_str(10, "stevedore_") nw <- d$network$create(nm) on.exit(try_silent(nw$remove())) expect_is(nw, "docker_network") expect_is(nw, "stevedore_object") expect_equal(nw$name(), nm) expect_equal(nw$inspect()$name, nm) expect_identical(nw$reload(), nw) expect_null(nw$remove()) e <- get_error(nw$inspect()) expect_is(e, "docker_error") expect_equal(e$code, 404L) }) test_that("get", { d <- test_docker_client() nm <- rand_str(10, "stevedore_") nw1 <- d$network$create(nm) on.exit(try_silent(nw1$remove())) nw2 <- d$network$get(nm) expect_identical(nw1$inspect(FALSE), nw2$inspect(FALSE)) d$network$remove(nm) e <- get_error(d$network$get(nm)) expect_is(e, "docker_error") expect_equal(e$code, 404L) }) test_that("list", { d <- test_docker_client() nm <- rand_str(10, "stevedore_") nw <- d$network$create(nm) on.exit(nw$remove()) nwl <- d$network$list() expect_is(nwl, "data.frame") expect_true("name" %in% names(nwl)) expect_true(nm %in% nwl$name) }) test_that("prune", { d <- test_docker_client() nm <- rand_str(10, "stevedore_") nw <- d$network$create(nm) ans <- d$network$prune() expect_match(ans$networks_deleted, "^stevedore_", all = FALSE) }) test_that("containers", { d <- test_docker_client() server <- rand_str(10, "stevedore_") network <- rand_str(3, "stevedore_") d <- test_docker_client() nw <- d$network$create(network) on.exit(nw$remove()) expect_identical(nw$containers(), list()) x <- d$container$create("nginx", name = server, network = network) on.exit({ x$remove(force = TRUE) nw$remove() }) x$start() res <- nw$containers() expect_is(res, "list") expect_equal(length(res), 1L) expect_is(res[[1]], "docker_container") expect_identical(res[[1]]$id(), x$id()) }) test_that("connect", { skip("connect is untested") }) test_that("disconnect", { skip("disconnect is untested") }) test_that("get (offline)", { cl <- null_docker_client() x <- cl$network$get(dummy_id()) expect_is(x, "docker_network") 
expect_equal(x$id(), dummy_id()) })
8bd0d0f071e7b1405d567055b70e056bfa48d1ce
7a903df21dcb2d80c726137438068f6335582aa4
/R/tidydf.R
3189429581249d45e6c455589d1a10496817c8e6
[]
no_license
anujkhare/iregnet
7cb9b739c64afe55d7a15f5ac1485258c6dd7758
89cc904894495511b801f71ff99cbfed6043dd97
refs/heads/master
2023-06-25T11:41:50.890845
2019-08-22T16:29:41
2019-08-22T16:29:41
59,438,433
7
16
null
2023-06-15T14:42:55
2016-05-22T23:04:32
R
UTF-8
R
false
false
1,108
r
tidydf.R
#' @title Return a tidy data.frame from iregnet fit #' @export #' @description #' Returns a tidy \link{data.frame} from a fitted "iregnet" object. #' #' @param x The S3 object of type \code{iregnet} returned by the \code{iregnet} #' method. #' #' @param ... Other parameters. Currently unused. #' #' @details #' This function is used to obtain an intermediate \code{data.frame} used in #' \link{plot.iregnet}. #' It can be used for producing other plots using \code{ggplot2}. #' NOTE: \code{Intercept} (if present) is \strong{not} included in the #' \code{arclength} since it is never regularized. #' tidydf <- function(x, ...) { stopifnot_error("Invalid / no fit object provided", class(x) == "iregnet") # Don't include intercept in norm (arclength) since it is never regularized. start_index <- as.integer(x$intercept) + 1 n <- nrow(x$beta) arclength <- apply(x$beta, 2, function(x) sum(abs(x[start_index: n]))) tidy.df <- with(x, data.frame( weight=as.numeric(t(beta)), lambda, arclength, variable=rownames(beta)[as.integer(col(t(beta)))] )) tidy.df }
2261cb28eb4d9e233a1dbc2557979f3ec93121a8
f0ddc6dfc85a777561ad0e2a86ca4679cca8ca33
/Final R Script.R
9d1952ea8c3c05b4f744fdf2932b6ff01e20ddbf
[]
no_license
sagar-aps/movie-rating-predictor
f0e11141314f5e88ffa79f411382c38333d287df
3444d868fd931fe779433ac9a4fc84ad09696d13
refs/heads/master
2023-06-15T17:57:55.805663
2021-07-08T19:38:05
2021-07-08T19:38:05
346,516,903
0
0
null
null
null
null
UTF-8
R
false
false
31,754
r
Final R Script.R
# Dependencies packages_required <-c("dplyr", # Wrangling "ggplot2", #for graphics "stringr", #string operations "tidyr", # Wrangling "caret", #for CreateDataPartition and train() "ggthemes", #for graphics themes "lubridate", #for dealing with dates "tidyverse", # Wrangling "data.table", #for wrangling "Matrix", #for SparseMatrix "recosystem", #for Matrix Factorization model "ggthemes", #for ggplot theme economist "recommenderlab",#for UBCF,IBCF, POPULAR models "knitr" #for RMD options ) using<-function(...) { libs<-unlist(list(...)) req<-unlist(lapply(libs,require,character.only=TRUE)) need<-libs[req==FALSE] if(length(need)>0){ install.packages(need) lapply(need,require,character.only=TRUE) } } using(packages_required) # Create Initial Dataframes # MovieLens 10M dataset: # https://grouplens.org/datasets/movielens/10m/ # http://files.grouplens.org/datasets/movielens/ml-10m.zip dl <- tempfile() download.file("http://files.grouplens.org/datasets/movielens/ml-10m.zip", dl) ratings <- fread(text = gsub("::", "\t", readLines(unzip(dl, "ml-10M100K/ratings.dat"))), col.names = c("userId", "movieId", "rating", "timestamp")) movies <- str_split_fixed(readLines(unzip(dl, "ml-10M100K/movies.dat")), "\\::", 3) colnames(movies) <- c("movieId", "title", "genres") # if using R 3.6 or earlier: #movies <- as.data.frame(movies) %>% mutate(movieId = as.numeric(levels(movieId))[movieId], # title = as.character(title), # genres = as.character(genres)) # if using R 4.0 or later: movies <- as.data.frame(movies) %>% mutate(movieId = as.numeric(movieId), title = as.character(title), genres = as.character(genres)) movielens <- left_join(ratings, movies, by = "movieId") # Validation set will be 10% of MovieLens data set.seed(1, sample.kind="Rounding") # if using R 3.5 or earlier, use `set.seed(1)` test_index <- createDataPartition(y = movielens$rating, times = 1, p = 0.1, list = FALSE) edx <- movielens[-test_index,] temp <- movielens[test_index,] # Make sure userId and movieId in validation 
set are also in edx set validation <- temp %>% semi_join(edx, by = "movieId") %>% semi_join(edx, by = "userId") # Add rows removed from validation set back into edx set removed <- anti_join(temp, validation) edx <- rbind(edx, removed) rm(dl, ratings, movies, temp, movielens, removed,test_index) # Basic Exploration paste("The dataset has " , nrow(edx) , "ratings") paste("There are " , ncol(edx), "columns") #Checking data validity paste("There are " , edx %>% filter(rating==0) %>% nrow(), "ratings = 0") paste("There are " , edx %>% filter(rating>5) %>% nrow(), "ratings > 5") paste("There are " , edx %>% distinct(movieId) %>% count(), "distinct movies") paste("There are " ,edx %>% distinct(userId) %>% count(), "distinct users") summary(edx) glimpse(edx) # how many movies have rating X? edx %>% group_by(rating) %>% summarize(count = n()) %>% ggplot(aes(x = rating, y = count)) + geom_line()+ theme_economist() # how many movies do users rate ? edx %>% group_by(userId)%>% summarise(n=n())%>% arrange(desc(n))%>% ggplot(aes(n))+ geom_histogram(bins=200)+ scale_x_continuous(limits = c(0,750))+ theme_economist() # how many ratings do individual movies usually receive? edx %>% group_by(movieId)%>% summarise(n=n())%>% arrange(desc(n))%>% ggplot(aes(n))+ geom_histogram(bins=200)+ scale_x_continuous(limits = c(0,750))+ theme_economist() # What is the most common rating a movie receives? mu <- mean(edx$rating) movie_avgs <- edx %>% group_by(movieId) %>% summarize(b_i = mean(rating - mu)) qplot(movie_avgs$b_i,bins=10,color=I("red")) #How do most users rate? mu <- mean(edx$rating) user_avgs <- edx %>% group_by(userId) %>% summarize(b_u = mean(rating - mu)) qplot(user_avgs$b_u,bins=10,color=I("red")) # Which genres get the best rating? What is their spread? 
#The following code is decodes the genre data by adding a 0 or an NA based on #whether the name of the genre is present in the genre column n_a=NA genre_data <- edx %>% mutate( Romance = ifelse(str_detect(genres,"Romance"),1*rating,n_a), Comedy = ifelse(str_detect(genres,"Comedy"),1*rating,n_a), Action = ifelse(str_detect(genres,"Action"),1*rating,n_a), Crime = ifelse(str_detect(genres,"Crime"),1*rating,n_a), Thriller = ifelse(str_detect(genres,"Thriller"),1*rating,n_a), Drama = ifelse(str_detect(genres,"Drama"),1*rating,n_a), Sci_Fi = ifelse(str_detect(genres,"Sci-Fi"),1*rating,n_a), Adventure = ifelse(str_detect(genres,"Adventure"),1*rating,n_a), Children = ifelse(str_detect(genres,"Children"),1*rating,n_a), Fantasy = ifelse(str_detect(genres,"Fantasy"),1*rating,n_a), War = ifelse(str_detect(genres,"War"),1*rating,n_a), Animation = ifelse(str_detect(genres,"Animation"),1*rating,n_a), Musical = ifelse(str_detect(genres,"Musical"),1*rating,n_a), Western = ifelse(str_detect(genres,"Western"),1*rating,n_a), Mystery = ifelse(str_detect(genres,"Mystery"),1*rating,n_a), Film_Noir = ifelse(str_detect(genres,"Film-Noir"),1*rating,n_a), Horror = ifelse(str_detect(genres,"Horror"),1*rating,n_a), Documentary = ifelse(str_detect(genres,"Documentary"),1*rating,n_a), IMAX = ifelse(str_detect(genres,"IMAX"),1*rating,n_a) ) #In order to make a errorbar plot , we now need a table containing genre and rating # we first gather all the columns into a single values column called rating and then # we filter out all the na rows # later we calculate the 2se for making the error plot genre_data %>% select(Romance:IMAX) %>% gather(., key="Genre",value = "rating") %>% filter(is.na(rating)==FALSE) %>% group_by(Genre) %>% summarise(n = n(), avg = mean(rating), se = sd(rating)/sqrt(n)) %>% mutate(Genre = reorder(Genre, avg)) %>% ggplot(aes(x = Genre, y = avg, ymin = avg - 2*se, ymax = avg + 2*se)) + geom_errorbar() + theme(axis.text.x = element_text(angle = 45, hjust = 1)) + labs(title = 
"Error bar plots by Genre" , caption = "Separated Genres from edx dataset") # Does the mean rating change over time? #Here, we use functions from the lubridate package to extract the week from the timestamp edx %>% mutate(week = round_date(as_datetime(timestamp), unit = "week")) %>% group_by(week) %>% summarize(rating = mean(rating)) %>% ggplot(aes(week, rating)) + geom_point() + geom_smooth() + ggtitle("Rating evolution by week")+ theme_economist() # Is a rating given at night different than one given in the morning? edx %>% mutate(hour = hour(as_datetime(timestamp))) %>% group_by(hour) %>% summarize(rating = mean(rating)) %>% ggplot(aes(hour, rating)) + geom_point() + geom_smooth() + ggtitle("Rating evolution by hour")+ theme_economist() # Add columns for each genre #We choose 0 or 1 as the output since this will make it easier to make a linear model data <- edx %>% mutate(Romance = ifelse(str_detect(genres,"Romance"),1,0), Comedy = ifelse(str_detect(genres,"Comedy"),1,0), Action = ifelse(str_detect(genres,"Action"),1,0), Crime = ifelse(str_detect(genres,"Crime"),1,0), Thriller = ifelse(str_detect(genres,"Thriller"),1,0), Drama = ifelse(str_detect(genres,"Drama"),1,0), Sci_Fi = ifelse(str_detect(genres,"Sci-Fi"),1,0), Adventure = ifelse(str_detect(genres,"Adventure"),1,0), Children = ifelse(str_detect(genres,"Children"),1,0), Fantasy = ifelse(str_detect(genres,"Fantasy"),1,0), War = ifelse(str_detect(genres,"War"),1,0), Animation = ifelse(str_detect(genres,"Animation"),1,0), Musical = ifelse(str_detect(genres,"Musical"),1,0), Western = ifelse(str_detect(genres,"Western"),1,0), Mystery = ifelse(str_detect(genres,"Mystery"),1,0), Film_Noir = ifelse(str_detect(genres,"Film-Noir"),1,0), Horror = ifelse(str_detect(genres,"Horror"),1,0), Documentary = ifelse(str_detect(genres,"Documentary"),1,0), IMAX = ifelse(str_detect(genres,"IMAX"),1,0) ) glimpse (data) # Ready for modeling! Create test and train #We divide the data into training and test sets. 
#Before this, we set a seed to ensure that the work is reproducible #The index already made in the first code chunk was to separate #the validation set so we need to remake the index #We choose an 80/20 split, the standard in ML test_index <- createDataPartition(y = data$rating, times = 1, p = 0.2, list = FALSE) train_set <- data[-test_index,] test_set <- data[test_index,] #We don't want movies or users in the test set that don't appear in the training set test_set <- test_set %>% semi_join(train_set, by = "movieId") %>% semi_join(train_set, by = "userId") #Validation set was already created in the code chunk Initial dataframes # Function to create sparse matrix. Somehow, recommenderlab models change the prior data to its handy to have a function to reproduce it #For using recommenderlab models, we need to make a sparse matrix and partition it #using the inbuilt partition functions create_sparse_m <- function(){ sparse_m <- sparseMatrix( i = as.numeric(as.factor(edx$userId)), j = as.numeric(as.factor(edx$movieId)), x = edx$rating, dims = c(length(unique(edx$userId)), length(unique(edx$movieId))), dimnames = list(paste("u", 1:length(unique(edx$userId)), sep = ""), paste("m", 1:length(unique(edx$movieId)), sep = ""))) sparse_m <- new("realRatingMatrix", data = sparse_m) return (sparse_m) } # Data Reduction #90 percentile data selection sparse_m <- create_sparse_m() y = 0.9 min_movies_by_user <- quantile(rowCounts(sparse_m),y) min_ratings_per_movie<- quantile(rowCounts(sparse_m),y) # Function to calculate RMSE RMSE <- function(true_ratings, predicted_ratings){ sqrt(mean((true_ratings - predicted_ratings)^2)) } # Basic Model: predict mean mu <- mean(data$rating) #We make a a dataframe as long as test_set with mu repeated as R Markdown #interprets the difference in length between mu and test_set as an error. 
naive_rmse <- RMSE(test_set$rating,rep(mu,nrow(test_set))) naive_rmse rmse_results <- tibble(method = "Just the average", RMSE = naive_rmse) # User and Movie model, non regularized movie_avgs <- train_set %>% group_by(movieId) %>% summarize(b_i = mean(rating - mu)) user_avgs <- train_set %>% left_join(movie_avgs, by="movieId") %>% group_by(userId) %>% summarise(b_u= mean(rating-b_i-mu)) predicted_ratings <- test_set %>% left_join(movie_avgs, by="movieId")%>% left_join(user_avgs, by="userId") %>% mutate(pred = mu + b_i +b_u ) %>% pull(pred) User_And_Movie_effect <- RMSE(test_set$rating,predicted_ratings) User_And_Movie_effect rmse_results <- add_row(rmse_results,method = "Movie Average and User Average", RMSE = User_And_Movie_effect) # Motivation for regularization #Checking errors to see which are the one we got wrong nmbr_ratings <- data %>% group_by(movieId) %>% summarise(n=n()) test_set %>% left_join(movie_avgs, by='movieId') %>% left_join(user_avgs, by="userId") %>% left_join(nmbr_ratings, by = "movieId") %>% mutate(residual = rating - (mu + b_i + b_u)) %>% arrange(desc(abs(residual))) %>% slice(1:10) %>% select(title, residual, n) %>% knitr::kable() # Regularized user and movie model lambdas <- seq(0, 10, 0.25) rmses <- sapply(lambdas, function(l){ mu <- mean(train_set$rating) b_i <- train_set %>% group_by(movieId) %>% summarize(b_i = sum(rating - mu)/(n()+l)) b_u <- train_set %>% left_join(b_i, by="movieId") %>% group_by(userId) %>% summarize(b_u = sum(rating - b_i - mu)/(n()+l)) predicted_ratings <- test_set %>% left_join(b_i, by = "movieId") %>% left_join(b_u, by = "userId") %>% mutate(pred = mu + b_i + b_u) %>% pull(pred) return(RMSE(predicted_ratings, test_set$rating)) }) lambda <- lambdas[which.min(rmses)] b_i <- train_set %>% group_by(movieId) %>% summarize(b_i = sum(rating - mu)/(n()+lambda)) b_u <- train_set %>% left_join(b_i, by="movieId") %>% group_by(userId) %>% summarize(b_u = sum(rating - b_i - mu)/(n()+lambda)) gc() qplot(lambdas,rmses) 
rmse_results <- rmse_results %>% add_row(method ="Regularized user & Movie effects", RMSE =min(rmses)) rmse_results # Regularized User Movie, Hour model lambdas <- seq(0, 10, 0.25) rmses <- sapply(lambdas, function(l){ mu <- mean(train_set$rating) b_i <- train_set %>% group_by(movieId) %>% summarize(b_i = sum(rating - mu)/(n()+l)) b_u <- train_set %>% left_join(b_i, by="movieId") %>% group_by(userId) %>% summarize(b_u = sum(rating - b_i - mu)/(n()+l)) b_t <- train_set %>% left_join(b_i, by="movieId") %>% left_join(b_u, by="userId") %>% mutate(hour = hour(as_datetime(timestamp))) %>% group_by(hour) %>% summarize(b_t = sum(rating - mu - b_i - b_u)/(n()+l) ) predicted_ratings <- test_set %>% mutate(hour = hour(as_datetime(timestamp))) %>% left_join(b_i, by = "movieId") %>% left_join(b_u, by = "userId") %>% left_join(b_t, by = "hour") %>% mutate(pred = mu + b_i + b_u + b_t) %>% pull(pred) return(RMSE(predicted_ratings, test_set$rating)) }) lambda <- lambdas[which.min(rmses)] b_i <- train_set %>% group_by(movieId) %>% summarize(b_i = sum(rating - mu)/(n()+lambda)) b_u <- train_set %>% left_join(b_i, by="movieId") %>% group_by(userId) %>% summarize(b_u = sum(rating - mu -b_i)/(n()+lambda)) b_t <- train_set %>% left_join(b_i, by="movieId") %>% left_join(b_u, by="userId") %>% mutate(hour = hour(as_datetime(timestamp))) %>% group_by(hour) %>% summarize(b_t = sum(rating - mu - b_i - b_u)/(n()+lambda) ) qplot(lambdas,rmses) gc()#Garbage collection rmse_results <- rmse_results %>% add_row(method ="Regularized user,movie,time effects", RMSE =min(rmses)) # Regularized user movie, time model with lm genre genre_train <- train_set %>% mutate(hour = hour(as_datetime(timestamp))) %>% left_join(b_i, by = "movieId") %>% left_join(b_u, by = "userId") %>% left_join(b_t, by = "hour") %>% mutate(residual = rating - mu- b_i-b_u-b_t ) temp <- genre_train %>% select(c(Romance:IMAX,residual)) lm_fit <- lm(residual ~ ., data=temp) temp2 <- test_set %>% select(c(Romance:IMAX)) pred <- 
predict.lm(lm_fit, newdata = temp2) predicted_ratings <- test_set %>% mutate(hour = hour(as_datetime(timestamp))) %>% left_join(b_i, by = "movieId") %>% left_join(b_u, by = "userId")%>% left_join(b_t, by = "hour") %>% mutate(pred = mu + b_i + b_u + b_t + pred) %>% pull(pred) rmse_genre_effect <- RMSE(predicted_ratings, test_set$rating) rmse_results <- rmse_results %>% add_row(method ="Regularized user,movie,time effects with Genre (lm)", RMSE =min(rmse_genre_effect)) rm(temp,temp2) rmse_results # UBCF Model y=0.9 sparse_m <- create_sparse_m() nn=15 set.seed(1991) min_movies_by_user <- quantile(rowCounts(sparse_m),y) min_ratings_per_movie<- quantile(rowCounts(sparse_m),y) sparse_m_limited <- sparse_m[rowCounts(sparse_m)>=min_movies_by_user, colCounts(sparse_m)>=min_ratings_per_movie] e4 <- evaluationScheme(sparse_m_limited , method="split", train=0.9, k=1, given=8) r_UBCF <- Recommender(getData(e4, "train"), "UBCF",parameter=c(nn=nn)) p_UBCF <- predict(r_UBCF, getData(e4, "known"), type="ratings") rmse_UBCF <- calcPredictionAccuracy(p_UBCF, getData(e4, "unknown"))[1] rmse_results <- rmse_results %>% add_row(method ="UBCF using reccomenderlab", RMSE =rmse_UBCF) # IBCF Model y=0.9 sparse_m <- create_sparse_m() nn=15 set.seed(1991) min_movies_by_user <- quantile(rowCounts(sparse_m),y) min_ratings_per_movie<- quantile(rowCounts(sparse_m),y) sparse_m_limited <- sparse_m[rowCounts(sparse_m)>=min_movies_by_user, colCounts(sparse_m)>=min_ratings_per_movie] model_ibcf <- Recommender(sparse_m_limited, method="IBCF", param=list(normalize="center")) #Testing prediction #pred_pop <- predict(model_ibcf, sparse_m[1:10], type="ratings") #as(pred_pop, "matrix")[,1:10] #Finding RMSE e6 <- evaluationScheme(sparse_m_limited , method="split", train=0.9, k=1, given=8) p_IBCF <- predict(model_ibcf, getData(e6, "known"), type="ratings") rmse_IBCF <- calcPredictionAccuracy(p_IBCF, getData(e6, "unknown"))[1] rmse_results <- rmse_results %>% add_row(method ="IBCF using reccomenderlab", RMSE 
=rmse_IBCF) # Popular model sparse_m <- create_sparse_m() #Reduction parameter y=0.9 min_movies_by_user <- quantile(rowCounts(sparse_m),y) min_ratings_per_movie<- quantile(rowCounts(sparse_m),y) sparse_m_limited <- sparse_m[rowCounts(sparse_m)>=min_movies_by_user, colCounts(sparse_m)>=min_ratings_per_movie] #Making the model model_popular <- Recommender(sparse_m_limited, method="POPULAR", param=list(normalize="center")) #Testing prediction #Finding RMSE e5 <- evaluationScheme(sparse_m_limited , method="split", train=0.9, k=1, given=8) p_POPULAR <- predict(model_popular, getData(e5, "known"), type="ratings") rmse_POP <- calcPredictionAccuracy(p_POPULAR, getData(e5, "unknown"))[1] rmse_results <- rmse_results %>% add_row(method ="POPULAR using reccomenderlab", RMSE =rmse_POP) # Recosystem Matrix factorization model #we select only the userID, MovieID and rating in the next three statements test_GD <- as.matrix (test_set [,1:3]) train_GD <- as.matrix(test_set [,1:3]) set.seed(1) #We need to build data stream objects as these are accepted inputs for this algorithm train_GD_2 <- data_memory(train_GD[,1],train_GD[,2],train_GD[,3]) test_GD_2 <- data_memory(test_GD[,1],test_GD[,2],test_GD[,3]) #Next step is to build Recommender object r = Reco() # Matrix Factorization : tuning training set # lrate is the gradient descend step rate # dim are the number of latent factors # nthread is number of threads to use : REDUCE IF YOUR PROCESSOR DOESN'T SUPPORT 6 THREADS opts = r$tune(train_GD_2, opts = list(dim = c(10, 20, 30), lrate = c(0.1, 0.2), costp_l1 = 0, costq_l1 = 0, nthread = 6, niter = 10)) r$train(train_GD_2, opts = c(opts$min, nthread = 6, niter = 20)) pred <- r$predict(test_GD_2, out_memory()) rmse_MFGD <- RMSE(pred,test_set$rating) rmse_results <- rmse_results %>% add_row(method ="Matrix Factorization using recosystem", RMSE =rmse_MFGD) rmse_results # Checking RMSE on validation set with MF model from recosystem #Selecting only pertinent columns valid_GD 
<-as.matrix(validation [,1:3]) valid_GD_2 <- data_memory(valid_GD[,1],valid_GD[,2],valid_GD[,3]) #selecting to output to variable pred <- r$predict(valid_GD_2, out_memory()) rmse_MFGD_final <- RMSE(pred,validation$rating) rmse_results <- rmse_results %>% add_row(method ="Matrix Factorization using recosystem : FINAL Validation RMSE ", RMSE =rmse_MFGD_final) options(pillar.sigfig = 10) rmse_results #------- END ----- # Testing of reccomenderlab models #RecommenderLab Full Redo install.packages("tictoc") library(recommenderlab) library(dplyr) library(Matrix) library(tictoc) create_sparse_m <- function(){ sparse_m <- sparseMatrix( i = as.numeric(as.factor(edx$userId)), j = as.numeric(as.factor(edx$movieId)), x = edx$rating, dims = c(length(unique(edx$userId)), length(unique(edx$movieId))), dimnames = list(paste("u", 1:length(unique(edx$userId)), sep = ""), paste("m", 1:length(unique(edx$movieId)), sep = ""))) sparse_m <- new("realRatingMatrix", data = sparse_m) return (sparse_m) } identical(sparse_m,sparse_m2) create_sparse_m() as(sparse_m[1:10,1:10],"matrix") as(sparse_m2[1:10,1:10],"matrix") tic("Full r_UBCF_nn_25_given~15/36") ## create 90/10 split (known/unknown) for the first 500 users in Jester5k given <- min(rowCounts(sparse_m)) e <- evaluationScheme(sparse_m, method="split", train=0.9, k=1, given=4) e ## create a user-based CF recommender using training data r <- Recommender(getData(e, "train"), "UBCF", parameter= c(nn=15)) ## create predictions for the test data using known ratings (see given above) p <- predict(r, getData(e, "known"), type="ratings") p as(p[1:5,1:5],"matrix") ## compute error metrics averaged per user and then averaged over all ## recommendations rmse_UBCF <-calcPredictionAccuracy(p, getData(e, "unknown")) #Above statement executed to give an RMSE of 1.08 on the whole dataset with all standard parameters. 
#The predict function took ~8 hours to execute #head(calcPredictionAccuracy(p, getData(e, "unknown"), byUser=TRUE)) gc() toc() #seq(0,1000,by=100) #sparse_m <- new("realRatingMatrix", data = sparse_m) #hist(rowCounts(sparse_m)[rowCounts(sparse_m) >= 0 &rowCounts(sparse_m)< 1000], xlim=c(0,1000),breaks = seq(0,1000,by=25), main="Movie Rating Count") #median(rowCounts(sparse_m)) #quantile(rowCounts(sparse_m),0.25) #Making a model with the entire set of users and movies takes 8 hours #the median user rates 51 movies #Let us eliminate the bottom x percentiles. #hist(colCounts(sparse_m)[colCounts(sparse_m)<2000], main="Ratings by movie Count", xlim = c(0,2000)) #quantile(colCounts(sparse_m)) x<-0.25 min_movies_by_user <- quantile(rowCounts(sparse_m),x) min_ratings_per_movie<- quantile(rowCounts(sparse_m),x) sparse_m_limited <- sparse_m[rowCounts(sparse_m)>=min_movies_by_user, colCounts(sparse_m)>=min_ratings_per_movie] #tail(recommenderRegistry$get_entries(dataType = "realRatingMatrix"), 1) tic("75 pc r_UBCF_nn_25_given_4") #min(rowCounts(sparse_m_limited)) dim(sparse_m_limited) - dim(sparse_m) ## create 90/10 split (known/unknown) for the first 500 users in Jester5k e2 <- evaluationScheme(sparse_m_limited , method="split", train=0.9, k=1, given=4) r_UBCF_nn_25 <- Recommender(getData(e2, "train"), "UBCF") ## create predictions for the test data using known ratings (see given above) p_UBCF_nn_25 <- predict(r_UBCF_nn_25, getData(e2, "known"), type="ratings") p_UBCF_nn_25 ## compute error metrics averaged per user and then averaged over all ## recommendations rmse_UBCF_limited_data <- calcPredictionAccuracy(p_UBCF_nn_25, getData(e2, "unknown")) toc() test_percentiles <- seq(0.95,0.25,-0.10) #Checking time taken by percentile exclusion of data time_and_rmse <- sapply(test_percentiles, function(y){ tic(y) min_movies_by_user <- quantile(rowCounts(sparse_m),y) min_ratings_per_movie<- quantile(rowCounts(sparse_m),y) sparse_m_limited_t <- 
sparse_m[rowCounts(sparse_m)>=min_movies_by_user, colCounts(sparse_m)>=min_ratings_per_movie] e3 <- evaluationScheme(sparse_m_limited_t , method="split", train=0.9, k=1, given=4) r_UBCF_nn_25_t <- Recommender(getData(e3, "train"), "UBCF") p_UBCF_nn_25_t <- predict(r_UBCF_nn_25_t, getData(e3, "known"), type="ratings") rmse_ <- calcPredictionAccuracy(p_UBCF_nn_25_t, getData(e3, "unknown"))[1] print(rmse_) return(list(perc = y,time = toc(),rmse = rmse_)) }) #Checking response of rmse to change in nn keeping percentile reduction = 0.9 tic.clearlog() test_nn <- seq(10,50,5) time_and_rmse <- sapply(test_nn, function(nn){ y=0.9 tic(nn) min_movies_by_user <- quantile(rowCounts(sparse_m),y) min_ratings_per_movie<- quantile(rowCounts(sparse_m),y) sparse_m_limited_nn <- sparse_m[rowCounts(sparse_m)>=min_movies_by_user, colCounts(sparse_m)>=min_ratings_per_movie] e4 <- evaluationScheme(sparse_m_limited_nn , method="split", train=0.9, k=1, given=4) r_UBCF_nn <- Recommender(getData(e4, "train"), "UBCF",parameter=c(nn=nn)) p_UBCF_nn <- predict(r_UBCF_nn, getData(e4, "known"), type="ratings") rmse_ <- calcPredictionAccuracy(p_UBCF_nn, getData(e4, "unknown"))[1] toc(log=TRUE,quiet = FALSE) print(paste(nn," is nn. 
", rmse_, " is RMSE" )) return(list(perc = nn, rmse = rmse_)) }) #RMSE minimum at nn=25 #Checking response of rmse to change in given keeping percentile reduction = 0.2 min(rowCounts(sparse_m)) tic.clearlog() test_given <- seq(3,8,1) given_test <- sapply(test_given, function(g){ y=0.2 tic(g) min_movies_by_user <- quantile(rowCounts(sparse_m),y) min_ratings_per_movie<- quantile(rowCounts(sparse_m),y) sparse_m_limited <- sparse_m[rowCounts(sparse_m)>=min_movies_by_user, colCounts(sparse_m)>=min_ratings_per_movie] e4 <- evaluationScheme(sparse_m_limited , method="split", train=0.9, k=1, given=g) r_UBCF <- Recommender(getData(e4, "train"), "UBCF",parameter=c(nn=25)) p_UBCF <- predict(r_UBCF, getData(e4, "known"), type="ratings") rmse_ <- calcPredictionAccuracy(p_UBCF, getData(e4, "unknown"))[1] toc(log=TRUE,quiet = FALSE) print(paste(g," is given. ", rmse_, " is RMSE" )) return(list(given = g, rmse = rmse_)) }) log_g1 <- tic.log(format = TRUE) given_test #increasing given reduced rmse until given was 7. Then reduced. #Checking response of rmse to change in given with no reduction of data min(rowCounts(sparse_m)) tic.clearlog() test_given <- seq(3,8,1) given_test_2 <- sapply(test_given, function(g){ y=0.2 tic(g) min_movies_by_user <- 0 min_ratings_per_movie<- 0 sparse_m_limited <- sparse_m[rowCounts(sparse_m)>=min_movies_by_user, colCounts(sparse_m)>=min_ratings_per_movie] e4 <- evaluationScheme(sparse_m_limited , method="split", train=0.9, k=1, given=g) r_UBCF <- Recommender(getData(e4, "train"), "UBCF",parameter=c(nn=25)) p_UBCF <- predict(r_UBCF, getData(e4, "known"), type="ratings") rmse_ <- calcPredictionAccuracy(p_UBCF, getData(e4, "unknown"))[1] toc(log=TRUE,quiet = FALSE) print(paste(g," is given. 
", rmse_, " is RMSE" )) return(list(given = g, rmse = rmse_)) }) log_g2 <- tic.log(format = TRUE) #One iteration ran in 17.6 K secs and RMSE 2926 (bullshit) #Given Test 3 : Adding sparse_m creation function and gc() #Checking response of rmse to change in given keeping percentile reduction = 0.9 min(rowCounts(sparse_m)) tic.clearlog() test_given <- seq(8,7,-1) given_test_3 <- sapply(test_given, function(g){ y=0.9 tic(g) sparse_m <- create_sparse_m() print(identical(sparse_m,sparse_m2)) min_movies_by_user <- quantile(rowCounts(sparse_m),y) min_ratings_per_movie<- quantile(rowCounts(sparse_m),y) sparse_m_limited <- sparse_m[rowCounts(sparse_m)>=min_movies_by_user, colCounts(sparse_m)>=min_ratings_per_movie] e4 <- evaluationScheme(sparse_m_limited , method="split", train=0.9, k=1, given=g) r_UBCF <- Recommender(getData(e4, "train"), "UBCF",parameter=c(nn=25)) p_UBCF <- predict(r_UBCF, getData(e4, "known"), type="ratings") rmse_ <- calcPredictionAccuracy(p_UBCF, getData(e4, "unknown"))[1] toc(log=TRUE,quiet = FALSE) print(paste(g," is given. ", rmse_, " is RMSE" )) gc() print(identical(sparse_m,sparse_m2)) return(list(given = g, rmse = rmse_)) }) log_g1 <- tic.log(format = TRUE) given_test_3 #FINALLY!!! given 7 decreased rmse to 1.009 from 1.029 #Testing with different quantities of data from 0.9 - 0.5 and for given from 8-4 , nn from 15 to 55 test_perc_given_nn <- expand.grid(perc=seq(0.9,0.5,-0.1),given=seq(8,4,-1),nn=seq(15,55,10)) #min(rowCounts(sparse_m)) tic.clearlog() #toString(test_perc_given_nn[1,],) #test_perc_given_nn[1,1] full_test_UBCF <- apply(test_perc_given_nn,1, function(t){ print( paste("Now running model :", t[1], " is perc ", t[2]," is given. ", t[3]," is nn. 
")) tic(toString(t)) y=t[1] sparse_m <- create_sparse_m() nn=t[3] print(identical(sparse_m,sparse_m2)) set.seed(1991) min_movies_by_user <- quantile(rowCounts(sparse_m),y) min_ratings_per_movie<- quantile(rowCounts(sparse_m),y) sparse_m_limited <- sparse_m[rowCounts(sparse_m)>=min_movies_by_user, colCounts(sparse_m)>=min_ratings_per_movie] e4 <- evaluationScheme(sparse_m_limited , method="split", train=0.9, k=1, given=t[2]) r_UBCF <- Recommender(getData(e4, "train"), "UBCF",parameter=c(nn=nn)) p_UBCF <- predict(r_UBCF, getData(e4, "known"), type="ratings") rmse_ <- calcPredictionAccuracy(p_UBCF, getData(e4, "unknown"))[1] toc(log=TRUE,quiet = FALSE) print(paste( rmse_, " is RMSE")) gc() print(identical(sparse_m,sparse_m2)) return(list(perc = t[1] , given = t[2], nn=t[3] , rmse = rmse_)) }) log_full_test <- tic.log(format = TRUE) full_test_UBCF log_full_test df <- data.frame(matrix(unlist(full_test_UBCF), nrow=125, byrow=TRUE),stringsAsFactors=FALSE) df which.min(df$X4) min(df$X4) #Best parameters are : perc = 0.9. 
given = 8, nn=15 #somehow, 8 validation set items aren't a part of the test and validation set # Probably because we didn't semijoin on genre #With only 8 it wont make a difference to RMSE na_index <- c(50641,118361,223470,487503,674188,688404,800929,830872) 8/nrow(validation) val_set <- validation[-na_index] pred <- predict.lm(lm_fit, newdata = temp2) predicted_ratings_validation val_set <- val_set %>% mutate(Romance = ifelse(str_detect(genres,"Romance"),1,0), Comedy = ifelse(str_detect(genres,"Comedy"),1,0), Action = ifelse(str_detect(genres,"Action"),1,0), Crime = ifelse(str_detect(genres,"Crime"),1,0), Thriller = ifelse(str_detect(genres,"Thriller"),1,0), Drama = ifelse(str_detect(genres,"Drama"),1,0), Sci_Fi = ifelse(str_detect(genres,"Sci-Fi"),1,0), Adventure = ifelse(str_detect(genres,"Adventure"),1,0), Children = ifelse(str_detect(genres,"Children"),1,0), Fantasy = ifelse(str_detect(genres,"Fantasy"),1,0), War = ifelse(str_detect(genres,"War"),1,0), Animation = ifelse(str_detect(genres,"Animation"),1,0), Musical = ifelse(str_detect(genres,"Musical"),1,0), Western = ifelse(str_detect(genres,"Western"),1,0), Mystery = ifelse(str_detect(genres,"Mystery"),1,0), Film_Noir = ifelse(str_detect(genres,"Film-Noir"),1,0), Horror = ifelse(str_detect(genres,"Horror"),1,0), Documentary = ifelse(str_detect(genres,"Documentary"),1,0), IMAX = ifelse(str_detect(genres,"IMAX"),1,0)) %>% mutate(hour = hour(as_datetime(timestamp))) %>% left_join(b_i, by = "movieId") %>% left_join(b_u, by = "userId")%>% left_join(b_t, by = "hour") genre_pred <- predict.lm(lm_fit, newdata = val_set) predicted_ratings_Validation<-val_set%>% cbind(genre_pred)%>% mutate(pred = mu + b_i + b_u + b_t + genre_pred) %>% pull(pred) #na_index <-which(is.na(predicted_ratings_Validation)) RMSE(predicted_ratings_Validation,val_set$rating)
51666f0698e5ff9b2c788774011ffcc073ecf92a
57b72c064f833f4a8ed2535ed4b05d90a002e83c
/R/DatabaseConnector-internal.R
c6c052931e9af02cbacc06efb1affabaf0175e2e
[]
no_license
writetoritu/DatabaseConnector
ebcfb34253c3b1cb42013dd7bae8886ab7ec46cd
c8deda5a051a89e75c2686a471e1fdadd979f7ef
refs/heads/master
2021-01-22T20:39:04.312528
2014-12-29T19:42:22
2014-12-29T19:42:22
null
0
0
null
null
null
null
UTF-8
R
false
false
8,368
r
DatabaseConnector-internal.R
.Random.seed <- c(403L, 624L, 723094258L, 1517587915L, -397157744L, 667899217L, -1112707362L, 560008647L, -1238228132L, -1790051155L, -2114642038L, -1553054845L, -1136871704L, -1999618103L, 1640581366L, -807802113L, 1835999540L, 1742813861L, -637839582L, -1462534085L, 1864085056L, 256100673L, 1942058510L, -84394185L, -751343092L, 1573816221L, 585273786L, 42935283L, 1969627288L, 1393021369L, -1264429530L, 981789295L, -2033820188L, 1568785301L, 997011282L, 1361579691L, 343764464L, 888551729L, 591677758L, -22754137L, 352541884L, 1545784973L, 1525256170L, 960569443L, 1211037256L, 644145065L, -1110724778L, 56149471L, -167655788L, -585791355L, -1431170174L, -820430565L, 1479809440L, 1621468449L, 1932060782L, 872265239L, 981531500L, 1635373437L, 263002650L, 1913338067L, 498219000L, 210135449L, 1150833798L, -20153009L, -378774716L, -945059467L, 600638386L, 403570571L, -121982640L, 1506872593L, -1759358050L, 408550279L, 224085532L, -1698525075L, 1603285066L, 154430787L, 1976231336L, -2033487991L, -1256503882L, -1357442881L, 1913723892L, 1376962149L, 1937833954L, 187523579L, -1555286784L, -675843839L, 2141424334L, 248552695L, 301808844L, -2136208547L, -776613254L, -54280781L, 387186520L, 2119364985L, 1344766694L, -1271955409L, 867895460L, -187023531L, 1733363730L, -513908629L, -1545362256L, 1901580529L, 165645822L, -801596825L, 932306812L, -945470899L, -2046754646L, 1551903267L, -1092057848L, 872148841L, 1531972630L, 1003277215L, 351609172L, 1548809285L, -41935806L, -1656227109L, -1791229856L, -1921962783L, 801725742L, -618073129L, -2012991956L, 1339897149L, 1695857370L, -1175407981L, -782010696L, 811952473L, 1357373766L, 1902506767L, -284531196L, 1485169973L, -1794032526L, 2068918603L, 282085392L, 1384285393L, 940832862L, -470243001L, -671143716L, 356705325L, 1347682570L, -1738778877L, 157275240L, 871259977L, 268567158L, -298715521L, 1040570036L, -850914779L, 499607714L, 1637942203L, 1591442368L, -1765091135L, -432906354L, 1153350327L, 1995296652L, 528830237L, 1373754170L, 
-590735501L, 678993432L, 649449785L, 208760742L, 690476527L, -793611420L, -1574536427L, 1210425554L, 1344172587L, 643341168L, -800511823L, -1437101378L, 1834100775L, -553967044L, 1913878029L, -861851286L, 1040249827L, -1492593720L, 144359209L, 2127112406L, -419524257L, 2023439380L, -1301274619L, -1251023614L, -797934437L, 486713120L, 79459489L, -780758546L, 1397354903L, 1695686892L, -65251075L, -1405805670L, -1266160557L, 1680569720L, -516151015L, -1805898234L, -1439881009L, -1173671740L, -1120665355L, 665204018L, 1711153931L, -1219191088L, -1180451695L, -1253941986L, -503507193L, -388117604L, -2003896339L, -1795124790L, -358219581L, 1438352168L, -1342632183L, -1623359690L, 585800767L, 2081247604L, 1430330853L, -1316056734L, -34708101L, -662076800L, -465698687L, -332532658L, 1786943607L, -2067193268L, -1374006563L, 338072570L, -1402851021L, 945028312L, 1542484217L, 1178606694L, -1624380497L, -1200841180L, -786008363L, -425002606L, 1596474347L, -2103684560L, -645250959L, 2037143422L, 330397159L, 1040131324L, -1087542835L, -813030870L, 1468235171L, 1062199944L, -1543418135L, -1258879594L, 2058229535L, 733222612L, 993201093L, 188640706L, -1705853349L, -2042098208L, 1044872289L, -51345746L, 1243656023L, -1268067412L, -1174979395L, -1244775334L, 1267787283L, -993381320L, 248162521L, -877315386L, -1897783665L, 86948740L, 1098243253L, 1145826802L, 2043991243L, 856143248L, -151735215L, -479362594L, 807879879L, -759746980L, 946260909L, 777380490L, 1506896515L, -107089432L, -626295095L, 1334688758L, -1560215041L, -1763210188L, 633203109L, -946439646L, -237664453L, 118901056L, 399566913L, -1772730098L, 232535607L, 2119120140L, 1613436573L, 1229198522L, 894175987L, -1788470376L, -305999687L, 441682214L, -603864721L, 94996708L, -1382403435L, 195105362L, -1875126869L, 1601095920L, -723838927L, -1411523522L, -1055489113L, 1197336508L, -760013427L, -277097750L, -490223773L, -2038582968L, -974152023L, 1251582550L, 748379359L, -117460588L, 307869573L, -139544958L, -324091877L, 
667694240L, 1909605409L, 385301358L, 762129687L, 416171628L, -1657937795L, 312483098L, 684433363L, -1577078024L, 1758521497L, 1941112710L, -985344945L, 1261766212L, -250906507L, 331507378L, 412436107L, -1968599984L, 842361873L, 1465214622L, -1405668729L, -397717220L, 680157037L, -585895094L, -6384573L, 1405558952L, 1405464201L, -843925322L, -2076891201L, -1185642764L, 981366117L, -1197530398L, 253126907L, -1589522432L, 1229164545L, -1452581426L, 2089522167L, 1715472332L, 695704157L, -503687814L, 21472435L, 1315895896L, 1810690169L, 1701557734L, 1701009199L, -1826016348L, 591507029L, 1070066450L, 622405483L, 596639664L, -906252303L, 913055998L, 859918695L, -1378391428L, -1872457395L, 1295401898L, -645017309L, 995246088L, -299520407L, 1282473750L, -226073953L, 1799228500L, 329524037L, 864010050L, 2034532827L, 407837536L, -1759720479L, 1145745454L, 720333527L, -184780500L, 2038449213L, 325762522L, -1242031725L, 1787556280L, 1594812505L, -921332666L, -1290450417L, -958525180L, -1749756875L, 2127142770L, 1677686859L, -2063981808L, 1394992081L, 1706691422L, -202579897L, 1012544476L, 481932077L, 570082314L, -1244824061L, -2097737880L, 2064096841L, -2039712394L, -1673065089L, -837533260L, 1329774885L, -1654092894L, -412022085L, 499914432L, 1347811265L, -1439878514L, -1001344585L, -10773876L, -1111499235L, -1895213510L, 1511325299L, 841914648L, 640602169L, -1001872730L, -2129482513L, -72638876L, -572724715L, -874544174L, 528496939L, -171968912L, -2136209487L, -841538114L, -403579097L, -466899652L, -1677605619L, -914787222L, -174744861L, -594702648L, 1550024233L, -2026038314L, -2110838689L, -852547820L, -623751419L, 930187266L, -1311505509L, 781238816L, 1709642657L, 1772807406L, 812083351L, 1807051756L, -491886595L, -923918694L, 483808083L, 1294921848L, 558147609L, -929242874L, 2115207119L, 1927928772L, -1053691915L, 773953586L, 1037265419L, -1464239664L, 25566097L, 593195038L, 1695043079L, -1583098212L, -1732983059L, 1036670154L, 371075011L, 1710079528L, 1882279433L, 
-1501892042L, -2131315905L, 2145827956L, -540357403L, 1320334434L, -861574021L, -1284170368L, -993517695L, -581187762L, -1293185161L, -865997492L, -1866210851L, -1053747462L, 1232345139L, -815466536L, 742944761L, -1817555098L, 886031023L, -1712273116L, 1227613653L, -1196958574L, 1093733099L, -1128354512L, -2136203407L, -421598594L, 484977895L, 488288252L, 1498069197L, 84239658L, -1331747677L, -1460691576L, 275320297L, -2017593194L, 1364569631L, 697500116L, -1012647227L, 1011093698L, -937605797L, 22096096L, 1441864545L, 735566254L, -342546857L, 1605967532L, 746081213L, 65683290L, 1193692435L, 950579000L, -1624102951L, 789114310L, 201291151L, 169371268L, -1186804811L, -1970646798L, 1204886475L, 1017614480L, -1525279919L, 1694078174L, 491355079L, -1427621540L, -532964691L, 826451338L, 2052100483L, -1687474968L, 318946761L, 446574326L, -2012997377L, 965474100L, 668375205L, 1698536738L, -797733317L, 1492968512L, -27654335L, 1203182606L, -702796489L, 269680652L, -700209763L, -1456367686L, -1725632013L, 1959925400L, 1308217273L, -263144410L, 1180342383L, -2001161244L, -1863409259L, -824117934L, 199990443L, 525083632L, 297531185L, -1237094594L, -807126361L, 1264874172L, -335582067L, 1620710890L, 1147825763L, -1623692216L, -906601047L, -1699506858L, -1782975521L, 1261018260L, -371595643L, 985094530L, -1377810661L, -414167136L, -1627725023L, -275673490L, -902257641L, 1942458732L, 1788735357L, 1428103194L, -559413549L, -608621064L, -2003343463L, 2031729286L, 43592527L, 120172868L, -1951963275L, -1128019534L, -474444405L, 1199859536L, 1704315665L, -1084982882L, -93296249L, -1427678180L, -150065555L, -1121733046L, -64701629L, -2110825560L, 54259081L, -1885983818L, -953205057L, 633598452L, 589702245L, 969491938L, -996446213L, -987534592L, 348892929L, -1375784754L, 2074249975L, -852569396L, -2113984163L, 936040570L, -756577357L, 825619800L, 453177209L, -1225004826L, 997404207L, -1564254556L, -1595597483L, -1756705262L, -1049629077L, -2077730128L, 1000050417L, 818197502L, 
-1096415129L, 566818172L, 937418829L, -50886998L, -1430816733L, 2116549384L, 147549545L, -857869802L, 1159461279L, -1129121964L, 691229253L)
d2107ae7168d6a3d83997e2ffa8ba8ddaf4f052b
8b756c46656cef6637e8b21c79b0b180747453c6
/projekt2.R
29bc117898374ffea60938500accf5d935feeaba
[]
no_license
knycz/shiny_macro
b5680d0ce1fe620ef4688ff1ab9d8f5051d181f0
1938ad1a0c1719e778775af328d722665faaf162
refs/heads/main
2023-01-11T22:40:08.854617
2020-11-10T03:34:49
2020-11-10T03:34:49
311,534,711
0
0
null
null
null
null
UTF-8
R
false
false
19,255
r
projekt2.R
#available on https://nomicten.shinyapps.io/WIZN/ library("shiny") library("tidyr") library("dplyr") library("XML") library("ggplot2") library("grid") library("plotly") library("devtools") library("WDI") library("usethis") ### List of countries ### WDI::WDI(extra = T) %>% filter(region != "NA") %>% select(iso2c, country) -> countries #NA instead of Aggregates countries <- as.data.frame(cbind(unique(countries$iso2c), unique(countries$country))) regions <- list("East Asia & Pacific", "Europe & Central Asia", "Latin America & Caribbean", "Middle East & North Africa", "North America", "South Asia", "Sub-Saharan Africa") ### DATA ### gdp_growth_an <- WDI(indicator ="NY.GDP.MKTP.KD.ZG", extra = T) gdp_growth_an_world <- subset(gdp_growth_an, country == "World") gdp_growth_an_world %>% select(NY.GDP.MKTP.KD.ZG, year) -> gdp_growth_an_world gdp_growth_an <- subset(gdp_growth_an, region != "NA") gdp_growth_pc <- WDI(indicator = "5.51.01.10.gdp", extra = T) gdp_growth_pc <- subset(gdp_growth_pc, region != "NA") gdp_level_2011 <- WDI(indicator = "NY.GDP.MKTP.KD", extra = T) gdp_level_2010_world <- subset(gdp_level_2011, country == "World") gdp_level_2011 <- subset(gdp_level_2011, region != "NA") gdp_level_2011 %>% rename(gdp_lvl2010const = NY.GDP.MKTP.KD) %>% arrange(country, year) -> gdp_level_2011 gdp_level_2011 %>% group_by(country) %>% mutate(change=(gdp_lvl2010const-lag(gdp_lvl2010const,1))/lag(gdp_lvl2010const,1)*100) %>% as.data.frame -> gdp_level_2011 gdp_level_2010_world %>% rename(gdp_lvl2010const = NY.GDP.MKTP.KD) %>% arrange(country, year) -> gdp_level_2010_world gdp_level_2010_world %>% group_by(country) %>% mutate(change=(gdp_lvl2010const-lag(gdp_lvl2010const,1))/lag(gdp_lvl2010const,1)*100) %>% as.data.frame -> gdp_level_2010_world #Define UI for app that draws a histogram ---- ui <- navbarPage("Macro in a nutshell", tabPanel("Global growth", fluidPage( titlePanel("World Development Indicators"), sidebarLayout( sidebarPanel( sliderInput(inputId = "bins", label 
= h3("Group countries into subgroups"), min = 1, max = 215, value = 30), sliderInput(inputId = "selectedyear", label = h3("Select year"), min = min(gdp_growth_an$year)+1, max = max(gdp_growth_an$year)-1, value = max(gdp_growth_an$year)-1, step = 1), sliderInput("growthrange", label = h3("Range of GDP growth"), min = round(min(gdp_growth_an$NY.GDP.MKTP.KD.ZG, na.rm = T),-1), max = round(max(gdp_growth_an$NY.GDP.MKTP.KD.ZG, na.rm = T),-1), value = c(-20, 20)) ), mainPanel( plotOutput(outputId = "distPlot") ) ) ) ), tabPanel("Country profile", fluidPage( titlePanel("Demographics"), sidebarLayout( sidebarPanel( selectInput("country", label = h3("Select country"), choices = countries[,2], selected = "Poland"), sliderInput(inputId = "year", label = h3("Select year"), min = 1960, max = 2018, value = 2018, step = 1, animate = T, animationOptions(interval = 150, loop = FALSE, playButton = "play", pauseButton = "pause")) ), mainPanel( plotlyOutput(outputId = "population_structure"), plotlyOutput(outputId = "population_total"), plotlyOutput(outputId = "TFR") ) ) ) ), tabPanel("International comparison", fluidPage( titlePanel("Comparison"), sidebarLayout( sidebarPanel( selectInput("indicator100", label = h3("Select indicator"), choices = list( # "Per capita GDP growth" = "5.51.01.10.gdp" , # "GDP (current $)" = "6.0.GDP_current" , # "GDP growth (annual %)" = "6.0.GDP_growth" , # "GDP (constant 2005 $)" = "6.0.GDP_usd" , # "GDP per capita, PPP (constant 2011 international $) " = "6.0.GDPpc_constant" , # "Trade in services (% of GDP)" = "BG.GSR.NFSV.GD.ZS" , # "Gross private capital flows (% of GDP, PPP)" = "BG.KAC.FNEI.GD.PP.ZS" , # "Gross private capital flows (% of GDP)" = "BG.KAC.FNEI.GD.ZS" , # "Gross foreign direct investment (% of GDP, PPP)" = "BG.KLT.DINV.GD.PP.ZS" , # "Gross foreign direct investment (% of GDP)" = "BG.KLT.DINV.GD.ZS" , # "Wage bill as a percentage of GDP" = "BI.WAG.TOTL.GD.ZS" , # "Merchandise imports (BOP): percentage of GDP (%)" = "BM.GSR.MRCH.ZS" , # 
"Foreign direct investment, net outflows (% of GDP)" = "BM.KLT.DINV.GD.ZS" , # "Foreign direct investment, net outflows (% of GDP)" = "BM.KLT.DINV.WD.GD.ZS" , # "Current account balance (% of GDP)" = "BN.CAB.XOKA.GD.ZS" , # "Current account balance (% of GDP)" = "BN.CAB.XOKA.GDP.ZS" , # "Curr. acc. bal. before official transf. (% of GDP)" = "BN.CAB.XOTR.ZS" , # "Current account balance excluding net official capital grants (% of GDP)" = "BN.CUR.GDPM.ZS" , # "Net income (% of GDP)" = "BN.GSR.FCTY.CD.ZS" , # "Foreign direct investment (% of GDP)" = "BN.KLT.DINV.CD.ZS" , "GDP per capita, PPP (constant 2011 international $)" = "NY.GDP.PCAP.PP.KD", "Urban population (% of total)" = "SP.URB.TOTL.IN.ZS"), selected = "NY.GDP.PCAP.PP.KD" ), sliderInput(inputId = "year1", label = h3("Select year"), min = 1990, max = 2018, value = 2018, step = 1, animate = F ), sliderInput("slider2", label = h3("Position in the ranking"), min = 1, max = 215, value = c(1, 10) ) # , # checkboxGroupInput("region", label = h3("Choose area"), # choices = regions) ), mainPanel( plotlyOutput(outputId = "international_comparison") ) ) ) ) ) server <- function(input, output) { ### reactive expressions ### country_code <- reactive({ countries %>% filter(V2 == input$country) -> tmp as.character(tmp[,1]) }) population_data <- reactive({ piramida_ma <- c("SP.POP.0004.MA","SP.POP.0509.MA","SP.POP.1014.MA","SP.POP.1519.MA", "SP.POP.2024.MA","SP.POP.2529.MA","SP.POP.3034.MA","SP.POP.3539.MA", "SP.POP.4044.MA","SP.POP.4549.MA","SP.POP.5054.MA","SP.POP.5559.MA", "SP.POP.6064.MA","SP.POP.6569.MA","SP.POP.7074.MA","SP.POP.7579.MA", "SP.POP.80UP.MA") datatmp_ma <- WDI(indicator = piramida_ma, country = country_code(), extra = T) datatmp_ma$id <- rownames(datatmp_ma) datatmp_ma %>% select(id, country, year, SP.POP.0004.MA:SP.POP.80UP.MA) -> datatmp_ma datatmp_ma_narrow <- gather(datatmp_ma, age, population, SP.POP.0004.MA:SP.POP.80UP.MA) datatmp_ma_narrow %>% mutate(gender = "Male") -> datatmp_ma_narrow 
datatmp_ma_narrow$age <- paste(substr(datatmp_ma_narrow$age,8,9), "-", substr(datatmp_ma_narrow$age,10,11)) datatmp_ma_narrow$population <- -datatmp_ma_narrow$population piramida_fe <- c("SP.POP.0004.FE","SP.POP.0509.FE","SP.POP.1014.FE","SP.POP.1519.FE", "SP.POP.2024.FE","SP.POP.2529.FE","SP.POP.3034.FE","SP.POP.3539.FE", "SP.POP.4044.FE","SP.POP.4549.FE","SP.POP.5054.FE","SP.POP.5559.FE", "SP.POP.6064.FE","SP.POP.6569.FE","SP.POP.7074.FE","SP.POP.7579.FE", "SP.POP.80UP.FE") datatmp_fe <- WDI(indicator = piramida_fe, country = country_code(), extra = T) datatmp_fe$id <- rownames(datatmp_fe) datatmp_fe %>% select(id, country, year, SP.POP.0004.FE:SP.POP.80UP.FE) -> datatmp_fe datatmp_fe_narrow <- gather(datatmp_fe, age, population, SP.POP.0004.FE:SP.POP.80UP.FE) datatmp_fe_narrow %>% mutate(gender = "Female") -> datatmp_fe_narrow datatmp_fe_narrow$age <- paste(substr(datatmp_fe_narrow$age,8,9), "-", substr(datatmp_fe_narrow$age,10,11)) datatmp <- rbind(datatmp_ma_narrow, datatmp_fe_narrow) return(datatmp) }) TFR_data <- reactive({ WDI(indicator = "SP.DYN.TFRT.IN", country = country_code(), extra = T) %>% rename(TFR = SP.DYN.TFRT.IN) -> datatmp return(datatmp) }) ### range <- reactive({ return(seq(input$slider2[1], input$slider2[2])) }) data_indicator <- reactive({ WDI(indicator = as.character(input$indicator100), extra = T) -> temp return(temp) }) year_indicator <- reactive({ data_indicator() %>% filter(year == input$year1) -> temp return(temp) }) region_indicator <- reactive({ if(is.null(input$region)){ year_indicator() %>% filter(region != "NA") %>% arrange(-.[,3]) -> temp } else { year_indicator() %>% filter(region != "NA") %>% filter(region == input$region) %>% arrange(-.[,3]) -> temp } return(temp) }) ############################################# output$distPlot <- renderPlot({ x <- gdp_growth_an %>% filter(year == input$selectedyear) %>% select(NY.GDP.MKTP.KD.ZG) x$positive <- (x$NY.GDP.MKTP.KD.ZG >= 0) bins <- seq(min(x$NY.GDP.MKTP.KD.ZG, na.rm = T), 
max(x$NY.GDP.MKTP.KD.ZG, na.rm = T), length.out = input$bins + 1) p1 <- ggplot(x, aes(x=NY.GDP.MKTP.KD.ZG, fill = positive)) + geom_histogram(bins = input$bins + 1, colour = "white") + geom_vline(xintercept = gdp_growth_an_world %>% filter(year == input$selectedyear) %>% select(NY.GDP.MKTP.KD.ZG) %>% as.numeric -> temp, linetype = "dashed") + geom_text(aes(x=temp, label=paste("\nworld avg: ", round(temp,1), "%"), y = 0, hjust=0), colour="black", angle=90) + theme_minimal() + scale_x_continuous(name = "GDP growth", limits = c(input$growthrange[1], input$growthrange[2]))+ ggtitle("Distribution of GDP growth - number of countries") + theme(plot.title = element_text(hjust = 0.5)) x2 <- gdp_level_2011 %>% filter(year == input$selectedyear) %>% select(change, gdp_lvl2010const) x2$positive <- (x2$change >= 0) p2 <- ggplot(x2, aes(x=change, weight=gdp_lvl2010const/10^12, fill = positive)) + geom_histogram(bins= input$bins + 1, colour = "white") + theme_minimal() + geom_vline(xintercept = gdp_growth_an_world %>% filter(year == input$selectedyear) %>% select(NY.GDP.MKTP.KD.ZG) %>% as.numeric, linetype = "dashed")+ geom_text(aes(x=temp, label=paste("\nworld avg: ", round(temp,1), "%"), y = 0, hjust=0), colour="black", angle=90) + scale_x_continuous(name = "GDP growth", limits = c(input$growthrange[1], input$growthrange[2]))+ scale_y_continuous(name = "GDP in trillion US dollars, 2010 prices")+ ggtitle("Distribution of GDP growth - size of GDP") + theme(plot.title = element_text(hjust = 0.5)) grid.newpage() grid.draw(rbind(ggplotGrob(p1), ggplotGrob(p2), size = "last")) }) ##### output$population_total <- renderPlotly({ population_data() %>% mutate(population = abs(population)) %>% group_by(year) %>% summarise(population_total = sum(population)) -> temp ggplot(temp, aes(x=year, y=population_total)) + geom_line() + theme_classic() + geom_vline(xintercept = input$year, linetype = "dashed") + # geom_text(aes(x = input$year+0.5, # label = paste0(temp %>% filter(year == input$year) 
%>% select(population_total) %>% as.numeric %>% round(-5)/10^6, "m"), # y = temp %>% filter(year == input$year) %>% select(population_total) %>% as.numeric()*1.08, hjust=0), colour="black", angle=90) + scale_y_continuous(limits = c(0, 1.1*max(temp$population_total, na.rm = T)), name = "Population in total", labels = f <- function(x){ return(paste0(abs(x)/10^6, "m")) }) + ggtitle(paste("Population of", input$country, "in total")) + theme(plot.title = element_text(hjust = 0.5)) -> p3 p3 <- ggplotly(p3) p3 }) output$population_structure <- renderPlotly({ validate( need(!(population_data() %>% filter(year == input$year) %>% select(population) %>% anyNA), 'Data not complete') ) pyramid <- ggplot(population_data() %>% filter(year == input$year) -> tmp, aes(x = age, y = population, fill = gender)) + geom_bar(data = tmp1 <- subset(tmp, gender == "Female") , stat = "identity") + geom_bar(data = tmp2 <- subset(tmp, gender == "Male") , stat = "identity") + scale_y_continuous(limits = 1.05*c(-max(max(-population_data()$population, na.rm=T), max(population_data()$population, na.rm = T)), max(max(-population_data()$population, na.rm=T), max(population_data()$population, na.rm = T))), labels = f <- function(x){ return(paste0(abs(x)/10^6, "m")) }, name = "number of people")+ theme_classic() + ggtitle(paste("The age structure of the population:", input$country)) + theme(plot.title = element_text(hjust = 0.5)) + coord_flip() pyramid <- ggplotly(pyramid) pyramid }) output$TFR <- renderPlotly({ ggplot(TFR_data() -> temp, aes(x=year, y=TFR)) + geom_line() + theme_classic() + geom_vline(xintercept = input$year, linetype = "dashed") + geom_hline(yintercept = 2.1, linetype = "dashed", colour = "red") + # geom_text(aes(x = input$year+0.5, # label = temp %>% filter(year == input$year) %>% select(TFR) %>% as.numeric, # y = temp %>% filter(year == input$year) %>% select(TFR) %>% as.numeric()*1.5, hjust=0), colour="black", angle=90) + scale_y_continuous(limits = c(0, 1.1*max(temp$TFR, na.rm = 
T))) + ggtitle(paste("Total fertility rate of", input$country)) + theme(plot.title = element_text(hjust = 0.5)) -> tmp tmp <- ggplotly(tmp) tmp }) output$international_comparison <- renderPlotly({ temp <- region_indicator()[range(),] countries_tmp <- temp[,1:2] rownames(countries_tmp) <- countries_tmp[,1] ggplot(temp , aes(x = reorder(iso2c, -temp[,3]), y = temp[,3],#)#)+ text = paste('country: ', countries_tmp[as.character(temp$iso2c),2], '<br>value:', round(temp[,3])))) + geom_bar(stat = "identity", aes(fill = "darkorange3")) + theme_classic() + scale_y_continuous(name = tmp <- (WDIsearch(tmp <- colnames(temp)[3], field = "indicator") -> temp2) %>% is.vector %>% ifelse(., return(t(temp2)), return(temp2)) %>% as.data.frame %>% filter(indicator == tmp) %>% select(name) %>% .[1,] %>% as.character) + scale_x_discrete(name = "country code") + ggtitle("Comparison of countries") + theme(plot.title = element_text(hjust = 0.5), axis.text.x = element_text(angle = 90, size = 8, vjust =.5), axis.text.y = element_text(angle = 0, size = 8, vjust =.5), legend.position = "none") + guides(fill=FALSE) -> tmp tmp <- ggplotly(tmp, tooltip = c("text")) tmp }) } shinyApp(ui = ui, server = server)
366c3645d7500a4f0cbd492a48524a2d17c0265f
7b2aa85d3249b3c1ef4a0db5512b9bb2b152984c
/R/data.R
3a74548e6faab3b61990cbf6f291ef29ce69342e
[]
no_license
ZW-xjtlu/RbashGEO
5aed85a80e02665a614758590f4ddc99d621e0af
50d77c6b2d03e7635434740070368b63ae282fb0
refs/heads/master
2021-09-12T13:17:04.946456
2018-04-17T05:28:23
2018-04-17T05:28:23
null
0
0
null
null
null
null
UTF-8
R
false
false
286
r
data.R
#' The example Coldata data.frame #' #' #' #' @format A \code{data.frame} #' \describe{ #' As long as the table contains a collumn of the file names, and indicating it is paired or single end library, then it should work fine. #' } #' #' @usage Coldata_example #' "Coldata_example"
2a674368e9e2abb5b0635696ad64010ae4504771
7aa60ad6e7aebc90dffbe6d6de2d9e431de30601
/man/area_cal.Rd
6c80bdd06f00839c7fb25934edd94363fcc62efb
[ "MIT" ]
permissive
zhujiedong/jiptest
f16a07730fdaf5183e0a6f4b5f8f9731b79ae4bb
19eadc914c3481f218788b64c5f6d30e4c89b80e
refs/heads/main
2022-05-21T20:39:16.406455
2022-05-09T02:00:57
2022-05-09T02:00:57
313,541,102
1
0
null
null
null
null
UTF-8
R
false
true
577
rd
area_cal.Rd
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/area_cal.R \name{area_cal} \alias{area_cal} \title{calculate the area under curve (BLUE > 1.2.2)} \usage{ area_cal(df, use_PAM = FALSE) } \arguments{ \item{df}{data of a type dataframe.} \item{use_PAM}{indicate to use PAM or continuous fluorescence signals} } \description{ use a method similar method like trapezium intergration } \details{ use Soto's answer as https://stackoverflow.com/questions/4954507/calculate-the-area-under-a-curve } \examples{ \dontrun{ library(jiptest) area_cal(df) } }
882e32d4ab94a0b551a68e20d0d31f2a9c0bfa16
57b6bc2896092a29cbd829199359f96be3da7572
/2020/R/explore.R
c1fef85fa19cd2220ddcba4c1ec2824d6ab34ab1
[]
no_license
azambranog/hash_code
1a6b43c8af3388df1bba1e4750e092825eca6886
0e0787d72712d7084bb8c397e3e70c372ee1e3f0
refs/heads/master
2022-03-13T15:20:22.978505
2022-02-24T21:39:54
2022-02-24T21:39:54
239,148,802
0
0
null
null
null
null
UTF-8
R
false
false
894
r
explore.R
library(data.table) filesx <- list.files('data', full.names = T) for (f in filesx) { message(f) data <- readLines(f) D <- as.numeric(strsplit(data[1], ' ')[[1]][3]) books <- as.numeric(strsplit(data[2], ' ')[[1]]) books <- data.table(score = books) books[, book := .I -1] libs <- tail(data, -2) L <- lapply(as.list(seq(1,length(libs), by = 2)), function(i) { libinfo <- as.numeric(strsplit(libs[i], ' ')[[1]]) libbooks <- data.table(book = as.numeric(strsplit(libs[i+1], ' ')[[1]])) libbooks[, lib := (i-1)/2] libbooks[, sign := libinfo[2]] libbooks[, bpd := libinfo[3]] }) L <- rbindlist(L) L <- merge(L, books, by = 'book') data <- list(data = L, D = D) ff <- gsub('^(.{1}).*$', '\\1', basename(f)) saveRDS(data, file.path('clean', paste0(ff, '.rds'))) message('DONE FILE') }
8ae24fc96ea777ed52f9d852ae6f6ae64c78b2db
0a906cf8b1b7da2aea87de958e3662870df49727
/diffrprojects/inst/testfiles/dist_mat_absolute/libFuzzer_dist_mat_absolute/dist_mat_absolute_valgrind_files/1609960793-test.R
48307a306425fedc8b210d7a4479beba60dfb850
[]
no_license
akhikolla/updated-only-Issues
a85c887f0e1aae8a8dc358717d55b21678d04660
7d74489dfc7ddfec3955ae7891f15e920cad2e0c
refs/heads/master
2023-04-13T08:22:15.699449
2021-04-21T16:25:35
2021-04-21T16:25:35
360,232,775
0
0
null
null
null
null
UTF-8
R
false
false
784
r
1609960793-test.R
testlist <- list(x = c(-134225921L, NA, -256L, 0L, 16777215L, -1537L, -687865865L, NA, -1895825409L, -48897L, -702926875L, -1L, -1L, -702926849L, 673513472L, 15728639L, -1L, -42L, 439353343L, 620756992L, -268435712L, 16777215L, -687865857L, -2686977L, -134225921L, 405405516L, -162783703L, 692857129L, 1291845623L, -2107878L, 802619391L, 1277100031L, -10726L, 805306367L, -10497L), y = c(-1L, 692866262L, 858993459L, -10479057L, -24673L, -553638698L, 439353343L, -1L, -256L, 0L, 0L, 409104239L, 1814571619L, 1819243365L, 1466527309L, 1634752105L, 692854313L, -42L, 1377447756L, 704643071L, 405405516L, NA, 692857129L, 1289158614L, 438697942L, -2133452288L, NA, -2049L, -539616721L, -11788545L, -42L)) result <- do.call(diffrprojects:::dist_mat_absolute,testlist) str(result)
a6629e0d98ea9f91613769fae7b406f259da4f0a
77eb1ef8d5984f3e2e925991d56faf4a24707850
/rocketpackMM/R/yourncurve.R
4ea18eba73b556e4825b01eb42ab0117877058d8
[]
no_license
mm4753-12/rocketpackMM
3d3b6837aae9a8a50fcf716da6bfbb9497b170dc
220c0d2e8a3fa0de40d5f4fac8b3fa53dde22853
refs/heads/main
2023-01-19T15:38:39.594540
2020-11-19T05:35:38
2020-11-19T05:35:38
314,137,818
0
0
null
null
null
null
UTF-8
R
false
false
524
r
yourncurve.R
#' YOURNCURVE #' #' @param A value for a variable distributed normally, a mean, and a standard deviation. #' #' @return A density curve, an area, and a probability. #' @export #' #' @examples #' \dontrun yourncurve(6,10,5) yourncurve = function(Y,mu,sigma){ curve(dnorm(x,mean=mu,sd=sigma),xlim=c(mu-3*sigma,mu+3*sigma)) xcurve=seq(Y-10*sigma, Y, length=10000) ycurve=dnorm(xcurve,mean=mu,sd=sigma) polygon(c(Y-10*sigma,xcurve,Y),c(0,ycurve,0),col="Light Green") list(round(pnorm(Y,mu,sigma),4)) }
3382f2621cefe0773595fa62bf522e0d3fb8f01f
b8deda0293025b126958530c589899fd43ea8197
/arima_garch.r
fe86592c25c30a36b1752655f4ef817bcf0c40c2
[]
no_license
Vikas1667/AnomalyDetectionOnRisk
64c87d4ff6230a17bc1c7683bc5fcaff978eee76
7ed7fcce5aa22d308b76b1fb6bfe6a2c228f9510
refs/heads/master
2023-03-21T17:36:37.400139
2018-05-31T16:18:28
2018-05-31T16:18:28
null
0
0
null
null
null
null
UTF-8
R
false
false
5,943
r
arima_garch.r
#"quantmod---------------------------------------------------------------------" #install.packages("quantmod", repos='https://ftp.acc.umu.se/mirror/CRAN/') #"lattice---------------------------------------------------------------------" #install.packages("lattice", repos='https://ftp.acc.umu.se/mirror/CRAN/') #"timeSeries---------------------------------------------------------------------" #install.packages("timeSeries", repos='https://ftp.acc.umu.se/mirror/CRAN/') #"rugarch---------------------------------------------------------------------" #install.packages("rugarch", repos='https://ftp.acc.umu.se/mirror/CRAN/') "Packages installed." # defining a function is.installed <- function(mypkg) is.element(mypkg, installed.packages()[,1]) checkNecessaryPackages <- function(){ "timeSeries:" library("timeSeries") is.installed('timeSeries') "quantmod:" library("quantmod") is.installed('quantmod') "lattice:" library("lattice") is.installed('lattice') "nloptr:" library("nloptr") is.installed('nloptr') "rugarch:" library("rugarch") is.installed('rugarch') } library("rugarch") #print("RUGARCH IS INSTALLED:") is.installed('rugarch') args <- commandArgs(trailingOnly = TRUE) datatype <- args[1] size <- args[2] nr_of_series <- args[3] differentiation <- args[4] #print(datatype) #print(size) file_name = paste("garch/", datatype, "_", size, ".csv", sep="") #print(file_name) #Convert strings to intergers size_string = size size = strtoi(size) nr_of_series = strtoi(nr_of_series) return_series = read.csv(file_name, header=FALSE) tmp_serie = return_series[1,] tmp_time_serie = as.ts(tmp_serie) windowLength = as.integer(length(tmp_time_serie)*0.10) forecasts_for_all_series=matrix(nrow=nr_of_series, ncol=size-windowLength, byrow = TRUE) # fill matrix by rows sigmas_for_all_series =matrix(nrow=nr_of_series, ncol=size-windowLength, byrow = TRUE) # fill matrix by rows ##print(forecasts_for_all_series) #Judge Order #final.bic <- Inf final.order <- c(2,0,2) #first_serie = return_series[1,] 
#first_serie_window = as.ts(first_serie[(1):(windowLength)]) #for (p in 0:5) for (q in 0:5) { # if ( p == 0 && q == 0) { # next # } # # arimaFit = tryCatch( arima(first_serie_window, order=c(p, 0, q)), # error=function( err ) FALSE, # warning=function( err ) FALSE ) # ##print(arimaFit) # if( !is.logical( arimaFit ) ) { # ##print('actually managed to fit') # ##print(p) # ##print(q) # current.bic <- BIC(arimaFit) # if (current.bic < final.bic) { # final.bic <- current.bic # final.order <- c(p, 0, q) # final.arima <- arima(first_serie_window, order=final.order) # } # } else { # ##print('didnt managed to fit') # next # } #} print("Final order: ") print(final.order) #Start modellling for (serie_nr in 1:nr_of_series) { one_serie = return_series[serie_nr,] one_time_serie = as.ts(one_serie) if(differentiation == "Once") { one_time_serie = c(0, diff(one_time_serie[1:length(one_time_serie)])) } foreLength = length(one_time_serie) - windowLength - 1 forecasts <- vector(mode="character", length=foreLength) sigmas <- vector(mode="character", length=foreLength) truths <- vector(mode="character", length=foreLength) spec = ugarchspec( variance.model=list(garchOrder=c(1,1)), mean.model=list(armaOrder=final.order, include.mean=TRUE), #mean.model=list(armaOrder=c(final.order[1], final.order[3]), include.mean=TRUE), distribution.model="sged") d = 0 max_roll = 100 while (d < foreLength) { one_serie_window = as.ts(one_time_serie[(1+d):(windowLength+d)]) datapoints_left_in_serie = foreLength - d nr_of_rolls = min(datapoints_left_in_serie, max_roll) one_serie_window_future = as.ts(one_time_serie[(1+d):(windowLength+d+nr_of_rolls)]) #one_diffed_serie_window = one_serie_window #diff(one_serie_window) #spec = ugarchspec( # variance.model=list(garchOrder=c(1,1)), # mean.model=list(armaOrder=c(1, 1), include.mean=TRUE), # #mean.model=list(armaOrder=c(final.order[1], final.order[3]), include.mean=TRUE), # distribution.model="sged") model<-ugarchspec(variance.model = list(model = "sGARCH", 
garchOrder = c(1, 1), variance.targeting=TRUE), mean.model = list(armaOrder = c(1, 1), include.mean = TRUE), distribution.model = "norm") modelfit<-ugarchfit(spec=model,data=one_serie_window, solver='hybrid') #print(coef(modelfit)) if(is.null(coef(modelfit))){ print("FAILED to CONVERGE") #Handles exception naively forecasts[d+1] = 0 sigmas[d+1] = 1 d = d + 1 } else{ spec = getspec(modelfit); setfixed(spec) <- as.list(coef(modelfit)) fore = ugarchforecast(spec, n.ahead=1, n.roll = nr_of_rolls, data = one_serie_window_future, out.sample = nr_of_rolls) for (index in 0:nr_of_rolls) { forecasts[d+index+1] = fitted(fore)[index+1] sigmas[d+index+1] = sigma(fore)[index+1] } d = d + nr_of_rolls } } forecasts_for_all_series[serie_nr,] = forecasts sigmas_for_all_series[serie_nr,] = sigmas cat(".") } forecasts_for_all_series <- mapply(forecasts_for_all_series, FUN=as.numeric) sigmas_for_all_series <- mapply(sigmas_for_all_series, FUN=as.numeric) forecasts_for_all_series <- matrix(data=forecasts_for_all_series, ncol=foreLength+1, nrow=nr_of_series) sigmas_for_all_series <- matrix(data=sigmas_for_all_series, ncol=foreLength+1, nrow=nr_of_series) file_name_mean = paste("forecasts_mean_", datatype, "_", size, ".csv", sep="") write.table(forecasts_for_all_series, row.names=FALSE, col.names=FALSE, file = file_name_mean) file_name_var = paste("forecasts_variance_", datatype, "_", size, ".csv", sep="") write.table(sigmas_for_all_series, row.names=FALSE, col.names=FALSE, file = file_name_var) print("End of R Program")
16356a8ddad1b80e1802dc631b3c3194d2434ebc
c1bf7397ddc833b7ba9617e22ee2bdc86e646ee4
/rCode/aag_rank.R
9a3417bae7018f6d5990932ea7db52d6696bb65e
[]
no_license
cxq914/Data-Science
1b5f6f9ac089d098b8d6c91d722dbf70f75e5613
6f9648cf323f8cfbcd69aa6b97991f9162013d34
refs/heads/master
2021-01-19T18:55:56.696529
2016-02-18T00:34:11
2016-02-18T00:34:11
21,634,205
0
0
null
null
null
null
UTF-8
R
false
false
5,807
r
aag_rank.R
library(data.table) #########identify the best and worst restaurant########### fileName2 <- paste('Applebee Data/Result/Data with Payment( 2015-11-23 - 2016-01-03 ).xlsx') merged <- read.xlsx(fileName2,detectDates = TRUE) aag.sub <- subset(merged,merged$Parent.Account=='Apple American Group, LLC (AAG)') res.dat <- aag.sub[,c(3,4,12,19)] res.dat$average.NetRevenue.perNBNTG <- round(res.dat$`199.Net.Sales`/res.dat$DineInNoBarStoolTickest,2) res.dat$weekly <- as.Date(cut(res.dat$`Business.Day`,breaks = "week",start.on.monday = TRUE)) res.avg <- aggregate(res.dat$average.NetRevenue.perNBNTG,by=list(res.dat$`Restaurant.Number`,res.dat$weekly),data=res.dat,mean) colnames(res.avg) <- c('Restaurant.Number','Week','Average.NetRevenue.per.NBNT') res.avg$`Average.NetRevenue.per.NBNT` <- round(res.avg$`Average.NetRevenue.per.NBNT`,3) period <- unique(res.avg$Week) n <- length(period) rank.dat <- as.data.frame(unique(res.dat$Restaurant.Number)) colnames(rank.dat) <- 'Restaurant.Number' j=1 for (i in 1:n) { sub <- subset(res.avg,res.avg$Week==period[i]) sub$rank <- rank(-sub$`Average.NetRevenue.per.NBNT`,ties.method= "first") sub<-sub[,c(1,3,4)] rank.dat <- merge(rank.dat,sub,by="Restaurant.Number",all.x = TRUE) colnames(rank.dat) <- c(colnames(rank.dat)[1:j],paste('NetRevenue',period[i]),paste('Rank',period[i])) j = j+2 } ########################analysis################## rank.dat[is.na(rank.dat)] <- 0 for (i in 1:6) { cut1 <- quantile(rank.dat[,(i*2)],probs=c(0.9,0.8,0.7,0.6,0.5,0.4,0.3,0.2,0.1,0)) #a <- rbind(a,cut1) level <- cut(rank.dat[,(i*2)],breaks=c(0.5,cut1),include.lowest = TRUE,right = TRUE) levels(level) <- c(10,9,8,7,6,5,4,3,2,1) rank.dat <- cbind(rank.dat,level) } colnames(rank.dat) <- c(colnames(rank.dat)[1:13], 'week1','week2','week3','week4','week5','week6') ################Transition Matrix####### ranking.info <- createWorkbook() week12.dat <- addWorksheet(wb=ranking.info,sheetName = 'Week1-Week2') week13.dat <- addWorksheet(wb=ranking.info,sheetName = 
'Week1-Week3') week14.dat <- addWorksheet(wb=ranking.info,sheetName = 'Week1-Week4') week15.dat <- addWorksheet(wb=ranking.info,sheetName = 'Week1-Week5') week16.dat <- addWorksheet(wb=ranking.info,sheetName = 'Week1-Week6') for (i in 16:20) { week <- as.matrix(table(rank.dat.na.rm[,15],rank.dat.na.rm[,i])) week <- week[10:1,10:1] week.per <- matrix(data = 0,nrow = 10,ncol = 10) for (j in 1:10) { week.per[j,] <- percent(round(week[j,]/sum(week[j,]),2)) } colnames(week.per) <- c('Level1','Level2','Level3','Level4','Level5','Level6','Level7', 'Level8','Level9','Level10') writeData(ranking.info,i-15,week,rowNames = TRUE,colNames = TRUE) writeData(ranking.info,i-15,week.per,startRow = 14,rowNames = TRUE,colNames = TRUE) } saveWorkbook(ranking.info,'RestaurantRank_Analysis_final.xlsx') ####################second method######################## ranking.info1 <- createWorkbook() week12.dat <- addWorksheet(wb=ranking.info1,sheetName = 'Week1-Week2') week23.dat <- addWorksheet(wb=ranking.info1,sheetName = 'Week2-Week3') week34.dat <- addWorksheet(wb=ranking.info1,sheetName = 'Week3-Week4') week45.dat <- addWorksheet(wb=ranking.info1,sheetName = 'Week4-Week5') week56.dat <- addWorksheet(wb=ranking.info1,sheetName = 'Week5-Week6') for (i in 15:19) { week <- as.matrix(table(rank.dat.na.rm[,i],rank.dat.na.rm[,i+1])) week <- week[10:1,10:1] week.per <- matrix(data = 0,nrow = 10,ncol = 10) for (j in 1:10) { week.per[j,] <- percent(round(week[j,]/sum(week[j,]),2)) } colnames(week.per) <- c('Level1','Level2','Level3','Level4','Level5','Level6','Level7', 'Level8','Level9','Level10') writeData(ranking.info1,i-14,week,rowNames = TRUE,colNames = TRUE) writeData(ranking.info1,i-14,week.per,startRow = 14,rowNames = TRUE,colNames = TRUE) } saveWorkbook(ranking.info1,'RestaurantRank_Analysis_method2.xlsx') rank.dat.na.rm$general_level <- NULL write.xlsx(rank.dat,'Applebee Data/Restaurant_Rank_Level_aag.xlsx') ######### res.rank <- read.xlsx('Applebee Data/Restaurant_Rank_Level_aag.xlsx') 
res.rank[,14] <- as.numeric(res.rank[,14]) res.rank[,15] <- as.numeric(res.rank[,15]) res.rank[,16] <- as.numeric(res.rank[,16]) res.rank[,17] <- as.numeric(res.rank[,17]) res.rank[,18] <- as.numeric(res.rank[,18]) res.rank[,19] <- as.numeric(res.rank[,19]) res.rank$rank_avg <- round((res.rank$week1+res.rank$week2+res.rank$week3 +res.rank$week4+res.rank$week5+res.rank$week6)/6,2) res.rank$avg_netRevenue <- round((res.rank[,2]+res.rank[,4]+res.rank[,6] +res.rank[,8]+res.rank[,10]+res.rank[,12])/6,2) #res.rank$type <- ifelse(res.rank$rank_avg<2,'Top10%',ifelse(res.rank$rank_avg>=9,'Bottom10%','others')) res.rank <- res.rank[order(res.rank$rank_avg),] res.rank$type <- c(rep('Top10%',49),rep('10%-20%',48),rep('20%-30%',48),rep('30%-40%',48), rep('40%-50%',48),rep('50%-60%',48),rep('60%-70%',48),rep('70%-80%',48), rep('80%-90%',48),rep('Bottom10%',49)) #top <- res.rank[1:120,] #bottom <- res.rank[1051:1170,] #top$type <- 'Top10%' #bottom$type <- 'Bottom10%' #rest.list <- rbind(top,bottom) #write.xlsx(rest.list,'TopBottomRestaurants.xlsx') total.dat <- read.xlsx('Applebee Data/Result/Data with Payment( 2015-11-23 - 2016-01-03 ).xlsx',detectDates = TRUE) #aggregated.dat <- aggregate(.~Restaurant.Number,data = rest.merged,mean) res.rank.sub <- res.rank[,c(1,20,21,22)] rest.rank.merged <- merge(total.dat,res.rank.sub,by='Restaurant.Number') rest.rank.merged <- rest.rank.merged[,c(1,2,4,9:12,17:26,3,27,28:33,5,6,34:36)] write.xlsx(rest.rank.merged,'Applebee Data/merged_restaurant_rank_aag.xlsx')
f01ae2cb9e1ad5a58359a32100761727b67ac152
2bb4abd3c6418f52aeb8591aa389d145f1fc4dc1
/cachematrix.R
9338efcd7e34c3f240da30e32d110cf813a59d77
[]
no_license
reg401/ProgrammingAssignment2
1cdb676b27a1de1c857aae3c1ba3dd8515ce103e
94f594ea4f7989d79b8364b99d433153f84b1d36
refs/heads/master
2021-01-15T11:42:38.712484
2015-09-25T05:36:46
2015-09-25T05:36:46
43,109,819
0
0
null
2015-09-25T04:34:00
2015-09-25T04:33:59
null
UTF-8
R
false
false
3,547
r
cachematrix.R
## The following file contains 2 functions to address the requirments set in:R Programming - Programming Assignment 2 - Caching the Inverse of a Matrix ## The file also contains 2 test functions - one using the cache to retrieve the inverse matrix from the cache and the other doesn't. ## ## Please note that it is assumed that the matrix supplied to the functions is always invertible. ## ######################################################### F U N C T I O N S ######################################################### ## This function creates a special "matrix" object that can cache its inverse. ## The following functions are available for the matrix object created: ## set - set the value of the matrix ## get - get the value of the matrix ## setInv - set the value of the inverse matrix ## getInv - get the value of the inverse matrix makeCacheMatrix <- function(x = matrix()) { ## First, set the inverse matrix variable to NULL im <- NULL ## This function applies the passed matrix (pm) to the special "matrix" object (x) and sets the inverse matrix variable (im) to NULL set <- function(pm) { x <<- pm im <<- NULL } ## This function returns the special "matrix" object get <- function() x ## This function sets the inverse matrix of the original matrix in the special "matrix" object setInv <- function(solve) im <<- solve ## This function gets the inverse matrix of the original matrix from the special "matrix" object getInv <- function() im ## The list of available functions list(set = set, get = get, setInv = setInv, getInv = getInv) } ## This function computes (and returns) the inverse of the special "matrix" (x) returned by makeCacheMatrix function above. ## If the inverse has already been calculated then this function should retrieve the inverse matrix from the special "matrix" cache. cacheSolve <- function(x, ...) 
{ ## First, try to get the inverse matrix (im) from the passed matrix (x) im <- x$getInv() ## Check if the im is null, if it's not, we are actually retrieving the im from the cache, return the im value and exist the function if(!is.null(im)) { message("getting inverse matrix cached data") return(im) } ## Too bad, no im value in the cache, let's get the original matrix data data <- x$get() ## Let's create the inverse matrix (im) im <- solve(data, ...) ## Set the inverse matrix in the special "matrix" object x$setInv(im) ## Return the inverse matrix ## im variable can be used ## However, returning the inverse matrix stored in the special "matrix" (X) ensures that the set function is working properly x$getInv() } ######################################################### T E S T I N G ######################################################### ##Test the above functions, not getting the cached inverse matrix testNoCache <- function(){ ## Create the test matrix tm <- matrix(c(1, 2, 3, 4), nrow=2, ncol=2) ## Add the matrix to the special "matrix" mt <- makeCacheMatrix(tm) ## inverse the matrix and print it im <- cacheSolve(mt) im } ##Test the above functions, get the cached inverse matrix testCache <- function(){ ## Create the test matrix tm <- matrix(c(1, 2, 3, 4), nrow=2, ncol=2) ## Add the matrix to the special "matrix" mt <- makeCacheMatrix(tm) ## inverse the matrix im <- cacheSolve(mt) ## get the inverse matrix from the cache and print it. This should also trigger a "getting inverse matrix cached data" message. cim <- cacheSolve(mt) cim }
da9ecf2f921125c49264997dbbfaf17da8e51ba5
f73882ccd9a9a2db72017bca597b06466524b050
/R/auth.r
ce57994e1c493ff700746d465ccc922cc330a63f
[ "LicenseRef-scancode-us-govt-public-domain", "LicenseRef-scancode-warranty-disclaimer", "LicenseRef-scancode-unknown-license-reference" ]
permissive
usgs-lcmap/client-r
f6e377157194b7db77dbb0041d2a155dfdcdf677
929128689b20fd92a7399ba892b86f7c38ed81fd
refs/heads/master
2021-05-31T04:16:50.261908
2016-03-24T04:34:59
2016-03-24T04:34:59
null
0
0
null
null
null
null
UTF-8
R
false
false
557
r
auth.r
#' Authenticate to LCMAP using ERS #' #' @param username username #' @param password password #' @param version version #' @export #' @examples #' login("alice", "secret") #' login("alice", "secret", "2.0") login <- function (username, password, version) { if (missing(version)) { version<-lcmap::defaultAPIVersion } cfg<-lcmap::getCfg() payload<-list(username=cfg$username, password=cfg$password) result<-lcmap::post(lcmap::routes$loginContext, version, body=payload, encode="form") return(result) }
afbd66670a15853a9933724704c27ff680f2f191
1aa29f155cdf9bf9b17bcc5699a9bf0c20b82993
/man/tianComponent.Rd
bdebeb4fcb450dc4db9ed527d3a31e7adbf6f7b6
[]
no_license
Lucaweihs/SEMID
d165471e59e41ce1437d642d60370854ecf8ddd7
a079c69bde921f105273e20f40708d3e851ec011
refs/heads/master
2023-08-03T08:34:41.337736
2023-07-19T12:09:33
2023-07-19T12:09:33
48,009,470
5
1
null
2023-07-21T09:11:36
2015-12-15T00:23:48
R
UTF-8
R
false
true
462
rd
tianComponent.Rd
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/MixedGraph.R \name{tianComponent} \alias{tianComponent} \alias{tianComponent.MixedGraph} \title{Returns the Tian c-component of a node} \usage{ tianComponent(this, node) \method{tianComponent}{MixedGraph}(this, node) } \arguments{ \item{this}{the mixed graph object} \item{node}{the node for which to return its c-component} } \description{ Returns the Tian c-component of a node }
5df4a27eb69b75868207d5610c87e1dd1c318779
257ebc3fac290fb366f26d2559f870d2e0994c8a
/load_libraries.R
4771135cf9defd64e83a94702a905f7c93c0a116
[]
no_license
cordura21/risk_consumption_backtest
cbbc51164c4db8f7c1ab95a0bbb3b2177ed3b722
7a841a2fa5087e887e5f233b504d52ccbbb5249f
refs/heads/master
2021-01-18T01:26:07.759629
2016-09-13T22:15:48
2016-09-13T22:15:48
68,021,022
0
0
null
null
null
null
UTF-8
R
false
false
184
r
load_libraries.R
library(dplyr) library(lubridate) library(xts) library(zoo) library(TTR) library(ggplot2) library(scales) library(tidyr) library(PerformanceAnalytics) library(stringr) library(readxl)
a6c4bf4bb6bdfe8c08d11e2dbced090648e77141
34f65d1110083e270d3b5a103e49284fc1295e93
/code/updateCompletemiRNASeqMetadata.R
d1c0059379139dce0a12d761316cdb3743f348b9
[]
no_license
kdaily/PCBCSampleMetadata
7bf884789f76e79642631c13b2e34e23d49dfaba
e12e2a39cd07364f42507eeb5e2ffc7caa54c9f1
refs/heads/master
2020-12-24T16:25:06.498568
2016-05-04T00:01:02
2016-05-04T00:01:02
28,197,176
0
0
null
null
null
null
UTF-8
R
false
false
568
r
updateCompletemiRNASeqMetadata.R
library(plyr) library(dplyr) library(synapseClient) synapseLogin() tblCurrent <- synGet('syn3219876') res <- synTableQuery(paste("SELECT * FROM", tblCurrent@properties$id)) bak <- res@values source("./createCompleteMIRNASeqMetadata.R") ### CAREFUL! THIS WILL DELETE ALL ROWS. synDeleteRows(res) tblCurrent <- synGet('syn3219876') tblNew <- Table(tableSchema=tblCurrent, values=as.data.frame(tblAll)) tblNew <- synStore(tblNew) ## Just in case, restore from backup of values # tblNew <- Table(tableSchema=tblCurrent, # values=bak)
c0eea23eb07f4c1de2d629c5f54b3889315a3eeb
fb26c3133d1b44e22a355b3997178f605df195f5
/cluster_validation.r
a771b4e5bd19fe53fb9cfbd469dbb248e610eff2
[]
no_license
MwandongaAbel/Clustering
fe8121795dbfefff9cfdb8302e9842c1bf49d48e
69fbdc36f60cfa6f33e5be7714f81ba54c999537
refs/heads/master
2022-11-22T07:43:54.807212
2020-07-30T12:02:35
2020-07-30T12:02:35
283,760,908
0
0
null
null
null
null
UTF-8
R
false
false
1,866
r
cluster_validation.r
# Clustering Validation......Abel Mwandonga Haro. install.packages("clustertend") library(factoextra) library(clustertend) data("iris") head(iris) #Excluding column species from data set. gf<-iris[,-5] # Random data generated from the iris data set random_gf<-apply(gf,2,function(x){runif(length(x),min(x),(max(x)))}) random_gf<-as.data.frame(random_gf) # Standardize the data sets gf<-iris_scaled<-scale(gf) random_gf<-scale(random_gf) #Visualization of the data set. install.packages("backports") library(backports) fviz_pca_ind(prcomp(gf), title = "PCA - Iris data", habillage = iris$Species, palette = "jco", geom = "point", ggtheme = theme_classic(), legend = "bottom") #plot random gf fviz_pca_ind(prcomp(random_gf),title="PCA_random_data",geom = "point",ggtheme=theme_classic()) # K-means on iris dataset set.seed(123) km_res<-kmeans(gf,3) fviz_cluster(list(data=gf,cluster=km_res$cluster), ellipse.type = "norm",geom = "point",stand = FALSE, palette="jco",ggtheme = theme_classic()) # K-means on the random dataset k_means_random<-kmeans(random_gf,3) fviz_cluster(list(data=random_gf,cluster=k_means_random$cluster), ellipse.type = "norm",geom="point",stand = FALSE, palette="jco",ggtheme = theme_classic()) # Hierarchical clustering on the random dataset fviz_dend(hclust(dist(random_gf)),k=3,k_colors = "jco", as.ggplot=TRUE,show_labels = FALSE) #Hopkins # Compute Hopkins statistic for iris dataset set.seed(123) hopkins(gf,n=nrow(gf)-1) # Compute Hopkins statistic for a random dataset hopkins(random_gf,n=nrow(gf)-1) #visual assessment of cluster tendency (VAT) fviz_dist(dist(gf),show_labels = FALSE)+ labs(title="iris_data") fviz_dist(dist(random_gf),show_labels = FALSE)+ labs(title="Random_data")
58471ed4b20325e2748381e4c4f72b9870bebdde
0b197ef046a9bb44e13f3c9637990a40ce6aee82
/exper/exp0044/rfr_us.R
d08a9e995446a82d10c9168964edd732045e9e7f
[]
no_license
jbrowne6/exper
52da8ae63faebe295f97e6507328a8b21d459e06
10b829ed79d6c2addf1e191b2a6291b4e90e8484
refs/heads/master
2021-09-17T06:41:29.141469
2018-06-28T18:57:17
2018-06-28T18:57:17
107,877,985
0
0
null
null
null
null
UTF-8
R
false
false
20,643
r
rfr_us.R
bestCutForFeature <- function(X){ minVal <- min(X) maxVal <- max(X) if(minVal == maxVal){ return(NULL)} X <- sort(X) normX <- (X-minVal)/(maxVal-minVal) sumLeft <- 0 sumRight <- sum(normX) errLeft <- 0 errRight <- 0 meanLeft <- 0 meanRight <- 0 errCurr <- 0 minErr <- Inf vectorLength <- length(X) cutPoint <- NULL for (m in 1:(vectorLength-1)){ sumLeft <- sumLeft + normX[m] sumRight <- sumRight - normX[m] meanLeft <- sumLeft/(m) meanRight <- sumRight/(vectorLength-m) errLeft <-sum((normX[1:m]-meanLeft)^2) errRight <-sum((normX[(m+1):vectorLength]-meanRight)^2) errCurr <- errLeft + errRight # Determine if this split is currently the best option if (errCurr < minErr){ cutPoint <- (X[m] + X[m+1])/2 minErr <- errCurr } } return(c(cutPoint, minErr)) } rfrus <- function(X, MinParent=1, trees=100, MaxDepth="inf", bagging=.2, replacement=TRUE, FUN=makeA, options=c(ncol(X), round(ncol(X)^.5),1L, 1/ncol(X)), COOB=TRUE, Progress=TRUE){ forest <- vector("list",trees) BV <- NA # vector in case of ties BS <- NA # vector in case of ties MaxDeltaI <- 0 nBest <- 1L BestIdx <-0L BestVar <-0L BestSplitIdx<-0L BestSplitValue <- 0 w <- nrow(X) p <- ncol(X) perBag <- (1-bagging)*w Xnode<-double(w) # allocate space to store the current projection SortIdx<-integer(w) if(object.size(X) > 1000000){ OS<-TRUE }else{ OS<-FALSE } # Calculate the Max Depth and the max number of possible nodes if(MaxDepth == "inf"){ StopNode <- 2L*w #worst case scenario is 2*(w/(minparent/2))-1 MaxNumNodes <- 2L*w # number of tree nodes for space reservation }else{ if(MaxDepth==0){ MaxDepth <- ceiling(log2(w)) } StopNode <- 2L^(MaxDepth) MaxNumNodes <- 2L^(MaxDepth+1L) # number of tree nodes for space reservation } CutPoint <- double(MaxNumNodes) Children <- matrix(data = 0L, nrow = MaxNumNodes,ncol = 2L) NDepth <- integer(MaxNumNodes) matA <- vector("list", MaxNumNodes) Assigned2Node<- vector("list",MaxNumNodes) Assigned2Leaf <- vector("list", MaxNumNodes) ind <- double(w) min_error <- Inf 
#%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% #%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% # Start tree creation #%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% for(treeX in 1:trees){ # intialize values for new tree before processing nodes CutPoint[] <- 0 Children[] <- 0L NDepth[]<- 0L #delete this? NDepth[1]<-1L CurrentNode <- 1L NextUnusedNode <- 2L NodeStack <- 1L highestParent <- 1L Assigned2Leaf <- vector("list", MaxNumNodes) ind[] <- 0L # Determine bagging set # Assigned2Node is the set of row indices of X assigned to current node if(bagging != 0){ if(replacement){ ind<-sample(1:w, w, replace=TRUE) Assigned2Node[[1]] <- ind }else{ ind[1:perBag] <- sample(1:w, perBag, replace = FALSE) Assigned2Node[[1]] <- ind[1:perBag] } }else{ Assigned2Node[[1]] <- 1:w } # main loop over nodes while (CurrentNode < NextUnusedNode && CurrentNode < StopNode){ # determine working samples for current node. NodeRows <- Assigned2Node[CurrentNode] Assigned2Node[[CurrentNode]]<-NA #remove saved indexes NdSize <- length(NodeRows[[1L]]) #determine node size # create projection matrix (sparseM) by calling the custom function FUN sparseM <- FUN(options) #isolate objective function # if node is impure and large enough then attempt to find good split if (NdSize < MinParent || NDepth[CurrentNode]==MaxDepth || NextUnusedNode+1L >= StopNode || NdSize == 1){ Assigned2Leaf[[CurrentNode]] <- NodeRows[[1L]] NodeStack <- NodeStack[-1L] CurrentNode <- NodeStack[1L] if(is.na(CurrentNode)){ break } next } min_error <- Inf cut_val <- 1 BestVar <- 1 # nBest <- 1L for(q in unique(sparseM[,2])){ #Project input into new space lrows <- which(sparseM[,2]==q) Xnode[1:NdSize] <- X[NodeRows[[1L]],sparseM[lrows,1], drop=FALSE]%*%sparseM[lrows,3, drop=FALSE] #Sort the projection, Xnode, and rearrange Y accordingly results <- bestCutForFeature(Xnode[1:NdSize]) if (is.null(results)) next if(results[2] < min_error){ cut_val <- 
results[1] min_error <- results[2] bestVar <- q } }#end loop through projections. if (min_error == Inf){ Assigned2Leaf[[CurrentNode]] <- NodeRows[[1L]] NodeStack <- NodeStack[-1L] CurrentNode <- NodeStack[1L] if(is.na(CurrentNode)){ break } next } # Recalculate the best projection lrows<-which(sparseM[,2L]==bestVar) Xnode[1:NdSize]<-X[NodeRows[[1L]],sparseM[lrows,1], drop=FALSE]%*%sparseM[lrows,3, drop=FALSE] # find which child node each sample will go to and move # them accordingly # changed this from <= to < just in case best split split all values MoveLeft <- Xnode[1:NdSize] < BestSplitValue numMove <- sum(MoveLeft) if (is.null(numMove)){ print("numMove is null") flush.console() } if(is.na(numMove)){ print("numMove is na") flush.console() } #Check to see if a split occured, or if all elements being moved one direction. if(numMove!=0L && numMove!=NdSize){ # Move samples left or right based on split Assigned2Node[[NextUnusedNode]] <- NodeRows[[1L]][MoveLeft] Assigned2Node[[NextUnusedNode+1L]] <- NodeRows[[1L]][!MoveLeft] #highest Parent keeps track of the highest needed matrix and cutpoint # this reduces what is stored in the forest structure if(CurrentNode>highestParent){ highestParent <- CurrentNode } # Determine children nodes and their attributes Children[CurrentNode,1L] <- NextUnusedNode Children[CurrentNode,2L] <- NextUnusedNode+1L NDepth[NextUnusedNode]=NDepth[CurrentNode]+1L NDepth[NextUnusedNode+1L]=NDepth[CurrentNode]+1L # Pop the current node off the node stack # this allows for a breadth first traversal Assigned2Leaf[[CurrentNode]] <- NodeRows[[1L]] NodeStack <- NodeStack[-1L] NodeStack <- c(NextUnusedNode, NextUnusedNode+1L, NodeStack) NextUnusedNode <- NextUnusedNode + 2L # Store the projection matrix for the best split matA[[CurrentNode]] <- as.integer(t(sparseM[which(sparseM[,2]==BestVar),c(1,3)])) CutPoint[CurrentNode] <- BestSplitValue }else{ # There wasn't a good split so ignore this node and move to the next NodeStack <- NodeStack[-1L] } # 
Store ClassProbs for this node. # Only really useful for leaf nodes, but could be used instead of recalculating # at each node which is how it is currently. CurrentNode <- NodeStack[1L] if(is.na(CurrentNode)){ break } } #If input is large then garbage collect prior to adding onto the forest structure. if(OS){ gc() } # save current tree structure to the forest if(bagging!=0 && COOB){ forest[[treeX]] <- list("CutPoint"=CutPoint[1:highestParent],"Children"=Children[1L:(NextUnusedNode-1L),,drop=FALSE], "matA"=matA[1L:highestParent], "ALeaf"=Assigned2Leaf[1L:(NextUnusedNode-1L)]) }else{ forest[[treeX]] <- list("CutPoint"=CutPoint[1:highestParent],"Children"=Children[1L:(NextUnusedNode-1L),,drop=FALSE], "matA"=matA[1L:highestParent], "ALeaf"=Assigned2Leaf[1L:(NextUnusedNode-1L)]) } if(Progress){ cat("|") flush.console() } } return(forest) } #%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% # Default option to make projection matrix # # this is the randomer part of random forest. The sparseM # matrix is the projection matrix. The creation of this # matrix can be changed, but the nrow of sparseM should # remain p. The ncol of the sparseM matrix is currently # set to mtry but this can actually be any integer > 1; # can even greater than p. #%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% makeA <- function(options){ p <- options[[1L]] d <- options[[2L]] method <- options[[3L]] if(method == 1L){ rho<-options[[4L]] nnzs <- round(p*d*rho) sparseM <- matrix(0L, nrow=p, ncol=d) sparseM[sample(1L:(p*d),nnzs, replace=F)]<-sample(c(1L,-1L),nnzs,replace=T) } #The below returns a matrix after removing zero columns in sparseM. 
ind<- which(sparseM!=0,arr.ind=TRUE) return(cbind(ind,sparseM[ind])) } #%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% # Create Distance Matrix #%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% dist <- function(X, Forest, maxDepth=0){ n <- nrow(X) dist <- matrix(0, nrow=n, ncol=n) numT <- length(Forest) currBin <- integer(n) if (maxDepth==0){ for(j in 1:numT){ for(i in 1:n){ currentNode <- 1L while(Forest[[j]]$Children[currentNode]!=0L){ s<-length(Forest[[j]]$matA[[currentNode]])/2 rotX <-sum(Forest[[j]]$matA[[currentNode]][(1:s)*2]*X[i,Forest[[j]]$matA[[currentNode]][(1:s)*2-1]]) if(rotX<=Forest[[j]]$CutPoint[currentNode]){ currentNode <- Forest[[j]]$Children[currentNode,1L] }else{ currentNode <- Forest[[j]]$Children[currentNode,2L] } } dist[Forest[[j]]$ALeaf[[currentNode]]] <- dist[Forest[[j]]$ALeaf[[currentNode]]] + 1 for(z in 1:(i-1)){ if(currBin[z] == currentNode){ dist[i,z] <- dist[i,z]+1 dist[z,i] <- dist[z,i]+1 } } dist[i,i] <- dist[i,i]+1 } } }else{ for(j in 1:numT){ for(i in 1:n){ currentNode <- 1L depth <- 1L while(Forest[[j]]$Children[currentNode]!=0L && depth <= maxDepth){ s<-length(Forest[[j]]$matA[[currentNode]])/2 rotX <-sum(Forest[[j]]$matA[[currentNode]][(1:s)*2]*X[i,Forest[[j]]$matA[[currentNode]][(1:s)*2-1]]) if(rotX<=Forest[[j]]$CutPoint[currentNode]){ currentNode <- Forest[[j]]$Children[currentNode,1L] }else{ currentNode <- Forest[[j]]$Children[currentNode,2L] } depth <- depth+1L } currBin[i] <- currentNode if(i>1){ for(z in 1:(i-1)){ if(currBin[z] == currentNode){ dist[i,z] <- dist[i,z]+1 dist[z,i] <- dist[z,i]+1 } } } dist[i,i] <- dist[i,i]+1 } } } return(dist) } #%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% # Find Potential Nearest Neighbors Vector #%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% distNN <- function(y, X, Forest, maxDepth=0){ dist <- integer(nrow(X)) numT <- length(Forest) if (maxDepth==0){ maxDepth <- Inf } for(j in 1:numT){ currentNode <- 1L depth <- 1L 
while(Forest[[j]]$Children[currentNode]!=0L && depth <= maxDepth){ s<-length(Forest[[j]]$matA[[currentNode]])/2 rotX <-sum(Forest[[j]]$matA[[currentNode]][(1:s)*2]*y[Forest[[j]]$matA[[currentNode]][(1:s)*2-1]]) if(rotX<=Forest[[j]]$CutPoint[currentNode]){ currentNode <- Forest[[j]]$Children[currentNode,1L] }else{ currentNode <- Forest[[j]]$Children[currentNode,2L] } depth <- depth+1L } dist[Forest[[j]]$ALeaf[[currentNode]]] <- dist[Forest[[j]]$ALeaf[[currentNode]]] + 1 } return(dist) #this is the similarity vector } #%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% # Find Nearest Neighbors from similarity vector #%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% distNNk <- function(y, X, sv, k, adder){ index <- order(sv, decreasing=TRUE) simCount <- tabulate(sv) multiplier <- adder if(sum(simCount) < multiplier+k){ remainingNN <- sum(simCount) print("Not enough points. Decrease search depth.") flush.console() return(NULL) }else{ remainingNN <- multiplier+k } simLength <- length(simCount) NNindex <- NULL while (remainingNN >0){ if (remainingNN >= simCount[simLength]){ if(simCount[simLength]>0){ NNindex <- c(NNindex, index[1:simCount[simLength]]) index <- index[-(1:simCount[simLength])] remainingNN <- remainingNN-simCount[simLength] } simLength <- simLength -1 }else{ #NNorder <- order(sqrt(rowSums((y-X[index[1:simCount[simLength]],])^2))) NNorder <- order(sqrt(rowSums(sweep(X[index[1:simCount[simLength]],],2,y)^2))) NNindex <- c(NNindex, index[NNorder[1:remainingNN]]) remainingNN = 0 } } kNearest <- order(sqrt(rowSums(sweep(X[NNindex,],2,testSamples[1,])^2)))[1:k] return(NNindex[kNearest]) } #%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% # Check K-Means #%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% CheckKmeans <- function(Y, Yp){ uY <- length(unique(Y)) classCt <- tabulate(Y, uY) class_order <- order(classCt, decreasing=TRUE) used_class <-NULL curr_class <- 
NA class_error <- NA for(z in 1:uY){ Cindex <- which(Y==class_order[z]) subClassCt <- tabulate(Yp[Cindex], uY) subClass_order <- order(subClassCt, decreasing=TRUE) if(!is.null(used_class)){ for(m in 1:uY){ curr_class <- subClass_order[m] if(!(curr_class%in%used_class)){ break } } used_class <- c(used_class, curr_class) }else{ curr_class <- subClass_order[1] used_class <- curr_class } class_error[z] <- subClassCt[curr_class]/classCt[class_order[z]] } print(class_error) oe <- sum(class_error*classCt[class_order])/length(Y) cat("the overall error is: ", oe, "\n") flush.console() } #############################Swiss Roll Code################################### swissRoll <- function(n1, n2 = NULL, size = 6, dim3 = FALSE, rand_dist_fun = NULL, ...) { ### If n2 is NULL, then generate a balanced dataset of size 2*n1 if (is.null(n2)) n2 <- n1 xdim <- ifelse(dim3, 3, 2) ### GROUP 1 # Generate Angles rho <- runif(n1, 0, size*pi) # Create Swiss Roll g1x1 <- rho*cos(rho) g1x2 <- rho*sin(rho) ### GROUP 2 # Generate Angles rho <- runif(n2, 0, size*pi) # Create Inverse Swiss Roll g2x1 <- -rho*cos(rho) g2x2 <- -rho*sin(rho) ### Generate the 3rd dimension if (dim3) { z_range <- range(c(g1x1, g1x2, g2x1, g2x2)) x3 <- runif(n1 + n2, z_range[1], z_range[2]) } ### If needed random perturbation on the data ### please specify the random generation funciton in R to 'rand_dist_fun' ### and the corresponding parameters in '...'. 
### For example, ### rand_dist_fun = rnorm, mean = 0, sd = 0.2 err <- matrix(0, n1 + n2, xdim) if (!is.null(rand_dist_fun)) err <- matrix(rand_dist_fun(xdim*(n1 + n2), ...), n1 + n2, xdim) ### Output the Swiss Roll dataset if (dim3) { out <- data.frame(y = c(rep(0:1, c(n1, n2))), x1 = c(g1x1, g2x1) + err[,1], x2 = c(g1x2, g2x2) + err[,2], x3 = x3 + err[,3]) } else { out <- data.frame(y = c(rep(0:1, c(n1, n2))), x1 = c(g1x1, g2x1) + err[,1], x2 = c(g1x2, g2x2) + err[,2]) } out } ################################################################################ ################################################################################ #%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% # Hartigan's Method #%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% findClusters <- function(nearnessMatrix, numClusters=3, numNearestNeighbors=10){ q <- rep(0,numClusters) clusters <- vector("list", numClusters) numSamples <- nrow(nearnessMatrix) numNN <- numNearestNeighbors randomOrdering <- sample(1:numSamples, numSamples) # randomOrdering <- 1:numSamples step <- floor(numSamples/numClusters) stepStart <- 1 stepEnd <- stepStart+step for(z in 1:(numClusters-1)){ clusters[[z]] <- randomOrdering[stepStart:stepEnd] stepStart <- stepEnd+1 stepEnd <- stepStart+step } clusters[[numClusters]] <- randomOrdering[stepStart:numSamples] for(z in 1:numSamples){ nearnessMatrix[z,z] <- 0 } for(z in 1:numClusters){ for(m in clusters[[z]]){ biggestNN <- order(nearnessMatrix[m,], decreasing=TRUE)[1:numNN] q[z] <- q[z] + sum(biggestNN%in%clusters[[z]]) } } print(paste("initial q", q)) currQ <- rep(0,numClusters) for(p in 1:30){ for(z in 1:numClusters){ for(m in clusters[[z]]){ biggestNN <- order(nearnessMatrix[m,], decreasing=TRUE)[1:numNN] for(k in 1:numClusters){ currQ[k] <- sum(biggestNN%in%clusters[[k]]) } QOrder <- order(currQ, decreasing=TRUE) if(QOrder[1] != z){ q[z] <- q[z] - currQ[z] q[QOrder[1]] <- q[QOrder[1]] + currQ[QOrder[1]] 
clusters[[z]] <- clusters[[z]][-which(clusters[[z]]==m)] clusters[[QOrder[1]]] <- c(clusters[[QOrder[1]]], m) } } } } print(paste("after 1", q)) return(clusters) } #%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% # Spectral Cluster #%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% specN <- function(distMat, numClust){ Y <- kmeans(distMat, numClust)$cluster return(Y) } ############################################################################# require(compiler) rfrus <- cmpfun(rfrus) distNN <- cmpfun(distNN) createSimilarityMatrix <- function(X, numTrees=100, K=10){ numberSamples <- nrow(X) similarityMatrix <- matrix(0,nrow= numberSamples, ncol=numberSamples) forest <- invisible(rfrus(X,trees=numTrees, MinParent=K)) for(z in 1:numberSamples){ NN1 <- distNN(X[z,], X, forest) similarityMatrix[z,] = NN1 # for(q in 1:numberSamples){ #Why did I do this? # if(NN1[q]==0){ # similarityMatrix[z,q]<-0 # } # } } return(similarityMatrix) }
a3523fffcdc9627f1c445a5fe2b90f63e1af9bbf
f53e353c54541c9282a9822e1fa23698bf533bd7
/Statistical experiment/5/5.R
a54be8f9e6e30ad3147d2f1392f3ff0ad340beef
[]
no_license
sakur0zxy/R-practice
acee9b2335077365e70e94fdf4734ed6dee58485
b8ec1f0a0feddcb16f988e0ca45c9b45b908440b
refs/heads/master
2020-03-26T05:58:17.014993
2018-09-27T08:04:53
2018-09-27T08:04:53
144,583,426
1
0
null
null
null
null
UTF-8
R
false
false
1,152
r
5.R
#12个球中有9个新球3个旧球,第一次从中取出三个使用后放回, #第二次又取出三个 #1.求第二次取出的三个球都是新球的概率 #2.已知第二次取出的三个球都是新球,求第一次取出的球都是 #新球的概率 #There are 12 balls including 9 new balls and 3 used #balls. At the first time, randomly select 3 balls #and return them after playing. At the second time, #randomly select 3 balls again. #1.Find the probability that the three balls taken #out for the second time are all new balls. #2.It is known that the three balls taken out for #the second time are all new balls, and the probability #that the first three balls taken out are new ball,too. select5<-function(n){ p1=0;p2=0 for(i in 1:n){ x1<-sample(1:12, size=3, replace = FALSE, prob = NULL) used0=3 used1=used0+sum(x1<=(12-used0)) x2<-sample(1:12, size=3, replace = FALSE, prob = NULL) if(sum(x2<=(12-used1))==3) p1=p1+1 if(sum(x1<=(12-used0))==3){ if(sum(x2<=(12-used1))==3) p2=p2+1 } } rt<-c('P1'=p1/n,'P2'=p2/p1) rt } select5(10000)
9dc6ff2467aa9ec5ac78b322df73577f60bbcbb0
10a72ed9289fbed1832a926ee017902eba6df3bd
/man/gplotgraph.Rd
26cc1156cec27207ad8bbd92c7e3ba022ef74320
[ "MIT" ]
permissive
jfontestad/sirgraph
4a6e9be86c737ac507f5512f0543180374042649
cf0ec32e7bd4e4250a1c33afa8bbcd4eb5aa22a6
refs/heads/master
2022-04-19T04:14:17.496926
2020-04-04T20:12:59
2020-04-04T20:12:59
null
0
0
null
null
null
null
UTF-8
R
false
false
301
rd
gplotgraph.Rd
% Generated by roxygen2 (4.0.0): do not edit by hand \name{gplotgraph} \alias{gplotgraph} \title{gplotgraph} \usage{ gplotgraph(g) } \arguments{ \item{g}{an SIR graph} } \value{ make a plot } \description{ plot an SIR graph } \details{ use ggplot to show an SIR graph } \author{ Barry S Rowlingson }
2c1e5d1551d7d8aba2a7bfdfedcb0e45f83a7b1f
e41102477bb433bfb6e49dd6e6bfc7e7333cae0a
/week3/modeling.R
7baeb4b28322024dd65cc72a35cde10f7836f298
[]
no_license
ettirapp/coursework
46950d5ca3b64ca3d63551a02030d4acde2118a7
00d6da4925d15bdf99d4b2abecbd8b796867a7cd
refs/heads/master
2020-06-03T03:14:51.534760
2019-07-16T00:59:59
2019-07-16T00:59:59
191,412,559
0
0
null
2019-06-11T16:45:46
2019-06-11T16:45:45
null
UTF-8
R
false
false
7,060
r
modeling.R
library(scales) library(broom) library(modelr) library(tidyverse) options(na.action = na.warn) theme_set(theme_bw()) options(repr.plot.width=4, repr.plot.height=3) users <- read_tsv(gzfile('users.tsv.gz')) head(users) # histogram of the label/regressor variable: ggplot(users, aes(x = daily.views)) + geom_histogram(bins = 50) + scale_x_log10(label=comma, breaks=10^(0:ceiling(log10(max(users$daily.views))))) + scale_y_continuous(label = comma) + xlab('Daily pageviews') + ylab('') ggplot(data = users, aes(x = age, y = daily.views)) + geom_point() + facet_wrap(~ gender) + xlab('Age') + ylab('Daily pageviews') nrow(users) users <- filter(users, daily.views > 0) nrow(users) views_by_age_and_gender <- users %>% filter(age <= 90) %>% group_by(age, gender) %>% summarize(count = n(), median_daily_views = median(daily.views)) head(views_by_age_and_gender) options(repr.plot.width=6, repr.plot.height=3) ggplot(views_by_age_and_gender, aes(x = age, y = median_daily_views, color = gender)) + geom_line(aes(linetype=gender)) + geom_point(aes(size = count)) + xlab('Age') + ylab('Daily pageviews') + scale_size_area(guide = F) + theme(legend.title=element_blank()) model_data <- filter(users, age >= 18 & age <= 65) options(repr.plot.width=4, repr.plot.height=3) ggplot(model_data, aes(x = age, y = daily.views)) + geom_smooth(method = "lm") + scale_y_log10(breaks = 1:100) model <- lm(log10(daily.views) ~ age, model_data) model summary(model) tidy(model) glance(model) M <- model.matrix(log10(daily.views) ~ age, model_data) head(M) tail(M) plot_data <- model_data %>% data_grid(age) %>% add_predictions(model) %>% mutate(pred = 10^pred) head(plot_data) plot_data <- model_data %>% data_grid(age) %>% add_predictions(model) %>% mutate(pred = 10^pred) head(plot_data) ggplot(plot_data, aes(x = age, y = pred)) + geom_line() plot_data <- model_data %>% group_by(age) %>% summarize(count = n(), geom_mean_daily_views = 10^(mean(log10(daily.views)))) %>% add_predictions(model) %>% mutate(pred = 
10^pred) head(plot_data) ggplot(plot_data, aes(x = age, y = pred)) + geom_line(aes(y = pred)) + geom_point(aes(y = geom_mean_daily_views, size = count)) + scale_size_area(guide = F) model <- lm(log10(daily.views) ~ age + I(age^2), model_data) model tidy(model) glance(model) M <- model.matrix(log10(daily.views) ~ age + I(age^2), model_data) head(M) plot_data <- model_data %>% group_by(age) %>% summarize(count = n(), geom_mean_daily_views = 10^(mean(log10(daily.views)))) %>% add_predictions(model) %>% mutate(pred = 10^pred) ggplot(plot_data, aes(x = age, y = pred)) + geom_line(aes(y = pred)) + geom_point(aes(y = geom_mean_daily_views, size = count)) + scale_size_area(guide = F) form <- as.formula(log10(daily.views) ~ gender + age + I(age^2)) M <- model.matrix(form, model_data) model <- lm(form, model_data) head(M) model options(repr.plot.width=6, repr.plot.height=3) plot_data <- model_data %>% group_by(age, gender) %>% summarize(count = n(), geom_mean_daily_views = 10^(mean(log10(daily.views)))) %>% add_predictions(model) %>% mutate(pred = 10^pred) ggplot(plot_data, aes(x = age, y = pred, color = gender)) + geom_line(aes(y = pred)) + geom_point(aes(y = geom_mean_daily_views, size = count)) + scale_size_area(guide = F) form <- as.formula(log10(daily.views) ~ gender * (age + I(age^2))) M <- model.matrix(form, model_data) model <- lm(form, model_data) head(M) model plot_data <- model_data %>% group_by(age, gender) %>% summarize(count = n(), geom_mean_daily_views = 10^(mean(log10(daily.views)))) %>% add_predictions(model) %>% mutate(pred = 10^pred) options(repr.plot.width=6, repr.plot.height=3) ggplot(plot_data, aes(x = age, y = pred, color = gender)) + geom_line(aes(y = pred)) + geom_point(aes(y = geom_mean_daily_views, size = count)) + scale_size_area(guide = F) #EVALUATING MODELS: library(tidyverse) library(scales) library(modelr) options(na.action = na.warn) library(broom) theme_set(theme_bw()) options(repr.plot.width=4, repr.plot.height=3) users <- 
read_tsv(gzfile('users.tsv.gz')) model_data <- filter(users, daily.views > 0, age >= 18 & age <= 65) form <- as.formula(log10(daily.views) ~ gender * (age + I(age^2))) M <- model.matrix(form, model_data) head(M) model <- lm(form, model_data) tidy(model) glance(model) plot_data <- model_data %>% group_by(age, gender) %>% summarize(count = n(), geom_mean_daily_views = 10^(mean(log10(daily.views)))) %>% add_predictions(model) %>% mutate(pred = 10^pred) options(repr.plot.width=6, repr.plot.height=3) ggplot(plot_data, aes(x = age, y = pred, color = gender)) + geom_line(aes(y = pred)) + geom_point(aes(y = geom_mean_daily_views, size = count)) + scale_size_area(guide = F) options(repr.plot.width=4, repr.plot.height=3) ggplot(plot_data, aes(x = pred, y = geom_mean_daily_views)) + geom_point() + geom_abline(linetype = "dashed") + xlab('Predicted') + ylab('Actual') ggplot(plot_data, aes(x = pred, y = geom_mean_daily_views, color = gender)) + geom_point() + geom_abline(linetype = "dashed") + xlab('Predicted') + ylab('Actual') ggplot(plot_data, aes(x = pred, y = geom_mean_daily_views, color = desc(age))) + geom_point() + geom_abline(linetype = "dashed") + xlab('Predicted') + ylab('Actual') + facet_wrap(~ gender, scale = "free") pred_actual <- model_data %>% add_predictions(model) %>% mutate(actual = log10(daily.views)) head(pred_actual) ggplot(pred_actual, aes(x = 10^pred, y = 10^actual)) + geom_point(alpha = 0.1) + geom_abline(linetype = "dashed") + scale_x_log10(label = comma, breaks = seq(0,100,by=10)) + scale_y_log10(label = comma) + xlab('Predicted') + ylab('Actual') pred_actual %>% summarize(rmse = sqrt(mean((pred - actual)^2))) pred_actual %>% summarize(rmse = sqrt(mean((10^pred - 10^actual)^2))) pred_actual %>% summarize(rmse = sqrt(mean((pred - actual)^2)), cor = cor(pred, actual), cor_sq = cor^2) rmse(model, model_data) rsquare(model, model_data) pred_actual %>% summarize(mse = mean((pred - actual)^2), mse_baseline = mean((mean(actual) - actual)^2), mse_vs_baseline = 
(mse_baseline - mse) / mse_baseline, cor = cor(pred, actual), cor_sq = cor^2) K <- 30 form <- as.formula(log10(daily.views) ~ gender * poly(age, K, raw=T)) M <- model.matrix(form, model_data) head(M) model <- lm(form, model_data) glance(model) tidy(model) plot_data <- model_data %>% group_by(age, gender) %>% summarize(count = n(), geom_mean_daily_views = 10^(mean(log10(daily.views)))) %>% add_predictions(model) %>% mutate(pred = 10^pred) options(repr.plot.width=6, repr.plot.height=3) ggplot(plot_data, aes(x = age, y = pred, color = gender)) + geom_line(aes(y = pred)) + geom_point(aes(y = geom_mean_daily_views, size = count)) + scale_size_area(guide = F)
cce186bb01a74ee731238f400e87ef3d844adf0e
6cabc0530af701aea5e5d76c3b0f242479d78140
/install.R
92255cf0614c3137035cf68ac4aae8a20e762abf
[ "MIT" ]
permissive
Atheloses/VSB-S8-PS
06ff0f551124dc692a47cb575a88e9397d3b4506
fdef214f8169094b6366ce0489f92ef20b460702
refs/heads/main
2023-05-28T14:07:59.232669
2021-06-04T14:04:26
2021-06-04T14:04:26
373,856,169
0
0
null
null
null
null
UTF-8
R
false
false
417
r
install.R
install.packages("readxl") install.packages("dplyr") install.packages("openxlsx") install.packages("moments") install.packages("lawstat") install.packages("BSDA") install.packages("EnvStats") install.packages("binom") install.packages("car") install.packages("dunn.test") install.packages("lsr") install.packages("epiR") install.packages("nortest") #install.packages('IRkernel') #IRkernel::installspec(user = TRUE)
1fb171ff832e64d1d702a3bf9124b74736ef29a7
ec78bae636ce940611e2a198539f12c12b330401
/intro_to_data_modeling/week2/hw_4_2.R
b6e04d0e7c4d4312695ca98c4736589b67526fad
[ "MIT" ]
permissive
bwilson668/gt
cc4495d8f8b0fb2506b9a5e916839c12da1d9080
d474598425d70f774a6d509761640ebc4516a1f5
refs/heads/master
2020-05-30T10:36:11.657344
2019-07-03T12:48:36
2019-07-03T12:48:36
189,676,200
0
0
null
null
null
null
UTF-8
R
false
false
1,917
r
hw_4_2.R
# QUESTION # # Use the R function kmeans to cluster the points as well as possible. # Report the best combination of predictors, your suggested value of k, # and how well your best clustering predicts flower type. # Import Libraries library(tidyverse) # Set Seed for Reproducability set.seed(1234) # Read the data in iris <- read.table("_data/iris.txt", header = TRUE, sep = "", dec = ".") # Data Prep # Scaled around center due to suggestion in week 1. # Scaling from 0-1 is good for classification. # Scaling around center is good for clustering. iris_scaled <- mutate( iris, Sepal.Length = scale(Sepal.Length)[,1], Sepal.Width = scale(Sepal.Width)[,1], Petal.Length = scale(Petal.Length)[,1], Petal.Width = scale(Petal.Width)[,1] ) %>% select( -Species ) # Determine which predictors should be used my_cols <- c("#FF0000", "#00FF00", "#0000FF") # RGB pairs( iris_scaled, cex = 0.5, col = my_cols[iris$Species], lower.panel=NULL ) ### # # REPORT # # The Green and Blue species are the hardest to distinguish. # Each of the 4 features look to contribute to the ability of separating the species. # For my analysis, I'll leave all 4 features in for the clustering. # ### k_range <- 1:10 # Loop over all possible Ks iris_clusters <- sapply( k_range, function(k){ kmeans(iris_scaled, k) } ) # Elbow Curve Plot plot( k_range, iris_clusters[5,], type="b", main="Elbow Curve Plot", xlab="Number of clusters K", ylab="Total within-clusters sum of squares" ) ### # # Report # # Judging by the Elbow Curve, my suggested value of K would be 4 clusters. # # Although, we know the dataset has 3 species. # If these species were previously unknown, then we might name 4 different species. # However, since we know there are 3, the clustering would not make for the best classifier, # especially between the blue and green species. # ###
dec2bd262af915000f2ef29e69ae39394c765a49
38ee778acc8edb66cf5b9649c932ca8f8a154b8c
/study_data-analysis/5. data_visualization/practice01.R
abbea0204b093caca685125cb5c5637c6ed1f3de
[]
no_license
sangm1n/BITLab
55e73119af544a209cc00676c4efc9d00441211a
7c40ecaac755facf5fc7b5232d20b6430562cc28
refs/heads/master
2023-04-18T06:16:01.093813
2021-04-28T05:23:53
2021-05-04T07:36:08
290,684,489
1
0
null
null
null
null
UTF-8
R
false
false
1,719
r
practice01.R
# data visualization practice in R by sangmin # treemap and symbols using state.x77 dataset us <- data.frame(state.x77, state.division) head(us) library(treemap) state.name <- as.factor(rownames(us)) us.2 <- data.frame(us, state.name) head(us.2) treemap(us.2, index=c("state.division", "state.name"), vSize="Population", vColor="Income", type="dens", bg.labels="yellow", title="US state") treemap(us.2, index=c("state.division", "state.name"), vSize="HS.Grad", vColor="Murder", type="value", bg.labels="yellow", title="US state") symbols(us.2$Income, us.2$Illiteracy, circles=us.2$Population, inches=0.4, fg="grey", bg="green", xlab="Income", ylab="Illiteracy", main="Income and Illiteracy") text(us.2$Income, us.2$Illiteracy, rownames(us), cex=0.6, col="brown") symbols(us.2$Illiteracy, us.2$Murder, circles=us.2$Area, inches=0.5, fg="grey", bg="green", xlab="Illiteracy", ylab="Murder", main="Illiteracy and Murder") text(us.2$Illiteracy, us.2$Murder, rownames(us), cex=0.6, col="brown") # treemap using swiss dataset head(swiss) a <- data.frame(subset(swiss, Education <= 6), group="low") b <- data.frame(subset(swiss, Education >= 13), group="high") c <- data.frame(subset(swiss, Education > 6 & Education < 13), group="mid") swiss.name <- rownames(swiss) swiss.2 <- rbind(a, b, c) swiss.2 <- data.frame(swiss.2, swiss.name) swiss.2 treemap(swiss.2, index=c("group", "swiss.name"), vSize="Fertility", vColor="Agriculture", type="value", bg.labels="yellow", title="Swiss state name") treemap(swiss.2, index="swiss.name", vSize="Catholic", vColor="Examination", type="dens", title="Swiss state name")
fad4b0132f39efbe1b62c2cddd18791fb45f142f
18433088b0f83ad44064bcc50d5900d99b737ec9
/StartService.R
946f00c87957ceab4df76ead49c0c82c9e77a3f2
[]
no_license
ClausBoHansen/Rserve
ff4900b979819781f1c9e37d28f9c5b480810121
a474251efffb348259c20fee7e933834933f5657
refs/heads/master
2020-04-15T17:05:02.451289
2019-01-30T13:12:31
2019-01-30T13:12:31
164,859,901
0
0
null
null
null
null
UTF-8
R
false
false
475
r
StartService.R
# Installation notes # Install lssl (console) # apt-get install libssl-dev # Install Rserve # install.packages('Rserve',,'http://www.rforge.net/') #### Load libraries #### library(Rserve) library(RserveFunctions) # Load library with Machine Learning function #library(testpackage) load("finalModel.rf.RData") # Call Prediction example # Prediction(TrainedModel, 12) #### Initiate server #### system("killall -INT Rserve") # Start Rserve Rserve(args = "--no-save")
426e97b186bd7e32ec393cc532d3b9e8ec95fdf3
ee4a94d66353c78d326489ce02287bffb993535b
/Gibbs/man/sampling_mu.Rd
9b55b4486998b356c928a551867419be992a493b
[]
no_license
jeonfect/GibbsSampler
bbf93b23568a895e9bbf1b3dfae257f4bcf68b1a
8bbb40840dcf901b4831728af7d8a062d6f5bdec
refs/heads/master
2022-12-31T16:37:55.150318
2020-10-15T15:47:22
2020-10-15T15:47:22
264,549,762
0
0
null
null
null
null
UTF-8
R
false
true
978
rd
sampling_mu.Rd
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/GibbsSampler.R \name{sampling_mu} \alias{sampling_mu} \title{Sampling method for within-group sample mean.} \usage{ sampling_mu(z, theta, mu, Sigma) } \arguments{ \item{z}{vector length \code{n} of observation's cluster allocation} \item{theta}{matrix \verb{n x p} of observation means} \item{mu}{matrix \verb{k x p} of mean values for each cluster} \item{Sigma}{matrix \verb{p x p x k} of variances for each cluster} } \value{ matrix \verb{k x p} of sampled mu } \description{ Sampling method for within-group sample mean. } \details{ \code{mu_k} has conditional distribution \verb{N(\\hat\\theta_k,\\Sigma_k/N_k)} where \verb{N_k=\\sum_\{i=1\}^\{N\}I(z_i=k)}. Sampling is done using \code{rmNorm()} from \code{mniw} package. } \references{ Martin Lysy and Bryan Yates (2019). mniw: The Matrix-Normal Inverse-Wishart Distribution. R package version 1.0. https://CRAN.R-project.org/package=mniw }
6caeb70a43f0d203932d894c08ba13b7a2c67a7e
73aac21a9f317c5fccc5a662c7a44b31307e4f93
/8-pcurve recover AEO.R
239ebc9e9989cadbaa41a6751dd77c70a52345c9
[ "MIT", "LicenseRef-scancode-unknown-license-reference" ]
permissive
martinasladek/meta-showdown
d76f3e00ee6615ccd7821986277d4008886b483d
371b41aea50e63158960e4c7136238baa54e1803
refs/heads/master
2023-02-23T10:59:31.430984
2019-06-14T07:22:25
2019-06-14T07:22:25
null
0
0
null
null
null
null
UTF-8
R
false
false
2,526
r
8-pcurve recover AEO.R
## ====================================================================== ## This code checks the alternative interpretation of p-curve (a la Simonsohn) ## which states that p-curve is supposed to recover the "average true effect of studies submitted to p-curve" ## ===================================================================== library(dplyr) library(ggplot2) load("dataFiles/res.wide.RData") # res.wide$qrp.label <- factor(res.wide$qrpEnv, levels=c("none", "med", "high"), labels=paste0("QRP = ", c("none", "med", "high")), ordered=TRUE) # res.wide$delta.label <- factor(res.wide$delta, levels=c(0, 0.2, 0.5, 0.8), labels=paste0("delta = ", c(0, 0.2, 0.5, 0.8)), ordered=TRUE) # res.wide$censor <- factor(res.wide$selProp, levels=unique(res.wide$selProp), labels=paste0("PB = ", unique(res.wide$selProp))) # --------------------------------------------------------------------- # Compute summary measures across replications # use the data set without any reductions (i.e., also keep p-curves with <=3 sign. 
studies) PC <- res.wide %>% filter(method=="pcurve", !is.na(kSig_estimate) & kSig_estimate >= 1) summ.PC <- PC %>% group_by(condition, k, k.label, delta, delta.label, qrpEnv, qrp.label, censor, censor.label, tau, tau.label) %>% dplyr::summarise( meanEst.AEO = mean(b0_estimate, na.rm=TRUE), ME.AEO = mean(b0_estimate - delta.included_mean, na.rm=TRUE), RMSE.AEO = sqrt(mean((b0_estimate - delta.included_mean)^2, na.rm=TRUE)), perc2.5.AEO = quantile(b0_estimate, probs=.025, na.rm=TRUE), perc97.5.AEO = quantile(b0_estimate, probs=.975, na.rm=TRUE), nSig = mean(kSig_estimate) ) # average kSig in tau=0 conditions: summ.PC %>% filter(tau==0, delta == 0, qrpEnv=="none") %>% dplyr::select(1:8, nSig) # --------------------------------------------------------------------- # Plot # order the delta.label factor alphabetically summ.PC$delta.label2 <- factor(summ.PC$delta.label, levels=sort(levels(summ.PC$delta.label))) summ.PC$censor.label2 <- factor(paste0("PB = ", summ.PC$censor)) # raw estimates (not posified) summ.PC %>% ggplot(aes(x=k.label, y=ME.AEO, shape=delta.label2)) + geom_point(position=position_dodge(width=0.7)) + geom_hline(yintercept=0) + coord_flip(ylim=c(-0.4, 0.25)) + xlab("k") + ylab("Mean error (relative to average true effect size of studies submitted to p-curve)") + facet_grid(tau.label~censor.label2~qrp.label) + guides(shape=guide_legend("True effect size")) + xlab("") + theme_bw() ggsave("Plots/ME_AEO_raw.jpg", dpi=120)
1c7a44e64b9a73ee6cc30b587d40f4f8d3d6dc3a
0a3d5398e435fc81a61f832c0921304c72d7dbd5
/tests/testthat/test-examples.R
8296ee65662233dd65d0fb35b640b25336fe9181
[]
no_license
r-lib/pkgload
609ea95e6f19b51f2ccfed1f71bbb0adbc647283
75938cd13f80e912af43d9632cdb54aa0bc9ebff
refs/heads/main
2023-09-01T00:53:19.446111
2023-07-06T07:34:53
2023-07-06T08:15:47
73,123,753
46
38
null
2023-09-11T13:26:16
2016-11-07T21:45:48
R
UTF-8
R
false
false
1,145
r
test-examples.R
local_load_all_quiet() test_that("default run_example ignores donttest and dontrun ", { env <- run_example(test_path("test-examples.Rd"), quiet = TRUE) expect_equal(env$a, 1) }) test_that("run donttest when requested", { env <- run_example(test_path("test-examples.Rd"), run_donttest = TRUE, quiet = TRUE) expect_equal(env$a, 2) }) test_that("run dontrun when requested", { env <- run_example(test_path("test-examples.Rd"), run_dontrun = TRUE, quiet = TRUE) expect_equal(env$a, 3) }) test_that("can run example package", { load_all(test_path("testHelp")) on.exit(unload(test_path("testHelp"))) env <- dev_example("foofoo", quiet = TRUE) expect_equal(env$a, 101) }) test_that("can use system macros", { load_all(test_path("testHelp")) on.exit(unload(test_path("testHelp"))) expect_silent( run_example( test_path("testHelp", "man", "testSysMacro.Rd"), quiet = TRUE ) ) }) test_that("can use extra Rd macros", { macros <- load_rd_macros("testHelp") expect_silent( run_example( test_path("testHelp", "man", "testCustomMacro.Rd"), quiet = TRUE, macros = macros ) ) })
bc7af98550efb55329d14b71235388cd23c4000e
6721e8fd9ee08ed6c4fabd2d6556558779185697
/man/colorList.Rd
46497b75b9ee34251c283c09ff5851bebe6559fe
[]
no_license
uschiLaa/galahr
47befbd4a164a44ecba0615ec65e6bbca13daf73
0b7d9b78694e4d711e09987e073bfa9f54987a6c
refs/heads/master
2021-06-28T08:43:32.326374
2020-03-12T01:26:14
2020-03-12T01:26:14
186,314,758
3
1
null
null
null
null
UTF-8
R
false
true
406
rd
colorList.Rd
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/plotting.R \name{colorList} \alias{colorList} \title{Mapping grouping to color.} \usage{ colorList(gr) } \arguments{ \item{gr}{Vector containing group assignment for each entry.} } \value{ Named list containing assigned colors ("color") and list of colors ("col") } \description{ Mapping grouping to color. } \keyword{internal}
5d6792f9f337009e9d2d4f6abc84de883c1cf9e7
c4d69c23de8fba861a5f258e13a86a8a769cf81c
/process_script.R
2144704876eea7c6d1ebfe39ee8e8319505ea3b0
[]
no_license
guhao0809/VAR
2e1af7611a8021295f230285d8d3625ac3cf40f2
587d73bb05969eb916c10a6df78d3a10cbdc4491
refs/heads/master
2020-03-28T05:13:58.439649
2018-11-26T03:16:54
2018-11-26T03:16:54
147,764,116
0
1
null
2018-09-07T03:13:53
2018-09-07T03:13:53
null
UTF-8
R
false
false
1,096
r
process_script.R
# if (length(date.list) < 570){ # date.list # process_period = ceiling(length(date.list)/2) # date.list # review_period =floor(length(date.list)/2) # date.list # } else { # process_period = 285 # review_period = 285 # } process_period = 285 review_period = 285 total_days = process_period + review_period # data_end_day = as.Date(date.list[length(date.list)]) # date.list data_end_day = date.list[length(date.list)] data_start_day = date.list[length(date.list)-total_days+1] # Other Time parameters needed process_start_day = review_period + 1 temp_result = Lambda.optimize(process_period, selected_index_dt_final) lambda.var.list = temp_result$lambda.var.list lambda.cov.list = temp_result$lambda.cov.list temp_list = Index.var.cov.calculation(selected_index_dt_final, process_period, lambda.var.list, lambda.cov.list) full_data = temp_list$full_data setorder(full_data, seccode, date) final_data = temp_list$final_data cov_df = temp_list$cov_df cov.list = temp_list$variance.list cov.mat = temp_list$cov.mat cov.series = as.vector(cov.mat) # matrix(as.vector(cov.series),ncol = 30)
ec8e3ef164403e4ce215148cf90e92f849f3bf95
7f31661d2c24df6ac261f66065f687dbf848a5a5
/man/form.Rd
6527108a9a0b563614cfe0f7cf4b05f840f18cff
[]
no_license
cran/sudachir
0ed0ba2b27f739f3809d38e0178c58bf02958d52
9f1251aad5ef5460ddbcb9d3f2aa1677d370a941
refs/heads/master
2023-01-14T00:01:41.487373
2020-11-10T14:20:02
2020-11-10T14:20:02
312,241,005
0
0
null
null
null
null
UTF-8
R
false
true
669
rd
form.Rd
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/form.R \name{form} \alias{form} \title{Parse tokenized input text} \usage{ form(x, mode, type, pos = TRUE) } \arguments{ \item{x}{Input text vectors} \item{mode}{Select split mode (A, B, C)} \item{type}{return form. One of the following "surface", "dictionary", "normalized", "reading" or "part_of_speech".} \item{pos}{Include part of speech information with object name.} } \description{ Parse tokenized input text } \examples{ \dontrun{ form("Tokyo", mode = "B", type = "normalized") form("Osaka", mode = "B", type = "surface") form("Hokkaido", mode = "C", type = "part_of_speech") } }
76b961e4acacb2f40e8c74be7124f8101a1ad537
db82df76516ffbdd78e5e49da8304c8f872c6385
/MarkDown_Dynamic Report/in Word/Dynamic Reportin word.R
78a05b8766026f82aaea9efd18e41e3f1b262fa4
[]
no_license
Crystal0108W/R
0023f7ddc392ec882b9114b2bc832dddb5ec7904
4d8255cfc85e748ae38136bee87301aea8e53151
refs/heads/master
2021-01-17T15:49:25.903400
2017-05-13T03:55:36
2017-05-13T03:55:36
84,112,389
0
0
null
null
null
null
UTF-8
R
false
false
1,339
r
Dynamic Reportin word.R
#####################Creating Dynamic Repors with R and Markdowns################################# #R Markdown templates can be used to create HTML, PDF, and MS Word documents. #ODT and DOCX tem- plates are used to create Open Document and Microsoft Word documents, respectively. #LaTeX templates are used to create publication-quality PDF documents, including reports, articles, and books ## With R and Microsoft word install.packages("R2wd") install.packages("RDCOMClient") library(R2wd) library(RDCOMClient) require(R2wd) require(car) df<- Salaries n <- nrow(df) fit <- lm(salary~rank*sex, data = df) aovTable <- Anova(fit, type = 3) aovTable <- round(as.data.frame(aovTable), 3) aovTable[is.na(aovTable)] <- "" wdGet("C:/Users/Crystal/Desktop/Sample Report1.docx", method = "RDCOMClient") wdGoToBookmark("n") wdWrite(n) wdGoToBookmark("aovTable") wdTable(aovTable, caption = "Two-way Analysis of Variance", caption.pos = "above", pointsize = 12, autoformat = 4) wdGoToBookmark("effectsPlot") myplot <- function(){ require(effects) par(mar = c(2,2,2,2)) plot(allEffects(fit), main = "") } wdPlot(plotfun = myplot, caption = "Mean Effects Plot", height = 4, width = 5, method = "metafile") wdSave("C:/Users/Crystal/Desktop/Sample Report1.docx") wdQuit()
58971f74e75ef0207b9eed69253ae4d3c221452b
34b429c98a64f8a9c43908cc57d6d622d4df1ffc
/man/multi_post.Rd
29a8f9c6906bf823641615e839ccbb77e90de42f
[]
no_license
mkoohafkan/wqpr-clone
f55212370ff548fab586fc28c3d91fda408f1a28
96f17b980125420a0d170dd9d145509c31f60e30
refs/heads/master
2023-04-18T03:02:17.881632
2021-04-27T16:37:29
2021-04-27T16:37:29
362,178,533
0
0
null
null
null
null
UTF-8
R
false
true
437
rd
multi_post.Rd
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/util.r \name{multi_post} \alias{multi_post} \title{Multi POST} \usage{ multi_post(service.url, body, stop.on.fail = FALSE) } \arguments{ \item{service.url}{The complete URL of the web service.} \item{body}{The POST field(s) to include in the request body.} } \value{ The server response. } \description{ Asynchronous WQP POST interface. } \keyword{internal}
94b98c801af880b560aa4896fc9055bd912c039e
f245521e63b59e37470070092b7d1d38a87b2e48
/libs/addLetLab.r
b3292c9bbc2d0b2b86a43d8abd3a39906a541e02
[]
no_license
douglask3/UKESM-land-eval
3c10d10eba32bcef1e7b2a057db3b22fdf2fd621
aad3f6902e516590be02585ad926bfe1cf5770bf
refs/heads/master
2021-08-17T06:32:10.736606
2021-07-14T12:57:13
2021-07-14T12:57:13
242,747,830
0
0
null
null
null
null
UTF-8
R
false
false
235
r
addLetLab.r
addLetLab <- function(let, letAsTitle = TRUE) { if (!is.null(let)) { if (letAsTitle) mtext(side = 3, adj = 0.05, let) else mtext(side = 3, line = -1, adj = 0.1, paste0(let, ')')) } }
d950e47cf878224d5b22b3d433a07cd0b5b2af46
87e1faf49719839550f1249cc14ec3e1da27578d
/man/compareCats.Rd
ab0e517782fe5a439fadc0246feccfda917514d5
[]
no_license
bryanhanson/HandyStuff
cd9a126d6f344b62ccca79831f174f872136ed00
d8f292cdbc52f8938a67216a0d63b7168f6b2a00
refs/heads/master
2022-10-10T15:56:56.479427
2022-07-18T01:38:29
2022-07-18T01:38:29
1,523,562
1
1
null
null
null
null
UTF-8
R
false
true
3,131
rd
compareCats.Rd
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/compareCats.R \name{compareCats} \alias{compareCats} \title{Compare Data by Category} \usage{ compareCats( formula = NULL, data = NULL, cols = NULL, freckles = FALSE, method = c("sem", "sem95", "iqr", "mad", "box", "points"), theme = screenTheme(), ... ) } \arguments{ \item{formula}{A formula with both LHS and RHS. The formula should comply with the usual \code{lattice} conventions, for instance \code{y ~ factor1 | factor2}.} \item{data}{A data frame containing the response and categorical/factor data. A maximum two categories with any number of levels can be plotted at one time.} \item{cols}{A vector of colors to be used for factor 1; must be as along as the levels in factor 1.} \item{freckles}{Logical; if \code{TRUE}, the actual data points are plotted as small points, hence the name, jittered slightly in the horizontal direction only.} \item{method}{One of \code{c("sem", "sem95", "iqr", "mad", "box", "points")}. Various methods for computing measures of spread and central tendency. See the documentation for \code{ChemoSpec::.seXy}.} \item{theme}{Character; A suitable \code{lattice} theme. There are two built-in themes which you can use "as is" or modify to your heart's content. If none is given, \code{\link{screenTheme}} will be used. The other option provided is \code{\link{posterTheme}}.} \item{\dots}{Other parameters to be passed downstream.} } \value{ A \code{lattice} object. These can be modified by the usual methods associated with these packages. } \description{ This function allows one to generate a nice plot comparing a response from samples falling into one or two categories with corresponding levels. \code{lattice} is used to create plots, with options to include the original data and to use various measures of central tendency and spread. A count of the number of samples in each level is included. Gives a visual impression of the data to go along with hypothesis tests. 
} \details{ The supplied data frame is stripped down to just the response and factor columns, then \code{NAs} are removed; the counts reflect this. } \examples{ # ### Set up test data # require("ChemoSpec") require("lattice") require("latticeExtra") require("plyr") mydf <- data.frame( resp = rnorm(40), cat1 = sample(LETTERS[1:3], 40, replace = TRUE), cat2 = sample(letters[1:2], 40, replace = TRUE), stringsAsFactors = TRUE) # ### One factor: # p <- compareCats(formula = resp~cat1, data = mydf, method = "sem", freckles = TRUE, poster = FALSE, cols = c("red", "orange", "blue")) print(p) # ### Two factors: # p <- compareCats(formula = resp~cat1 | cat2, data = mydf, method = "sem", freckles = TRUE, cols = c("red", "orange", "blue")) print(p) # ### Interchange the roles of the factors # p <- compareCats(formula = resp~cat2 | cat1, data = mydf, method = "sem", freckles = TRUE, cols = c("red", "blue")) print(p) } \references{ \url{https://github.com/bryanhanson/HandyStuff} } \author{ Bryan A. Hanson, DePauw University. \email{hanson@depauw.edu} } \keyword{plot} \keyword{univariate}
cb6384f61ea222ae1586305999e41884830f20de
297f0c096ee298349bb12d2dd513205df1a19439
/R programming week 2/pollutantmean-demo.R
147981a300e867b3c3e56480f085922dd13e1cf7
[]
no_license
coenschoof/R-Programming
3f28f48bd5c6d0c4b71bf43e1410c4d431c94d3a
954ab11898d051d0c5a4e3c6b0ad77dcb4d95eda
refs/heads/main
2023-01-10T13:38:18.379227
2020-11-10T06:50:10
2020-11-10T06:50:10
311,568,046
0
0
null
null
null
null
UTF-8
R
false
false
483
r
pollutantmean-demo.R
pollutantmean <- function(directory, pollutant, id = 1:332) { setwd(directory) listOfFiles <- list.files() vec <- vector() for(i in id) { csvFile <- read.csv(listOfFiles[i]) pollutantColumn <- csvFile[,pollutant] pollutantColumnWithoutNA <- pollutantColumn[!is.na(pollutantColumn)] vec <- append(vec, pollutantColumnWithoutNA) } mean(vec) } pollutantmean("C:\\Users\\coen_\\OneDrive\\Bureaublad\\datasciencecoursera\\specdata", "nitrate")
65e85d97c5c03babf883ce0b426811017ac0fcc0
c0931ba541a18095a54e3c2667d02a22d0648015
/scripts/01_pnv.R
ba44f0b0e5b4166d3abb472018b0355f940cfefe
[]
no_license
juliussebald/formasam
dbd7289aca0350562cd995a64061cc0b73ddc07d
8927800d1cf3098d1fd1cb47ac9acc00a4073e4b
refs/heads/master
2023-04-22T06:53:31.893154
2021-01-21T08:27:34
2021-01-21T08:27:34
306,577,923
1
0
null
null
null
null
UTF-8
R
false
false
14,494
r
01_pnv.R
# This script loads in the raw simulation data of iLand and # converts it to the FORMASAM format # Further it combines the pnv data from iLand with pnv data from LandClim # and combines everything in one nice dataframe # Finally it plots the results of the pnv runs nicely # load packages library(raster) # version 3.3-13 library(sf) # version 0.9-5 library(tidyverse) # version 1.3.0 library(RSQLite) # version 2.2.0 library(data.table) # version 1.13.0 library(patchwork) # version 1.0.1 # ROSALIA ----------------------------------------------------------------- # bring iLand output in FORMASAM format ----------------------------------- #load coordinates of ressource units coord <- raster("../../materials/ROSALIA/gis/EnvGrid_Rosalia.asc") %>% rasterToPoints(.) %>% as.data.frame(.) %>% rename(ruid = layer) # load simulated data # historic db.conn <- dbConnect(SQLite(), dbname="../../materials/ROSALIA/output/PNV_ROSALIA_historic.sqlite") dbListTables(db.conn) stand_iland_historic <- dbReadTable(db.conn, "dynamicstand") %>% mutate(model = "ILAND", AGB = rowSums(.[, c("foliagemass_sum", "stemmass_sum", "branchmass_sum")])) dbDisconnect(db.conn) # RCP 45 db.conn <- dbConnect(SQLite(), dbname="../../materials/ROSALIA/output/PNV_ROSALIA_RCP45.sqlite") dbListTables(db.conn) stand_iland_rcp45 <- dbReadTable(db.conn, "dynamicstand") %>% mutate(model = "ILAND", AGB = rowSums(.[, c("foliagemass_sum", "stemmass_sum", "branchmass_sum")])) dbDisconnect(db.conn) # RCP 85 db.conn <- dbConnect(SQLite(), dbname="../../materials/ROSALIA/output/PNV_ROSALIA_RCP85.sqlite") dbListTables(db.conn) stand_iland_rcp85 <- dbReadTable(db.conn, "dynamicstand") %>% mutate(model = "ILAND", AGB = rowSums(.[, c("foliagemass_sum", "stemmass_sum", "branchmass_sum")])) dbDisconnect(db.conn) # bring data in FORMASAM format # historic PNV_ILAND_ROSALIA_historic <- stand_iland_historic %>% dplyr::select(year, rid, species, AGB, model) %>% rename(ruid = rid) %>% left_join(coord) %>% dplyr::select(x, y, ruid, 
year, species, AGB, model) %>% mutate(AGB = round(AGB, 3)) # RCP45 PNV_ILAND_ROSALIA_rcp45 <- stand_iland_rcp45 %>% dplyr::select(year, rid, species, AGB, model) %>% rename(ruid = rid) %>% left_join(coord) %>% dplyr::select(x, y, ruid, year, species, AGB, model) %>% mutate(AGB = round(AGB, 3)) # RCP85 PNV_ILAND_ROSALIA_rcp85 <- stand_iland_rcp85 %>% dplyr::select(year, rid, species, AGB, model) %>% rename(ruid = rid) %>% left_join(coord) %>% dplyr::select(x, y, ruid, year, species, AGB, model) %>% mutate(AGB = round(AGB, 3)) write_csv(PNV_ILAND_ROSALIA_historic, "../../materials/ROSALIA/output/PNV_results/iLand/PNV_ILAND_ROSALIA_historic.csv") write_csv(PNV_ILAND_ROSALIA_rcp45, "../../materials/ROSALIA/output/PNV_results/iLand/PNV_ILAND_ROSALIA_RCP45.csv") write_csv(PNV_ILAND_ROSALIA_rcp85, "../../materials/ROSALIA/output/PNV_results/iLand/PNV_ILAND_ROSALIA_RCP85.csv") rm(list = ls()) # load pnv output -------------------------------------------------------- # iLand PNV_ILAND_ROSALIA_historic <- read.csv("../../materials/ROSALIA/output/PNV_results/iLand/PNV_ILAND_ROSALIA_historic.csv", stringsAsFactors = FALSE) %>% mutate(climate = "historic") PNV_ILAND_ROSALIA_rcp45 <- read.csv("../../materials/ROSALIA/output/PNV_results/iLand/PNV_ILAND_ROSALIA_RCP45.csv", stringsAsFactors = FALSE) %>% mutate(climate = "RCP45") PNV_ILAND_ROSALIA_rcp85 <- read.csv("../../materials/ROSALIA/output/PNV_results/iLand/PNV_ILAND_ROSALIA_RCP85.csv", stringsAsFactors = FALSE) %>% mutate(climate = "RCP85") PNV_ILAND_ROSALIA <- bind_rows(PNV_ILAND_ROSALIA_historic, PNV_ILAND_ROSALIA_rcp45, PNV_ILAND_ROSALIA_rcp85) %>% filter(species != "rops") # LandClim PNV_LANDCLIM_ROSALIA_historic <- read.csv("../../materials/ROSALIA/output/PNV_results/LandClim/PNV_LANDCLIM_ROSALIA_historic.csv", stringsAsFactors = FALSE) %>% mutate(climate = "historic") %>% filter(AGB != 0) PNV_LANDCLIM_ROSALIA_rcp45 <- read.csv("../../materials/ROSALIA/output/PNV_results/LandClim/PNV_LANDCLIM_ROSALIA_RCP45.csv", 
stringsAsFactors = FALSE) %>% mutate(climate = "RCP45") %>% filter(AGB != 0) PNV_LANDCLIM_ROSALIA_rcp85 <- read.csv("../../materials/ROSALIA/output/PNV_results/LandClim/PNV_LANDCLIM_ROSALIA_RCP85.csv", stringsAsFactors = FALSE) %>% mutate(climate = "RCP85" )%>% filter(AGB != 0) PNV_LANDCLIM_ROSALIA <- bind_rows(PNV_LANDCLIM_ROSALIA_historic, PNV_LANDCLIM_ROSALIA_rcp45, PNV_LANDCLIM_ROSALIA_rcp85) %>% mutate(species = case_when(species == "pinucemb" ~ "pice", species == "betupube" ~ "bepu", species == "popunigr" ~ "poni", species == "salialba" ~ "saal", species == "ilexaqui" ~ "ilaq", TRUE ~ .$species)) # both models in one data table PNV_ROSALIA <- setDT(bind_rows(PNV_ILAND_ROSALIA, PNV_LANDCLIM_ROSALIA)) write_csv(PNV_ROSALIA, "../r/pnv_processed/pnv_rosalia.csv") # landscape plots --------------------------------------------------------- PNV_ROSALIA <-setDT(read.csv("../r/pnv_processed/pnv_rosalia.csv")) # define species colours and factor levels cols <- c("fasy"="#33CC33", "piab"="#006600", "quro"="#FF7F00", "qupe"="#FF9900", "qupu"="#CC9900", "abal"="#003300", "acca"="#F3F781", "acpl"="#86B404", "acps"="#58FAD0", "algl"="#61210B", "alin"="#A4A4A4", "alvi"="#0B3B17", "bepe"="#2E64FE", "bepu"="#FF7F00", "cabe"="#F7BE81", "casa"="#A9F5A9", "coav"="#58D3F7", "frex"="#FF0000", "lade"="#8A4B08", "pice"="#FFB90F", "pini"="#610B21", "pimo"="#000035", "pimu"="#000000", "taba"="#7B4F4B", "ilaq"="#8000FF", "juco"="#DF7401", "pisy"="#B18904", "poni"="#000000", "potr"="#610B5E","saca"="#F5ECCE", "saal"="#00C2A0", "soar"="#E6E0F8", "soau"="#B40404", "tico"="#9F81F7", "tipl"="#8000FF", "ulgl"="#DF7401" ) new_order_gg <- c("ulgl", "tipl", "tico", "soau", "soar", "saca", "saal", "potr", "poni", "pisy", "pini", "pice", "lade", "frex", "coav", "casa","cabe", "bepe", "bepu", "alvi", "alin", "algl", "acps", "acpl", "acca", "abal","qupu", "qupe","quro", "piab", "fasy") cols_map <- 
c("01_larch_stonepine"="#FFCC00","02_larch"="#8A4B08","03_subalpine_spruce"="#006600","04_montane_spruce"="#666633","05_spruce_fir"="#0033CC","06_spruce_fir_beech"="#1A8080","07_beech"="#33CC33", "08_oak_hornbeam"="#FF9900","22_silicate_scotspine"="#B18904","23_black_pine"="#610B21", "unclassified"="#D3D3D3") # create plots landscape_rosalia <- PNV_ROSALIA %>% group_by(model, climate, year, species) %>% summarize(AGB = sum(AGB), AGB_ha = AGB/1222000) %>% ungroup(.) %>% mutate(landscape = "rosalia") all_rosalia <- ggplot(landscape_rosalia, aes(x = year, y = AGB_ha, fill = species)) + geom_area() + scale_fill_manual(values = cols, guide = guide_legend(reverse = TRUE)) + labs(y = "biomass [t/ha]") + labs(tag = "A)") + theme_bw() + theme(panel.grid.major = element_blank(), panel.grid.minor = element_blank(), axis.line = element_line(colour = "black"), plot.title = element_text(hjust = 0.5, size = 22, face = "bold", vjust = 1)) + theme(plot.background = element_rect(colour = NA)) + facet_grid(model ~ climate, scales = "free") pnv_supplement <- all_rosalia + all_dischma + plot_layout(ncol = 1) ggsave("../../results/figures/supplement/pnv_plots.png", pnv_supplement, width = 7, height = 8.5) # DISCHMA ----------------------------------------------------------------- rm(list=ls()) # bring iLand output in FORMASAM format --------------------------------- coord <- raster("../../materials/DISCHMA/gis/EnvGrid_Dischma.asc") %>% rasterToPoints(.) %>% as.data.frame(.) 
%>% rename(ruid = EnvGrid_Dischma) db.conn <- dbConnect(SQLite(), dbname="../../materials/DISCHMA/output/PNV_DISCHMA_historic.sqlite") dbListTables(db.conn) stand_dischma_historic <- dbReadTable(db.conn, "dynamicstand") %>% mutate(model = "ILAND", AGB = rowSums(.[, c("foliagemass_sum", "stemmass_sum", "branchmass_sum")])) dbDisconnect(db.conn) db.conn <- dbConnect(SQLite(), dbname="../../materials/DISCHMA/output/PNV_DISCHMA_RCP45.sqlite") dbListTables(db.conn) stand_dischma_rcp45 <- dbReadTable(db.conn, "dynamicstand") %>% mutate(model = "ILAND", AGB = rowSums(.[, c("foliagemass_sum", "stemmass_sum", "branchmass_sum")])) dbDisconnect(db.conn) db.conn <- dbConnect(SQLite(), dbname="../../materials/DISCHMA/output/PNV_DISCHMA_RCP85.sqlite") dbListTables(db.conn) stand_dischma_rcp85 <- dbReadTable(db.conn, "dynamicstand") %>% mutate(model = "ILAND", AGB = rowSums(.[, c("foliagemass_sum", "stemmass_sum", "branchmass_sum")])) dbDisconnect(db.conn) PNV_ILAND_DISCHMA_historic <- stand_dischma_historic %>% dplyr::select(year, rid, species, AGB, model) %>% rename(ruid = rid) %>% left_join(coord) %>% dplyr::select(x, y, ruid, year, species, AGB, model) %>% mutate(AGB = round(AGB, 3)) PNV_ILAND_DISCHMA_rcp45 <- stand_dischma_rcp45 %>% dplyr::select(year, rid, species, AGB, model) %>% rename(ruid = rid) %>% left_join(coord) %>% dplyr::select(x, y, ruid, year, species, AGB, model) %>% mutate(AGB = round(AGB, 3)) PNV_ILAND_DISCHMA_rcp85 <- stand_dischma_rcp85 %>% dplyr::select(year, rid, species, AGB, model) %>% rename(ruid = rid) %>% left_join(coord) %>% dplyr::select(x, y, ruid, year, species, AGB, model) %>% mutate(AGB = round(AGB, 3)) write_csv(PNV_ILAND_DISCHMA_historic, "../../materials/DISCHMA/output/PNV_results/iLand/PNV_ILAND_DISCHMA_historic.csv") write_csv(PNV_ILAND_DISCHMA_rcp45, "../../materials/DISCHMA/output/PNV_results/iLand/PNV_ILAND_DISCHMA_RCP45.csv") write_csv(PNV_ILAND_DISCHMA_rcp85, 
"../../materials/DISCHMA/output/PNV_results/iLand/PNV_ILAND_DISCHMA_RCP85.csv") rm(list=setdiff(ls(), "all_rosalia")) # load pnv output -------------------------------------------------------- # iLand PNV_ILAND_DISCHMA_historic <- read.csv("../../materials/DISCHMA/output/PNV_results/iLand/PNV_ILAND_DISCHMA_historic.csv", stringsAsFactors = FALSE) %>% mutate(climate = "historic") PNV_ILAND_DISCHMA_rcp45 <- read.csv("../../materials/DISCHMA/output/PNV_results/iLand/PNV_ILAND_DISCHMA_RCP45.csv", stringsAsFactors = FALSE) %>% mutate(climate = "RCP45") PNV_ILAND_DISCHMA_rcp85 <- read.csv("../../materials/DISCHMA/output/PNV_results/iLand/PNV_ILAND_DISCHMA_RCP85.csv", stringsAsFactors = FALSE) %>% mutate(climate = "RCP85") PNV_ILAND_DISCHMA <- bind_rows(PNV_ILAND_DISCHMA_historic, PNV_ILAND_DISCHMA_rcp45, PNV_ILAND_DISCHMA_rcp85) # LandClim PNV_LANDCLIM_DISCHMA_historic <- read.csv("../../materials/DISCHMA/output/PNV_results/LandClim/PNV_LANDCLIM_DISCHMA_historic.csv", stringsAsFactors = FALSE) %>% mutate(climate = "historic") PNV_LANDCLIM_DISCHMA_rcp45 <- read.csv("../../materials/DISCHMA/output/PNV_results/LandClim/PNV_LANDCLIM_DISCHMA_RCP45.csv", stringsAsFactors = FALSE) %>% mutate(climate = "RCP45") PNV_LANDCLIM_DISCHMA_rcp85 <- read.csv("../../materials/DISCHMA/output/PNV_results/LandClim/PNV_LANDCLIM_DISCHMA_RCP85.csv", stringsAsFactors = FALSE) %>% mutate(climate = "RCP85") PNV_LANDCLIM_DISCHMA <- bind_rows(PNV_LANDCLIM_DISCHMA_historic, PNV_LANDCLIM_DISCHMA_rcp45, PNV_LANDCLIM_DISCHMA_rcp85) %>% filter(AGB != 0) # both models in one data table PNV_DISCHMA <- setDT(bind_rows(PNV_ILAND_DISCHMA, PNV_LANDCLIM_DISCHMA)) write_csv(PNV_DISCHMA, "pnv_processed/pnv_dischma.csv") # landscape plots --------------------------------------------------------- PNV_DISCHMA <- setDT(read.csv("pnv_processed/pnv_dischma.csv")) # define species colours and factor levels cols <- c("fasy"="#33CC33", "piab"="#006600", "quro"="#FF7F00", "qupe"="#FF9900", "qupu"="#CC9900", 
"abal"="#003300", "acca"="#F3F781", "acpl"="#86B404", "acps"="#58FAD0", "algl"="#61210B", "alin"="#A4A4A4", "alvi"="#0B3B17", "bepe"="#2E64FE", "bepu"="#FF7F00", "cabe"="#F7BE81", "casa"="#A9F5A9", "coav"="#58D3F7", "frex"="#FF0000", "lade"="#8A4B08", "pice"="#FFB90F", "pini"="#610B21", "pimo"="#000035", "pimu"="#000000", "taba"="#7B4F4B", "ilaq"="#8000FF", "juco"="#DF7401", "pisy"="#B18904", "poni"="#000000", "potr"="#610B5E","saca"="#F5ECCE", "saal"="#00C2A0", "soar"="#E6E0F8", "soau"="#B40404", "tico"="#9F81F7", "tipl"="#8000FF", "ulgl"="#DF7401" ) new_order_gg <- c("ulgl", "tipl", "tico", "soau", "soar", "saca", "saal", "potr", "poni", "pisy", "pini", "pice", "lade", "frex", "coav", "casa","cabe", "bepe", "bepu", "alvi", "alin", "algl", "acps", "acpl", "acca", "abal","qupu", "qupe","quro", "piab", "fasy") cols_map=c("01_larch_stonepine"="#FFCC00","02_larch"="#8A4B08","03_subalpine_spruce"="#006600","04_montane_spruce"="#666633","05_spruce_fir"="#0033CC","06_spruce_fir_beech"="#1A8080","07_beech"="#33CC33", "08_oak_hornbeam"="#FF9900","22_silicate_scotspine"="#B18904","23_black_pine"="#610B21", "unclassified"="#D3D3D3") # create plots landscape_dischma <- PNV_DISCHMA %>% group_by(model, climate, year, species) %>% summarize(AGB = sum(AGB), AGB_ha = AGB/923000) %>% ungroup(.) 
%>% mutate(landscape = "dischma") all_dischma <- ggplot(landscape_dischma, aes(x = year, y = AGB_ha, fill = species)) + geom_area() + scale_fill_manual(values = cols, guide = guide_legend(reverse = TRUE)) + labs(y = "biomass [t/ha]") + theme_bw() + labs(tag = "B)") + theme(panel.grid.major = element_blank(), panel.grid.minor = element_blank(), axis.line = element_line(colour = "black"), plot.title = element_text(hjust = 0.5, size = 22, face = "bold", vjust = 1)) + scale_x_continuous(expand = c(0.0, 0.0)) + theme(plot.background = element_rect(colour = NA), legend.position = "none") + facet_grid(model ~ climate, scales = "free") # pnv plot for supplement ------------------------------------------------- pnv_supplement <- all_rosalia + all_dischma + plot_layout(ncol = 1) ggsave("../../results/figures/supplement/pnv_plots.png", pnv_supplement, width = 7, height = 8.5)
044c2054ecbf8860e0326c312eb9c4c6d611697d
4dee9013b0c82214a989b26a86c2ad5b15b48496
/R/columnLabels.R
a945c06e5d901c6a910c68859302254240f846e6
[]
no_license
fischuu/GenomicTools
7997d65c175362acd4e2380c57f5e52e77c01e10
67b8b7612c4a5ffc5806be6e0e5492e6948ac49d
refs/heads/master
2023-05-01T14:02:58.955074
2023-04-20T12:12:28
2023-04-20T12:12:28
60,615,647
8
2
null
2022-10-19T16:41:40
2016-06-07T13:34:21
R
UTF-8
R
false
false
846
r
columnLabels.R
columnLabels <- function(x){
  # Build the four genotype column labels (hom1, het, hom2, missing) for a
  # biallelic marker, given the observed two-character genotype strings in `x`.
  # "X" is the missing-allele code; the missing genotype label is always "XX".
  #
  # Args:
  #   x: character vector of genotype strings, e.g. c("AA", "AG", "XX").
  # Returns:
  #   character vector of length 4: c(hom1, het, hom2, "XX").

  # Distinct single characters occurring in the genotypes, sorted so that
  # the missing code "X" lands after the nucleotide letters A/C/G/T.
  alleles <- sort(unique(unlist(strsplit(unique(x), ""))))

  # Only missing values observed: fall back to generic labels "A"/"B".
  # The length test must come first: `alleles == "X"` is a vector whenever
  # several alleles are present, and `&&` on an operand of length != 1 is an
  # error from R 4.3.0 (previously only the first element was silently used).
  if(length(alleles) == 1 && alleles == "X"){
    alleles <- c("A", "B", "X")
  }
  # Monomorphic marker without missing values: pad with dummy "B" and "X".
  if(length(alleles) == 1 && alleles != "X"){
    alleles <- c(alleles[1], "B", "X")
  }
  if(length(alleles) == 2){
    if(is.element("X", alleles)){
      # One real allele plus the missing code (sorted into slot 2):
      # replace "X" by the dummy second allele "B", then re-append "X".
      alleles[2] <- "B"
      alleles[3] <- "X"
    } else {
      # Two real alleles, no missing values seen: append the missing code.
      alleles[3] <- "X"
    }
  }

  # Prefer whichever heterozygote spelling actually occurs in the data
  # ("AB" vs "BA"); fall back to allele order if neither (or both) occur.
  hetOpt <- c(paste0(alleles[1], alleles[2]), paste0(alleles[2], alleles[1]))
  takeThis <- which(is.element(hetOpt, x))
  if(length(takeThis) != 1) takeThis <- 1

  genotypes <- character(4)
  genotypes[1] <- paste0(alleles[1], alleles[1])
  genotypes[2] <- hetOpt[takeThis]
  genotypes[3] <- paste0(alleles[2], alleles[2])
  genotypes[4] <- "XX"
  genotypes
}
b3bde358e0b86451aa88320fd71d2e5c71c6326c
515f7bdfc17a76b1d8a5d75a43a3be935a474aae
/man/ElementRecog.Rd
b1f7b49c42dae4782a077d9ec1715ea2e7f97979
[]
no_license
bhklab/CREAM
22d6b4942f6c5e097d55b6f341e09413e2c181b1
718264747fd4f6886ffcfc260d70fb2b2892b04d
refs/heads/master
2021-06-03T00:02:07.031151
2021-02-11T01:54:32
2021-02-11T01:54:32
97,014,476
14
4
null
2017-10-24T16:19:35
2017-07-12T14:00:38
R
UTF-8
R
false
true
1,332
rd
ElementRecog.Rd
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/ElementRecog.R \name{ElementRecog} \alias{ElementRecog} \title{ElementRecog is a function to identify COREs} \usage{ ElementRecog(InputData, windowSize_Vec, peakNumMax, peakNumMin) } \arguments{ \item{InputData}{The input data as a table including chromosome regions in which the first column is chromosome annotation, and second and third columns are start and ending positions.} \item{windowSize_Vec}{Vector of window sizes ordered based on order of CORE} \item{peakNumMax}{Maximum order of COREs (e.g. maximum number of peaks within COREs)} \item{peakNumMin}{Minimum order of COREs (e.g. minimum number of peaks within COREs)} } \value{ Identified COREs for the given input regions } \description{ ElementRecog is a function to identify COREs } \examples{ InputData <- read.table(system.file("extdata", "A549_Chr21.bed", package = "CREAM"), sep="\\t") colnames(InputData) <- c("chr", "start", "end") MinLength <- 1000 if(nrow(InputData) < MinLength){ stop(paste( "Number of functional regions is less than ", MinLength, ".", sep = "", collapse = "")) } peakNumMin <- 2 WScutoff <- 1.5 WindowVecFinal <- WindowVec(InputData, peakNumMin, WScutoff) OutputList <- ElementRecog(InputData, WindowVecFinal, (1+length(WindowVecFinal)), peakNumMin) }
e4ece3182af20922df0048b4abe32420977a7ca1
02b794c7cb49497868da94b411ab78e6d9e0162f
/R/summary-method.R
6b854d85fecfae575c673af23ebcd104e27e9fba
[]
no_license
cran/iCARH
c5fc09a2f23b3253f27149e23c354750b5839e74
75dbc3f6d441f4f6d85d8c72b290ea282a514b7c
refs/heads/master
2021-07-12T01:02:42.197343
2020-08-27T06:50:07
2020-08-27T06:50:07
196,477,121
0
0
null
null
null
null
UTF-8
R
false
false
3,750
r
summary-method.R
#' @title Summarize and return model parameters
#'
#' @description Group of functions to summarize and return model parameters of interest
#'
#' @describeIn iCARH.params Summary of model parameters
#'
#' @param fit Object returned by iCARH.model
#' @param pars Parameters of interest ("theta","alpha","beta","phi"). All parameters by default.
#' @param path.names Specify pathway names.
#' @param prob Confidence level. Defaults to 0.95.
#' @param use_cache passed to stan summary method.
#' @param digits The number of significant digits for printing out the summary;
#' defaults to 2. The effective sample size is always rounded to integers.
#' @param ... not used currently
#'
#' @return Contains summaries for all chains. Included in the summaries are means, standard deviations (Est.Error), effective sample sizes (Eff.Sample), and split Rhats.
#' Monte Carlo standard errors (MC.Error) are also reported.
#'
#' @examples data.sim = iCARH.simulate(4, 10, 14, 8, 2, path.probs=0.3, Zgroupeff=c(0,4),
#' beta.val=c(1,-1,0.5, -0.5))
#' XX = data.sim$XX
#' Y = data.sim$Y
#' Z = data.sim$Z
#' pathways = data.sim$pathways
#' \donttest{
#' rstan_options(auto_write = TRUE)
#' options(mc.cores = 2)
#' fit = iCARH.model(XX, Y, Z, groups=rep(c(0,1), each=5), pathways,
#' control = list(adapt_delta = 0.99, max_treedepth=10), iter = 2, chains = 2)
#' if(!is.null(fit$icarh))
#' iCARH.params(fit)}
#'
#'
#' @importFrom rstan summary
#' @export iCARH.params

iCARH.params <- function(fit, pars=c("theta","alpha","beta","phi"), path.names=NULL,
                         prob = 0.95, use_cache = TRUE, digits=2, ...){
  # Tail probabilities of the two-sided credible interval (e.g. 0.025/0.975).
  probs = c((1 - prob) / 2, 1 - (1 - prob) / 2)
  # Posterior summary of the underlying Stan fit, restricted to `pars`.
  fit_summary = summary(fit$icarh, pars = pars, probs = probs, use_cache = use_cache)$summary
  ci <- paste0(probs * 100, "%")
  colnames(fit_summary) <- c("Estimate", "MC.Error", "Est.Error", ci, "Eff.Sample", "Rhat")
  # Variable names are stored in the third dimension of the data arrays.
  xnames = attr(fit$X, "dimnames")[[3]]
  ynames = attr(fit$Y, "dimnames")[[3]]
  # Number of pathways (columns of the pathway-coefficient array).
  P = dim(iCARH.getPathwaysCoeff(fit))[2]
  # Presumably kept for a convergence-check side effect on the Rhat values;
  # the returned value itself is not used further -- TODO confirm.
  rhats = iCARH.checkRhats(fit)

  cat("\nResponse: ")
  if(setequal(fit$drug, c(0,1))) cat(" Binary.\n") else cat(" Continuous.\n")
  cat("Data: ")
  cat(nrow(fit$X), " time points, ", ncol(fit$X), " observations, ", P, "pathways.\n ")
  cat("X has ", length(xnames), " variables, Y has ", length(ynames), " variables.\n")
  cat("MCMC samples: ")
  cat(fit$icarh@sim$chains, " chains, ", fit$icarh@sim$iter, " iterations each with ",
      fit$icarh@sim$warmup, " warmup samples.\n")

  if ("theta" %in% pars){
    cat("\nTemporal Effects (theta):\n")
    # Rows named "theta[i]" / "theta[i,j]" in the Stan summary.
    tempo = fit_summary[grepl("^theta\\[([0-9]+,*)*\\]$", rownames(fit_summary)),]
    rownames(tempo) = xnames
    colnames(tempo) = colnames(fit_summary)
    print(tempo, digits=digits)
  }
  if("alpha" %in% pars){
    cat("\nTreatment Effect (alpha):\n")
    treat = fit_summary[grepl("^alpha\\[([0-9]+,*)*\\]$",rownames(fit_summary)),]
    rownames(treat) = xnames
    colnames(treat) = colnames(fit_summary)
    print(treat, digits=digits)
  }
  # `&&` (not `&`): both operands are scalars and the second need not be
  # evaluated when "beta" was not requested.
  if(("beta" %in% pars) && !is.null(fit$Y)){
    cat("\nEffect of Y variables (beta):\n")
    yeff = fit_summary[grepl("^beta\\[([0-9]+,*)*\\]$",rownames(fit_summary)),]
    # One row per (X variable, Y variable) pair, labelled "x/y".
    rownames(yeff) = paste0(rep(xnames, each=length(ynames)),"/",ynames)
    colnames(yeff) = colnames(fit_summary)
    print(yeff, digits=digits)
  }
  if("phi" %in% pars){
    cat("\nPathway Coefficients:\n")
    path.names = if(is.null(path.names)) paste("path", seq_len(P)) else path.names
    path = fit_summary[grepl("^phi\\[([0-9]+,*)*\\]$",rownames(fit_summary)),]
    # Two rows per pathway (controls then cases), labelled "name/group".
    # (Previously paste0(..., sep="/") was used; paste0() has no `sep`
    # argument, so "/" was concatenated onto the end of each label instead
    # of separating name and group.)
    rownames(path) = paste(rep(path.names, each=2), c("controls", "cases"), sep="/")
    colnames(path) = colnames(fit_summary)
    print(path, digits=digits)
  }
  cat("\nWAIC: ", iCARH.waic(fit), ".\n")
  return(fit_summary)
}
bd2953bea86bbd4cd267fac51ce8b4ef659e6656
e82101a5856af37a88679820ae82e2cf30afd9b6
/code/hubway.R
9cf34461fb8646ef13999251db574791b39c8b32
[]
no_license
ekonlab/dvux
f3e37ed75c945a4f4ddf1c7c8ade97bac248e8d7
c781c8a0a1e59c311b28a443bd49387d8dd2a22b
refs/heads/master
2021-09-03T17:52:18.558210
2018-01-10T21:41:52
2018-01-10T21:41:52
108,869,577
0
0
null
null
null
null
UTF-8
R
false
false
5,840
r
hubway.R
## Hubway (Boston bike-share) 2013 short-trips data -- exploratory
## visualization script.  Reads a local CSV, then builds a series of
## one-off ggplot objects (g0..g18) exploring trips by location, gender,
## subscriber type, weekday, hour and month.
options(scipen=999) # avoid scientific notation in printed output
# Set up current directory
library(ggplot2)
library(dplyr)
library(tidyr)
library(lubridate)
library(ggmap)
# NOTE(review): hard-coded absolute path; only works on the author's machine.
setwd("/Users/albertogonzalez/Dropbox/work_17/bestiario/Mark")
# Import the Hubway short-trips data set and take an initial overview
hubway_1 = read.csv("hubway_shorttrips-2013.csv")
str(hubway_1)
head(hubway_1)
# Let's start checking some important variables, like the dates range
dates_range = as.data.frame(table(hubway_1$start_date))
str(dates_range)
head(dates_range)
# Histogram of trips per day
g0 = ggplot(dates_range,aes(Freq)) + geom_histogram()
g0 + theme_minimal()
# Filter a single day to visualize its trips
one_day = hubway_1 %>% filter(start_date == "10/1/2013")
# Map of trip end points for that day (ggmap needs network access for tiles)
q = get_map(location = "boston university", zoom = 13)
q_1 = ggmap(q)
q_1 + geom_point(aes(x=end_statn_long, y=end_statn_lat,size = duration), data=one_day,col = "red",alpha=0.4, shape = 21) + scale_size(range=c(1,10)) + facet_wrap(~subsc_type)
#q_1 + geom_point(aes(x=end_statn_long, y=end_statn_lat,size = duration), data=hubway_1,col = "red",alpha=0.4, shape = 21) + scale_size(range=c(1,10)) + facet_wrap(~start_hour_2)
# Longitude/latitude of trip starts by gender, for the selected day
g1 = ggplot(one_day,aes(start_statn_long,start_statn_lat)) + geom_point() + facet_wrap(~gender)
g1 + theme_minimal()
# Add duration as size and facet by gender
g2 = ggplot(one_day,aes(start_statn_long,start_statn_lat, size = duration)) + geom_point() + facet_wrap(~gender)
g2 + theme_minimal()
# Add some alpha to avoid overplotting
g3 = ggplot(one_day,aes(start_statn_long,start_statn_lat, size = duration)) + geom_point(alpha = 0.5) + facet_wrap(~gender)
g3 + theme_minimal()
# Remove circle fill (hollow shape 21) to improve legibility
g4 = ggplot(one_day,aes(start_statn_long,start_statn_lat, size = duration)) + geom_point(shape = 21) + facet_wrap(~gender)
g4 + theme_minimal()
# Add gender as fill
g5 = ggplot(one_day,aes(start_statn_long,start_statn_lat, size = duration, fill = gender)) + geom_point(shape = 21)
g5 + theme_minimal()
# Facet by type of user; g6-g8 only vary the point shape (21/22/23)
g6 = ggplot(one_day,aes(start_statn_long,start_statn_lat, size = duration, fill = gender)) + geom_point(shape = 21) + facet_wrap(~subsc_type)
g6 + theme_minimal()
g7 = ggplot(one_day,aes(start_statn_long,start_statn_lat, size = duration, fill = gender)) + geom_point(shape = 22) + facet_wrap(~subsc_type)
g7 + theme_minimal()
g8 = ggplot(one_day,aes(start_statn_long,start_statn_lat, size = duration, fill = gender)) + geom_point(shape = 23) + facet_wrap(~subsc_type)
g8 + theme_minimal()
# Assign day of week and study seasonality
head(hubway_1)
str(hubway_1)
# From factor to Date (dates are month/day/year in this CSV)
hubway_1$start_date = as.Date(hubway_1$start_date,format = "%m/%d/%Y")
str(hubway_1)
head(hubway_1)
hubway_1$week_day = wday(hubway_1$start_date,label = TRUE)
head(hubway_1)
# Total trips by weekday
trips_by_day = as.data.frame(table(hubway_1$week_day))
trips_by_day
g9 = ggplot(trips_by_day,aes(Var1,Freq)) + geom_bar(stat = "identity")
g9 + theme_minimal()
# Lon/lat, duration as size, gender as fill, weekday as facet
g10 = ggplot(hubway_1,aes(start_statn_long,start_statn_lat, size = duration, fill = gender)) + geom_point(shape = 21)
g10 + theme_minimal() + facet_wrap(~week_day)
# Move gender into the facet grid instead of an aesthetic
g11 = ggplot(hubway_1,aes(start_statn_long,start_statn_lat)) + geom_point(shape = 3)
g11 + theme_minimal() + facet_grid(gender~week_day)
# Could the starting hour of the day help us?
# First convert the start-time factor to a period, then extract the hour
head(hubway_1)
hubway_1$start_hour_1 = hms(hubway_1$start_time)
hubway_1$start_hour_2 = hour(hubway_1$start_hour_1)
# Five variables together in the same viz (note: g11 is reassigned here)
g11 = ggplot(hubway_1,aes(start_statn_long,start_statn_lat,fill=gender)) + geom_point(shape = 21) + theme_minimal() + facet_grid(start_hour_2~week_day)
g11
g12 = ggplot(hubway_1,aes(start_statn_long,start_statn_lat,fill=gender)) + geom_point(shape = 21) + theme_minimal() + facet_wrap(~start_hour_2)
g12
# We can also add the month to see if it shows any pattern
hubway_1$month = month(hubway_1$start_date)
g13 = ggplot(hubway_1,aes(start_statn_long,start_statn_lat,fill=gender)) + geom_point(shape = 21) + theme_minimal() + facet_wrap(~month)
g13
# Still difficult to spot seasonality: switch from x/y scatter to a
# time-series line of raw durations
g14 = ggplot(hubway_1,aes(start_date,duration, group = gender)) + geom_line() + theme_minimal()
g14
# Aggregate: daily trip counts by gender
g14
hubway_2 = hubway_1 %>% group_by(start_date,gender)%>% summarise(count = n())
head(hubway_2)
hubway_3 = as.data.frame(hubway_2)
str(hubway_3)
# Plot results
g15 = ggplot(hubway_3,aes(start_date,count,group=gender,color=gender)) + geom_line() + theme_minimal()
g15
# Add weekday to the groupings
hubway_4 = hubway_1 %>% group_by(start_date,gender,week_day)%>% summarise(count = n())
hubway_5 = as.data.frame(hubway_4)
head(hubway_5)
# Plot results
g16 = ggplot(hubway_5,aes(start_date,count,group=gender,color=gender)) + geom_line() + theme_minimal() + facet_wrap(~week_day)
g16
# Add start hour to the groupings
hubway_6 = hubway_1 %>% group_by(start_date,gender,start_hour_2)%>% summarise(count = n())
hubway_7 = as.data.frame(hubway_6)
head(hubway_7)
# Plot results
g17 = ggplot(hubway_7,aes(start_date,count,group=gender,color=gender)) + geom_line() + theme_minimal() + facet_wrap(~start_hour_2)
g17
# Add start station to the groupings (one facet per station)
hubway_8 = hubway_1 %>% group_by(start_date,gender,strt_statn_name)%>% summarise(count = n())
hubway_9 = as.data.frame(hubway_8)
head(hubway_9)
# Plot results
g18 = ggplot(hubway_9,aes(start_date,count,group=gender,color=gender)) + geom_line() + theme_minimal() + facet_wrap(~strt_statn_name)
g18
03324218b284fdfa26a87c622760f77f1d2bd6a2
d14bcd4679f0ffa43df5267a82544f098095f1d1
/R/plot.groupm.out.R
b3b0cd6924619461b1b63172e6c8f76aabb1e208
[]
no_license
anhnguyendepocen/SMRD
9e52aa72a5abe5274f9a8546475639d11f058c0d
c54fa017afca7f20255291c6363194673bc2435a
refs/heads/master
2022-12-15T12:29:11.165234
2020-09-10T13:23:59
2020-09-10T13:23:59
null
0
0
null
null
null
null
UTF-8
R
false
false
1,995
r
plot.groupm.out.R
#' Plot a group multiple-regression fit against a single focus variable,
#' optionally conditioning on fixed values of the remaining explanatory
#' variables, then delegating the drawing to \code{plot.alt.fit}.
#'
#' @export
plot.groupm.out <- function (x, focus.variable, fixed.other.values,
                             range.of.focus = range(xmat(data.ld)[[focus.variable]]),
                             ylim = c(NA, NA), xlim = c(NA, NA),
                             xlab = NULL, ylab = NULL,
                             grids = FALSE,
                             title.option = GetSMRDDefault("SMRD.TitleOption"),
                             response.on.yaxis = TRUE,
                             dummy.for.fixed = FALSE,
                             point.from.xmat = NULL,
                             density.at = "Automatic",
                             censor.time = NULL,
                             quant.lines = c(0.1, 0.5, 0.9),
                             add = FALSE,
                             plot.quant.labels = TRUE,
                             my.title = NULL,
                             include.data = FALSE,
                             ...)
{
    ## Defaults above use TRUE/FALSE rather than the reassignable T/F.
    ## `x` is either a groupm.out object itself or a list whose first
    ## element is one; detect which by looking for the "life.data" class
    ## on x[[1]].  (Plain if/else replaces the original `if`(cond, a, b)
    ## call, which hid the assignments inside the call's arguments.)
    if (!is.onlist("life.data", oldClass(x[[1]]))) {
        groupm.out <- x[[1]]
    } else {
        groupm.out <- x
    }
    ## Must be assigned before `range.of.focus` is first used: the default
    ## expression for that argument references `data.ld` lazily.
    data.ld <- groupm.out$data.ld
    if (!missing(fixed.other.values)) {
        ## Condition the fit on fixed values of the non-focus variables.
        dummy.groupm.out <- get.conditional.groupm.out(focus.variable = focus.variable,
            fixed.other.values = fixed.other.values, groupm.out = groupm.out)
    }
    else {
        if (is.null(groupm.out$focus.variable))
            groupm.out$focus.variable <- focus.variable
        dummy.groupm.out <- groupm.out
    }
    ## Delegate the actual drawing.  NOTE(review): `dummy.for.fixed` and
    ## `point.from.xmat` are accepted but never used in this body --
    ## possibly kept for interface compatibility; confirm before removing.
    plot.alt.fit(x = dummy.groupm.out, ylim = ylim, xlim = xlim,
        xlab = xlab, ylab = ylab, grids = grids, title.option = title.option,
        response.on.yaxis = response.on.yaxis, my.title = my.title,
        include.data = include.data, density.at = density.at,
        censor.time = censor.time, quant.lines = quant.lines,
        add = add, plot.quant.labels = plot.quant.labels,
        range.of.focus = range.of.focus, ...)
    ## Called for its plotting side effect; no meaningful value to return.
    invisible()
}
d460636910caac6b257d178de23087e18a105154
7503dba6d36a46fa2501bb5dd5c874bc44d4805f
/man/MyWpdf.Rd
9148a2f0110b8c13edb68c549614589e54efb3a8
[]
no_license
IvanNavarroCytel/MyPack
ce8f2e358442d374e0ed6317dde29f7f4ada5c7e
61afc2d17e859d7fc5e338b00202ee04fb52d261
refs/heads/master
2021-04-29T21:07:43.821898
2018-01-24T17:16:19
2018-01-24T17:16:19
121,609,216
0
0
null
null
null
null
UTF-8
R
false
true
442
rd
MyWpdf.Rd
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/MyWeibull.R
\name{MyWpdf}
\alias{MyWpdf}
\title{PDF of Weibull distribution}
\usage{
MyWpdf(x, a, b)
}
\arguments{
\item{x}{value.}

\item{a}{Shape parameter.}

\item{b}{Scale parameter.}
}
\value{
Probability density.
}
\description{
This function calculates the probability density for x given 'a' as the
shape parameter and 'b' as the scale parameter.
}
1280738693c872cbf3545ab27647c1b05025893b
0500ba15e741ce1c84bfd397f0f3b43af8cb5ffb
/cran/paws.machine.learning/man/sagemaker_update_app_image_config.Rd
7baf95a7d6866fcd0f69382f4bd036d77f28221f
[ "Apache-2.0" ]
permissive
paws-r/paws
196d42a2b9aca0e551a51ea5e6f34daca739591b
a689da2aee079391e100060524f6b973130f4e40
refs/heads/main
2023-08-18T00:33:48.538539
2023-08-09T09:31:24
2023-08-09T09:31:24
154,419,943
293
45
NOASSERTION
2023-09-14T15:31:32
2018-10-24T01:28:47
R
UTF-8
R
false
true
696
rd
sagemaker_update_app_image_config.Rd
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/sagemaker_operations.R \name{sagemaker_update_app_image_config} \alias{sagemaker_update_app_image_config} \title{Updates the properties of an AppImageConfig} \usage{ sagemaker_update_app_image_config( AppImageConfigName, KernelGatewayImageConfig = NULL ) } \arguments{ \item{AppImageConfigName}{[required] The name of the AppImageConfig to update.} \item{KernelGatewayImageConfig}{The new KernelGateway app to run on the image.} } \description{ Updates the properties of an AppImageConfig. See \url{https://www.paws-r-sdk.com/docs/sagemaker_update_app_image_config/} for full documentation. } \keyword{internal}
031c7b369d42a44b833ebf00533cb21bc8e74553
f2bfd5ceae6bf32cebc28cf18740a8b44e010e7b
/pkg/retistruct/R/RetinalReconstructedOutline.R
d78ff7da94fe6a055d7e38dd402a67946106ddb5
[]
no_license
davidcsterratt/retistruct
602972d127b7119df3fda54ac915228d7ac854d1
f7075b0a8ac84fdc9773300d553c26a11b45ce2e
refs/heads/master
2023-08-09T20:08:39.039964
2023-07-29T09:27:35
2023-07-29T09:27:35
25,682,590
5
7
null
2017-07-29T09:14:58
2014-10-24T10:05:33
R
UTF-8
R
false
false
11,295
r
RetinalReconstructedOutline.R
##' A version of \link{ReconstructedOutline} that is specific to
##' retinal datasets
##'
##' @description A RetinalReconstructedOutline overrides methods of
##'   \link{ReconstructedOutline} so that they return data point and
##'   landmark coordinates that have been transformed according to the
##'   values of \code{DVflip} and \code{side}. When reconstructing, it
##'   also computes the \dQuote{Optic disc displacement}, i.e. the
##'   number of degrees subtended between the optic disc and the pole.
##'
##' @author David Sterratt
##' @export
RetinalReconstructedOutline <- R6Class("RetinalReconstructedOutline",
  inherit = ReconstructedOutline,
  public = list(
    ##' @field EOD Optic disc displacement in degrees
    EOD = NULL,
    ##' @description Get coordinates of corners of pixels of image in spherical
    ##' coordinates, transformed according to the value of \code{DVflip}
    ##' @return Coordinates of corners of pixels in spherical coordinates
    getIms = function() {
      ims <- super$getIms()
      ## When ol$DVflip is set, mirror the azimuthal coordinate (lambda)
      ## of every pixel corner.
      if (self$ol$DVflip) {
        if (!is.null(ims)) {
          ims[,"lambda"] <- -ims[,"lambda"]
        }
      }
      return(ims)
    },
    ##' @description Get location of tear coordinates in spherical coordinates,
    ##' transformed according to the value of \code{DVflip}
    ##' @return Location of tear coordinates in spherical coordinates
    getTearCoords = function() {
      Tss <- super$getTearCoords()
      if (self$ol$DVflip) {
        ## Mirror lambda of every tear.  NOTE(review): 1:length(Tss) runs
        ## c(1, 0) when Tss is empty; seq_along(Tss) would be safer --
        ## confirm whether an empty tear list can occur here.
        for (i in 1:length(Tss)) {
          Tss[[i]][,"lambda"] <- -Tss[[i]][,"lambda"]
        }
      }
      return(Tss)
    },
    ##' @description Reconstruct the outline, then derive the optic disc
    ##' displacement (EOD) from the "OD" landmark, if present.
    ##' @param ... Parameters to \code{\link{ReconstructedOutline}}
    reconstruct = function(...) {
      super$reconstruct(...)
      OD <- self$getFeatureSet("LandmarkSet")$getFeature("OD")
      if (!is.null(OD)) {
        ## Karcher mean of the optic disc points on the sphere; its "phi"
        ## (radians) is converted to degrees and offset by 90 to give the
        ## displacement from the pole.
        ODmean <- karcher.mean.sphere(OD)
        self$EOD <- 90 + ODmean["phi"]*180/pi
      }
    },
    ##' @description Get \link{ReconstructedFeatureSet}, transformed
    ##' according to the value of \code{DVflip}
    ##' @param type Base type of \link{FeatureSet} as string.
    ##' E.g. \code{PointSet} returns a \link{ReconstructedPointSet}
    getFeatureSet = function(type) {
      fs <- super$getFeatureSet(type)
      if (self$ol$DVflip) {
        ## NOTE(review): `self$fst` is never assigned and is not a declared
        ## field, so it is always NULL here and the clone is rebuilt on
        ## every call; were a cached self$fst ever non-NULL, the local
        ## `fst` below would be undefined at return.  Looks like an
        ## unfinished caching attempt -- confirm.
        if (is.null(self$fst)) {
          fst <- fs$clone()
          ## Mirror lambda in every feature's coordinate matrix.
          fst$data <- lapply(fs$data, function(x) {
            x[,"lambda"] <- -x[,"lambda"]
            return(x)
          })
        }
        return(fst)
      }
      return(fs)
    }
  )
)

##' Plot projection of reconstructed dataset
##' @param r \code{\link{RetinalReconstructedOutline}} object
##' @param transform Transform function to apply to spherical coordinates
##' before rotation
##' @param projection Projection in which to display object,
##' e.g. \code{\link{azimuthal.equalarea}} or \code{\link{sinusoidal}}
##' @param axisdir Direction of axis (North pole) of sphere in external space
##' @param proj.centre Location of centre of projection as matrix with
##' column names \code{phi} (elevation) and \code{lambda} (longitude).
##' @param lambdalim Limits of longitude (in degrees) to display
##' @param datapoints If \code{TRUE}, display data points
##' @param datapoint.means If \code{TRUE}, display Karcher mean of data points.
##' @param datapoint.contours If \code{TRUE}, display contours around
##' the data points generated using Kernel Density Estimation.
##' @param grouped If \code{TRUE}, display grouped data.
##' @param grouped.contours If \code{TRUE}, display contours around
##' the grouped data generated using Kernel Regression.
##' @param landmarks If \code{TRUE}, display landmarks.
##' @param mesh If \code{TRUE}, display the triangular mesh used in reconstruction
##' @param grid If \code{TRUE}, show grid lines
##' @param image If \code{TRUE}, show the reconstructed image
##' @param ids IDs of groups of data within a dataset, returned using
##' \code{getIDs}.
##' @param ... Graphical parameters to pass to plotting functions
##' @method projection RetinalReconstructedOutline
##' @export
projection.RetinalReconstructedOutline <- function(r,
                                                   transform=identity.transform,
                                                   projection=azimuthal.equalarea,
                                                   axisdir=cbind(phi=90, lambda=0),
                                                   proj.centre=cbind(phi=0, lambda=0),
                                                   lambdalim=c(-180, 180),
                                                   datapoints=TRUE,
                                                   datapoint.means=TRUE,
                                                   datapoint.contours=FALSE,
                                                   grouped=FALSE,
                                                   grouped.contours=FALSE,
                                                   landmarks=TRUE,
                                                   mesh=FALSE,
                                                   grid=TRUE,
                                                   image=TRUE,
                                                   ids=r$getIDs(),
                                                   ...) {
  ## Latitude limits: full sphere for sinusoidal/orthographic projections,
  ## otherwise capped at the outline's rim angle phi0 (converted to degrees).
  philim <- c(-90, 90)
  colatitude <- FALSE
  pole <- TRUE
  if (!(identical(projection, sinusoidal) | identical(projection, orthographic))) {
    philim <- c(-90, r$ol$phi0*180/pi)
    colatitude <- TRUE
    pole <- FALSE
  }
  ## NOTE(review): `pole` is set but never used below, and `colatitude` is
  ## computed here yet both NextMethod() calls pass colatitude=TRUE
  ## unconditionally -- confirm whether this is intentional.
  ## Compass labels; N and T are presumably Nasal/Temporal and are swapped
  ## between right and left eyes -- TODO confirm.
  if (r$ol$side=="Right") {
    labels=c("N", "D", "T", "V")
  } else {
    labels=c("T", "D", "N", "V")
  }
  ## First pass: draw the base projection (image only, no grid/mesh yet).
  NextMethod(projection=projection, philim=philim, labels=labels,
             colatitude=TRUE, grid=FALSE, mesh=FALSE, image=image)

  ## Plot FeatureSets
  ## Datapoints
  if (datapoints) {
    message("Plotting points")
    fs <- r$getFeatureSet("PointSet")
    if (!is.null(fs)) {
      projection.ReconstructedPointSet(fs, phi0=r$phi0, ids=ids,
                                       transform=transform,
                                       axisdir=axisdir,
                                       projection=projection,
                                       proj.centre=proj.centre, ...)
    }
  }
  ## Mean datapoints (Karcher mean per ID, drawn as filled diamonds)
  if (datapoint.means) {
    message("Plotting point means")
    fs <- r$getFeatureSet("PointSet")
    if (!is.null(fs)) {
      Dss.mean <- fs$getMean()
      for (id in ids) {
        if (!is.null(Dss.mean[[id]])) {
          points(projection(rotate.axis(transform(Dss.mean[[id]], phi0=r$phi0),
                                        axisdir*pi/180),
                            proj.centre=pi/180*proj.centre),
                 bg=fs$cols[[id]], col="black", pch=23, cex=1.5)
        }
      }
    }
  }
  ## Count sets, formerly known as groups
  if (grouped) {
    message("Plotting counts")
    fs <- r$getFeatureSet("CountSet")
    if (!is.null(fs)) {
      projection.ReconstructedCountSet(fs, phi0=r$phi0, ids=ids,
                                       transform=transform,
                                       axisdir=axisdir,
                                       projection=projection,
                                       proj.centre=proj.centre, ...)
    }
  }
  ## KDE contours around the data points
  if (datapoint.contours) {
    message("Plotting point contours")
    fs <- r$getFeatureSet("PointSet")
    if (!is.null(fs)) {
      k <- fs$getKDE()
      for (id in ids) {
        if (!is.null(k[[id]])) {
          css <- k[[id]]$contours
          for(cs in css) {
            suppressWarnings(lines(projection(rotate.axis(transform(cs, phi0=r$phi0),
                                                          axisdir*pi/180),
                                              lambdalim=lambdalim*pi/180,
                                              lines=TRUE,
                                              proj.centre=pi/180*proj.centre),
                                   col=fs$cols[[id]]))
          }
          ## FIXME: contours need to be labelled
        }
      }
      ## Plot locations of highest contours
      for (id in ids) {
        if (!is.null(k[[id]])) {
          suppressWarnings(points(projection(rotate.axis(transform(k[[id]]$maxs, phi0=r$phi0),
                                                         axisdir*pi/180),
                                             proj.centre=pi/180*proj.centre),
                                  pch=22, cex=1, lwd=1, col="black", bg=fs$cols[[id]]))
        }
      }
    }
  }
  ## Kernel-regression contours around the grouped counts
  if (grouped.contours) {
    message("Plotting count contours")
    fs <- r$getFeatureSet("CountSet")
    if (!is.null(fs)) {
      k <- fs$getKR()
      for (id in ids) {
        if (!is.null(k[[id]])) {
          css <- k[[id]]$contours
          for(cs in css) {
            lines(projection(rotate.axis(transform(cs, phi0=r$phi0),
                                         axisdir*pi/180),
                             lambdalim=lambdalim*pi/180,
                             lines=TRUE,
                             proj.centre=pi/180*proj.centre),
                  col=fs$cols[[id]])
          }
          ## FIXME: contours need to be labelled
        }
      }
      ## Plot locations of highest contours
      for (id in ids) {
        if (!is.null(k[[id]])) {
          points(projection(rotate.axis(transform(k[[id]]$maxs, phi0=r$phi0),
                                        axisdir*pi/180),
                            proj.centre=pi/180*proj.centre),
                 pch=23, cex=1, lwd=1, col="black", bg=fs$cols[[id]])
        }
      }
    }
  }
  ## Landmarks
  if (landmarks) {
    message("Plotting landmarks")
    fs <- r$getFeatureSet("LandmarkSet")
    if (!is.null(fs)) {
      projection.ReconstructedLandmarkSet(fs, phi0=r$phi0, ids=ids,
                                          transform=transform,
                                          axisdir=axisdir,
                                          projection=projection,
                                          proj.centre=proj.centre, ...)
    }
  }
  ## Second pass: overlay grid and mesh on top of the feature layers.
  NextMethod(projection=projection, philim=philim, labels=labels,
             colatitude=TRUE, grid=grid, add=TRUE, image=FALSE, mesh=mesh)
}

##' Spherical plot of a reconstructed retinal outline, overlaying the data
##' points on the base-class rendering.
##' @method sphericalplot RetinalReconstructedOutline
sphericalplot.RetinalReconstructedOutline <- function(r, datapoints=TRUE, ids=r$getIDs(), ...)
{
  NextMethod()
  if (datapoints) {
    message("Plotting points")
    ## NOTE(review): `projection` is not an argument of this function, so
    ## this relies on a `projection` object visible in the calling
    ## environment -- confirm this is the intended generic.
    sphericalplot.ReconstructedPointSet(r, projection=projection, ids=ids, ...)
  }
}
c35c3988c193b298942f277f51c82f20e1234e49
68812e0861a476b85115fe6ddea9e9a216c49387
/man/several.Rd
40fd4c03e8b7ebced07b7f9e5b6bb5a88b9f5f72
[ "MIT" ]
permissive
txopen/histoc
5a895e4593b7486d66e4a31a3f9a7059f01ce0d4
86893ebc4396eb568f8ce5c85146aca624fe0af8
refs/heads/main
2023-08-24T06:21:32.155839
2023-08-04T19:18:11
2023-08-04T19:18:11
511,302,158
0
1
NOASSERTION
2023-08-04T19:18:13
2022-07-06T21:50:33
R
UTF-8
R
false
true
1,757
rd
several.Rd
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/mult.R \name{several} \alias{several} \title{Runs several time the function donor_recipient_pairs() as bootstrap.} \usage{ several( iteration.number = 10, df.donors = donors, df.candidates = candidates, df.abs = cabs, algorithm = lima, n = 0, seed.number = 123, check.validity = TRUE, ... ) } \arguments{ \item{iteration.number}{Number of times the matchability runs.} \item{df.donors}{A data frame containing demographics and medical information for a pool of donors. For \code{uk} algorithm must have their respective columns.} \item{df.candidates}{A data frame containing demographics and medical information for a group of waitlisted transplant candidates. For \code{uk} algorithm must have respective columns.} \item{df.abs}{A data frame with candidates' antibodies.} \item{algorithm}{The name of the function to use. Valid options are: \code{lima}, \code{et}, \code{pts}, \code{uk} (without quotation)} \item{n}{A positive integer to slice the first candidates.} \item{seed.number}{Seed for new random number. \code{seed.number} can be \code{NA} in which case no seed is applied.} \item{check.validity}{Logical to decide whether to validate input.} \item{...}{all the parameters used on the function algorithm} } \value{ Overall statistics obtained from all runs. } \description{ Generic function that runs the matchability between all combinations of donors and candidates. Runs an arbitrary number of times (\code{iteration.number}) to provide statistics. } \examples{ \donttest{ several(iteration.number = 10, df.donors = donors, df.candidates = candidates, df.abs = cabs, algorithm = lima, n = 0, seed.number = 123, check.validity = TRUE) } }
20788a980ffc18cbe483f7195e80bf767ddddd4c
aab75123aad01298006206831a197533cae83e79
/Assignment 2 ZK - Subcourse 1 FINAL.R
933a01f0fafda8fb53f967d877c814f9317b3b9b
[]
no_license
FridaWP/PSYP14_FridaWP_Assignments
3053a2f01fa6636215996be171516d77fc743d59
06abf55485de733281995f960b3cd24c52a0c7c2
refs/heads/main
2023-01-29T07:36:15.584361
2020-12-04T16:55:31
2020-12-04T16:55:31
318,578,883
0
0
null
null
null
null
UTF-8
R
false
false
4,392
r
Assignment 2 ZK - Subcourse 1 FINAL.R
## PSYP14 Assignment 2 -- regression modelling of postoperative pain.
## Fits a full linear model, derives a backward-selected model, compares it
## with a theory-based model, and validates both on a second dataset.
Data_1_sample = read.csv("https://tinyurl.com/ha-dataset1")

library(tidyverse)
library(ggplot2)
library(dplyr)
library(gridExtra)
library(car)
library(lmtest)
library(psych)
library(sandwich)
library(lm.beta)

data_pain <- Data_1_sample
view(data_pain)
## Exclude participant in row 93.  NOTE(review): hard-coded row index --
## presumably an invalid observation identified earlier; confirm.
data_pain_exclude <- data_pain [-c(93),]
## Correct an apparent data-entry error (STAI_trait 3.9 -> 39).
## NOTE(review): the comparison uses the string "3.9" against what looks
## like a numeric column; it works via coercion, but a numeric comparison
## would be clearer -- confirm the column's type.
data_pain_changed <- data_pain_exclude %>% mutate(STAI_trait = replace(STAI_trait, STAI_trait == "3.9", 39))
view(data_pain_changed)

# Data and model diagnostics: one scatter/box plot of pain per predictor
Plot1 <- data_pain_changed %>% ggplot() + aes(x = age, y = pain) + geom_point() + geom_smooth()
Plot1
Plot2 <- data_pain_changed %>% select(sex, pain) %>% ggplot() + aes(x = sex, y = pain) + geom_boxplot()
Plot2
Plot3 <- data_pain_changed %>% ggplot() + aes(x = STAI_trait, y = pain) + geom_point() + geom_smooth()
Plot3
Plot4 <- data_pain_changed %>% ggplot() + aes(x = pain_cat, y = pain) + geom_point() + geom_smooth()
Plot4
Plot5 <- data_pain_changed %>% ggplot() + aes(x = mindfulness, y = pain) + geom_point() + geom_smooth()
Plot5
Plot6 <- data_pain_changed %>% ggplot() + aes(x = cortisol_serum, y = pain) + geom_point() + geom_smooth()
Plot6
Plot7 <- data_pain_changed %>% ggplot() + aes(x = weight, y = pain) + geom_point() + geom_smooth()
Plot7
Plot8 <- data_pain_changed %>% ggplot() + aes(x = IQ, y = pain) + geom_point() + geom_smooth()
Plot8
Plot9 <- data_pain_changed %>% ggplot() + aes(x = household_income, y = pain) + geom_point() + geom_smooth()
Plot9
grid.arrange(Plot1, Plot2, Plot3, Plot4, Plot5, Plot6, Plot7, Plot8, Plot9, nrow = 3)

## Correct an apparent sign error in household income (-3732 -> 3732).
## NOTE(review): same string-vs-numeric comparison pattern as above.
data_pain_final <- data_pain_changed %>% mutate(household_income = replace(household_income, household_income == "-3732", 3732))
view(data_pain_final)

# Full model with all candidate predictors
full_model <- lm(pain ~age + sex + STAI_trait + pain_cat + mindfulness + cortisol_serum + weight + IQ + household_income, data = data_pain_final)
summary(full_model)
data_pain_final %>% ggplot() + aes(x = household_income, y = pain) + geom_point() + geom_smooth(method = "lm")

# Influential cases: residuals-vs-leverage and Cook's distance
full_model %>% plot(which = 5)
full_model %>% plot(which = 4)
data_pain_final %>% slice(c(3, 102, 113))
# Assumption of normality: Q-Q plot of residuals
full_model %>% plot(which = 2 )
# Skew and kurtosis of the residuals
full_model_res = enframe(residuals(full_model))
full_model_res %>% ggplot() + aes(x = value) + geom_histogram()
describe(residuals(full_model))
# Assumption of linearity
full_model %>% residualPlots()
# Assumption of homoscedasticity
full_model %>% plot(which = 3)
full_model %>% ncvTest()
full_model %>% bptest()
# Assumption of multicollinearity
full_model %>% vif()
full_model %>% summary()
data_pain_final %>% select(pain, age, sex, STAI_trait, pain_cat, mindfulness, cortisol_serum, IQ, weight, household_income) %>% pairs.panels(col = "Blue", lm = T)
AIC(full_model)

# Backward stepwise regression starting from the full model
full_model_back = step(full_model, direction = "backward")
summary(full_model_back)
## Refit the backward-selected predictors explicitly, plus the competing
## theory-based model.
backward_model = lm(pain~age + sex + pain_cat + mindfulness + cortisol_serum + household_income, data = data_pain_final)
theory_based_model = lm(pain ~ age + sex + STAI_trait + pain_cat + mindfulness + cortisol_serum, data = data_pain_final)
summary(theory_based_model)
# Standardised betas and confidence intervals for the backward model
lm.beta(backward_model)
confint(backward_model)
# Comparing the initial (full) model and the backward-regression model
summary(full_model)
summary(backward_model)
AIC(full_model)
AIC(backward_model)
# Comparing the theory-based model and the backward model
summary(backward_model)$adj.r.squared
summary(theory_based_model)$adj.r.squared
AIC(theory_based_model, backward_model)
summary(backward_model)
summary(theory_based_model)

# Testing both models on a new, independent dataset
Home_sample_2 = read.csv("https://tinyurl.com/ha-dataset2")
data_pain2 = Home_sample_2
view(data_pain2)
# Predicted values for each model on the new dataset
pred_theorybased_model <- predict(theory_based_model, data_pain2)
pred_backward_model <- predict(backward_model, data_pain2)
# Sum of squared residuals on the new data (lower = better generalisation)
RSS_theorybased = sum((data_pain2[, "pain"] - pred_theorybased_model)^2)
RSS_backward = sum((data_pain2[, "pain"] - pred_backward_model)^2)
RSS_theorybased
RSS_backward
ee97fa150c049ea8f0d40b2072e0a0223ab3b019
ed25ad1a418c4afc0c3c5ee28108fdf85ff6da48
/openenbewerkkoffie.R
6c72abe20f44a73558be5e388138091dc3d97ca7
[ "MIT" ]
permissive
RMHogervorst/koffie
f67df67cf019c565640c7259abbba79e8647c126
0fb2833d381cc91d3423dd5ef12ef3fd584398fa
refs/heads/master
2016-08-12T14:27:17.273361
2016-02-09T16:58:09
2016-02-09T16:58:09
45,740,643
0
0
null
null
null
null
UTF-8
R
false
false
12,063
r
openenbewerkkoffie.R
################################################################ #date: 6-1-15 #by: Roel Hogervorst #description: het grote koffieproject. #Rversion:R version 3.1.2 (2014-10-31) -- "Pumpkin Helmet" # 22-6-15 #R version 3.2.0 (2015-04-16) -- "Full of Ingredients" # R version 3.2.2 (2015-08-14) -- "Fire Safety" # aanpassing 31-10-15 ################################################################ #klaarmaken van workspace ## Clear R workspace #### rm(list = ls() ) #dit verwijdert alle bestanden uit de workspace. #set working directory (mocht je niet in het goede project zitten) setwd("~/dre-roel map/R/koffie") #openen van bestanden #### ##hierbij hebben we uit het log gekopieert naar de .txt bestanden #die laden we. RoelsKoffie<-read.csv(file="koffie.txt", header=FALSE, as.is=T) SamenKoffie<-read.csv(file="koffie met dre.txt", header=FALSE, as.is=T) DreKoffie <-read.csv(file="coffee dre.txt",header=FALSE, as.is=T) #variabelen toevoegen RoelsKoffie$cat <-"roel alleen" #categorische variabele SamenKoffie$cat <- "Samen" DreKoffie$cat <- "Dre alleen" #eindresultaat is nu drie dataframes met 3 naamloze kolommen. Coffee <-rbind(RoelsKoffie, SamenKoffie, DreKoffie) #combineren tot 1 bestand. colnames(Coffee) <- c("datum", "tijd", "categorie") # namen aan kolommen geven. Coffee$counter<-1 #koffiecounter toevoegen ### functiechecks is het dataframe correct?#### head(Coffee) #als deze correct is, is de rest ook correct. head(DreKoffie) head(RoelsKoffie) head(SamenKoffie) unique(Coffee$categorie) #zitten ze er alledrie in. class(Coffee$datum) class(Coffee$tijd) class(Coffee$categorie) #is nu allemaal character. unique(Coffee$counter) #maar 1. #einde checks. library(dplyr) #kolommen in het juiste type zetten en nieuwe variabelen aanmaken. #### Coffee$datum <- as.Date(Coffee$datum, "%m-%d-%Y") #verandert datum in date format. Coffee$datetime<- as.POSIXct(paste(Coffee$datum, Coffee$tijd, sep=" "), format="%Y-%m-%d %H:%M") Coffee$dag<-weekdays(Coffee$datum) #creeert variabele dag. 
Coffee$getaluur <- gsub("[^[:digit:]]", "", Coffee$tijd) #combineer getallen tot 1. Coffee$getaluur <- as.numeric(Coffee$getaluur) # verander in getallen. Coffee$dagdeel[which(Coffee$getaluur <1200)] <- "ochtend" #wanneer getaluur is onder de 1200 dus voor twaalf uur, maak in andere variable ochend Coffee$dagdeel[which(Coffee$getaluur >= 1200 & Coffee$getaluur <1800)] <- "middag" Coffee$dagdeel[which(Coffee$getaluur >=1800)] <- "avond" Coffee$tijd2<-sapply(strsplit(Coffee$tijd,":"), function(x) { x <- as.numeric(x) x[1]+x[2]/60 } ) #van https://stackoverflow.com/questions/5186972/how-to-convert-time-mmss-to-decimal-form-in-r ##checks. class(Coffee$datetime) #"POSIXct" "POSIXt" head(Coffee$datetime) class(Coffee$tijd) class(Coffee$dag) #character head(Coffee$dag) #geeft dagen weer in het nederlands. head(Coffee$getaluur) #check werking: zijn het getallen? class(Coffee$getaluur) # check type: is het numeric? head(Coffee) qplot(dagdeel, data= Coffee) qplot(dag, data= Coffee) ## einde checks. ##optionele tussenstap om het bestand weg te schrijven#### saveRDS(Coffee, file = "Coffee.Rda") Coffee <-readRDS(file = "Coffee.Rda") #om het weer in te laden. rm(DreKoffie, RoelsKoffie, SamenKoffie) #deze hebben we niet meer nodig. ############### #Grafieken#### library(ggplot2) ##we hebben de ggplot2 nodig voor deze awesome grafieken. library(dplyr) #hebben we misshien ook nodig #qplot(dag, getaluur, data = Coffee, color = categorie, alpha = I(1 / 2)) #plot voor koffiemomenten op de dag. plotdaguur <- qplot(dag, getaluur, data = Coffee, color = categorie) + geom_hline(aes(yintercept= 1200)) # dagen van de week zijn verkeerd. plotdaguur + scale_x_discrete( limits=c("maandag","dinsdag","woensdag", "donderdag", "vrijdag", "zaterdag", "zondag")) +geom_jitter(size=3) ggsave("wekelijkskoffiegebruik.png", width=6, height=4) #kopieer naar bestand. 
qplot(datum, tijd2, data=Coffee)+ geom_hline(aes(yintercept= 12)) #AANPASSING 3-11-15 #dit object bestaat nu uit de data, een 12 uur lijn, en de assen zijn goed. #g = ggplot(aes (x= dag,y= dagdeel, data = Coffee )) #g = g + scale_x_discrete( # limits=c("maandag","dinsdag","woensdag", "donderdag", "vrijdag", # "zaterdag", "zondag")) # g = ggplot(data = InsectSprays, aes(y = count, x = spray, fill = spray)) # g = g + geom_violin(colour = "black", size = 2) # g = g + xlab("Type of spray") + ylab("Insect count") # g #per persoon apart.#### Roel <- filter(Coffee, categorie == "roel alleen") Samen <- filter(Coffee, categorie == "Samen") #plots Rplot<- qplot(dag, getaluur, data = Roel) Rplot + scale_x_discrete(limits=c("maandag","dinsdag","woensdag", "donderdag", "vrijdag", "zaterdag", "zondag")) Rplot ggsave("RoelPlotWeek.png", width=6, height=4) #kopieer naar bestand. #boxplt van Roel over een week. Rboxplot<-qplot(dag, getaluur, data = Roel, geom = "boxplot") Rboxplot + scale_x_discrete(limits=c("maandag","dinsdag","woensdag", "donderdag", "vrijdag", "zaterdag", "zondag")) + geom_hline(aes(yintercept= 1200)) #werkt niet sBoxplot<- qplot(dag, getaluur, data = Samen, geom = "boxplot") sBoxplot + scale_x_discrete(limits=c("maandag","dinsdag","woensdag", "donderdag", "vrijdag", "zaterdag", "zondag")) + geom_hline(aes(yintercept= 1200)) ggsave("wekelijkskoffiegebruikRoeloverdag.png", width=6, height=4) #boxplot samen over een week. Splot<-qplot(dag, getaluur, data = Samen, geom = "boxplot") Splot + scale_x_discrete(limits=c("maandag","dinsdag","woensdag", "donderdag", "vrijdag", "zaterdag", "zondag")) + geom_hline(aes(yintercept= 1200)) ggsave("wekelijkskoffiegebruiksamenoverdag.png", width=6, height=4) #koffie per dag. 
Cplotdag<- qplot(dag, data = Coffee, geom = "histogram", color = categorie) Cplotdag + scale_x_discrete(limits=c("maandag","dinsdag","woensdag", "donderdag", "vrijdag", "zaterdag", "zondag")) ggsave("totaalkoffiegebruik.png", width=6, height=4) #summaries maken? #dingen die vin deed. # DENSITY IN GGPLOT OPROEPEN EN DAN FACETTEN OP DAG.##### plot(density( Coffee[Coffee$dag=="maandag", ]$getaluur, bw=5)) # > plot(density( Coffee[Coffee$dag=="maandag", ]$getaluur, bw=10)) # > plot(density( Coffee[Coffee$dag=="maandag", ]$getaluur, bw=100)) # > plot(density( Coffee[Coffee$dag=="maandag", ]$getaluur, bw=50)) # > plot(density( Coffee[Coffee$dag=="maandag", ]$getaluur, bw=300)) # > plot(density( Coffee[Coffee$dag=="maandag", ]$getaluur, bw=50)) # > plot(density( Coffee[Coffee$dag=="maandag", ]$getaluur, bw=25)) # > plot(density( Coffee[Coffee$dag=="maandag", ]$getaluur, bw=100)) #table(Coffee$dag, Coffee$dagdeel) #werkt, maar ordering is niet goed. Overzichttabel <- table(factor(Coffee$dag, levels = c("maandag", "dinsdag", "woensdag", "donderdag", "vrijdag", "zaterdag", "zondag")) , factor(Coffee$dagdeel, levels = c("ochtend", "middag", "avond"))) Overzichttabel<- as.data.frame.matrix(Overzichttabel ) #omzetten in dataframe mutate(Overzichttabel, perdag = ochtend + middag + avond) #KNMI data #### #tijd koffie is van 2014-11-13 - 2015-6-5 # #alternatief: nieuws in periode opzoeken en classificeren? # library(readr) KNMI<-read_csv("KNMI_20150605.txt",skip = 24, col_names = c("STN","YYYYMMDD", "TG", "TN", "TNH", "TX", "TXH", "T10N", "SQ", "Q", "DR", "RH", "PG", "PX", "PXH"), trim_ws = TRUE) #source("C:/Users/roel/Documents/docs/actief/Projecten/paplusdatabestanden/code/nuttigescripts.R") #eigenschappen_dataset(KNMI) library(dplyr) library(lubridate) KNMI<-KNMI %>% mutate(datum = as.Date(fast_strptime(as.character(YYYYMMDD), "%Y%m%d")))%>% #zet datum om in date formaat. 
mutate(etmaalGemiddeldeTempGradenCelsius = TG/10 , zonneschijnUren = SQ/10, stralingJoulePerVierkanteCm = Q, EtmaalNeerslagMM = RH/10, gemLuchtdrukHectoPascal = PG/10)%>% select(datum, etmaalGemiddeldeTempGradenCelsius, zonneschijnUren, stralingJoulePerVierkanteCm, EtmaalNeerslagMM,gemLuchtdrukHectoPascal ) #missings definieeren. KNMI$zonneschijnUren[KNMI$zonneschijnUren == -1] <- NA KNMI$EtmaalNeerslagMM[KNMI$EtmaalNeerslagMM == -1] <- NA #graph that shit qplot(datum, etmaalGemiddeldeTempGradenCelsius, data= KNMI, color = zonneschijnUren)+ scale_fill_brewer(type = "div") qplot(datum, etmaalGemiddeldeTempGradenCelsius, data= KNMI, color = stralingJoulePerVierkanteCm) #+ scale_fill_brewer(type = "div") #Combineer Roel en Weer #### #Coffee naar per dag bestand. Roel <- filter(Coffee, categorie == "roel alleen") #idem als hierboven koffieRoelPerDag<-aggregate( Roel$counter, by =list(datum = Roel$datum), sum) names(koffieRoelPerDag)[2] <- "aantal" #combineren to 1 dataset library(dplyr) anti_join(koffieRoelPerDag, KNMI, by= "datum") #check, 0 rijen? 
koffieWeer<-left_join(koffieRoelPerDag, KNMI, by = "datum") #left want ik hoef geen weer waar ik geen koffie dronk #plotten library(ggplot2) qplot(datum, aantal, data = koffieWeer)+geom_line() plot(density( koffieWeer$aantal, bw=10)) qplot(datum, etmaalGemiddeldeTempGradenCelsius, data= koffieWeer, color = aantal, size =4, geom = c("point","smooth")) qplot(datum, EtmaalNeerslagMM, data= koffieWeer, color = aantal, size =3) qplot(factor(aantal), etmaalGemiddeldeTempGradenCelsius, data = koffieWeer)+ geom_boxplot() plot <- ggplot(koffieWeer, aes(x = datum, y= etmaalGemiddeldeTempGradenCelsius, color = aantal)) plot = plot + geom_point() + geom_smooth() plot #koffie en zon viool <- ggplot(koffieWeer, aes(factor(aantal), zonneschijnUren)) viool+ geom_violin() + ggtitle("zon per koffieaantal") + xlab("aantal koppen koffie") +ylab("aantal uren zon op de dag") #luchtdruk qplot(gemLuchtdrukHectoPascal, data = koffieWeer) #luchtdruk per koffie aantal. g<- ggplot(data = koffieWeer ,aes(datum, gemLuchtdrukHectoPascal, aantal)) g + geom_point() + facet_wrap(~ aantal) #temperatuur h<- ggplot(data = koffieWeer ,aes(datum, etmaalGemiddeldeTempGradenCelsius, aantal)) h + geom_point() + facet_wrap(~ aantal) #straling i<- ggplot(data = koffieWeer ,aes(datum, stralingJoulePerVierkanteCm, aantal)) i + geom_point() + facet_wrap(~ aantal) #etmaalneerslag j<- ggplot(data = koffieWeer ,aes(datum, EtmaalNeerslagMM, aantal)) j + geom_point() + facet_wrap(~ aantal) #beursdata maakt bestand koffieWeerBeurs library(readr) library(dplyr) beurs<- read_csv("YAHOO-INDEX_AEX.csv") #readr package herkent direct als datum qplot(Date, Close, data = beurs)#plot van closing AEX. koffieWeerBeurs<-left_join(koffieWeer, beurs, by = c("datum" = "Date")) #namen: datum, aantal, etmaalGemiddeldeTempGradenCelsius, zonneschijnUren # stralingJoulePerVierkanteCm, EtmaalNeerslagMM, gemLuchtdrukHectoPascal, # Open, High, Low, Close, Volume, Adjusted Close library(ggplot2) #iets met settings van kleuren is raar. 
bij errors voer uit: #theme_set(theme_grey()) k<- ggplot(data = koffieWeerBeurs ,aes(datum, Close, aantal)) k + geom_point() + facet_wrap(~ aantal) i<- ggplot(data = koffieWeerBeurs ,aes(datum, Close, color = etmaalGemiddeldeTempGradenCelsius)) i + geom_point( size =3) +scale_color_gradient2(high="red") #rood i + geom_point( size =3) +scale_color_gradientn(colours = rainbow(3)) #hoog contrast regenboog #plot temperatuur onder koers library(cowplot) A<- ggplot(data = koffieWeerBeurs ,aes(datum, Close))+ geom_point() B<- qplot(datum, etmaalGemiddeldeTempGradenCelsius ,data = koffieWeerBeurs) plot_grid(A, B, align = "h", nrow = 2) C<-qplot(datum, aantal, data=koffieWeerBeurs) plot_grid(A, C, nrow = 2, align = "h") cor(koffieWeerBeurs$aantal, koffieWeerBeurs$Close, use = "complete.obs") library(GGally) ggpairs(data=koffieWeerBeurs, columns = 2:10, upper = list(continuous = "density"), lower = list(combo = "facetdensity") ) ggscatmat(data = koffieWeerBeurs, columns = 2:10) #eenvoudigste plot
01bd21e63f3a83c8b1f5bb19e93a52b7d9967e4c
029bbab7cbb2b6f1ecd48b4f500a52a72caf86bb
/man/hoad.calib.Rd
d0789fbd11308a1021b3cc7860c1bc6fbdbf42b4
[]
no_license
cran/LinCal
fdc57d51f1bcd3bcde899b2abd3ec3eddf6a33d9
40efd11039a97da600d509d69c2523687d492aed
refs/heads/master
2022-05-13T06:12:19.302304
2022-04-29T21:40:15
2022-04-29T21:40:15
26,346,284
0
0
null
null
null
null
UTF-8
R
false
true
967
rd
hoad.calib.Rd
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/hoad_calib.R \name{hoad.calib} \alias{hoad.calib} \title{Bayesian Inverse Linear Calibration Function} \usage{ hoad.calib(x, y, alpha, y0) } \arguments{ \item{x}{numerical vector of regressor measurments} \item{y}{numerical vector of observation measurements} \item{alpha}{the confidence interval to be calculated} \item{y0}{vector of observed calibration value} } \description{ \code{hoad.calib} uses an inverse Bayesian approach to estimate an unknown X given observed vector y0 and calculates credible interval estimates. } \examples{ X <- c(1,1,2,2,3,3,4,4,5,5,6,6,7,7,8,8,9,9,10,10) Y <- c(1.8,1.6,3.1,2.6,3.6,3.4,4.9,4.2,6.0,5.9,6.8,6.9,8.2,7.3,8.8,8.5,9.5,9.5,10.6,10.6) hoad.calib(X,Y,0.05,6) } \references{ Hoadley, B. (1970). A Bayesian look at Inverse Linear Regression. Journal of the American Statistical Association. 65, 356-369. } \keyword{calibration} \keyword{linear}
a0516b0a0c85d8618fe780d572a903b08a61bd82
a2b36f90d65387e0bfe5791ae1a155268b5ec566
/Model_2/ann_2.R
366bbd62b90beb96e398239a8ba5e4a9130adaeb
[]
no_license
rmarlon308/SVM_vs_ANN
27d3a25319428d3888585cfb9ec3887a3aeb3e8b
24c8433d6d8cf7f3717e290e8b27b1c9d0576cf0
refs/heads/main
2023-02-05T22:37:44.131748
2020-12-25T21:54:37
2020-12-25T21:54:37
324,438,703
0
0
null
null
null
null
UTF-8
R
false
false
4,038
r
ann_2.R
library(readr) library(dplyr) library(keras) library(tensorflow) testing = read_csv("/home/marlon/mainfolder/marlon/USFQ/DataMining/10_FinalProject/proyectoFinal/Model_2/dataset/testing.csv") training = read_csv("/home/marlon/mainfolder/marlon/USFQ/DataMining/10_FinalProject/proyectoFinal/Model_2/dataset/training.csv") dataset = rbind(training, testing) dataset$class = as.factor(dataset$class) dataset$class = as.numeric(dataset$class) - 1 #Normalize the data summary(dataset) for(i in 2:ncol(dataset)){ dataset[i] = (dataset[i] - min(dataset[i]))/ (max(dataset[i]) - min(dataset[i])) * (1-0) + 0 } summary(dataset) x_data = dataset[, 2:ncol(dataset)] y_data = dataset[, 1] #Dimensionality reduction mean_row = colMeans(x_data) for(i in 1:nrow(x_data)){ x_data[i, ] = x_data[i, ] - mean_row } svd = svd(x_data) eigVectors = svd$v eigValues = svd$d^2/sum(svd$d^2) variance = cumsum(eigValues)/sum(eigValues) k = NULL for(i in 1:length(variance)){ if(variance[i] >= 0.95){ k = i print(k) break } } k_selected = eigVectors[, 1:k] proy_data = as.data.frame(as.matrix(x_data) %*% as.matrix(k_selected)) all_data = cbind(y_data, proy_data) rand_data = all_data[sample(nrow(all_data)),] #Del csv sacar el mejor modelo folds = cut(seq(1, nrow(rand_data)), breaks = 10, labels = F) recall = c() accuracy = c() precision = c() auc = c() loss = c() pred_vector = rep(0, nrow(all_data)) real_vector = rand_data$class history = c() for(i in 1:10){ test_index = which(folds == i, arr.ind = T) test_x = as.matrix(rand_data[test_index, 2:ncol(rand_data)]) test_y = rand_data[test_index, 1] train_x = as.matrix(rand_data[-test_index, 2:ncol(rand_data)]) train_y = rand_data[-test_index, 1] test_y = to_categorical(test_y) train_y = to_categorical(train_y) model = keras_model_sequential() model %>% layer_dense(units = 15, activation = 'relu', input_shape = c(ncol(train_x))) %>% layer_dense(units = 4, activation = 'softmax') model %>% compile(loss = 'categorical_crossentropy', optimizer = optimizer_adam(lr = 
0.001), metrics = c('accuracy', tf$keras$metrics$AUC(), tf$keras$metrics$Precision(), tf$keras$metrics$Recall())) mymodel = model %>% fit(train_x, train_y, epochs = 150, batch_size = 32, validation_split = 0.2 ) eval = model %>% evaluate(test_x, test_y) history = c(history, mymodel) recall = c(recall, eval[5]) accuracy = c(accuracy, eval[2]) precision = c(precision, eval[4]) auc = c(auc, eval[3]) loss = c(loss, eval[1]) pred = model %>% predict_classes(test_x) pred_vector[test_index] = pred } sprintf("Accuracy Mean: %f SD: %f", mean(accuracy), sd(accuracy)) sprintf("Precision Mean: %f SD: %f", mean(precision), sd(precision)) sprintf("Recall Mean: %f SD: %f", mean(recall), sd(recall)) sprintf("AUC Mean: %f SD: %f", mean(auc), sd(auc)) sprintf("Loss Mean: %f SD: %f", mean(loss), sd(loss)) table(pred_vector, real_vector) #Grafica Loss data_loss = NULL data_loss_val = NULL for(i in seq(2, 20, by = 2)){ data_loss = rbind(data_loss, as.vector(history[i]$metrics$loss)) data_loss_val = rbind(data_loss_val, as.vector(history[i]$metrics$val_loss)) } mean_loss = colMeans(data_loss) mean_loss_val = colMeans(data_loss_val) loss_data = data.frame(mean_loss, mean_loss_val) library(ggplot2) ggplot(loss_data) + geom_line(aes(x = 1:nrow(loss_data), y = mean_loss), color = "blue") + geom_line(aes(x = 1:nrow(loss_data), y = mean_loss_val), color = "green")+ xlab("Epochs") + ylab("Loss") + labs(title = "Loss") library(pROC) library(caret) roc.multi = multiclass.roc(real_vector, pred_vector, levels = c(0,1,2,3)) auc(roc.multi) rs <- roc.multi[['rocs']] plot.roc(rs[[1]]) sapply(2:length(rs),function(i) lines.roc(rs[[i]],col=i))
a7b46a9d70d4225cd14529ddf9d0766f561f37b5
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
/data/genthat_extracted_code/summarytools/examples/print.summarytools.Rd.R
713618962291a60a9a725ed4f6aead0e6b688b14
[]
no_license
surayaaramli/typeRrh
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
66e6996f31961bc8b9aafe1a6a6098327b66bf71
refs/heads/master
2023-05-05T04:05:31.617869
2019-04-25T22:10:06
2019-04-25T22:10:06
null
0
0
null
null
null
null
UTF-8
R
false
false
418
r
print.summarytools.Rd.R
library(summarytools) ### Name: print.summarytools ### Title: Print Method for Objects of Class 'summarytools'. ### Aliases: print.summarytools print view ### Keywords: methods print ### ** Examples ## Not run: ##D data(tobacco) ##D view(dfSummary(tobacco), footnote = NA) ##D ## End(Not run) data(exams) print(freq(exams$gender), style = 'rmarkdown') print(descr(exams), omit.headings = TRUE)
5a7f00184814b73afeaa6bc8d7f60741cfe4411f
8f0431de29762061acb57e06f492d22d5ce2604f
/man/gt_sparkline.Rd
d39c5a991ab2e395f28da158714f798eb04f48fb
[ "MIT" ]
permissive
adamkemberling/gtExtras
2c3e1a81d5dd97666dedab710d49377a2a7572dd
40d1e5a006fa67833a702733055c94606f8cffb7
refs/heads/master
2023-08-17T11:12:00.431133
2021-10-13T16:28:10
2021-10-13T16:28:10
null
0
0
null
null
null
null
UTF-8
R
false
true
2,911
rd
gt_sparkline.Rd
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/gt_sparkline.R \name{gt_sparkline} \alias{gt_sparkline} \title{Add sparklines into rows of a \code{gt} table} \usage{ gt_sparkline( gt_object, column, type = "sparkline", width = 30, line_color = "black", range_colors = c("red", "blue"), fill_color = "grey", bw = NULL, trim = FALSE, same_limit = TRUE ) } \arguments{ \item{gt_object}{An existing gt table object of class \code{gt_tbl}} \item{column}{The column wherein the sparkline plot should replace existing data. Note that the data \emph{must} be represented as a list of numeric values ahead of time.} \item{type}{A string indicating the type of plot to generate, accepts \code{"sparkline"}, \code{"histogram"} or \code{"density"}.} \item{width}{A number indicating the width of the plot in mm at a DPI of 25.4, defaults to 30} \item{line_color}{Color for the line, defaults to \code{"black"}. Accepts a named color (eg 'blue') or a hex color.} \item{range_colors}{A vector of two valid color names or hex codes, the first color represents the min values and the second color represents the highest point per plot. Defaults to \code{c("blue", "blue")}. Accepts a named color (eg \code{'blue'}) or a hex color like \code{"#fafafa"}.} \item{fill_color}{Color for the fill of histograms/density plots, defaults to \code{"grey"}. Accepts a named color (eg \code{'blue'}) or a hex color.} \item{bw}{The bandwidth or binwidth, passed to \code{density()} or \code{ggplot2::geom_histogram()}. If \code{type = "density"}, then \code{bw} is passed to the \code{bw} argument, if \code{type = "histogram"}, then \code{bw} is passed to the \code{binwidth} argument.} \item{trim}{A logical indicating whether to trim the values in \code{type = "density"} to a slight expansion beyond the observable range. 
Can help with long tails in \code{density} plots.} \item{same_limit}{A logical indicating that the plots will use the same axis range (\code{TRUE}) or have individual axis ranges (\code{FALSE}).} } \value{ An object of class \code{gt_tbl}. } \description{ The \code{gt_sparkline} function takes an existing \code{gt_tbl} object and adds sparklines via the \code{ggplot2}. This is a wrapper around \code{gt::text_transform()} + \code{ggplot2} with the necessary boilerplate already applied. } \section{Figures}{ \if{html}{\figure{ggplot2-sparkline.png}{options: width=50\%}} } \section{Function ID}{ 1-4 } \examples{ library(gt) gt_sparkline_tab <- mtcars \%>\% dplyr::group_by(cyl) \%>\% # must end up with list of data for each row in the input dataframe dplyr::summarize(mpg_data = list(mpg), .groups = "drop") \%>\% gt() \%>\% gt_sparkline(mpg_data) } \seealso{ Other Plotting: \code{\link{gt_plt_bar_pct}()}, \code{\link{gt_plt_bar_stack}()}, \code{\link{gt_plt_bar}()}, \code{\link{gt_plt_winloss}()} } \concept{Plotting}
2bf84a15dfcbf5ebbd7a77f098793943a8a2f8fd
3bf7d1502b222af53cbda561dd143aaa32a5d538
/main.R
d5616428d19a8a498deed6174749a1a5ee76febc
[ "MIT" ]
permissive
fdrennan/biggr2
90506d9637800d9149d4e6c9b4de21d0a319d9ba
d2c82763bc9331697478dfdc58f11dab6572accc
refs/heads/main
2023-03-10T22:25:37.241549
2021-02-28T07:34:36
2021-02-28T07:34:36
303,002,767
3
2
NOASSERTION
2021-02-26T17:41:03
2020-10-10T22:48:18
R
UTF-8
R
false
false
553
r
main.R
library(biggr2) library(glue) library(readr) user_data <- read_file("ubuntuinit.sh") user_data <- glue_collapse( c( user_data, paste0("echo ", readLines(".Renviron"), ">> /home/ubuntu/.Renviron", collapse = "\n"), "cd /home/ubuntu && git clone https://github.com/fdrennan/redditstack.git", "mv /home/ubuntu/.Renviron /home/ubuntu/redditstack/.Renviron", "sudo chmod 777 -R /home/ubuntu/redditstack" ), sep = "\n" ) server <- ec2_instance_create( user_data = user_data, InstanceType = "t2.xlarge" )
4f4bd2080cc7490ec7845ffca132ddebd3d1c592
8817c24a3fab4de0600244b5880f2de0c7b97f26
/plot3.R
8b2602c90d685613bc02d085a503ea3bb1c51ff0
[]
no_license
wlb0/ExData_Plotting1
c96ec5b00eae64b0e69013f84a6ea1a6c524021a
81d217b2f4e1f0c8c9136c3c8e8dab1070b8660f
refs/heads/master
2020-12-11T06:00:34.825187
2015-06-07T16:31:01
2015-06-07T16:31:01
35,371,703
0
0
null
2015-05-10T13:17:40
2015-05-10T13:17:40
null
UTF-8
R
false
false
1,334
r
plot3.R
## exdata-014 Assignment 1 part 3 ## code for plot 3 ## assumes source data has already been downloaded and unzipped to ## csv file "household_power_consumption.txt" in the current directory if (!require("sqldf")) { install.packages("sqldf") } library(sqldf) # define wData as a file with indicated format wData <- file("household_power_consumption.txt") attr(wData, "file.format") <- list(sep = ";", header = TRUE) # use sqldf to read it in keeping only rows for the two specified dates plotData <- sqldf("select * from wData where date = '1/2/2007' or date = '2/2/2007'") # add column with date and time in proper date/time format, # update date column to be proper date format plotData$dateTime <- strptime(paste(plotData$Date,plotData$Time),format="%d/%m/%Y %H:%M:%S") plotData$Date <- strptime(plotData$Date, "%d/%m/%Y") # perform plot png(filename="plot3.png", width=480, height=480) with(plotData, plot(dateTime, Sub_metering_1, type="l", col="black", xlab="", ylab="Energy sub metering", main="")) with(plotData, points(dateTime, Sub_metering_2, type="l", col="red")) with(plotData, points(dateTime, Sub_metering_3, type="l", col="blue")) legend("topright", lty=1, col=c("black", "red", "blue"), legend=names(plotData[7:9]), cex=0.95) dev.off()
5b2e3ae525e7eba9149409030ea4bd44bf286709
7c4fa47fe62269bad9c8bbdf9edaf8b3180865f9
/024-maritime_ports_france_2/01_scrape.R
a4214548a5ccb6c1fcae8a32d7fa89f3f3ca061a
[]
no_license
training-datalab/minard
6236897f64a49979b219ac962b4de9bfd84ce003
194f7e537ad0cc16a5ba71ca28773a814e8946ef
refs/heads/master
2023-03-23T21:11:18.323214
2021-03-17T21:57:44
2021-03-17T21:57:44
null
0
0
null
null
null
null
UTF-8
R
false
false
1,082
r
01_scrape.R
library(rvest) url <- "http://www.worldportsource.com/ports/index/FRA.php" doc <- read_html(url) links <- html_nodes(doc,"br+ a") %>% html_attr("href") %>% paste0("http://www.worldportsource.com",.) name <- html_nodes(doc,"br+ a") %>% html_text() ports <- tibble(name,link=links) %>% distinct() %>% mutate(link=str_remove(link,"review/")) ports$lat <- ports$lon <- ports$deglat <- ports$deglon <- ports$port_type <- ports$port_size <- NA for(i in 1:nrow(ports)){ cat(i,"\r") th1 <- ports$link[i] %>% read_html() %>% html_nodes("th") %>% html_text() th2 <- ports$link[i] %>% read_html() %>% html_nodes(".dash") %>% html_text() %>% .[seq(2,length(.),2)] ports$deglat[i] <- th2[which(th1=="Latitude:")] ports$deglon[i] <- th2[which(th1=="Longitude:")] latlong <- OSMscale::degree(ports$deglat[i],ports$deglon[i],drop = TRUE) ports$lat[i] <- latlong[1] ports$lon[i] <- latlong[2] ports$port_type[i] <- th2[which(th1=="Port Type:")] ports$port_size[i] <- th2[which(th1=="Port Size:")] } write_csv(ports,"data/ports_france.csv")
dc573d45f2679a4e4b5fff3c1ccfd0cdb017a72b
11394cd22cea3b4e644d20564ff4b500018d943e
/scripts/separateAnalysis/checkCellIdent.R
21db489ecc39f2104a8fdeb5c4f6b9cac4fff284
[ "MIT" ]
permissive
shunsunsun/single_cell_rna_seq_snakemake
3d153c9cb7db9988917aff38991217a35940aa64
f275546eb3bd63d5d535a13407ce47ee36e94cae
refs/heads/master
2023-04-17T08:44:13.954986
2021-04-27T08:13:50
2021-04-27T08:13:50
null
0
0
null
null
null
null
UTF-8
R
false
false
522
r
checkCellIdent.R
library(Seurat) se <- readRDS(file="./data/GEJ_QCed_sctNorm_BatchCCA_clustStab/Louvain_clust_k100_pc50_res0.4.rds") load(file="./data/GEJ_QCed_sctNorm_BatchCCA_clustStab/allcells_ident_scibet.rda") load(file="./data/GEJ_QCed_sctNorm_BatchCCA_clustStab/Louvain_clust_k100_pc50_res0.4_singleR_label.fine.rda") se$singleR_label <- clust.pred$labels[match(se$integrated_snn_res.0.4, rownames(clust.pred))] se$scibet_label <- ci[[2]] table(se$scibet_label, se$singleR_label) table(se$scibet_label, se$integrated_snn_res.0.4)
be69827fd020ea2e0e1b2ba1e4405bc8bb13f45c
5fabfb8ce7863ffe6daa91dac2971ee85f9d2ad7
/man/subsample.Rd
82dd8ca04ec56e038452770e776581ad2ca34ddd
[]
no_license
Pezhvuk/sumrep
a1ff159aa8b025d15c167e434591788bae8cc4b2
275c6981f57b14db51746735f32e1ecf6bf6c486
refs/heads/master
2020-04-30T12:14:39.350531
2019-01-19T20:44:06
2019-01-19T20:44:06
null
0
0
null
null
null
null
UTF-8
R
false
true
545
rd
subsample.Rd
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/Approximation.R \name{subsample} \alias{subsample} \title{Subsample a dataset} \usage{ subsample(dataset, sample_count, replace = TRUE) } \arguments{ \item{dataset}{A data.table, data.frame, or vector object} \item{sample_count}{Number of samples to retain in the subsampled data. Samples refers to elements in a vector or rows in a data.table/data.frame} } \value{ A subsampled dataset of the same type given by \code{dataset} } \description{ Subsample a dataset }
1143c0b1bc2b469b9f734aac5d65581a440e212f
c9cd1cd8ed8904baa6fa7807e9cfc9fc42774d94
/plot4.R
2562514897a8ad86e656fda207b9a552ecc6e05a
[]
no_license
BirgitR/ExData_Plotting1
274b054c11c63b77d403eb894504bd77fffb4d01
72a46ca9c3dc7e5c82c4d498a3671914d4fc613d
refs/heads/master
2021-01-21T23:45:36.735643
2014-09-07T19:39:37
2014-09-07T19:39:37
null
0
0
null
null
null
null
UTF-8
R
false
false
1,075
r
plot4.R
hpc <- read.csv("household_power_consumption.txt", sep=";", na.strings="?", stringsAsFactors=FALSE) hpc2 <- hpc[min(which(hpc$Date == "1/2/2007")):max(which(hpc$Date == "2/2/2007")),] datetime<-strptime(paste(hpc2$Date, hpc2$Time), format="%d/%m/%Y %H:%M:%S") Sys.setlocale("LC_TIME", "C") par(mfcol=c(2,2),mar=c(4,4,2,2)) plot(x=datetime,y=hpc2$Global_active_power,type="l",xlab="", ylab="Global Active Power") plot(datetime,hpc2$Sub_metering_1,xlab="",ylab="Energy sub metering",type="n") lines(datetime,hpc2$Sub_metering_1,type="l",col="black") lines(datetime,hpc2$Sub_metering_2,type="l",col="red") lines(datetime,hpc2$Sub_metering_3,type="l",col="blue") legend("topright",lty=1,lwd=2,bty="n",cex=.6,col=c("black","red","blue"), legend=c("Sub_metering_1","Sub_metering_2","Sub_metering_3")) plot(x=datetime,y=hpc2$Voltage,type="l",xlab="datetime", ylab="Voltage") plot(x=datetime,y=hpc2$Global_reactive_power,type="l",xlab="datetime", ylab="Global_reactive_power") dev.copy(png, file="plot4.png",width = 480, height = 480) dev.off()