blob_id
stringlengths
40
40
directory_id
stringlengths
40
40
path
stringlengths
2
327
content_id
stringlengths
40
40
detected_licenses
listlengths
0
91
license_type
stringclasses
2 values
repo_name
stringlengths
5
134
snapshot_id
stringlengths
40
40
revision_id
stringlengths
40
40
branch_name
stringclasses
46 values
visit_date
timestamp[us]date
2016-08-02 22:44:29
2023-09-06 08:39:28
revision_date
timestamp[us]date
1977-08-08 00:00:00
2023-09-05 12:13:49
committer_date
timestamp[us]date
1977-08-08 00:00:00
2023-09-05 12:13:49
github_id
int64
19.4k
671M
star_events_count
int64
0
40k
fork_events_count
int64
0
32.4k
gha_license_id
stringclasses
14 values
gha_event_created_at
timestamp[us]date
2012-06-21 16:39:19
2023-09-14 21:52:42
gha_created_at
timestamp[us]date
2008-05-25 01:21:32
2023-06-28 13:19:12
gha_language
stringclasses
60 values
src_encoding
stringclasses
24 values
language
stringclasses
1 value
is_vendor
bool
2 classes
is_generated
bool
2 classes
length_bytes
int64
7
9.18M
extension
stringclasses
20 values
filename
stringlengths
1
141
content
stringlengths
7
9.18M
c704f96cbd2c4958c8e230be66448832cc0dcdf7
f4ea90de095579e845aa7cd8e631583c77755beb
/R/Class-hier-fn.R
1c1746cf1ab4adbb81d7cabea022a2b6af656327
[]
no_license
cran/classGraph
c35e320f389f372eba1f93fe2c6ed677934323a5
1666954776a5334a49c621fd733e1aadebd953ec
refs/heads/master
2023-08-31T07:13:05.501094
2023-08-21T19:50:02
2023-08-21T20:30:50
17,695,085
0
0
null
null
null
null
UTF-8
R
false
false
6,202
r
Class-hier-fn.R
## Utilities for building and plotting trees of S4 class hierarchies.
## Relies on the 'methods' package plus the Bioconductor 'graph' and
## 'Rgraphviz' packages (edgeL, ftM2graphNEL, agopen, ... are from there).

## Backport: base::lengths() only exists from R 3.2.1 on.
if((Rv <- getRversion()) < "3.2.1") {
  lengths <- function(x, use.names = TRUE)
    vapply(x, length, 1L, USE.NAMES = use.names)
}## R < 3.2.1

## subClasses(): character vector of the names of the subclasses of 'Cl'.
## Cl          : a class name or a class definition object.
## directOnly  : if TRUE, only *direct* subclasses are returned.
## complete    : use getClass() (errors on undefined classes) rather than
##               getClassDef().
## Utility for classTree() below.
subClasses <- function(Cl, directOnly = TRUE, complete = TRUE, ...) {
  if (isClassDef(Cl)) {
    cDef <- Cl
    Cl <- cDef@className
  } else {
    ## NB: getClass() can give an error because sub classes can
    ## be "not defined" (?!) -- e.g. "iMatrix"
    cDef <- if (complete) getClass(Cl) else getClassDef(Cl)
  }
  subs <- showExtends(cDef@subclasses, printTo = FALSE)
  if(directOnly) subs$what[subs$how == "directly"] else subs$what
}

## numOutEdges(): named integer vector giving, for each node of graph 'g',
## the number of edges *from* that node.
## Author: Martin Maechler, Date: 8 Feb 2007, 22:59
numOutEdges <- function(g) {
  lengths( sapply(edgeL(g), `[[`, "edges") )
}

## is.leaf(): logical vector -- TRUE for nodes with no outgoing edges.
is.leaf <- function(g) numOutEdges(g) == 0

## The graph package now defines a leaves() generic {w/ degree.dir}
## leaves <- function(g) nodes(g)[is.leaf(g)]

## bGraph(): create a "branch graph", a simple tree with one root and
## n branches / leaves.  If 'leaves' is given explicitly, 'n' is derived
## from it; otherwise leaf names are "<l.prefix>1", "<l.prefix>2", ...
## ('D' for 'D'aughter by default).
## Author: Martin Maechler, Date: Aug 2005
bGraph <- function(n, root = "Mom",
                   leaves = paste(l.prefix, seq(length=n), sep=""),
                   l.prefix = "D",
                   weights = NULL,
                   mode = c("undirected", "directed")) {
  if(!missing(leaves)) {
    stopifnot(is.character(leaves))
    n <- length(leaves)
  } else stopifnot(is.numeric(n), length(n) == 1, n >= 0)
  mode <- match.arg(mode)
  ## one edge root -> leaf for every leaf:
  ftM2graphNEL(cbind(root, leaves), W = weights, edgemode = mode)
}

## agopen() has
## layoutType = c("dot","neato","twopi", "circo", "fdp")

## abbrMatrixcl(): abbreviate Matrix-package class names for plotting:
## "<foo>Matrix" -> "<foo>*" (except the root "Matrix" itself); at
## level >= 2, additionally "sparse*" -> ".sp." and "dense*" -> ".D.".
## Returns list(clnames, iSparse, iDense) with the indices of the
## sparse/dense names.
abbrMatrixcl <- function(clnames, level = 1) {
  doSub <- clnames != "Matrix"
  clnames[doSub] <- sub("Matrix$", "*", clnames[doSub])
  ## sparse
  iSp <- grep("sparse", clnames)
  if(level >= 2) clnames[iSp] <- sub("sparse\\*", ".sp.", clnames[iSp])
  ## dense
  iD <- grep("dense", clnames)
  if(level >= 2) clnames[iD] <- sub("dense\\*", ".D.", clnames[iD])
  list(clnames = clnames, iSparse = iSp, iDense = iD)
}

## mRagraph(): produce a laid-out graph, an "Ragraph" -- to be plotted.
## Leaves (no outgoing edges) are treated as *non*-virtual classes and get
## the second fill/line color; sparse/dense names get distinct label colors.
mRagraph <- function(gr, lType, fixedsize = FALSE, ## <---- this is it !
                     fill = c("lightblue", "gray90"),
                     color =c("blue3", "gray60"),
                     labcol = c("blue3","green4","purple")) {
  if (!validGraph(gr))
    stop("The graph to be plotted is not a valid graph structure")
  if (missing(lType)) lType <- "dot"
  ng <- nodes(gr)
  ## leaves <- function(g) nodes(g)[is.leaf(g)]
  ## nonVirtual <- leaves(gr)  ## the leaves are *non*-virtual classes
  nonVirtual <- ng[is.leaf(gr)] ## the leaves are *non*-virtual classes
  r <- abbrMatrixcl(ng)
  nAtt <- makeNodeAttrs(gr, label = r$clnames, shape = "ellipse",
                        fixedsize = fixedsize, fillcolor = fill[1],
                        color = color[1], fontcolor = labcol[1])
  nAtt$fontcolor[r$iSparse] <- labcol[2]
  nAtt$fontcolor[r$iDense] <- labcol[3]
  nAtt$fillcolor[nonVirtual] <- fill[2]
  nAtt $ color[nonVirtual] <- color[2]
  ## but make one exception (for visualization):
  nAtt$fillcolor["pMatrix"] <- "thistle"
  nAtt $ color["pMatrix"] <- "turquoise"
  if(getOption("verbose")) { cat("mRagraph(): nodeAttrs: "); str(nAtt) }
  ## Returns the "layouted graph"; is +- == method("plot", "graph"):
  agopen(gr, name = "", layout = TRUE, layoutType = lType,
         attrs = list(), nodeAttrs = nAtt, edgeAttrs = list(),
         subGList = list(), recipEdges = "combined")
}

## Default mtext() arguments used by plotRag() for the subtitle.
.optRagargs <- function(side = 1, adj = 0.05, cex = 0.75, line = 3)
  list(side = side, adj = adj, cex = cex, line = line)

## plotRag(): a bit more than selectMethod("plot", "Ragraph") -- but
## building on that.  Plots 'ragr' and adds a subtitle (default: node and
## edge counts) via mtext(), with clipping disabled so it is not cut off.
plotRag <- function(ragr, sub, subArgs = .optRagargs(), ...) {
  stopifnot(is(ragr, "Ragraph"))
  if(missing(sub)) {
    ## nEdges <- length(unlist(edgeL(gr), use.names=FALSE))
    sub <- paste(length(ragr@AgNode), "nodes with",
                 length(ragr@AgEdge), "edges")
  }
  ## BUG in Rgraphviz ----> FIXME: bug report, ...
  ## plot(ragr, sub = sub, ...)
  ## workaround {{but more flexible anyway:
  plot(ragr, ...)
  op <- par(xpd = NA) ; on.exit(par(op))
  do.call(mtext, c(list(text = sub), subArgs))
}

## classTree(): build the graphNEL of the subclass tree rooted at 'Cl',
## working recursively via the local subtree() helper.
## all = TRUE also follows indirect subclass relations.
classTree <- function(Cl, all = FALSE, ...) {
  ## First a check
  if (isClassDef(Cl)) {
    cDef <- Cl
    Cl <- cDef@className
  } else cDef <- getClass(Cl)
  pkg <- cDef@package
  ## resolve subclasses in the namespace the root class comes from:
  where <- if(pkg == ".GlobalEnv") .GlobalEnv else asNamespace(pkg)
  ## Recursive helper: the extended subtree for one class definition;
  ## joins the branch graph of its direct children with their subtrees.
  subtree <- function(cl, all) {
    stopifnot(isClassDef(cl))
    clN <- cl@className
    if(getOption('verbose')) cat(" ST",clN,":")
    sc <- subClasses(cl, directOnly = !all)
    if(length(sc) == 0) {
      if(getOption('verbose')) cat(" is leaf\n")
      ## one node named 'cl':
      g <- new("graphNEL", nodes = clN, edgemode = "dir")
    } else {
      if(getOption('verbose')) cat(" has leaves:\n\t")
      g <- bGraph(root = clN, leaves = sc, mode = "dir")
      for(cc in sc) {
        if(getOption('verbose')) cat(":: ",clN,"-",cc,sep="")
        st <- subtree(getClass(cc, where = where), all = all) ## recursive
        if(numNodes(st) > 1) g <- join(g, st)
      }
    }
    g
  }
  subtree(cDef, all = all)
}
d0d7b210a89bff73d1a8b9b23796ae874dc8b0ec
c20629bc224ad88e47849943e99fe8bc6ccb1f17
/Data processing functions/lost.hrs_function.R
f09fec2bc29b5a05bf73b429ea5754bd7053dfab
[]
no_license
nayefahmad/R-vocab-and-experiments
71a99e4d3ff0414d1306a5c7cabfd79b49df17f9
4384d3d473b0a9d28d86c3d36b0b06a1f91b862e
refs/heads/master
2022-12-13T01:12:11.134952
2020-08-24T04:59:52
2020-08-24T04:59:52
103,333,690
1
1
null
null
null
null
UTF-8
R
false
false
4,458
r
lost.hrs_function.R
#*******************************************
# FUNCTION TO FIND LOST HOURS IN BOOKING DATA
#*******************************************
# rm(list = ls())
# source("2017-12-13_vgh_mdc-explore-booking-data.R")

# TODO: ---------------

# lost.hrs(): given one day/bay slice of the booking data, return a data
# frame with columns date, bay, losses, losses.made.up.
# Requires dplyr and magrittr (%<>%) to be attached by the caller.
lost.hrs <- function(df) {
  # dataframe is booking data split on date and bay
  # output: dataframe with added col showing lost hours
  # note: this doesn't count gaps where two appointments are not
  #   scheduled back-to-back. Only looks at gaps caused by no-shows,
  #   cancelled appointments, etc.

  nrow = nrow(df)  # row count; shadows base::nrow for the rest of the body

  if (nrow > 1) {
    # lagged end/start times of rows 1..(n-1), used after dropping row 1
    # NOTE(review): these are taken *before* the arrange() below -- assumes
    # df already arrives sorted by starttime; confirm with the caller.
    lag.end <- df$endtime[1:(nrow-1)]  # %>% print
    lag.start <- df$starttime[1:(nrow-1)]  # %>% print
  } else {
    # single-row case: the whole slot is lost unless the status is "IN"
    if (df$status[1] == "IN") {
      result.df <- data.frame(date = df$date,
                              bay = df$bay,
                              losses = 0,
                              losses.made.up = 0)
      return(result.df)
    } else {
      result.df <- data.frame(date = df$date,
                              bay = df$bay,
                              losses = df$endtime[1] - df$starttime[1],
                              losses.made.up = 0)
      return(result.df)
    }
  }

  # first we ensure that df is arranged right:
  df %<>% arrange(starttime, status)  # %>% print

  # check first row for losses (no earlier row can absorb them):
  first.row.loss <- ifelse(!(df$status[1] %in% "IN"),
                           df$endtime[1] - df$starttime[1],
                           0)  # %>% print

  # now we have to remove first row to add lagged vars:
  df <- df[2:nrow, ]

  # df$starttime %>% print
  # print(df$starttime - lag.end)

  df %<>%
    select(id, date, bay, status, starttime, endtime) %>%
    mutate(# add lag vars:
           lag.starttime = lag.start,
           lag.endtime = lag.end,
           # durations:
           dur = endtime - starttime,
           lag.dur = lag.endtime - lag.starttime,
           # anything other than "IN" is "lost" time:
           loss.hrs = ifelse(!(status %in% "IN"),
                             endtime - starttime,
                             NA),
           # an "IN" booking starting before the previous one ended
           # overlaps it and can make up lost time:
           is.overlap = ifelse(status %in% "IN",
                               ifelse(starttime < lag.endtime, 1, 0),
                               NA),
           # NOTE(review): both branches of the dur >= lag.dur split are
           # identical -- presumably one was meant to differ; confirm.
           loss.makeup = ifelse(is.overlap == 1,
                                ifelse(dur >= lag.dur,
                                       # branch 1:
                                       ifelse(starttime == lag.starttime,
                                              lag.dur,
                                              (lag.dur - (starttime - lag.starttime))),
                                       # branch 2:
                                       ifelse(starttime == lag.starttime,
                                              lag.dur,
                                              (lag.dur - (starttime - lag.starttime)))),
                                NA))
  # print(df)

  # aggregate all losses (NA means "no loss" -> 0):
  losses <- c(first.row.loss, df$loss.hrs)
  losses[is.na(losses)] <- 0
  losses.made.up <- c(df$loss.makeup, NA)
  losses.made.up[is.na(losses.made.up)] <- 0

  result.df = data.frame(date = rep(df$date[1], length(losses)),
                         bay = rep(df$bay[1], length(losses)),
                         losses = losses,
                         losses.made.up = losses.made.up)
  return(result.df)
}

# test the fn (list1.split is expected in the calling environment):
lost.hrs(list1.split[[2]])
339dc835e6ccac3b9f14b29ddc0ac1d79fe31e35
a1e2e1e9c4b5eb2b2eb56f64ec6df2f83d65a2ce
/common/general_dataset_stats.R
97c8c44a9a29a0f18a2c3abf65551d1de3bfad6e
[]
no_license
posfaig/wow
e6b2747d17e7eda6869a01a1ced50b6d0a1cad67
1d8b7694886706d6152d058a9465c1c34acbb8ee
refs/heads/master
2021-01-11T19:43:07.244518
2017-01-13T01:27:40
2017-01-13T01:27:40
69,188,269
0
0
null
null
null
null
UTF-8
R
false
false
2,885
r
general_dataset_stats.R
######################################################################
###
### Get the general statistics of the datasets
###
######################################################################

library(dplyr)

# Build one labelled dataset for a single prediction date.
#
# data             : event log with (at least) avatar, guild, event and a
#                    current_date column.
# pred_date        : date at which the prediction is made.
# testset_end_date : end (exclusive) of the labelling window.
#
# Returns one row per avatar that belongs to a guild at pred_date, with a
# logical 'label' (TRUE = left/changed guild during the window) plus the
# pred_date / testset_end_date the row belongs to.
compute_dataset <- function(data, pred_date, testset_end_date, ...) {
  print("compute_dataset")
  pred_date <- as.Date(pred_date)
  testset_end_date <- as.Date(testset_end_date)

  # Label per avatar: did it leave (or change) its guild inside
  # [pred_date, testset_end_date)?
  window_events <- data %>%
    filter(current_date >= pred_date & current_date < testset_end_date)
  avatar_labels <- window_events %>%
    group_by(avatar) %>%
    summarise(label = (sum(event == "Guild Left" | event == "Guild Changed") > 0))

  # Records: the last pre-pred_date observation of each avatar, kept only
  # when the avatar is in a guild (guild != -1) at that point.
  history <- data %>% filter(current_date < pred_date)
  guild_members <- history %>%
    group_by(avatar) %>%
    dplyr::slice(n()) %>%
    group_by() %>%
    filter(guild != -1) %>%
    select(avatar, guild)

  print("Joining features and labels")
  joined <- left_join(guild_members, avatar_labels, by = "avatar") %>%
    mutate(label = ifelse(is.na(label), FALSE, label)) # absent from window => stayed
  joined$pred_date <- pred_date
  joined$testset_end_date <- testset_end_date
  joined
}

# Summary counts for one assembled dataset, as a named list.
get_dataset_stats <- function(dataset) {
  label_num <- as.numeric(dataset[["label"]])
  list(
    records      = nrow(dataset),
    pred_dates   = length(unique(dataset[["pred_date"]])),
    guilds       = length(unique(dataset[["guild"]])),
    avatars      = length(unique(dataset[["avatar"]])),
    labels_true  = sum(label_num == 1),
    labels_false = sum(label_num == 0)
  )
}

source("common/init.R")
source("common/streamline_functions_for_modeling.R")

# Assemble the train/test datasets over all prediction dates.
training_data <- get_features_for_pred_dates(
  prediction_dates_train, function(...) { NULL }, compute_dataset)
test_data <- get_features_for_pred_dates(
  prediction_dates_test, function(...) { NULL }, compute_dataset)

stats_train <- get_dataset_stats(training_data)
stats_test <- get_dataset_stats(test_data)

# Write stats to file as "name,value" lines.
dir.create(file.path("generated/dataset_stats/"),
           showWarnings = FALSE, recursive = TRUE)
as_csv_lines <- function(stats) {
  sapply(names(stats), function(nm) paste0(nm, ",", stats[[nm]]))
}
lines_train <- as_csv_lines(stats_train)
lines_test <- as_csv_lines(stats_test)

fileConn <- file("generated/dataset_stats/stats.txt")
writeLines(c("TRAINING DATASET", lines_train, "TEST DATASET", lines_test),
           fileConn)
close(fileConn)
014c9ef0102674ce88f2c8a62a5b9a8ca25ecf3a
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
/data/genthat_extracted_code/lmreg/examples/stars2.Rd.R
69096a8b13316dde0da7b314ecca7d69a2350b96
[]
no_license
surayaaramli/typeRrh
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
66e6996f31961bc8b9aafe1a6a6098327b66bf71
refs/heads/master
2023-05-05T04:05:31.617869
2019-04-25T22:10:06
2019-04-25T22:10:06
null
0
0
null
null
null
null
UTF-8
R
false
false
148
r
stars2.Rd.R
library(lmreg) ### Name: stars2 ### Title: Stars data 2 ### Aliases: stars2 ### Keywords: datasets ### ** Examples data(stars2) head(stars2)
c0a6cc76bc0a6c4e87be57d17a53ba66b97fe78b
561beed458dfcf06de55c8b9145613adf3e3dad6
/old_bic.r
b28969541f1ef0fefa89eb54035edadbb60807d3
[]
no_license
deniseduma/respls
e75dafd68ecd0fa5905667b78f2a36f44d8947da
f326cf1a84ab4734a67a156917f8a2752597be68
refs/heads/master
2016-09-14T05:36:51.454059
2016-05-23T22:30:59
2016-05-23T22:30:59
59,517,165
0
0
null
null
null
null
UTF-8
R
false
false
1,495
r
old_bic.r
## Select best w using BIC cat("\n"); print("BIC") betas = list(); prs = list(); idx_bic = 1; min_bic = 1e6; all_zero = 1 for (j in 1:nlam) { w = ws[ ,j]; idx_w = which(w!=0); nnz_w = length(idx_w) if (nnz_w==0) next all_zero = 0 ## OLS re-estimation of w #y = ys[ ,j]; sX = X[ ,idx_w,drop=FALSE] #sw = solve ( crossprod(sX) + 0.001*diag(nnz_w), t(sX)%*%y); w2[idx_w] = sw #err = (norm(y - X%*%w2)^2) / (q) ## PLS re-estimation of w idx = union( idx_W, idx_w ) sX0 = X0[,idx,drop=FALSE] plsfit = pls::plsr( Y0~sX0, ncomp=min(k,length(idx)), method="simpls", scale=F) beta = matrix( 0, p, q ); beta[idx,] = matrix( coef(plsfit), length(idx), q ) betas[[j]] = beta; prs[[j]] = plsfit$projection err = (norm(Y0-X0%*%beta)^2) / (n*q) ## df estimation ## Method 1 dfterm = (log(n*q) / (n*q)) * nnz_w ## Method 2 #sX = X[,idx_w,drop=FALSE] #lam = lambdas[j]; lam2 = alpha2*lam #DF = sX %*% solve( t(sX)%*%sX + lam2*L[idx_w, idx_w] + lam2*diag(nnz_w), t(sX)) #df = sum(diag(DF)); dfterm = (log(n*q) / (n*q))*df #plty = 2*0.5*log(choose(p, nnz_w)) bic = log(err) + dfterm # + plty print(paste("j=",j,", nnz=",nnz_w,", err=",err,", log(err)=",log(err),", dfterm=",dfterm, ", bic=",bic,sep="")) if (bic<min_bic) { idx_bic = j; min_bic = bic } } if (all_zero) { print(paste("[opt]No non-zero found at PC ",k,sep="")) break } w = ws[ ,idx_bic] v = vs[ ,idx_bic]
e22aa30cf69877285ef788722f94005563d17afa
39b1d1b3f6e933d7fc7c30579f76d5027620b3b0
/R/parse_crossovers.R
54ad6cc9aec9ee95478e6b8c7324a19fd014e285
[ "LicenseRef-scancode-warranty-disclaimer" ]
no_license
susjoh/crimaptools
bb46fdf82a889fd1b7e203fe2872c348f71686f0
70a6ec563a8544fb533a0b9a653189501bedd5e2
refs/heads/master
2021-01-09T20:53:58.204711
2020-10-12T22:23:26
2020-10-12T22:23:26
58,579,445
1
0
null
null
null
null
UTF-8
R
false
false
4,829
r
parse_crossovers.R
#' parse_crossovers: Parse crossover information from CriMAP chrompic output
#' @param chrompicfile File with output from chrompic
#' @param familyPedigree data.frame containing columns ANIMAL, FATHER, MOTHER
#'   and FAMILY. FAMILY defines family groups for crimap. IDs in ANIMAL can be
#'   repeated within and between families if necessary. Should be that used in
#'   create_crimap_input.
#' @param remove.zero.inf.loci Logical, default = TRUE. Remove IDs with no
#'   informative loci.
#' @return data.frame with one row per parsed chromosome/parent combination,
#'   including RecombCount, No.Inf.Loci, RRID and a UniqueID.
#' @import plyr
#' @export

parse_crossovers <- function(chrompicfile, familyPedigree, remove.zero.inf.loci = TRUE){

  #~~ read lines from the chrompic file; marker count comes from the map
  #   section (parse_map_chrompic is a sibling function of this package)
  x <- readLines(chrompicfile)
  nmarkers <- nrow(parse_map_chrompic(chrompicfile))

  #~~ keep only the per-family section and drop summary/blank lines
  x <- x[grep("Family", x)[1]:(grep("Overall recombinations", x)-1)]
  x <- x[-grep("^Recombinations ", x)]
  x <- x[-grep("^Non-recombinations ", x)]
  x <- x[-grep("^Chromosomes ", x)]
  x <- x[-which(x == "")]
  # NOTE(review): the "> 0" sits inside length(), so the condition is
  # length(grep(...)) -- numerically the match count, which happens to
  # behave as intended (0 = FALSE); confirm the intended spacing pattern.
  if (length(grep(" ", x) > 0)) x <- x[-grep(" ", x)]

  #~~ create a table with family information to add to the full table later
  famtab <- data.frame(FamilyOrder = 1:length(grep("Family", x)),
                       FamilyID = grep("Family", x, value = T),
                       LineNo = grep("Family", x),
                       stringsAsFactors = F)

  # line index of each family header after removing earlier headers
  famtab$startline <- famtab$LineNo - (famtab$FamilyOrder - 1)
  famtab$FamilyShortID <- NA
  # second whitespace-separated token of "Family <id> ..." is the short ID
  famtab$FamilyShortID <- unlist(lapply(famtab$FamilyID,
                                        function (x) strsplit(x, split = " ")[[1]][2]))

  x <- x[-grep("Family", x)]
  # number of data lines belonging to each family
  famtab$Count <- diff(c(famtab$startline, (length(x) + 1)))
  table(famtab$Count)  # (result unused; presumably a leftover check)
  famvec <- rep(famtab$FamilyShortID, famtab$Count)

  #~~ create a data frame to put all information in
  recombframe <- data.frame(data = x,
                            ANIMAL = NA,
                            RecombCount = NA,
                            parent = NA,
                            Family = famvec,
                            stringsAsFactors=F)

  #~~ Get the IDs: mother lines carry the ID in field 1, father lines in
  #   field 2 (field counts differ by one -- see the length test below)
  recombframe$ANIMAL <- unlist(lapply(recombframe$data, function(foo){
    z <- strsplit(foo, split = "\\s+")[[1]]
    if(length(z) == ceiling((nmarkers/10)+2)){
      return(z[1])
    } else {
      return(z[2])
    }
  }))

  # rows where an ID was extracted in field 1 are maternal chromosomes;
  # copy those IDs onto the paired (empty) paternal rows
  recombframe$parent[which(recombframe$ANIMAL != "")] <- "MOTHER"
  recombframe$ANIMAL[which(recombframe$ANIMAL == "")] <- recombframe$ANIMAL[which(recombframe$ANIMAL != "")]
  recombframe$parent[which(is.na(recombframe$parent))] <- "FATHER"

  #~~ get the number of recombinations for each individual
  #   (last whitespace-separated token of the line)
  recExtract <- function(CMPstring){
    x <- strsplit(CMPstring, split = " ")[[1]][length(strsplit(CMPstring, split = " ")[[1]])]
  }

  recombframe$RecombCount <- sapply(recombframe$data, recExtract, simplify = TRUE)

  #~~ strip the ID and the trailing count, keeping only the chromosome
  #   phase string, so informative loci can be counted
  removeID <- function(CMPstring){
    z <- strsplit(CMPstring, split = "\\s+")[[1]]
    if(length(z) == ceiling((nmarkers/10)+2)){
      return(paste0(z[2:(length(z)-1)], collapse = ""))
    } else {
      return(paste0(z[3:(length(z)-1)], collapse = ""))
    }
  }

  system.time(recombframe$data <- unlist(lapply(recombframe$data, removeID)))

  # uninformative positions are "-", "c" and ":" -- remove them and count
  # what remains as informative loci
  recombframe$data2 <- gsub("-" , "", recombframe$data)
  recombframe$data2 <- gsub("c" , "", recombframe$data2)
  recombframe$data2 <- gsub(":" , "", recombframe$data2)
  recombframe$No.Inf.Loci <- nchar(recombframe$data2)
  recombframe <- subset(recombframe, select = -data2)

  recombframe$RecombCount <- as.numeric(recombframe$RecombCount)

  if(remove.zero.inf.loci == TRUE) recombframe <- subset(recombframe, No.Inf.Loci != 0)

  #~~ Get first and last informative positions (InfLengthFunc is defined
  #   elsewhere in this package)
  recombframe$First.Inf.Order <- unlist(lapply(recombframe$data, InfLengthFunc))
  recombframe$Last.Inf.Order  <- unlist(lapply(recombframe$data, function(x) InfLengthFunc(x, position = "Last")))

  #~~ add the RRID information - the individual in which the recombination took place
  suppressMessages(recombframe <- join(recombframe, familyPedigree))

  recombframe$RRID <- NA
  recombframe$RRID[which(recombframe$parent == "MOTHER")] <- recombframe$MOTHER[which(recombframe$parent == "MOTHER")]
  recombframe$RRID[which(recombframe$parent == "FATHER")] <- recombframe$FATHER[which(recombframe$parent == "FATHER")]

  #~~ add analysisID: the chrompic file's basename with "chr" prefix and
  #   ".cmp" extension removed
  analysisID.val <- gsub("\\\\", "/", chrompicfile)
  analysisID.val <- strsplit(analysisID.val, split = "/")[[1]]
  analysisID.val <- analysisID.val[length(analysisID.val)]
  analysisID.val <- gsub("chr", "", analysisID.val)
  analysisID.val <- gsub(".cmp", "", analysisID.val, fixed = T)

  recombframe$analysisID <- analysisID.val

  recombframe$UniqueID <- paste(recombframe$analysisID,
                                recombframe$Family,
                                recombframe$RRID,
                                recombframe$ANIMAL,
                                recombframe$parent, sep = "_")

  recombframe
}
4c27a21156f045fc1e332f953dfb5aff838aab91
0ff7d2d3b7a141ca374a1d73a47289871d5d2fda
/R/get_alphabet_freq.R
4095237cf3c35dfebfa7bde2e07dd08a12b4d1a3
[]
no_license
tinyheero/shmutils
a13f44111d8323fef68810f7a0f88aad7c09a2ec
a26d06eeb21dca16f70ddb422604ae60d5ec25f6
refs/heads/master
2020-12-25T18:23:16.564227
2015-07-16T21:55:32
2015-07-16T21:55:32
39,107,770
0
0
null
null
null
null
UTF-8
R
false
false
1,286
r
get_alphabet_freq.R
#' Return the Alphabet Distribution Over a GRanges Object #' #' Calculates the number of different bases there are in a particlar GRanges #' object. #' #' @param gr GRanges object. There needs to be a id column in the metadata. #' So that the number of motifs can be associated with the original #' GenomicRange. #' @param BS.genome This a Biostring-based genome object. (BSgenome from #' Bioconductor). For instance, library("BSgenome.Hsapiens.UCSC.hg19") #' can be used. #' @return A matrix containing with letters as columns and each GRange as #' a row. Each value indicates the prevalence of that letter in the #' GRange #' @export get_alphabet_freq <- function(gr, bs.genome) { if (!"id" %in% colnames(S4Vectors::mcols(gr))) { stop("No id column in the metadata columns of gr. This column is need to map the original GenomicRanges to the motifs") } message("Retrieving Sequences of the GRanges Object") gr.window.seq <- Biostrings::getSeq(bs.genome, gr) # assign names so that we can map the matches back to GRanges object names(gr.window.seq) <- S4Vectors::mcols(gr)[, "id"] message("Calculating the alphabet frequency") alpha.freq <- Biostrings::alphabetFrequency(gr.window.seq) alpha.freq }
73d8bb5931616fee34f7632ccf277db2a833edfd
fdae59c4711c9710bbbb78e98a8dd1a6e7026eeb
/rserv/suncorp_rserve/qa/duplicates_Scenario.R
61ad63a58b5e72f8b925c14f6709b316d6ff2e17
[]
no_license
saurabh-singh-17/secgov
ac61459751b0e8cfa6f5ec828a87054819329101
b19b9a838cd0c43562dc8fa771075ca3f2635905
refs/heads/master
2020-12-14T01:01:49.172334
2017-01-15T07:58:03
2017-01-15T07:58:03
55,128,035
0
0
null
null
null
null
UTF-8
R
false
false
2,922
r
duplicates_Scenario.R
# Written by: Tushar Gupta
# Time : Sep 2014
#=================================================================================
# Globals expected from the caller / rserve environment:
#   inputPath, outputPath, customKey, var_list (e.g. c("Store_Format")),
#   scenarioName, keepOption ("FIRST" | "LAST" | "CUSTOM")
# NOTE(review): number_deleted is only set inside the three keepOption
# branches; any other keepOption value makes the script fail further down.
# NOTE(review): in the CUSTOM branch customKey is read before any visible
# assignment -- it is presumably supplied by the caller; confirm.
#===============================for verify and save button========================

options(digits=10)

# dataworking is loaded from the scenario's working RData file
load(paste(inputPath,"/dataworking.RData",sep=""))

# name of the marker column added to rows that are kept
var_name<- paste("murx_",scenarioName,sep="")

# build the duplicate-detection key: concatenation of the selected columns
if (length(var_list) != 1){
  dataworking[,"var"] <-apply(dataworking[,var_list],1,function(x){paste(x,collapse="_")})
}else{
  dataworking[,"var"] <- dataworking[,var_list]
}

number_records <- nrow(dataworking)
duplicates<- duplicated(dataworking[,"var"])
duplicates_detected <- length(which(duplicates=="TRUE"))

Statistics <- c("Number of Observations", "Number of Duplicates detected","Number of Duplicates Removed","Keep Option")

#=================================================================================
# Keep the first occurrence (both as option and for auto ticking in view of first)
#=================================================================================
if (keepOption == "FIRST"){
  logical_vector_F <- duplicated(dataworking[,"var"])
  # mark non-duplicates (i.e. first occurrences) as kept
  dataworking[!logical_vector_F,var_name] <- 1
  # data_final_first <- data[!logical_vector_F,]
  number_deleted <- duplicates_detected
}

#=================================================================================
# Keep the last occurrence
#=================================================================================
if (keepOption == "LAST"){
  logical_vector_L <- duplicated(dataworking[,"var"],fromLast=T)
  dataworking[!logical_vector_L,var_name] <- 1
  # data_final_last <-data[!logical_vector_L,]
  number_deleted <-duplicates_detected
}

# Customized selection: keep the caller-supplied rows plus every row whose
# key is unique (neither a forward nor a backward duplicate)
if (keepOption =="CUSTOM"){
  allDups <- !duplicated(dataworking[,"var"],fromLast=TRUE) & !duplicated(dataworking[,"var"])
  uniquekey<-which(allDups=="TRUE")
  if (length(uniquekey) != 0){
    customKey<- c(customKey,uniquekey)
  }
  dataworking[customKey,var_name] <-1
  number_deleted<-nrow(dataworking)-length(customKey)
}

#===============================================================================
# Report and persist
#===============================================================================
# drop the temporary key column before saving
dataworking$var <- NULL
duplicates_deleted <- number_deleted
records <- cbind(duplicates_detected,duplicates_deleted)

Value <- c(number_records,duplicates_detected,number_deleted,keepOption)
Observations <- cbind(Statistics,Value)

write.csv(Observations,paste(outputPath,"/","OBSERVATIONS.csv",sep=""),row.names=F,quote=F)
save(dataworking,file=paste(paste(inputPath, "/", "dataworking.RData", sep="")))
# sentinel file signalling scenario completion to the caller
write("COMPLETED",file=paste(outputPath,"/","SCENARIO_COMPLETED.TXT",sep=""))
3c6258110fa5980decd7860f2dc1960b22d0d220
0c1885fa442778f9e027975ef82267c5b6d69075
/learn_hmmr.R
202eccfbe1d7c39233b5263664c945a63f579af1
[]
no_license
blinsimon/HMMR
489a63344350f8a5d31b796ff796f6b90373f4e9
45dc511dc94c0a39cfc86441a02a2d5644eff5cd
refs/heads/master
2020-04-27T00:52:57.589728
2019-03-05T13:07:32
2019-03-05T13:07:32
173,947,996
0
0
null
null
null
null
IBM852
R
false
false
14,221
r
learn_hmmr.R
learn_hmmr<- function(x, y, K, p,type_variance, total_EM_tries, max_iter_EM, threshold, verbose){ # learn_hmmr learn a Regression model with a Hidden Markov Process (HMMR) # for modeling and segmentation of a time series with regime changes. # The learning is performed by the EM (Baum-Welch) algorithm. # # # Inputs : # # (x,y) : a time series composed of m points : dim(y)=[m 1] # * Each curve is observed during the interval [0,T], i.e x =[t_1,...,t_m] # # K : Number of polynomial regression components (regimes) # p : degree of the polynomials # # Outputs : # # hmmr: the estimated HMMR model. a structure composed of: # # prior: [Kx1]: prior(k) = Pr(z_1=k), k=1...K # trans_mat: [KxK], trans_mat(\ell,k) = Pr(z_t = k|z_{t-1}=\ell) # reg_param: the paramters of the regressors: # betak: regression coefficients # sigma2k (or sigma2) : the variance(s) # Stats: # tau_tk: smoothing probs: [nxK], tau_tk(t,k) = Pr(z_i=k | y1...yn) # alpha_tk: [nxK], forwards probs: Pr(y1...yt,zt=k) # beta_tk: [nxK], backwards probs: Pr(yt+1...yn|zt=k) # xi_tkl: [(n-1)xKxK], joint post probs : xi_tk\elll(t,k,\ell) = Pr(z_t=k, z_{t-1}=\ell | Y) t =2,..,n # X: [nx(p+1)] regression design matrix # nu: model complexity # parameter_vector # f_tk: [nxK] f(yt|zt=k) # log_f_tk: [nxK] log(f(yt|zt=k)) # loglik: log-likelihood at convergence # stored_loglik: stored log-likelihood values during EM # cputime: for the best run # cputime_total: for all the EM runs # klas: [nx1 double] # Zik: [nxK] # state_probs: [nxK] # BIC: -2.1416e+03 # AIC: -2.0355e+03 # regressors: [nxK] # predict_prob: [nxK]: Pr(zt=k|y1...y_{t-1}) # predicted: [nx1] # filter_prob: [nxK]: Pr(zt=k|y1...y_t) # filtered: [nx1] # smoothed_regressors: [nxK] # smoothed: [nx1] # # #Faicel Chamroukhi, sept 2008 # ## Please cite the following papers for this code: # # # @article{Chamroukhi-FDA-2018, # Journal = {Wiley Interdisciplinary Reviews: Data Mining and Knowledge Discovery}, # Author = {Faicel Chamroukhi and Hien D. 
Nguyen}, # Note = {DOI: 10.1002/widm.1298.}, # Volume = {}, # Title = {Model-Based Clustering and Classification of Functional Data}, # Year = {2019}, # Month = {to appear}, # url = {https://chamroukhi.com/papers/MBCC-FDA.pdf} # } # # @InProceedings{Chamroukhi-IJCNN-2011, # author = {F. Chamroukhi and A. Sam\'e and P. Aknin and G. Govaert}, # title = {Model-based clustering with Hidden Markov Model regression for time series with regime changes}, # Booktitle = {Proceedings of the International Joint Conference on Neural Networks (IJCNN), IEEE}, # Pages = {2814--2821}, # Adress = {San Jose, California, USA}, # year = {2011}, # month = {Jul-Aug}, # url = {https://chamroukhi.com/papers/Chamroukhi-ijcnn-2011.pdf} # } # # @INPROCEEDINGS{Chamroukhi-IJCNN-2009, # AUTHOR = {Chamroukhi, F. and Sam\'e, A. and Govaert, G. and Aknin, P.}, # TITLE = {A regression model with a hidden logistic process for feature extraction from time series}, # BOOKTITLE = {International Joint Conference on Neural Networks (IJCNN)}, # YEAR = {2009}, # month = {June}, # pages = {489--496}, # Address = {Atlanta, GA}, # url = {https://chamroukhi.com/papers/chamroukhi_ijcnn2009.pdf}, # slides = {./conf-presentations/presentation_IJCNN2009} # } # # @article{chamroukhi_et_al_NN2009, # Address = {Oxford, UK, UK}, # Author = {Chamroukhi, F. and Sam\'{e}, A. and Govaert, G. 
and Aknin, P.}, # Date-Added = {2014-10-22 20:08:41 +0000}, # Date-Modified = {2014-10-22 20:08:41 +0000}, # Journal = {Neural Networks}, # Number = {5-6}, # Pages = {593--602}, # Publisher = {Elsevier Science Ltd.}, # Title = {Time series modeling by a regression approach based on a latent process}, # Volume = {22}, # Year = {2009}, # url = {https://chamroukhi.users.lmno.cnrs.fr/papers/Chamroukhi_Neural_Networks_2009.pdf} # } # ########################################################################################## options(warn=-1) if (nargs()<9){verbose =0} if (nargs()<8){threshold = 1e-6} if (nargs()<7){max_iter_EM = 1500} if (nargs()<6){total_EM_tries = 1} if (nargs()<5){total_EM_tries = 1 type_variance='hetereskedastic'} if (type_variance == 'homoskedastic'){homoskedastic =1} else if (type_variance == 'hetereskedastic'){homoskedastic=0} else{stop('The type of the model variance should be : "homoskedastic" or "hetereskedastic"')} #Chargement de toutes les fonctions Ó utiliser source("designmatrix.R") source("init_hmmr.R") source("forwards_backwards.R") source("normalise.R") source("mk_stochastic.R") source("MAP.R") source("hmm_process.R") if (ncol(y)!=1){ y=t(y) } m = length(y)#length(y) X = designmatrix(x,p)#design matrix P = ncol(X)# here P is p+1 I = diag(P)# define an identity matrix, in case of a Bayesian regularization for regression # best_loglik = -10000000 nb_good_try=0 total_nb_try=0 cputime_total=c() while (nb_good_try < total_EM_tries){ start_time = Sys.time() if (total_EM_tries>1){ paste('EM try n░',(nb_good_try+1)) } total_nb_try=total_nb_try+1 ## EM Initializaiton step ## Initialization of the Markov chain params, the regression coeffs, and the variance(s) HMMR = init_hmmr(X, y, K, type_variance, nb_good_try+1) # calculare the initial post probs (tau_tk) and joint post probs (xi_ikl) #f_tk = hmmr.stats.f_tk; # observation component densities: f(yt|zt=k) prior = HMMR[[1]] trans_mat = HMMR[[3]] Mask = HMMR[[2]] betak = HMMR[[4]][[1]] if 
(homoskedastic==1){ sigma2 = HMMR[[4]][[2]] } else { sigma2k = HMMR[[4]][[3]] } # iter = 0 prev_loglik = -100000 converged = 0 top = 0 # log_f_tk = matrix(c(0),m,K) muk = matrix(c(0),m,K) # ## EM stored_loglik=c() while ((iter <= max_iter_EM) & (converged!=1)){ ## E step : calculate tge tau_tk (p(Zt=k|y1...ym;theta)) and xi t_kl (and the log-likelihood) by # forwards backwards (computes the alpha_tk et beta_tk) # observation likelihoods for (k in 1:K){ mk = X%*%betak[,k] muk[,k] = mk # the regressors means if (homoskedastic==1){ sk = sigma2 } else{ sk = sigma2k[k] } z=((y - mk)^2)/sk log_f_tk[,k] = -0.5*matrix(c(1),m,1)%*%(log(2*pi)+log(sk)) - 0.5*z#log(gaussienne) } for (k in 1:K){ for (i in 1:nrow(log_f_tk)){ log_f_tk[i,k] = min(log_f_tk[i,k],log(.Machine$double.xmax)) log_f_tk[i,k] = max(log_f_tk[i,k] ,log(.Machine$double.xmin)) } } f_tk = exp(log_f_tk) #fprintf(1, 'forwards-backwards '); #[tau_tk, xi_tkl, alpha_tk, beta_tk, loglik] = fb=forwards_backwards(prior, trans_mat , f_tk ) tau_tk=fb[[1]] xi_tkl=fb$xi_tkl alpha_tk=fb[[3]] beta_tk=fb[[4]] loglik=fb[[5]] ## M step # updates of the Markov chain parameters # initial states prob: P(Z_1 = k) prior = normalise(tau_tk[1,])[[1]] # transition matrix: P(Zt=i|Zt-1=j) (A_{k\ell}) #print(cbind(apply(xi_tkl[,,1],2,sum),apply(xi_tkl[,,2],2,sum),apply(xi_tkl[,,3],2,sum))) # print(xi_tkl[,,1]) trans_mat = round(mk_stochastic(cbind(apply(xi_tkl[,,1],2,sum), apply(xi_tkl[,,2],2,sum), apply(xi_tkl[,,3],2,sum), apply(xi_tkl[,,4],2,sum), apply(xi_tkl[,,5],2,sum))),4) # for segmental HMMR: p(z_t = k| z_{t-1} = \ell) = zero if k<\ell (no back) of if k >= \ell+2 (no jumps) trans_mat = mk_stochastic(Mask*trans_mat) ## update of the regressors (reg coefficients betak and the variance(s) sigma2k) s = 0 # if homoskedastic for (k in 1:K){ wieghts = tau_tk[,k] nk = sum(wieghts)# expected cardinal nbr of state k Xk = X*(sqrt(wieghts)%*%matrix(c(1),1,P))#[n*(p+1)] yk=y*(sqrt(wieghts))# dimension :[(nx1).*(nx1)] = [nx1] # reg coefficients 
lambda = 1e-9 # if a bayesian prior on the beta's bk = (solve(t(Xk)%*%Xk + lambda*diag(P))%*%t(Xk))%*%yk betak[,k] = bk # variance(s) z = sqrt(wieghts)*(y-X%*%bk) sk = t(z)%*%z if (homoskedastic==1){ s = (s+sk) sigma2 = s/m } else{ sigma2k[k] = sk/nk } } ## En of an EM iteration iter = iter + 1 # test of convergence loglik = loglik + log(lambda) if (verbose==1){ paste('HMM_regression | EM : Iteration :', iter,' Log-likelihood : ', loglik) } if (prev_loglik-loglik > 1e-4){ top = top+1; if (top==10){ stop(paste('!!!!! The loglikelihood is decreasing from',prev_loglik,' to ',loglik)) } } converged = (abs(loglik - prev_loglik)/abs(prev_loglik) < threshold) stored_loglik[iter] <-loglik print(stored_loglik) prev_loglik = loglik end_time=Sys.time() cputime_total[nb_good_try+1]=c(end_time-start_time) } reg_param=list(betak=betak) if (homoskedastic==1){ reg_param = append(reg_param,list(sigma2=sigma2)) } else{ reg_param = append(reg_param,list(sigma2k=sigma2k)) } hmmr=list(prior=prior,trans_mat=trans_mat,reg_param=reg_param) # Estimated parameter vector (Pi,A,\theta) if (homoskedastic==1){ parameter_vector=c(prior, trans_mat[Mask!=0],c(betak[1:P,]), sigma2) nu = K-1 + K*(K-1) + K*(p+1) + 1#length(parameter_vector);# } else{ parameter_vector=c(prior, trans_mat[Mask!=0],c(betak[1:P,]), sigma2k) nu = K-1 + K*(K-1) + K*(p+1) + K #length(parameter_vector);# } stats=list(nu=nu,parameter_vector=parameter_vector,tau_tk=tau_tk,alpha_tk=alpha_tk,beta_tk=beta_tk, xi_tkl=xi_tkl,f_tk=f_tk,log_f_tk=log_f_tk,loglik=loglik,stored_loglik=stored_loglik,X=X) if (total_EM_tries>1){ paste('loglik_max = ',loglik) } # # if (length(hmmr$reg_param$betak)!=0){ nb_good_try=nb_good_try+1 total_nb_try=0 if (loglik > best_loglik){ best_hmmr = hmmr best_loglik = loglik } } if (total_nb_try > 500){ paste('can',"'",'t obtain the requested number of classes') hmmr=NULL return(hmmr) } }#End of the EM runs hmmr = best_hmmr # if (total_EM_tries>1){ paste('best_loglik: ',stats$loglik) } # # 
stats=append(stats,list(cputime=mean(cputime_total),cputime_total=cputime_total)) ## Smoothing state sequences : argmax(smoothing probs), and corresponding binary allocations partition stats=append(stats,MAP(stats$tau_tk)) #[hmmr.stats.klas, hmmr.stats.Zik ] # # compute the sequence with viterbi # #[path, ~] = viterbi_path(hmmr.prior, hmmr.trans_mat, hmmr.stats.fik'); # #hmmr.stats.viterbi_path = path; # #hmmr.stats.klas = path; # ################### # # # ## determination des temps de changements (les fonti├Ętres entres les # # ## classes) # # nk=sum(hmmr.stats.Zik,1); # # for k = 1:K # # tk(k) = sum(nk(1:k)); # # end # # hmmr.stats.tk = [1 tk]; # # ## sate sequence prob p(z_1,...,z_n;\pi,A) state_probs = hmm_process(hmmr$prior, hmmr$trans_mat, m) stats = append(stats,list(state_probs=state_probs)) ### BIC, AIC, ICL stats = append(stats,list(BIC=(stats$loglik - (stats$nu*log(m)/2)))) stats = append(stats,list(AIC=(stats$loglik - stats$nu))) # # CL(theta) : Completed-data loglikelihood # sum_t_log_Pz_ftk = sum(hmmr.stats.Zik.*log(state_probs.*hmmr.stats.f_tk), 2); # comp_loglik = sum(sum_t_log_Pz_ftk(K:end)); # hmmr.stats.comp_loglik = comp_loglik; # hmmr.stats.ICL = comp_loglik - (nu*log(m)/2); ## predicted, filtered, and smoothed time series stats = append(stats,list(regressors = round(X%*%hmmr$reg_param$betak,4))) # prediction probs = Pr(z_t|y_1,...,y_{t-1}) predict_prob = matrix(c(0),m,K) predict_prob[1,] = hmmr$prior#t=1 p (z_1) predict_prob[2:m,] = round((stats$alpha_tk[(1:(m-1)),]%*%hmmr$trans_mat)/(apply(stats$alpha_tk[(1:(m-1)),],1,sum)%*%matrix(c(1),1,K)),5)#t =2,...,n stats = append(stats,list(predict_prob = predict_prob)) # predicted observations stats = append(stats,list(predicted = apply(round(stats$predict_prob*stats$regressors,5),1,sum)))#pond par les probas de prediction # filtering probs = Pr(z_t|y_1,...,y_t) stats = append(stats,list(filter_prob = round(stats$alpha_tk/(apply(stats$alpha_tk,1,sum)%*%matrix(c(1),1,K)),5)))#normalize(alpha_tk,2); # 
filetered observations stats = append(stats,list(filtered = apply(round(stats$filter_prob*stats$regressors,5), 1,sum)))#pond par les probas de filtrage ### smoothed observations stats = append(stats,list(smoothed_regressors = stats$tau_tk*stats$regressors)) stats =append(stats,list(smoothed = apply(stats$smoothed_regressors, 1,sum))) hmmr=c(hmmr,stats=list(stats)) return(hmmr) }
34454d1159ed4c253064e705fbafbaa46e7af6d3
4459eb5432916b4ad6c5c5d911b50c9d2fec1ad5
/man/table.Correlation.Rd
2d706b7bb49ee43ebf807ec5bd230c9552f088bb
[]
no_license
braverock/PerformanceAnalytics
057af55b0a4ddeb4befcc02e36a85f582406b95c
49a93f1ed6e2e159b63bf346672575f3634ed370
refs/heads/master
2023-08-03T10:18:27.115592
2023-03-29T09:23:17
2023-03-29T09:23:17
58,736,268
209
113
null
2023-05-23T17:46:08
2016-05-13T12:02:42
R
UTF-8
R
false
true
1,341
rd
table.Correlation.Rd
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/table.Correlation.R
\name{table.Correlation}
\alias{table.Correlation}
\title{calculate correlations of multicolumn data}
\usage{
table.Correlation(Ra, Rb, ...)
}
\arguments{
\item{Ra}{a vector of returns to test, e.g., the asset to be examined}

\item{Rb}{a matrix, data.frame, or timeSeries of benchmark(s) to test the
asset against.}

\item{\dots}{any other passthru parameters to \code{\link{cor.test}}}
}
\description{
This is a wrapper for calculating correlation and significance against
each column of the data provided.
}
\examples{
# First we load the data
data(managers)
table.Correlation(managers[,1:6],managers[,7:8])

result=table.Correlation(managers[,1:6],managers[,8])
rownames(result)=colnames(managers[,1:6])
require("Hmisc")
textplot(format.df(result, na.blank=TRUE, numeric.dollar=FALSE,
         cdec=rep(3,dim(result)[2])), rmar = 0.8, cmar = 1.5,  max.cex=.9,
         halign = "center", valign = "top", row.valign="center",
         wrap.rownames=20, wrap.colnames=10, mar = c(0,0,3,0)+0.1)
title(main="Correlations to SP500 TR")

ctable = table.Correlation(managers[,1:6],managers[,8,drop=FALSE], conf.level=.99)
dotchart(ctable[,1],labels=rownames(ctable),xlim=c(-1,1))
}
\seealso{
\code{\link{cor.test}}
}
\author{
Peter Carl
}
45ecea4c881b9a7f66994ec6fee5f3dae1dfc5a4
fde0347e6cd6aadf0c5850de45f26fc771e67f0e
/Demos/Rbased/CrashCourse/03_ClimateProcessing.R
ccb56c068b864c8b0e5f34d11f8c00048bf14ef4
[]
no_license
shichaochen/BotanyENMWorkshop2020
4b43292776c6948ae639a4d5f4d5e5d245ed9e16
9cb02a7a52bdf5d4a453d6b34c7334f7fd9e3d43
refs/heads/master
2022-11-24T11:34:20.051259
2020-08-01T17:42:12
2020-08-01T17:42:12
null
0
0
null
null
null
null
UTF-8
R
false
false
5,777
r
03_ClimateProcessing.R
# 03_ClimateProcessing.R
## Climate layer processing: mask and crop the Bioclim rasters to Florida,
## then compute pairwise Pearson correlations to pick layers for MaxEnt.
## Original script by Charlotte Germain-Aubrey; modified and created by
## ML Gaynor. Refactored: the 20 copy-pasted mask/crop stanzas are replaced
## by a single loop over the layer names (identical raster operations).

# Load Packages
library(maptools)
library(raster)
library(rgdal)
library(sp)
library(maps)
library(mapproj)

# Names of the 20 Bioclim layers: altitude plus bio1..bio19.
layer_names <- c("alt", paste0("bio", 1:19))

# Load every layer from the Bioclim folder into a named list of RasterLayers.
layers <- setNames(
  lapply(layer_names, function(nm) {
    raster(file.path("data/climate_processing/Bioclim", paste0(nm, ".bil")))
  }),
  layer_names
)

# Define desired extent
## We are going to use Florida as our desired extent.
FL <- rgdal::readOGR("data/climate_processing/FL/FLstate2.shp")
# Hoist the bounding box out of the loop; it is the same for every layer.
fl_extent <- extent(FL)

# Visualize the first (altitude) layer before any processing.
plot(layers$alt)

# Walk through the altitude layer step by step, as in the original tutorial:
## First, mask the layer with Florida's outline.
alt <- mask(layers$alt, FL)
## Visualize the masked layer.
plot(alt)
## Next, crop the masked layer to Florida's bounding box.
alt <- crop(alt, fl_extent)
## Visualize the final layer.
plot(alt)
## Now save the layer - note the processed layers already ship with the data,
## so 'overwrite' is required to replace them.
writeRaster(alt, "data/climate_processing/PresentLayers/alt.asc",
            format = "ascii", overwrite = TRUE)

# Process the 19 bioclim layers the same way: mask to Florida, then crop.
bio <- lapply(layers[paste0("bio", 1:19)],
              function(r) crop(mask(r, FL), fl_extent))

# To save the processed bioclim layers, uncomment the loop below
# (pre-computed copies are already in PresentLayers/):
# for (nm in names(bio)) {
#   writeRaster(bio[[nm]],
#               file.path("data/climate_processing/PresentLayers",
#                         paste0(nm, ".asc")),
#               format = "ascii")
# }

# Select layers for MaxEnt
## We only want to include layers that are not highly correlated.
## To assess which layers we will include, we look at the pairwise Pearson
## correlation coefficients among the processed layers.
### Stack all 19 bioclim layers (raster::stack accepts a list).
stack <- stack(bio)
### Then calculate the correlation coefficients.
corr <- layerStats(stack, 'pearson', na.rm = TRUE)
### Isolate only the Pearson correlation coefficient matrix.
### (Renamed from 'c' to avoid shadowing base::c.)
cor_matrix <- corr$`pearson correlation coefficient`
### Write file and view in excel
#write.csv(cor_matrix, "data/climate_processing/correlationBioclim.csv")
## Highly correlated layers (> |0.80|) can impact the statistical significance
## of the niche models and therefore must be removed.
ba27ed6ee0531c5ba65a216e8f08eb8363431df7
9f9cc8931df4007d7ae3d215b1f39b6658200e79
/BLP.R
07cf32bce36001da638d0c09b9869b28d68cb9b9
[]
no_license
ygh929/GP_Gibbs
e45940deedb296efa5f21531054e04289c8ab464
5e7150951b9121e643eba64c9e2b799cb3ea2ee1
refs/heads/master
2021-01-20T02:15:34.887554
2015-08-29T04:51:42
2015-08-29T04:51:42
20,091,153
1
0
null
null
null
null
UTF-8
R
false
false
469
r
BLP.R
# GetXZ: build the design matrix X and the comparison-outcome vector Z from
# paired-comparison data.
#
# Arguments:
#   cDat - a list with elements:
#     points - an n x d matrix of items (only its row count n is used here)
#     pairs  - an m x 2 matrix; row i gives the indices of the two items
#              compared in trial i
#
# Returns a *named* list:
#   X - m x n matrix with +1 in the column of the smaller index and -1 in the
#       column of the larger index of each pair
#   Z - m x 1 matrix: +1 if the first item of pair i has the larger index,
#       -1 otherwise
GetXZ <- function(cDat) {
  points <- cDat$points
  pairs <- cDat$pairs
  m <- nrow(pairs)
  n <- nrow(points)
  X <- matrix(0, nrow = m, ncol = n)
  Z <- matrix(NA_real_, nrow = m, ncol = 1)
  for (i in seq_len(m)) {
    # Bug fix: use the whole i-th row 'pairs[i, ]'. The original indexed
    # 'pairs[i]', which picks a single element in column-major order, so
    # min() and max() both returned the same value and the +1 entry was
    # immediately overwritten by -1.
    X[i, min(pairs[i, ])] <- 1
    X[i, max(pairs[i, ])] <- -1
    # Indicator coded as +/-1 (TRUE coerces to 1, FALSE to 0).
    Z[i, 1] <- 2 * (pairs[i, 1] > pairs[i, 2]) - 1
  }
  # Bug fix: return a named list. The original returned list(X, Z) unnamed,
  # so the callers' M$X and M$Z silently evaluated to NULL.
  list(X = X, Z = Z)
}

# Set the size of the training and validation data sets.
n1 <- 50
n2 <- 50

# Generate training data and calculate X, Z.
# NOTE(review): gendata(), convertDat(), getSigma() and 'p' are defined
# elsewhere in this project - confirm they are sourced before this script.
Gen1 <- gendata(n1, p, type = 1)
Dat <- Gen1$Dat
cDat <- convertDat(Dat)
# Bug fix: call the function by its actual name (the original called
# getXZ(), which is not defined in this file - R is case-sensitive).
M <- GetXZ(cDat)
X <- M$X
Z <- M$Z
Sig <- getSigma()
b52f983bad7d19a57cfa1b85bebdc969950f4e8c
84e92a2e0e3f8fbdb724e7b3d70052a9059d5259
/test/rgtsvm-svmlight.R
cc05be6ed13620e3ed39e5dc4c48e6ea702335e5
[]
no_license
minghao2016/Rgtsvm
bc9c3fdd25d5a89962e9c3608357041338d5e819
b7e7bf043b70c1d21a96406ef4e0847922495ff1
refs/heads/master
2021-01-21T11:36:24.676439
2016-12-12T20:40:56
2016-12-12T20:40:56
null
0
0
null
null
null
null
UTF-8
R
false
false
850
r
rgtsvm-svmlight.R
# Train a GPU-accelerated SVM (Rgtsvm) on SVMlight-formatted data and report
# classification accuracy on the training set.
library(Rgtsvm);

# Example data source:
#wget http://www.csie.ntu.edu.tw/~cjlin/libsvmtools/datasets/multiclass/glass.scale
# normal matrix
#mat <- as.matrix( load.svmlight("http://www.csie.ntu.edu.tw/~cjlin/libsvmtools/datasets/multiclass/glass.scale") );
# sparse matrix
#mat <- load.svmlight("http://www.csie.ntu.edu.tw/~cjlin/libsvmtools/datasets/multiclass/glass.scale") ;

# Column 1 holds the class labels; the remaining columns are the features.
mat <- load.svmlight("mnist")

gt.model <- svm(x = mat[, -1], y = mat[, 1], gamma = 0.05, tolerance = 0.01,
                type = "C-classification", scale = FALSE, fitted = TRUE);
save(gt.model, file = "test2-gt.rdata");
cat("correction=", gt.model$correct, "\n");

gt.predit <- predict(gt.model, mat[, -1], score = FALSE);

# Bug fix: save() takes the *names* of objects to store; the original passed
# 'y = mat[, 1]', which is not an object name and fails save()'s precheck.
# Bind the labels to 'y' first, then save all three objects by name.
y <- mat[, 1]
save(gt.model, gt.predit, y, file = "test2-gt.rdata");

# Fraction of correctly predicted training labels.
correct <- length(which(gt.predit == mat[, 1])) / nrow(mat)
cat("correct=", correct, "\n");
summary(gt.predit);
d7bd673b458c7887b4efc77208090cc5b7a73b18
6593fa55f4216c069a8aa53d2475838b9a3db1c7
/munge/descarga_mapa_2.R
0064cca0d4f72d55f074abb8957e621f982a740e
[]
no_license
ecastillomon/ecoVIZ
d6eef354fd80ddcdf266c507e8a61872b9c2cce3
6d30494dd4f878b53bf046aff29ec67ba736451a
refs/heads/master
2020-08-21T22:00:42.174077
2020-07-17T00:42:46
2020-07-17T00:42:46
216,253,783
1
0
null
null
null
null
UTF-8
R
false
false
3,097
r
descarga_mapa_2.R
# descarga_mapa_2.R
# Downloads two OSM basemaps via ggmap::qmap (a station-area map and a
# city-wide map of Mexico City), adds a ggsn scale bar to each, and writes
# them to plots/ as PNGs whose width preserves the map's aspect ratio.
# NOTE(review): relies on %>% and mutate()/filter() (dplyr/magrittr) being
# on the search path - they are not loaded here; confirm a munge/setup
# script attaches them first.
library(ggmap)
# NOTE(review): a Google Maps API key was committed here in a comment; it
# has been redacted - revoke it and load keys from an environment variable.
# ggmap::register_google(key = "<REDACTED>")
# p=ggmap(get_googlemap(center = c(long_centro,lat_centro),
#                       zoom = 11, scale = 2, source="stamen",
#                       maptype ='terrain',
#                       color = 'color'))
# Bounding boxes tried during development:
# [-99.13, -99.22, 19.35, 19.45]
# c(-99.1658746-.05 ,19.4055775-.05 ,-99.1658746+.05,19.4055775 +.05)

# Station coordinates; drop rows without a longitude.
estaciones=read.csv("data/estaciones.csv") %>%
  mutate(long=location.lon,lat=location.lat) %>%
  filter(!is.na(long))

# Basemap for the station area (lon-min, lat-min, lon-max, lat-max).
p = qmap(location = c(-99.22 ,19.35,-99.13,19.45 ), zoom = 15, source="osm", extent="device", legend = "topleft",highres=TRUE,scale="auto")

# Half-ranges of the tile coordinates; their ratio gives the aspect ratio
# used to size the output PNG.
x_distance=p$data$lon %>% unique()
x_distance=max(x_distance) - mean(x_distance)
y_distance=p$data$lat %>% unique()
y_distance=max(y_distance) - mean(y_distance)
prop=x_distance/y_distance

png(filename="plots/mapa_estaciones.png", width=(1280*prop) , height=1280)
# p %>%ggsave(file="plots/mapa_estaciones.png", limitsize = TRUE,width = 390,height =555,units = "cm" )
# Render the map with a 0.5 km scale bar in the bottom-right corner.
print(p+ggsn::scalebar(x.min = -99.21 ,x.max=-99.20,y.min =19.36,y.max =19.3625 , transform = TRUE,location = "bottomright",
                       dist = .5, dist_unit = "km",st.size=3.5,st.color = "black",st.dist = 1,
                       model = "WGS84", height = 0.5))
dev.off()

# Commented-out scale-bar experiments kept for reference:
# # sb = scalebar(19.13,-99.22, 0.15, 5, 10, "km" )
# #
# p + scalebar(19.37,-99.20, dist_unit = "km", transform = FALSE, model = "WGS84")
# ggsn::scalebar( data=p$data ,location="topright",transform = FALSE ,dist_unit = "km",dist =4,st.size = 2, model = "WGS84")
#
# # geom_rect(data=sb[[1]], aes(xmin=xmin, xmax=xmax, ymin=ymin, ymax=ymax, fill=z), inherit.aes=F,
#            show.legend = F, color = "black", fill = sb[[1]]$fill.col) +
# geom_text(data=sb[[2]], aes(x=xlab, y=ylab, label=text), inherit.aes=F, show.legend = F)
# geom_point(data=estaciones,aes(x=long,y=lat))
#
#
# # scalebar(p, dist=2, location="topright", st.size=2 ,transform = TRUE ,dist_unit = "km")
# scalebar(p)
# scalebar(x.min = -99.13 ,x.max=-99.11,y.min =19.25 ,y.max =19.45 , dist_unit = "km",
#          transform = TRUE, model = "WGS84")
#
#

# Second map: the whole city, same aspect-ratio logic as above.
p2 = qmap(location = c(-99.289595,19.2393901,-99.015463,19.606023), zoom = 12, source="osm", extent="device", legend = "topleft",highres=TRUE,scale="auto")
x_distance=p2$data$lon %>% unique()
x_distance=max(x_distance) - mean(x_distance)
y_distance=p2$data$lat %>% unique()
y_distance=max(y_distance) - mean(y_distance)
prop=x_distance/y_distance
# p2 %>%ggsave(file="plots/mapa_ciudad.png", limitsize = TRUE,width = 390,height =555,units = "cm" )
png(filename="plots/mapa_ciudad.png", width=(1280*prop) , height=1280)
# City map with a 2 km scale bar.
print(p2+ggsn::scalebar(x.min = -99.2 ,x.max=-99.25,y.min =19.30,y.max =19.31 , transform = TRUE,location = "bottomright",
                        dist =2, dist_unit = "km",st.size=3.5,st.color = "black",st.dist = 1,
                        model = "WGS84", height = 0.5))
dev.off()
# p+ geom_point(data=estaciones,aes(x=location.lon,y=location.lat))
85a5dd00a91555b2917574bf389bb2b9838ccf62
29445f5eb91f1c9c083a387c140d302e5e3e5b52
/tests/testthat/test-stitch.R
eaece6c5a554332d0290f7fe5e06fd47f20f88f0
[ "MIT" ]
permissive
adam-gruer/victor
d9a896c371e33f49d8bcc0e4c11f4d1fc8bb19d2
988ce71dbee593fa5aafbf67a86caf4f36daa0dc
refs/heads/master
2020-08-20T02:51:55.633455
2019-11-02T00:08:47
2019-11-02T00:08:47
215,977,085
29
2
MIT
2019-10-19T23:57:57
2019-10-18T08:26:50
R
UTF-8
R
false
false
371
r
test-stitch.R
# Test scaffold for stitching tiles fetched via spoils().
# NOTE(review): this test contains no expect_*() calls, so testthat 3e will
# flag it as an empty test; the two spoils() results are fetched but never
# bound or checked. Add expectations on the combined tiles to make it a
# real test (what spoils() returns cannot be determined from this file).
test_that("binding two tile's layer with differing attributes", {
  # Tile over south-western Australia, centred on Perth.
  perth <- list(longitude = 115.906105,latitude = -31.842274)
  au_west <- spoils(zoom = 4, longitude = perth$longitude, latitude = perth$latitude)

  # Tile over south-eastern Australia, centred on Sydney.
  sydney <- list(longitude = 151.131706,latitude = -33.805967)
  aus_se <- spoils(zoom = 4, longitude = sydney$longitude, latitude = sydney$latitude)
})
d3f59dae291a50f44bb38b988da1d2c230b73e71
dfea2a089f5373848d4bbfddb785f7956ba9d312
/man/ThreeComp_Volume_Exponent.Rd
4cbde6f8ae6f29c445a8af1766826beaaa830802
[]
no_license
cran/PKconverter
c09d298d74948e54bbbd00a169fb4cce66b427d4
23d3cac38c79f9164b9710ebabf579b5d9825556
refs/heads/master
2020-04-09T07:46:01.820084
2020-02-06T08:00:02
2020-02-06T08:00:02
160,169,464
0
0
null
null
null
null
UTF-8
R
false
true
1,827
rd
ThreeComp_Volume_Exponent.Rd
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/ThreeComp_Volume_Exponent.R
\name{ThreeComp_Volume_Exponent}
\alias{ThreeComp_Volume_Exponent}
\title{Convert pharmacokinetic parameters for three compartment model}
\usage{
ThreeComp_Volume_Exponent(V1,alpha,beta,gamma,k21,k31,
  V1.sd=NA,alpha.sd=NA,beta.sd=NA,gamma.sd=NA,k21.sd=NA,k31.sd=NA,
  covar=c(V1alpha=NA,V1beta=NA,V1gamma=NA,V1k21=NA,V1k31=NA,
          alphabeta=NA,alphagamma=NA,alphak21=NA,alphak31=NA,
          betagamma=NA,betak21=NA,betak31=NA,gammak21=NA,gammak31=NA,
          k21k31=NA),...)
}
\arguments{
\item{V1}{The volume of distribution of compartment 1}

\item{alpha}{parameter in one compartment model "Aexp(-alpha)"}

\item{beta}{parameter in two compartment model "Aexp(-alpha)+Bexp(-beta)"}

\item{gamma}{parameter in three compartment model
"Aexp(-alpha)+Bexp(-beta)+Cexp(-gamma)"}

\item{k21}{transfer rate constants from compartment 2 to compartment 1}

\item{k31}{transfer rate constants from compartment 3 to compartment 1}

\item{V1.sd}{standard error of V1}

\item{alpha.sd}{standard error of alpha}

\item{beta.sd}{standard error of beta}

\item{gamma.sd}{standard error of gamma}

\item{k21.sd}{standard error of k21}

\item{k31.sd}{standard error of k31}

\item{covar}{covariances among parameters}

\item{...}{arguments to be passed to methods}
}
\description{
Calculate pharmacokinetic parameters with the volume of distribution (V1),
transfer rate constants (k21 and k31), and parameters (alpha, beta and
gamma) in the model "Aexp(-alpha)+Bexp(-beta)+Cexp(-gamma)"
}
\examples{
ThreeComp_Volume_Exponent(V1=10,alpha=0.6, beta=0.013, gamma=0.00074,
  k21=0.02, k31=0.001, V1.sd=0.01,alpha.sd=0.01,beta.sd=0.00005,
  gamma.sd=0.000002, k21.sd=0.0006,k31.sd=0.0000005)
}
\references{
\url{http://www.nonmemcourse.com/convert.xls}
}
652274b79ef8cbcfc3351d42367ab07e14f45c1b
8e0989c127fa440b1356606c5b1616703d76c06d
/R/gif.R
032417a17600102d424fd27deb5fad5aef4a83fb
[]
no_license
HenrikBengtsson/R.graphics
8da678165bd6cfad546faf71b78d768a44b3165e
c92a1761f82806ecf1b761d58a59ab55532aa518
refs/heads/master
2021-01-20T11:44:10.433776
2014-06-19T04:04:58
2014-06-19T04:04:58
null
0
0
null
null
null
null
UTF-8
R
false
false
3,853
r
gif.R
#########################################################################/**
# @RdocFunction gif
#
# @title "GIF graphics device"
#
# \description{
#  Device driver for GIF images.
#  Internally \code{png()} and ImageMagick's convert command is used.
# }
#
# @synopsis
#
# \arguments{
#   \item{filename}{Name of the GIF file to write (@character string).}
#   \item{width}{width (in pixels) of the saved image (@integer).}
#   \item{height}{height (in pixels) of the saved image (@integer).}
#   \item{pointsize}{font size (@integer).}
# }
#
# \value{A plot device is opened; nothing is returned to the \R interpreter.}
#
# \note{
#  Requires: ImageMagick (\url{http://www.imagemagick.org}) has to be
#  installed on the system. To test whether it is installed or not, try
#  \code{system("convert")}.
# }
#
# \examples{\dontrun{
#   options(imagemagick=c(convert="c:/Program Files/ImageMagick/convert"))
#   logo <- BitmapImage$read("logosm.ppm", path = R.graphics$dataPath)
#   gif(filename="logosm.gif")
#   image(logo)
#   dev.off()
# }}
#
# \keyword{device}
#
# @author
#*/#########################################################################
# Opens a png() device and *shadows* dev.off() on the search path so that
# closing the device converts the temporary PNG to the requested GIF via
# ImageMagick, then restores the real dev.off().
# NOTE(review): this relies on the "package:R.graphics" entry being present
# and writable on the search path - fragile with namespaced packages.
gif <- function(filename="Rplot.gif", width=480, height=480, pointsize=12) {
  # Search-path positions of base (for the real dev.off) and of this package
  # (where the temporary override is installed).
  base.pos <- which(search() == "package:base");
  pos <- which(search() == "package:R.graphics");

  # Just in case something went wrong last time, remove any overridden
  # dev.off().
  if (exists("dev.off", where=pos, inherits=FALSE))
    rm("dev.off", pos=pos);

  # Create the png file (rendered into a temporary path; converted on close).
  tmpfile <- tempfile();
  png.filename <- paste(tmpfile,".png",sep="");
  png(png.filename, width, height, pointsize);

  # Call my dev.off() instead (for now). The closure captures 'filename',
  # 'tmpfile' and 'png.filename' from this call's environment.
  assign("dev.off", function() {
    # Call the original dev.off() in the base package, after first removing
    # this override so subsequent calls resolve normally.
    fcn <- get("dev.off", mode="function", pos=base.pos);
    rm("dev.off", pos=pos);
    fcn();

    # Get the path to ImageMagick's convert command.
    # (getOption() returns NULL when the option is unset.)
    convert <- getOption("imagemagick")["convert"];
    if (is.null(convert))
      convert <- "convert";
    # Quote the path if it contains a space and is not already quoted.
    if (regexpr("^[^\"].* .*", convert) != -1)
      convert <- paste("\"", convert, "\"", sep="");

    # Call 'convert' to convert from temporary png to temporary gif.
    gif.filename <- paste(tmpfile, ".gif", sep="");
    cmd <- paste(convert, png.filename, gif.filename, sep=" ");
    if (.Platform$OS.type == "windows")
      system(cmd, invisible=TRUE)
    else
      system(cmd);

    # Renaming the temporary gif file to the wanted filename. This way a
    # gif is produced even if 'filename' lacks a *.gif extension.
    file.rename(gif.filename, filename);

    # Remove the temporary png file.
    file.remove(png.filename);

    invisible(filename);
  }, pos=pos);
} # gif()


############################################################################
# HISTORY:
# 2001-10-29
# o Updated the gif() command to 1) not create temporary variables in
#   .GlobalEnv. It now also convert to a tempory gif file and first then
#   rename that to the wanted filename. This means that if the filename
#   does not have a *.gif extension a gif file will still be generated.
#   Finally, the option "imagemagick" contains named field where one of
#   them can be "convert" for specifying the path to the convert command.
#   Note that on WinXP Pro there is already a convert command in the path.
# 2001-04-11
# * Created from old com.braju.graphics and made into a static class.
# 2001-03-08 [version 0.2.1]
# * Added close.screen argument to dev.close().
# 2001-03-07 [version 0.2]
# * Added restore.par argument to dev.open().
# * Added dev.close().
# 2001-02-28 [version 0.1]
# * Added Rdoc comments.
# 2001-02-12
# * Added quotaion marks around filenames in 'convert' system call.
# 2001-02-08
# * Created the gif device driver. Removed old eps2gif(); no need for it!
# * Added support for gif outputs.
# 2001-02-07
# * Created.
############################################################################
533b49786a6a747e23765df4c9daff6842b9c343
58552d1a2700c5a31c1915b27d347fc59ff06283
/Insights/tools/All Migration - server - mc.R
c3a1ca5c49cab7cb303d870e1b885d915c44e239
[ "LicenseRef-scancode-warranty-disclaimer" ]
no_license
Treasury-Analytics-and-Insights/Insights-Tool-April-2018
6b56f62df6a87f621abdc14b2e8c6c5b9d833308
29944ffc9da5843e6e07613d59c30dc1ceeb5085
refs/heads/master
2020-03-15T08:21:44.702584
2018-07-19T01:10:35
2018-07-19T01:10:35
132,049,038
1
1
null
null
null
null
UTF-8
R
false
false
40,167
r
All Migration - server - mc.R
#Server MC mig_all_panel <- reactiveValues( base_data = df.init_mig_all_base_data, map = spldf.init_mig_all_map, last_start_year = vt.init_mig_all_param_year_1, last_end_year = vt.init_mig_all_param_year_2, tmp_to_use = 1, last_agg_level = "Total NZ", vt.last_flow_dir = 'Inflow', vb_1_value = 'test', vb_1_text = 'test', vb_3_value = 'test', vb_3_text = 'test', vb_4_value = 'test', vb_4_text = 'test', vb_5_value = 'test', vb_5_text = 'test', ta_vb_1_value = 'test', ta_vb_1_text = 'test', ta_vb_3_value = 'test', ta_vb_3_text = 'test', ta_vb_4_value = 'test', ta_vb_4_text = 'test', map_init_refresh = 0 ) #Update the base data when the year slider changes observeEvent(input$mig_all_param_year,{ #Check for unecessary updates caused by the input being bounced after an invalid input #If the two inputs are directly adjacent and the user then puts them together, #they will get bounced back to where they were but we don't want to update the data if(mig_all_panel$last_start_year != input$mig_all_param_year[1] | mig_all_panel$last_end_year != input$mig_all_param_year[2]){ vt.param_ta <- input$mig_all_param_ta vt.param_agg_level <- input$mig_all_agg_level vt.param_year_1 <- input$mig_all_param_year[1] vt.param_year_2 <- input$mig_all_param_year[2] #If the user selects the same value for start and end (i.e. pinches the slider together) if(input$mig_all_param_year[1] == input$mig_all_param_year[2]){ #If the first value has changed then move it back one. if(input$mig_all_param_year[1] != mig_all_panel$last_start_year & (input$mig_all_param_year[1] - 1) %in% vt.init_mig_all_param_year_1_select){ updateSliderInput( session, "mig_all_param_year", label = "Year range selection", min = vt.init_mig_all_year_min, max = vt.init_mig_all_year_max, value = c(input$mig_all_param_year[1] - 1, input$mig_all_param_year[2]), step = 1 ) vt.param_year_1 <- input$mig_all_param_year[1] - 1 } #If the second value has changed then move it forward one. 
if(input$mig_all_param_year[2] != mig_all_panel$last_end_year & (input$mig_all_param_year[2] + 1) %in% vt.init_mig_all_param_year_2_select){ updateSliderInput( session, "mig_all_param_year", label = "Year range selection", min = vt.init_mig_all_year_min, max = vt.init_mig_all_year_max, value = c(input$mig_all_param_year[1], input$mig_all_param_year[2] + 1), step = 1 ) vt.param_year_2 <- input$mig_all_param_year[2] + 1 } } #If the last update was cancelled and the slider was bounced back to the same position, #we don't need to update the data if((vt.param_year_1 != mig_all_panel$last_start_year | vt.param_year_2 != mig_all_panel$last_end_year) & vt.param_year_1 != vt.param_year_2){ #Update the base data if(vt.param_agg_level == "Total NZ"){ df.base_data <- df.db_mig_data %>% filter( PREV_YEAR %in% vt.param_year_1, CURR_YEAR %in% vt.param_year_2 ) } if(vt.param_agg_level == "Territorial authority"){ df.base_data <- df.db_mig_data %>% filter( PREV_YEAR %in% vt.param_year_1, CURR_YEAR %in% vt.param_year_2, TA %in% vt.param_ta ) } mig_all_panel$base_data <- df.base_data } #Record the last position of the slider mig_all_panel$last_start_year <- vt.param_year_1 mig_all_panel$last_end_year <- vt.param_year_2 } }) #Update the base data if the TA changes or TA level observe({ vt.param_ta <- input$mig_all_param_ta vt.param_agg_level <- input$mig_all_agg_level isolate({ vt.param_year_1 <- input$mig_all_param_year[1] vt.param_year_2 <- input$mig_all_param_year[2] }) if(vt.param_agg_level == "Total NZ"){ df.base_data <- df.db_mig_data %>% filter( PREV_YEAR %in% vt.param_year_1, CURR_YEAR %in% vt.param_year_2 ) mig_all_panel$base_data <- df.base_data } if(vt.param_agg_level == "Territorial authority"){ df.base_data <- df.db_mig_data %>% filter( PREV_YEAR %in% vt.param_year_1, CURR_YEAR %in% vt.param_year_2, TA %in% vt.param_ta ) if(dim(df.base_data)[1] > 0){ mig_all_panel$base_data <- df.base_data } } }) #Update the map data when the base data, flow dir, TA, or agg_level updates 
observe({ df.base_data <- mig_all_panel$base_data vt.param_flow_name <- input$mig_all_param_flow vt.param_flow <- input$mig_all_param_flow vt.param_flow <- df.mig_all_param_flow_map %>% filter(NAME == vt.param_flow) %>% .[['VALUE']] vt.param_perc <- input$mig_all_param_map_perc vt.param_flow_type <- input$mig_all_param_flow_type vt.param_flow_type <- df.mig_flow_type_mapping %>% filter(FLOW_TYPE_LABEL == vt.param_flow_type) %>% .[["FLOW_TYPE"]] vt.param_agg_level <- input$mig_all_agg_level isolate({ vt.last_agg_level <- mig_all_panel$last_agg_level }) if(vt.param_agg_level == "Total NZ"){ if(vt.param_agg_level != vt.last_agg_level){ updateRadioButtons(session, inputId = "mig_all_param_flow_type", label = "Type of Population Change", choices = vt.init_mig_all_parm_flow_type_select, selected = vt.init_mig_all_parm_flow_type) } if(vt.param_flow_type != 'tot'){ df.base_data = df.base_data %>% filter(FLOW_DIR == vt.param_flow, FLOW_TYPE == vt.param_flow_type, is.na(FLOW_DEMOGRAPHIC), TA_LEVEL) %>% inner_join(df.mig_flow_type_mapping, by = "FLOW_TYPE") } if(vt.param_flow_type == 'tot'){ df.base_data = df.base_data %>% filter(is.na(FLOW_DEMOGRAPHIC), FLOW_DIR == vt.param_flow, FLOW_TYPE %in% 2:5) %>% group_by(TA, TA_CODE, TOTAL_PREV, TOTAL_CURR, PREV_YEAR, CURR_YEAR, FLOW_DIR) %>% summarise(VALUE = sum(VALUE)) %>% mutate(FLOW_TYPE = vt.param_flow_type) } #Convert to percentage of popn at start period if(vt.param_perc){ df.base_data = df.base_data %>% mutate(VALUE = VALUE/TOTAL_PREV) } spldf.map_update <- spldf.nz_ta_migration spldf.map_update@data <- spldf.map_update@data %>% mutate(code = as.numeric(code)) %>% left_join(df.base_data, by = c('code' = 'TA_CODE')) mig_all_panel$map <- spldf.map_update } if(vt.param_agg_level == "Territorial authority"){ if(vt.param_agg_level != vt.last_agg_level){ #Remove other choices at a TA level updateRadioButtons(session, inputId = "mig_all_param_flow_type", label = "Type of Population Change", choices = 
vt.init_mig_all_parm_flow_type_ta_select, selected = vt.init_mig_all_parm_flow_type_ta) } df.base_data = df.base_data %>% filter(FLOW_DIR == vt.param_flow, FLOW_TYPE == 2, FLOW_DEMOGRAPHIC == "TA", TA_LEVEL) if(vt.param_perc){ df.base_data = df.base_data %>% mutate(VALUE = VALUE/TOTAL_PREV) } spldf.map_update <- spldf.nz_ta_migration spldf.map_update@data <- spldf.map_update@data %>% left_join(df.base_data, by = c('code' = 'FLOW_DEMOGRAPHIC_VALUE')) mig_all_panel$map <- spldf.map_update } mig_all_panel$last_agg_level <- vt.param_agg_level }) #When the aggregation level is total NZ then disable the TA selection observeEvent(input$mig_all_agg_level, { if(input$mig_all_agg_level == "Total NZ"){ disable("mig_all_param_ta") } if(input$mig_all_agg_level != "Total NZ"){ enable("mig_all_param_ta") } }) #Help dialogue for chord diagram observeEvent(input$mig_all_chord_modal_help, { vt.param_agg_level <- input$mig_all_agg_level if(vt.param_agg_level == "Total NZ"){ vt.help_dialogue <- "The chord diagram shows the size and direction of the population moving between Territorial Authorities within NZ. The six TAs with the largest populations are identified separately, as well as Other North Island and Other South Island. Christchurch City has been combined with the Selwyn and Waimakariri Districts and is described as Greater Christchurch. Wellington City has been combined with Porirua, Lower Hutt, and Upper Hutt Cities and is described as Greater Wellington. The size of each segment around the edge of the circle represents the number of people who left that area to move to another area. The width of the chord at each end represents the number of people who moved to the area at the other end of the chord. Each chord is shaded in the colour representing the direction of the predominant movement. For example, chords are shaded with the same colour as the Auckland segment if more people arrived in Auckland than left. When 'Display as a percentage of the population?' 
is selected the diagram shows the flows as a percentage of the population at the start year of the areas they originate from e.g. if Christchurch -> Auckland is 0.50% this means that 0.50% of the size of the Christchurch population at the start year moved from Christchurch to Auckland. Move the mouse over a segment at the edge of the circle to see all movements related to that area, or over a chord to see more detail about the movements that chord represents." } if(vt.param_agg_level == "Territorial authority"){ vt.param_ta <- input$mig_all_param_ta vt.help_dialogue <- glue("The chord diagram shows the size and direction of the population moving to and from {vt.param_ta}. The five TAs with the largest flows to and from {vt.param_ta} are identified separately, as well as Other North Island and Other South Island. The size of each segment around the edge of the circle represents the number of people who left that area to move to another area. The width of the chord at each end represents the number of people who moved to the area at the other end of the chord. Each chord is shaded in the colour representing the direction of the predominant movement. For example, chords are shaded in with the same colour as the {vt.param_ta} segment if more people arrived in {vt.param_ta} than left. When 'Display as a percentage of the population?' is selected the diagram shows the flows as a percentage of the population at the start year of the areas they originate from e.g. if {vt.param_ta} -> {ifelse(vt.param_ta == 'Auckland', 'Christchurch', 'Auckland')} is 0.50% this means that 0.50% of the size of the {vt.param_ta} population at the start year moved from {vt.param_ta} to {ifelse(vt.param_ta == 'Auckland', 'Christchurch', 'Auckland')}. 
Move the mouse over a segment at the edge of the circle to see all movements related to that area, or over a chord to see more detail about the movements that chord represents.") } showModal(modalDialog( title = "About this chord diagram", vt.help_dialogue, easyClose = TRUE, footer = NULL )) }) #Help dialogue for chord diagram observeEvent(input$mig_all_pyramid_modal_help, { vt.param_agg_level <- input$mig_all_agg_level vt.param_start_year <- input$mig_all_param_year[1] vt.param_end_year <- input$mig_all_param_year[2] if(vt.param_agg_level == "Total NZ"){ vt.help_dialogue <- glue("The leftmost population pyramid shows how the NZ population has changed from {vt.param_start_year} to {vt.param_end_year}. The drivers of this change are represented by the pyramids to the right, showing the effects of; international migration; and natural increase and ageing. Click the corresponding legend entry to select or unselect each factor.") } if(vt.param_agg_level == "Territorial authority"){ vt.param_ta <- input$mig_all_param_ta vt.help_dialogue <- glue("The leftmost population pyramid shows how the {vt.param_ta} population has changed from {vt.param_start_year} to {vt.param_end_year}. The drivers of this change are represented by the pyramids to the right, showing the effects of; migration within NZ; international migration; and natural increase and ageing. Click the corresponding legend entry to select or unselect each factor.") } showModal(modalDialog( title = "About this chord diagram", vt.help_dialogue, easyClose = TRUE, footer = NULL )) }) #Help dialogue for the page observeEvent({input$mig_all_page_modal_help}, { vt.help_dialogue1 <- glue("Population explorer presents new data about population change in New Zealand (NZ) between {min(vt.mig_all_year_combn$PREV_YEAR)} and {max(vt.mig_all_year_combn$CURR_YEAR)} through interactive graphs and maps. Using this tool you can investigate changes in the population of New Zealand and specific territorial authorities over time. 
Different types of population change can be examined, including: internal migration within NZ, international migration to and from NZ, ageing, and births and deaths (natural increase).") vt.help_dialogue2 <- glue("The grey boxes at the top of the page present key facts about changes in the population for any selected time period and area. The tabs below these boxes allow different aspects of population change to be examined and mapped.") vt.help_dialogue3 <- glue("There are a number of controls on the left-hand side of the page which you can use to manipulate the data. You can select different time periods by adjusting the year range selection slider. The 'Display as a percentage?' checkbox allows you to view figures as percentages instead of numbers of people. The level of analysis buttons allow you to switch between figures for all of NZ and figures for a specific territorial authority within NZ. Hovering over the visualisations in this tool with your mouse pointer will reveal additional information.") showModal(modalDialog( title = "About this tool", vt.help_dialogue1, br(), br(), vt.help_dialogue2, br(), br(), vt.help_dialogue3, easyClose = TRUE, footer = NULL )) }) #Headlines for value boxes observe({ df.base_data <- mig_all_panel$base_data vt.param_start_year <- input$mig_all_param_year[1] vt.param_end_year <- input$mig_all_param_year[2] validate(need(vt.mig_all_year_combn %>% filter(PREV_YEAR %in% vt.param_start_year, CURR_YEAR %in% vt.param_end_year) %>% dim() %>% .[1] >= 1, "Updating...")) isolate({ vt.param_agg_level <- input$mig_all_agg_level vt.param_ta <- input$mig_all_param_ta }) if(vt.param_agg_level == "Total NZ"){ #Get all different rows for the national vbs tmp.vb_data <- mig_all_panel$base_data %>% filter(FLOW_TYPE %in% 2:3 & FLOW_DIR == "out" & is.na(FLOW_DEMOGRAPHIC) | (FLOW_TYPE == 3 & is.na(FLOW_DEMOGRAPHIC) | FLOW_DEMOGRAPHIC == "Country")| (FLOW_TYPE == 4)) tmp.vb_2_data <- df.db_mig_data %>% filter(PREV_YEAR == vt.param_start_year, 
CURR_YEAR == vt.param_end_year) %>% filter(FLOW_DIR == 'net', FLOW_TYPE %in% 2 & FLOW_DEMOGRAPHIC == "TA" & TA_LEVEL | FLOW_TYPE %in% 4 & is.na(FLOW_DEMOGRAPHIC_VALUE)) #VB1 mig_all_panel$vb_1_value <- tmp.vb_data %>% filter(TA_CODE == 999) %>% slice(1) %>% mutate(VALUE_PERC = paste0(formatC(100 * (TOTAL_CURR / TOTAL_PREV - 1), format = 'f', digits = 1), "%")) %>% .[["VALUE_PERC"]] tmp.vb_1_test_1 <- tmp.vb_data %>% filter(TA_CODE == 999) %>% slice(1) %>% mutate(test = TOTAL_CURR > TOTAL_PREV) %>% .[['test']] tmp.vb_1_text_1 <- tmp.vb_data %>% filter(TA_CODE == 999, METRIC == "out_3") %>% mutate(VALUE_PERC = paste0(formatC(100 * (VALUE / TOTAL_PREV), format = 'f', digits = 1), "%")) %>% .[["VALUE_PERC"]] tmp.vb_1_text_2 <- tmp.vb_data %>% filter(TA_CODE == 999, METRIC == "out_2") %>% mutate(VALUE_PERC = paste0(formatC(100 * (VALUE / TOTAL_PREV), format = 'f', digits = 1), "%")) %>% .[["VALUE_PERC"]] tmp.vb_1_text_3 <- tmp.vb_data %>% filter(TA_CODE == 999) %>% .[["TOTAL_PREV"]] %>% unique() tmp.vb_1_text_4 <- tmp.vb_data %>% filter(TA_CODE == 999) %>% .[["TOTAL_CURR"]] %>% unique() tmp.vb_1_text_5 <- tmp.vb_data %>% filter(TA_CODE == 999) %>% mutate(DIFF = abs(TOTAL_CURR-TOTAL_PREV)) %>% .[["DIFF"]] %>% unique() mig_all_panel$vb_1_text <- glue({"{ifelse(tmp.vb_1_test_1, 'increase', 'decrease')} in the population of New Zealand between {vt.param_start_year} and {vt.param_end_year} from {comma(tmp.vb_1_text_3)} to {comma(tmp.vb_1_text_4)} ({ifelse(tmp.vb_1_test_1, 'an increase', 'a decrease')} of {comma(tmp.vb_1_text_5)}). 
Of the {vt.param_start_year} population {tmp.vb_1_text_1} left the country, while {tmp.vb_1_text_2} moved to another area"}) #vb3 tmp.vb_3_value_val <- (tmp.vb_data %>% filter(METRIC == "in_3", TA_CODE == 999) %>% .[["VALUE"]] - tmp.vb_data %>% filter(METRIC == "in_3_country_NZ", TA_CODE == 999) %>% .[["VALUE"]]) tmp.vb_3_value <- tmp.vb_3_value_val %>% # round(-3) %>% comma() mig_all_panel$vb_3_value <- tmp.vb_3_value tmp.vb_3_text_1_val <- (tmp.vb_data %>% filter(METRIC == "out_3", TA_CODE == 999) %>% .[["VALUE"]] - tmp.vb_data %>% filter(METRIC == "out_3_country_NZ", TA_CODE == 999) %>% .[["VALUE"]]) tmp.vb_3_text_1 <- tmp.vb_3_text_1_val %>% # round(-3) %>% comma() tmp.vb_3_text_2 <- tmp.vb_data %>% filter(METRIC == "out_3", TA_CODE == 999) %>% .[["TOTAL_PREV"]] tmp.vb_3_test <- (tmp.vb_3_value_val - tmp.vb_3_text_1_val) > 0 tmp.vb_3_text_2 <- paste0(formatC(100 * (tmp.vb_3_value_val - tmp.vb_3_text_1_val) / tmp.vb_3_text_2, format = 'f', digits = 1), "%") tmp.vb_3_text_5 <- abs(tmp.vb_3_value_val - tmp.vb_3_text_1_val) tmp.vb_3_text_3 <- tmp.vb_data %>% filter(FLOW_DEMOGRAPHIC == "Country", FLOW_TYPE == 3, FLOW_DIR == 'in', !(FLOW_DEMOGRAPHIC_VALUE %in% c("NZ", "ZZ")), TA_CODE == 999) %>% filter(VALUE == max(VALUE)) tmp.vb_3_text_4 <- tmp.vb_3_text_3 %>% .[["VALUE"]]%>% # round(-3) %>% comma() tmp.vb_3_text_3 <- tmp.vb_3_text_3 %>% inner_join(df.mig_all_world_country_vb_mapping, by = c("FLOW_DEMOGRAPHIC_VALUE" = "ID")) %>% .[["code"]] mig_all_panel$vb_3_text <- glue("overseas migrants arrived in New Zealand between {vt.param_start_year} and {vt.param_end_year}, while {tmp.vb_3_text_1} left. This represents {ifelse(tmp.vb_3_test, 'an increase', 'a decrease')} of {comma(tmp.vb_3_text_5)} or {tmp.vb_3_text_2} of the NZ population. 
The largest source of migrants was {tmp.vb_3_text_3} with {tmp.vb_3_text_4} arrivals.") #vb 4 mig_all_panel$vb_4_value <- tmp.vb_data %>% filter(METRIC == "in_3_country_NZ", TA_CODE == 999) %>% .[["VALUE"]]%>% # round(-3) %>% comma() tmp.vb_4_text_1 <- tmp.vb_data %>% filter(METRIC == "out_3_country_NZ", TA_CODE == 999) %>% .[["VALUE"]] %>% # round(-3) %>% comma() tmp.vb_4_text_2 <- tmp.vb_data %>% filter(METRIC == "net_3_country_NZ", TA_CODE == 999) %>% mutate(VALUE_PERC = paste0(formatC(100 * abs(VALUE)/TOTAL_PREV, format = 'f', digits = 1), "%")) tmp.vb_4_text_5 <- tmp.vb_4_text_2 %>% mutate(VALUE=abs(VALUE)) %>% .[["VALUE"]] tmp.vb_4_text_2 <- tmp.vb_4_text_2 %>% .[["VALUE_PERC"]] tmp.vb_4_test <- tmp.vb_data %>% filter(METRIC == "net_3_country_NZ", TA_CODE == 999) %>% mutate(test = VALUE > 0) %>% .[['test']] mig_all_panel$vb_4_text <- glue("New Zealanders arrived in NZ by {vt.param_end_year} after living overseas in {vt.param_start_year}, while {tmp.vb_4_text_1} departed by {vt.param_end_year} after living in NZ in {vt.param_start_year}. This represents {ifelse(tmp.vb_4_test, 'an increase', 'a decrease')} of {comma(tmp.vb_4_text_5)} or {tmp.vb_4_text_2} of the NZ population.") mig_all_panel$vb_5_value <- tmp.vb_data %>% filter(METRIC == "in_4", TA_CODE == 999) %>% .[["VALUE"]] %>% as.integer() %>% # round(-(nchar(.) - 3)) %>% comma() tmp.vb_5_text_1 <- tmp.vb_data %>% filter(METRIC == "out_4", TA_CODE == 999) %>% .[["VALUE"]] %>% as.integer() %>% # round(-(nchar(.) 
- 3)) %>% comma() tmp.vb_5_text_2 <- tmp.vb_data %>% filter(METRIC == "net_4", TA_CODE == 999) %>% mutate(VALUE_PERC = paste0(formatC(100 * abs(VALUE) / TOTAL_PREV, format = 'f', digits = 1), "%")) tmp.vb_5_test <- tmp.vb_data %>% filter(METRIC == "net_4", TA_CODE == 999) %>% mutate(test = VALUE > 0) %>% .[['test']] tmp.vb_5_text_3 <- tmp.vb_2_data %>% filter(FLOW_TYPE == 4) %>% mutate(VALUE_PERC = VALUE / TOTAL_PREV) %>% filter(VALUE_PERC == max(VALUE_PERC, na.rm = T)) %>% mutate(VALUE_PERC = paste0(formatC(100 * VALUE_PERC, format = 'f', digits = 1), "%")) tmp.vb_5_text_4 <- tmp.vb_5_text_3 %>% .[["TA"]] tmp.vb_5_text_3 <- tmp.vb_5_text_3 %>% .[["VALUE_PERC"]] tmp.vb_5_text_5 <- tmp.vb_5_text_2 %>% .[["VALUE"]] tmp.vb_5_text_2 <- tmp.vb_5_text_2 %>% .[["VALUE_PERC"]] mig_all_panel$vb_5_text <- glue("children were born in New Zealand between June {vt.param_start_year} and June {vt.param_end_year} while {tmp.vb_5_text_1} people died. This represents a natural increase of {comma(tmp.vb_5_text_5)} or {tmp.vb_5_text_2}. 
The largest natural increase of {tmp.vb_5_text_3} was in {tmp.vb_5_text_4}.") } if(vt.param_agg_level == "Territorial authority"){ #vb1 tmp.vb_data <- mig_all_panel$base_data #National facts for comparison tmp.vb_data_nat <- df.db_mig_data %>% filter(TA_CODE == 999, PREV_YEAR == vt.param_start_year, CURR_YEAR == vt.param_end_year, METRIC %in% c("net_3", "net_3_country_NZ", "net_4") | (FLOW_DEMOGRAPHIC == "Country" & !is.na(FLOW_DEMOGRAPHIC_VALUE) & FLOW_DEMOGRAPHIC_VALUE != "NZ" & FLOW_DIR == 'net' & FLOW_TYPE == 3) ) #VB1 tmp.ta_vb_1_value <- tmp.vb_data %>% slice(1) %>% mutate(VALUE = TOTAL_CURR / TOTAL_PREV - 1, VALUE_PERC = paste0(formatC(100 * abs(TOTAL_CURR / TOTAL_PREV - 1), format = 'f', digits = 1), "%")) tmp.ta_vb_1_value_test <- tmp.ta_vb_1_value %>% .[["VALUE"]] > 0 mig_all_panel$ta_vb_1_value <- tmp.ta_vb_1_value %>% .[["VALUE_PERC"]] tmp.ta_vb_1_text_1 <- tmp.vb_data_nat %>% slice(1) %>% mutate(VALUE_PERC = paste0(formatC(100 * abs(TOTAL_CURR / TOTAL_PREV - 1), format = 'f', digits = 1), "%")) %>% .[['VALUE_PERC']] tmp.ta_vb_1_text_1_test <- tmp.vb_data_nat %>% slice(1) %>% mutate(test = (TOTAL_CURR / TOTAL_PREV - 1) > 0) %>% .[["test"]] tmp.ta_vb_1_text_2 <- tmp.vb_data %>% filter(METRIC == "net_2") %>% mutate(VALUE_PERC = paste0(formatC(100 * (VALUE / TOTAL_PREV), format = 'f', digits = 1), "%")) %>% .[['VALUE_PERC']] tmp.ta_vb_1_text_6 <- tmp.vb_data %>% filter(METRIC == "net_2") %>% mutate(VALUE = abs(VALUE)) %>% .[['VALUE']] tmp.ta_vb_1_text_2_test <- tmp.vb_data %>% filter(METRIC == "net_2") %>% mutate(test = VALUE > 0) %>% .[["test"]] tmp.ta_vb_1_text_3 <- tmp.vb_data %>% .[["TOTAL_PREV"]] %>% unique() tmp.ta_vb_1_text_4 <- tmp.vb_data %>% .[["TOTAL_CURR"]] %>% unique() tmp.ta_vb_1_text_5 <- tmp.vb_data %>% mutate(DIFF = abs(TOTAL_CURR-TOTAL_PREV)) %>% .[["DIFF"]] %>% unique() tmp.ta_vb_1_text <- glue("{ifelse(tmp.ta_vb_1_value_test, 'increase', 'decrease')} in the population of {vt.param_ta} between {vt.param_start_year} and 
{vt.param_end_year} from {comma(tmp.ta_vb_1_text_3)} to {comma(tmp.ta_vb_1_text_4)} ({ifelse(tmp.ta_vb_1_value_test, 'an increase', 'a decrease')} of {comma(tmp.ta_vb_1_text_5)}). This compares to {ifelse(tmp.ta_vb_1_text_1_test, 'an increase', 'a decrease')} of {tmp.ta_vb_1_text_1} for all NZ. Migration within NZ contributed {tmp.ta_vb_1_text_2} ({ifelse(tmp.ta_vb_1_text_2_test, 'an increase', 'a decrease')} of {comma(tmp.ta_vb_1_text_6)}) to this {ifelse(tmp.ta_vb_1_value_test, 'increase', 'decrease')}.") #VB3 mig_all_panel$ta_vb_3_value <- tmp.vb_data %>% filter(FLOW_DEMOGRAPHIC == "Country", !is.na(FLOW_DEMOGRAPHIC_VALUE), FLOW_DEMOGRAPHIC_VALUE != "NZ", FLOW_DIR == 'in', FLOW_TYPE == 3) %>% summarise(VALUE = sum(VALUE)) %>% .[["VALUE"]] %>% as.integer() %>% # round(-(nchar(.) - 3)) %>% comma() tmp.ta_vb_3_text_1 <- tmp.vb_data %>% filter(FLOW_DEMOGRAPHIC == "Country", !is.na(FLOW_DEMOGRAPHIC_VALUE), FLOW_DEMOGRAPHIC_VALUE != "NZ", FLOW_DIR == 'out', FLOW_TYPE == 3) %>% summarise(VALUE = sum(VALUE)) %>% .[["VALUE"]] %>% as.integer() %>% # round(-(nchar(.) 
- 3)) %>% comma() tmp.ta_vb_3_text_2 <- tmp.vb_data %>% filter(FLOW_DEMOGRAPHIC == "Country", !is.na(FLOW_DEMOGRAPHIC_VALUE), FLOW_DEMOGRAPHIC_VALUE != "NZ", FLOW_DIR == 'net', FLOW_TYPE == 3) %>% summarise(VALUE = sum(VALUE), TOTAL_PREV = TOTAL_PREV[1]) %>% mutate( test = VALUE > 0, VALUE_PERC = paste0(formatC(100 * abs(VALUE/TOTAL_PREV), format = 'f', digits = 1), "%")) tmp.ta_vb_3_text_2_test <- tmp.ta_vb_3_text_2 %>% .[['test']] tmp.ta_vb_3_text_6 <- tmp.ta_vb_3_text_2 %>% .[["VALUE"]] tmp.ta_vb_3_text_2 <- tmp.ta_vb_3_text_2 %>% .[["VALUE_PERC"]] tmp.ta_vb_3_text_3 <- tmp.vb_data_nat %>% filter(FLOW_DEMOGRAPHIC == "Country", !is.na(FLOW_DEMOGRAPHIC_VALUE), FLOW_DEMOGRAPHIC_VALUE != "NZ", FLOW_DIR == 'net', FLOW_TYPE == 3) %>% summarise(VALUE = sum(VALUE), TOTAL_PREV = TOTAL_PREV[1]) %>% mutate(test = VALUE > 0, VALUE_PERC = paste0(formatC(100 * abs(VALUE)/TOTAL_PREV, format = 'f', digits = 1), "%")) tmp.ta_vb_3_text_3_test <- tmp.ta_vb_3_text_3 %>% .[['test']] tmp.ta_vb_3_text_3 <- tmp.ta_vb_3_text_3 %>% .[['VALUE_PERC']] tmp.ta_vb_3_text_4 <- tmp.vb_data %>% filter(FLOW_DEMOGRAPHIC == "Country", !is.na(FLOW_DEMOGRAPHIC_VALUE), FLOW_TYPE == 3, FLOW_DIR == 'in', !(FLOW_DEMOGRAPHIC_VALUE %in% c("NZ", "ZZ"))) %>% filter(VALUE == max(VALUE)) %>% #Don't want ties slice(1) %>% inner_join(df.mig_all_world_country_vb_mapping, by = c("FLOW_DEMOGRAPHIC_VALUE" = 'ID')) %>% .[["code"]] tmp.ta_vb_3_text_5 <- tmp.vb_data %>% filter(FLOW_DEMOGRAPHIC == "Country", !is.na(FLOW_DEMOGRAPHIC_VALUE), FLOW_TYPE == 3, FLOW_DIR == 'in', !(FLOW_DEMOGRAPHIC_VALUE %in% c("NZ", "ZZ"))) %>% filter(VALUE == max(VALUE)) %>% #No ties slice(1) %>% .[["VALUE"]] %>% as.integer() %>% # round(-(nchar(.) - 3)) %>% comma() tmp.ta_vb_3_text <- glue("overseas migrants arrived in {vt.param_ta} between {vt.param_start_year} and {vt.param_end_year}, while {tmp.ta_vb_3_text_1} left. 
This represents {ifelse(tmp.ta_vb_3_text_2_test, 'an increase', 'a decrease')} of {comma(tmp.ta_vb_3_text_6)} or {tmp.ta_vb_3_text_2} of the population, compared to a {tmp.ta_vb_3_text_3} {ifelse(tmp.ta_vb_3_text_3_test, 'increase', 'decrease')} nationally. The largest source of migrants was {tmp.ta_vb_3_text_4}, with {tmp.ta_vb_3_text_5} arrivals.") #VB4 mig_all_panel$ta_vb_4_value <- tmp.vb_data %>% filter(METRIC == "in_3_country_NZ") %>% .[["VALUE"]] %>% as.integer() %>% # round(-(nchar(.) - 3)) %>% comma() tmp.ta_vb_4_text_1 <- tmp.vb_data %>% filter(METRIC == "out_3_country_NZ") %>% .[["VALUE"]] %>% as.integer() %>% # round(-(nchar(.) - 3)) %>% comma() tmp.ta_vb_4_text_2 <- tmp.vb_data %>% filter(METRIC == "net_3_country_NZ") %>% mutate(VALUE_PERC = paste0(formatC(100 * abs(VALUE)/TOTAL_PREV, format = 'f', digits = 1), "%")) %>% .[["VALUE_PERC"]] tmp.ta_vb_4_test_1 <- tmp.vb_data %>% filter(METRIC == "net_3_country_NZ") %>% mutate(test = VALUE > 0) %>% .[['test']] tmp.ta_vb_4_text_3 <- tmp.vb_data_nat %>% filter(METRIC == "net_3_country_NZ") %>% mutate(VALUE_PERC = paste0(formatC(100 * abs(VALUE)/TOTAL_PREV, format = 'f', digits = 1), "%")) %>% .[["VALUE_PERC"]] tmp.ta_vb_4_text_5 <- tmp.vb_data %>% filter(METRIC == "net_3_country_NZ") %>% mutate(VALUE = abs(VALUE)) %>% .[["VALUE"]] tmp.ta_vb_4_test_2 <- tmp.vb_data_nat %>% filter(METRIC == "net_3_country_NZ") %>% mutate(test = VALUE > 0) %>% .[['test']] tmp.ta_vb_4_text <- glue("New Zealanders arrived in {vt.param_ta} by {vt.param_end_year} after living overseas in {vt.param_start_year}, while {tmp.ta_vb_4_text_1} departed NZ by {vt.param_end_year} after living in {vt.param_ta} in {vt.param_start_year}. 
This represents {ifelse(tmp.ta_vb_4_test_1, 'an increase', 'a decrease')} of {comma(tmp.ta_vb_4_text_5)} or {tmp.ta_vb_4_text_2} of the {vt.param_ta} population, compared to {ifelse(tmp.ta_vb_4_test_2, 'an increase', 'a decrease')} of {tmp.ta_vb_4_text_3} across NZ.") #VB5 tmp.ta_vb_5_value <- tmp.vb_data %>% filter(METRIC == 'in_4') %>% .[["VALUE"]] mig_all_panel$ta_vb_5_value <- tmp.ta_vb_5_value %>% as.integer() %>% # round(-(nchar(.) - 3)) %>% comma() tmp.ta_vb_5_text_1_val <- tmp.vb_data %>% filter(METRIC == 'out_4') %>% .[["VALUE"]] tmp.ta_vb_5_text_1 <- tmp.ta_vb_5_text_1_val%>% as.integer() %>% # round(-(nchar(.) - 3)) %>% comma() tmp.ta_vb_5_test_1 <- tmp.ta_vb_5_value > tmp.ta_vb_5_text_1_val tmp.ta_vb_5_text_2 <- paste0(formatC(100 * (tmp.ta_vb_5_value - tmp.ta_vb_5_text_1_val) / tmp.vb_data %>% filter(METRIC == 'in_4') %>% .[["TOTAL_PREV"]], format = 'f', digits = 1), "%") tmp.ta_vb_5_text_5 <- abs(tmp.ta_vb_5_value - tmp.ta_vb_5_text_1_val) tmp.ta_vb_5_text_3 <- tmp.vb_data_nat %>% filter(METRIC == 'net_4') %>% mutate(VALUE_PERC = paste0(formatC(100 * VALUE / TOTAL_PREV, format = 'f', digits = 1), "%")) %>% .[["VALUE_PERC"]] tmp.ta_vb_5_text_3_test <- tmp.vb_data_nat %>% filter(METRIC == 'net_4') %>% .[['VALUE']] > 0 tmp.ta_vb_5_text <- glue("children were born in {vt.param_ta} between June {vt.param_start_year} and June {vt.param_end_year} while {tmp.ta_vb_5_text_1} people died. 
This represents a natural {ifelse(tmp.ta_vb_5_test_1, 'increase', 'decrease')} of {comma(tmp.ta_vb_5_text_5)} or {tmp.ta_vb_5_text_2}, compared to a natural {ifelse(tmp.ta_vb_5_text_3_test, 'increase', 'decrease')} of {tmp.ta_vb_5_text_3} across New Zealand.") mig_all_panel$ta_vb_1_text <- tmp.ta_vb_1_text mig_all_panel$ta_vb_3_text <- tmp.ta_vb_3_text mig_all_panel$ta_vb_4_text <- tmp.ta_vb_4_text mig_all_panel$ta_vb_5_text <- tmp.ta_vb_5_text } }) #Add map download output$mig_all_map_download <- downloadHandler( filename = function(){ vt.param_agg_level <- input$mig_all_agg_level if(vt.param_agg_level == "Total NZ"){ return(paste0("NZ_migration_map_data_", format(Sys.time(), "%x_%H:%M"), ".csv")) } else{ vt.param_ta <- input$mig_all_param_ta return(paste0(vt.param_ta, "_migration_map_data_", format(Sys.time(), "%x_%H:%M"), ".csv")) } }, content = function(file) { # browser() isolate({ vt.param_agg = input$mig_all_agg_level vt.param_perc = input$mig_all_param_map_perc vt.param_map_flow = input$mig_all_param_flow }) df = mig_all_panel$map@data if(vt.param_agg == vt.init_mig_all_agg_level_select[1]){ if(!vt.param_perc){ df = national_map_csv_reformat(df, df.mig_all_map_national_rename_absolute) } else{ df = df %>% mutate(VALUE = paste0(formatC(100 * VALUE, format = 'f', digits = 2), "%")) df = national_map_csv_reformat(df, df.mig_all_map_national_rename_percentage) } } else{ vt.flow_dir_value = df.mig_natural_change_flow_dir_mapping %>% filter(FLOW_DIR %in% vt.param_map_flow) %>% .[["FLOW_DIR_VALUE"]] if(!vt.param_perc){ if(vt.flow_dir_value %in% vt.mig_all_param_flow_dir_in){ df = ta_map_csv_reformat(df, df.mig_all_map_ta_rename_in_absolute) } else{ df = ta_map_csv_reformat(df, df.mig_all_map_ta_rename_out_absolute) } } else{ if(vt.flow_dir_value %in% vt.mig_all_param_flow_dir_in){ df = df %>% mutate(VALUE = paste0(formatC(100 * VALUE, format = 'f', digits = 2), "%")) df = ta_map_csv_reformat(df, df.mig_all_map_ta_rename_in_percentage) } else{ df = df %>% 
mutate(VALUE = paste0(formatC(100 * VALUE, format = 'f', digits = 2), "%")) df = ta_map_csv_reformat(df, df.mig_all_map_ta_rename_out_percentage) } } } write.csv(df, file, row.names = F) }) #Invalidate the map the first time it's displayed. observeEvent({input$grey_tabBox}, { if(mig_all_panel$map_init_refresh == 0 & input$grey_tabBox == vt.mig_all_map_tab_title){ mig_all_panel$map_init_refresh <- mig_all_panel$map_init_refresh + 1 } })
a058f88a4f7110e068e7556ce0186b8adde3851f
38915da347869e164d9485f9bbd394fe56d2fcb0
/1_1MFScondist_jp/ui_E.R
0c7008e0eecffc750e814ce622eecd8c1584d83d
[ "MIT" ]
permissive
mephas/mephas_web
24df65c5bdbf1e65c91523f4bfd120abae03e409
197e99828d6b9a6a3c1d11b2fc404c9631103ec0
refs/heads/master
2023-07-21T14:29:37.597163
2023-07-13T03:32:32
2023-07-13T03:32:32
161,571,944
8
3
null
null
null
null
UTF-8
R
false
false
4,546
r
ui_E.R
#****************************************************************************************************************************************************1.2. Exp distribution sidebarLayout( sidebarPanel( h4(tags$b("Step 1. データソースを選ぶ")), p("数式ベース、シミュレーションベース、又は、ユーザのデータベース"), #Select Src selectInput( "InputSrc_e", "選択肢", c("数式ベース" = "MathDist", "シミュレーションベース" = "SimuDist", "ユーザデータベース" = "DataDist")), hr(), #Select Src end h4(tags$b("Step 2. パラメータの設定")), #condiPa 1 conditionalPanel( condition = "input.InputSrc_e == 'MathDist'", #"Draw an Exponential Distribution", p(br()), HTML("<b>パラメータの設定(E(Rate))</b></h4>"), numericInput("r", HTML(" 率(> 0) とは変化率を指します。率を入力"), value = 2, min = 0), hr(), numericInput("e.mean", HTML("または平均とSD (平均 = SD) から率を計算するために平均を入力"), value = 0.5, min = 0), verbatimTextOutput("e.rate"), p("Mean = SD = 1/Rate"), hr(), numericInput("e.xlim", "2. x軸の範囲(> 0)を変更します。", value = 5, min = 1) ), #condiPa 2 conditionalPanel( condition = "input.InputSrc_e == 'SimuDist'", numericInput("e.size", "シミュレートした数の標本サイズ", value = 100, min = 1,step = 1), sliderInput("e.bin", "ヒストグラムのビンの数", min = 0, max = 100, value = 0), p("ビンの数が0の場合はプロットでデフォルトのビンの数が使用されます。") ), #condiPa 2 end #condiPa 3 conditionalPanel( condition = "input.InputSrc_e == 'DataDist'", tabsetPanel( tabPanel("手入力",p(br()), p("データポイントは「,」「;」「Enter」「Tab」で区切ることができます。"), p(tags$b("データはCSV(1列)からコピーされ、ボックスに貼り付けられます")), tags$textarea( id = "x.e", #p rows = 10, "2.6\n0.5\n0.8\n2.3\n0.3\n2\n0.5\n4.4\n0.1\n1.1\n0.7\n0.2\n0.7\n0.6\n3.7\n0.3\n0.1\n1\n2.6\n1.3" ), p("欠損値はNAとして入力されます") ), tabPanel.upload.num(file ="e.file", header="e.header", col="e.col", sep="e.sep") ), sliderInput("bin.e", "ヒストグラムのビンの数", min = 0, max = 100, value = 0), p("ビンの数が0の場合はプロットでデフォルトのビンの数が使用されます。") ), #condiPa 3 end hr(), h4(tags$b("Step 3. 
確率を表示する")), numericInput("e.pr", HTML("赤線の左側の面積の割合 = Pr(X < x<sub>0</sub>)で、赤線の位置が x<sub>0</sub> です。"), value = 0.05, min = 0, max = 1, step = 0.05), hr() ), #sidePa end mainPanel( h4(tags$b("Outputs")), conditionalPanel( condition = "input.InputSrc_e == 'MathDist'", h4("数式ベースプロット"), tags$b("指数分布プロット"), plotOutput("e.plot", click = "plot_click9"),# verbatimTextOutput("e.info"), #p(br()), #p(tags$b("赤線の位置")), #p(tags$b("赤線の位置、 x<sub>0</sub>")), #verbatimTextOutput("e"), hr(), # plotly::plotlyOutput("e.plot.cdf") plotOutput("e.plot.cdf") ), conditionalPanel( condition = "input.InputSrc_e == 'SimuDist'", h4("シミュレーションベースプロット"), tags$b("乱数から作成したヒストグラム"), plotly::plotlyOutput("e.plot2"),#click = "plot_click10", #verbatimTextOutput("e.info2"), downloadButton("download2", "乱数をダウンロードする"), p(tags$b("サンプルの記述統計")), tableOutput("e.sum") ), conditionalPanel( condition = "input.InputSrc_e == 'DataDist'", tags$b("データの確認"), DT::DTOutput("Y"), h4("データの分布"), tags$b("アプロードされたデータの密度プロット"), plotly::plotlyOutput("makeplot.e2"), tags$b("アプロードされたデータのヒストグラム"), plotly::plotlyOutput("makeplot.e1"), tags$b("アプロードされた累積密度プロット(CDF)"), plotly::plotlyOutput("makeplot.e3"), p(tags$b("データの記述統計")), tableOutput("e.sum2") ) ) )
0df7e8e54a78248aa59b2eed3e02684cff1ff8f0
afdac1aa116adcd2cb004a636ebd1cf342b878cc
/man/calculate_conditioned_rdi.Rd
1faec6b41babb608cd29fd59c30d3425ce729a9d
[]
no_license
aNing1210/Test1
6260ae7b5f37d1c3073d146f60ceaf8dedd68533
fdc476a998bd1647e121f1af7c1fd9c1a6c7380e
refs/heads/master
2020-09-16T19:20:09.248930
2019-12-12T04:25:31
2019-12-12T04:25:31
223,865,898
0
0
null
null
null
null
UTF-8
R
false
true
1,890
rd
calculate_conditioned_rdi.Rd
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/function.R \name{calculate_conditioned_rdi} \alias{calculate_conditioned_rdi} \title{Calculate conditionally RDI value} \usage{ calculate_conditioned_rdi(cds_subset, super_graph = NULL, rdi_list, top_incoming_k = 1, uniformalize = FALSE, log = TRUE, pseudo_cnt = 1) } \arguments{ \item{cds_subset}{A cds_subset which has been ordered with Monocle 2} \item{super_graph}{A graph including all possible interactions used for performing the causality inference. When it is NULL, all possible pairs of interactions for the genes_data are used, otherwise only the interactions specified in the graph are used. Note that the super_graph only accept integer matrix for now (each integer corresponds to a particular gene in the genes_data).} \item{rdi_list}{a list returned from calculate_rdi function.} \item{top_incoming_k}{The number of genes to be conditioned on when calculating the conditional RDI values} \item{uniformalize}{Whether or not you want to use ucmi to calculate rdi. Default to be false.} } \value{ a dataframe storing conditional RDI results. First two columns are the id names for the genes. } \description{ This function estimates the conditional RDI value for all gene-pair combination from the genes_data (or that specified in the super_graph) in the pseudotime/time series data, conditioned on the top top_incoming_k incoming nodes. The result from the calculate_rdi function will be used to identify the proper delay for the gene-pair under test corresponds to that with highest RDI as well as the proper delays for the k incoming nodes which corresponds to that with highest RDI values. } \examples{ \dontrun{ lung <- load_lung() rdi_res <- calculate_rdi(exprs(lung)[, order(pData(lung)$Pseudotime)], delays = 5) lung_res_cRDI <- calculate_conditioned_rdi(lung, rdi_list = rdi_res) } }
e222ade27c2e4c6b0014cac1b9e3de5f569cf559
ee314b07bbbcdff0683cb1083a00f2d94e36ab82
/man/rftk-package.Rd
ef23dcc0b2ffebd1cb636066fa5e3a294460d436
[]
no_license
kingweck/rftk
08a6adf3c304d765af7958b948f76849dc2b0b7c
d92513178afd258c3aa0db0dd471c5d688d30073
refs/heads/master
2023-01-22T17:20:32.103051
2020-11-30T17:54:06
2020-11-30T17:54:06
null
0
0
null
null
null
null
UTF-8
R
false
true
331
rd
rftk-package.Rd
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/rftk.R \docType{package} \name{rftk-package} \alias{rftk} \alias{rftk-package} \title{rftk: RF Toolkit} \description{ R functions for working with RF/wireless data. } \author{ \strong{Maintainer}: Aaron Anderson \email{aaron.m.anderson.3@gmail.com} }
79311186b602ac668f3993a0b45f2db961c8874c
25c4d0cf195c5907f0ebddb12cb1e8aa9cb74b2e
/reporting/Summary_of_Findings.R
24dbb25d04e2d8a7153800f417143b9b1f1a3770
[]
no_license
rcatlord/reviewing
32ad628e9c406c3a8ca655f9e3e40a4ea953e3c1
3b9d6ff3ab3af899624a5cbc4d2fa986a131dbd3
refs/heads/master
2021-06-16T14:22:01.789570
2017-05-10T11:01:56
2017-05-10T11:01:56
79,554,838
1
0
null
null
null
null
UTF-8
R
false
false
1,028
r
Summary_of_Findings.R
## Calculate the corresponding risk for GRADE Summary of Findings tables # See http://cccrg.cochrane.org/sites/cccrg.cochrane.org/files/public/uploads/Summary%20of%20Findings%20Tables.pdf (p.23) # Data: Petrosino et al., 2013 # 1. Calculate the Assumed Risk # i.e. risk in control group = n events / N p <- 98 / 358 round(p*1000,0) # 2. Convert the Odds Ratio to Risk Ratio # RR = OR / (1 – p + (p x OR)) OR <- 1.68 RR <- OR / (1 - p * (1 - OR)) round(RR, 2) # RR = 1.42 # 3. Repeat for OR confidence intervals OR_lower <- 1.20 RR_lower <- OR_lower / (1 - p * (1 - OR_lower)) round(RR_lower, 2) ## CI (lower) = 1.14 OR_upper <- 2.36 RR_upper <- OR_upper / (1 - p * (1 - OR_upper)) round(RR_upper, 2) ## CI (upper) = 1.72 # 4. Multiply the RR by the Assumed Risk to obtain the Corresponding Risk CR <- (RR * p)*1000 round(CR, 0) # ie 39 per 1000 # 5. Repeat for confidence intervals CR_lower <- (RR_lower * p)*1000 round(CR_lower, 0) # ie 311 per 1000 CR_upper <- (RR_upper * p)*1000 round(CR_upper, 0) # ie 471 per 1000
a95d3e35f738f396e848466222aba05716cc7472
f2c79b247620463f045187e335e993d8c82e12e8
/plot1.R
6767785017e01e36e9b6bd8b4d9a6942b66e1dfe
[]
no_license
opalh/Exploratory-Data-Analysis-Course-Assignment-1
d7721111cb4986780188451825167ddf040a0283
5ec1de556739f45cd9c91d01525778d7c6b54ad2
refs/heads/master
2021-06-22T09:50:54.289684
2017-08-22T21:35:40
2017-08-22T21:35:40
null
0
0
null
null
null
null
UTF-8
R
false
false
826
r
plot1.R
library(dplyr) if(!file.exists("E:/R/Exploratory Data Analysis/1/assiment")){dir.create("E:/R/Exploratory Data Analysis/1/assiment")} url<-"https://d396qusza40orc.cloudfront.net/exdata%2Fdata%2Fhousehold_power_consumption.zip" download.file(url,"E:/R/Exploratory Data Analysis/1/assiment/dataset.zip") unzip("E:/R/Exploratory Data Analysis/1/assiment/dataset.zip", exdir = "E:/R/Exploratory Data Analysis/1/assiment") alldata <- read.table("E:/R/Exploratory Data Analysis/1/assiment/household_power_consumption.txt",header=TRUE, na.strings="?", sep=";") data<-filter(alldata, Date == "1/2/2007"| Date=="2/2/2007") str(data) hist(data$Global_active_power,xlab = "Global Active Power (kilowatts)",main="Global Active Power",col = "red") dev.copy(png, file="plot1.png", height=480, width=480) dev.off()
1111b3fce7b187c8e834e1163daeb0b878af290f
7919d7f2e4b9bdef8553efa8b4f814f218d85321
/PenalizeMimicry.R
f5a685d36b4f9c8aa6aba79df9fb94c6a9098d4e
[]
no_license
nklange/Gumbel
3ed5e1432f243bf3b73c1956ab3313a3aea7cfc0
5fd73cef0ee9d64b5349ddd32ad34cacce35bb68
refs/heads/main
2023-06-19T20:15:22.359652
2021-07-26T08:30:05
2021-07-26T08:30:05
372,556,096
0
0
null
null
null
null
UTF-8
R
false
false
29,611
r
PenalizeMimicry.R
# based on Wagenmakers et al 2004 - find optimal decision rule between models # i.e. to what extent need which models be penalized to find the optimal decision rule # in other words, how mimic-y are models even under the optimal decision rule library(cowplot) GetGsquared <- function(LL,observedData){ temp <- c(observedData[1:6] * log(observedData[1:6]/sum(observedData[1:6])), observedData[7:12] * log(observedData[7:12]/sum(observedData[7:12]))) temp[observedData == 0] <- 0 GSq = 2*(LL - -sum(temp) ) # Calculate G^2 (better approximator to Chi-sq dist than Pearson Chi-Sq) # f$df <- length(observedData)-length(names(get_start_par(model)))-1 # df model (L&F,p.63) n_observations - n_freeparas - 1 #f$pGSq <- 1-pchisq(f$GSq,f$df) # p value of GSq return(GSq) } MimicryFits <- NULL for (gen in c("genGumbelEVSDT","genGaussianUVSDT","genExGaussNormEVSDT")){ fits <- readRDS("LargeNSimulation/LargeN_mimicry_bestfits.rds") %>% filter(genmodel == substr(gen,start=4,stop=nchar(gen))) %>% arrange(id,model) # fits <- readRDS(paste0("SimulationFits/",gen,"_mimicry_bestfits.rds")) %>% # arrange(id,model) %>% mutate(genmodel = substr(gen,start=4,stop=nchar(gen))) data <- readRDS(paste0("LargeNSimulation/simulate_",gen,"_data_LN.rds")) %>% arrange(id) # data <- readRDS(paste0("SimulationData/simulate_",gen,"_data_mimicry.rds")) %>% # arrange(id) LL <- fits %>% #mutate(genmodel = substr(gen,start=4,stop=nchar(gen))) %>% group_by(genmodel,id,model) %>% group_nest(keep=T,.key="fit") # LL <- fits %>% mutate(genmodel = substr(gen,start=4,stop=nchar(gen))) %>% # group_by(genmodel,id,model) %>% # group_nest(keep=T,.key="fit") observedData <- data %>% group_by(genmodel,id) %>% group_nest(keep=T)%>% dplyr::slice(rep(1:n(),each=3)) # observedData <- data %>% group_by(genmodel,id) %>% # dplyr::slice(rep(1:n(),each=3)) getGsquaredvals <- LL %>% bind_cols(observedData %>% ungroup() %>% dplyr::select(data)) %>% mutate(Gsq = map2(.x = fit,.y = data, .f = ~GetGsquared(.x$objective,.y$Freq))) %>% 
mutate(objective = map(.x = fit,.f = ~mean(.x$objective))) %>% mutate(Dev = map(.x = fit,.f = ~mean(2*.x$objective))) %>% mutate(AIC = map(.x = fit,.f = ~mean(.x$AIC))) MimicryFits <- MimicryFits %>% bind_rows(getGsquaredvals) } testdat <- MimicryFits %>% dplyr::select(genmodel,id,model,Gsq,objective,Dev,AIC) %>% unnest(cols=c(Gsq,objective,Dev,AIC)) testdat <- testdat %>% filter(model %in% c("GaussianUVSDT","ExGaussNormEVSDT", "GumbelEVSDT"), genmodel %in% c("GaussianUVSDT", "ExGaussNormEVSDT", "GumbelEVSDT")) %>% dplyr::select(genmodel,id,model,Dev,AIC) custompenalty_pair <- function(data,par,modelpair){ calc<-data %>% mutate(penobjective = case_when(model == modelpair[[2]] ~ Dev + par, model == modelpair[[1]] ~ Dev)) %>% pivot_longer(cols = c("Dev","AIC","penobjective"), names_to = "penalty",values_to="value") %>% group_by(genmodel,penalty,id) %>% mutate(winning = ifelse(value == min(value),1,0)) %>% group_by(genmodel,penalty,model) %>% summarize(num = sum(winning)) %>% group_by(genmodel,penalty) %>% mutate(winprop = num/sum(num)) return(calc) } opt_penalty_maxmeanacc_pair <- function(data,par,modelpair){ res <- custompenalty_pair(data,par,modelpair) meanpw <- sum(res %>% filter(penalty == "penobjective") %>% filter(genmodel==model) %>% .$num)/ sum(res %>% filter(penalty == "penobjective") %>% .$num) return(-meanpw) } custompenalty <- function(data,par){ UVSDT_pen <- par[[1]] EG_pen <- par[[2]] calc<-data %>% mutate(penobjective = case_when(model == "GaussianUVSDT" ~ Dev + UVSDT_pen, model == "ExGaussNormEVSDT" ~ Dev + EG_pen, model == "GumbelEVSDT" ~ Dev)) %>% pivot_longer(cols = c("Dev","AIC","penobjective"),names_to = "penalty",values_to="value") %>% group_by(genmodel,penalty,id) %>% mutate(winning = ifelse(value == min(value),1,0)) %>% group_by(genmodel,penalty,model) %>% summarize(num = sum(winning), meanval = mean(value)) %>% group_by(genmodel,penalty) %>% mutate(winprop = num/sum(num)) return(calc) } opt_penalty_maxmeanacc <- function(data,par){ res <- 
custompenalty(data,par) meanpw <- sum(res %>% filter(penalty == "penobjective") %>% filter(genmodel==model) %>% .$num)/ sum(res %>% filter(penalty == "penobjective") %>% .$num) return(-meanpw) } resultsMaxAcc <- NULL for(i in c(1:200)){ start <- as_tibble(t(c(runif(1,-1,3),runif(1,-1,3)))) %>% set_colnames(c("s_UVSDT_pen","s_ExGN_pen")) fits2 <- optim(par = start, opt_penalty_maxmeanacc, data = testdat) res2 <- as_tibble(t(fits2$par)) %>% set_colnames(c("UVSDT_pen","ExGN_pen")) %>% mutate(meandiagonal = -fits2$value, fcteval = fits2$counts[[1]], rep = i) %>% bind_cols(start) resultsMaxAcc <- resultsMaxAcc %>% bind_rows(res2) } test <- resultsMaxAcc %>% arrange(-meandiagonal) saveRDS(resultsMaxAcc,file=paste0("SimulationFits/optimizeconfusionmatrix_meanacc_LN.rds")) # Optimize pairwise confusion matrices ---------------------------------------- #A,B,C in order makeplotGOF <- function(i){ modelexpression <- list(c("GumbelEVSDT","GaussianUVSDT"), c("GumbelEVSDT","ExGaussNormEVSDT"), c("ExGaussNormEVSDT","GaussianUVSDT")) modelnames <- list(c("Gumbel","UVSDT"), c("Gumbel","ExGauss"), c("ExGauss","UVSDT")) prepdata<- testdat %>% filter(genmodel %in% modelexpression[[i]] & model %in% modelexpression[[i]]) %>% mutate(model = factor(model,levels=modelexpression[[i]])) %>% group_by(genmodel,id) %>% arrange(genmodel,id,model) %>% mutate(genmodel= factor(genmodel,levels=modelexpression[[i]])) dists <- prepdata %>% mutate(GOFAB = Dev[[1]]-Dev) %>% filter(model != modelexpression[[i]][[1]]) # # FgenA <- ecdf(dists %>% filter(genmodel == modelexpression[[i]][[1]]) %>% .$GOFAB) # FgenB <- ecdf(dists %>% filter(genmodel == modelexpression[[i]][[2]]) %>% .$GOFAB) # # z <- uniroot(function(z) FgenA(z) + FgenB(z) - 1, # interval<-c(min(dists$GOFAB),max(dists$GOFAB))) # calcempopt <- dists %>% group_by(genmodel) %>% # summarize(sayA = length(GOFAB[GOFAB < z$root])/length(GOFAB), # sayB = length(GOFAB[GOFAB > z$root])/length(GOFAB)) %>% # 
pivot_longer(names_to="model",values_to="value",cols=c(sayA,sayB)) %>% # mutate(crit = paste0("optimal = ",round(z$root,2)), # crittype = "threshold") # # # calcemp0 <- dists %>% group_by(genmodel) %>% # summarize(sayA = length(GOFAB[GOFAB < 0])/length(GOFAB), # sayB = length(GOFAB[GOFAB > 0])/length(GOFAB)) %>% # pivot_longer(names_to="model",values_to="value",cols=c(sayA,sayB)) %>% # mutate(crit = "nominal = 0", # crittype = "threshold") maxmean <- optimize(f=opt_penalty_maxmeanacc_pair,data = prepdata, interval = c(-50, 50), modelpair=modelexpression[[i]]) naxm <- custompenalty_pair(prepdata,maxmean$minimum,modelexpression[[i]]) critmaxdiag <- naxm %>% mutate(model = case_when(model == modelexpression[[i]][[1]] ~ "sayA", TRUE~"sayB")) %>% ungroup() %>% dplyr::select(-num) %>% rename("value" = winprop) %>% mutate(genmodel = as.character(genmodel)) Accuracy <- naxm %>% group_by(penalty) %>% filter(genmodel==model) %>% summarize(value = mean(winprop))%>% mutate(model = "sayA") %>% mutate(genmodel = "ZAcc") Penalty <- naxm %>% group_by(penalty) %>% dplyr::select(penalty,model) %>% distinct() %>% mutate(value = case_when(model != "GumbelEVSDT" & penalty == "AIC" ~ ifelse(i == 3,0,2), penalty == "Dev" ~ 0, penalty == "penobjective" & model != modelexpression[[i]][[1]] ~ maxmean$minimum, TRUE ~ 0))%>% mutate(model = case_when(model == modelexpression[[i]][[1]] ~ "sayA", TRUE~"sayB")) %>% mutate(genmodel = "YPen") tablegraph <- bind_rows(critmaxdiag,Accuracy,Penalty) %>% mutate(penalty = factor(penalty,levels=c("Dev","AIC","penobjective"), labels=c("-2LL","AIC","Max Acc"))) %>% # mutate(model = factor(model)) %>% mutate(genmodel = factor(genmodel,levels=c("ZAcc","YPen",rev(modelexpression[[i]])), labels = c("Accuracy","Penalty",rev(modelnames[[i]])))) %>% mutate(colorunder = ifelse(!genmodel %in% modelnames[[i]],1,0)) %>% mutate(colorunder = factor(colorunder)) nameexpression <- c(expression(paste(Delta, GOF[GumUVSD]," = ", GOF[Gumbel] - GOF[UVSD])), 
expression(paste(Delta, GOF[GumExG]," = ", GOF[Gumbel] - GOF[ExGauss])), expression(paste(Delta, GOF[ExGUVSD]," = ", GOF[ExGauss] - GOF[UVSD]))) placeopt <- which(sort(c(seq(-5,10,5),maxmean$minimum)) == maxmean$minimum) prelabels <- c(-5,0,5,10) xlabels <- append(prelabels,"\nopt",after=placeopt-1) colorexpression <- list(c("#009E73","#E69F00" ), c( "#009E73","#0072B2"), c("#0072B2","#E69F00")) A <- ggplot(dists,aes(x=GOFAB))+ geom_histogram(data=dists,aes(x=GOFAB,fill=genmodel,y=..density..), alpha=0.3,binwidth = 0.05, position = "identity")+ # geom_density_ridges(data=dists,aes(x=GOFAB,fill=genmodel, # y = genmodel, height = stat(density)), # stat = "binline", bins = 200, #scale = 0.95, # scale = 50,alpha=0.4, # draw_baseline = FALSE # )+ # geom_density(aes(group=genmodel),size=1,color="black",adjust=0.5, # bw = "nrd", # kernel = "gaussian", # n = 1024)+ scale_color_manual(name="Generating model",values = colorexpression[[i]], labels=modelnames[[i]])+ scale_fill_manual(name = "Generating model",values = colorexpression[[i]], labels=modelnames[[i]])+ geom_vline(size=0.5,color="black",xintercept=0)+ geom_vline(size=0.5,linetype="dashed",color="black",xintercept=maxmean$minimum)+ coord_cartesian(xlim=c(-5,10),ylim=c(0,1)) + annotate("text",label=paste0("Recover: ",modelnames[[i]][[1]]),x=-3,y=0.9)+ annotate("text",label=paste0("Recover: ",modelnames[[i]][[2]]),x=5,y=0.9)+ scale_x_continuous(name = nameexpression[i],breaks=sort(c(seq(-5,10,5),maxmean$minimum)), labels=xlabels)+ theme(axis.text.x = element_text(size=12), axis.text.y = element_blank(), axis.ticks.y = element_blank(), axis.title.y = element_blank(), #legend.title = element_blank(), panel.background = element_rect(fill = "white"), legend.position = c(0.7,0.5), panel.border = element_rect(colour = "black", fill = "transparent"), strip.background = element_rect(fill = "white"), strip.placement = "outside", strip.text = element_text(size=12)) B <- ggplot(tablegraph,aes(y = genmodel,x = 
model,fill=colorunder)) + geom_tile(color="white") + scale_fill_manual(values=c("#F0F0F0","white"))+ geom_text(aes(label=ifelse(value > abs(0.0005), ifelse(genmodel=="Accuracy" | abs(value)<.005, stringr::str_remove(round(value,3), "^0+"), stringr::str_remove(round(value,2), "^0+")),"0")),size=4)+ scale_x_discrete(position = "top",name="Recovered",labels=modelnames[[i]])+ scale_y_discrete(position = "left", name="Generating")+ facet_grid(.~penalty)+ theme(axis.text.y = element_text(size=10), axis.text.x = element_text(size=10), axis.title.y = element_text(hjust=0.9), axis.ticks = element_blank(), panel.background = element_rect(fill = "white"), panel.border = element_rect(fill = "transparent",colour="black"), strip.placement = "outside", legend.position = "none", strip.background = element_rect(fill = "transparent", colour="transparent"), strip.text = element_text(size = 12)) plot_grid(A,B,nrow=2,rel_heights=c(0.6,0.4)) } pwplots <- plot_grid(makeplotGOF(2), makeplotGOF(1), makeplotGOF(3),labels=c("A","B","C"),scale=.9,nrow=1, label_x = 0, label_y = 1) # Make tablegraph for 3-model situation # First: estimate penalties like in two-model case maxacc <-readRDS(paste0("SimulationFits/optimizeconfusionmatrix_meanacc_LN.rds")) optthresh <- maxacc %>% dplyr::filter(meandiagonal == max(meandiagonal)) %>% dplyr::slice(1) penaltybased <- custompenalty(testdat,c(optthresh %>% .$UVSDT_pen, optthresh %>% .$ExGN_pen)) critmaxdiag <- penaltybased %>% mutate(model = case_when(model == "GumbelEVSDT" ~ "sayA", model == "ExGaussNormEVSDT" ~ "sayB", TRUE~"sayC")) %>% ungroup() %>% dplyr::select(-num,-meanval) %>% rename("value" = winprop) %>% mutate(genmodel = as.character(genmodel)) meanvalues <- penaltybased %>% mutate(model = case_when(model == "GumbelEVSDT" ~ "sayA", model == "ExGaussNormEVSDT" ~ "sayB", TRUE~"sayC")) %>% ungroup() %>% dplyr::select(-num,-winprop) %>% rename("value" = meanval) %>% mutate(genmodel = as.character(genmodel)) %>% group_by(genmodel,penalty) %>% 
mutate(minval = value - min(value)) Accuracy <- penaltybased %>% group_by(penalty) %>% filter(genmodel==model) %>% summarize(value = mean(winprop))%>% mutate(model = "sayA") %>% mutate(genmodel = "ZAcc") Penalty <- penaltybased %>% group_by(penalty) %>% dplyr::select(penalty,model) %>% distinct() %>% mutate(value = case_when(model != "GumbelEVSDT" & penalty == "AIC" ~ 2, penalty == "objective" ~ 0, penalty == "penobjective" & model == "GaussianUVSDT" ~ optthresh$UVSDT_pen, penalty == "penobjective" & model == "ExGaussNormEVSDT" ~ optthresh$ExGN_pen, TRUE ~ 0))%>% mutate(model = case_when(model == "GumbelEVSDT" ~ "sayA", model == "ExGaussNormEVSDT" ~ "sayB", TRUE~"sayC")) %>% mutate(genmodel = "YPen") tablegraph <- bind_rows(critmaxdiag,Accuracy,Penalty) %>% mutate(penalty = factor(penalty,levels=c("Dev","AIC","penobjective"), labels=c("-2LL","AIC","Max Acc"))) %>% # mutate(model = factor(model)) %>% mutate(genmodel = factor(genmodel,levels=c("ZAcc","YPen","GaussianUVSDT","ExGaussNormEVSDT","GumbelEVSDT"), labels = c("Accuracy","Penalty","UVSDT","ExGauss","Gumbel"))) %>% mutate(colorunder = ifelse(!genmodel %in%c("UVSDT","ExGauss","Gumbel"),1,0)) %>% mutate(colorunder = factor(colorunder)) tablegraph2 <- bind_rows(meanvalues) %>% mutate(penalty = factor(penalty,levels=c("Dev","AIC","penobjective"), labels=c("-2LL","AIC","Max Acc"))) %>% # mutate(model = factor(model)) %>% mutate(genmodel = factor(genmodel,levels=c("GaussianUVSDT","ExGaussNormEVSDT","GumbelEVSDT"), labels = c("UVSDT","ExGauss","Gumbel"))) %>% mutate(colorunder = ifelse(!genmodel %in%c("UVSDT","EGNorm","Gumbel"),1,0)) %>% mutate(colorunder = factor(colorunder)) Dgraph <- ggplot(tablegraph,aes(y = genmodel,x = model,fill=colorunder)) + geom_tile(color="white") + scale_fill_manual(values=c("#E8E8E8","white"))+ geom_text(aes(label=ifelse(genmodel=="Accuracy", stringr::str_remove(round(value,3), "^0+"), ifelse(genmodel=="Penalty" | value > .005, paste(round(value,2)), ifelse(!genmodel %in% 
c("Accuracy","Penalty"), stringr::str_remove(round(value,2), "^0+"), "0")))),size=4)+ scale_x_discrete(position = "top",name="Recovered",labels=c("Gumbel","ExGauss","UVSDT"))+ scale_y_discrete(position = "left", name="Generating")+ facet_grid(.~penalty)+ theme(axis.text.y = element_text(size=10), axis.text.x = element_text(size=10), axis.title.y = element_text(hjust=0.75), axis.ticks = element_blank(), panel.background = element_rect(fill = "white"), panel.border = element_rect(fill = "transparent",colour="black"), strip.placement = "outside", legend.position = "none", strip.background = element_rect(fill = "transparent", colour="transparent"), strip.text = element_text(size = 12)) Dgraph2 <- ggplot(tablegraph2,aes(y = genmodel,x = model,fill=colorunder)) + geom_tile(color="white") + scale_fill_manual(values=c("#E8E8E8","white"))+ geom_text(aes(label = round(minval,2)),size=4)+ scale_x_discrete(position = "top",name=expression(paste("Recovered (",Delta,frac(1,N),Sigma,")")),labels=c("Gumbel","ExGauss","UVSD"))+ scale_y_discrete(position = "left", name="Generating")+ facet_grid(.~penalty)+ theme(axis.text.y = element_text(size=10), axis.text.x = element_blank(), #axis.title.y = element_text(hjust=0.75), axis.ticks = element_blank(), panel.background = element_rect(fill = "white"), panel.border = element_rect(fill = "transparent",colour="black"), strip.background = element_blank(), strip.text.x = element_blank(), strip.placement = "outside", legend.position = "none") #Visualize distributions testdat <- MimicryFits %>% dplyr::select(genmodel,id,model,Gsq,objective,Dev,AIC) %>% # dplyr::select(genmodel,condition,id,model,Gsq,objective,Dev,AIC) %>% # filter(condition == cond) %>% ungroup() %>% dplyr::select(-condition) %>% unnest(cols=c(Gsq,objective,Dev,AIC)) plotty <- testdat %>% mutate(logobj = log(Gsq)) %>% dplyr::select(-AIC,-objective,-Dev,-Gsq) %>% mutate(genmodel = factor(genmodel,levels=c("GumbelEVSDT","ExGaussNormEVSDT","GaussianUVSDT"), labels=c("Gen: 
Gumbel","Gen: EGNorm","Gen:UVSDT"))) %>% mutate(model = factor(model,levels=c("GumbelEVSDT","ExGaussNormEVSDT","GaussianUVSDT"), labels=c("Fit: Gumbel","Fit: EGNorm","Fit: UVSDT"))) library(ggridges) densitypl <- ggplot(plotty, aes(x = logobj,fill=genmodel,y=genmodel))+ geom_density_ridges(aes(fill = genmodel),scale=10,alpha=0.9, quantile_lines = TRUE, quantiles = 2)+ scale_x_continuous(name=expression(paste("log(",G^2,")")))+ scale_fill_manual(values=c("#009E73","#56B4E9","#E69F00"), name= "Generating model", labels=c("Gumbel","ExGauss","UVSDT"))+ facet_grid(.~model)+ guides(fill = guide_legend(reverse = TRUE)) + theme(axis.text.y = element_text(size=10), axis.text.x = element_text(size=10), axis.title.y = element_blank(), axis.ticks = element_blank(), panel.background = element_rect(fill = "white"), panel.border = element_rect(fill = "transparent",colour="black"), strip.placement = "outside", legend.position = c(0.075,0.6), strip.background = element_rect(fill = "transparent", colour="transparent"), strip.text = element_text(size = 12)) densitypl2 <- ggplot(plotty, aes(x = logobj,fill=model,y=model))+ geom_density_ridges(aes(fill = model),scale=5,alpha=0.7, quantile_lines = TRUE, quantiles = 2)+ scale_x_continuous(name=expression(paste("log(",G^2,")")))+ scale_fill_manual(values=c("#009E73","#56B4E9", "#E69F00"), name= "Fitted model", labels=c("Gumbel","ExGauss","UVSDT"))+ coord_cartesian(xlim=c(-10,40))+ guides(fill = guide_legend(reverse = TRUE)) + facet_grid(.~genmodel)+ theme(axis.text.y = element_text(size=10), axis.text.x = element_text(size=10), axis.title.y = element_blank(), axis.ticks = element_blank(), panel.background = element_rect(fill = "white"), panel.border = element_rect(fill = "transparent",colour="black"), strip.placement = "outside", legend.position = c(0.075,0.6), strip.background = element_rect(fill = "transparent", colour="transparent"), strip.text = element_text(size = 12)) plotty <- testdat %>% mutate(logobj = Gsq) %>% 
dplyr::select(-AIC,-objective,-Dev,-Gsq) %>% mutate(genmodel = factor(genmodel,levels=c("GumbelEVSDT","ExGaussNormEVSDT","GaussianUVSDT"), labels=c("Gen: Gumbel","Gen: ExGauss","Gen:UVSDT"))) %>% mutate(model = factor(model,levels=c("GumbelEVSDT","ExGaussNormEVSDT","GaussianUVSDT"), labels=c("Fit: Gumbel","Fit: ExGauss","Fit: UVSDT"))) densitypl3 <- ggplot(plotty, aes(x = logobj,fill=model,y=model))+ geom_density_ridges(aes(fill = model),scale=5,alpha=0.7, quantile_lines = TRUE, quantiles = 2)+ scale_x_continuous(name=expression(paste(G^2)))+ scale_fill_manual(values=c("#56B4E9","#009E73", "#E69F00"), name= "Fitted model", labels=c("Gumbel","EGNorm","UVSDT"))+ guides(fill = guide_legend(reverse = TRUE)) + facet_grid(.~genmodel)+ theme(axis.text.y = element_text(size=10), axis.text.x = element_text(size=10), axis.title.y = element_blank(), axis.ticks = element_blank(), panel.background = element_rect(fill = "white"), panel.border = element_rect(fill = "transparent",colour="black"), strip.placement = "outside", legend.position = c(0.15,0.6), strip.background = element_rect(fill = "transparent", colour="transparent"), strip.text = element_text(size = 12)) Table <- plot_grid(Dgraph,Dgraph2,nrow=2,rel_heights= c(0.6,0.4),labels=c("","")) threedim <- plot_grid(densitypl2,Table,ncol=2,labels=c("D",""),scale=.9, label_x = 0.05, label_y = 1) plot_grid(pwplots,threedim,labels=c("",""),nrow=2,rel_heights=c(1,0.8)) ggsave(paste0("MimicryFull.png"), units="cm", width=35, height=25, dpi=600) twodi <- plot_grid( makeplotGOF(1), makeplotGOF(3)) mimicry <- plot_grid(twodi,Dgraph,nrow=2,rel_heights=c(2.2,1)) ggsave(paste0("Figures/Mimicry_LN.png"), units="cm", width=25, height=15, dpi=600) # Other stufff ---- # knn # riffing on multi-modelPBCM (MMPBCM) Schultheis & Naidu (2014) # CogSci proceedings # https://escholarship.org/content/qt7544w9b0/qt7544w9b0.pdf # don't use difference GOF but use dimensionality of GOF to find # use k-nearest-neighbor # makeTibbleCM <- function(cm) { # 
tibble(data.frame(num = c(as.numeric(cm[1,c(1:3)]), # as.numeric(cm[2,c(1:3)]), # as.numeric(cm[3,c(1:3)])), # genmodel = rep(row.names(cm),each=3), # model = rep(row.names(cm),3) # )) # # } # # library(caret) # # prep <- testdat %>% # dplyr::select(-AIC) %>% # pivot_wider(names_from="model",id_cols=c("genmodel","id"),values_from="objective") # # # # Splitting data into train # # and test data # # datprep <- prep %>% mutate(split = sample(c("TRUE","FALSE"),size=length(dim(prep)[[1]]),replace=T,prob=c(0.7,0.3))) %>% # mutate(genmodel = factor(genmodel)) %>% ungroup() %>% dplyr::select(-id,-split) # # set.seed(300) # #Spliting data as training and test set. Using createDataPartition() function from caret # indxTrain <- createDataPartition(y = datprep$genmodel,p = 0.75,list = FALSE) # training <- datprep[indxTrain,] %>% ungroup() # testing <- datprep[-indxTrain,] %>% ungroup() # # set.seed(400) # ctrl <- trainControl(method="repeatedcv",repeats = 3) #,classProbs=TRUE,summaryFunction = twoClassSummary) # knnFit <- train(genmodel ~ ., data = training, method = "knn", trControl = ctrl, # preProcess = c("center","scale"), tuneLength = 20) # # #Output of kNN fit # knnFit$results # # knnPredict <- predict(knnFit,newdata = testing) # #Get the confusion matrix to see accuracy value and other parameter values # test <- confusionMatrix( testing$genmodel,knnPredict, dnn = c("Generated", "Recovered")) # cm <- test$table # # # knnacc <- tibble(value = mean(testing$genmodel==knnPredict)) %>% # # mutate(model = "sayA") %>% # mutate(genmodel = "ZAcc") %>% # mutate(penalty = "kNN") # # knntab <- makeTibbleCM(cm) %>% mutate(penalty = "kNN") # # # LDA # # library(MASS) # # # Estimate preprocessing parameters # preproc.param <- training %>% # preProcess(method = c("center", "scale")) # # Transform the data using the estimated parameters # train.transformed <- preproc.param %>% predict(training) # test.transformed <- preproc.param %>% predict(testing) # # model <- lda(genmodel~., data 
=train.transformed) # predictions <- model %>% predict(test.transformed) # # cm <- table(test.transformed$genmodel,predictions$class) # ldatab <- makeTibbleCM(cm) %>% mutate(penalty = "LDA") # # ldaacc <- tibble(value = mean(test.transformed$genmodel==predictions$class)) %>% # # mutate(model = "sayA") %>% # mutate(genmodel = "ZAcc") %>% # mutate(penalty = "LDA") # # # QDA # # model <- qda(genmodel~., data =train.transformed) # predictions <- model %>% predict(test.transformed) # # Model accuracy # mean(predictions$class==test.transformed$genmodel) # # model # # cm <- table(test.transformed$genmodel, predictions$class) # qdatab <- makeTibbleCM(cm) %>% mutate(penalty = "QDA") # # qdaacc <- tibble(value =mean(predictions$class==test.transformed$genmodel)) %>% # # mutate(model = "sayA") %>% # mutate(genmodel = "ZAcc") %>% # mutate(penalty = "QDA") # # # MDA # # library(mda) # model <- mda(genmodel~., data =train.transformed) # predicted.classes <- model %>% predict(test.transformed) # # Model accuracy # mean(predicted.classes == test.transformed$genmodel) # # cm <- table(test.transformed$genmodel, predicted.classes) # mdatab <- makeTibbleCM(cm) %>% mutate(penalty = "MDA") # # mdaacc <- tibble(value =mean(predicted.classes == test.transformed$genmodel)) %>% # # mutate(model = "sayA") %>% # mutate(genmodel = "ZAcc") %>% # mutate(penalty = "MDA") # # # mlmax <- bind_rows(knntab,ldatab,qdatab,mdatab) %>% group_by(penalty,genmodel) %>% # mutate(value = num/sum(num)) %>% dplyr::select(-num) %>% # mutate(model = case_when(model == "GumbelEVSDT" ~ "sayA", # model == "ExGaussNormEVSDT" ~ "sayB", # TRUE~"sayC")) # # mlaccuracy <- bind_rows(knnacc,ldaacc,qdaacc,mdaacc) # # tablegraph <- bind_rows(critmaxdiag,Accuracy,Penalty,mlmax,mlaccuracy) %>% # mutate(penalty = factor(penalty,levels=c("objective","AIC","penobjective","kNN","LDA","QDA","MDA"), # labels=c("-2LL","AIC","Max Acc","k(=15)NN","LDA","QDA","MDA"))) %>% # # mutate(model = factor(model)) %>% # mutate(genmodel = 
factor(genmodel,levels=c("ZAcc","YPen","GaussianUVSDT","ExGaussNormEVSDT","GumbelEVSDT"), # labels = c("Accuracy","Penalty","UVSDT","EGNorm","Gumbel"))) %>% # mutate(colorunder = ifelse(!genmodel %in%c("UVSDT","EGNorm","Gumbel"),1,0)) %>% # mutate(colorunder = factor(colorunder)) # # plotty <- testdat %>% mutate(logobj = log(Gsq)) %>% dplyr::select(-AIC,-objective,-Dev,-Gsq) %>% # mutate(genmodel = factor(genmodel,levels=c("GumbelEVSDT","ExGaussNormEVSDT","GaussianUVSDT"), # labels=c("Gumbel","EGNorm","UVSDT"))) %>% # pivot_wider(names_from="model",values_from="logobj") %>% # relocate(GumbelEVSDT,before=ExGaussNormEVSDT) %>% # rename("Fit: Gumbel"=GumbelEVSDT, # "Fit: EGNorm" =ExGaussNormEVSDT, # "Fit: UVSDT" =GaussianUVSDT) # library(GGally) # p1 <- ggpairs(plotty,columns = c(1,2,5), # mapping = ggplot2::aes(color = genmodel), # legend = 1, # upper = list(continuous = "blank"), # lower = list(continuous = wrap("points", alpha = 0.1), # combo = wrap("dot", alpha = 0.4)), # diag = list(continuous = wrap("densityDiag", alpha = 0.3)), # xlab = expression(paste("log(",G^2,")")), # ylab = NULL, # axisLabels = c("show")) # # for(i in 1:p1$nrow) { # for(j in 1:p1$ncol){ # p1[i,j] <- p1[i,j] + # scale_fill_manual(values=c("#56B4E9","#009E73", "#E69F00"), name= "Generating model") + # scale_color_manual(values=c("#56B4E9","#009E73", "#E69F00"), name= "Generating model") # } # } # # plotface <- p1 + theme(legend.position = "bottom", # panel.background = element_rect(fill = "white"), # # panel.border = element_rect(colour = "black", fill = "transparent"), # strip.background = element_rect(fill = "#F0F0F0",color="black"), # strip.text = element_text(size=12))
466fc846ae7bef5081c22a1d6721642337d58a9a
5a1147e95d9e1a88b81c22fe2de9910066045871
/Classification in R.R
98e2057c9e44704dc7f6292c5b5583f07dd3efec
[]
no_license
sikrimanas/Statistics
fb7b700afefdfef20ab7d6a203a244eda7a19899
9a3b070c1dbdad810a709328e8f8447c107ae0e4
refs/heads/master
2021-01-12T03:28:28.606005
2017-01-06T15:05:33
2017-01-06T15:05:33
78,214,121
0
0
null
null
null
null
UTF-8
R
false
false
3,390
r
Classification in R.R
# Classification in R ###
# Author : Manas Sikri
# Date: 13-Dec-2016
#
# Compares three classifiers on the ISLR stock-market data (Smarket):
# logistic regression, linear discriminant analysis (LDA) and k-nearest
# neighbours (KNN).  The binary response is Direction ("Up"/"Down");
# models are trained on years before 2005 and evaluated on 2005.

library(ISLR)   # Smarket data set
library(MASS)   # lda()
library(class)  # knn()

# ---- Explore the data -------------------------------------------------
names(Smarket)
View(Smarket)
summary(Smarket)
?Smarket
# Pairwise scatter plots, coloured by market direction.
pairs(Smarket, col = Smarket$Direction)

# ---- Logistic regression, fitted on all years -------------------------
glm.fit <- glm(Direction ~ Lag1 + Lag2 + Lag3 + Lag4 + Lag5 + Volume,
               data = Smarket,
               family = binomial)  # binomial family => logistic regression
summary(glm.fit)

# In-sample fitted probabilities of the market going "Up".
glm.probs <- predict(glm.fit, type = "response")
glm.probs[1:5]

# Classify with a 0.5 threshold and compare with the observed direction.
glm.pred <- ifelse(glm.probs > 0.5, "Up", "Down")
table(glm.pred, Smarket$Direction)
mean(glm.pred == Smarket$Direction)  # in-sample accuracy (optimistic)

# ---- Train/test split: train on years < 2005, test on 2005 ------------
train <- Smarket$Year < 2005
glm.fit <- glm(Direction ~ Lag1 + Lag2 + Lag3 + Lag4 + Lag5 + Volume,
               data = Smarket,
               family = binomial,
               subset = train)
glm.probs <- predict(glm.fit,
                     newdata = Smarket[!train, ],
                     type = "response")
glm.pred <- ifelse(glm.probs > 0.5, "Up", "Down")
Direction.2005 <- Smarket$Direction[!train]  # held-out responses
table(glm.pred, Direction.2005)
mean(glm.pred == Direction.2005)  # out-of-sample accuracy

# ---- Smaller logistic model: Lag1 and Lag2 only -----------------------
glm.fit <- glm(Direction ~ Lag1 + Lag2,
               data = Smarket,
               family = binomial,
               subset = train)
glm.probs <- predict(glm.fit,
                     newdata = Smarket[!train, ],
                     type = "response")
glm.pred <- ifelse(glm.probs > 0.5, "Up", "Down")
table(glm.pred, Direction.2005)
mean(glm.pred == Direction.2005)
106 / (76 + 106)  # hand-computed ratio from the confusion matrix above

# ---- Linear discriminant analysis -------------------------------------
lda.fit <- lda(Direction ~ Lag1 + Lag2, data = Smarket, subset = train)
lda.fit
plot(lda.fit)
Smarket.2005 <- subset(Smarket, Year == 2005)
lda.pred <- predict(lda.fit, Smarket.2005)
# predict.lda returns a list (class, posterior, x); flatten it for display.
class(lda.pred)
data.frame(lda.pred)[1:5, ]
# Confusion matrix and accuracy on the 2005 test year.
table(lda.pred$class, Smarket.2005$Direction)
mean(lda.pred$class == Smarket.2005$Direction)

# ---- K-nearest neighbours (k = 1) -------------------------------------
?knn
Xlag <- with(Smarket, cbind(Lag1, Lag2))  # predictor matrix
knn.pred <- knn(Xlag[train, ], Xlag[!train, ], Smarket$Direction[train], k = 1)
table(knn.pred, Smarket$Direction[!train])
mean(knn.pred == Smarket$Direction[!train])
12e3646bd1907e141b044a2b815065007fc318a5
e93cf6300b21ad0e89fcfd5d2ce05029c3247952
/astsa/R/Kfilter2.R
d3e79e50bb7d2aed35faf7d4c95d23180620bc2f
[ "GPL-3.0-only" ]
permissive
wilsonify/TimeSeries
51a1b80afa512aad4512471d8031049699b890f1
0d67fc256e6a406c560711d448f2db3f66203a23
refs/heads/master
2023-01-08T17:43:00.863159
2023-01-07T16:57:55
2023-01-07T16:57:55
146,666,517
0
0
MIT
2018-09-25T12:50:18
2018-08-29T22:38:08
Jupyter Notebook
UTF-8
R
false
false
2,281
r
Kfilter2.R
Kfilter2 <- function(num, y, A, mu0, Sigma0, Phi, Ups, Gam, Theta, cQ, cR, S, input) {
  ######## Reference Property 6.5 in Section 6.6 ###########
  # Kalman filter allowing correlated state/observation noise (cov S).
  #
  # Arguments:
  #   num    - number of observations
  #   y      - data matrix (num by q)
  #   A      - observation matrices, array qdim x pdim x num
  #   mu0    - initial mean; converted below to mu1 = E(x_1) = Phi%*%mu0 + Ups%*%input1
  #   Sigma0 - initial variance; converted below to
  #            Sigma1 = var(x_1) = Phi%*%Sigma0%*%t(Phi) + Theta%*%Q%*%t(Theta)
  #   Ups, Gam, input - exogenous-input terms; set to 0 if not used
  #                     (input must be a matrix, num by r, like the obs y)
  #   cQ, cR - Cholesky factors of the noise covariances: cQ = chol(Q), cR = chol(R)
  #   S      - covariance between state and observation noise
  #
  # Returns a list with predicted/filtered states and covariances
  # (xp, Pp, xf, Pf), the Kalman gains (K), the -log(likelihood) up to an
  # additive constant (like), and the innovations with their covariances
  # (innov, sig).

  # Recover the full covariance matrices from their Cholesky factors.
  Q <- t(cQ) %*% cQ
  R <- t(cR) %*% cR

  Phi <- as.matrix(Phi)
  pdim <- nrow(Phi)
  y <- as.matrix(y)
  qdim <- ncol(y)
  rdim <- ncol(as.matrix(input))

  # A scalar 0 for Ups/Gam means "not used": expand to zero matrices.
  if (max(abs(Ups)) == 0) Ups <- matrix(0, pdim, rdim)
  if (max(abs(Gam)) == 0) Gam <- matrix(0, qdim, rdim)
  ut <- matrix(input, num, rdim)

  xp <- array(NA, dim = c(pdim, 1, num))       # xp = x_t^{t-1} (predicted)
  Pp <- array(NA, dim = c(pdim, pdim, num))    # Pp = P_t^{t-1}
  xf <- array(NA, dim = c(pdim, 1, num))       # xf = x_t^{t}   (filtered)
  Pf <- array(NA, dim = c(pdim, pdim, num))    # Pf = P_t^{t}
  Gain <- array(NA, dim = c(pdim, qdim, num))  # Kalman gains
  innov <- array(NA, dim = c(qdim, 1, num))    # innovations
  sig <- array(NA, dim = c(qdim, qdim, num))   # innovation var-cov matrices
  like <- 0                                    # accumulates -2*log(likelihood)

  # Initial one-step-ahead prediction from the prior (mu0, Sigma0).
  xp[, , 1] <- Phi %*% mu0 + Ups %*% matrix(ut[1, ], ncol = 1)      # mu1
  Pp[, , 1] <- Phi %*% Sigma0 %*% t(Phi) + Theta %*% Q %*% t(Theta) # Sigma1

  for (i in seq_len(num)) {
    B <- matrix(A[, , i], nrow = qdim, ncol = pdim)
    innov[, , i] <- y[i, ] - B %*% xp[, , i] - Gam %*% matrix(ut[i, ], ncol = 1)
    sigma <- B %*% Pp[, , i] %*% t(B) + R
    sigma <- (t(sigma) + sigma) / 2  # force sigma to be exactly symmetric
    sig[, , i] <- sigma
    siginv <- solve(sigma)
    Gain[, , i] <- (Phi %*% Pp[, , i] %*% t(B) + Theta %*% S) %*% siginv
    # BUG FIX: the original used as.matrix(Gain[,,i], nrow=qdim, ncol=pdim).
    # as.matrix() silently ignores dimension arguments, so when pdim < qdim
    # the dropped slice was rebuilt as a column instead of a pdim x qdim
    # matrix (Gain is pdim x qdim), breaking the time update below.
    K <- matrix(Gain[, , i], nrow = pdim, ncol = qdim)
    xf[, , i] <- xp[, , i] + Pp[, , i] %*% t(B) %*% siginv %*% innov[, , i]
    Pf[, , i] <- Pp[, , i] - Pp[, , i] %*% t(B) %*% siginv %*% B %*% Pp[, , i]
    sigma <- matrix(sigma, nrow = qdim, ncol = qdim)
    # Gaussian log-likelihood contribution of this innovation.
    like <- like + log(det(sigma)) + t(innov[, , i]) %*% siginv %*% innov[, , i]
    if (i == num) break
    # Time update using the correlated-noise gain K.
    xp[, , i + 1] <- Phi %*% xp[, , i] + Ups %*% matrix(ut[i + 1, ], ncol = 1) + K %*% innov[, , i]
    Pp[, , i + 1] <- Phi %*% Pp[, , i] %*% t(Phi) + Theta %*% Q %*% t(Theta) - K %*% sig[, , i] %*% t(K)
  }
  like <- 0.5 * like
  list(xp = xp, Pp = Pp, xf = xf, Pf = Pf, K = Gain, like = like, innov = innov, sig = sig)
}
9742ab83b96fc56463d68afe090585f5e6356de1
2a7e77565c33e6b5d92ce6702b4a5fd96f80d7d0
/fuzzedpackages/bigsparser/tests/testthat/test-SFBM.R
226355d4691219d7edf81750199a980498994474
[]
no_license
akhikolla/testpackages
62ccaeed866e2194652b65e7360987b3b20df7e7
01259c3543febc89955ea5b79f3a08d3afe57e95
refs/heads/master
2023-02-18T03:50:28.288006
2021-01-18T13:23:32
2021-01-18T13:23:32
329,981,898
7
1
null
null
null
null
UTF-8
R
false
false
2,102
r
test-SFBM.R
################################################################################ test_that("can create an SFBM from a dgCMatrix", { spmat <- Matrix::rsparsematrix(1000, 1000, 0.01) X <- as_SFBM(spmat) expect_identical(X$p, spmat@p) expect_equal(dim(X), spmat@Dim) expect_equal(X$nval, length(spmat@x)) expect_equal(file.size(X$sbk), length(spmat@x) * 16) con <- file(X$sbk, open = "rb") i <- rep(0, length(spmat@x)) x <- rep(0, length(spmat@x)) for (k in seq_along(x)) { # i[k] <- readBin(con, n = 1, what = integer(), size = 4) i[k] <- readBin(con, n = 1, what = double()) x[k] <- readBin(con, n = 1, what = double()) } expect_equal(i, spmat@i) expect_identical(x, spmat@x) close(con) expect_false(X$is_saved) X <- X$save() expect_true(X$is_saved) X2 <- readRDS(X$rds) expect_equal(X2$p, X$p) expect_equal(X2$sbk, X$sbk) }) ################################################################################ test_that("can create an SFBM from a dsCMatrix", { spmat0 <- Matrix::rsparsematrix(1000, 1000, 0.01, symmetric = TRUE) X <- as_SFBM(spmat0) spmat <- as(spmat0, "dgCMatrix") X2 <- as_SFBM(spmat) expect_identical(readBin(X$sbk, what = 1, n = 1e6), readBin(X2$sbk, what = 1, n = 1e6)) expect_equal(X$p, spmat@p) expect_equal(dim(X), spmat@Dim) expect_equal(X$nval, length(spmat@x)) expect_equal(file.size(X$sbk), length(spmat@x) * 16) expect_false(X$is_saved) X <- X$save() expect_true(X$is_saved) X2 <- readRDS(X$rds) expect_equal(X2$p, X$p) expect_equal(X2$sbk, X$sbk) }) ################################################################################ test_that("products between an SFBM and a vector work", { spmat <- Matrix::rsparsematrix(1000, 1000, 0.1) X <- as_SFBM(spmat) y <- runif(1000) expect_equal(sp_prodVec(X, y), as.vector(spmat %*% y)) expect_equal(sp_cprodVec(X, y), as.vector(Matrix::crossprod(spmat, y))) }) ################################################################################
f8cab0dc128ef74fc9b15b1e21e168201335fcc4
25bf83bed09a2121375343998cad3e7fd2f6fc8e
/man/hopach2tree.Rd
ca57ebf6e4026f82e722e2cfce8963fe4754d55a
[]
no_license
keinstein/hopach
200f7bdff5dbc51465264a8e274de437f6ca583b
9a294568a5153541efd8f78461fcac337448c718
refs/heads/master
2021-01-12T11:51:07.155403
2016-05-03T21:30:44
2016-05-03T21:30:44
null
0
0
null
null
null
null
UTF-8
R
false
false
6,165
rd
hopach2tree.Rd
\name{hopach2tree} \alias{hopach2tree} \alias{makeTree} \title{function to write MapleTree files for viewing hopach hierarchical clustering results} \description{ The MapleTree software (http://mapletree.sourceforge.net/) is an open source, cross-platform, visualization tool to graphically browse results of cluster analyses. The \code{hopach2tree} function takes a data matrix, plus corresponding \code{hopach} clustering output for genes and/or arrays, and writes the (.cdt, .gtr, and .atr) files needed to view these hierarchical clustering results in MapleTree. The function \code{makeTree} is called internally by \code{hopach2tree}. } \usage{ hopach2tree(data, file = "HOPACH", hopach.genes = NULL, hopach.arrays = NULL, dist.genes = NULL, dist.arrays = NULL, d.genes = "cosangle", d.arrays = "euclid", gene.wts = NULL, array.wts = NULL, gene.names = NULL) makeTree(labels, ord, medoids, dist, side = "GENE") } \arguments{ \item{data}{data matrix, data frame or exprSet of gene expression measurements. Each column corresponds to an array, and each row corresponds to a gene. All values must be numeric. Missing values are ignored.} \item{file}{name for the output files (the extensions .cdt, .gtr and .atr will be added).} \item{hopach.genes}{output of the \code{hopach} function applied to genes (rows of \code{data}. If only arrays are clustered, hopach.genes can be NULL. There must be at least K=2 levels in the hopach final tree (ie: hopach.genes$final$labels greater than 1 digit) for a gtr file to be generated.} \item{hopach.arrays}{optional output of the \code{hopach} function applied to arrays (columns of \code{data}. There must be at least K=2 levels in the hopach final tree (ie: hopach.arrays$final$labels greater than 1 digit) for an atr file to be generated.} \item{dist.genes}{matrix of pair wise distances between all genes. All values must be numeric, and missing values are not allowed. If NULL, this matrix is computed using the metric specified by \code{d.genes}. 
Only needed if genes are clustered (hopach.genes!=NULL).} \item{dist.arrays}{matrix of pair wise distances between all arrays. All values must be numeric, and missing values are not allowed. If NULL, this matrix is computed using the metric specified by \code{d.arrays}. Only needed if arrays are clustered (hopach.arrays!=NULL).} \item{d.genes}{character string specifying the metric to be used for calculating dissimilarities between genes. The currently available options are "cosangle" (cosine angle or uncentered correlation distance), "abscosangle" (absolute cosine angle or absolute uncentered correlation distance), "euclid" (Euclidean distance), "abseuclid" (absolute Euclidean distance), "cor" (correlation distance), and "abscor" (absolute correlation distance). Advanced users can write their own distance functions and add these to the functions \code{distancematrix()} and \code{distancevector()}.} \item{d.arrays}{character string specifying the metric to be used for calculating dissimilarities between arrays.} \item{gene.wts}{an optional vector of numeric weights for the genes.} \item{array.wts}{an optional vector of numeric weights for the arrays.} \item{gene.names}{optional vector of names or annotations for the genes, which can be different from the row names of \code{data}.} \item{labels}{final cluster labels from a hopach object.} \item{ord}{final ordering from a hopach object.} \item{medoids}{final medoids matrix from a hopach object.} \item{dist}{gene or array distance matrix.} \item{side}{character string specifying if the tree is for genes ("GENE", default) or arrays ("ARRY").} } \value{ The function \code{hopach2tree} has no value. It writes up to three text files to the current working directory. A .cdt file is always produced. 
This file can be used to visualize the data matrix as a heat map in MapleTree or other viewers such as TreeView (http://rana.lbl.gov/EisenSoftware.htm), jtreeview (http://sourceforge.net/projects/jtreeview/), and GeneXPress (http://genexpress.stanford.edu/). When \code{hopach.genes}!=NULL, a .gtr is produced, and gene clustering results can be viewed, including ordering the genes in the heat map according to the final level of the \code{hopach} tree and drawing the dendrogram for hierarchical gene clustering. Similarly, when \code{hopach.arrays}!=NULL, an .atr file is produced and array clustering results can be viewed. The function \code{makeTree} is called internally by \code{hopach2tree} to make the objects needed to write the MapleTree files for a gene and/or array HOPACH clustering result. } \references{ van der Laan, M.J. and Pollard, K.S. A new algorithm for hybrid hierarchical clustering with visualization and the bootstrap. Journal of Statistical Planning and Inference, 2003, 117, pp. 275-303. \url{http://www.stat.berkeley.edu/~laan/Research/Research_subpages/Papers/hopach.pdf} } \author{Katherine S. Pollard <kpollard@gladstone.ucsf.edu>} \note{Thank you to Lisa Simirenko <lsimirenko@lbl.gov> for providing HOPACH views in MapleTree, and to Karen Vranizan <vranizan@uclink.berkeley.edu> for her input. The MapleTree software can be downloaded from: http://sourceforge.net/projects/mapletree/} \section{Warning }{Operating systems use different end of line characters. These characters can cause errors in MapleTree when files generated on one OS are visualized on another OS.
Hence, \code{hopach2tree} should be run on the same OS as MapleTree whenever possible.} \seealso{\code{\link{hopach}}, \code{\link{boothopach}}, \code{\link{bootmedoids}}, \code{\link{boot2fuzzy}}} \examples{ #25 variables from two groups with 3 observations per variable mydata<-rbind(cbind(rnorm(10,0,0.5),rnorm(10,0,0.5),rnorm(10,0,0.5)),cbind(rnorm(15,5,0.5),rnorm(15,5,0.5),rnorm(15,5,0.5))) dimnames(mydata)<-list(paste("Var",1:25,sep=""),paste("Exp",1:3,sep="")) mydist<-distancematrix(mydata,d="cosangle") #compute the distance matrix. #clusters and final tree clustresult<-hopach(mydata,dmat=mydist) #write MapleTree files hopach2tree(mydata,hopach.genes=clustresult,dist.genes=mydist) } \keyword{cluster} \keyword{multivariate}
31a08f506e13d9a263e0422f028fe83f875c26d8
a32eb8f6c0402f0d1ce059fb9edad3b75cc39904
/ratchets/R/team-types.R
0e002ec900e67274a447258de3e5c5a7b556ca66
[]
no_license
pedmiston/ratchets
322591c849aa6429f4946645caccf794cab1b923
69973708226633b76b693240ffd52fcfc36c6924
refs/heads/master
2020-06-17T15:00:28.551616
2017-02-17T15:03:44
2017-02-17T15:03:44
74,994,330
0
0
null
null
null
null
UTF-8
R
false
false
1,643
r
team-types.R
#' Assign team types by median splits on time and submissions. #' #' See also \code{\link{recode_team_type}}. #' #' @import dplyr #' @export divide_into_team_types <- function(leaderboards) { # Augment team type map to merge based on TimeSplit and SubmissionSplit team_type_map <- recode_team_type() %>% mutate( TimeSplit = c(1, 0, 0, 1), SubmissionsSplit = c(1, 1, 0, 0) ) # Remove any team type columns already in leaderboards for(map_col in colnames(team_type_map)) { if(map_col %in% colnames(leaderboards)) leaderboards[map_col] <- NULL } median_split <- function(x) as.numeric(x < median(x)) leaderboards %>% mutate( TimeSplit = median_split(TotalTime), SubmissionsSplit = median_split(TotalSubmissions) ) %>% left_join(team_type_map) %>% select(-TimeSplit, -SubmissionsSplit) # only needed for merge } #' Recode team type for labels and contrast coding. #' #' If no data is provided, the team type map is returned. #' #' @import dplyr #' @export recode_team_type <- function(frame) { team_type_levels <- c("steady", "long", "short", "rapid") contr_rel_short <- contr.treatment(n = length(team_type_levels), base = 3) %>% as_data_frame() names(contr_rel_short) <- c("ShortVSteady", "ShortVLong", "ShortVRapid") contr_rel_short$TeamType <- team_type_levels team_type_map <- data_frame( TeamType = team_type_levels, TeamLabel = factor(team_type_levels, levels = team_type_levels), TeamNum = seq_along(team_type_levels) ) %>% left_join(contr_rel_short) if (missing(frame)) return(team_type_map) left_join(frame, team_type_map) }
5a5741d36f206e87d50e6833e18bcfb4ff9cb8ac
d3653226fdc95dff7392b7c18e70e799d40b35d7
/man/parseGmt.Rd
cecde97ade48b6cacf13d82916eb1324c12cd7ea
[]
no_license
cran/GiANT
6645b847883d0433728af457d17c7751f57f914a
c82bd331a5ee768c1d3abe9caf878c11f30c4580
refs/heads/master
2021-01-21T21:43:11.150637
2020-09-29T03:30:02
2020-09-29T03:30:02
40,326,102
0
0
null
null
null
null
UTF-8
R
false
false
1,033
rd
parseGmt.Rd
\name{parse GMT files} \alias{parseGmt} \title{ Parse GMT Files } \description{ Parses a GMT file as downloadable from MSigDB (presented in Subramanian et al.) and returns a list of gene sets. } \usage{ parseGmt(file) } \arguments{ \item{file}{ A file name. } } \details{ Parses a GMT file and returns a list of gene sets. Each list element is named according to the included gene set. The gene set files can be downloaded from http://www.broadinstitute.org/gsea/msigdb. } \value{ A named \code{list} of gene sets. } \references{ Subramanian, A., Tamayo, P., Mootha, V. K., Mukherjee, S., Ebert, B. L., Gillette, M. A., Paulovich, A., Pomeroy, S. L., Golub, T. R., Lander, E. S., Mesirov, J. P. (2005) Gene set enrichment analysis: a knowledge-based approach for interpreting genome-wide expression profiles. \emph{Proceedings of the National Academy of Science of the United States of America}, \bold{102}, 15545--15550. } \seealso{ \code{\link{geneSetAnalysis}}, \code{\link{predefinedAnalyses}}, \code{\link{gsAnalysis}} }
94fbed515275e5eb48958d3c3f1d84dbab2506e0
f8393f4a20b0e1195ad41e8c326ab565ef281195
/Heatmap/MODUSHeatmap/MODUSHeatmap_Whitesheet.R
1fea9f736e822a5e180e5b21b270ee9256d2f1be
[]
no_license
maxgotts/RubensteinMpalaMaps
bcb3609df7a30a69d5f726abc84ba0da1a7aae10
b862878d6f3d09118ef4c24906d60d85152d23f8
refs/heads/master
2023-07-17T02:38:58.306930
2021-09-05T20:10:38
2021-09-05T20:10:38
381,844,819
0
0
null
null
null
null
UTF-8
R
false
false
2,115
r
MODUSHeatmap_Whitesheet.R
# Build simplified per-species sighting tables (lat/long/count/date) from the
# converted whitesheet data, for use as heat-map input.
# NOTE: reads and writes hard-coded paths under ~/Desktop/MPALA.
library(lubridate)
library(dplyr)

## Useful functions

# Convert a "military" time stored as a number (e.g. 930, 1445) into a
# 24-hour "H:MM"/"HH:MM" string (e.g. "9:30", "14:45").
# Not called below; kept for interactive use.
military_to_24 <- function(military) { # military is a number
  digits <- strsplit(paste0(military), "")[[1]]
  # Pad 3-digit times (e.g. 930) to 4 digits with a leading zero.
  if (length(digits) == 3) digits <- c("0", digits)
  parts <- c(digits[1], digits[2], ":", digits[3], digits[4])
  # Drop a leading zero hour so "0930" renders as "9:30".
  if (parts[1] == "0") parts <- parts[2:5]
  paste(parts, collapse = "")
}

whitesheets <- read.csv("~/Desktop/MPALA/Whitesheets/ConvertedWhitesheets.csv")

# Create a simplified sheet for a basic heat map.
ws <- whitesheets[, c("Date", "Time", "Latitude", "Longitude", "Species",
                      "Multispecies", "Activity", "Grass.height", "Grass.color",
                      "Grass.spp.1", "Grass.spp.2", "Grass.spp.3", "Grass.spp.4",
                      "Bush.type", "Total.animals")]

# Drop rows without a time or an animal count.
df <- filter(ws, !is.na(Time), !is.na(Total.animals))
df$Date <- mdy(df$Date)
# Sub-linear transform of the head count (used as a plotting weight).
df$Population <- (4 / 75) * (df$Total.animals)^0.75

df_simple <- df[, c("Latitude", "Longitude", "Total.animals", "Date",
                    "Species", "Population")]

# Species-code groupings.
camel.abbr <- c("Camel", "ZC", "Comm_Camel")
cattle.abbr <- c("Cattle", "CKC", "CC", "MC")
zebra.abbr <- c("GZ", "PZ")

# Write one CSV per species group (same five outputs as before, without the
# five near-identical write.csv calls).
out_dir <- "~/Desktop/MPALA/Maps/Heatmap/MODUSHeatmap"
subsets <- list(
  GZ = filter(df_simple, Species == "GZ"),
  PZ = filter(df_simple, Species == "PZ"),
  Cattle = filter(df_simple, Species %in% cattle.abbr),
  Zebras = filter(df_simple, Species %in% zebra.abbr),
  Camels = filter(df_simple, Species %in% camel.abbr)
)
for (nm in names(subsets)) {
  write.csv(subsets[[nm]],
            file.path(out_dir, paste0("MODUSHeatmap_Whitesheet_", nm, ".csv")),
            row.names = FALSE)
}

# Never executed; kept as a convenience snippet for re-sourcing this script.
if (FALSE) {
  source("~/Desktop/MPALA/Maps/Heatmap/MODUSHeatmap/MODUSHeatmap_Whitesheet.R")
}
7fdd8f334fadd84c77395ba5576f9db893263d4d
6a11db10db1f055e737911611760ff1335b8e554
/R/locale.R
ea5d5d1e143673aa7cdffa1d273b837ee811a1d7
[]
no_license
cran/assertive.reflection
947258eb8406a8dd0804a9d0a4c2ca95b25aa9af
c7b09e7fafddc9a8b6845abca45bbd05fb95e87b
refs/heads/master
2021-05-04T11:23:00.367866
2020-07-31T00:00:14
2020-07-31T00:00:14
48,076,808
0
0
null
null
null
null
UTF-8
R
false
false
2,749
r
locale.R
#' Get or set the system locale
#'
#' Wrappers to \code{Sys.getlocale} and \code{Sys.setlocale} for getting and
#' setting the system locale.
#'
#' @param simplify If \code{TRUE}, the locale settings are returned as a
#' character vector, otherwise a list.
#' @param remove_empty_categories if \code{TRUE}, don't include empty categories.
#' @param ... Name-value pairs of locale categories to set.
#' @param l A list, as an alternative method of passing locale categories to set.
#' @return A named list or vector giving the system locale names.
#' \code{sys_set_locale} invisibly returns the locale settings *before* making
#' changes (like \code{setwd} and \code{options} do).
#' @examples
#' (current_locale <- sys_get_locale())
#'
#' # Output simplified to character vector
#' sys_get_locale(simplify = TRUE)
#' \dontrun{
#' # Not run since it (temporarily) affects system settings
#' english <- if(is_windows()) "English.United_Kingdom" else
#'   if(is_mac()) "en_GB" else
#'   if(is_linux()) "en_GB.utf8" else
#'   "en"
#' sys_set_locale(LC_MONETARY = english)
#' sys_get_locale()
#' sys_set_locale(l = current_locale) #restore everything
#' }
#' @seealso \code{\link[base]{Sys.getlocale}}.
#' @export
sys_get_locale <- function(simplify = FALSE, remove_empty_categories = TRUE)
{
  categories <- c(
    "LC_COLLATE", "LC_CTYPE", "LC_MONETARY", "LC_NUMERIC", "LC_TIME",
    "LC_MESSAGES", "LC_PAPER", "LC_MEASUREMENT"
  )
  # Name the vector by itself so lapply() returns a fully named list.
  names(categories) <- categories
  locale <- lapply(categories, Sys.getlocale)
  if(remove_empty_categories)
  {
    # Categories this platform does not support come back as "".  Use an
    # explicit vapply() rather than nzchar(locale), which relied on an
    # implicit list -> character coercion.
    locale <- locale[vapply(locale, nzchar, logical(1))]
  }
  if(simplify)
  {
    unlist(locale)
  } else
  {
    locale
  }
}

#' @rdname sys_get_locale
#' @importFrom assertive.base merge_dots_with_list
#' @export
sys_set_locale <- function(..., l = list())
{
  # Capture the current settings first so the caller can restore them later.
  old_locale <- sys_get_locale()
  values <- merge_dots_with_list(..., l = l)
  categories <- names(values)
  # match.arg() with several.ok allows partially matched category names and
  # errors on invalid ones.
  categories <- match.arg(
    categories,
    locale_categories(),
    several.ok = TRUE
  )
  for(i in seq_along(values))
  {
    Sys.setlocale(categories[i], values[[i]])
  }
  invisible(old_locale)
}

#' Allowed locale categories.
#'
#' The categories of locale that can be gotten/set.
#'
#' @param include_all If \code{TRUE}, the value \code{LC_ALL} is included.
#' @param include_unix If \code{TRUE}, the extra unix-only values are included.
#' @return A character vector of locale categories.
#' @seealso \code{\link{sys_get_locale}}.
#' @noRd
locale_categories <- function(include_all = TRUE, include_unix = is_unix())
{
  allowed_categories <- c(
    if(include_all) "ALL",
    "COLLATE", "CTYPE", "MONETARY", "NUMERIC", "TIME",
    if(include_unix) c("MESSAGES", "PAPER", "MEASUREMENT")
  )
  paste0("LC_", allowed_categories)
}
85d78bbc0beca5d60040528443a2df5d22365bd0
97dd874e245ef075bb2d573d27023ce2b5a1b9c3
/man/gains_chart.Rd
bd6c4f5865d2764464a1c92de18a9255891f03b9
[]
no_license
ISS-Analytics/gainslift
af0900ac358700a36dd15270c567576de9e6f605
8597fefcedefd71bd30357b1aa090ba28e752fce
refs/heads/master
2020-05-24T10:47:14.696216
2019-06-03T15:32:48
2019-06-03T15:32:48
176,554,571
0
0
null
null
null
null
UTF-8
R
false
true
307
rd
gains_chart.Rd
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/plotSettings.R \name{gains_chart} \alias{gains_chart} \title{plotFunctions Functions to plot identified charts} \usage{ gains_chart(data, title, xlabel, ylabel) } \description{ plotFunctions Functions to plot identified charts }
79ecf9989bfffaa7d24e39b2fc89b36be0aeda25
227ea96bda4039edae16a8765cf45dd5db21b475
/202-multivariet-regression.R
06f7f13b2438ddfc75a1a09062233b7d674b18c3
[]
no_license
data-stache/covid-tracking
4764883a0085e075120c39cadb1a5a2023985af5
5d3f6e74a5e1356a0afbb758529f2023be803ff0
refs/heads/main
2023-04-01T11:23:52.328619
2021-04-02T14:39:52
2021-04-02T14:39:52
303,205,157
0
0
null
null
null
null
UTF-8
R
false
false
2,402
r
202-multivariet-regression.R
load("rda/mobility.rda")
load("rda/weather.rda")
load("rda/covid.rda")

# 7-day rolling mean (NA-tolerant).  `align` controls whether the window
# looks forward ("left") or backward ("right") from each day.  Replaces
# eight copy-pasted rollapply() calls.
roll_mean7 <- function(v, align) {
  rollapply(v, width = 7, FUN = function(x) mean(x, na.rm = TRUE),
            by = 1, by.column = TRUE, partial = TRUE, fill = NA, align = align)
}

# REGRESSION DF: join per-capita cases with mobility and weather, then
# smooth each series with a 7-day rolling mean within each state.
dat <- covid %>%
  select(date, state, state_name, new_cases_percap) %>%
  inner_join(mobility) %>%
  select(1:10) %>%
  inner_join(weather_usa) %>%
  select(1:11) %>%
  group_by(state) %>%
  mutate(
    cases = roll_mean7(new_cases_percap, "left"),   # forward-looking window
    ret   = roll_mean7(retail_recreation, "right"), # backward-looking windows
    groc  = roll_mean7(grocery_pharmacy, "right"),
    parks = roll_mean7(parks, "right"),
    trans = roll_mean7(transit, "right"),
    work  = roll_mean7(workplace, "right"),
    res   = roll_mean7(residential, "right"),
    temp  = roll_mean7(tavg, "right")
  )

# Per-state correlations between smoothed cases and each covariate (April
# 2020 onward), then averaged across states.  Renamed from `cor`, which
# shadowed base::cor().
cor_summary <- dat %>%
  filter(!is.na(parks) & !is.na(trans) & date >= ymd(20200401)) %>%
  summarise(cor_ret = cor(cases, ret),
            cor_groc = cor(cases, groc),
            cor_parks = cor(cases, parks),
            cor_trans = cor(cases, trans),
            cor_work = cor(cases, work),
            cor_res = cor(cases, res),
            cor_temp = cor(cases, temp)) %>%
  summarise(cor_ret = mean(cor_ret),
            cor_groc = mean(cor_groc),
            cor_parks = mean(cor_parks),
            cor_trans = mean(cor_trans),
            cor_work = mean(cor_work),
            cor_res = mean(cor_res),
            cor_temp = mean(cor_temp, na.rm = TRUE))
cor_summary

# Smoothed cases vs temperature (all states pooled).
dat %>%
  filter(date >= ymd(20200301)) %>%
  ggplot(aes(x = temp, y = cases)) +
  geom_point(alpha = .5) +
  geom_smooth()

# Smoothed cases vs transit mobility, faceted by state.
dat %>%
  filter(date >= ymd(20200401)) %>%
  ggplot(aes(x = trans, y = cases)) +
  geom_point(alpha = .5) +
  geom_smooth() +
  facet_wrap(. ~ state)
d318538dc5ac7349156695ced19e4fa500252505
04ab1edfb4b08adabbf89eb8011b78c35c6f4e0f
/scripts/08_getAndParseMiscFeatures.R
23a3719fa24c5f54c51d78d65b5c63c8bec993ec
[]
no_license
alanlorenzetti/tlr_v2
b4bc9d7c00580dd08015273957e919a35cc9b4df
ea4c3e2701cbd5e6357fac83b4f764e6b7969d30
refs/heads/master
2023-05-27T11:57:20.296063
2021-06-08T22:17:02
2021-06-08T22:17:02
290,974,388
0
0
null
null
null
null
UTF-8
R
false
false
1,533
r
08_getAndParseMiscFeatures.R
# alorenzetti 202008 # description #### # this script will get/load # a few additional misc features # LSm binding sites # UTR info # GC content # processing starts #### # getting and parsing LSm binding info # here I will load the table, remove the asRNAs # and lookup into genes and 5UTRs # if LSm binding sites are detected # in genes or 5UTRs, sense or antisense, # that will be taken into consideration lsmGenes = read_tsv(file = "data/interactionListNRTX.tsv") %>% select(name = representative, lsmSense = LSmInteraction, lsmAntiSense = LSmInteractionAS) %>% mutate(lsmSense = case_when(lsmSense == "Sim" ~ "yes", TRUE ~ "no"), lsmAntiSense = case_when(lsmAntiSense == "Sim" ~ "yes", TRUE ~ "no")) # # getting and parsing info about UTRs # utr = read_delim(file = "data/5UTR.txt", # delim = "\t", # col_names = T) %>% # dplyr::select(-length) # # # getting and parsing info about UTR mfe # utrmfe = read_delim(file = "data/5UTRplus18ntdownstream_mfe.txt", # delim = "\t", # col_names = T) %>% # dplyr::select(-seq,-dotbracket) # getting GC content info for each gene GCcontent = tibble(locus_tag = names(nrtxseqs) %>% sub("\\|.*$", "", .), GC = nrtxseqs %>% letterFrequency(letters = "GC", as.prob = T) %>% as.numeric(), GCdev = GC - mean(GC))
2871528610f4ef2a4f92348877b3311f09eb7085
04b81a5bafa79294872b828bc9760065963a5f27
/preprocess_nna/add_labels.R
a7440c1ea989a9e0c92d2624a421884492208d44
[ "CC0-1.0" ]
permissive
MinCiencia/ECQQ
34088ab1cb625f01f5f62ea6594e07667fcf46f7
f93a01ce2dd140d073bd81afb9b4733c1d8a34c3
refs/heads/main
2023-08-15T06:46:05.025570
2021-10-05T13:23:09
2021-10-05T13:23:09
413,440,130
4
1
null
null
null
null
UTF-8
R
false
false
1,177
r
add_labels.R
library(tidyverse) library(ggplot2) library(readxl) # obtenemos datos mesa var_diags <- read_excel("input/nna_resulta.xlsx", sheet = "base") %>% select(ORDEN,everything()) %>% janitor::clean_names() var_diags$organizacion <- tolower(var_diags$organizacion) sum(is.na(var_diags$organizacion)) var_diags$rango_edades_id[var_diags$rango_edades_id==1] <- "4 a 5" var_diags$rango_edades_id[var_diags$rango_edades_id==2] <- "6 a 9" var_diags$rango_edades_id[var_diags$rango_edades_id==3] <- "10 a 13" var_diags$rango_edades_id[var_diags$rango_edades_id==4] <- "14 a 18" var_diags$rango_edades_id[var_diags$rango_edades_id==5] <- "Todos" var_diags$contexto_id[var_diags$contexto_id==1] <- "Establecimiento Educacional" var_diags$contexto_id[var_diags$contexto_id==2] <- "Consejo Consultivo" var_diags$contexto_id[var_diags$contexto_id==3] <- "Oficina de Protección de Derechos" var_diags$contexto_id[var_diags$contexto_id==4] <- "Oficina Local de la Niñez" var_diags$contexto_id[var_diags$contexto_id==5] <- "Programa Sename" var_diags$contexto_id[var_diags$contexto_id==6] <- "Otro" writexl::write_xlsx(var_diags,"temp/var_diags2_inst.xlsx")
5d9d46e49e67a8f35cb7ebb816aa0bcafbdb6a02
6d4fe0d45fcb7e7f7bc0238abf09d2b6ff2dc566
/man/move_all.Rd
d5f6246aa73a479d34608968842184b5579cae8f
[]
no_license
anhnguyendepocen/simachine
a1f584c8d5df145f76bd54a4dc0f9eaff348fca3
853aa22fced505dbf09e51b9cef405ddb1b7cb19
refs/heads/master
2020-03-29T12:19:53.129901
2016-12-07T19:24:14
2016-12-07T19:24:14
null
0
0
null
null
null
null
UTF-8
R
false
true
376
rd
move_all.Rd
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/move.R \name{move_all} \alias{move_all} \title{Move all objects from one environment to another} \usage{ move_all(from, to) } \arguments{ \item{from}{environment to pull objects from.} \item{to}{environment to write objects to.} } \description{ Move all objects from one environment to another }
e84a223660952c21a1aa5fe2fce2659414a5371a
4e929f4a92a2533e713b87adb06f773748814126
/R/RProjects/HITHATStats/R/mh19.R
452bcf0fabd73d02a0dc45632337582f0a32de41
[ "LicenseRef-scancode-warranty-disclaimer" ]
no_license
jlthomps/EflowStats
f4fe56f17cb675bcc1d618bc838003c2f2e9f81b
016c9cb65a2f13a041af3eb87debb4f83793238a
refs/heads/master
2021-01-01T05:33:44.189671
2013-12-04T23:30:21
2013-12-04T23:30:21
null
0
0
null
null
null
null
UTF-8
R
false
false
1,007
r
mh19.R
#' Function to return the MH19 hydrologic indicator statistic for a given data frame #' #' This function accepts a data frame that contains columns named "discharge" and "year_val" and #' calculates the skewness in log of annual maximum flows. #' #' @param x data frame containing a "discharge" column containing daily flow values #' @return mh19 numeric value of the skewness of log of maximum annual flows for the given data frame #' @export #' @examples #' load_data<-paste(system.file(package="HITHATStats"),"/data/obs_data.csv",sep="") #' x<-read.csv(load_data) #' mh19(x) mh19 <- function(x) { annmax <- aggregate(x$discharge,list(x$wy_val),FUN=max,na.rm=TRUE) log_disch <- log10(annmax$x) sumq3 <- sum(log_disch^3) sumq2 <- sum(log_disch^2) sumq <- sum(log_disch) num_years <- length(unique(x$wy_val)) qstd <- sd(log_disch) mh19 <- ((num_years*num_years*sumq3) - (3*num_years*sumq*sumq2) + (2*sumq*sumq*sumq))/(num_years*(num_years-1)*(num_years-2)*qstd*qstd*qstd) return(mh19) }
25f9b48f7a54c71100d1e3e45de5c9aa4d63fedd
6b6b90966a1db5f00240ace9425c608900de149c
/amazon_clustering.r
898dbc0168009e013397feef86aad24b410e9832
[]
no_license
ambarket/comp597
e78716284fd27a8d8ad0bc3d8fa3e235c20a6c5b
eddce73de18df6250e1503f18d63b663008cc05f
refs/heads/master
2021-01-01T20:00:01.395463
2015-05-01T08:20:40
2015-05-01T08:20:40
32,291,850
0
0
null
null
null
null
UTF-8
R
false
false
1,971
r
amazon_clustering.r
rm(list = ls()) setwd("C:\\Users\\ambar_000\\Desktop\\597\\Amazon dataset") rm(list = ls()) #amazon <- read.csv("Amazon_Instant_Video_no_text2.csv") amazon_instant_video <- read.csv("Amazon_Instant_Video.csv") str(amazon) table(count.fields("Amazon_Instant_Video_no_text2.csv", sep=",", quote="")) txt <-readLines("output.csv")[which(count.fields("output.csv", quote="", sep=",") == 19)] load("amazonInstantVideo.rdata") convertNAsToZero <- function(v) { ifelse(is.na(v),0,v) } ptm <- proc.time() amazonNoNA = as.data.frame(apply(amazon,2,convertNAsToZero)) proc.time() - ptm amazonSample <- t(amazonNoNA[sample(1:nrow(amazonNoNA), nrow(amazonNoNA)/10, replace=FALSE),]) calcDistForRowAndAllOthers <- function (row) { apply(amazonSample, 1, calcDistForTwoRows, row) } calcDistForTwoRows <- function(row1,row2) { intersectionEx1Ex2 <- sum(pmin(row1,row2)) unionEx1Ex2 <- sum(row1) + sum(row2) - intersectionEx1Ex2 intersectionEx1Ex2 / unionEx1Ex2 } do <- apply (amazonSample,1, calcDistForRowAndAllOthers) clusterTree <- hclust(as.dist(do),method="ward.D") plot(clusterTree, labels = NULL, hang = 0.1, check = TRUE, axes = TRUE, frame.plot = FALSE, ann = TRUE, main = "Cluster Dendrogram", sub = NULL, xlab = NULL, ylab = "Height"); amazon$product.productId = NULL sampled <- amazon[sample(nrow(amazon), 10000), ] d <- dist(sampled, method = "euclidean") clusters <- hclust(d, method = "ward.D") plot(clusters, lebels = NULL, hang = 0.1, check = TRUE, axes = TRUE, frame.plot = FALSE, ann = TRUE, main = "Cluster Dendrogram", sub = NULL, sxlab = NULL, ylab = "Height") groups <- cutree(clusters, k = 4) png("clusplot.png") clusplot(sampled, groups, color=TRUE, shade=TRUE, labels=2, lines=0) dev.off() pamCLust <- pam(sampled, 3, metric="euclidean") png("clusplot.png") clusplot(sampled, pamCLust$cluster, color=TRUE, shade=TRUE, labels=2, lines=0) dev.off()
5a899990bfe1eae94bbcced3cd11eeac2110265a
6e4cf749acf3c491bd76e95a462aa97aae0b1a7f
/man/merge2nodes.Rd
99255511c3872dd507b6990cdbcba3c3546bb31e
[]
no_license
cran/gRapfa
56c44e6cbad56ab6e13700082615477cc167467d
69adad4efb0cfca0b5b312c4aca1ab33b634b5a1
refs/heads/master
2021-01-02T08:52:35.659438
2014-04-10T00:00:00
2014-04-10T00:00:00
null
0
0
null
null
null
null
UTF-8
R
false
false
1,883
rd
merge2nodes.Rd
\name{merge2nodes} \alias{merge2nodes} %\alias{MergeNodes} \title{ Merge two nodes } \description{ Calculates various quantities in connection with merging two nodes in a level of a sample tree. } \usage{ merge2nodes(NS, mnode, test = TRUE, get.map = FALSE, doMerge = FALSE) } \arguments{ \item{NS}{ \code{NS} is a node by symbol array, the 1st half of the columns are node ids, the 2nd half the edge counts. When the corresponding edge is absent, the edge id is set to 0. } \item{mnode}{ \code{mnode} is a vector of nodes to be merged, specified as vertex ids (rather than names). Required to be of length two. } \item{test}{ If \code{test=TRUE}, the deviance and df associated with the merging are returned. } \item{get.map}{ If \code{get.map=TRUE}, a map is returned. } \item{doMerge}{ If \code{doMerge=TRUE}, \code{NS} returned is the node by symbol array after merging (used in \code{MergeNodes}) } } \value{ A list of computed quantities \item{mmat }{An integer matrix containing the nodes to be merged (the original and the induced).} \item{map}{A integer vector of length vcount(G)) containing the vertex ids of the vertices after merging} \item{devtest}{A numeric vector of length two containg the degrees of freedom and deviance associated with the merging} \item{NS}{A node by symbol array representing the result of the merging} } \references{ Ankinakatte, S. and Edwards, D. (2014?) Modelling discrete longitudinal data using acyclic probabilistic finite automata. Submitted to C.S.D.A. } \author{ Smitha Ankinakatte and David Edwards } %%\note{ %%further notes. %%} %\seealso{ %\code{link{apfa2NS}, link{MergeNodes}}, \code{\link{merge.select}} %} \examples{ library(gRapfa) data(Wheeze) G <- st(Wheeze) G.c <- contract.last.level(G) NS <- apfa2NS(G.c) n2n <- merge2nodes(NS, c(5,3)) }
204c5c4403ac29e9d09828ca1d4d6839d6201a53
745d585395acad1376d84f8ca1284c13f2db70f0
/tests/testthat/test-replace_column.R
226987f8a798d7874c79bbf4905f5a9565b8f70a
[]
no_license
pik-piam/quitte
50e2ddace0b0e2cbfabf8539a0e08efe6bb68a0b
4f5330695bd3d0e05d70160c1af64f0e436f89ea
refs/heads/master
2023-08-20T04:15:16.472271
2023-08-09T08:14:32
2023-08-09T08:14:32
206,053,101
0
8
null
2023-08-09T08:14:34
2019-09-03T10:39:07
R
UTF-8
R
false
false
4,213
r
test-replace_column.R
context('replace_column()') test_that( 'Test replace_column() results', { # ---- simple example with matching old and match column names ---- model_data <- data.frame( model = c('Model1', '2ndModel', 'Model Three'), region = c('Region 1', 'Region 2', 'Region 1'), value = 1:3, stringsAsFactors = FALSE) mask <- data.frame( model = c('Model1', '2ndModel', 'Model Three', 'fourth Model'), clear_name = paste('Model', 1:4), stringsAsFactors = FALSE) expect_equal( object = replace_column(model_data, mask, model, clear_name), expected = data.frame(model = paste('Model', 1:3), region = paste('Region', c(1, 2, 1)), value = 1:3, stringsAsFactors = FALSE) ) # ---- mismatched column names ---- model_data <- data.frame( model = c('Model1', '2ndModel', 'Model Three', 'fourth Model'), region = c('Region 1', 'Region 2', 'Region 1', 'Region 2'), value = 1:4, stringsAsFactors = FALSE) mask <- data.frame( ugly_name = c('Model1', '2ndModel', 'Model Three'), clear_name = paste('Model', 1:3), stringsAsFactors = FALSE) expect_equal( object = replace_column(model_data, mask, model = ugly_name, clear_name), expected = data.frame( model = c(paste('Model', 1:3), NA), region = paste('Region', c(1, 2, 1, 2)), value = 1:4, stringsAsFactors = FALSE) ) # ---- SE example ---- expect_equal( object = replace_column_(model_data, mask, 'model', 'ugly_name', 'clear_name'), expected = data.frame( model = c(paste('Model', 1:3), NA), region = paste('Region', c(1, 2, 1, 2)), value = 1:4, stringsAsFactors = FALSE) ) # ---- dropping the extra entries in model ---- expect_equal( object = replace_column(model_data, mask, model = ugly_name, clear_name, drop.extra = TRUE), expected = data.frame(model = paste('Model', 1:3), region = paste('Region', c(1, 2, 1)), value = 1:3, stringsAsFactors = FALSE) ) # ---- also works on quitte objects ---- quitte <- tibble(model = c('Model1', '2ndModel'), scenario = 'Scenario', region = 'Region', variable = 'Variable', unit = 'Unit', period = 2010, value = 1:2) %>% as.quitte() 
expect_equal( object = replace_column(quitte, mask, model = ugly_name, clear_name), expected = tibble(model = paste('Model', 1:2), scenario = 'Scenario', region = 'Region', variable = 'Variable', unit = 'Unit', period = 2010, value = 1:2) %>% as.quitte() ) }) test_that( desc = 'replace_column() warning for ambiguous mask', code = { model_data <- data.frame( model = c('Model1', '2ndModel', 'Model Three'), region = c('Region 1', 'Region 2', 'Region 3'), value = 1:3, stringsAsFactors = FALSE) mask <- data.frame( model = c('Model1', '2ndModel', 'Model Three')[c(1, 2, 3, 3, 2)], clear_name = paste('Model', 1:5), stringsAsFactors = FALSE) expect_warning( object = replace_column(model_data, mask, model, clear_name), regexp = 'No unambiguous match for.*') expect_failure( expect_warning( object = replace_column(model_data, mask, model, clear_name, ignore.ambiguous.match = TRUE), regexp = 'No unambiguous match for.*') ) mask <- data.frame( model = c('Model1', '2ndModel', 'Model Three')[c(1, 2, 3)], clear_name = paste('Model', 1:3), stringsAsFactors = FALSE) expect_failure( expect_warning( object = replace_column(model_data, mask, model, clear_name), regexp = 'No unambiguous match for.*') ) } )
5556e8a8a1d2ca87c0dcd0ee846d1c88d5cdb5da
aa684167a345fdd921969c0d66e6ec98166990fb
/tests/testthat/test-rogue.R
163e40030d7b1aa82f885feccc9923def8c72457
[ "MIT" ]
permissive
ashther/ashr.rogue
ae6b19bf59cbd0688e61bfbd8c2ef61b5957e4b0
adbde55c9963d95aa61f0b97b750371b1b990ae3
refs/heads/master
2020-04-23T01:03:55.521301
2019-08-02T08:47:05
2019-08-02T08:47:05
170,791,022
0
0
null
null
null
null
UTF-8
R
false
false
8,045
r
test-rogue.R
context("test-Rogue") test_that("can create a rogue instance", { expect_silent({rogue <- Rogue$new()}) expect_equal(rogue$proxy, NULL) expect_equal(rogue$useragent, NULL) expect_equal(rogue$iter_max, 1) expect_equal(rogue$is_quite, FALSE) expect_equal(rogue$is_random, FALSE) expect_equal(rogue$is_record, FALSE) proxys <- list(list(ip = '1.1.1.1', port = 1), list(ip = '2.2.2.2', port = 2), list(ip = '3.3.3.3', port = 3)) proxys_long <- lapply(letters[1:11], function(x) { list(ip = x, port = 1) }) proxys_df <- do.call( rbind, lapply(proxys, function(x) data.frame(ip = x$ip, port = x$port, stringsAsFactors = FALSE)) ) proxys_modify <- lapply(proxys, function(x) { x$times <- 1 x }) useragents <- c('ua1', 'ua2', 'ua3') expect_silent({rogue <- Rogue$new(proxys, useragents)}) expect_equal(rogue$proxy, proxys_modify) expect_equal(rogue$useragent, useragents) expect_equal(rogue$iter_max, length(proxys)) expect_silent({rogue <- Rogue$new(proxys_long)}) expect_equal(length(rogue$proxy), 11) expect_equal(rogue$iter_max, 10) expect_silent({rogue <- Rogue$new(proxys_df)}) expect_equal(rogue$proxy, proxys_modify) expect_equal(rogue$iter_max, length(proxys)) expect_error(Rogue$new('test'), 'proxy must be list or data.frame') expect_error(Rogue$new(list(list(port = 1))), 'proxy must have ip and port at least') expect_error(Rogue$new(list(list(ip = '1'))), 'proxy must have ip and port at least') expect_error(Rogue$new(list(list(ip = 1, port = 1))), 'ip must be character, not double') expect_error(Rogue$new(list(list(ip = '1', port = '1'))), 'port must be integer or numeric, not character') temp <- list(list(ip = '1', port = 1, times = 1)) expect_silent({rogue <- Rogue$new(temp)}) expect_equal(rogue$proxy, temp) expect_error(Rogue$new(list(list(ip = '1', port = 1, times = '1'))), 'times must be integer or numeric, not character') temp <- replicate(2, list(ip = '1', port = 1), FALSE) expect_silent({rogue <- Rogue$new(temp)}) expect_equal(rogue$proxy, list(list(ip = '1', port = 1, 
times = 1))) expect_equal(rogue$iter_max, 1) }) test_that("can add and show proxy", { rogue <- Rogue$new() temp <- rogue$.__enclos_env__$private$proxySetdiff(list(), list(1)) expect_equal(temp, list()) temp <- rogue$.__enclos_env__$private$proxySetdiff(list(1), list()) expect_equal(temp, list(1)) temp <- rogue$.__enclos_env__$private$proxySetdiff( list(list(ip = 1, port = 1), list(ip = 2, port = 2)), list(list(ip = 2, port = 2)) ) expect_equal(temp, list(list(ip = 1, port = 1))) temp <- rogue$.__enclos_env__$private$proxySetdiff( list(list(ip = 1, port = 1), list(ip = 2, port = 2)), list(list(ip = 3, port = 3)) ) expect_equal(temp, list(list(ip = 1, port = 1), list(ip = 2, port = 2))) proxys <- list(list(ip = '1', port = 1), list(ip = '2', port = 2), list(ip = '3', port = 3)) proxys_modify <- lapply(proxys, function(x) { x$times <- 1 x }) expect_silent(rogue$proxy_add(proxys)) expect_equal(rogue$proxy, proxys_modify) expect_warning(rogue$proxy_add(list()), 'no new proxy to add') expect_warning(rogue$proxy_add(proxys), 'no new proxy to add') expect_silent(rogue$proxy_add(list(list(ip = '4', port = 4)))) expect_equal(length(rogue$proxy), 4) expect_silent(rogue$proxy_add(list(list(ip = '4', port = 4), list(ip = '5', port = 5)))) expect_equal(length(rogue$proxy), 5) expect_silent(rogue$proxy_add(list(list(ip = '6', port = 6)), delete = TRUE)) expect_equal(length(rogue$proxy), 5) rogue$proxy <- lapply(rogue$proxy, function(x){ x$times <- 2 x }) expect_silent(rogue$proxy_add(list(list(ip = '7', port = 7)), delete = TRUE)) expect_equal(length(rogue$proxy), 6) expect_silent(rogue$proxy_add(list(list(ip = '8', port = 8), list(ip = '9', port = 9)), delete = TRUE)) expect_equal(length(rogue$proxy), 7) expect_is(rogue$proxy_show(), 'data.frame') expect_named(rogue$proxy_show(), c('ip', 'port', 'times')) rogue <- Rogue$new() expect_is(rogue$proxy_show(), 'data.frame') expect_named(rogue$proxy_show(), c('ip', 'port', 'times')) expect_equal(nrow(rogue$proxy_show()), 0) }) 
test_that('can use proxy and useragent', { rogue <- Rogue$new() expect_silent({temp <- rogue$get('http://httpbin.org/get')}) expect_equal(status_code(temp), 200) expect_silent({temp <- rogue$post('http://httpbin.org/post')}) expect_equal(status_code(temp), 200) expect_silent({temp <- rogue$put('http://httpbin.org/put')}) expect_equal(status_code(temp), 200) expect_silent({temp <- rogue$delete('http://httpbin.org/delete')}) expect_equal(status_code(temp), 200) temp <- rogue$get('http://httpbin.org/get', httr::authenticate('u', 'p')) expect_equal(content(temp)$header$Authorization, 'Basic dTpw') proxy <- list(list(ip = '121.33.220.158', port = 808)) rogue <- Rogue$new(proxy) temp <- rogue$get('http://httpbin.org/ip') expect_true(proxy[[1]]$ip %in% trimws(unlist(strsplit(content(temp)$origin, ',')))) rogue <- Rogue$new(useragent = 'test user') temp <- rogue$get('http://httpbin.org/user-agent') expect_equal(content(temp)$`user-agent`, 'test user') }) test_that('can select proxys', { proxys <- unname(mapply(function(i, p) { list(ip = i, port = p) }, letters[1:6], 1:6, SIMPLIFY = FALSE)) # iter_max samller, random rogue <- Rogue$new(proxys, is_random = TRUE) set.seed(1) selected <- sapply(rogue$.__enclos_env__$private$proxySelect(), `[[`, 'ip') set.seed(1) temp <- sapply(sample(proxys, 6), `[[`, 'ip') expect_equal(selected, temp) proxys_modify <- lapply(proxys, function(x) { x$times <- sample.int(100, 1) x }) # iter_max samller, not random rogue <- Rogue$new(proxys_modify, is_random = FALSE) set.seed(1) selected <- sapply(rogue$.__enclos_env__$private$proxySelect(), `[[`, 'ip') expect_true(any(selected != temp)) set.seed(1) temp <- sapply( sample(proxys, 6, prob = sapply(proxys_modify, `[[`, 'times')), `[[`, 'ip' ) expect_equal(selected, temp) # iter_max larger, random suppressWarnings({ rogue <- Rogue$new(proxys, is_random = TRUE, iter_max = 10) }) set.seed(1) selected <- sapply(rogue$.__enclos_env__$private$proxySelect(), `[[`, 'ip') expect_equal(length(selected), 10) 
set.seed(1) temp <- sapply(sample(proxys, 10, replace = TRUE), `[[`, 'ip') expect_equal(selected, temp) # iter_max larger, not random suppressWarnings({ rogue <- Rogue$new(proxys_modify, is_random = FALSE, iter_max = 15) }) set.seed(1) selected <- sapply(rogue$.__enclos_env__$private$proxySelect(), `[[`, 'ip') expect_equal(length(selected), 15) set.seed(1) temp <- sapply(unlist(append( replicate( 2, sample(proxys, 6, prob = sapply(proxys_modify, `[[`, 'times')), FALSE ), list(sample(proxys, 3, prob = sapply(proxys_modify, `[[`, 'times'))) ), recursive = FALSE), `[[`, 'ip') expect_equal(selected, temp) }) test_that('can confirm good and bad', { proxys <- unname(mapply(function(i, p) { list(ip = i, port = p) }, letters[1:6], 1:6, SIMPLIFY = FALSE)) proxy_good <- list(ip = 'a', port = 1) proxy_not_exist <- list(ip = 'a', port = 2) rogue <- Rogue$new(proxys) expect_silent(rogue$.__enclos_env__$private$proxyGoodConfirm(proxy_good)) expect_equal(sapply(rogue$proxy, `[[`, 'times'), c(2, rep(1, 5))) rogue$.__enclos_env__$private$proxyGoodConfirm(proxy_not_exist) expect_equal(sapply(rogue$proxy, `[[`, 'times'), c(2, rep(1, 5))) rogue$.__enclos_env__$private$proxyGoodConfirm(proxy_good) proxy_bad <- list(list(ip = 'a', port = 1)) expect_silent(rogue$.__enclos_env__$private$proxyBadConfirm(proxy_bad)) expect_equal(sapply(rogue$proxy, `[[`, 'times'), c(2, rep(1, 5))) proxy_bad <- list(list(ip = 'a', port = 1), list(ip = 'b', port = 2)) rogue$.__enclos_env__$private$proxyBadConfirm(proxy_bad) expect_equal(sapply(rogue$proxy, `[[`, 'times'), rep(1, 6)) })
90381bd5deb51112ef0fd8f364afa0c90ff663ab
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
/data/genthat_extracted_code/StreamMetabolism/examples/window_chron.Rd.R
5d0a141386978db6e986810b29bd936e92036e70
[]
no_license
surayaaramli/typeRrh
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
66e6996f31961bc8b9aafe1a6a6098327b66bf71
refs/heads/master
2023-05-05T04:05:31.617869
2019-04-25T22:10:06
2019-04-25T22:10:06
null
0
0
null
null
null
null
UTF-8
R
false
false
269
r
window_chron.Rd.R
library(StreamMetabolism) ### Name: window_chron ### Title: Time Windows of Diurnal Curves ### Aliases: window_chron ### Keywords: chron ### ** Examples #with real data data(DOTemp) d <- window_chron(DOTemp, "8/18/70", "06:00:00", "8/18/70", "20:15:00") plot(d)
dfc30ed8c89fc9753cbbfb37044503eb3a57c3a3
8002f1c25aaf74fde2aa4a162a23906083189f52
/tests/testthat/test-FilterGB.R
96d78fbe8ab3732166e77d50d09b57c5d21b8607
[]
no_license
zijianni/scCB2
8d4e657cbd8e121559acbc48936384e286cd9b48
5ba289850b4785e6fd71b4a2b4217620ad9ffc4f
refs/heads/master
2023-04-27T09:44:42.589719
2023-04-19T04:14:44
2023-04-19T04:14:44
191,231,709
11
1
null
null
null
null
UTF-8
R
false
false
578
r
test-FilterGB.R
data(mbrainSub) test_that("Input data format", { expect_error(FilterGB(1), "at least two dimensions") expect_error(FilterGB("1"), "at least two dimensions") }) test_that("Gene filtering threshold", { for(k in sample(0:100,5)){ dat_temp <- FilterGB(mbrainSub,g_threshold = k) expect_true(all(Matrix::rowSums(dat_temp) > k)) } }) test_that("Barcode filtering threshold", { for(k in sample(0:100,5)){ dat_temp <- FilterGB(mbrainSub,b_threshold = k) expect_true(all(Matrix::colSums(dat_temp) > k)) } })
5df7e535dce1b153eb5d29968758d6a282938560
b220e9b4c9db0a1590358b22480ea00beb3fba0f
/R/Region_TwoSampleTTest.R
6a36f2c32d2b88d4e066011b9d428d1c7720a4de
[]
no_license
catfishy/jagust
9741a74812c15c4c812b7143404ae1092e76e4d7
8877d29b340d6489fbb20542dc5d1188ec512531
refs/heads/master
2021-04-19T01:21:36.722907
2016-11-09T22:54:44
2016-11-09T22:54:44
37,677,259
0
0
null
null
null
null
UTF-8
R
false
false
4,485
r
Region_TwoSampleTTest.R
input_file = '/usr/local/jagust/ADNI_BL_AV45Neg_Normals.csv' df = read.csv(input_file) slope.thresh = 0.004 # get the accumulators # df = df[complete.cases(df$AV45_NONTP_BigRef_Slope),] # df$accum = factor(df$AV45_NONTP_BigRef_Slope > slope.thresh) df = df[complete.cases(df$AV45_NONTP_BigRef_Slope_3pts),] df$accum = factor(df$AV45_NONTP_BigRef_Slope_3pts > slope.thresh) # output file name # out.name = '/usr/local/jagust/AV45_NegNormal_accumAllgroup_region_anova.csv' out.name = '/usr/local/jagust/AV45_NegNormal_accum3ptgroup_region_anova.csv' demog = c('RID','Gender','accum','APOE4_BIN','Age.AV45','AV45_NONTP_BigRef_Slope','AV45_NONTP_BigRef_Slope_3pts') # regions = c('CTX_LH_BANKSSTS','CTX_LH_CAUDALANTERIORCINGULATE','CTX_LH_CAUDALMIDDLEFRONTAL','CTX_LH_CUNEUS','CTX_LH_ENTORHINAL','CTX_LH_FRONTALPOLE','CTX_LH_FUSIFORM','CTX_LH_INFERIORPARIETAL','CTX_LH_INFERIORTEMPORAL','CTX_LH_INSULA','CTX_LH_ISTHMUSCINGULATE','CTX_LH_LATERALOCCIPITAL','CTX_LH_LATERALORBITOFRONTAL','CTX_LH_LINGUAL','CTX_LH_MEDIALORBITOFRONTAL','CTX_LH_MIDDLETEMPORAL','CTX_LH_PARACENTRAL','CTX_LH_PARAHIPPOCAMPAL','CTX_LH_PARSOPERCULARIS','CTX_LH_PARSORBITALIS','CTX_LH_PARSTRIANGULARIS','CTX_LH_PERICALCARINE','CTX_LH_POSTCENTRAL','CTX_LH_POSTERIORCINGULATE','CTX_LH_PRECENTRAL','CTX_LH_PRECUNEUS','CTX_LH_ROSTRALANTERIORCINGULATE','CTX_LH_ROSTRALMIDDLEFRONTAL','CTX_LH_SUPERIORFRONTAL','CTX_LH_SUPERIORPARIETAL','CTX_LH_SUPERIORTEMPORAL','CTX_LH_SUPRAMARGINAL','CTX_LH_TEMPORALPOLE','CTX_LH_TRANSVERSETEMPORAL','CTX_RH_BANKSSTS','CTX_RH_CAUDALANTERIORCINGULATE','CTX_RH_CAUDALMIDDLEFRONTAL','CTX_RH_CUNEUS','CTX_RH_ENTORHINAL','CTX_RH_FRONTALPOLE','CTX_RH_FUSIFORM','CTX_RH_INFERIORPARIETAL','CTX_RH_INFERIORTEMPORAL','CTX_RH_INSULA','CTX_RH_ISTHMUSCINGULATE','CTX_RH_LATERALOCCIPITAL','CTX_RH_LATERALORBITOFRONTAL','CTX_RH_LINGUAL','CTX_RH_MEDIALORBITOFRONTAL','CTX_RH_MIDDLETEMPORAL','CTX_RH_PARACENTRAL','CTX_RH_PARAHIPPOCAMPAL','CTX_RH_PARSOPERCULARIS','CTX_RH_PARSORBITALIS','CTX_RH_PARSTRIANGULARIS','CTX_RH_P
ERICALCARINE','CTX_RH_POSTCENTRAL','CTX_RH_POSTERIORCINGULATE','CTX_RH_PRECENTRAL','CTX_RH_PRECUNEUS','CTX_RH_ROSTRALANTERIORCINGULATE','CTX_RH_ROSTRALMIDDLEFRONTAL','CTX_RH_SUPERIORFRONTAL','CTX_RH_SUPERIORPARIETAL','CTX_RH_SUPERIORTEMPORAL','CTX_RH_SUPRAMARGINAL','CTX_RH_TEMPORALPOLE','CTX_RH_TRANSVERSETEMPORAL','LEFT_ACCUMBENS_AREA','LEFT_AMYGDALA','LEFT_CAUDATE','LEFT_CHOROID_PLEXUS','LEFT_HIPPOCAMPUS','LEFT_PALLIDUM','LEFT_PUTAMEN','LEFT_THALAMUS_PROPER','LEFT_VENTRALDC','RIGHT_ACCUMBENS_AREA','RIGHT_AMYGDALA','RIGHT_CAUDATE','RIGHT_CHOROID_PLEXUS','RIGHT_HIPPOCAMPUS','RIGHT_PALLIDUM','RIGHT_PUTAMEN','RIGHT_THALAMUS_PROPER','RIGHT_VENTRALDC') regions = c('CTX_LH_CAUDALMIDDLEFRONTAL', 'CTX_LH_LATERALORBITOFRONTAL', 'CTX_LH_MEDIALORBITOFRONTAL', 'CTX_LH_PARSOPERCULARIS', 'CTX_LH_PARSORBITALIS', 'CTX_LH_PARSTRIANGULARIS', 'CTX_LH_ROSTRALMIDDLEFRONTAL', 'CTX_LH_SUPERIORFRONTAL', 'CTX_LH_FRONTALPOLE', 'CTX_RH_CAUDALMIDDLEFRONTAL', 'CTX_RH_LATERALORBITOFRONTAL', 'CTX_RH_MEDIALORBITOFRONTAL', 'CTX_RH_PARSOPERCULARIS', 'CTX_RH_PARSORBITALIS', 'CTX_RH_PARSTRIANGULARIS', 'CTX_RH_ROSTRALMIDDLEFRONTAL', 'CTX_RH_SUPERIORFRONTAL', 'CTX_RH_FRONTALPOLE', 'CTX_LH_INFERIORPARIETAL', 'CTX_LH_PRECUNEUS', 'CTX_LH_SUPERIORPARIETAL', 'CTX_LH_SUPRAMARGINAL', 'CTX_RH_INFERIORPARIETAL', 'CTX_RH_PRECUNEUS', 'CTX_RH_SUPERIORPARIETAL', 'CTX_RH_SUPRAMARGINAL', 'CTX_LH_MIDDLETEMPORAL', 'CTX_LH_SUPERIORTEMPORAL', 'CTX_RH_MIDDLETEMPORAL', 'CTX_RH_SUPERIORTEMPORAL', 'CTX_LH_CAUDALANTERIORCINGULATE', 'CTX_LH_ISTHMUSCINGULATE', 'CTX_LH_POSTERIORCINGULATE', 'CTX_LH_ROSTRALANTERIORCINGULATE', 'CTX_RH_CAUDALANTERIORCINGULATE', 'CTX_RH_ISTHMUSCINGULATE', 'CTX_RH_POSTERIORCINGULATE', 'CTX_RH_ROSTRALANTERIORCINGULATE') for (region in regions) { df[region] = df[region]/df['COMPOSITE_REF'] } df = df[,c(demog,regions)] # create factors df$RID = factor(df$RID) df$Gender = factor(df$Gender) df$APOE4_BIN = factor(df$APOE4_BIN) # Run anova results = data.frame() for (region in regions) { form = 
paste(region,'~ accum + Gender + APOE4_BIN + Age.AV45') fm = lm(form,df) fm.summary = summary(fm) accum_coef = as.data.frame(fm.summary$coefficients['accumTRUE',]) colnames(accum_coef) = c(region) results = rbind(results,t(accum_coef)) } results = results[order(results$`Pr(>|t|)`),] # bonferroni correct results$p.bonferroni = p.adjust(results$`Pr(>|t|)`, method = "bonferroni") results$p.holm = p.adjust(results$`Pr(>|t|)`, method = "holm") write.csv(results,file=out.name)
0db091ca4f18d78c804a7bc356129772052414a5
b022e68f0139455784d95133deb4cf8f487142ce
/man/traprule.Rd
d8d1644596852b35c15f17624792ea47fd8bcce8
[]
no_license
cran/REBayes
1e311610a28f37509d2da28f81385b17e84b4bbf
e26237baf78f2dc4bb776ae29a2ddfce68963435
refs/heads/master
2022-05-13T04:13:53.603760
2022-03-22T17:20:02
2022-03-22T17:20:02
17,681,954
4
2
null
null
null
null
UTF-8
R
false
true
413
rd
traprule.Rd
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/misc.R \name{traprule} \alias{traprule} \title{Integration by Trapezoidal Rule} \usage{ traprule(x, y) } \arguments{ \item{x}{points of evaluation} \item{y}{function values} } \value{ A real number. } \description{ Integration by Trapezoidal Rule } \details{ Crude Riemann sum approximation. } \author{ R. Koenker } \keyword{utility}
45132d05f0267c6ccbec4f4c143504d7967737ad
3aa5f28626782b046730018b9727f3bcd1ce024b
/ch_4__advance_R/33__Lists.R
f83cfdc00296b52223588c0b1fd86c9bbc252103
[]
no_license
asamadgithub/R__complete
1c854d56f4f3add69256c011c8871b911a108886
3d3899aa77958e305bc8a523ed4afd946822bf15
refs/heads/master
2022-11-26T23:19:17.183332
2020-08-04T15:36:35
2020-08-04T15:36:35
null
0
0
null
null
null
null
UTF-8
R
false
false
4,248
r
33__Lists.R
# --------------------------------------Import the data and formating date_Time stamps via POSIXct utl <- read.csv("https://sds-platform-private.s3-us-east-2.amazonaws.com/uploads/P3-Machine-Utilization.csv") utl$Utilization <- 1 - utl$Percent.Idle #?POSIX,?POSIXct,... utl$POSIX_time <- as.POSIXct(utl$Timestamp, format = "%d/%m/%Y %H:%M") utl <- utl[,c(5,2,3,4)] head(utl, n = 6) colnames(utl) # --------------------------------------What is a list? # vector = only data element of same type ( e.g if one is character, it will convert all other to character as well) # Here I need a list <- name of machine, stats(min,median,max), logical(if efficiny <90%) head(utl) tail(utl) m1 = utl$Machine == "RL1" # subsetting according to machine name RL1 <- utl[m1,] tail(RL1) #max,min,median is.na(RL1$Utilization) RL1[!complete.cases(RL1$Utilization),] # means there are some "NA" entries... minimum_RL1 = min(RL1$Utilization, na.rm = T) maximum_RL1 = max(RL1$Utilization, na.rm = T) median_RL1 = median(RL1$Utilization, na.rm = T) # logical RL1 logical_RL1 <- length(which(RL1$Utilization < 0.90)) > 0 # combine in a list list_RL1 <- list(RL1,c(minimum_RL1,median_RL1,maximum_RL1),logical_RL1) list_RL1 # --------------------------------------Naming different components of list # method-1 list_RL1 names(list_RL1) # its NULL at this time names(list_RL1) <- c("Machine","Stats","Low_Threshold") list_RL1 length(list_RL1) # method-2 ; righ at the begining.. rm(list_RL1) list_RL1 <- list(Machine = RL1,Stats = c(minimum_RL1,median_RL1,maximum_RL1),Low_Threshold = logical_RL1) list_RL1 # --------------------------------------Extract components of a list # method-1: [ ] --> always return a list names(list_RL1) list_RL1["Machine"] list_RL1[1] list_RL1[2] typeof(list_RL1[2]) # method-2: [[]] --> return the actual object list_RL1[[1]] list_RL1[[2]] typeof(list_RL1[[2]]) # method-3: $- same as [[]] but prettier list_RL1$Machine list_RL1$Stats typeof(list_RL1$Stats) # quiz... 
access the 3rd element of vector of list_RL1$Stats # method-1 list_RL1[2] # this will not work ofcourse.. buc it return list.. list_RL1[[2]][3] # method-2 list_RL1$Stats length(list_RL1$Stats) list_RL1$Stats[3] # --------------------------------------Adding/subtracting from list #rm(list_RL1) #list_RL1 <- list(Machine = "RL1",Stats = c(minimum_RL1,median_RL1,maximum_RL1),Low_Threshold = logical_RL1) #list_RL1 # case-1 within in one sub-list # ---------------------------- # adding/insering list_RL1 list_RL1$Machine[2] <- "222" list_RL1$Machine[3] <- "333" list_RL1$Machine[4] <- "444" # deleting list_RL1$Machine list_RL1$Machine <- list_RL1$Machine[-which(list_RL1$Machine == "222")] # case-2 in th main list # ---------------------- # adding/insering # method-1 # with predefined index number list_RL1 list_RL1[4] <- "Addition at index 4 in the main list" # method-2 # assume column is already there and assign the data/values # add hours of null vallues in current list list_RL1 head(RL1) RL1[is.na(RL1$Utilization),"POSIX_time"] list_RL1$Unknown_Data_Hrs <- RL1[is.na(RL1$Utilization),"POSIX_time"] list_RL1$Data <- RL1 # method-3 removing main element(vector) in the list list_RL1 # lets say remove number 4th position list_RL1[4] <- NULL list_RL1 # indexing also shifted little bit above... # --------------------------------------Subsetting a list # task-1: access 1st or 2nd element of "Unknown_Data_Hrs" list_RL1$Unknown_Data_Hrs list_RL1$Unknown_Data_Hrs[1] #or list_RL1[[4]][1] names(list_RL1) list_RL1[1] list_RL1[1:2] list_RL1[c(1,4)] # we also use names in "name-1", "name-2" inside c() subset_1 <- list_RL1[c(1,4)] subset_1 # names carry over ## ## double [[]] are desingned only to access the component not for subsetting ## [[1:3]] , i.e. 
its wrong # --------------------------------------Plotting library(ggplot2) head(utl, n = 3) #plot_data <- utl[complete.cases(utl),] #head(plot_data) obj <- ggplot(data = plot_data) obj + geom_line(na.rm = T, aes(x = POSIX_time, y = Utilization, color = Machine ), size = 1.2) + facet_grid(Machine~.) + geom_hline(yintercept = 0.90, color = "Blue", linetype = 3) # --------------------------------------add plot to the list
c1110ea4ee1b59b259d001d27e85fb1df229a62c
60fe479cf2eb4cda139dd758af86af30d31f3006
/Forecasting using Covid19 data.R
0f23234af35363220bf8a0d4077ee621c6dba7fe
[]
no_license
riteshtripathi2010/Rstudio
3b1bc14eb59fe3bb5432d024a830a0dfae1ba95c
c27226fa0c5a299956bdf7d11aa797dda169f4c1
refs/heads/master
2021-07-16T18:08:56.262023
2020-08-02T11:41:22
2020-08-02T11:41:22
195,048,572
0
0
null
null
null
null
UTF-8
R
false
false
1,972
r
Forecasting using Covid19 data.R
#Forecasting COVID19 Cases with R, using facebook's prophet library install.packages("covid19.analytics") install.packages("ape") library(covid19.analytics) library(dplyr) library(prophet) library(lubridate) library(ggplot2) setwd("/Users/riteshtripathi/Desktop/") tsc <- read.csv("ts-confirmed.csv", header = TRUE) #we got a coln called Country, lets select US from it tsc <- tsc %>% filter(Country.Region == 'US') #we see the cols from are in rows, lets transponse into colns tsc <- data.frame(t(tsc)) #dates are: 22-01-2020 and ownwards #currently date is in row names, we need to make them in coln names tsc <- cbind(rownames(tsc), data.frame(tsc, row.names = NULL)) #Change coln names colnames(tsc) = c('Date', 'Confirmed') #now lets remove first four rows tsc <- tsc[-c(1:4), ] #lets change dates tsc$Date <- ymd(tsc$Date)#didnt work tsc$Date <- ymd(tsc$Date) str(tsc) #it says confirmed in char, lets convert that tsc$Confirmed <- as.numeric(tsc$Confirmed) # Plot qplot(Date, Confirmed, data = tsc, main = "Covid19 confirmed cases in UK") ds <- tsc$Date y <- tsc$Confirmed df <- data.frame(ds, y) # Forecasting m <- prophet(df) # Prediction future <- make_future_dataframe(m, periods = 28)#we want to make predcitions for next 28 days #last date now is June 18 2020 forecast <- predict(m, future) # Plot forecast plot(m, forecast) dyplot.prophet(m, forecast)#making above prophet_plot_components(m, forecast) #Forecast Components prophet_plot_components(m, forecast) #this shows two plots #the bottom one is number of confrimed cases on an average at a lower side, Frid and sat numbers are bigger #data is not realisty, it only tries to approximate reality #due to lag or reporting, # Model performance pred <- forecast$yhat[1:121]#this forecast has a coln called yhat actual <- m$history$y plot(actual, pred) #lets add line abline(lm(pred~actual), col = 'red')#we dont see underestimation or overestimation #Summary summary(lm(pred~actual))
d055b728010f59c06a1cf4a66c286073253d3870
fb7e9d43adb368b42dc85124a591f5622fc53c7c
/nba_stat/nba_stat.R
42252f9207bc2f17832fe207df3edb1023929593
[]
no_license
expdal3/data-visualisation
ba63a999dc2ec3730033aa032eb1327b86e02167
ef9cb5fed7f66a8b46d3eb327b4e71f98fb7cb07
refs/heads/master
2021-01-13T22:00:31.288451
2020-04-01T13:10:07
2020-04-01T13:10:07
242,508,336
0
0
null
null
null
null
UTF-8
R
false
false
939
r
nba_stat.R
library(dplyr) library(plyr) library(data.table) ## read in csv ad text file dataset = read.csv("C:\\Users\\uC260543\\Work\\DataSciencePractices\\R\\nba-players-stats\\Seasons_Stats.csv",header=T) dataset <- data.frame(dataset) colnames(dataset) #head(dataset,5) dataset[is.na(dataset)]<-0 #replace NA with 0 #subset = dataset[dataset$Year >=2000,] #just take rows after 2000 #head(dataset,5) #summary(subset) ## calculate sum of 3points in one year per player dset <- aggregate(X3P~Player + Year, data=dataset, FUN=sum) #head(arrange(dset,desc(Player)), n = 200) #display result #group_by(dset, Player) ## CREATE 1 and 2 LAG dset <- as.data.table(dset) #convert to data.table #dset2 <- dset1[, lag.X3P:=c(0, X3P[-.N]), by=Player] dset1 <- dset[, lag.X3P:=lag(X3P,1, 0), by=Player] dset1 <- dset1[, lag2.X3P:=lag(X3P,2, 0), by=Player] head(arrange(dset1,desc(Player)), n = 100) #display result
49ab3ff92aa5849027996440bbdd2835a562eb09
a70271ce4777831c477b4272021e5bfa8555f47f
/man/restrictions.Rd
a54080f40676811c856df655bd1f490cfcad8b74
[]
no_license
moquedace/aqp
9f016e990a6c522e04afb100dcfa1a81debd046e
73f7f6a64edf727f24258341c94eb591fd783173
refs/heads/master
2023-04-08T18:59:15.038547
2021-05-04T20:58:41
2021-05-04T20:58:41
null
0
0
null
null
null
null
UTF-8
R
false
true
506
rd
restrictions.Rd
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/Class-SoilProfileCollection.R \docType{methods} \name{restrictions} \alias{restrictions} \alias{restrictions,SoilProfileCollection-method} \title{Retrieve restriction data from SoilProfileCollection} \arguments{ \item{object}{a SoilProfileCollection} } \description{ Get restriction data from SoilProfileCollection. Result is returned in the same \code{data.frame} class used to initially construct the SoilProfileCollection. }
fe54345772061b093682a43f167d033a07ec9393
c98eff6792c2f139086ddce955f6e980788cba7e
/code/functions/pres.abs.lag.R
1b5dc2e19b3598e93ef7f44e9292394c40865055
[]
no_license
efcaguab/ws-allith-habitat-selection
82ac41e46e8ce549d39e7c4eb7506a09f887f6f5
0fc73a947a450f46ee33bc8bc4858025f457182b
refs/heads/master
2022-01-23T10:44:53.804290
2019-07-10T21:38:24
2019-07-10T21:38:24
109,056,794
0
0
null
null
null
null
UTF-8
R
false
false
1,927
r
pres.abs.lag.R
# FUNCTION ---------------------------------------------------------------- # Function to calculate the vectors of presence absence pres.abs.lag <- function (start.date, end.date, sightings, dates){ # Create a data frame with the detections sight <- data.frame (id = sightings, date = dates) %>% filter (date >= start.date, date <= end.date) %>% mutate (id = factor (id)) %>% arrange (date) # For each shark we'll start with the first detection only individuals <- levels (sight$id) # Cycle trough each shark presence.absence <- foreach (i=1:length (individuals), .combine = rbind) %dopar% { # message(individuals[i]) # Find dates in which the shark was present dates.present <- sight$date[sight$id == individuals[i]] %>% as.numeric () # Establish all dates in which it was tagged (only dates in which there was monitoring) dates.tagged <- unique(sight$date)[unique(sight$date) >= sight$date[match (individuals[i], sight$id)]] if(length(dates.tagged)>1) { # Find all possible combinations of dates in which it was tagged dates.comb <- as.data.frame (t (combn (dates.tagged, 2))) %>% tbl_df() names (dates.comb) <- c ("date.1", "date.2") dates.comb <- mutate (dates.comb, lag = date.2 - date.1, # Find the lag between given dates # Establish if it was present for in that lag present = (date.1 %in% dates.present) & (date.2 %in% dates.present), date.1 = as.Date(date.1, origin = "1970-01-01"), date.2 = as.Date(date.2, origin = "1970-01-01"), id = individuals[i]) return (dates.comb) } else { return(NULL) } } return (presence.absence) }
947a540355e12d7c4007dc97affec8f25d0388e1
3e508d7cd0798f70f70242a837f99123397fc1c4
/tests/sim/20210909-sim/look-at-results.R
d16546938a20637bbc09bc722ada03a68a5e70eb
[]
no_license
jlivsey/Dvar
f0d142df9866a4232947d114891111097c4c5851
824cdcc822829d655b905bfeca54a40b82f16e36
refs/heads/master
2023-07-26T19:17:28.531305
2023-07-06T13:55:28
2023-07-06T13:55:28
237,249,747
2
0
null
null
null
null
UTF-8
R
false
false
934
r
look-at-results.R
library(tidyverse, warn.conflicts = FALSE, quietly = TRUE) simDir <- "~/Github/Dvar/tests/sim/20210909-sim" nsim <- 1 # ---- Load Results ---- simLevels <- c(0:4, 6:10) R <- matrix(NA, 7224, length(simLevels)) colnames(R) <- letters[1:length(simLevels)] for(jj in seq_along(simLevels)){ simNum <- simLevels[jj] print(simNum) fileName <- sprintf("results%d.RData", simNum) print(fileName) load(file.path(simDir, fileName)) R[, jj] <- coefEsts colnames(R)[jj] <- paste0("sim", simNum) } head(R) # ---- True Data ---- load(file.path(simDir, "true-table.Rdata")) # Loads array A Av <- c(A) # ---- Absolue Error ---- absErr <- abs(R - Av) par(mfrow = c(1, 2), mar = c(3, 3, 3, .1)) boxplot(absErr, main = "absolute error") boxplot(absErr, ylim = c(0, 1), main = "zoom in to (0, 1)") Rsort <- apply(absErr, 2, sort) hist(R[, "sim0"], breaks = 40, ylim = c(0, 100)) hist(R[, "sim1"], breaks = 40, ylim = c(0, 100))
4bd4055383e8fb732f2666cdd59c987f547248cd
bf825ce5fcbda03c083caf45b549f4ae350e27af
/Projekty/problem sąsiedztwa.R
951ebbefeaa39ce7ca0710a062053d5cb8d4b82a
[]
no_license
ajandria/AWD
a48cbafe3a6e0eb2b6075bda5cf0060582d79ec1
0f7f4ef1dc569d2eecb36e2b9d471f1b0770588e
refs/heads/master
2023-04-06T15:26:32.827834
2020-11-26T15:49:08
2020-11-26T15:49:08
null
0
0
null
null
null
null
UTF-8
R
false
false
4,353
r
problem sąsiedztwa.R
# ------------------------------------------------------------------------- # library(tidyverse) obliczDystans <- function(df1, df2) { x1 <- df1$x y1 <- df1$y z1 <- df1$z x2 <- df2$x y2 <- df2$y z2 <- df2$z dystans <- sqrt((x2-x1)^2+(y2-y1)^2+(z2-z1)^2) return(dystans) } set.seed(424242) # 1. - wylosuj 10k punktów z rozkładu normalnego (0, I). ilosc_punktow <- 10000 myList <- list() for (i in 1:ilosc_punktow) { myList[[i]] <- runif(3, 0, 1) names(myList[[i]]) <- c('x', 'y', 'z') } merged_data_frame <- data.frame(do.call(rbind, myList)) # 2. - Dla n=20 punktów policz mediana+-mad(policz # stosunek odległości (1, 10, 100, 1000) najbliższego sąsiada do najdalszego sąsiada) # https://www.calculatorsoup.com/calculators/geometry-solids/distance-two-points.php # wylosuj 20 punktów z 10k punktów set.seed(424242) wybrane20pkt <- round(runif(20, 1, 10000)) lista20pkt <- list() index <- 1 for (i in wybrane20pkt) { lista20pkt[[index]] <- myList[[i]] index = index + 1 } merged_data_frame_20pkt <- data.frame(do.call(rbind, lista20pkt)) %>% mutate(id_punktu = (1:20)) %>% select(id_punktu, everything()) # library(tidyverse) obliczDystans <- function(df1, df2) { x1 <- df1$x y1 <- df1$y z1 <- df1$z x2 <- df2$x y2 <- df2$y z2 <- df2$z dystans <- sqrt((x2-x1)^2+(y2-y1)^2+(z2-z1)^2) return(dystans) } set.seed(424242) # 1. - wylosuj 10k punktów z rozkładu normalnego (0, I). ilosc_punktow <- 10000 myList <- list() for (i in 1:ilosc_punktow) { myList[[i]] <- runif(3, 0, 1) names(myList[[i]]) <- c('x', 'y', 'z') } merged_data_frame <- data.frame(do.call(rbind, myList)) # 2. 
- Dla n=20 punktów policz mediana+-mad(policz # stosunek odległości (1, 10, 100, 1000) najbliższego sąsiada do najdalszego sąsiada) # https://www.calculatorsoup.com/calculators/geometry-solids/distance-two-points.php # wylosuj 20 punktów z 10k punktów set.seed(424242) wybrane20pkt <- round(runif(20, 1, 10000)) lista20pkt <- list() index <- 1 for (i in wybrane20pkt) { lista20pkt[[index]] <- myList[[i]] index = index + 1 } merged_data_frame_20pkt <- data.frame(do.call(rbind, lista20pkt)) %>% mutate(id_punktu = (1:20)) %>% select(id_punktu, everything()) # Final ------------------------------------------------------------------- library(philentropy) problem_sasiedztwa <- function(d) { nazwy_wspolrzednych_dla_punktu <- paste(c("x"), 1:d, sep="") set.seed(424242) n = 100 myList <- list() for (i in 1:n) { myList[[i]] <- runif(d, 0, 1) names(myList[[i]]) <- nazwy_wspolrzednych_dla_punktu } merged_data_frame <- do.call(rbind, myList) merged_data_frame <- as.data.frame(merged_data_frame) rownames(merged_data_frame) <- paste(c("p"), 1:n, sep="") set.seed(424242) wybrane20pkt <- round(runif(5, 1, 100)) lista20pkt <- list() i = 1 for (value in wybrane20pkt) { lista20pkt[[i]] <- merged_data_frame[value, ] i = i + 1 } merged_data_frame_20pkt <- do.call(rbind, lista20pkt) merged_data_frame_20pkt_setdiff <- setdiff(merged_data_frame, merged_data_frame_20pkt) macierz_odleglosci <- distance(rbind(merged_data_frame_20pkt, merged_data_frame_20pkt_setdiff), method = 'euclidean', use.row.names = TRUE) list_distances <- list() i2 = 1 for (value in rownames(merged_data_frame_20pkt)) { wybrane_punkty <- data.frame(macierz_odleglosci) %>% select(rownames(merged_data_frame_20pkt)[i2]) wybrane_punkty2 <- wybrane_punkty %>% slice(-i2) wybrane_punkty2_max <- wybrane_punkty2 %>% max() wybrane_punkty2_min <- wybrane_punkty2 %>% min() stosunek_odleglosci <- wybrane_punkty2_min/wybrane_punkty2_max zwracana_tabela <- data.frame(punkt = value, stosunek_odleglosci = stosunek_odleglosci) 
list_distances[[i2]] <- zwracana_tabela i2 = i2 + 1 } merged_list_distances <- do.call(rbind, list_distances) merged_list_distances <- as.data.frame(merged_list_distances) mad_ps <- mad(merged_list_distances$stosunek_odleglosci) median_ps <- median(merged_list_distances$stosunek_odleglosci) return(data.frame(mad_ps, median_ps, median_ps-mad_ps, median_ps+mad_ps)) }
dc0681c9d6f438e25f84e380965e9828ed1cd851
f9c3eb0584c42c25db3e9f719d03c92d38a607c2
/app.R
ce0c5e9cae9fcb7fabc234618a72d105a51f5a22
[ "MIT" ]
permissive
strojank/loan-vs-savings
264c154e15549a7f751ddc9e93c20f48737c3552
24edf54d03eb78b35d8d33d36ae0cd4340a9526a
refs/heads/master
2020-08-04T23:03:37.518693
2019-10-23T06:08:17
2019-10-23T06:08:17
212,307,226
0
0
null
null
null
null
UTF-8
R
false
false
6,022
r
app.R
# Shiny app library(shiny) library(plotly) library(tidyverse) source('pomozneFunkcije.R') options(scipen = 999) library(DT) # Define UI for application that draws a histogram ui <- fluidPage( titlePanel("When to take a loan?"), # Sidebar with inputs. sidebarLayout( sidebarPanel( helpText( "This Shiny app can help answer the following question: When in the next few years is the best time to take a loan for an investment? Calculation is based on current funds, yearly savings, interest rates and more." ), sliderInput( "leta", "Loan maturity:", min = 2, max = 30, value = 10 ), numericInput( "eur", "Estimated EURIBOR interest rate (%):", 0, min = -1, max = 100 ), numericInput( "bank", "Bank interest rate (%):", 3, min = 0, max = 100 ), numericInput( "exp", "Estimated yearly price increase of investment (%):", 4, min = 0, max = 100 ), numericInput( "inv", "Current investment estimate (€):", 200000, min = 0, max = 10000000 ), sliderInput( "funds", "Current funds (€):", value = 10000, min = 0, max = 200000, step = 1000 ), sliderInput( "savings", "Average yearly savings (€):", value = 10000, min = 0, max = 100000, step = 1000 ) ), mainPanel(tabsetPanel( # Loans and interests tabPanel( "Loan and interests", h4("Funds, loan and interests"), p( "The plot shows the cost of total investment separated to funds, loan and the cost of that loan (interests) depending on the year you decide to invest. Hoover on top of the chart to display values" ), plotlyOutput("loanPlot"), tags$hr() ), # Total investment tabPanel( "Total investment", h4("Total investment cost"), p( "The plot shows total cost of investment depending on the year you decide to invest." ), plotlyOutput("investmentPlot") ), # Payments tabPanel( "Payments", h4("Payments"), p( "In the table below you can see how much money you need to borrow, what is the cost of that loan and the value of monthly payment depending on the year you decide to invest." 
), DT::dataTableOutput("loanTable") ), # Credits tabPanel( "Credits", tags$hr(), HTML( "Loan and payments calculations are based on <a href='https://github.com/lcolladotor/mortgage'>Mortage Calculator</a> by <a href='http://bit.ly/LColladoTorres'>L. Collado Torres</a>." ), tags$hr(), HTML( "Powered by <a href='http://www.rstudio.com/shiny/'>Shiny</a> and hosted by <a href='http://www.rstudio.com/'>RStudio</a>." ), tags$hr(), HTML( "Developed by <a href='https://twitter.com/strojanklemen'>Klemen Strojan.</a>" ), tags$hr(), HTML( "Code hosted by <a href='https://github.com/strojank/loan-vs-savings'>GitHub</a>." ), tags$hr() ) )) )) # Define server logic required to draw a histogram server <- function(input, output) { # loan data calcualtion investmentData <- reactive({ cenaInvesticije( loanDuration = input$leta, euribor = input$eur, bankInterest = input$bank, investmentEstimate = input$inv, priceIncrease = input$exp / 100, currentFunds = input$funds, yearlySavings = input$savings ) }) # subseting table data, formating tableData <- reactive({ investmentData() %>% select(startYear, loan, interests, payment) %>% datatable() %>% formatCurrency( c('loan', 'interests', 'payment'), currency = "€", interval = 3, mark = ".", dec.mark = ',', digits = 2 ) }) # subseting main results data loanData <- reactive({ investmentData() %>% pivot_longer(cols = -c(startYear, totalInvestment)) %>% filter(name %in% c('funds', 'loan', 'interests')) }) # subseting investment plot data invPlotData <- reactive({ investmentData() %>% pivot_longer(cols = -startYear) %>% filter(name %in% c('totalInvestment')) }) # Funds, loan and interests plot output$investmentPlot <- renderPlotly({ ggplot(invPlotData(), aes( x = startYear, y = value, color = name, fill = name, label = value )) + geom_bar(stat = 'identity') + labs(x = 'Starting year', y = 'EUR', title = '') + scale_y_continuous(labels = scales::dollar_format(suffix = "", prefix = "€")) + scale_color_manual(values = c("#000000")) + 
scale_fill_manual(values = c("#000000")) + theme_bw() }) # investment plot output$loanPlot <- renderPlotly({ ggplot( loanData(), aes( x = startYear, y = value, color = name, fill = name, label = totalInvestment ) ) + geom_bar(stat = 'identity') + labs(x = 'Starting year', y = 'EUR') + scale_y_continuous(labels = scales::dollar_format(suffix = "", prefix = "€")) + scale_fill_manual(values = c("#740001", "#eeba30", "#ae0001")) + scale_color_manual(values = c("#000000", "#000000", "#000000")) + theme_bw() }) # Payments table output$loanTable <- DT::renderDataTable({ tableData() }) } # Run the application shinyApp(ui = ui, server = server)
9b2b013ea4c30bfac34ef26c666f936ca3702371
106a8e26bd103fa96745a9e94f3b2ca40dd33d23
/tests/testthat/test-worm_download.R
e4c2b37c7d841f897134a1b845f2d4afb632b617
[ "MIT" ]
permissive
rikenbit/WormTensor
babece93b314999d076694058f59444c73b646bc
3b08a92cf4da2f33221aa1c7a122803dee06177b
refs/heads/master
2023-04-10T08:09:59.990702
2022-10-04T07:25:20
2022-10-04T07:25:20
470,419,278
0
0
null
null
null
null
UTF-8
R
false
false
1,160
r
test-worm_download.R
.dist2mat <- function(x) { as.matrix(x) } ############ Euclid ############ ## dist object Ds_Euclid <- worm_download("Euclid")$Ds expect_true(object.size(Ds_Euclid) != 0) expect_equal(length(Ds_Euclid), 24) expect_true(all(lapply(Ds_Euclid, is) == "dist")) ## matrix object Ms_Euclid <- lapply(Ds_Euclid, .dist2mat) expect_true(all(unlist(lapply(Ms_Euclid, isSymmetric)))) ############# mSBD ############# ## dist object Ds_mSBD <- worm_download("mSBD")$Ds expect_true(object.size(Ds_mSBD) != 0) expect_equal(length(Ds_mSBD), 24) expect_true(all(lapply(Ds_mSBD, is) == "dist")) ## matrix object Ms_mSBD <- lapply(Ds_mSBD, .dist2mat) expect_true(all(unlist(lapply(Ms_mSBD, isSymmetric)))) ############ WARN ############ Ds_Euclid_warn <- worm_download("Euclid", qc = "WARN")$Ds expect_true(object.size(Ds_Euclid_warn) != 0) expect_equal(length(Ds_Euclid_warn), 27) expect_true(all(lapply(Ds_Euclid_warn, is) == "dist")) ############ FAIL ############ Ds_Euclid_fail <- worm_download("Euclid", qc = "FAIL")$Ds expect_true(object.size(Ds_Euclid_fail) != 0) expect_equal(length(Ds_Euclid_fail), 28) expect_true(all(lapply(Ds_Euclid_fail, is) == "dist"))
451dfbb7aae1c7ea3227357e07c54fd91caec39c
9f57e0ad44b78d809c262fa0ffb659232fdb8d5e
/Art of R Programming Code/Ch2/findud.R
4203569aa81ec773044cbe79378b199e915463b5
[]
no_license
abhi8893/Intensive-R
c3439c177776f63705546c6666960fbc020c47e8
e340ad775bf25d5a17435f8ea18300013195e2c7
refs/heads/master
2020-09-22T00:57:36.118504
2020-08-31T09:23:57
2020-08-31T09:23:57
224,994,015
1
0
null
null
null
null
UTF-8
R
false
false
376
r
findud.R
# findud() converts vector v to 1s, 0s, representing an element # increasing or not, relative to the previous one; output length is 1 # less than input findud <- function(v) { vud <- v[-1] - v[-length(v)] _label~findvud1@ return(ifelse(vud > 0,1,-1)) _label~findvud2@ } udcorr <- function(x,y) { ud <- lapply(list(x,y),findud) return(mean(ud[[1]] == ud[[2]])) }
7e8851c91d5c262f02ac127bc0cddc8d5a9e6abf
f4fffe026383f8f681c8b2ef2e7b2ec0f8143688
/man/brauer_2008.Rd
02d4082228ee4148ebcf4ae3b8a674873c13d255
[]
no_license
DavisVaughan/romic
436da67b077937d1c13af9701d39a00f083e6694
f3470f5cd42b6ee8322db3f10a1c254766a5dc3e
refs/heads/master
2023-05-08T05:44:49.460728
2021-05-18T13:10:03
2021-05-18T13:10:03
null
0
0
null
null
null
null
UTF-8
R
false
true
1,486
rd
brauer_2008.Rd
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/data.R \docType{data} \name{brauer_2008} \alias{brauer_2008} \alias{brauer_2008_tidy} \alias{brauer_2008_triple} \title{Brauer 2008} \format{ A tibble with 18,000 rows and 8 columns: \describe{ \item{name}{Common gene name} \item{BP}{Gene ontology biological process of the gene} \item{MF}{Gene ontology molecular function of the gene} \item{sample}{Sample name} \item{nutrient}{Which nutrient limits growth (Glucose, Nitrogen, Phosphorous, Sulfur, Uracil, Leucine} \item{DR}{Dilution rate of the culture - basically how fast the cells are growing} \item{expression}{Expression level of the gene, log2 observation relative to a replicate of G0.3} } An object of class \code{tidy_omic} (inherits from \code{tomic}, \code{general}) of length 2. An object of class \code{triple_omic} (inherits from \code{tomic}, \code{general}) of length 4. } \source{ \url{https://pubmed.ncbi.nlm.nih.gov/17959824/} } \usage{ brauer_2008 brauer_2008_tidy brauer_2008_triple } \description{ An RNA expression (microarray) dataset looking at how yeast gene expression changes as nutrient sources and nutrient richness changes. \code{\link{brauer_2008}} formatted as a tidy_omic object \code{\link{brauer_2008}} formatted as a triple_omic object } \details{ This version of the dataset contains only 500 genes randomly selected from the ~6K genes in the complete dataset. } \keyword{datasets}
f4013e94761da12a48533e3114038c8585749d93
799f724f939763c26c4c94497b8632bad380e8f3
/tests/testthat/test-tokens_ngrams.R
833cdf6164089c6d86d660cd019da1c83c71aef2
[]
no_license
chmue/quanteda
89133a7196b1617f599e5bba57fe1f6e59b5c579
aed5cce6778150be790b66c031ac8a40431ec712
refs/heads/master
2020-12-01T02:59:48.346832
2017-11-22T18:35:37
2017-11-22T18:35:37
50,363,453
2
0
null
2016-01-25T16:18:50
2016-01-25T16:18:50
null
UTF-8
R
false
false
4,036
r
test-tokens_ngrams.R
library(quanteda) library(testthat) context('test ngrams.R') test_that("test that ngrams produces the results from Guthrie 2006", { toks <- tokens(c('insurgents killed in ongoing fighting')) bi_grams <- c('insurgents_killed', 'killed_in', 'in_ongoing', 'ongoing_fighting') two_skip_bi_grams <- c('insurgents_killed', 'insurgents_in', 'insurgents_ongoing', 'killed_in', 'killed_ongoing', 'killed_fighting', 'in_ongoing', 'in_fighting', 'ongoing_fighting') tri_grams <- c('insurgents_killed_in', 'killed_in_ongoing', 'in_ongoing_fighting') two_skip_tri_grams <- c('insurgents_killed_in', 'insurgents_killed_ongoing', 'insurgents_killed_fighting', 'insurgents_in_ongoing', 'insurgents_in_fighting', 'insurgents_ongoing_fighting', 'killed_in_ongoing', 'killed_in_fighting', 'killed_ongoing_fighting', 'in_ongoing_fighting') expect_equivalent(setdiff( as.list(tokens_ngrams(toks, n=2, skip=0))[[1]], bi_grams ), character(0) ) expect_equivalent(setdiff( as.list(tokens_ngrams(toks, n=2, skip=0:2))[[1]], two_skip_bi_grams ), character(0) ) expect_equivalent(setdiff( as.list(tokens_ngrams(toks, n=3, skip=0))[[1]], tri_grams ), character(0) ) expect_equivalent(setdiff( as.list(tokens_ngrams(toks, n=3, skip=0:2))[[1]], two_skip_tri_grams ), character(0) ) expect_equivalent(setdiff( as.list(tokens_ngrams(toks, n = 2:3))[[1]], c(bi_grams, tri_grams) ), character(0) ) expect_equivalent(setdiff( as.list(suppressWarnings(tokens_ngrams(toks, n = 2:3)))[[1]], c(bi_grams, tri_grams) ), character(0) ) }) test_that("test `tokens_ngrams` on characters", { ngms <- tokens_ngrams(c('insurgents','killed', 'in', 'ongoing', 'fighting')) charNgms <- char_ngrams(c('insurgents','killed', 'in', 'ongoing', 'fighting')) expect_equivalent( ngms, c('insurgents_killed', 'killed_in', 'in_ongoing', 'ongoing_fighting') ) expect_equivalent( charNgms, c('insurgents_killed', 'killed_in', 'in_ongoing', 'ongoing_fighting') ) expect_warning(tokens_ngrams(c('insurgents killed', 'in', 'ongoing', 'fighting')), "whitespace 
detected: you may need to run tokens\\(\\) first") expect_warning(tokens_ngrams(c('insurgents killed', 'in', 'ongoing', 'fighting'), n = 1, skip = 1), "skip argument ignored for n = 1") }) test_that("test `tokens_ngrams` on skipgrams", { toks <- tokens("insurgents killed in ongoing fighting") ngms <- tokens_skipgrams(toks, n = 2, skip = 0:1, concatenator = " ") expect_equivalent( as.list(ngms)[[1]], c('insurgents killed', "insurgents in", "killed in" , "killed ongoing" , "in ongoing", "in fighting", "ongoing fighting") ) }) # FAILLING (issue #469) # test_that("test there is not competition between the thread", { # txt <- c(one = char_tolower("Insurgents killed in ongoing fighting."), # two = "A B C D E") # toks <- tokens(txt, remove_punct = TRUE) # # for(k in 1:1000) { # out <- tokens_ngrams(toks, n = 2:3) # expect_equivalent(as.list(out), # list(c("insurgents_killed", "killed_in", "in_ongoing", "ongoing_fighting", # "insurgents_killed_in", "killed_in_ongoing", "in_ongoing_fighting"), # c("A_B", "B_C", "C_D", "D_E", "A_B_C", "B_C_D", "C_D_E"))) # } # }) # FAILLING # test_that("tokens_ngrams(x, n = ...) works when ntokens(x) < n", { # ## issue #392 # expect_equivalent(unclass(tokens_ngrams(tokens("a"), n = 2))[[1]], # char_ngrams("a", n = 2)) # })
b2a89df694f8d1df3a0c7b01e0e7fc8d5549753a
93730e6fee0ae4e3bb028714d6e8a5e335632121
/man/search_subset_node.Rd
aec08f6a72d9ce66ac774a6b0c740886af2e4fa3
[]
no_license
jeevanyue/PIVOT
56ef7c2d2eb30a197b1c0387ae79e73605ab7eae
46fa6af11f19c320ee338452ccff745aa93a1f6d
refs/heads/master
2021-01-01T06:30:45.988532
2017-07-17T02:51:20
2017-07-17T02:51:20
null
0
0
null
null
null
null
UTF-8
R
false
true
352
rd
search_subset_node.Rd
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/data_map.R \name{search_subset_node} \alias{search_subset_node} \title{PIVOT data management function} \usage{ search_subset_node(tbl, node) } \description{ This function is for determining which sample subset the filter operation is performed on, if not keep filtering. }
e9b8d5de340a9ffd1ffa3dc68f37ef28642e5d30
58f007d9b5414c747998bf0bc655ed2428ac06dc
/autres_exemples/gh.R
f7aeaf21739da5124a778c37b9a1db17dba12ea6
[]
no_license
GuillaumePressiat/formation_pmsi_R
9e9cfc76532c2e97ae9e4d64850c0b3d959f3f3a
78b9db64b79bc8ad179bdd90e60ebcc7db63d335
refs/heads/master
2020-03-25T03:00:28.155493
2018-09-07T13:19:49
2018-09-07T13:19:49
143,318,013
1
0
null
null
null
null
UTF-8
R
false
false
1,865
r
gh.R
# Faire une base par GH liste_hop <- c('750100125', '940100035') library(pmeasyr) library(dplyr, warn.conflicts = FALSE) p <- noyau_pmeasyr( annee = 2018, mois = 7, path = '~/Documents/data/mco', progress = FALSE, tolower_names = TRUE ) rsa_gh <- liste_hop %>% purrr::map(function(i){ p$finess <- i; adezip(p, type = "out", liste = 'rsa') irsa(p) %>% purrr::map(function(table) {mutate(table, f = i)})}) rsa_gh_fin <- list() rsa_gh_fin$rsa <- rsa_gh %>% purrr::map('rsa') %>% bind_rows() rsa_gh_fin$rsa_um <- rsa_gh %>% purrr::map('rsa_um') %>% bind_rows() rsa_gh_fin$das <- rsa_gh %>% purrr::map('das') %>% bind_rows() rsa_gh_fin$actes <- rsa_gh %>% purrr::map('actes') %>% bind_rows() rsa_gh_fin ano_gh <- liste_hop %>% purrr::map(function(i){ p$finess <- i; adezip(p, type = "out", liste = 'ano') iano_mco(p) %>% mutate( f = i)}) %>% bind_rows() # Chainer en intra GH get_hop_from_gh <- function(table, fin){ table %>% filter(f == fin) } rsa_psl <- get_hop_from_gh(rsa_gh_fin$rsa, '750100125') rsa_cfx <- get_hop_from_gh(rsa_gh_fin$rsa, '940100035') ano_psl <- get_hop_from_gh(ano_gh, '750100125') ano_cfx <- get_hop_from_gh(ano_gh, '940100035') rsa_psl %>% select(cle_rsa, ghm, duree) %>% inner_join(ano_psl %>% filter(cok) %>% select(cle_rsa, noanon, nosej), by = 'cle_rsa') %>% left_join( rsa_cfx %>% select(cle_rsa, ghm, duree) %>% inner_join(ano_cfx %>% filter(cok) %>% select(cle_rsa, noanon, nosej), by = 'cle_rsa'), by = 'noanon', suffix = c('_psl', '_cfx') ) %>% filter(!is.na(ghm_cfx)) %>% mutate(delta = nosej_cfx - (nosej_psl + duree_psl)) %>% filter(delta >= 0) %>% arrange(noanon, delta) %>% distinct(noanon, .keep_all = TRUE) %>% filter(delta < 5) %>% # View count(ghm_psl, ghm_cfx) %>% View
380cc47cc82a1ebe838b4f4f910e74e9dfbd62b2
2fa33aeef712fa0a1b8043b40261d218a37cafa2
/R/pbino.R
8b61c9a1eea0b48736ef9e7afce03156a8636299
[]
no_license
cran/bayess
778e3cd961acecec0ccbf0de66048543af82c98c
30208f8c4b61bc73e5885875b8134f05a963719c
refs/heads/master
2022-09-03T00:53:47.483683
2022-08-11T09:30:08
2022-08-11T09:30:08
17,694,647
0
1
null
null
null
null
UTF-8
R
false
false
190
r
pbino.R
pbino=function(nplus) { # POSTERIOR PROBABILITIES OF N FOR THE BINOMIAL CAPTURE MODEL UNDER UNIFORM PRIOR (N_MAX=400) prob=c(rep(0,max(nplus,1)-1),1/(max(nplus,1):400+1)) prob/sum(prob) }
c45f0c3e1ffce75a5583c42a5b3d5c2ee633d671
5a1e2c2f2d8ab9668acc3cbfe61b3251ea8e3fc6
/R/split_dat.R
81badbba8dc2376855f210930dab93851570fae1
[]
no_license
helixcn/phylotools
1fc5162f5dc5d1c9483a25f4ac162c5bf27c695b
758d338b2d2004c5b94f931802d269251f4fd7a1
refs/heads/master
2021-06-08T22:55:57.141365
2021-03-24T15:55:30
2021-03-24T15:55:30
35,886,086
10
11
null
null
null
null
UTF-8
R
false
false
1,176
r
split_dat.R
#### author: Jinlong Zhang <jinlongzhang01@gmail.com> #### institution: Kadoorie Farm and Botanic Garden, Hong Kong #### package: phylotools #### URL: http://github.com/helixcn/phylotools #### date: 26 MAY 2015 #### modified: 8 DEC 2017 ### Create fasta files based on the groups specified split_dat <- function(dat, ref_table){ colnames(ref_table) <- c("seq.name", "group") dat.merged <- merge(dat, ref_table, by = "seq.name", all.x = TRUE) ### save the ungrouped sequences first group.dat_i <- dat.merged[is.na(dat.merged$group), ][, -3] dat2fasta(group.dat_i, outfile = "ungrouped.fasta") ### deleted ungrouped sequences from the merged dat.merged <- na.omit(dat.merged) group.name <- as.character(unique(dat.merged$group)) ### generate fasta files according to the groups for(i in 1:length(group.name)){ group.name_i <- group.name[i] group.dat_i <- dat.merged[as.character(dat.merged$group) == group.name_i, ][, -3] dat2fasta(group.dat_i, outfile = paste(group.name_i, ".fasta", sep = "")) } cat(paste("splitted fasta files have been saved to: \n", getwd(),"\n")) }
043c9cfc8c94d1978ae1224447e4e7d5d0eaabd4
c8533654476eeec28d52b2dd47f471009a12c593
/man/predict.ConsRegArima.Rd
4bb1162e7bf516fb90c20b19d36042fb478b287e
[]
no_license
puigjos/ConsReg
e64d03eca6b6b37d116682e7cdda10da73decb63
50b7df1faaccc888036bed3622c4651e669be672
refs/heads/master
2021-05-20T01:42:51.925681
2020-04-03T13:29:22
2020-04-03T13:29:22
252,132,981
2
0
null
null
null
null
UTF-8
R
false
true
1,357
rd
predict.ConsRegArima.Rd
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/predict_ConsRegArima.R \name{predict.ConsRegArima} \alias{predict.ConsRegArima} \alias{print.predict.ConsRegArima} \alias{plot.predict.ConsRegArima} \title{Predict function for ConsRegArima object} \usage{ \method{predict}{ConsRegArima}(object, h = ifelse(is.null(newdata), 1, nrow(newdata)), newdata = NULL, intervals = 90, origdata = NULL, ...) \method{print}{predict.ConsRegArima}(x, ...) \method{plot}{predict.ConsRegArima}(x, ...) } \arguments{ \item{object}{ConsRegArima object} \item{h}{horizont to predict} \item{newdata}{data frame in which to look for variables with which to predict. In case there is no regression part, this parameter could be set NULL} \item{intervals}{Confidence level for prediction intervals (default 90)} \item{origdata}{Original data (default NULL). Useful if lagged predictive variables are used in the formula} \item{...}{Additional params passed to the function ggplot2::labs} \item{x}{object of class predict.ConsRegArima} } \value{ Returns an object of class predict.ConsRegArima \item{predict}{dataframe with the predictions} \item{table}{dataframe with the predictions as well as the fitted values} \item{intervals}{Interval level} \item{object}{original object} } \description{ Obtains predictions of ConsRegArima object }
059664936fc9ac4daf462b89ae7ffb9db92d09b6
e189d2945876e7b372d3081f4c3b4195cf443982
/man/icevision_Flip.Rd
61b983dd40b7a6cbd34f8f311607fda990763446
[ "Apache-2.0" ]
permissive
Cdk29/fastai
1f7a50662ed6204846975395927fce750ff65198
974677ad9d63fd4fa642a62583a5ae8b1610947b
refs/heads/master
2023-04-14T09:00:08.682659
2021-04-30T12:18:58
2021-04-30T12:18:58
324,944,638
0
1
Apache-2.0
2021-04-21T08:59:47
2020-12-28T07:38:23
null
UTF-8
R
false
true
491
rd
icevision_Flip.Rd
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/icevision_albumentations.R \name{icevision_Flip} \alias{icevision_Flip} \title{Flip} \usage{ icevision_Flip(always_apply = FALSE, p = 0.5) } \arguments{ \item{always_apply}{always_apply} \item{p}{p} } \value{ None } \description{ Flip the input either horizontally, vertically or both horizontally and vertically. } \section{Targets}{ image, mask, bboxes, keypoints } \section{Image types}{ uint8, float32 }
63e8b8ce55346dbbbbbce18321f8ec59c5f873c4
ac40ac14de2765386f86ad7a6c4d44293226ce1f
/Machine Learning II/Lab II/ISLR 7-8 Lab.R
15afe1f70a65a3e4a4ef838cf5e760a4ca09e29d
[]
no_license
King-AK/MSBA
846974427be025769294a6ed8e3cc65ab55e1e31
ece0a24e4ed8da11b95317de35287a75280a1a3e
refs/heads/master
2020-01-23T21:24:17.933797
2017-06-06T15:34:13
2017-06-06T15:34:13
67,090,677
0
1
null
null
null
null
UTF-8
R
false
false
7,287
r
ISLR 7-8 Lab.R
#VJ Davey #ISLR 7.8 Lab #non-linear modeling rm(list = ls()) library(ISLR) attach(Wage) #Going to analyze the Wage data in order to illustrate that nonlinear fitting procedures can be easly implemented in R ### 7.8.1 Polynomial Regression and Step Functions fit = lm(wage~poly(age,4),data=Wage) summary(fit) coef(summary(fit)) #above: a linear model has been fit using the lm() function in order to predict wage using a 4th degree polynomial. #poly() allows us to avoid writing out a long formula with powers of age. #the poly() function returns a matrix whose columns are a basis of orthogonal polynomials, which essentially means that each column is a linear combination of the variables age, age^2, age^3, and age^4 #we can use poly() to obtain age, age2, age^3, and age^4 directly by using the raw=TRUE argument fit2=lm(wage~poly(age,4,raw=TRUE), data=Wage) coef(summary(fit2)) #there are several other equivalent ways of fitting this model, which showcase the flexibility of the formula language #for example: fit2a=lm(wage~age+I(age^2)+I(age^3)+I(age^4), data=Wage) coef(fit2a) #the above simiply creates the polynomial basis functions on the fly, taking care to protect terms like age^2 via the wrapper function I() #the following will do the same, though more compactly, by using the cbind() function for building a matrix from a collection of vectors fit2b=lm(wage~cbind(age,age^2,age^3,age^4),data=Wage) #we now create a grid of values for age at which we want predcitions for wage, and then call predict() agelims=range(age) age.grid=seq(from=agelims[1],to=agelims[2]) preds=predict(fit,newdata=list(age=age.grid),se=TRUE) se.bands=cbind(preds$fit+2*preds$se.fit,preds$fit-2*preds$se.fit) #finally, we plot the data and add the fit from the degree-4 polynomial par(mfrow=c(1,2),mar=c(4.5,4.5,1,1), oma=c(0,0,4,0)) plot(age,wage,xlim=agelims,cex=.5,col="darkgrey") title("Degree-4 Polynomial", outer=T) lines(age.grid,preds$fit,lwd=2, col="blue") 
matlines(age.grid,se.bands,lwd=1,col="blue",lty=3) #mar and oma arguments allow us to control the margins of the plot #title function creates a figure title that spans both subplots #In performing a polynomial regression, we must decide on the degree of the polynomial to use. We can do this using hypothesis tests. We can fit models ranging from linear to degree-p and seek to determine te simplest model which is sufficient to explain the relationship between Wage and Age. #we use the anova() function to perform an analysis of variance (ANOVA using an f-test) in order to test the null hypothesis that a model M1 is sufficient to explain the data against the hypothesis that a more complex model M2 is required #in order to use anova() M1 and M2 must be nested models: the predeictors in M1 must be a subset of the predictors in M2 #in this case, we fit five different models and sequentially compare the simpler model to the more complex model fit.1=lm(wage~age,data=Wage) fit.2=lm(wage~poly(age,2),data=Wage) fit.3=lm(wage~poly(age,3),data=Wage) fit.4=lm(wage~poly(age,4),data=Wage) fit.5=lm(wage~poly(age,5),data=Wage) anova(fit.1,fit.2,fit.3,fit.4,fit.5) #the p=value comparing the linear model 1 to the quadratic model 2 is essentially 0, indicating that a linear fit is not sufficient #the p-value comparing the quadratic model 2 to the cubic model 3 is also very low, so the quadratic fit is insufficient #quartic is around 5% #quintic is way too much #So either a cubic or quartic model give the most reasonable fit to the data, but higher or lower order models are not justified via ANOVA #as an alternative to using hypothesis tests and ANOVA, we could choose polynomial degree using Cross Validation #we can also do classification much in the same way. 
We make sure we have an appropriate response vector and then apply the glm() function using the appropriate family (binomial, multinomial) in order to fit a polynomial logistic regression model fit=glm(I(wage>250)~poly(age,4), data=Wage,family=binomial) preds=predict(fit,newdata=list(age=age.grid),se=TRUE) #calculating the confidence intervals is more involved than in the linear regression case. pfit=exp(preds$fit)/(1+exp(preds$fit)) se.bands.logit = cbind(preds$fit +2* preds$se.fit , preds$fit-2*preds$se.fit) se.bands = exp(se.bands.logit)/(1+exp(se.bands.logit)) #plot the fit plot(age,I(wage>250),xlim=agelims,type="n",ylim=c(0,0.2)) points(jitter(age), I((wage>250)/5), cex=.5, pch="|", col="darkgrey") lines(age.grid,pfit,lwd=2,col="blue") matlines(age.grid, se.bands,lwd=1,col="blue",lty=3) #step functions ###7.8.2 Splines #regression splines can be fit by constructing an appropriate matrix of basis functions #the bs() function generates the entire matrix of basis functions for splines with the specified set of knots. By default, cubic splines are produced. library(splines) fit <- lm(wage ~ bs(age, knots = c(25, 40, 60)), data = Wage) pred <- predict(fit, newdata = list(age = age.grid), se = TRUE) plot(age, wage, col = "gray") lines(age.grid, pred$fit, lwd = 2) lines(age.grid, pred$fit + 2*pred$se, lty = "dashed") lines(age.grid, pred$fit - 2*pred$se, lty = "dashed") #we specified knots at ages 25, 40 and 60. This produces a spline with six basis functions. 
#we could also use the df() function to produce a spline with knots at uniform quantiles of the data dim(bs(age,knots=c(25,40,60))) dim(bs(age,df=6)) attr(bs(age,df=6),"knots") #In this case, R would choose knots at 33.8, 42.0 and 51.0, which correspond to the 25th, 50th and 75th percentiles of age #the bs() function also has a degree argument, so we can fit splines of any degree, rather than the default of 3 for cubic splines #in order to fit a natural spline, we use the ns() function fit2=lm(wage~ns(age,df=4),data=Wage) pred2=predict(fit2,newdata=list(age=age.grid),se=TRUE) lines(age.grid,pred2$fit,col="red",lwd=2) #as with the bs() function we could instead specify the knots directly using the knots option #in order to fit a smoothing spline, we use the smooth.spline() function plot(age,wage,xlim=agelims,cex=.5,col="darkgrey") title("Smoothing Spline") fit=smooth.spline(age,wage,df=16) fit2=smooth.spline(age,wage,cv=TRUE) fit2$df lines(fit,col="red",lwd=2) lines(fit2,col="blue",lwd=2) legend("topright",legend=c("16 DF", "6.8 DF"), col=c("red","blue"),lty=1,lwd=2,cex=.8) #in the first call of smooth.spline() we specified the df as being 16 #in the second call to smooth.spline() we selected the smoothness level with cross validation, yielding a lambda value that yields 6.8 degrees of freedom #in order to perform local regression we use the loess() function plot(age,wage,xlim=agelims,cex=.5,col="darkgrey") title("Local Regression") fit=loess(wage~age,span=.2,data=Wage) fit2=loess(wage~age,span=.5,data=Wage) lines(age.grid,predict(fit,data.frame(age=age.grid)),col="red",lwd=2) lines(age.grid,predict(fit2,data.frame(age=age.grid)),col="blue",lwd=2) legend("topright",legend=c("Span=0.2","Span=0.5"),col=c("red","blue"),lty=1,lwd=2,cex=.8) #we havev performed local regression using spans of 0.2 and 0.5: that is, each neighborhood consists of 20 or 50% of the observations. The larger the span, the smoother the fit.
c1fa80208403498f786701b8a9d379dbdb077a5d
85641490bc20f312161d6ababd651e2503bab20c
/Metabolomics stats course.R
64d786edc5a409d38e89b33cb05944dffd6bafb1
[]
no_license
JoeRothwell/MOOCs
159236fcbf2baf7e0382d73c1e2222a5d262f8bb
70f8583d376ee779fe25b4427cad36f5b2aafa75
refs/heads/master
2023-04-29T22:41:56.455539
2023-04-24T06:28:18
2023-04-24T06:28:18
180,340,799
0
0
null
null
null
null
UTF-8
R
false
false
8,065
r
Metabolomics stats course.R
# Functions 1 ----------------------------------- describe <- function(object, ...) { UseMethod("describe") } describe.default <- function(object, var.name, na.rm=TRUE) { if (!is.numeric(object)) { stop(deparse(substitute(object)), " must be numeric") } if (missing(var.name)) { var.name <- deparse(substitute(object)) } par(mfrow=c(2, 3), pty="s") plot(density(object, na.rm=na.rm), main="Density") hist(object, main="Histogram", xlab=var.name) qqnorm(object, pch=20, main="Normal Q-Q plot") qqline(object) plot(density(log(object), na.rm=na.rm), main="Density") hist(log(object), main="Histogram", xlab=sprintf("log(%s)", var.name)) qqnorm(log(object), pch=20, main="Normal Q-Q plot") qqline(log(object)) } describe.data.frame <- function(object, na.rm=TRUE) { devAskNewPage(ask=TRUE) invisible(lapply(object, function(x) { var.name <- names(eval(sys.call(1)[[2]]))[substitute(x)[[3]]] describe(x, var.name, na.rm) })) } describe.matrix <- function(object, na.rm=TRUE) { describe(as.data.frame(object), na.rm) } # Functions 2 -------------------------------------------- library(lme4) library(pheatmap) pqq <- function(pvals, alpha=0.05, add=FALSE, ...) { pvals <- na.omit(pvals) n <- length(pvals) exp <- -log10(1:n / n) obs <- -log10(sort(pvals)) if (!add) { ci <- -log10(cbind( qbeta(alpha/2, 1:n, n - 1:n + 1), qbeta(1 - alpha/2, 1:n, n - 1:n + 1) )) xlim <- c(0, exp[1]) ylim <- c(0, max(obs[1], ci[1,])) par(pty="s") matplot(exp, ci, type="l", lty=2, col="gray50", xlim=xlim, ylim=ylim, xlab=expression(Expected~~-log[10]~italic(p)), ylab=expression(Observed~~-log[10]~italic(p)), ...) abline(0, 1, col="gray25") } points(exp, obs, pch=20, ...) 
invisible(NULL) } response.name <- function(formula) { tt <- terms(formula) vars <- as.character(attr(tt, "variables"))[-1] vars[attr(tt, "response")] } mlm <- function(formula, data=NULL, vars) { Y <- get(response.name(formula)) formula <- update(formula, "NULL ~ .") mf <- model.frame(formula, data, drop.unused.levels=TRUE) mm <- model.matrix(formula, mf) labs <- labels(terms(formula)) if (missing(vars)) { vars <- labs } colnames(mm) <- sprintf("V%d", 1:ncol(mm)) new.vars <- colnames(mm)[attr(mm, "assign") %in% match(vars, labs)] mm <- as.data.frame(mm) formula <- as.formula(sprintf("y ~ %s - 1", paste0(colnames(mm), collapse=" + ") )) coefs <- array(NA, c(ncol(Y), length(vars), 3), dimnames=list(colnames(Y), vars, c("coef", "coef.se", "pval"))) options(warn=2) for (i in 1:ncol(Y)) { mm$y <- Y[,i] model <- try(lm(formula, data=mm), silent=TRUE) if (inherits(model, "try-error")) { next } tmp <- try(coef(summary(model)), silent=TRUE) if (inherits(tmp, "try-error")) { next } for (j in 1:length(vars)) if (new.vars[j] %in% rownames(tmp)) { coefs[i,vars[j],"coef"] <- tmp[new.vars[j],"Estimate"] coefs[i,vars[j],"coef.se"] <- tmp[new.vars[j],"Std. 
Error"] coefs[i,vars[j],"pval"] <- tmp[new.vars[j],"Pr(>|t|)"] } } if (length(vars) == 1) { coefs <- as.data.frame(coefs[,1,]) } coefs } setGeneric("VarComp", function(object) { standardGeneric("VarComp") }) setMethod("VarComp", signature(object="merMod"), function(object) { vc <- VarCorr(object) sds <- lapply(vc, attr, which="stddev") data.frame( group=c(rep(names(sds), unlist(lapply(sds, length))), "Residual"), var.name=c(c(lapply(sds, names), recursive=TRUE), NA), var=c(c(sds, recursive=TRUE), attr(vc, "sc"))**2, row.names=NULL, stringsAsFactors=FALSE ) }) setGeneric("re.rank", function(object, whichel) { standardGeneric("re.rank") }) setMethod("re.rank", signature(object="merMod", whichel="missing"), function(object, whichel) { re.rank(object, names(getME(object, "flist"))) }) setMethod("re.rank", signature(object="merMod", whichel="character"), function(object, whichel) { vc <- subset(VarComp(object), group %in% whichel & var.name == "(Intercept)") groups <- vc$group[which(vc$var > .Machine$double.eps)] lapply(ranef(object)[groups], function(x) { structure(rank(x[,"(Intercept)"]), names=rownames(x)) }) }) mlmer <- function(formula, data=NULL, vars, lrt=TRUE) { Y <- get(response.name(formula)) lf <- lFormula(update(formula, "NULL ~ ."), data, REML=FALSE, control=lmerControl(check.formula.LHS="ignore")) labs <- as.character(attr(terms(lf$fr), "predvars.fixed")[-1]) if (missing(vars)) { vars <- labs } mm <- lf$X colnames(mm) <- sprintf("V%d", 1:ncol(mm)) new.vars <- colnames(mm)[which(attr(lf$X, "assign") %in% match(vars, labs))] formula <- as.formula(sprintf("y ~ %s - 1", paste0(c( sprintf("(%s)", findbars(formula)), colnames(mm) ), collapse=" + ") )) model.data <- cbind(mm, lf$reTrms$flist) coefs <- array(NA, c(ncol(Y), length(vars), 3), dimnames=list(colnames(Y), vars, c("coef", "coef.se", "pval"))) re.ranks <- lapply(lf$reTrms$flist, function(x) { tmp <- matrix(0, nlevels(x), nlevels(x), dimnames=list(levels(x), 1:nlevels(x))) attr(tmp, "count") <- 0 tmp }) 
options(warn=2) for (i in 1:ncol(Y)) { model.data$y <- Y[,i] data.subset <- subset(model.data, !is.na(y)) model <- try(lmer(formula, data=data.subset, REML=FALSE), silent=TRUE) if (inherits(model, "try-error")) { next } tmp <- try(coef(summary(model)), silent=TRUE) if (inherits(tmp, "try-error")) { next } for (j in 1:length(vars)) if (new.vars[j] %in% rownames(tmp)) { coefs[i,vars[j],"coef"] <- tmp[new.vars[j],"Estimate"] coefs[i,vars[j],"coef.se"] <- tmp[new.vars[j],"Std. Error"] if (!lrt) { coefs[i,vars[j],"pval"] <- 2 * pt(abs(tmp[new.vars[j],"t value"]), df=df.residual(model), lower.tail=FALSE) } else { lrt.formula <- as.formula(sprintf(". ~ . - %s", new.vars[j])) model0 <- try(update(model, lrt.formula), silent=TRUE) if (inherits(model0, "try-error")) { next } coefs[i,vars[j],"pval"] <- anova(model0, model)["model","Pr(>Chisq)"] } } ranks <- lapply(re.rank(model), function(x) { diag(length(x))[x,] }) for (g in names(ranks)) { re.ranks[[g]] <- re.ranks[[g]] + ranks[[g]] attr(re.ranks[[g]], "count") <- attr(re.ranks[[g]], "count") + 1 } } if (length(vars) == 1) { coefs <- as.data.frame(coefs[,1,]) } list(coefs=coefs, re.ranks=re.ranks) } plot.ranks <- function(x, col="red") { x <- x / attr(x, "count") breaks <- seq(0, max(x), length.out=101) cols <- c( rep("white", sum(breaks <= 1 / ncol(x)) - 1), colorRampPalette(c("white", col))(sum(breaks > 1 / ncol(x))) ) pheatmap(x, color=cols, breaks=breaks, cluster_rows=FALSE, cluster_cols=FALSE) invisible(NULL) } # Functions 3---------------------------------------------------------------- library(glmnet) best.coefs <- function(model) { stopifnot(inherits(model, "cv.glmnet")) best.model <- which.min(model$cvm) if (!is.list(model$glmnet.fit$beta)) { model$glmnet.fit$beta <- list(model$glmnet.fit$beta) } tmp <- lapply(model$glmnet.fit$beta, function(x) { x <- x[,best.model] x[abs(x) > sqrt(.Machine$double.eps)] }) with(model$glmnet.fit, cbind( `(Intercept)`=a0[best.model], do.call(rbind, tmp) )) }
bf0ec8e2590c30856bd0ea5c6769d999a24ff03d
21327fc1d5030fc4bb69e6505b66b471290fae5e
/commands.R
f12df4b8259aadf00b92842c5346d677a430a767
[]
no_license
sportebois/Rbash
995f6c6aac874cde109c7ae661d76cc49d005421
de809e643055f4856c5175bb44e71bdb4f4d53f3
refs/heads/master
2021-01-10T03:42:28.372645
2016-03-14T01:23:53
2016-03-14T01:23:53
53,816,427
0
0
null
null
null
null
UTF-8
R
false
false
290
r
commands.R
#commands.R print("sourcing 'commands.R'") source("defaultTestValue.R") testVal <- getOption("test", default = "defaultTestInCommands") print(paste0("test value is ", testVal)) testVal2 <- getOption("test2", default = "defaultTest2InCommands") print(paste0("test2 value is ", testVal2))
2f26cb4c59202ac86d289aadb0a1c2efca0d2bd3
c3bcb4169d97579d67e14198b7ed827562f7c7bb
/Supervised_Learning/project_1.R
3fbcc8c83b83cda25cce266725438b14ae39f679
[]
no_license
xiaomizhou616/Machine-Learning
c11c5cfa582231abd6438ca2c5fe937129a37887
66287a275a62305c340933041c981b7661badbe6
refs/heads/master
2020-04-11T06:46:37.706765
2018-12-13T08:20:40
2018-12-13T08:20:40
161,591,458
0
0
null
null
null
null
UTF-8
R
false
false
32,848
r
project_1.R
#protein solubility dataset #preprocessing data total=read.csv("solubility2.csv") total[,1]<-NULL total[,1]<-NULL total[ ,1][total[ ,1]>100]<-100 total[ ,1]<-total[ ,1]/100 colnames(total)[1] <- "solubility" total[,3]<-total[,1] total[,1]<-NULL x<-na.omit(total) x[ ,1]<-as.character(x[ ,1]) length(x[ ,1]) library("protr") x[ ,1] = x[ ,1][(sapply(x[ ,1], protcheck))] length(x[ ,1]) x1 = t(sapply(x[ ,1], extractAAC)) x[ ,3:22] <- x1[ ,1:20] x[ ,1]<-NULL x[ ,22]<-x[ ,1] x[ ,1]<-NULL colnames(x)[21] <- "solubility" write.csv(x,"cleandata_1.csv") clean7=read.csv("cleandata_1.csv") clean7[ ,1]<-NULL clean7$solubility[clean7$solubility >= 0.5] <-1 clean7$solubility[clean7$solubility < 0.5] <- 0 # 70% training data, 10 fold cross validation, 30% test data library('caret') set.seed(5) TrainingDataIndex <- createDataPartition(clean7[ ,21], p=0.7, list = FALSE) train7 <- clean7[TrainingDataIndex,] test7<- clean7[-TrainingDataIndex,] #decison tree # train accuracy testdata<-train7 testdata[ ,21]<-NULL install.packages("party") library(party) #output.tree <- ctree(solubility ~ ., data=train7) output.tree <- ctree(solubility ~ ., data=train7,controls = ctree_control(mincriterion = 0.1)) pred <- predict(output.tree, newdata = testdata) prediction<-data.frame(pred) prediction[,1][prediction[,1] >= 0.5] <-1 prediction[,1][prediction[,1] < 0.5] <- 0 cm = as.matrix(table(Actual = train7$solubility, Predicted = prediction[,1])) sum(diag(cm))/length(train7$solubility) plot(output.tree) #prune tree #0.1 249 0.8357532 #0.2 189 0.8212341 #0.3 147 0.8030853 #0.4 141 0.7999093 #0.5 121 0.7881125 #0.6 97 0.7767695 #0.7 59 0.7617967 #0.8 45 0.7459165 #0.9 39 0.7436479 #default mincriterion 0.95 29 nodes accuracy 0.7345735 #mincriterion 0.98 25 nodes accuracy 0.7214156 #0.99 25 0.7214156 #1 1 0.5494555 # cv accuracy d1 = NULL for (x in seq(0,1,0.1)) { d2 = NULL yourData<-train7[sample(nrow(train7)),] folds <- cut(seq(1,nrow(yourData)),breaks=10,labels=FALSE) for (i in seq(1,10,1)) { 
testIndexes <- which(folds==i,arr.ind=TRUE) test_7 <- yourData[testIndexes, ] train_7 <- yourData[-testIndexes, ] testdata_7<-test_7 test_7[ ,21]<-NULL output.tree <- ctree(solubility ~ ., data=train_7,controls = ctree_control(mincriterion = x)) pred <- predict(output.tree, newdata = test_7) prediction<-data.frame(pred) prediction[,1][prediction[,1] >= 0.5] <-1 prediction[,1][prediction[,1] < 0.5] <- 0 cm = as.matrix(table(Actual = testdata_7$solubility, Predicted = prediction[,1])) accuracy=sum(diag(cm))/length(testdata_7$solubility) d2 = rbind(d2, data.frame(i,accuracy)) } average_accuracy=sum(d2[1:10,2])/10 d1 = rbind(d1, data.frame(x,average_accuracy)) } write.csv(d1,"DT_1.csv") #test data testdata<-test7 testdata[ ,21]<-NULL d1 = NULL for (x in seq(0,1,0.1)) { output.tree <- ctree(solubility ~ ., data=train7,controls = ctree_control(mincriterion = x)) pred <- predict(output.tree, newdata = testdata) prediction<-data.frame(pred) prediction[,1][prediction[,1] >= 0.5] <-1 prediction[,1][prediction[,1] < 0.5] <- 0 cm = as.matrix(table(Actual = test7$solubility, Predicted = prediction[,1])) accuracy=sum(diag(cm))/length(test7$solubility) d1 = rbind(d1, data.frame(x,accuracy)) } write.csv(d1,"DT_2.csv") # choose mincriterion 0.8 # change training data ratio #calculate train accuracy d1 = NULL for (x in seq(0.1,1,0.1)) { set.seed(5) TrainingDataIndex <- createDataPartition(train7[ ,21], p=x, list = FALSE) train_7 <- train7[TrainingDataIndex,] test_7<- train7[-TrainingDataIndex,] testdata<-train_7 testdata[ ,21]<-NULL output.tree <- ctree(solubility ~ ., data=train_7,controls = ctree_control(mincriterion = 0.8)) pred <- predict(output.tree, newdata = testdata) prediction<-data.frame(pred) prediction[,1][prediction[,1] >= 0.5] <-1 prediction[,1][prediction[,1] < 0.5] <- 0 cm = as.matrix(table(Actual = train_7$solubility, Predicted = prediction[,1])) accuracy=sum(diag(cm))/length(train_7$solubility) d1 = rbind(d1, data.frame(x,accuracy)) } write.csv(d1,"DT_3.csv") #cv 
accuracy d2 = NULL for (x in seq(0.1,1,0.1)) { set.seed(5) TrainingDataIndex <- createDataPartition(train7[ ,21], p=x, list = FALSE) train_7 <- train7[TrainingDataIndex,] test_7<- train7[-TrainingDataIndex,] d3 = NULL yourData<-train_7[sample(nrow(train_7)),] folds <- cut(seq(1,nrow(yourData)),breaks=10,labels=FALSE) for (i in seq(1,10,1)) { testIndexes <- which(folds==i,arr.ind=TRUE) test_7data <- yourData[testIndexes, ] train_7data <- yourData[-testIndexes, ] testdata_7data<-test_7data test_7data[ ,21]<-NULL output.tree <- ctree(solubility ~ ., data=train_7data,controls = ctree_control(mincriterion = 0.8)) pred <- predict(output.tree, newdata = test_7data) prediction<-data.frame(pred) prediction[,1][prediction[,1] >= 0.5] <-1 prediction[,1][prediction[,1] < 0.5] <- 0 cm = as.matrix(table(Actual = testdata_7data$solubility, Predicted = prediction[,1])) accuracy=sum(diag(cm))/length(testdata_7data$solubility) d3 = rbind(d3, data.frame(i,accuracy)) } average_accuracy=sum(d3[1:10,2])/10 d2 = rbind(d2, data.frame(x,average_accuracy)) } d1$accuracy2=d2[,2] write.csv(d1,"DT_3.csv") # test data d1 = NULL for (x in seq(0.1,1,0.1)) { set.seed(5) TrainingDataIndex <- createDataPartition(train7[ ,21], p=x, list = FALSE) train_7 <- train7[TrainingDataIndex,] test_7<- train7[-TrainingDataIndex,] testdata<-train_7 testdata[ ,21]<-NULL output.tree <- ctree(solubility ~ ., data=train_7,controls = ctree_control(mincriterion = 0.8)) pred <- predict(output.tree, newdata = test7) prediction<-data.frame(pred) prediction[,1][prediction[,1] >= 0.5] <-1 prediction[,1][prediction[,1] < 0.5] <- 0 cm = as.matrix(table(Actual = test7$solubility, Predicted = prediction[,1])) accuracy=sum(diag(cm))/length(test7$solubility) d1 = rbind(d1, data.frame(x,accuracy)) } # svm # kernel #train accuracy library("e1071") testdata<-train7 testdata[ ,21]<-NULL svm_model <- svm(solubility ~ ., train7,kernel ="radial") pred <- predict(svm_model, newdata = testdata) prediction<-data.frame(pred) 
prediction[,1][prediction[,1] >= 0.5] <-1 prediction[,1][prediction[,1] < 0.5] <- 0 cm = as.matrix(table(Actual = train7$solubility, Predicted = prediction[,1])) sum(diag(cm))/length(train7$solubility) #radial 0.8393829 cv 0.7363739 #linear 0.7068966 0.6996154 #sigmoid 0.4664247 0.5131201 #polynomial 0.7654265 0.647427 # kernel cv accuracy #cv accuracy d2 = NULL yourData<-train7[sample(nrow(train7)),] folds <- cut(seq(1,nrow(yourData)),breaks=10,labels=FALSE) for (i in seq(1,10,1)) { testIndexes <- which(folds==i,arr.ind=TRUE) test_7 <- yourData[testIndexes, ] train_7 <- yourData[-testIndexes, ] testdata_7<-test_7 test_7[ ,21]<-NULL svm_model <- svm(solubility ~ ., train_7,kernel ="polynomial") pred <- predict(svm_model, newdata = test_7) prediction<-data.frame(pred) prediction[,1][prediction[,1] >= 0.5] <-1 prediction[,1][prediction[,1] < 0.5] <- 0 cm = as.matrix(table(Actual = testdata_7$solubility, Predicted = prediction[,1])) accuracy=sum(diag(cm))/length(testdata_7$solubility) d2 = rbind(d2, data.frame(i,accuracy)) } average_accuracy=sum(d2[1:10,2])/10 average_accuracy # training data ratio # train accuracy d1 = NULL for (x in seq(0.1,1,0.1)) { set.seed(5) TrainingDataIndex <- createDataPartition(train7[ ,21], p=x, list = FALSE) train_7 <- train7[TrainingDataIndex,] test_7<- train7[-TrainingDataIndex,] testdata<-train_7 testdata[ ,21]<-NULL svm_model <- svm(solubility ~ ., train_7,kernel ="radial") pred <- predict(svm_model, newdata = testdata) prediction<-data.frame(pred) prediction[,1][prediction[,1] >= 0.5] <-1 prediction[,1][prediction[,1] < 0.5] <- 0 cm = as.matrix(table(Actual = train_7$solubility, Predicted = prediction[,1])) accuracy=sum(diag(cm))/length(train_7$solubility) d1 = rbind(d1, data.frame(x,accuracy)) } # cv accuracy d2 = NULL for (x in seq(0.1,1,0.1)) { set.seed(5) TrainingDataIndex <- createDataPartition(train7[ ,21], p=x, list = FALSE) train_7 <- train7[TrainingDataIndex,] test_7<- train7[-TrainingDataIndex,] d3 = NULL 
yourData<-train_7[sample(nrow(train_7)),] folds <- cut(seq(1,nrow(yourData)),breaks=10,labels=FALSE) for (i in seq(1,10,1)) { testIndexes <- which(folds==i,arr.ind=TRUE) test_7data <- yourData[testIndexes, ] train_7data <- yourData[-testIndexes, ] testdata_7data<-test_7data test_7data[ ,21]<-NULL svm_model <- svm(solubility ~ ., train_7data,kernel ="radial") pred <- predict(svm_model, newdata = test_7data) prediction<-data.frame(pred) prediction[,1][prediction[,1] >= 0.5] <-1 prediction[,1][prediction[,1] < 0.5] <- 0 cm = as.matrix(table(Actual = testdata_7data$solubility, Predicted = prediction[,1])) accuracy=sum(diag(cm))/length(testdata_7data$solubility) d3 = rbind(d3, data.frame(i,accuracy)) } average_accuracy=sum(d3[1:10,2])/10 d2 = rbind(d2, data.frame(x,average_accuracy)) } d1$accuracy2=d2[,2] write.csv(d1,"DT_4.csv") #test data d2 = NULL for (x in seq(0.1,1,0.1)) { set.seed(5) TrainingDataIndex <- createDataPartition(train7[ ,21], p=x, list = FALSE) train_7 <- train7[TrainingDataIndex,] test_7<- train7[-TrainingDataIndex,] testdata<-train_7 testdata[ ,21]<-NULL svm_model <- svm(solubility ~ ., train_7,kernel ="radial") pred <- predict(svm_model, newdata = test7) prediction<-data.frame(pred) prediction[,1][prediction[,1] >= 0.5] <-1 prediction[,1][prediction[,1] < 0.5] <- 0 cm = as.matrix(table(Actual = test7$solubility, Predicted = prediction[,1])) accuracy=sum(diag(cm))/length(test7$solubility) d2 = rbind(d2, data.frame(x,accuracy)) } d1$SVM=d2[,2] #boosted install.packages("adabag") library("adabag") train7f=train7 train7f$solubility<-as.factor(train7f$solubility) testdata<-train7f testdata[ ,21]<-NULL control=rpart.control(maxdepth=8) adaboost = boosting(solubility~., data=train7f, mfinal=100, control=control) adaboost.pred <- predict.boosting(adaboost,newdata=testdata) cm = as.matrix(table(Actual = train7f$solubility, Predicted = adaboost.pred$class)) sum(diag(cm))/length(train7f$solubility) #8 100 0.9909256 0.7150494 #8 50 0.9314882 0.7173427 #8 10 
0.8203267 0.7286508 # 8 5 0.7912886 0.7164562 # 10 5 0.7899274 0.7037392 # 6 5 0.7885662 0.7027849 # 4 5 0.7513612 0.7200555 # maxdepth 5 iteration 5 0.7749546 0.70645 #no control 0.7935572 # cv accuracy d2 = NULL yourData<-train7f[sample(nrow(train7f)),] folds <- cut(seq(1,nrow(yourData)),breaks=10,labels=FALSE) for (i in seq(1,10,1)) { testIndexes <- which(folds==i,arr.ind=TRUE) test_7 <- yourData[testIndexes, ] train_7 <- yourData[-testIndexes, ] testdata_7<-test_7 test_7[ ,21]<-NULL control=rpart.control(maxdepth=8) adaboost = boosting(solubility~., data=train_7, mfinal=100, control=control) adaboost.pred <- predict.boosting(adaboost,newdata=test_7) cm = as.matrix(table(Actual = testdata_7$solubility, Predicted = adaboost.pred$class)) accuracy=sum(diag(cm))/length(testdata_7$solubility) d2 = rbind(d2, data.frame(i,accuracy)) } average_accuracy=sum(d2[1:10,2])/10 average_accuracy # traning data ratio # train accuracy d1 = NULL for (x in seq(0.1,1,0.1)) { set.seed(5) TrainingDataIndex <- createDataPartition(train7f[ ,21], p=x, list = FALSE) train_7 <- train7f[TrainingDataIndex,] test_7<- train7f[-TrainingDataIndex,] testdata<-train_7 testdata[ ,21]<-NULL control=rpart.control(maxdepth=8) adaboost = boosting(solubility~., data=train_7, mfinal=10, control=control) adaboost.pred <- predict.boosting(adaboost,newdata=testdata) cm = as.matrix(table(Actual = train_7$solubility, Predicted = adaboost.pred$class)) accuracy=sum(diag(cm))/length(train_7$solubility) d1 = rbind(d1, data.frame(x,accuracy)) } # cv accuracy d2 = NULL for (x in seq(0.1,1,0.1)) { set.seed(5) TrainingDataIndex <- createDataPartition(train7f[ ,21], p=x, list = FALSE) train_7 <- train7f[TrainingDataIndex,] test_7<- train7f[-TrainingDataIndex,] d3 = NULL yourData<-train_7[sample(nrow(train_7)),] folds <- cut(seq(1,nrow(yourData)),breaks=10,labels=FALSE) for (i in seq(1,10,1)) { testIndexes <- which(folds==i,arr.ind=TRUE) test_7data <- yourData[testIndexes, ] train_7data <- yourData[-testIndexes, ] 
testdata_7data<-test_7data test_7data[ ,21]<-NULL control=rpart.control(maxdepth=8) adaboost = boosting(solubility~., data=train_7data, mfinal=10, control=control) adaboost.pred <- predict.boosting(adaboost,newdata=test_7data) cm = as.matrix(table(Actual = testdata_7data$solubility, Predicted = adaboost.pred$class)) accuracy=sum(diag(cm))/length(testdata_7data$solubility) d3 = rbind(d3, data.frame(i,accuracy)) } average_accuracy=sum(d3[1:10,2])/10 d2 = rbind(d2, data.frame(x,average_accuracy)) } d1$accuracy2=d2[,2] write.csv(d1,"DT_5.csv") #test data test7f=test7 test7f$solubility<-as.factor(test7f$solubility) d3 = NULL for (x in seq(0.1,1,0.1)) { set.seed(5) TrainingDataIndex <- createDataPartition(train7f[ ,21], p=x, list = FALSE) train_7 <- train7f[TrainingDataIndex,] test_7<- train7f[-TrainingDataIndex,] testdata<-train_7 testdata[ ,21]<-NULL control=rpart.control(maxdepth=8) adaboost = boosting(solubility~., data=train_7, mfinal=10, control=control) adaboost.pred <- predict.boosting(adaboost,newdata=test7f) cm = as.matrix(table(Actual = test7f$solubility, Predicted = adaboost.pred$class)) accuracy=sum(diag(cm))/length(test7f$solubility) d3 = rbind(d3, data.frame(x,accuracy)) } d1$Boosting=d3[,2] # knn # change neigbor n # train accuracy install.packages("class") library("class") knn.model=knn(train=train7[,1:20], test=train7[,1:20], cl=train7$solubility, k =30) d=table(train7$solubility,knn.model) accuracy=sum(diag(d))/sum(d) accuracy # k 100 accuracy 0.6515426 0.6465405 # 50 0.6705989 0.6565076 # 30 0.6873866 0.6601687 # 10 0.7295826 0.6660736 # 5 0.7717786 0.6655944 # 1 0.996824 0.62297 # cv accuracy d2 = NULL yourData<-train7[sample(nrow(train7)),] folds <- cut(seq(1,nrow(yourData)),breaks=10,labels=FALSE) for (i in seq(1,10,1)) { testIndexes <- which(folds==i,arr.ind=TRUE) test_7 <- yourData[testIndexes, ] train_7 <- yourData[-testIndexes, ] testdata_7<-test_7 test_7[ ,21]<-NULL knn.model=knn(train=train_7[,1:20], test=testdata_7[,1:20], 
cl=train_7$solubility, k =1) d=table(testdata_7$solubility,knn.model) accuracy=sum(diag(d))/sum(d) d2 = rbind(d2, data.frame(i,accuracy)) } average_accuracy=sum(d2[1:10,2])/10 average_accuracy # traning data ratio #train accuracy d1 = NULL for (x in seq(0.1,1,0.1)) { set.seed(5) TrainingDataIndex <- createDataPartition(train7[ ,21], p=x, list = FALSE) train_7 <- train7[TrainingDataIndex,] test_7<- train7[-TrainingDataIndex,] testdata<-train_7 testdata[ ,21]<-NULL knn.model=knn(train=train_7[,1:20], test=train_7[,1:20], cl=train_7$solubility, k =10) d=table(train_7$solubility,knn.model) accuracy=sum(diag(d))/sum(d) d1 = rbind(d1, data.frame(x,accuracy)) } # cv accuracy d2 = NULL for (x in seq(0.1,1,0.1)) { set.seed(5) TrainingDataIndex <- createDataPartition(train7[ ,21], p=x, list = FALSE) train_7 <- train7[TrainingDataIndex,] test_7<- train7[-TrainingDataIndex,] d3 = NULL yourData<-train_7[sample(nrow(train_7)),] folds <- cut(seq(1,nrow(yourData)),breaks=10,labels=FALSE) for (i in seq(1,10,1)) { testIndexes <- which(folds==i,arr.ind=TRUE) test_7data <- yourData[testIndexes, ] train_7data <- yourData[-testIndexes, ] testdata_7data<-test_7data test_7data[ ,21]<-NULL knn.model=knn(train=train_7data[,1:20], test=testdata_7data[,1:20], cl=train_7data$solubility, k =10) d=table(testdata_7data$solubility,knn.model) accuracy=sum(diag(d))/sum(d) d3 = rbind(d3, data.frame(i,accuracy)) } average_accuracy=sum(d3[1:10,2])/10 d2 = rbind(d2, data.frame(x,average_accuracy)) } d1$accuracy2=d2[,2] write.csv(d1,"DT_6.csv") # test data d4 = NULL for (x in seq(0.1,1,0.1)) { set.seed(5) TrainingDataIndex <- createDataPartition(train7[ ,21], p=x, list = FALSE) train_7 <- train7[TrainingDataIndex,] test_7<- train7[-TrainingDataIndex,] testdata<-train_7 testdata[ ,21]<-NULL knn.model=knn(train=train_7[,1:20], test=test7[,1:20], cl=train_7$solubility, k =10) d=table(test7$solubility,knn.model) accuracy=sum(diag(d))/sum(d) d4 = rbind(d4, data.frame(x,accuracy)) } d1$KNN=d4[,2] 
write.csv(d1,"DT_7.csv") # bank dataset # preprocessing clean7=read.csv("bank.csv") clean7[ ,1]<-NULL colnames(clean7)[21]="solubility" clean7$solubility=as.numeric(as.factor(clean7$solubility)) clean7$solubility[clean7$solubility == 1] <-0 clean7$solubility[clean7$solubility ==2] <- 1 # 70% training data, 10 fold cross validation, 30% test data library('caret') set.seed(5) TrainingDataIndex <- createDataPartition(clean7[ ,21], p=0.7, list = FALSE) train7 <- clean7[TrainingDataIndex,] test7<- clean7[-TrainingDataIndex,] # Decision tree # train accuracy testdata<-train7 testdata[ ,21]<-NULL install.packages("party") library(party) #output.tree <- ctree(solubility ~ ., data=train7) output.tree <- ctree(solubility ~ ., data=train7,controls = ctree_control(mincriterion = 1)) pred <- predict(output.tree, newdata = testdata) prediction<-data.frame(pred) prediction[,1][prediction[,1] >= 0.5] <-1 prediction[,1][prediction[,1] < 0.5] <- 0 cm = as.matrix(table(Actual = train7$solubility, Predicted = prediction[,1])) sum(diag(cm))/length(train7$solubility) # cv accuracy d1 = NULL for (x in seq(0,1,0.1)) { d2 = NULL yourData<-train7[sample(nrow(train7)),] folds <- cut(seq(1,nrow(yourData)),breaks=10,labels=FALSE) for (i in seq(1,10,1)) { testIndexes <- which(folds==i,arr.ind=TRUE) test_7 <- yourData[testIndexes, ] train_7 <- yourData[-testIndexes, ] testdata_7<-test_7 test_7[ ,21]<-NULL output.tree <- ctree(solubility ~ ., data=train_7,controls = ctree_control(mincriterion = x)) pred <- predict(output.tree, newdata = test_7) prediction<-data.frame(pred) prediction[,1][prediction[,1] >= 0.5] <-1 prediction[,1][prediction[,1] < 0.5] <- 0 cm = as.matrix(table(Actual = testdata_7$solubility, Predicted = prediction[,1])) accuracy=sum(diag(cm))/length(testdata_7$solubility) d2 = rbind(d2, data.frame(i,accuracy)) } average_accuracy=sum(d2[1:10,2])/10 d1 = rbind(d1, data.frame(x,average_accuracy)) } write.csv(d1,"DT_8.csv") # training data ratio # parameter 0.7 # train accuracy d1 = 
NULL for (x in seq(0.1,1,0.1)) { set.seed(5) TrainingDataIndex <- createDataPartition(train7[ ,21], p=x, list = FALSE) train_7 <- train7[TrainingDataIndex,] test_7<- train7[-TrainingDataIndex,] testdata<-train_7 testdata[ ,21]<-NULL output.tree <- ctree(solubility ~ ., data=train_7,controls = ctree_control(mincriterion = 0.7)) pred <- predict(output.tree, newdata = testdata) prediction<-data.frame(pred) prediction[,1][prediction[,1] >= 0.5] <-1 prediction[,1][prediction[,1] < 0.5] <- 0 cm = as.matrix(table(Actual = train_7$solubility, Predicted = prediction[,1])) accuracy=sum(diag(cm))/length(train_7$solubility) d1 = rbind(d1, data.frame(x,accuracy)) } #cv accuracy d2 = NULL for (x in seq(0.1,1,0.1)) { set.seed(5) TrainingDataIndex <- createDataPartition(train7[ ,21], p=x, list = FALSE) train_7 <- train7[TrainingDataIndex,] test_7<- train7[-TrainingDataIndex,] d3 = NULL yourData<-train_7[sample(nrow(train_7)),] folds <- cut(seq(1,nrow(yourData)),breaks=10,labels=FALSE) for (i in seq(1,10,1)) { testIndexes <- which(folds==i,arr.ind=TRUE) test_7data <- yourData[testIndexes, ] train_7data <- yourData[-testIndexes, ] testdata_7data<-test_7data test_7data[ ,21]<-NULL output.tree <- ctree(solubility ~ ., data=train_7data,controls = ctree_control(mincriterion = 0.7)) pred <- predict(output.tree, newdata = test_7data) prediction<-data.frame(pred) prediction[,1][prediction[,1] >= 0.5] <-1 prediction[,1][prediction[,1] < 0.5] <- 0 cm = as.matrix(table(Actual = testdata_7data$solubility, Predicted = prediction[,1])) accuracy=sum(diag(cm))/length(testdata_7data$solubility) d3 = rbind(d3, data.frame(i,accuracy)) } average_accuracy=sum(d3[1:10,2])/10 d2 = rbind(d2, data.frame(x,average_accuracy)) } d1$accuracy2=d2[,2] write.csv(d1,"DT_9.csv") # test data d10 = NULL for (x in seq(0.1,1,0.1)) { set.seed(5) TrainingDataIndex <- createDataPartition(train7[ ,21], p=x, list = FALSE) train_7 <- train7[TrainingDataIndex,] test_7<- train7[-TrainingDataIndex,] testdata<-train_7 testdata[ 
,21]<-NULL output.tree <- ctree(solubility ~ ., data=train_7,controls = ctree_control(mincriterion = 0.7)) pred <- predict(output.tree, newdata = test7) prediction<-data.frame(pred) prediction[,1][prediction[,1] >= 0.5] <-1 prediction[,1][prediction[,1] < 0.5] <- 0 cm = as.matrix(table(Actual = test7$solubility, Predicted = prediction[,1])) accuracy=sum(diag(cm))/length(test7$solubility) d10 = rbind(d10, data.frame(x,accuracy)) } #SVM #kernel #train accuracy testdata<-train7 testdata[ ,21]<-NULL svm_model <- svm(solubility ~ ., train7,kernel ="radial") pred <- predict(svm_model, newdata = testdata) prediction<-data.frame(pred) prediction[,1][prediction[,1] >= 0.5] <-1 prediction[,1][prediction[,1] < 0.5] <- 0 cm = as.matrix(table(Actual = train7$solubility, Predicted = prediction[,1])) sum(diag(cm))/length(train7$solubility) # kernel cv accuracy #radial 0.9072789 0.9057521 #linear 0.8993711 0.8991396 #sigmoid 0.8061413 0.810627 #polynomial 0.9034406 0.9017752 #cv accuracy d2 = NULL yourData<-train7[sample(nrow(train7)),] folds <- cut(seq(1,nrow(yourData)),breaks=10,labels=FALSE) for (i in seq(1,10,1)) { testIndexes <- which(folds==i,arr.ind=TRUE) test_7 <- yourData[testIndexes, ] train_7 <- yourData[-testIndexes, ] testdata_7<-test_7 test_7[ ,21]<-NULL svm_model <- svm(solubility ~ ., train_7,kernel ="radial") pred <- predict(svm_model, newdata = test_7) prediction<-data.frame(pred) prediction[,1][prediction[,1] >= 0.5] <-1 prediction[,1][prediction[,1] < 0.5] <- 0 cm = as.matrix(table(Actual = testdata_7$solubility, Predicted = prediction[,1])) accuracy=sum(diag(cm))/length(testdata_7$solubility) d2 = rbind(d2, data.frame(i,accuracy)) } average_accuracy=sum(d2[1:10,2])/10 average_accuracy # training data ratio # train accuracy d1 = NULL for (x in seq(0.1,1,0.1)) { set.seed(5) TrainingDataIndex <- createDataPartition(train7[ ,21], p=x, list = FALSE) train_7 <- train7[TrainingDataIndex,] test_7<- train7[-TrainingDataIndex,] testdata<-train_7 testdata[ ,21]<-NULL 
svm_model <- svm(solubility ~ ., train_7,kernel ="radial") pred <- predict(svm_model, newdata = testdata) prediction<-data.frame(pred) prediction[,1][prediction[,1] >= 0.5] <-1 prediction[,1][prediction[,1] < 0.5] <- 0 cm = as.matrix(table(Actual = train_7$solubility, Predicted = prediction[,1])) accuracy=sum(diag(cm))/length(train_7$solubility) d1 = rbind(d1, data.frame(x,accuracy)) } # cv accuracy d2 = NULL for (x in seq(0.1,1,0.1)) { set.seed(5) TrainingDataIndex <- createDataPartition(train7[ ,21], p=x, list = FALSE) train_7 <- train7[TrainingDataIndex,] test_7<- train7[-TrainingDataIndex,] d3 = NULL yourData<-train_7[sample(nrow(train_7)),] folds <- cut(seq(1,nrow(yourData)),breaks=10,labels=FALSE) for (i in seq(1,10,1)) { testIndexes <- which(folds==i,arr.ind=TRUE) test_7data <- yourData[testIndexes, ] train_7data <- yourData[-testIndexes, ] testdata_7data<-test_7data test_7data[ ,21]<-NULL svm_model <- svm(solubility ~ ., train_7data,kernel ="radial") pred <- predict(svm_model, newdata = test_7data) prediction<-data.frame(pred) prediction[,1][prediction[,1] >= 0.5] <-1 prediction[,1][prediction[,1] < 0.5] <- 0 cm = as.matrix(table(Actual = testdata_7data$solubility, Predicted = prediction[,1])) accuracy=sum(diag(cm))/length(testdata_7data$solubility) d3 = rbind(d3, data.frame(i,accuracy)) } average_accuracy=sum(d3[1:10,2])/10 d2 = rbind(d2, data.frame(x,average_accuracy)) } d1$accuracy2=d2[,2] write.csv(d1,"DT_10.csv") #test data d2 = NULL for (x in seq(0.1,1,0.1)) { set.seed(5) TrainingDataIndex <- createDataPartition(train7[ ,21], p=x, list = FALSE) train_7 <- train7[TrainingDataIndex,] test_7<- train7[-TrainingDataIndex,] testdata<-train_7 testdata[ ,21]<-NULL svm_model <- svm(solubility ~ ., train_7,kernel ="radial") pred <- predict(svm_model, newdata = test7) prediction<-data.frame(pred) prediction[,1][prediction[,1] >= 0.5] <-1 prediction[,1][prediction[,1] < 0.5] <- 0 cm = as.matrix(table(Actual = test7$solubility, Predicted = prediction[,1])) 
accuracy=sum(diag(cm))/length(test7$solubility) d2 = rbind(d2, data.frame(x,accuracy)) } d10$SVM=d2[,2] #boosted library("adabag") library("rpart") train7f=train7 train7f$solubility<-as.factor(train7f$solubility) testdata<-train7f testdata[ ,21]<-NULL control=rpart.control(maxdepth=5) adaboost = boosting(solubility~., data=train7f, mfinal=5, control=control) adaboost.pred <- predict.boosting(adaboost,newdata=testdata) cm = as.matrix(table(Actual = train7f$solubility, Predicted = adaboost.pred$class)) sum(diag(cm))/length(train7f$solubility) #8 100 0.9216611 0.9125034 #8 50 0.9202275 0.9107935 #8 10 0.9164354 0.9110244 # 8 5 0.9145856 0.9078809 # 10 5 0.9083888 0.9088974 # 6 5 0.9153718 0.90802 # 4 5 0.913707 0.9086193 # maxdepth 5 iteration 5 0.913707 0.9097298 # cv accuracy d2 = NULL yourData<-train7f[sample(nrow(train7f)),] folds <- cut(seq(1,nrow(yourData)),breaks=10,labels=FALSE) for (i in seq(1,10,1)) { testIndexes <- which(folds==i,arr.ind=TRUE) test_7 <- yourData[testIndexes, ] train_7 <- yourData[-testIndexes, ] testdata_7<-test_7 test_7[ ,21]<-NULL control=rpart.control(maxdepth=5) adaboost = boosting(solubility~., data=train_7, mfinal=5, control=control) adaboost.pred <- predict.boosting(adaboost,newdata=test_7) cm = as.matrix(table(Actual = testdata_7$solubility, Predicted = adaboost.pred$class)) accuracy=sum(diag(cm))/length(testdata_7$solubility) d2 = rbind(d2, data.frame(i,accuracy)) } average_accuracy=sum(d2[1:10,2])/10 average_accuracy # traning data ratio # train accuracy d1 = NULL for (x in seq(0.1,1,0.1)) { set.seed(5) TrainingDataIndex <- createDataPartition(train7f[ ,21], p=x, list = FALSE) train_7 <- train7f[TrainingDataIndex,] test_7<- train7f[-TrainingDataIndex,] testdata<-train_7 testdata[ ,21]<-NULL control=rpart.control(maxdepth=8) adaboost = boosting(solubility~., data=train_7, mfinal=100, control=control) adaboost.pred <- predict.boosting(adaboost,newdata=testdata) cm = as.matrix(table(Actual = train_7$solubility, Predicted = 
adaboost.pred$class)) accuracy=sum(diag(cm))/length(train_7$solubility) d1 = rbind(d1, data.frame(x,accuracy)) } # cv accuracy d2 = NULL for (x in seq(0.1,1,0.1)) { set.seed(5) TrainingDataIndex <- createDataPartition(train7f[ ,21], p=x, list = FALSE) train_7 <- train7f[TrainingDataIndex,] test_7<- train7f[-TrainingDataIndex,] d3 = NULL yourData<-train_7[sample(nrow(train_7)),] folds <- cut(seq(1,nrow(yourData)),breaks=10,labels=FALSE) for (i in seq(1,10,1)) { testIndexes <- which(folds==i,arr.ind=TRUE) test_7data <- yourData[testIndexes, ] train_7data <- yourData[-testIndexes, ] testdata_7data<-test_7data test_7data[ ,21]<-NULL control=rpart.control(maxdepth=8) adaboost = boosting(solubility~., data=train_7data, mfinal=100, control=control) adaboost.pred <- predict.boosting(adaboost,newdata=test_7data) cm = as.matrix(table(Actual = testdata_7data$solubility, Predicted = adaboost.pred$class)) accuracy=sum(diag(cm))/length(testdata_7data$solubility) d3 = rbind(d3, data.frame(i,accuracy)) } average_accuracy=sum(d3[1:10,2])/10 d2 = rbind(d2, data.frame(x,average_accuracy)) } d1$accuracy2=d2[,2] write.csv(d1,"DT_12.csv") #test data test7f=test7 test7f$solubility<-as.factor(test7f$solubility) d3 = NULL for (x in seq(0.1,1,0.1)) { set.seed(5) TrainingDataIndex <- createDataPartition(train7f[ ,21], p=x, list = FALSE) train_7 <- train7f[TrainingDataIndex,] test_7<- train7f[-TrainingDataIndex,] testdata<-train_7 testdata[ ,21]<-NULL control=rpart.control(maxdepth=8) adaboost = boosting(solubility~., data=train_7, mfinal=100, control=control) adaboost.pred <- predict.boosting(adaboost,newdata=test7f) cm = as.matrix(table(Actual = test7f$solubility, Predicted = adaboost.pred$class)) accuracy=sum(diag(cm))/length(test7f$solubility) d3 = rbind(d3, data.frame(x,accuracy)) } d10$Boosting=d3[,2] write.csv(d10,"DT_13.csv") write.csv(d10,"DT_13.csv") # KNN clean7$job=as.numeric(as.factor(clean7$job)) clean7$marital=as.numeric(as.factor(clean7$marital)) 
clean7$education=as.numeric(as.factor(clean7$education)) clean7$default=as.numeric(as.factor(clean7$default)) clean7$housing=as.numeric(as.factor(clean7$housing)) clean7$loan=as.numeric(as.factor(clean7$loan)) clean7$contact=as.numeric(as.factor(clean7$contact)) clean7$month=as.numeric(as.factor(clean7$month)) clean7$day_of_week=as.numeric(as.factor(clean7$day_of_week)) clean7$poutcome=as.numeric(as.factor(clean7$poutcome)) str(clean7) set.seed(5) TrainingDataIndex <- createDataPartition(clean7[ ,21], p=0.7, list = FALSE) train7 <- clean7[TrainingDataIndex,] test7<- clean7[-TrainingDataIndex,] # change neigbor n # train accuracy install.packages("class") library("class") knn.model=knn(train=train7[,1:20], test=train7[,1:20], cl=train7$solubility, k =1) d=table(train7$solubility,knn.model) accuracy=sum(diag(d))/sum(d) accuracy # k 100 accuracy 0.9132908 0.9117189 # 50 0.9143082 0.9119495 # 30 0.916343 0.9113026 # 10 0.922401 0.9083892 # 5 0.9304477 0.9033943 # 1 1 0.8877167 # cv accuracy d2 = NULL yourData<-train7[sample(nrow(train7)),] folds <- cut(seq(1,nrow(yourData)),breaks=10,labels=FALSE) for (i in seq(1,10,1)) { testIndexes <- which(folds==i,arr.ind=TRUE) test_7 <- yourData[testIndexes, ] train_7 <- yourData[-testIndexes, ] testdata_7<-test_7 test_7[ ,21]<-NULL knn.model=knn(train=train_7[,1:20], test=testdata_7[,1:20], cl=train_7$solubility, k =100) d=table(testdata_7$solubility,knn.model) accuracy=sum(diag(d))/sum(d) d2 = rbind(d2, data.frame(i,accuracy)) } average_accuracy=sum(d2[1:10,2])/10 average_accuracy # traning data ratio #train accuracy d1 = NULL for (x in seq(0.1,1,0.1)) { set.seed(5) TrainingDataIndex <- createDataPartition(train7[ ,21], p=x, list = FALSE) train_7 <- train7[TrainingDataIndex,] test_7<- train7[-TrainingDataIndex,] testdata<-train_7 testdata[ ,21]<-NULL knn.model=knn(train=train_7[,1:20], test=train_7[,1:20], cl=train_7$solubility, k =50) d=table(train_7$solubility,knn.model) accuracy=sum(diag(d))/sum(d) d1 = rbind(d1, 
data.frame(x,accuracy)) } # cv accuracy d2 = NULL for (x in seq(0.1,1,0.1)) { set.seed(5) TrainingDataIndex <- createDataPartition(train7[ ,21], p=x, list = FALSE) train_7 <- train7[TrainingDataIndex,] test_7<- train7[-TrainingDataIndex,] d3 = NULL yourData<-train_7[sample(nrow(train_7)),] folds <- cut(seq(1,nrow(yourData)),breaks=10,labels=FALSE) for (i in seq(1,10,1)) { testIndexes <- which(folds==i,arr.ind=TRUE) test_7data <- yourData[testIndexes, ] train_7data <- yourData[-testIndexes, ] testdata_7data<-test_7data test_7data[ ,21]<-NULL knn.model=knn(train=train_7data[,1:20], test=testdata_7data[,1:20], cl=train_7data$solubility, k =50) d=table(testdata_7data$solubility,knn.model) accuracy=sum(diag(d))/sum(d) d3 = rbind(d3, data.frame(i,accuracy)) } average_accuracy=sum(d3[1:10,2])/10 d2 = rbind(d2, data.frame(x,average_accuracy)) } d1$accuracy2=d2[,2] write.csv(d1,"DT_11.csv") # test data d4 = NULL for (x in seq(0.1,1,0.1)) { set.seed(5) TrainingDataIndex <- createDataPartition(train7[ ,21], p=x, list = FALSE) train_7 <- train7[TrainingDataIndex,] test_7<- train7[-TrainingDataIndex,] testdata<-train_7 testdata[ ,21]<-NULL knn.model=knn(train=train_7[,1:20], test=test7[,1:20], cl=train_7$solubility, k =50) d=table(test7$solubility,knn.model) accuracy=sum(diag(d))/sum(d) d4 = rbind(d4, data.frame(x,accuracy)) } d10$KNN=d4[,2] write.csv(d10,"DT_13.csv")
3b5dc22392ec65738fe393c30f4d2a671c0e78fd
b73f1bbdfad964a17c9225f2d3ef41d28f15cc69
/cachematrix.R
54a50ee734fbbd0415eb96cb9177ec10e05a45c9
[]
no_license
ankitsharma222/ProgrammingAssignment2
b08d80cf018fdcc2c3e29532281b4821c7f65b05
cd47e3d97c935fdd4aa78c624fc91c4c1cf66cc4
refs/heads/master
2021-01-17T17:50:08.324287
2015-02-19T20:17:18
2015-02-19T20:17:18
30,935,524
0
0
null
2015-02-17T20:13:05
2015-02-17T20:13:05
null
UTF-8
R
false
false
1,168
r
cachematrix.R
## these functions help in caching the inverse of a matrix rather than computing it repeatedly ##to set or get the value of matrix or its inverse makeCacheMatrix<-function(x=matrix()){ m<-NULL set<-function(y=matrix()){ x<<-y m<<-NULL ##to assign new data to x and discard the old inverse stored in m. } get<-function()x ## to get the value of x setinv<-function(inv) m<<-inv ##takes inv as an argument and save its value in the m variable in global env getinv<-function() m ##returns the value of mean list(set=set,get=get,setinv=setinv,getinv=getinv)## makecachemeans returns a list of functions } ##2nd function to calculate the inverse or to get a cached value cacheSolve<-function(x=matrix(),...){ m<-z$getinv() ##z=makeCacheMatrix(...) if(!is.null(m)) { print("printing cached inverse") ##to check if m is already calculated return(m) } mat<-z$get() m<-solve(mat) ## to calculate inv if it ws not previously calculated z$setinv(m) ##to set new value of inverse just calculated m }
5f98a1ca38df5101d8a801995b76fa9edeb2daad
1cb7c242633970d93fd874dddd140b4461e35278
/Rscripts/ensemble/compute_ensemble.R
8fd322b9cd874019bdfb79b1ba6a0a612f16ffe2
[ "MIT" ]
permissive
chenxofhit/scRNAseq_clustering_comparison
5c2e1e79ea200dde38edeaa7dad5057541d2af82
de91456d411f97a45b871d55490d1e14b09031f5
refs/heads/master
2020-03-24T06:05:20.625297
2019-08-20T15:23:45
2019-08-20T15:23:45
142,515,237
0
0
MIT
2018-07-27T02:02:45
2018-07-27T02:02:45
null
UTF-8
R
false
false
4,998
r
compute_ensemble.R
## --------------------------------------------------------------------------
## Ensemble between methods, per run, all k
##
## Command line: key=value pairs that are eval'd in the global environment,
## defining
##   clusteringsummary  path to an RDS file with the clustering summary
##   ncores             number of cores for mclapply()
##   outrds             path of the RDS file to write
## --------------------------------------------------------------------------
args <- commandArgs(trailingOnly = TRUE)
## fix: seq_along avoids iterating over 1:0 when no arguments are given
for (i in seq_along(args)) {
  eval(parse(text = args[[i]]))
}

print(clusteringsummary)
print(ncores)
print(outrds)

suppressPackageStartupMessages({
  library(plyr)
  library(dplyr)
  library(tidyr)
  library(purrr)
  library(clue)
  library(reshape2)
  library(parallel)
})

## Load clustering summary
df <- readRDS(file = clusteringsummary)

## Sample Seurat results, keep only one (randomly chosen) resolution per k
set.seed(42)
df_sub <- df %>% dplyr::filter(!(method %in% c("Seurat")))
df_seurat <- df %>% dplyr::filter(method == "Seurat") %>%
  dplyr::group_by(dataset, method, k) %>%
  dplyr::filter(resolution == sample(unique(resolution), 1)) %>%
  dplyr::ungroup()
df_sub <- plyr::rbind.fill(df_sub, df_seurat)

## Help function for computing ensemble (consensus) clusters.
##
## df: data frame with variables method, k, dataset, cell, trueclass, run
## methods: character vector with method names for the ensemble
## Returns a long-format data frame with the per-method cluster columns,
## the consensus cluster ("cons_cluster"), trueclass and dataset.
helper_ensemble <- function(methods, df) {
  ## Process each dataset separately
  l <- vector("list", length(unique(df$dataset)))
  names(l) <- unique(df$dataset)
  for (i in unique(df$dataset)) {
    print(paste(i, paste(methods, collapse = "+")))
    ## Extract only results for the current dataset
    res <- df %>% dplyr::filter(dataset == i)
    ## Process each k separately
    combined_l <- vector("list", length(unique(res$k)))
    names(combined_l) <- unique(res$k)
    for (u in unique(res$k)) {
      ## Extract only results for the current k
      res.k <- res %>% dplyr::filter(k == u)
      ## Skip if results for one of the methods do not exist
      if (sum(unique(res.k$method) %in% methods) != length(methods)) {
        next
      } else {
        ## Wide format (each method/run combination in a separate column)
        ## fix: qualify dplyr::filter so a stats::filter masking cannot bite
        res.w <- dcast(res.k %>% dplyr::filter(method %in% methods),
                       trueclass + cell ~ method + run,
                       value.var = "cluster")
        res2 <- res.w %>% dplyr::select(-trueclass) %>%
          tibble::column_to_rownames("cell") %>%
          as.matrix()
        ## If all values are NA, replace them with 0
        if (all(is.na(res2))) {
          res2[] <- 0
        }
        ## Process each run separately
        runs <- unique(res.k$run)
        m <- matrix(NA, nrow = nrow(res2), ncol = length(runs))
        rownames(m) <- rownames(res2)
        colnames(m) <- runs
        for (j in seq_along(runs)) {
          ## Extract only the columns belonging to the current run
          run <- grep(paste0("_", runs[j], "$"), colnames(res2))
          res3 <- res2[, run]
          re <- res3 %>% plyr::alply(2, clue::as.cl_partition)
          ## Replace NA cluster assignments by zeros
          re <- lapply(re, function(x) {
            x <- as.matrix(x$.Data)
            x[is.na(x)] <- 0
            x
          })
          ## Make ensemble and generate consensus clustering
          re <- clue::as.cl_ensemble(re)
          if (all(sapply(re, length) == 0)) {
            m[, runs[j]] <- NA
          } else {
            re <- clue::cl_consensus(re, method = "SE",
                                     control = list(nruns = 50))
            clusters <- clue::cl_class_ids(re)
            m[, runs[j]] <- clusters[rownames(m)]
          }
        }
        out <- data.frame(res.w, stringsAsFactors = FALSE) %>%
          dplyr::mutate(dataset = i, k = u,
                        method = paste(methods, collapse = ".")) %>%
          dplyr::left_join(data.frame(m, stringsAsFactors = FALSE,
                                      check.names = FALSE) %>%
                             tibble::rownames_to_column("cell"),
                           by = "cell")
        combined_l[[as.character(u)]] <- out
      }
    }
    l[[i]] <- plyr::rbind.fill(combined_l)
  }
  res.df <- plyr::rbind.fill(l)
  ## NOTE(review): `runs` here holds the value left over from the last loop
  ## iteration; this assumes all datasets/k values share the same set of
  ## runs -- TODO confirm against the clustering summary.
  res.df <- reshape2::melt(data = res.df,
                           id.vars = c("dataset", "trueclass", "cell",
                                       "method", "k"),
                           measure.vars = as.character(runs),
                           value.name = "cons_cluster",
                           variable.name = "run")
  return(res.df)
}

## Construct all ordered combinations of two methods
## (fix: dropped the stray extra parentheses around `paste`)
comb.ensmbl <- list(l1 = unique(df_sub$method), l2 = unique(df_sub$method)) %>%
  purrr::cross() %>%
  purrr::map(paste)
names(comb.ensmbl) <- sapply(comb.ensmbl, function(x) paste0(x, collapse = "."))
## Remove pairings of a method with itself
comb.ensmbl <- comb.ensmbl %>% purrr::discard(function(x) x[1] == x[2])

## Run, one combination per worker
out <- mclapply(comb.ensmbl, helper_ensemble, df = df_sub,
                mc.preschedule = FALSE, mc.cores = ncores)
out <- plyr::rbind.fill(out)

## Save
saveRDS(out, file = outrds)

date()
sessionInfo()
990dce1a7e47e3340c5517439776c4c3167a0714
02210921d469ae6c7daf9e159ea985aab6347517
/R/app_server.R
46cea8775fb3f6af3f8c468a04bf649aa2edc3ea
[ "MIT" ]
permissive
frdanconia/shiny_etf_portfolio
e6e94380ac482da53fb1322a1197ca968cad1490
580d9d4c1c53f94e7c77a78058c78794f3b4940a
refs/heads/main
2023-03-03T22:25:16.162724
2021-01-23T13:26:32
2021-01-23T13:26:32
332,216,492
0
0
null
null
null
null
UTF-8
R
false
false
301
r
app_server.R
#' The application server
#'
#' Top-level shiny server function: attaches tidyquant and wires up the
#' first-level module servers.
#'
#' @import shiny
app_server <- function(input, output, session) {
  # tidyquant must be attached here (not just ::-qualified) due to issue:
  # https://github.com/business-science/tidyquant/issues/116
  library(tidyquant)

  # List the first-level callModules here
  callModule(mod_universe_server, "universe_ui_1")
  callModule(mod_finder_server, "finder_ui_1")
}
6ecfe9b7168449c56e5e9945f4a64131ec604c4e
8ab151cc5bfb154cc4ae4b1d97ddd6b2bedc95fa
/man/solar.long.Rd
cc205edf43403c5b7c1edc1524173c37d5038ea2
[]
no_license
arturochian/MetFns
5eafd4bc404edbbdefd27223c5b8a99d32cd048d
5ce9fc52efdac3c2a12aa18282ab71e53aacf115
refs/heads/master
2020-04-06T04:20:15.871591
2014-09-16T00:00:00
2014-09-16T00:00:00
null
0
0
null
null
null
null
UTF-8
R
false
false
939
rd
solar.long.Rd
\name{solar.long} \alias{solar.long} \title{ Calculation of solar longitude } \description{ Calculates solar longitude with respect to the equinox of 2000.0 for given year, month, day and time. } \usage{ solar.long(year, month, day, time) } \arguments{ \item{year}{ numeric vector specifying year as a four-digit number (e.g. 2006). } \item{month}{ numeric vector specifying month of the year. } \item{day}{ numeric vector specifying day. } \item{time}{ numeric vector specifying time in hours. } } \value{ \code{solar.long} returns numeric vector with a value between 0 and 359.999. } \references{ Steyaert C. (1991). Calculating the Solar Longitude 2000.0, \emph{WGN, Journal of the IMO}, 19:2, 31-34. } \author{ Kristina Veljkovic } \seealso{ \code{\link{filter.sol}} } \examples{ ## calculate solar longitude for June 22, 2006, at 4h UT. require(astroFns) solar.long(year=2006,month=6,day=22,time=4) }
7ff68bee61ea077074444adfff8499f12195c7a0
34382754d6fe101536f2a5755801845849a3e1d3
/Covid19/ui.R
de7a9d29dee0dd6f5edc7396355a512911c70cea
[]
no_license
nohturfft/covid19_a
11284c018cb2e742e35bb6eacb0e5af3efe31e91
189eb2f9374c9f9cf3448b407220552b17994109
refs/heads/master
2021-03-16T17:17:07.652394
2020-03-14T11:43:45
2020-03-14T11:43:45
246,926,050
0
0
null
null
null
null
UTF-8
R
false
false
1,214
r
ui.R
# User-interface definition of the COVID19 Shiny dashboard.
#
# Run the application by clicking 'Run App' in RStudio.
# More on building Shiny apps: http://shiny.rstudio.com/
#
# Layout: a narrow sidebar holding a single country selector, and a main
# panel with three tabs (daily cases, cumulative cases, raw table).

library(shiny)
library(dygraphs)

shinyUI(fluidPage(

  # App title ----
  titlePanel("COVID19"),

  sidebarLayout(

    # Sidebar panel for inputs ----
    sidebarPanel(
      width = 2,
      selectInput(inputId = 'country',
                  label = 'Country',
                  choices = c("United Kingdom")),
      # br() element to introduce extra vertical spacing ----
      br()
    ),

    # Main panel for displaying outputs ----
    mainPanel(
      # Tabset with the two dygraph plots and the data table ----
      tabsetPanel(
        type = "tabs",
        tabPanel("Daily Cases", dygraphOutput("plot_daily")),
        tabPanel("Cummulative", dygraphOutput("plot_cumm")),
        tabPanel("Table", tableOutput("table"))
      )
    )
  )
))
74b1a91941b836e1131c9a1252add411121b2b80
a206f33c8cbd90abf2f400f79b233b4e56c89f23
/clusterizacion/clusterizacion_steepest.R
3501d9d296e643dbec62d59c4e5f76338a549c85
[]
no_license
andresrabinovich/algoritmos-geneticos
60a403860fcad3932e5f18bad23a6ac9312c12f1
6b3923981c2f51ed735451f735dd12e1c63a0d75
refs/heads/master
2021-01-10T13:46:18.419293
2015-07-15T12:53:51
2015-07-15T12:53:51
36,511,094
0
0
null
null
null
null
UTF-8
R
false
false
6,776
r
clusterizacion_steepest.R
## ---------------------------------------------------------------------------
## Steepest-ascent genetic clustering of synthetic 2-d points generated
## around the nodes of a square grid.  Each chromosome assigns every data
## point to one of the k grid cells; fitness is the Calinski-Harabasz index.
## ---------------------------------------------------------------------------

#///////////////
# CONFIGURATION
#///////////////
poblacion <- 1            # population size
pm <- 1                   # mutation probability
pc <- 0.1                 # single-point crossover probability
generaciones <- 1000      # number of generations
corridas <- 1             # number of independent runs
dim_red <- 2              # the grid points are not real data, only the places around which clusters are built
puntos_por_cluster <- 3
parametro_de_red <- 1     # grid spacing
ancho_del_cluster <- 0.1  # cluster extent in x
alto_del_cluster <- 0.1   # cluster extent in y
nivel_de_ruido <- 0       # fraction of noise points (fix: was used below but never defined)
k <- dim_red^2            # TODO: adapt the algorithm so k need not be given up front

#////////////////////
# LIBRARIES
#////////////////////
library(fpc)      # cluster.stats() for the Calinski-Harabasz fitness
library(cluster)  # silhouette() for the final plot (fix: was never attached)

#/////////////////////////////////
# GENERIC FUNCTIONS
#/////////////////////////////////

#------------------
# Fitness function
#------------------
# Fitness of a chromosome: Calinski-Harabasz index of the induced partition.
#   cromosoma: integer vector, one cluster id per point
#   matriz_de_distancias: "dist" object over the points
calcular_fitness <- function(cromosoma, matriz_de_distancias) {
  return (cluster.stats(matriz_de_distancias, cromosoma)$ch)
}

#-------------------
# Mutation function
#-------------------
# With probability pm, reassign 5 randomly chosen loci to a different
# (randomly chosen) cluster.
mutar <- function(cromosoma, pm, k) {
  if (runif(1) <= pm) {
    for (i in 1:5) {
      # Pick a locus at random and change it
      posicion <- sample(seq_along(cromosoma), 1)
      red <- seq_len(k)
      red <- red[-cromosoma[posicion]]  # exclude the current cluster so the mutation always moves the point
      cromosoma[posicion] <- sample(red, 1)
    }
  }
  return (cromosoma)
}

#--------------------
# Crossover function
#--------------------
# Single-point crossover between two parent chromosomes (rows of a matrix),
# applied with probability pc.  Returns the two children.
cruzar <- function(cromosomas_padres, pc) {
  # Create the children
  cromosomas_hijos <- cromosomas_padres
  if (runif(1) <= pc) {
    # Choose the locus from which to start crossing
    posicion <- sample(seq_len(ncol(cromosomas_padres)), 1)
    cromosomas_hijos[1, 1:posicion] <- cromosomas_padres[2, 1:posicion]
    cromosomas_hijos[2, 1:posicion] <- cromosomas_padres[1, 1:posicion]
  }
  return (cromosomas_hijos)
}

#-----------------------------------------------------
# Fitness-proportional selection of a mating pair
#-----------------------------------------------------
# Returns two distinct indices into `fitness`, drawn with probability
# proportional to the fitness values.
elegir_pareja <- function(fitness) {
  return (sample(seq_along(fitness), 2, replace = FALSE, prob = fitness))
}

#////////////////////
# MAIN PROGRAM
#////////////////////

# Generate the equispaced grid of cluster centres
a <- seq(1, dim_red * parametro_de_red, parametro_de_red)
red <- matrix(data = a, nrow = dim_red^2, ncol = 2)
red[, 1] <- rep(a, each = dim_red)

# Generate the data points around the grid nodes
puntos_en_la_red <- dim_red^2
total_de_puntos <- puntos_en_la_red * puntos_por_cluster
tamano_del_cromosoma <- total_de_puntos
# Number of noise points (currently unused; nivel_de_ruido defaults to 0)
cantidad_de_puntos_de_ruido <- total_de_puntos * nivel_de_ruido

# Generate the cluster points
puntos <- matrix(0, nrow = total_de_puntos, ncol = 2)
puntos[, 1] <- runif(total_de_puntos, -ancho_del_cluster, ancho_del_cluster) +
  rep(red[, 1], each = puntos_por_cluster)
puntos[, 2] <- runif(total_de_puntos, -alto_del_cluster, alto_del_cluster) +
  rep(red[, 2], each = puntos_por_cluster)

# Dissimilarity matrix between the points
matriz_de_disimilaridad <- dist(puntos)

# Blank matrix holding the chromosomes of the new population after each pass.
# Each chromosome is an ordered vector assigning each position (each point)
# to one of the possible grid clusters.
cromosomas_nuevos <- matrix(0, ncol = total_de_puntos, nrow = poblacion)
# Matrix holding the fitness of each chromosome
fitness <- matrix(0, ncol = 1, nrow = poblacion)

# Runs begin
for (corrida in seq_len(corridas)) {
  # Random initial population for this run: genes between 1 and the number
  # of grid cells
  cromosomas <- matrix(sample(1:puntos_en_la_red,
                              poblacion * tamano_del_cromosoma,
                              replace = TRUE),
                       ncol = tamano_del_cromosoma)

  # Compute the fitness of the initial chromosomes
  for (cromosoma in seq_len(poblacion)) {
    fitness[cromosoma] <- calcular_fitness(cromosomas[cromosoma, ],
                                           matriz_de_disimilaridad)
  }

  # Generation loop
  for (generacion in seq_len(generaciones)) {
    if (generacion %% 100 == 0) {
      print(mean(fitness))
      print(generacion)
    }

    # Cross the chromosomes according to their fitness: higher fitness means
    # higher crossover probability.  Choose poblacion/2 pairs.
    if (poblacion > 1) {
      pareja_actual <- 1  # index of the new pair in each crossing
      # fix: the original iterated over an undefined variable `cruzas`;
      # iterate explicitly over the poblacion/2 pairs.
      for (cruza in seq_len(poblacion %/% 2)) {
        # Choose the pair to cross
        pareja <- elegir_pareja(fitness)
        # Cross them and generate two children
        hijos <- cruzar(cromosomas[pareja, ], pc)
        # Put the two children into the new population
        cromosomas_nuevos[pareja_actual, ] <- hijos[1, ]
        cromosomas_nuevos[pareja_actual + 1, ] <- hijos[2, ]
        # Advance the new-pair index by two
        pareja_actual <- pareja_actual + 2
      }
      # Adopt the new population as the current one
      cromosomas <- cromosomas_nuevos
    }

    # Mutate with greedy (steepest-ascent) acceptance: keep the mutant only
    # if it improves the fitness.
    for (cromosoma in seq_len(poblacion)) {
      cromosoma_nuevo <- mutar(cromosomas[cromosoma, ], pm, k)
      fitness_nuevo <- calcular_fitness(cromosoma_nuevo,
                                        matriz_de_disimilaridad)
      if (fitness_nuevo > fitness[cromosoma]) {
        cromosomas[cromosoma, ] <- cromosoma_nuevo
        fitness[cromosoma] <- fitness_nuevo
      }
    }
  }
}

# Show the best result
graphics.off()
print(cromosomas)
plot(puntos[, 1], puntos[, 2])
points(puntos[, 1], puntos[, 2], col = rainbow(k)[cromosomas[1, ]], pch = 20)
dev.new()
plot(silhouette(cromosomas[which.max(fitness), ], matriz_de_disimilaridad))
79859c428fb939c528c8a114845ced2d61eeb64a
e58cb0a3ce95401501f0f0441a492529632b41f7
/submitJobs.R
f4f6aa06672ff707953199a0aefe1b847fd36c5f
[]
no_license
larsgr/GRewdPipeline
ea451c75b5f4d91d4f92a941e3b2f3461566ee98
77a7d5b17373a2139d34327942fcec300b62fb40
refs/heads/master
2020-12-29T02:19:46.034273
2019-01-15T10:24:33
2019-01-15T10:24:33
30,870,561
0
0
null
null
null
null
UTF-8
R
false
false
2,834
r
submitJobs.R
#
# NOTE: system("source /etc/profile.d/modules.sh && sbatch jobscript.sh")
# does not work!  The module command is not available to "jobscript.sh"..
# why not?  Apparently the job script does inherit the environment where
# sbatch is executed.
#
# Need to add #!/bin/bash -l at the start of each job script...

# Submit a SLURM job script with sbatch, optionally depending on other jobs.
#
# Args:
#   jobScript: path to the job script to submit
#   deps: optional vector of job IDs this job must wait for (submitted as an
#         "afterok" dependency); NULL (the default) for no dependency
#
# Returns the job ID (as a string) parsed from sbatch's output; stops with
# an error if the output does not look like a successful submission.
submitJob <- function(jobScript, deps = NULL) {
  if (is.null(deps)) {
    cmd <- paste("sbatch", jobScript)
  } else {
    cmd <- paste0("sbatch -d afterok:", paste(deps, collapse = ":"), " ", jobScript)
  }
  cat("Submit job command:", cmd, "\n")
  # fix: intern = TRUE instead of T (T is a reassignable binding)
  jobIDstr <- system(paste("source /etc/profile.d/modules.sh && module load slurm &&", cmd),
                     intern = TRUE)
  cat(jobIDstr, "\n")
  # Expected output from sbatch: "Submitted batch job 1284123"
  # fix: && (scalar, short-circuiting) instead of &, so grepl() is only
  # reached when jobIDstr has exactly one element
  if (!(length(jobIDstr) == 1 && grepl("^Submitted batch job [0-9]+$", jobIDstr))) {
    stop("Job submit failed!")
  }
  jobID <- stringr::str_extract(jobIDstr, "[0-9]+")
  return(jobID)
}

# Earlier stages of the pipeline, already submitted and therefore commented out:
#
# orthoMCL.prepOrthoMCL.jobID <- submitJob(
#   jobScript = "/mnt/NOBACKUP/mariansc/share/orthos/orthoMCL/prepOrthoMCL.job.sh",
#   deps=NULL)
#
# orthoMCL.blastOrthoMCL.jobID <- submitJob(
#   jobScript = "/mnt/NOBACKUP/mariansc/share/orthos/orthoMCL/blastOrthoMCL.job.sh",
#   deps=orthoMCL.prepOrthoMCL.jobID)
#
# orthoMCL.orthoMCL.jobID <- submitJob(
#   jobScript = "/mnt/NOBACKUP/mariansc/share/orthos/orthoMCL/orthoMCL.job.sh",
#   deps=orthoMCL.blastOrthoMCL.jobID)
#
# ExTbls.jobID <- submitJob(
#   jobScript = "/mnt/NOBACKUP/mariansc/share/orthos/exprTbls/ExTbls.job.sh",
#   deps=orthoMCL.orthoMCL.jobID)
#
# runDESeq.jobID <- submitJob(
#   jobScript = "/mnt/NOBACKUP/mariansc/share/orthos/DESeq/runDESeq.job.sh",
#   deps=ExTbls.jobID)
#
# grpFastas.RJob.jobID <- submitJob(
#   jobScript = "/mnt/NOBACKUP/mariansc/share/orthos/grpFastas/RJob.job.sh",
#   deps=orthoMCL.orthoMCL.jobID)
#
# grpCDSFastas.jobID <- submitJob(
#   jobScript = "/mnt/NOBACKUP/mariansc/share/orthos/grpCDSFastas/grpCDSFastas.job.sh",
#   deps=orthoMCL.orthoMCL.jobID)
#
# prepMAFFT.jobID <- submitJob(
#   jobScript = "/mnt/NOBACKUP/mariansc/share/orthos/grpAligned/prepMAFFT.job.sh",
#   deps=grpFastas.RJob.jobID)
#
# MAFFT.jobID <- submitJob(
#   jobScript = "/mnt/NOBACKUP/mariansc/share/orthos/grpAligned/MAFFT.job.sh",
#   deps=prepMAFFT.jobID)
#
# pal2nal.jobID <- submitJob(
#   jobScript = "/mnt/NOBACKUP/mariansc/share/orthos/pal2nal/pal2nal.job.sh",
#   deps=c(MAFFT.jobID,grpCDSFastas.jobID))
#
# treesNuc.jobID <- submitJob(
#   jobScript = "/mnt/NOBACKUP/mariansc/share/orthos/treesNuc/nucTree.job.sh",
#   deps=pal2nal.jobID)

# NOTE(review): treesNuc.jobID is assigned only in the commented-out block
# above -- these calls rely on it still being present in the workspace from a
# previous interactive session.  TODO confirm this is intentional.
splitGroups.jobID <- submitJob(
  jobScript = "/mnt/NOBACKUP/mariansc/share/orthos/splitGroups/splitGroups.job.sh",
  deps = treesNuc.jobID)

PAML.jobID <- submitJob(
  jobScript = "/mnt/NOBACKUP/mariansc/share/orthos/PAML/codeml.job.sh",
  deps = splitGroups.jobID)
b14f34c23409a8bd39f03d6a41867ddbdae7712a
0420fa9f0a499af5edc75c7ddbe606a7ea684e6c
/data-raw/deprecated/cairo-header-old.R
d83a469983c17ad9e9faa3ff0ca64a6ca8f7ee90
[ "MIT" ]
permissive
coolbutuseless/cairocore
ca8a2b82f594b086c7de38149fb3d3a5f150b455
5bf9f2f170f3c7ee81e0b5c709946745950115d4
refs/heads/master
2022-12-03T18:36:56.909944
2020-08-30T04:04:22
2020-08-30T04:04:22
288,104,420
12
1
null
null
null
null
UTF-8
R
false
false
21,190
r
cairo-header-old.R
cairo_header <- list( cairo_create <- list( include = TRUE, gtkdoc = r"{/** * cairo_create: * @target: target surface for the context * * Creates a new #cairo_t with all graphics state parameters set to * default values and with @target as a target surface. The target * surface should be constructed with a backend-specific function such * as cairo_image_surface_create() (or any other * <function>cairo_<emphasis>backend</emphasis>_surface_create(<!-- -->)</function> * variant). * * This function references @target, so you can immediately * call cairo_surface_destroy() on it if you don't need to * maintain a separate reference to it. * * Return value: a newly allocated #cairo_t with a reference * count of 1. The initial reference count should be released * with cairo_destroy() when you are done using the #cairo_t. * This function never returns %NULL. If memory cannot be * allocated, a special #cairo_t object will be returned on * which cairo_status() returns %CAIRO_STATUS_NO_MEMORY. If * you attempt to target a surface which does not support * writing (such as #cairo_mime_surface_t) then a * %CAIRO_STATUS_WRITE_ERROR will be raised. You can use this * object normally, but no drawing will be done. * * Since: 1.0 **/}", proto_text = "cairo_t * cairo_create (cairo_surface_t *target)" ), cairo_move_to <- list( include = TRUE, gtkdoc = r"{/** * cairo_move_to: * @cr: a cairo context * @x: the X coordinate of the new position * @y: the Y coordinate of the new position * * Begin a new sub-path. After this call the current point will be (@x, * @y). * * Since: 1.0 **/}", proto_text = "void cairo_move_to (cairo_t *cr, double x, double y)" ), cairo_line_to <- list( include = TRUE, gtkdoc = r"{/** * cairo_line_to: * @cr: a cairo context * @x: the X coordinate of the end of the new line * @y: the Y coordinate of the end of the new line * * Adds a line to the path from the current point to position (@x, @y) * in user-space coordinates. 
After this call the current point * will be (@x, @y). * * If there is no current point before the call to cairo_line_to() * this function will behave as cairo_move_to(@cr, @x, @y). * * Since: 1.0 **/}", proto_text = "void cairo_line_to (cairo_t *cr, double x, double y)" ), cairo_rectangle <- list( include = TRUE, gtkdoc = r"{/** * cairo_rectangle: * @cr: a cairo context * @x: the X coordinate of the top left corner of the rectangle * @y: the Y coordinate to the top left corner of the rectangle * @width: the width of the rectangle * @height: the height of the rectangle * * Adds a closed sub-path rectangle of the given size to the current * path at position (@x, @y) in user-space coordinates. * * This function is logically equivalent to: * <informalexample><programlisting> * cairo_move_to (cr, x, y); * cairo_rel_line_to (cr, width, 0); * cairo_rel_line_to (cr, 0, height); * cairo_rel_line_to (cr, -width, 0); * cairo_close_path (cr); * </programlisting></informalexample> * * Since: 1.0 **/}", proto_text = "void cairo_rectangle (cairo_t *cr, double x, double y, double width, double height)" ), cairo_paint <- list( include = TRUE, gtkdoc = r"{/** * cairo_paint: * @cr: a cairo context * * A drawing operator that paints the current source everywhere within * the current clip region. * * Since: 1.0 **/}", proto_text = "void cairo_paint (cairo_t *cr)" ), cairo_select_font_face <- list( include = TRUE, gtkdoc = r"{/** * cairo_select_font_face: * @cr: a #cairo_t * @family: a font family name, encoded in UTF-8 * @slant: the slant for the font * @weight: the weight for the font * * Note: The cairo_select_font_face() function call is part of what * the cairo designers call the "toy" text API. It is convenient for * short demos and simple programs, but it is not expected to be * adequate for serious text-using applications. * * Selects a family and style of font from a simplified description as * a family name, slant and weight. 
Cairo provides no operation to * list available family names on the system (this is a "toy", * remember), but the standard CSS2 generic family names, ("serif", * "sans-serif", "cursive", "fantasy", "monospace"), are likely to * work as expected. * * If @family starts with the string "@cairo:", or if no native font * backends are compiled in, cairo will use an internal font family. * The internal font family recognizes many modifiers in the @family * string, most notably, it recognizes the string "monospace". That is, * the family name "@cairo:monospace" will use the monospace version of * the internal font family. * * For "real" font selection, see the font-backend-specific * font_face_create functions for the font backend you are using. (For * example, if you are using the freetype-based cairo-ft font backend, * see cairo_ft_font_face_create_for_ft_face() or * cairo_ft_font_face_create_for_pattern().) The resulting font face * could then be used with cairo_scaled_font_create() and * cairo_set_scaled_font(). * * Similarly, when using the "real" font support, you can call * directly into the underlying font system, (such as fontconfig or * freetype), for operations such as listing available fonts, etc. * * It is expected that most applications will need to use a more * comprehensive font handling and text layout library, (for example, * pango), in conjunction with cairo. * * If text is drawn without a call to cairo_select_font_face(), (nor * cairo_set_font_face() nor cairo_set_scaled_font()), the default * family is platform-specific, but is essentially "sans-serif". * Default slant is %CAIRO_FONT_SLANT_NORMAL, and default weight is * %CAIRO_FONT_WEIGHT_NORMAL. * * This function is equivalent to a call to cairo_toy_font_face_create() * followed by cairo_set_font_face(). 
* * Since: 1.0 **/}", proto_text = "void cairo_select_font_face (cairo_t *cr, const char *family, cairo_font_slant_t slant, cairo_font_weight_t weight)" ), cairo_set_font_size <- list( include = TRUE, gtkdoc = r"{/** * cairo_set_font_size: * @cr: a #cairo_t * @size: the new font size, in user space units * * Sets the current font matrix to a scale by a factor of @size, replacing * any font matrix previously set with cairo_set_font_size() or * cairo_set_font_matrix(). This results in a font size of @size user space * units. (More precisely, this matrix will result in the font's * em-square being a @size by @size square in user space.) * * If text is drawn without a call to cairo_set_font_size(), (nor * cairo_set_font_matrix() nor cairo_set_scaled_font()), the default * font size is 10.0. * * Since: 1.0 **/}", proto_text = "void cairo_set_font_size (cairo_t *cr, double size)" ), cairo_show_text <- list( include = TRUE, gtkdoc = r"{/** * cairo_show_text: * @cr: a cairo context * @utf8: a NUL-terminated string of text encoded in UTF-8, or %NULL * * A drawing operator that generates the shape from a string of UTF-8 * characters, rendered according to the current font_face, font_size * (font_matrix), and font_options. * * This function first computes a set of glyphs for the string of * text. The first glyph is placed so that its origin is at the * current point. The origin of each subsequent glyph is offset from * that of the previous glyph by the advance values of the previous * glyph. * * After this call the current point is moved to the origin of where * the next glyph would be placed in this same progression. That is, * the current point will be at the origin of the final glyph offset * by its advance values. This allows for easy display of a single * logical string with multiple calls to cairo_show_text(). * * Note: The cairo_show_text() function call is part of what the cairo * designers call the "toy" text API. 
It is convenient for short demos * and simple programs, but it is not expected to be adequate for * serious text-using applications. See cairo_show_glyphs() for the * "real" text display API in cairo. * * Since: 1.0 **/}", proto_text = "void cairo_show_text (cairo_t *cr, const char *utf8)" ), cairo_set_source_rgb <- list( include = TRUE, gtkdoc = r"{/** * cairo_set_source_rgb: * @cr: a cairo context * @red: red component of color * @green: green component of color * @blue: blue component of color * * Sets the source pattern within @cr to an opaque color. This opaque * color will then be used for any subsequent drawing operation until * a new source pattern is set. * * The color components are floating point numbers in the range 0 to * 1. If the values passed in are outside that range, they will be * clamped. * * The default source pattern is opaque black, (that is, it is * equivalent to cairo_set_source_rgb(cr, 0.0, 0.0, 0.0)). * * Since: 1.0 **/}", proto_text = "void cairo_set_source_rgb (cairo_t *cr, double red, double green, double blue)" ), cairo_set_antialias <- list( include = TRUE, gtkdoc = r"{/** * cairo_set_antialias: * @cr: a #cairo_t * @antialias: the new antialiasing mode * * Set the antialiasing mode of the rasterizer used for drawing shapes. * This value is a hint, and a particular backend may or may not support * a particular value. At the current time, no backend supports * %CAIRO_ANTIALIAS_SUBPIXEL when drawing shapes. * * Note that this option does not affect text rendering, instead see * cairo_font_options_set_antialias(). * * Since: 1.0 **/}", proto_text = "void cairo_set_antialias (cairo_t *cr, cairo_antialias_t antialias)" ), cairo_set_fill_rule <- list( include = TRUE, gtkdoc = r"{/** * cairo_fill_rule_t: * @CAIRO_FILL_RULE_WINDING: If the path crosses the ray from * left-to-right, counts +1. If the path crosses the ray * from right to left, counts -1. 
(Left and right are determined * from the perspective of looking along the ray from the starting * point.) If the total count is non-zero, the point will be filled. (Since 1.0) * @CAIRO_FILL_RULE_EVEN_ODD: Counts the total number of * intersections, without regard to the orientation of the contour. If * the total number of intersections is odd, the point will be * filled. (Since 1.0) * * #cairo_fill_rule_t is used to select how paths are filled. For both * fill rules, whether or not a point is included in the fill is * determined by taking a ray from that point to infinity and looking * at intersections with the path. The ray can be in any direction, * as long as it doesn't pass through the end point of a segment * or have a tricky intersection such as intersecting tangent to the path. * (Note that filling is not actually implemented in this way. This * is just a description of the rule that is applied.) * * The default fill rule is %CAIRO_FILL_RULE_WINDING. * * New entries may be added in future versions. * * Since: 1.0 **/}", proto_text = "void cairo_set_fill_rule (cairo_t *cr, cairo_fill_rule_t fill_rule);" ), cairo_set_line_width <- list( include = TRUE, gtkdoc = r"{/** * cairo_fill_rule_t: * @CAIRO_FILL_RULE_WINDING: If the path crosses the ray from * left-to-right, counts +1. If the path crosses the ray * from right to left, counts -1. (Left and right are determined * from the perspective of looking along the ray from the starting * point.) If the total count is non-zero, the point will be filled. (Since 1.0) * @CAIRO_FILL_RULE_EVEN_ODD: Counts the total number of * intersections, without regard to the orientation of the contour. If * the total number of intersections is odd, the point will be * filled. (Since 1.0) * * #cairo_fill_rule_t is used to select how paths are filled. For both * fill rules, whether or not a point is included in the fill is * determined by taking a ray from that point to infinity and looking * at intersections with the path. 
The ray can be in any direction, * as long as it doesn't pass through the end point of a segment * or have a tricky intersection such as intersecting tangent to the path. * (Note that filling is not actually implemented in this way. This * is just a description of the rule that is applied.) * * The default fill rule is %CAIRO_FILL_RULE_WINDING. * * New entries may be added in future versions. * * Since: 1.0 **/}", proto_text = "void cairo_set_line_width (cairo_t *cr, double width)" ), cairo_stroke_preserve <- list( include = TRUE, gtkdoc = r"{/** * cairo_stroke_preserve: * @cr: a cairo context * * A drawing operator that strokes the current path according to the * current line width, line join, line cap, and dash settings. Unlike * cairo_stroke(), cairo_stroke_preserve() preserves the path within the * cairo context. * * See cairo_set_line_width(), cairo_set_line_join(), * cairo_set_line_cap(), cairo_set_dash(), and * cairo_stroke_preserve(). * * Since: 1.0 **/}", proto_text = "void cairo_stroke_preserve (cairo_t *cr)" ), cairo_fill <- list( include = TRUE, gtkdoc = r"{/** * cairo_fill: * @cr: a cairo context * * A drawing operator that fills the current path according to the * current fill rule, (each sub-path is implicitly closed before being * filled). After cairo_fill(), the current path will be cleared from * the cairo context. See cairo_set_fill_rule() and * cairo_fill_preserve(). * * Since: 1.0 **/}", proto_text = "void cairo_fill (cairo_t *cr)" ), cairo_arc <- list( include = TRUE, gtkdoc = r"{/** * cairo_arc: * @cr: a cairo context * @xc: X position of the center of the arc * @yc: Y position of the center of the arc * @radius: the radius of the arc * @angle1: the start angle, in radians * @angle2: the end angle, in radians * * Adds a circular arc of the given @radius to the current path. The * arc is centered at (@xc, @yc), begins at @angle1 and proceeds in * the direction of increasing angles to end at @angle2. 
If @angle2 is * less than @angle1 it will be progressively increased by * <literal>2*M_PI</literal> until it is greater than @angle1. * * If there is a current point, an initial line segment will be added * to the path to connect the current point to the beginning of the * arc. If this initial line is undesired, it can be avoided by * calling cairo_new_sub_path() before calling cairo_arc(). * * Angles are measured in radians. An angle of 0.0 is in the direction * of the positive X axis (in user space). An angle of * <literal>M_PI/2.0</literal> radians (90 degrees) is in the * direction of the positive Y axis (in user space). Angles increase * in the direction from the positive X axis toward the positive Y * axis. So with the default transformation matrix, angles increase in * a clockwise direction. * * (To convert from degrees to radians, use <literal>degrees * (M_PI / * 180.)</literal>.) * * This function gives the arc in the direction of increasing angles; * see cairo_arc_negative() to get the arc in the direction of * decreasing angles. * * The arc is circular in user space. To achieve an elliptical arc, * you can scale the current transformation matrix by different * amounts in the X and Y directions. For example, to draw an ellipse * in the box given by @x, @y, @width, @height: * * <informalexample><programlisting> * cairo_save (cr); * cairo_translate (cr, x + width / 2., y + height / 2.); * cairo_scale (cr, width / 2., height / 2.); * cairo_arc (cr, 0., 0., 1., 0., 2 * M_PI); * cairo_restore (cr); * </programlisting></informalexample> * * Since: 1.0 **/}", proto_text = "void cairo_arc (cairo_t *cr, double xc, double yc, double radius, double angle1, double angle2)" ), cairo_close_path <- list( include = TRUE, gtkdoc = r"{/** * cairo_close_path: * @cr: a cairo context * * Adds a line segment to the path from the current point to the * beginning of the current sub-path, (the most recent point passed to * cairo_move_to()), and closes this sub-path. 
After this call the * current point will be at the joined endpoint of the sub-path. * * The behavior of cairo_close_path() is distinct from simply calling * cairo_line_to() with the equivalent coordinate in the case of * stroking. When a closed sub-path is stroked, there are no caps on * the ends of the sub-path. Instead, there is a line join connecting * the final and initial segments of the sub-path. * * If there is no current point before the call to cairo_close_path(), * this function will have no effect. * * Note: As of cairo version 1.2.4 any call to cairo_close_path() will * place an explicit MOVE_TO element into the path immediately after * the CLOSE_PATH element, (which can be seen in cairo_copy_path() for * example). This can simplify path processing in some cases as it may * not be necessary to save the "last move_to point" during processing * as the MOVE_TO immediately after the CLOSE_PATH will provide that * point. * * Since: 1.0 **/}", proto_text = "void cairo_close_path (cairo_t *cr)" ), cairo_translate <- list( include = TRUE, gtkdoc = r"{/** * cairo_translate: * @cr: a cairo context * @tx: amount to translate in the X direction * @ty: amount to translate in the Y direction * * Modifies the current transformation matrix (CTM) by translating the * user-space origin by (@tx, @ty). This offset is interpreted as a * user-space coordinate according to the CTM in place before the new * call to cairo_translate(). In other words, the translation of the * user-space origin takes place after any existing transformation. * * Since: 1.0 **/}", proto_text = "void cairo_translate (cairo_t *cr, double tx, double ty)" ), cairo_scale <- list( include = TRUE, gtkdoc = r"{/** * cairo_scale: * @cr: a cairo context * @sx: scale factor for the X dimension * @sy: scale factor for the Y dimension * * Modifies the current transformation matrix (CTM) by scaling the X * and Y user-space axes by @sx and @sy respectively. 
The scaling of * the axes takes place after any existing transformation of user * space. * * Since: 1.0 **/}", proto_text = "void cairo_scale (cairo_t *cr, double sx, double sy)" ), cairo_set_source_rgba <- list( include = TRUE, gtkdoc = r"{/** * cairo_set_source_rgba: * @cr: a cairo context * @red: red component of color * @green: green component of color * @blue: blue component of color * @alpha: alpha component of color * * Sets the source pattern within @cr to a translucent color. This * color will then be used for any subsequent drawing operation until * a new source pattern is set. * * The color and alpha components are floating point numbers in the * range 0 to 1. If the values passed in are outside that range, they * will be clamped. * * The default source pattern is opaque black, (that is, it is * equivalent to cairo_set_source_rgba(cr, 0.0, 0.0, 0.0, 1.0)). * * Since: 1.0 **/}", proto_text = "void cairo_set_source_rgba (cairo_t *cr, double red, double green, double blue, double alpha)" ), cairo_text_extents <- list( include = TRUE, gtkdoc = r"{/** * cairo_text_extents: * @cr: a #cairo_t * @utf8: a NUL-terminated string of text encoded in UTF-8, or %NULL * @extents: a #cairo_text_extents_t object into which the results * will be stored * * Gets the extents for a string of text. The extents describe a * user-space rectangle that encloses the "inked" portion of the text, * (as it would be drawn by cairo_show_text()). Additionally, the * x_advance and y_advance values indicate the amount by which the * current point would be advanced by cairo_show_text(). * * Note that whitespace characters do not directly contribute to the * size of the rectangle (extents.width and extents.height). They do * contribute indirectly by changing the position of non-whitespace * characters. In particular, trailing whitespace characters are * likely to not affect the size of the rectangle, though they will * affect the x_advance and y_advance values. 
* * Since: 1.0 **/}", proto_text = "void cairo_text_extents (cairo_t *cr, const char *utf8, cairo_text_extents_t *extents)" ), cairo_paint_with_alpha <- list( include = TRUE, gtkdoc = r"{/** * cairo_paint_with_alpha: * @cr: a cairo context * @alpha: alpha value, between 0 (transparent) and 1 (opaque) * * A drawing operator that paints the current source everywhere within * the current clip region using a mask of constant alpha value * @alpha. The effect is similar to cairo_paint(), but the drawing * is faded out using the alpha value. * * Since: 1.0 **/}", proto_text = "void cairo_paint_with_alpha (cairo_t *cr, double alpha)" ) ) xxx <- list( include = TRUE, gtkdoc = r"{}", proto_text = "" )
374d1f963849c37a2ba655db2c50ab19675f63dd
9bd0a8ccaad50f716a942192aa31bddc1644bfbd
/SableSurface_2022.R
b01e9ea5c86919c1d0d5582549137632c8ed23b6
[]
no_license
arcticecology/Sable
4ab0df525ae8238cfd7c451431ac090906d4e26d
5e6ca0269b2fc59f7a3b5c234ed020aff82c1aff
refs/heads/main
2023-04-11T03:48:00.876984
2022-08-30T11:57:01
2022-08-30T11:57:01
530,626,321
0
0
null
null
null
null
UTF-8
R
false
false
7,920
r
SableSurface_2022.R
# SableSurface_2022.R ---------------------------------------------------------
# Surface-sample calibration and down-core "timetrack" analysis for the Sable
# Island ponds (sites PD03, PD67, PD37, PD38), followed by a stratigraphic
# diagram with a CONISS zonation.
#
# NOTE(review): interactive analysis script -- it assumes the input CSVs exist
# at the hard-coded path below, and it prints and plots as it goes.

#The script to add norway to the conservative model because only RA is available
library(rioja)     # chclust(), strat.plot(), addClustZone()
library(analogue)  # timetrack()
library(vegan)     # decostand(), decorana(), rda()
library(labdsv)

# Hard-coded working directory -- update for your machine.
setwd("C:\\Users\\Andrew\\Desktop\\Sable\\R\\surface")

# --- Surface (training) species data -----------------------------------------
species3<-read.csv(file="dataset_merge.csv", row.names=1) #read the csv file
species1<-species3[, -cbind(118:119) ] #this removes the taxa that are not good for modelling, undiff tanytarsini and such
# Convert counts to relative abundances (percent per sample)
species2 <- species1 / rowSums(species1) * 100
# Drop taxa that never occur in the surface set
speciesX <- species2[, which(colSums(species2) != 0)]
species<-speciesX[, -cbind(116:117) ] #this removes the taxa that are not good for modelling, undiff tanytarsini and such
# Screening rule: keep taxa reaching at least M% abundance in at least N samples
N <- 2
M <- 2
i <- colSums(speciesX >= M) >= N ## L1
# NOTE(review): this overwrites the 'species' object created two lines above;
# the screened version of speciesX is what is actually used from here on.
species <- speciesX[, i, drop = FALSE]
tspecies <- decostand(species, method="hellinger") #transform data

#dca (b-diversity)
vare.dca <- decorana(tspecies)
vare.dca
summary(vare.dca)
plot(vare.dca)
plot(vare.dca, type = "n", xlim=c(-3,3), ylim=c(3,-3))
points(vare.dca, display = "sites", col = "black", cex = 1, pch = 21, bg = "red")
text(vare.dca, display="species", pos=2)

## load the data
# --- Down-core (passive) samples: one CSV per site ---------------------------
# Each core is converted to percent abundances and restricted to the taxa
# shared with the surface training set.
coreinput_PD03<-read.csv(file="PD03m.csv", row.names=1)
coresp<-coreinput_PD03[, -cbind(1:3) ] #use this for cores
coresp1 <- coresp / rowSums(coresp) * 100
coresp2<-coresp1[, -cbind(117:118) ]
cols_to_keep <- intersect(colnames(species),colnames(coresp2))
PD03 <- coresp2[,cols_to_keep, drop=FALSE]

coreinput_PD67<-read.csv(file="PD67m.csv", row.names=1)
cores67<-coreinput_PD67[, -cbind(1:3) ] #use this for cores
coresp67 <- cores67 / rowSums(cores67) * 100
coresp677<-coresp67[, -cbind(117:118) ]
cols_to_keep <- intersect(colnames(species),colnames(coresp677))
PD67 <- coresp677[,cols_to_keep, drop=FALSE]

coreinput_PD37<-read.csv(file="PD37m.csv", row.names=1)
cores37<-coreinput_PD37[, -cbind(1:2) ] #use this for cores
coresp37 <- cores37 / rowSums(cores37) * 100
cols_to_keep <- intersect(colnames(PD67),colnames(coresp37))
PD37a <- coresp37[,cols_to_keep, drop=TRUE]

coreinput_PD38<-read.csv(file="PD38m.csv", row.names=1)
cores38<-coreinput_PD38[, -cbind(1:2) ] #use this for cores
coresp38 <- cores38 / rowSums(cores38) * 100
cols_to_keep <- intersect(colnames(PD67),colnames(coresp38))
PD38 <- coresp38[,cols_to_keep, drop=TRUE]

# Drop samples 9:15 from PD37 -- presumably unreliable intervals; TODO confirm
PD37 <-PD37a[ -cbind(9:15), ]

## Fit the timetrack ordination
core<-PD67
mod <- timetrack(species, core, transform = "hellinger", method = "rda")
mod
## Plot the timetrack
plot(mod, ptype = "b", col = c("black", "red"), lwd = 2)
## Other options (reorder the time track)
ord <- rev(seq_len(nrow(core)))
plot(mod, choices = 2:3, order = ord, ptype = "b", col = c("forestgreen", "orange"), lwd = 2)
## scores and fitted methods
## IGNORE_RDIFF_BEGIN
head(fitted(mod, type = "passive"))
head(scores(mod, type = "passive"))
## IGNORE_RDIFF_END

# PCA of the surface data, used later for species/site labels on the figures
my.pca <- rda(tspecies, scale=FALSE)
biplot(my.pca, scaling = 3, type = c("text", "points"))
text(my.pca, display = "sites", scaling = 1, cex = 0.8, col = "darkcyan")

## predict locations in timetrack for new observations
mod3 <- timetrack(species, PD03, transform = "hellinger", method = "rda")
mod4 <- timetrack(species, PD67, transform = "hellinger", method = "rda", scaling=3)
mod5 <- timetrack(species, PD37, transform = "hellinger", method = "rda", scaling=3)
plot(mod5)
mod6 <- timetrack(species, PD38, transform = "hellinger", method = "rda", scaling=3)
plot(mod6)
mod <- timetrack(species, core, transform = "hellinger", method = "rda", labels=species)
mod
plot(mod)
## Plot the timetrack
plot(mod, ptype = "b", col = c("black", "red"), lwd = 2)
plot(mod3, ptype = "b", col = c("black", "red"), lwd = 2)
plot(mod4, ptype = "b", col = c("black", "red"), lwd = 2)
plot(mod5, ptype = "b", col = c("black", "red"), lwd = 2)

# --- Extract the plotted time-track coordinates via grid grobs ---------------
# Each plot() below uses type = "n" so nothing visible is drawn; grid.echo()
# and grid.grab() then capture the plot as a grob so the point coordinates
# can be pulled out and re-drawn manually on the combined figure.
library(survival)
library(grid)
library(gridGraphics)
plot(mod, type = "n", ptype = "b", xlim=c(-0.6,0.5), ylim=c(-0.6,0.5))
# capture the plotted output as a grob of PD67
grid.echo()
grid.grab() -> k
# pull out the data from the grob..
k$children$`graphics-plot-1-points-1`$x -> x
k$children$`graphics-plot-1-points-1`$y -> y

plot(mod5, type = "n", ptype = "b", xlim=c(-0.6,0.5), ylim=c(-0.6,0.5))
# capture the plotted output as a grob of PD37
grid.echo()
grid.grab() -> k2
# pull out the data from the grob..
k2$children$`graphics-plot-1-points-1`$x -> x2
k2$children$`graphics-plot-1-points-1`$y -> y2

plot(mod6, type = "n", ptype = "b", xlim=c(-0.6,0.5), ylim=c(-0.6,0.5))
# capture the plotted output as a grob of PD38
grid.echo()
grid.grab() -> k3
# pull out the data from the grob..
k3$children$`graphics-plot-1-points-1`$x -> x3
k3$children$`graphics-plot-1-points-1`$y -> y3

#plot(mod, type = "n", ptype = "b")
#points(mod, which = "ordination", col = "grey", pch = 19, cex = 0.7)
#lines(x,y, col=1)
#points(x,y)

# --- Combined timetrack figure (screen version) ------------------------------
plot(mod3, xlim=c(-0.6,0.5), ylim=c(-0.6,0.5))
points(mod3, which = "passive", col = "red")
points(mod4, which = "passive", col = "blue")
points(mod, which = "ordination", col = "grey", pch = 19, cex = 0.7)
lines(x,y, col="blue", lty=2)
points(mod5, which = "passive", col = "orange", pch = 21, cex = 1)
lines(x2,y2, col="orange", lty=3)
points(mod6, which = "passive", col = "black", pch = 21, cex = 1)
lines(x3,y3, col="black", lty=4)
text(my.pca, display = "sites", scaling=3, pos=4, cex = 0.8, col = "darkcyan")
text(my.pca, display="species", scaling=3)
legend("bottomleft", inset =0.05,legend=c("PD03", "PD67", "PD37", "PD38"), col=c("red", "blue", "orange", "black"), lty=1:4, cex=0.8, box.lty=0)

# --- Same figure written to TIFF (publication version; PD37 drawn in brown) --
tiff("SableSurface_b.tiff", width = 7, height = 7, units = 'in', res = 300)
plot(mod3, xlim=c(-0.6,0.5), ylim=c(-0.6,0.5), lwd=2)
points(mod3, which = "passive", col = "red")
points(mod4, which = "passive", col = "blue")
points(mod, which = "ordination", col = "grey", pch = 19, cex = 0.7)
lines(x,y, col="blue", lty=2, lwd=2)
points(mod5, which = "passive", col = "brown", pch = 21, cex = 1)
lines(x2,y2, col="brown", lty=3, lwd=3)
points(mod6, which = "passive", col = "black", pch = 21, cex = 1)
lines(x3,y3, col="black", lty=4, lwd=2)
text(my.pca, display = "sites", scaling=3, pos=4, cex = 0.8, col = "darkcyan")
#text(my.pca, display="species", scaling=3)
legend("bottomleft", inset =0.05,legend=c("PD03", "PD67", "PD37", "PD38"), col=c("red", "blue", "brown", "black"), lty=1:4, cex=0.8, lwd=2, box.lty=0)
dev.off()

# --- Stratigraphy ------------------------------------------------------------
# Choose which core to plot by (un)commenting the fos<- lines below; the
# matching yDepth/yYear lines further down must agree with the choice.
#stratigraphy
#fos<-PD03
fos<-PD67
#fos<-PD37
#fos<-PD38
# Depth/age columns are the first two columns of each raw core file
yPD03<-coreinput_PD03[1:2]
colnames(yPD03) <- c("Depth","Year")
yPD67<-coreinput_PD67[1:2]
colnames(yPD67) <- c("Depth","Year")
yPD37<-coreinput_PD37[1:2]
colnames(yPD37) <- c("Depth","Year")
yPD38<-coreinput_PD38[1:2]
colnames(yPD38) <- c("Depth","Year")
# Must match the core chosen for 'fos' above
yDepth<-yPD67$Depth
yYear<- yPD67$Year
#yDepth<-yPD03$Depth
#yYear<- yPD03$Year
#yDepth<-yPD37$Depth
#yYear<- yPD37$Year
#yDepth<-yPD38$Depth
#yYear<- yPD38$Year
# Prettify taxon names for the x axis labels (dots -> spaces)
nms <- colnames(fos)
nms3<-gsub("\\.", " ", nms)
# Constrained (stratigraphic) clustering of the core samples
diss <- dist(sqrt(fos/100)^2)
clust <- chclust(diss, method="coniss")
# NOTE(review): the next two lines recompute the same distance matrix and
# overwrite 'clust' using chclust's default method; confirm this duplication
# is intentional (the default method may coincide with "coniss").
diss <- dist(sqrt(fos/100)^2)
clust <- chclust(diss)
# Broken-stick model to assess the number of significant zones
bstick(clust, 5)
library(NbClust)
library(ggpalaeo)
sp <- strat.plot(fos, scale.percent=TRUE, yvar=yDepth, title="PG Lake", ylabel="", srt.xlabel=45, col.bar=1, plot.line=FALSE, plot.bar = TRUE, lwd.bar=4, y.rev=TRUE, xLeft=0.18, xRight=0.75, y.axis=0.1, x.names=nms3, yTop=0.68, cex.yaxis=1, cex.ylabel=1, cex.xlabel=1.0, cex.axis=1, clust=clust)
# Secondary age axis (years CE) alongside the depth axis
secondary_scale(sp, yvar = yDepth, yvar2 = yYear, n = 15, ylabel2 = "age (CE)")
# Draw 3 cluster zones from the CONISS dendrogram
addClustZone(sp, clust, 3, col="black", lty=2, lwd=2)
956bdb76bff4acf2e5546d3d3465c2f415ac3725
ac82cd0243a121e3749f7dcfb3f31d2fa33b2a9f
/R_Programming/exploring_linux_evolution.R
244e28a8d820f877c19f428a0c405ec05975ee14
[ "MIT" ]
permissive
arpithaupd/Data-Science-Projects
63aa24db88b85f82d2b3e79510fce77f6b596cbf
423695c130a92db1a188b3d3a13871f0f76f6f5b
refs/heads/master
2023-04-29T08:54:42.834503
2021-04-21T16:12:23
2021-04-21T16:12:23
null
0
0
null
null
null
null
UTF-8
R
false
false
2,446
r
exploring_linux_evolution.R
# R script to perform the data analysis and visualization of the Linux evolution
# Tasks:
#   1. Identifying the TOP 10 contributors to the Linux repository
#   2. Visualizing the commits over the years to the Linux repository

# ----------------- Reading the dataset ---------------------------------------
# Expected columns: 'author' and 'timestamp' (Unix epoch seconds) -- TODO
# confirm against git_log.csv.
# NOTE: the name 'log' shadows base::log() while this script is loaded; kept
# unchanged so downstream code relying on these global names still works.
log <- read.csv("git_log.csv", header = TRUE, sep = ",")
head(log)     # Display first 6 rows of dataset
summary(log)  # Summary of the dataset
str(log)      # Structure of dataset

# ----------------- Part 1: Data Analysis -------------------------------------
# 1. Number of commits (one row per commit)
num.commits <- nrow(log)

# 2. Number of distinct authors
num.authors <- length(unique(log$author))
paste(num.authors, " authors committed ", num.commits, " code changes.")

# 3. TOP 10 contributors that changed the Linux kernel most often
#    (TRUE spelled out -- T is a reassignable alias and should not be used)
freq.data <- table(log$author)
top10 <- as.data.frame(head(sort(freq.data, decreasing = TRUE), 10))
colnames(top10) <- c("Authors", "Commits")
top10

# ----------------- Part 2: Data Visualization --------------------------------
# Use the epoch timestamps to create a proper date-time column.
if (!require("lubridate")) install.packages("lubridate")
library(lubridate)

# Use POSIXct rather than POSIXlt: POSIXlt is a list-based representation and
# is not meant to be stored in a data frame column (see ?DateTimeClasses);
# POSIXct behaves identically for the comparisons and as.Date() calls below.
log$corrected_timestamp <- as.POSIXct(log$timestamp,
                                      origin = "1970-01-01 00:00:00",
                                      tz = "UTC")
log$timestamp <- NULL
summary(log)

# Determining first real commit timestamp (the data is assumed to be ordered
# newest-first, so the last row holds the earliest commit -- TODO confirm)
first.commit <- tail(log, 1)$corrected_timestamp
# Determining the last sensible commit timestamp (today, UTC)
last.commit <- today(tzone = "UTC")

# Filtering out wrong (out-of-range) timestamps
corrected.log <- log[(log$corrected_timestamp >= first.commit) &
                       (log$corrected_timestamp <= last.commit), ]
head(corrected.log)
summary(corrected.log)

# Group the commits by year
corrected.log$Year <- year(ymd(as.Date(corrected.log$corrected_timestamp)))
corrected.log

if (!require("dplyr")) install.packages("dplyr")
library(dplyr)

# One row per year with the number of commits in that year
visual.log <- corrected.log %>%
  group_by(Year) %>%
  summarise(author = n())
colnames(visual.log) <- c("Year", "Commits")
visual.log

if (!require("ggplot2")) install.packages("ggplot2")
library(ggplot2)

q <- ggplot(visual.log, aes(x = Year, y = Commits)) +
  geom_line(color = "blue", size = 1.2) +
  geom_point()
q + xlab("Year") +
  ylab("Commits") +
  ggtitle("Commits to Linux repo over the years ") +
  theme(axis.title.x = element_text(size = 15),
        axis.title.y = element_text(size = 15),
        plot.title = element_text(hjust = 0.5, size = 20))

# Removing all the unnecessary data from the environment
rm(corrected.log, first.commit, last.commit, freq.data)
a464ab7bce49653298e3a4b3238148f8b15ee36a
5cdd72710fe3a519859bac1a40fd68723330b2ef
/man/latlon_to_km.Rd
ea9483dc6e92eddddbfaa3ea2ec01192abdb3a3e
[]
no_license
cran/stormwindmodel
75b81aa790bfbe30afc16d1c0ebe8bf8251b4b24
59a439b1592cef52ea2d03cb192fa70b033322c0
refs/heads/master
2021-01-12T01:56:16.406086
2020-07-27T06:10:02
2020-07-27T06:10:02
78,445,768
0
0
null
null
null
null
UTF-8
R
false
true
2,118
rd
latlon_to_km.Rd
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/latlon_helpers.R \name{latlon_to_km} \alias{latlon_to_km} \title{Calculate distance between two locations} \usage{ latlon_to_km(tclat_1, tclon_1, tclat_2, tclon_2, Rearth = 6378.14) } \arguments{ \item{tclat_1}{A numeric vector giving latitude of the first location (degrees)} \item{tclon_1}{A numeric vector giving longitude of the first location (degrees). This value should be expressed as a positive value for Western hemisphere longitudes.} \item{tclat_2}{A numeric vector giving latitude of the second location (degrees)} \item{tclon_2}{A numeric vector giving longitude of the second location (degrees). This value should be expressed as a positive value for Western hemisphere longitudes.} \item{Rearth}{Radius of the earth (km). Default is 6378.14 km.} } \value{ A vector with the distance between the two locations, in kilometers. } \description{ This function takes latitudes and longitudes for two locations and calculates the distance (in meters) between the locations using the Haversine method. } \details{ This function uses the Haversine method with great circle distance to calculate this distance. 
It is applying the following equations to determine distance (in kilometers)
between two latitude-longitude pairs:
\deqn{hav(\gamma) = hav(\phi_1 - \phi_2) + cos(\phi_1)*cos(\phi_2)*hav(L_1 - L_2)}{
hav(\gamma) = hav(\phi1 - \phi2) + cos(\phi1)*cos(\phi2)*hav(L1 - L2)}
where:
\itemize{
\item{\eqn{\phi_1}{\phi1}: Latitude of first location, in radians}
\item{\eqn{\phi_2}{\phi2}: Latitude of second location, in radians}
\item{\eqn{L_1}{L1}: Longitude of first location, in radians}
\item{\eqn{L_2}{L2}: Longitude of second location, in radians}
\item{\eqn{hav(\gamma)}: The haversine function,
\eqn{hav(\gamma) = sin^2 \left(\frac{\gamma}{2}\right)}{
hav(\gamma) = sin^2 (\gamma / 2)}}
\item{\eqn{R_earth}{Rearth}: Radius of the earth, here assumed to be
6378.14 kilometers}
\item{\eqn{D}{D}: Distance between the two locations, in kilometers}
}
}
82f4f5ffd021ebb6e6428b2c70dacd759452c508
c3ed0eea77de3338cc4820ca27dc04384676b29b
/man/rescale_capped.Rd
9a07036b85ac58b6630e3109368c3e25258f0701
[]
no_license
jrboyd/seqtsne
f3bd94ee23140dc71373af4f1e7ce9ffe60d702f
5a67cbe5af281ec42906689d1a9961d8fe9de68d
refs/heads/master
2022-10-30T18:33:13.005047
2022-10-28T20:22:58
2022-10-28T20:22:58
177,857,441
0
0
null
null
null
null
UTF-8
R
false
true
1,070
rd
rescale_capped.Rd
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/functions_util.R \name{rescale_capped} \alias{rescale_capped} \title{wraps scales::rescale to enforce range of \code{to} on output} \usage{ rescale_capped(x, to = c(0, 1), from = range(x, na.rm = TRUE, finite = TRUE)) } \arguments{ \item{x}{continuous vector of values to manipulate.} \item{to}{output range (numeric vector of length two)} \item{from}{input range (vector of length two). If not given, is calculated from the range of x} } \value{ x rescaled from \code{from} domain to \code{to} domain, within bounds of \code{to} } \description{ wraps scales::rescale to enforce range of \code{to} on output } \examples{ #behaves identically to scales::rescale when x is within 'from' domain chiptsne:::rescale_capped(0:10, to = c(0, 1), c(0, 10)) scales::rescale(0:10, to = c(0, 1), c(0, 10)) #when x exceeds 'from' domain, results are still within 'to' domain chiptsne:::rescale_capped(0:10, to = c(0,1), c(0,5)) #not true for scales::rescale scales::rescale(0:10, to = c(0,1), c(0,5)) }
ec26e819680a50353f148d5713b36799de04bf51
3de27dccf8fdf1855fdacf6c71b138e1b0c48f0e
/L-13.R
ba53c039298aa506a261a2a99bd588488417127b
[]
no_license
tongramt/Forecasting
2a909478af578eb5a9c64ae9779362f33320b0ec
f9c1478bcbe0a83befadddf03b64ccbaad9efd28
refs/heads/main
2023-01-23T17:27:26.631733
2020-11-30T09:46:03
2020-11-30T09:46:03
311,314,162
0
1
null
null
null
null
UTF-8
R
false
false
243
r
L-13.R
# Title     : Lecture 13
# Objective : Write and annotate code for lecture 13
# Created by: ThomasGrant
# Created on: 29/10/2020

# library() errors immediately when 'fma' is missing; require() only warns
# and returns FALSE, which would let the script fail later with a confusing
# "could not find function" error.
library(fma)

# Time-series linear model: regress the AirPassengers series on seasonal
# dummy variables.
tslm.airpass <- tslm(airpass ~ season)
tslm.airpass

# ARIMA(0,1,0) without mean: a random walk model for the Dow Jones index.
Arima(dowjones, order = c(0, 1, 0), include.mean = FALSE)
8a871a8738acbf0b92ced79bbc62327c321ff211
e6a9d96db33e6cd819e4f1dbd60c80b0eb53a615
/man/fit_agTrend_ssl_mgcv.Rd
c528b310e40ad08314e5a3c11609a5ea11002fe2
[]
no_license
dsjohnson/agTrendTMB
42384a16831a7f21b4bfe61ad61075b6555cac8f
403c55da56e891c8126b912ab8958346d7f1d3c8
refs/heads/master
2023-01-10T19:30:07.323254
2020-11-10T01:19:36
2020-11-10T01:19:36
307,476,720
0
0
null
null
null
null
UTF-8
R
false
true
1,472
rd
fit_agTrend_ssl_mgcv.Rd
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/fit_agTrend_ssl_mgcv.R
\name{fit_agTrend_ssl_mgcv}
\alias{fit_agTrend_ssl_mgcv}
\title{Site-level fitting function}
\usage{
fit_agTrend_ssl_mgcv(.x, obl.corr = FALSE, debug = FALSE)
}
\arguments{
\item{.x}{Data frame for an individual site. Must have been passed through
\code{\link{prep_for_fit}} first!}

\item{obl.corr}{Logical. Should correction be made for oblique photos.}

\item{debug}{Logical. Step into fitting function for interactive evaluation}

\item{model}{Model for interpolation: 'const', 'lin', or 'gp' (see Details)}
}
\value{
A list with the following elements:
\itemize{
\item summary Parameter and abundance estimate information
\item raw_data Original data plus oblique photo correction estimates
\item q_output Quality control diagnostic data
}
}
\description{
Fit separate discrete-normal distribution models to Steller sea lion counts
at a single site
}
\details{
There are 3 models which can be used for within site interpolation:
\itemize{
\item \code{const} This interpolates using a simple mean and variance for the site
\item \code{lin} A linear fit with time is used for interpolation
\item \code{gp} i.e., Gaussian Process. This model uses a time trend plus a
random walk of order 2 (RW(2)) to interpolate counts. Further, the RW(2)
process can be further restrained to minimize 'wiggliness' of the curve when
necessary. This prevents overfitting of the curve.
}
}
1483c90d8326c4cc9daf0ad99b2a70f10357dad5
0b7e9f17d32d84558b95321d92177b7e4532ffdb
/R/match_and_append.R
919e0205abb2b8a9c83540c0057c702ce854ed9a
[]
no_license
agualtieri/mcla_cleaneR
0de5d5a40334e1f2e74b028dd9dff8bb402c97fb
397225a064b486bc48020f4f0d3286a266c862b0
refs/heads/master
2020-05-23T13:52:37.834105
2019-09-19T12:03:08
2019-09-19T12:03:08
186,787,354
0
6
null
2019-07-10T09:25:23
2019-05-15T08:50:52
R
UTF-8
R
false
false
689
r
match_and_append.R
# Look up values in `source_df` and append them to `df` as a new column.
#
# For each row of `df`, the value in column `df_variable_for_match` is matched
# against column `source_df_variable_for_match` of `source_df`; the
# corresponding entry of `source_df[[variable_to_append]]` is stored in a new
# column `appended_var` (NA where no match is found). Returns the augmented
# data frame.
#
# BUG FIX: the arguments are column NAMES (strings), so they must be
# dereferenced with [[ ]]. The original body used `$variable_for_match` /
# `$variable_to_append`, which look up columns literally named
# "variable_for_match" etc., returning NULL and never performing the lookup.
match_and_append <- function(df, source_df, df_variable_for_match,
                             source_df_variable_for_match, variable_to_append) {
  df$appended_var <- source_df[[variable_to_append]][
    match(df[[df_variable_for_match]], source_df[[source_df_variable_for_match]])
  ]
  df
}

test <- match_and_append(cleaning_log_melt, conditions_list, "variable", "check_names", "conditions")

cleaning_log_melt$quality_checks <- conditions_list$conditions[match(cleaning_log_melt$variable, conditions_list$check_names)]

rm(test)
3b5787ea7aa266e4797e7427fb628b7c085c2943
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
/data/genthat_extracted_code/phonTools/examples/spectralslice.Rd.R
a206ae99f4098cf19998b64f6640c02242aeb335
[]
no_license
surayaaramli/typeRrh
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
66e6996f31961bc8b9aafe1a6a6098327b66bf71
refs/heads/master
2023-05-05T04:05:31.617869
2019-04-25T22:10:06
2019-04-25T22:10:06
null
0
0
null
null
null
null
UTF-8
R
false
false
450
r
spectralslice.Rd.R
# Auto-generated example script for phonTools::spectralslice
# (extracted from the package's Rd examples).
library(phonTools)

### Name: spectralslice
### Title: Spectral Slice
### Aliases: spectralslice slice

### ** Examples

## synthesize schwa-like vowel
# vowelsynth() is given five formant frequencies; '$sound' extracts the
# synthesized waveform samples from the returned object -- see ?vowelsynth.
vowel = vowelsynth (ffs = c(500,1500,2500,3500,4500))$sound

## compare window lengths
# Three stacked panels: a short (51-sample) and a long (501-sample) analysis
# window, then a line spectrum. fs = 10000 -- assumed to match the
# vowelsynth() sampling rate; TODO confirm.
par (mfrow = c(3,1))
spectralslice (vowel[500:550], fs = 10000)
spectralslice (vowel[500:1000], fs = 10000)
## line spectrum
spectralslice (vowel[500:600], padding = 0, line = TRUE, fs = 10000)
753442eb1321d43c3c14163c8a2f94ffb846db08
ffcc3072f9a5190b068f296a6d22404bee24eeb0
/man/ocrTesseract.Rd
474a174e2742b72bd532a55725d0ab337cac500d
[ "Apache-2.0" ]
permissive
greenore/ocR
213661192761fc5fdc9ab7f376a03663737c6e66
d47d0b5db1d487f42399be361e11ef98c305fce2
refs/heads/master
2021-01-20T12:04:29.617030
2014-12-18T14:51:32
2014-12-18T14:51:32
26,242,115
6
3
null
null
null
null
UTF-8
R
false
false
2,377
rd
ocrTesseract.Rd
% Generated by roxygen2 (4.0.2): do not edit by hand \name{ocrTesseract} \alias{ocrTesseract} \title{OCR with tesseract} \usage{ ocrTesseract(dir_path, image_name, output_base, lang = "eng", psm = 5) } \arguments{ \item{dir_path}{Path to the directory where the input and output files are.} \item{image_name}{The name of the input image. Most image file formats are supported.} \item{output_base}{The basename of the output file (to which the appropriate extension will be appended).} \item{lang}{The language to use. If none is specified, English is assumed. Multiple languages may be specified, separated by plus characters. Tesseract uses 3-character ISO 639-2 language codes: \describe{ \item{}{ara (Arabic), aze (Azerbauijani), bul (Bulgarian), cat (Catalan), ces (Czech), chi_sim (Simplified Chinese), chi_tra (Traditional Chinese), chr (Cherokee), dan (Danish), dan-frak (Danish (Fraktur)), deu (German), ell (Greek), eng (English), enm (Old English), epo (Esperanto), est (Estonian), fin (Finnish), fra (French), frm (Old French), glg (Galician), heb (Hebrew), hin (Hindi), hrv (Croation), hun (Hungarian), ind (Indonesian), ita (Italian), jpn (Japanese), kor (Korean), lav (Latvian), lit (Lithuanian), nld (Dutch), nor (Norwegian), pol (Polish), por (Portuguese), ron (Romanian), rus (Russian), slk (Slovakian), slv (Slovenian), sqi (Albanian), spa (Spanish), srp (Serbian), swe (Swedish), tam (Tamil), tel (Telugu), tgl (Tagalog), tha (Thai), tur (Turkish), ukr (Ukrainian), vie (Vietnamese)} }} \item{psm}{Set Tesseract to only run a subset of layout analysis and assume a certain form of image. The options for N are: \itemize{ \item{0}{ = Orientation and script detection (OSD) only.} \item{1}{ = Automatic page segmentation with OSD.} \item{2}{ = Automatic page segmentation, but no OSD, or OCR.} \item{3}{ = Fully automatic page segmentation, but no OSD. 
(Default)} \item{4}{ = Assume a single column of text of variable sizes.} \item{5}{ = Assume a single uniform block of vertically aligned text.} \item{6}{ = Assume a single uniform block of text.} \item{7}{ = Treat the image as a single text line.} \item{8}{ = Treat the image as a single word.} \item{9}{ = Treat the image as a single word in a circle.} \item{10}{ = Treat the image as a single character.} }} } \description{ \code{Optical character recognition (OCR) trough tesseract} }
33e20493de0140eead90b391909145f0165283df
37fcfce951487d3ba45d2ba3dbc22b6360939b77
/man/scale_per_minute.Rd
84b16e1346a30f652a7bef40237b058b096f5ad7
[]
no_license
abresler/nbastatR
576155fb58378c03010590c81806515161c03eb5
aba9179ef644f263387c1536d6ddd26104d79cf4
refs/heads/master
2023-08-08T08:52:05.149224
2023-07-19T13:09:21
2023-07-19T13:09:21
43,844,510
307
94
null
2023-02-01T16:59:22
2015-10-07T21:00:59
R
UTF-8
R
false
true
356
rd
scale_per_minute.Rd
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/utils.R \name{scale_per_minute} \alias{scale_per_minute} \title{Summarise data per minute} \usage{ scale_per_minute(data, scale_columns = NULL) } \arguments{ \item{data}{a data frame} \item{scale_columns}{vector of columns to scale} } \description{ Summarise data per minute }
797c1c10cf0533b58b8e5e79f531d89a51834d09
a48797beca55474d7b39676389f77f8f1af76875
/man/graph_to_pcs.Rd
7b994805ac850e0f349c91fcc0e3a56f07803723
[]
no_license
uqrmaie1/admixtools
1efd48d8ad431f4a325a4ac5b160b2eea9411829
26759d87349a3b14495a7ef4ef3a593ee4d0e670
refs/heads/master
2023-09-04T02:56:48.052802
2023-08-21T21:15:27
2023-08-21T21:15:27
229,330,187
62
11
null
2023-01-23T12:19:57
2019-12-20T20:15:32
R
UTF-8
R
false
true
1,076
rd
graph_to_pcs.Rd
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/toposearch.R \name{graph_to_pcs} \alias{graph_to_pcs} \title{Simulate PCs under an admixture graph} \usage{ graph_to_pcs( graph, nsnps = 10000, drift_default = 0.02, admix_default = 0.5, leaves_only = TRUE ) } \arguments{ \item{graph}{An admixture graph as igraph object, or as edge list data frame with a column \code{weight}, as returned by \code{qpgraph()$edges}} \item{nsnps}{Number of SNPs to simulate} \item{drift_default}{Default branch lengths. Ignored if \code{graph} is a data frame with weights} \item{admix_default}{Default admixture weights. Ignored if \code{graph} is a data frame with weights} \item{leaves_only}{Return PCs for leaf nodes only} } \value{ A data frame with PCs for each population } \description{ This function simulates PCA of allele frequencies under an admixture graph model } \examples{ \dontrun{ pcs = graph_to_pcs(example_igraph) pcs \%>\% ggplot(aes(PC1, PC2, label = pop)) + geom_text() + geom_point() } } \seealso{ \code{\link{graph_to_afs}} }
afe6a88b05f53284f158ff0ed7e6d011aa29dda2
40261deb3548e47216204512da1012d927bad4aa
/man/fpl_player_hist.Rd
f9192299d305a838809b0da8c6b121658ee9af12
[]
no_license
jphelps13/fpl.analytics
05195c122036e083cfc01dbf5329020ca527e4b8
4f21706e42cb083125aec50b252efd151994ff80
refs/heads/master
2020-03-25T11:02:51.322784
2018-08-14T15:36:20
2018-08-14T15:36:20
143,717,104
0
0
null
null
null
null
UTF-8
R
false
true
631
rd
fpl_player_hist.Rd
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/fpl_player_hist.R \docType{data} \name{fpl_player_hist} \alias{fpl_player_hist} \title{Player historical FTP data} \format{An object of class \code{data.table} (inherits from \code{data.frame}) with 1696 rows and 26 columns.} \source{ \url{https://fantasy.premierleague.com/drf/element-summary} } \usage{ fpl_player_hist } \description{ input from a \code{\link{fpl_season_json}} list object. Loops through the ids, and collects all the historical season data for these players } \examples{ fpl_season_json <- pullFplSeasonJson() } \keyword{datasets}
b76e43bc4035110a71ca5ebd0bbd5d821fac321e
781aef24c9fae0d1f275f9ac0eae0f7164b79a69
/R/fitted.sca.R
d8336026a25e95d114f617f5f492ef9d6d142471
[]
no_license
cran/multiway
e3326bc34a72c7a74037c68b053428102fab5cf2
f2e6377ccb5207ddc6d7d8910a0f3293f87185c3
refs/heads/master
2021-01-22T08:53:07.193287
2019-03-13T07:20:03
2019-03-13T07:20:03
34,022,312
1
2
null
null
null
null
UTF-8
R
false
false
249
r
fitted.sca.R
fitted.sca <- function(object,...){ # Calculates Fitted Values (lists) for fit SCA Models # Nathaniel E. Helwig (helwig@umn.edu) # last updated: April 9, 2015 sapply(object$D,function(d,b){tcrossprod(d,b)},b=object$B) }
0cda1358539a4ce14d089eeb196010cc2b612e26
bf7507dfab13cf917d2be97fdd2abee5c21ebf04
/man/ovarianSubsampling.Rd
bc3935ce645ac5553f76652dbc8ff1fef5988c41
[]
no_license
cbg-ethz/TiMEx
acac638214cd97e7ec10e09f74854f13aa351c7e
f0d0f0f5fd8c58bb5da9d01ffa49bbe6ffc09c19
refs/heads/master
2021-01-15T14:50:31.880579
2018-11-12T17:22:15
2018-11-12T17:22:15
38,927,358
4
3
null
2018-11-12T17:22:16
2015-07-11T13:53:33
R
UTF-8
R
false
true
1,468
rd
ovarianSubsampling.Rd
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/documentDatasets.R \name{ovarianSubsampling} \alias{ovarianSubsampling} \title{Stability of mutually exclusive groups in ovarian cancer} \format{\code{ovarianSubsampling} is a list with as many elements as subsampling frequencies provided (3 in this case). Each element is further a list with as many elements as number of sizes of the significantly mutually exclusive groups identified. Additionally, \code{bonf} and \code{fdr} are two lists corresponding to each of these elements, representing different multiple correction methods. Finally, each element is a vector of relative counts of the significantly mutually exclusive groups identified. For example, \code{ovarianSubsampling[[1]][[3]]} represents the relative counts of the identified mutually exclusive groups of size 3 for a subsampling frequency of 30\%, for both \code{fdr} and \code{bonf} (bonferroni) multiple correction methods.} \source{ Produced with the function \code{\link{subsampleAnalysis}}, ran with the inputs \code{subsampl<-c(0.3,0.5,0.8)} \code{noReps<-100} and the mutually exclusive groups from \code{\link{ovarianOutput}}. } \description{ Dataset containing the stability of the mutually exclusive groups identified by TiMEx in the ovarian cancer dataset \code{\link{ovarian}}, after subsampling the set of patients at frequencies of \code{30\%}, \code{50\%}, and \code{80\%}, 100 times. }
bd8339cf91e43c371d56badb0a059090c4ac0724
d2d18a40e768ce045c82639e16bf541dd36c660c
/03_diagnostics/hh_size_distro.R
817e9a6b87501c1bdd8431d5c15996dd0d001658
[]
no_license
manzorz/wash_mapping
a607dd7f158217b2628cdf8fc75957abe1a64812
821fa49b66847d256adfb9e0b6fdbd7fef15a2d5
refs/heads/master
2021-07-09T00:14:19.171006
2018-01-31T20:53:32
2018-01-31T20:53:32
95,460,682
0
0
null
2017-06-26T15:21:39
2017-06-26T15:21:38
null
UTF-8
R
false
false
3,562
r
hh_size_distro.R
library(tidyverse) library(ggpubr) library(grid) library(gridExtra) setwd('J:/LIMITED_USE/LU_GEOSPATIAL/geo_matched/wash') # read in extraction load('points_collapsed_2017_07_31.Rdata') load('polys_collapsed_2017_07_31.Rdata') # bind rows to create master dataset alldat <- bind_rows(pt_collapse, poly_collapse) alldat$point <- ifelse(is.na(alldat$lat)|is.na(alldat$long), 'poly','point') # read in post-collapse data mydat <- read_csv(file = 'J:/WORK/11_geospatial/10_mbg/input_data/w_imp.csv', col_types = 'ddddcddcdcddddddd') # define regions cssa <- c('CAR','COD','GNQ','COG', 'GAB','STP','AGO') sssa_hi <- c('NAM','ZAF','BWA') name_hi <- c('MAR','DZA','TUN','LBY','EGY') essa_hilo <- c('SDN','SSD','ETH','ERI','DJI','SOM','KEN','UGA','RWA', 'BDI','TZA','MWI','ZMB','ZWE','LSO','SWZ','MDG') wssa <- c('MRT','MLI','NER','TCD','SEN','GMB','GIN','SLE','LBR','CIV', 'GHA','BFA','TGO','BEN','NGA','CMR', 'CPV') reg_list <- list(cssa, sssa_hi, name_hi, essa_hilo) reg_vec <- unlist(reg_list) # plot for 5 countries at a time violin plots of hh_size by point vs. poly # for pre-collapse data setwd('C:/Users/adesh/Desktop') pdf('pre_collapse_hhsize_dx.pdf') for (i in seq(1, length(reg_vec), by = 5)) { sub_vec <- reg_vec[i:ifelse(i+4>length(reg_vec),length(reg_vec),i+4)] plot_dat <- filter(alldat, iso3 %in% sub_vec) print( ggplot(plot_dat) + geom_violin(fill = 'red', aes(x = iso3, y = hh_size)) + theme_bw() + facet_grid(point ~ ., scales = 'free_y') ) } dev.off() # plot for 5 countries at a time violin plots of hh_size by point vs. poly # for post-collapse data setwd('C:/Users/adesh/Desktop') pdf('post_collapse_hhsize_dx.pdf') for (i in seq(1, length(reg_vec), by = 5)) { sub_vec <- reg_vec[i:ifelse(i+4>length(reg_vec),length(reg_vec),i+4)] plot_dat <- filter(mydat, country %in% sub_vec) print( ggplot(plot_dat) + geom_violin(fill = 'red', aes(x = country, y = N)) + theme_bw() + facet_grid(point ~ ., scales = 'free_y') ) } dev.off() # ggplot scatter of indicator vs. 
N after running collapse script setwd('C:/Users/adesh/Desktop') pdf('post_collapse_hhsize_dx_indi_pt.pdf') for (i in 1:length(reg_vec)) { sub_vec <- reg_vec[i] plot_dat <- filter(ptdat, iso3 %in% sub_vec) if (nrow(plot_dat) > 0) { g1 <- ggplot(plot_dat, aes(total_hh)) + geom_freqpoly() + ggtitle(sub_vec) + theme_bw() g2 <- ggplot(plot_dat, aes(piped)) + geom_freqpoly() + theme_bw() g3 <- ggplot(plot_dat) + geom_point(aes(x = total_hh, y = piped, col = survey_series)) + theme_bw() g4 <- ggplot(plot_dat, aes(imp)) + geom_freqpoly() + theme_bw() g5 <- ggplot(plot_dat) + geom_point(aes(x = total_hh, y = imp, col = survey_series)) + theme_bw() print(g1, labels = sub_vec) print(ggarrange(g2,g3, labels = sub_vec, ncol = 1, nrow = 2)) print(ggarrange(g4,g5, labels = sub_vec, ncol = 1, nrow = 2)) grid.newpage() print(grid.table(plot_dat %>% group_by(nid, survey_series, year_start) %>% summarize(total_people = sum(total_hh, na.rm = T)))) } } dev.off() plot_dat <- filter(alldat, iso3 %in% c('LSO','MWI','TZA')) ggplot(plot_dat) + geom_boxplot(fill = 'red', aes(x = iso3, y = hh_size)) + theme_bw() + facet_grid(point ~ ., scales = 'free_y') test <- filter(alldat, point == 'poly') %>% group_by(iso3) %>% summarize(hh_75 = quantile(hh_size, 0.75, na.rm = T)) # repeat for peru data peru_dat <- filter(alldat, iso3 %in% 'PER') ggplot(peru_dat) + geom_violin(fill = 'red', aes(x = iso3, y = hh_size)) + theme_bw() + facet_grid(point ~ ., scales = 'free_y')
79f5593b9c6f7f51517e35cf4fce542ae4853fbd
719500684fceaf0a7a80ce663e9cf07802e10b9a
/R/open_url.r
7ffcaf5f14198ed79f949220d2c48d9659095628
[]
no_license
pbreheny/breheny
964baf0670a9eb4975946eae66772d47d9affd11
3e15bb78a769616adb49ea50c800d026f48ef8e7
refs/heads/master
2023-08-09T18:34:35.865871
2023-08-01T15:38:54
2023-08-01T15:38:54
82,972,930
0
0
null
null
null
null
UTF-8
R
false
false
268
r
open_url.r
#' Open a url: quietly, in the background, and in a new window #' #' @param x URL to open #' #' @examples #' open_url('www.google.com') #' @export open_url <- function(x) { system2('sensible-browser', c('--new-window', x), stdout=NULL, stderr=NULL, wait=FALSE) }
66a501d69257b19a349696e42d9f5e07d78bdbab
089b39df2c24c06c3ff605d82705ba588decb419
/scripts/05_mdd_enrichment/01_prep_singlecell_for_ldsc.R
aa93ca1c549006ff02b7ff8ea9a7eab01ab7e737
[]
no_license
JujiaoKang/2020_PNAS_Depression
09526a8b22baa3ecb635fdf71f9b485480692e32
e5bcac0d9a456f433e2d7e374af767f8d6ad2cdf
refs/heads/master
2023-05-27T09:39:26.669418
2021-06-07T12:11:02
2021-06-07T12:11:02
null
0
0
null
null
null
null
UTF-8
R
false
false
15,120
r
01_prep_singlecell_for_ldsc.R
library(tidyverse) library(EWCE) library(MAGMA.Celltyping) library(biomaRt) library(org.Hs.eg.db) library(stringr) # This script will write text files of #################### # set up directories #################### base_dir = '/gpfs/milgram/project/holmes/kma52/mdd_gene_expr' umi_dir = file.path(base_dir, '/data/singlecell/') ldsc_dir = paste0(base_dir, '/data/ldsc') # 37 gene mapping ensembl37 = useMart("ENSEMBL_MART_ENSEMBL", dataset="hsapiens_gene_ensembl", host="grch37.ensembl.org") # easy to map genes hgnc_ens37_df = getBM(attributes=c('ensembl_gene_id', 'entrezgene_id', 'hgnc_symbol','chromosome_name', 'start_position', 'end_position'), mart = ensembl37) hgnc_ens37_df = hgnc_ens37_df[which(hgnc_ens37_df$chromosome_name %in% 1:22),] hgnc_ens37_df = hgnc_ens37_df[!is.na(hgnc_ens37_df$entrezgene_id),] hgnc_ens37_df = hgnc_ens37_df[!duplicated(hgnc_ens37_df$hgnc_symbol),] ################## # Lake 2018 - DFC ################## # EWCE cell-specific gene expression load(paste0(base_dir, '/output/diff_expr/CellTypeData_EWCE_lake_dfc_ctd_batch.rda'), verbose=T) dfc_ctd = ctd dfc_ctd = prepare.quantile.groups(dfc_ctd, specificity_species="human",numberOfBins=10) # load preprocessed data region = 'FrontalCortex' # current out dir prefix = 'lake_dfc_bigcat_ldsc' fine_prefix = 'lake_dfc_superordinate_cat_ldsc' # make if doesnt exist current_dir_out = paste0(ldsc_dir, '/', prefix, '_files') current_finedir_out = paste0(ldsc_dir, '/', fine_prefix, '_files') # write table with all genes and chromosomal positions ldsc_out = hgnc_ens37_df[c('ensembl_gene_id', 'chromosome_name', 'start_position', 'end_position')] colnames(ldsc_out) = c('GENE', 'CHR', 'START', 'END') ldsc_out = ldsc_out[ldsc_out$CHR %in% as.character(1:22),] ldsc_out = ldsc_out[order(ldsc_out$CHR),] ldsc_out = ldsc_out[order(as.numeric(ldsc_out$CHR)),] head(ldsc_out) # write gene coordinates out_path = paste0(current_dir_out, '/', prefix, '_gene_coords.txt') write_delim(ldsc_out, out_path, delim='\t') # 
out_path = paste0(current_finedir_out, '/', fine_prefix, '_gene_coords.txt') write_delim(ldsc_out, out_path, delim='\t') # genes matched between hgnc coordinate data from and specificity data frame hgnc_ens37_df_dfc = hgnc_ens37_df %>% filter(hgnc_symbol %in% rownames(dfc_ctd[[1]]$specificity_quantiles)) hgnc_ens37_df_nodfc = hgnc_ens37_df %>% filter(!hgnc_symbol %in% rownames(dfc_ctd[[1]]$specificity_quantiles)) # subset specificity data dfc_ctd[[1]]$specificity = dfc_ctd[[1]]$specificity[rownames(dfc_ctd[[1]]$specificity) %in% hgnc_ens37_df_dfc$hgnc_symbol,] dfc_ctd[[2]]$specificity = dfc_ctd[[2]]$specificity[rownames(dfc_ctd[[2]]$specificity) %in% hgnc_ens37_df_dfc$hgnc_symbol,] dfc_ctd = prepare.quantile.groups(dfc_ctd, specificity_species="human", numberOfBins=10) # sanity check using SST spec_check = as.data.frame(dfc_ctd[[1]]$specificity) spec_check = spec_check[rev(order(spec_check$SST)),] which(rownames(spec_check) == 'SST') spec_check = spec_check[rev(order(spec_check$In6)),] which(rownames(spec_check) == 'PVALB') spec_check = as.data.frame(dfc_ctd[[2]]$specificity) spec_check = spec_check[rev(order(spec_check$Inh)),] which(rownames(spec_check) == 'GAD1') # sanity check using SST # FINE cell cat nbins = 10 genebin_num = round(nrow(hgnc_ens37_df_dfc)/nbins) dfc_cells = colnames(dfc_ctd[[1]]$specificity_quantiles) quantile_df = as.data.frame(dfc_ctd[[1]]$specificity_quantiles) for (cell in dfc_cells){ write(cell,'') cur_quantile = quantile_df[[cell]] for (bin in 0:nbins){ ct = bin bin_hgnc = rownames(quantile_df)[which(cur_quantile == bin)] bin_ensembls = hgnc_ens37_df_dfc$ensembl_gene_id[hgnc_ens37_df_dfc$hgnc_symbol %in% bin_hgnc] print(length(bin_ensembls)) out_file = paste0(current_finedir_out, '/', fine_prefix, '_', cell, '_', str_pad(ct,2,pad="0"),'_',as.character(nbins),'_batch_genes.txt') write.table(bin_ensembls, out_file, quote=F, row.names=F, col.names=F) } } write.table(hgnc_ens37_df_nodfc$ensembl_gene_id, paste0(current_finedir_out, '/', 
fine_prefix, '_nomatch_batch_genes.txt'), quote=F, row.names=F, col.names=F) write.table(hgnc_ens37_df$ensembl_gene_id, paste0(current_finedir_out, '/', fine_prefix, '_allcontrol_batch_genes.txt'), quote=F, row.names=F, col.names=F) # BIG cell cat nbins = 10 genebin_num = round(nrow(hgnc_ens37_df_dfc)/nbins) dfc_cells = colnames(dfc_ctd[[2]]$specificity_quantiles) quantile_df = as.data.frame(dfc_ctd[[2]]$specificity_quantiles) for (cell in dfc_cells){ cur_quantile = quantile_df[[cell]] for (bin in 0:nbins){ ct = bin bin_hgnc = rownames(quantile_df)[which(cur_quantile == bin)] bin_ensembls = hgnc_ens37_df_dfc$ensembl_gene_id[hgnc_ens37_df_dfc$hgnc_symbol %in% bin_hgnc] print(length(bin_ensembls)) out_file = paste0(current_dir_out, '/', prefix, '_', cell, '_bigcat_', str_pad(ct,2,pad="0"),'_', as.character(nbins),'_batch_genes.txt') write.table(bin_ensembls, out_file, quote=F, row.names=F, col.names=F) } } write.table(hgnc_ens37_df_nodfc$ensembl_gene_id, paste0(current_dir_out, '/', prefix, '_nomatch_batch_genes.txt'), quote=F, row.names=F, col.names=F) write.table(hgnc_ens37_df$ensembl_gene_id, paste0(current_dir_out, '/', prefix, '_allcontrol_batch_genes.txt'), quote=F, row.names=F, col.names=F) ################## # Lake 2018 - VIS ################## # Lake VIS - specific gene expression load(paste0(base_dir, '/output/diff_expr/CellTypeData_EWCE_lake_vis_ctd_batch.rda'), verbose=T) vis_ctd = ctd vis_ctd = prepare.quantile.groups(vis_ctd, specificity_species="human",numberOfBins=10) # load preprocessed data region = 'VisualCortex' # current out dir prefix = 'lake_vis_bigcat_ldsc' fine_prefix = 'lake_vis_superordinate_cat_ldsc' # make if doesnt exist current_dir_out = paste0(ldsc_dir, '/', prefix, '_files') current_finedir_out = paste0(ldsc_dir, '/', fine_prefix, '_files') # write table with all genes and chromosomal positions ldsc_out = hgnc_ens37_df[c('ensembl_gene_id', 'chromosome_name', 'start_position', 'end_position')] colnames(ldsc_out) = c('GENE', 'CHR', 
'START', 'END') ldsc_out = ldsc_out[ldsc_out$CHR %in% as.character(1:22),] ldsc_out = ldsc_out[order(ldsc_out$CHR),] ldsc_out = ldsc_out[order(as.numeric(ldsc_out$CHR)),] head(ldsc_out) # write gene coordinate file for both BIG/FINE cats out_path = paste0(current_dir_out, '/', prefix, '_gene_coords.txt') write_delim(ldsc_out, out_path, delim='\t') # out_path = paste0(current_finedir_out, '/', fine_prefix, '_gene_coords.txt') write_delim(ldsc_out, out_path, delim='\t') # genes matched between hgnc coordinate data from and specificity data frame hgnc_ens37_df_vis = hgnc_ens37_df %>% filter(hgnc_symbol %in% rownames(vis_ctd[[1]]$specificity_quantiles)) hgnc_ens37_df_novis = hgnc_ens37_df %>% filter(!hgnc_symbol %in% rownames(vis_ctd[[1]]$specificity_quantiles)) # subset specificity data vis_ctd[[1]]$specificity = vis_ctd[[1]]$specificity[rownames(vis_ctd[[1]]$specificity) %in% hgnc_ens37_df_vis$hgnc_symbol,] vis_ctd[[2]]$specificity = vis_ctd[[2]]$specificity[rownames(vis_ctd[[2]]$specificity) %in% hgnc_ens37_df_vis$hgnc_symbol,] vis_ctd = prepare.quantile.groups(vis_ctd, specificity_species="human", numberOfBins=10) # sanity check using SST spec_check = as.data.frame(vis_ctd[[1]]$specificity) spec_check = spec_check[rev(order(spec_check$SST)),] which(rownames(spec_check) == 'SST') spec_check = spec_check[rev(order(spec_check$In6)),] which(rownames(spec_check) == 'PVALB') spec_check = as.data.frame(vis_ctd[[2]]$specificity) spec_check = spec_check[rev(order(spec_check$Inh)),] which(rownames(spec_check) == 'GAD1') # FINE cell cat nbins = 10 genebin_num = round(nrow(hgnc_ens37_df_vis)/nbins) vis_cells = colnames(vis_ctd[[1]]$specificity_quantiles) quantile_df = as.data.frame(vis_ctd[[1]]$specificity_quantiles) # for each cell, write gene lists for each decile (based on cell specificity) for (cell in vis_cells){ write(cell,'') cur_quantile = quantile_df[[cell]] for (bin in 0:nbins){ ct = bin bin_hgnc = rownames(quantile_df)[which(cur_quantile == bin)] bin_ensembls = 
hgnc_ens37_df_vis$ensembl_gene_id[hgnc_ens37_df_vis$hgnc_symbol %in% bin_hgnc] print(length(bin_ensembls)) out_file = paste0(current_finedir_out, '/', fine_prefix, '_', cell, '_', str_pad(ct,2,pad="0"),'_',as.character(nbins),'_batch_genes.txt') write.table(bin_ensembls, out_file, quote=F, row.names=F, col.names=F) } } write.table(hgnc_ens37_df_novis$ensembl_gene_id, paste0(current_finedir_out, '/', fine_prefix, '_nomatch_batch_genes.txt'), quote=F, row.names=F, col.names=F) write.table(hgnc_ens37_df$ensembl_gene_id, paste0(current_finedir_out, '/', fine_prefix, '_allcontrol_batch_genes.txt'), quote=F, row.names=F, col.names=F) # BIG cell cat nbins = 10 genebin_num = round(nrow(hgnc_ens37_df_vis)/nbins) vis_cells = colnames(vis_ctd[[2]]$specificity_quantiles) quantile_df = as.data.frame(vis_ctd[[2]]$specificity_quantiles) for (cell in vis_cells){ cur_quantile = quantile_df[[cell]] for (bin in 0:nbins){ ct = bin bin_hgnc = rownames(quantile_df)[which(cur_quantile == bin)] bin_ensembls = hgnc_ens37_df_vis$ensembl_gene_id[hgnc_ens37_df_vis$hgnc_symbol %in% bin_hgnc] print(length(bin_ensembls)) out_file = paste0(current_dir_out, '/', prefix, '_', cell, '_bigcat_', str_pad(ct,2,pad="0"),'_', as.character(nbins),'_batch_genes.txt') write.table(bin_ensembls, out_file, quote=F, row.names=F, col.names=F) } } write.table(hgnc_ens37_df_novis$ensembl_gene_id, paste0(current_dir_out, '/', prefix, '_nomatch_batch_genes.txt'), quote=F, row.names=F, col.names=F) write.table(hgnc_ens37_df$ensembl_gene_id, paste0(current_dir_out, '/', prefix, '_allcontrol_batch_genes.txt'), quote=F, row.names=F, col.names=F) # write table with all genes and chromosomal positions ldsc_out = hgnc_ens37_df[c('ensembl_gene_id', 'chromosome_name', 'start_position', 'end_position')] colnames(ldsc_out) = c('GENE', 'CHR', 'START', 'END') ldsc_out = ldsc_out[ldsc_out$CHR %in% as.character(1:22),] ldsc_out = ldsc_out[order(ldsc_out$CHR),] ldsc_out = ldsc_out[order(as.numeric(ldsc_out$CHR)),] out_path = 
paste0(current_dir_out, '/', prefix, '_gene_coords.txt') head(ldsc_out) write_delim(ldsc_out, out_path, delim='\t') # out_path = paste0(current_finedir_out, '/', fine_prefix, '_gene_coords.txt') write_delim(ldsc_out, out_path, delim='\t') # ABA MTG # ABA MTG # ABA MTG # ABA MTG # ABA MTG # ABA MTG load(paste0(base_dir, '/output/diff_expr/CellTypeData_EWCE_aba_mtg_ctd.rda'), verbose=T) mtg_ctd = ctd mtg_ctd = prepare.quantile.groups(mtg_ctd, specificity_species="human", numberOfBins=10) # 37 gene mapping ensembl38 = useMart("ensembl", dataset="hsapiens_gene_ensembl") # easy to map genes hgnc_ens38_df = getBM(attributes=c('ensembl_gene_id', 'entrezgene_id', 'hgnc_symbol','chromosome_name', 'start_position', 'end_position'), mart = ensembl38) hgnc_ens38_df = hgnc_ens38_df[which(hgnc_ens38_df$chromosome_name %in% 1:22),] hgnc_ens38_df = hgnc_ens38_df[!is.na(hgnc_ens38_df$entrezgene_id),] hgnc_ens38_df = hgnc_ens38_df[!duplicated(hgnc_ens38_df$hgnc_symbol),] # load preprocessed data load(paste0(umi_dir, '/ahba_mtg/mtg_singlecell_ahba_match_seurat_processed.Rdata'), verbose=T) #mtg_genes = colnames(mtg_dat)[1:colnames(mtg_dat)-1] # current out dir prefix = 'aba_mtg_bigcat_ldsc' fine_prefix = 'aba_mtg_finecat_ldsc' # make if doesnt exist current_dir_out = paste0(ldsc_dir, '/', prefix, '_files') current_finedir_out = paste0(ldsc_dir, '/', fine_prefix, '_files') ldsc_out = hgnc_ens38_df[c('ensembl_gene_id', 'chromosome_name', 'start_position', 'end_position')] colnames(ldsc_out) = c('GENE', 'CHR', 'START', 'END') ldsc_out = ldsc_out[ldsc_out$CHR %in% as.character(1:22),] ldsc_out = ldsc_out[order(ldsc_out$CHR),] ldsc_out = ldsc_out[order(as.numeric(ldsc_out$CHR)),] out_path = paste0(current_dir_out, '/',prefix,'_gene_coords.txt') write_delim(ldsc_out, out_path, delim='\t') print(out_path) # out_path = paste0(current_finedir_out, '/',fine_prefix,'_gene_coords.txt') write_delim(ldsc_out, out_path, delim='\t') # genes matched between hgnc coordinate data from and specificity 
data frame hgnc_ens38_df_mtg = hgnc_ens38_df[hgnc_ens38_df$hgnc_symbol %in% rownames(mtg_ctd[[1]]$specificity_quantiles),] hgnc_ens38_df_non_mtg = hgnc_ens38_df[!hgnc_ens38_df$hgnc_symbol %in% rownames(mtg_ctd[[1]]$specificity_quantiles),] # subset specificity data mtg_ctd[[1]]$specificity = mtg_ctd[[1]]$specificity[rownames(mtg_ctd[[1]]$specificity) %in% hgnc_ens38_df_mtg$hgnc_symbol,] mtg_ctd[[2]]$specificity = mtg_ctd[[2]]$specificity[rownames(mtg_ctd[[2]]$specificity) %in% hgnc_ens38_df_mtg$hgnc_symbol,] #mtg_ctd = prepare.quantile.groups(mtg_ctd, specificity_species="human", numberOfBins=20) mtg_ctd = prepare.quantile.groups(mtg_ctd, specificity_species="human", numberOfBins=10) nbins = 10 genebin_num = round(nrow(hgnc_ens38_df)/nbins) mtg_cells = colnames(mtg_ctd[[1]]$specificity_quantiles) quantile_df = as.data.frame(mtg_ctd[[1]]$specificity_quantiles) for (cell in mtg_cells){ cur_quantile = quantile_df[[cell]] for (bin in 0:nbins){ ct = bin bin_hgnc = rownames(quantile_df)[which(cur_quantile == bin)] bin_ensembls = hgnc_ens38_df_mtg$ensembl_gene_id[hgnc_ens38_df_mtg$hgnc_symbol %in% bin_hgnc] print(length(bin_ensembls)) out_file = paste0(current_finedir_out, '/', fine_prefix, '_', cell, '_', str_pad(ct,2,pad="0"),'_',as.character(nbins),'_genes.txt') write.table(bin_ensembls, out_file, quote=F, row.names=F, col.names=F) } } write.table(hgnc_ens38_df_non_mtg$ensembl_gene_id, paste0(current_finedir_out, '/', fine_prefix, '_nomatch_genes.txt'), quote=F, row.names=F, col.names=F) write.table(hgnc_ens38_df_mtg$ensembl_gene_id, paste0(current_finedir_out, '/', fine_prefix, '_allcontrol_genes.txt'), quote=F, row.names=F, col.names=F) nbins = 10 genebin_num = round(nrow(hgnc_ens38_df)/nbins) mtg_cells = colnames(mtg_ctd[[2]]$specificity_quantiles) quantile_df = as.data.frame(mtg_ctd[[2]]$specificity_quantiles) for (cell in mtg_cells){ cur_quantile = quantile_df[[cell]] for (bin in 0:nbins){ ct = bin bin_hgnc = rownames(quantile_df)[which(cur_quantile == bin)] 
bin_ensembls = hgnc_ens38_df_mtg$ensembl_gene_id[hgnc_ens38_df_mtg$hgnc_symbol %in% bin_hgnc] print(length(bin_ensembls)) out_file = paste0(current_dir_out, '/', prefix, '_', cell, '_bigcat_', str_pad(ct,2,pad="0"),'_', as.character(nbins),'_genes.txt') write.table(bin_ensembls, out_file, quote=F, row.names=F, col.names=F) } } write.table(hgnc_ens38_df_non_mtg$ensembl_gene_id, paste0(current_dir_out, '/', prefix, '_nomatch_genes.txt'), quote=F, row.names=F, col.names=F) write.table(hgnc_ens38_df_mtg$ensembl_gene_id, paste0(current_dir_out, '/', prefix, '_allcontrol_genes.txt'), quote=F, row.names=F, col.names=F)
d96fc9effa63d8a9e7cb2b1d3c6b8993f5f2ac8f
d46a62735f241be070aa0363ab6870ad2fb46015
/R/node_ckr.R
df439efa95d7d8adf9f3a01b265af59af2567797
[ "MIT" ]
permissive
DataUrbanEconGeek/wikipop
7bda325e50500e0061c17388fcf1c5d4dc110b8e
ba073d08adbd8677dcb4941081cba22fe0124b6a
refs/heads/master
2020-03-22T02:08:54.771580
2018-07-08T04:02:03
2018-07-08T04:02:03
139,352,274
0
0
null
null
null
null
UTF-8
R
false
false
168
r
node_ckr.R
#' A function to check css node #' @import rvest #' @export node_ckr <- function(wikihtml, path){ class(try(html_nodes(wikihtml, css = path),silent=TRUE))[1] }
7ad8d05617dbfba3afa6dd91ffdbc8a8602fcebf
cceb54a5dd3b3053fe70425a9786d670bc48285c
/R/indpca.R
9bbe5368227641db1e30e5c8f7a1efb348c08876
[]
no_license
cran/hierfstat
ee123e045e3592a9ce4181b0f4aa8fd96649da62
30663f916505fb83451a49bbe0fa2fbe082632eb
refs/heads/master
2022-05-12T16:31:17.085452
2022-05-05T22:40:02
2022-05-05T22:40:02
17,696,646
2
3
null
null
null
null
UTF-8
R
false
false
1,574
r
indpca.R
### #'@export indpca<-function(dat,ind.labels=NULL,scale=FALSE){ #requires ade4 #given a genotype data set dat #and individual labels lab #performs a centered, unscaled PCA on individuals #if scale=TRUE, PCA carried out on correlation matrix if (is.genind(dat)) dat<-genind2hierfstat(dat) indp<-pop.freq(cbind(1:dim(dat)[1],dat[,-1])) mati<-NULL for (i in 1:length(indp)) mati<-rbind(mati,indp[[i]]) mati<-t(mati) #ind rows, nbal col mp<-apply(mati,2,mean,na.rm=T) matic<-sweep(mati,2,FUN="-",mp) matic[is.na(matic)]<-0.0 #replace NA with 0 pca.matic<-ade4::dudi.pca(matic,scannf=FALSE,scale=scale,nf=min(min(dim(matic)),50)) if (is.null(ind.labels)) pca.matic$rownames<-as.character(dat[,1]) else pca.matic$rownames<-ind.labels res<-list(call=match.call(),ipca=pca.matic,ifreq=mati) class(res)<-"indpca" res } #' @method print indpca #' @export print.indpca<-function(x,...){ print(names(x)) print("$call:") print(x$call) print("Dimension of individidual frequency matrix is: ") print(dim(x$ifreq)) print("$ipca:") print(x$ipca) invisible(x) } #' @method plot indpca #' @export plot.indpca<-function(x,eigen=FALSE,ax1=1,ax2=2,...){ if(eigen){ graphics::par(mfrow=c(2,1)) graphics::plot(x$ipca$eig/sum(x$ipca$eig),type="h",xlab="axes",ylab="Prop. variance") } graphics::plot(x$ipca$li[,ax1],x$ipca$li[,ax2],xlab=paste("Axis: ",ax1,sep=""),ylab=paste("Axis: ",ax2,sep=""),type="n",...) graphics::text(x$ipca$li[,ax1],x$ipca$li[,ax2],labels=x$ipca$rownames,...) if(eigen) graphics::par(mfrow=c(1,1)) }
ce565e9508d785190a52b1a3fb4a13c81ebc8afc
0e8b34d1a0ba4f1d75ff94bfbec54ac1fd81ee35
/hypothesistests.R
41108a51555a2842c898e14dc2ac0cc743abd36c
[]
no_license
lolfxo/Data_Analysis
f49bb88c2f053d9baad929b9ab6f54ed4617a690
f78db3120f9414a9f819f1bc022269d88b473edf
refs/heads/master
2021-09-07T10:58:22.166168
2018-02-21T23:16:07
2018-02-21T23:16:07
116,081,281
0
0
null
null
null
null
UTF-8
R
false
false
681
r
hypothesistests.R
# Script to create the "hypothesistests" package # Clear current list # rm(list=ls()) # Get working directory getwd() # Create your own R package # Step 0: Packages I will need install.packages("devtools") library("devtools") devtools::install_github("klutometis/roxygen") library(roxygen2) # Step 1: Create your package directory setwd("C:/Users/hongn/Documents/GitHub/Data_Analysis") # Create folder in which to include functions. This is also the package name. # create("hypothesistests") setwd("./hypothesistests") # create the document document() # install 'hypothesistests' package straight from GitHub install_github("lolfxo/Data_Analysis",subdir="hypothesistests")
acfa6e9265e67cfc59c4be64e357fc9aa05e85c7
ba3b0d0347da419c1f9fd07bcc5ddb354ccafd2a
/20190130_equating_multiple_test_items.R
b247f400aa9d5a89f29443bb00fbc26fa6239174
[]
no_license
takuizum/R
ed88c662b5d399444a5a60cd16155e0f49301a62
e07689fd1ec0a53c500b0b87731ff565e9b6500d
refs/heads/master
2020-03-30T12:15:17.624459
2019-06-17T16:20:09
2019-06-17T16:20:09
151,216,645
0
0
null
null
null
null
UTF-8
R
false
false
4,988
r
20190130_equating_multiple_test_items.R
library(tidyverse) library(irtfun2) # gen simulation data set th_1 <- rnorm(1000, 0, 1) th_2 <- rnorm(1000, 0, 1) th_3 <- rnorm(1000, 0, 1) nofitems <- 70 true_a <- rlnorm(nofitems, 0, 0.4) true_b <- rnorm(nofitems, 0, 1) # LETTERS is built-in constatn, that has the 26 upper-case letters of the Roman alphabet ID <- character(0) for(t in 1:(nofitems/10)){ LET <- LETTERS[t] %>% rep(10) NUM <- c(1:10) %>% formatC(width = 3, flag = 0) %>% as.matrix() ID <- c(ID, paste0(LET, NUM)) } t_para <- data.frame(Item = ID, a = true_a, b = true_b, c = rep(0, nofitems)) g1 <- ID[1:30] g2 <- ID[21:50] g3 <- ID[41:70] sim_gen2 <- function(theta, para){ a <- para$a b <- para$b c <- para$c ID <- para$Item dat <- sim_gen(theta = theta, a = a, b = b, c = c) #colnames(dat)[-1] <- ID return(dat) } dat1 <- t_para[ID %in% g1,] %>% sim_gen2(theta = th_1) colnames(dat1)[-1] <- g1 dat2 <- t_para[ID %in% g2,] %>% sim_gen2(theta = th_2) colnames(dat2)[-1] <- g2 dat3 <- t_para[ID %in% g3,] %>% sim_gen2(theta = th_3) colnames(dat3)[-1] <- g3 res_1 <- dat1 %>% estip2(fc = 2) res_2 <- dat2 %>% estip2(fc = 2) res_3 <- dat3 %>% estip2(fc = 2) res_1$para$Item <- res_1$para$Item %>% as.character() res_2$para$Item <- res_2$para$Item %>% as.character() res_3$para$Item <- res_3$para$Item %>% as.character() #CEquating(T_para = res_1$para, F_para = res_2$para) # 項目パラメタの平均と標準偏差を計算し,データフレームに納める # 平均は項目困難度の平均を,標準偏差は外れ値を考慮して識別力幾何平均の逆数とする。 library(pracma) pre_mean <- matrix(nrow = 3, ncol = 3) pre_sd <- matrix(nrow = 3, ncol = 3) # diag(pre_mean) <- c(res_1$para$b %>% mean(), res_2$para$b %>% mean(), res_2$para$b %>% mean()) # diag(pre_sd) <- c(res_1$para$a %>% geomean(), res_2$para$a %>% geomean(), res_2$para$a %>% geomean()) para_list <- list(res_1, res_2, res_3) for(i in 1:3){ # of row row_a <- para_list[[i]]$para$a row_b <- para_list[[i]]$para$b row_item <- para_list[[i]]$para$Item for(j in 1:3){ # of column col_item <- para_list[[j]]$para$Item key <- row_item %in% col_item if(sum(key) == 0){ pre_mean[i, j] 
<- NA pre_sd[i, j] <- NA next } target_a <- row_a[key] target_b <- row_b[key] pre_mean[i, j] <- target_b %>% mean() pre_sd[i, j] <- target_a %>% geomean() # pre_sd[i, j] <- target_b %>% sd() } } pre_mean <- matrix(c(68.4, 63.3, 0, 53.6, 60.2, 63.1, 55.3, 0, 0, 62.9, 57.1, 54.4, 76.5, 0, 58.9, 54.0), nrow=4) pre_sd <- matrix(c(18.8, 3.4, 0, 5.4, 16.5, 3.5, 14.1, 0, 0, 3.6, 12.7, 6.8, 17.3, 0, 10.9, 6.1), nrow = 4) # n of forms # form <- c(1, 2, 3) form <- c(1,2,3,4) # n of rater # n of common subjects n <- matrix(0, nrow = length(form), ncol = length(form)) for(i in form){ row_item <- para_list[[i]]$para$Item for(j in form){ col_item <- para_list[[j]]$para$Item n[i, j] <- row_item %in% col_item %>% sum() } } n <- matrix(c(20, 10, 0, 10, 10, 20, 10, 0, 0, 10, 20, 10, 10, 0, 10, 20), nrow = 4, ncol = 4) # sigma = A coefficient H1 <- matrix(0, nrow = length(form), ncol = length(form)) for(i in form){ for(j in form){ if(i == j){ # diag element for(k in form[!form == j]){ if(is.na(pre_sd[i, k])) next H1[i, j] <- H1[i, j] + n[i, k] * pre_sd[i, k]^2 } } else { # non diag H1[i, j] <- -n[i, j] * pre_sd[i, j] * pre_sd[j, i] } if(n[i, j] == 0){ # no common item between test two forms H1[i, j] <- 0 next } } } inv_H1 <- eigen(H1, symmetric = F) #inv_H1 <- pinv(H1) A <- inv_H1$vectors[,length(form)] # objective vector # A <- inv_H1[,length(form)] # mean = K coefficient H2 <- matrix(0, nrow = length(form), ncol = length(form)) for(i in form){ for(j in form){ if(n[i, j] == 0){ # no common item between test two forms H2[i, j] <- 0 next } if(i == j){ # diag element for(k in form[!form == j]){ if(is.na(pre_mean[i, k])) next H2[i, j] <- H2[i, j] + n[i, k] * pre_mean[i, k] } } else { # non diag H2[i, j] <- -n[i, j] * pre_mean[i, j] } } } H3 <- matrix(0, nrow = length(form), ncol = length(form)) for(i in form){ for(j in form){ if(n[i, j] == 0){ # no common item between test two forms H3[i, j] <- 0 next } if(i == j){ # diag element for(k in form[!form == j]){ if(is.na(pre_mean[i, k])) 
next H3[i, j] <- H3[i, j] + n[i, k] } } else { # non diag H3[i, j] <- -n[i, j] } } } K <- -pinv(H3) %*% t(H2) %*% A A K # res_1$para%>% dplyr::mutate(a = a/A[1]) %>% dplyr::mutate(b = A[1]*b + K[1]) # equate_manually <- function(para, A, K){ # #para %>% dplyr::mutate(a = a/A) %>% dplyr::mutate(b = A*b + K) # para$a <- para$a/A # }
252e8e35d2877723ca89fc94d9425b2fb557831c
dc202eab79abffe0ebe0db44a84f71c3886c04c8
/r/sparkr/man/bluemix2d-class.Rd
490fc141e2aad22b622bd047869aa3acca1bbf98
[ "Apache-2.0" ]
permissive
gjebran/ibmos2spark
fa4f5b3d1c2dc690d8de8a3ed76940f5c5801e8c
677b840289b2d7adbb58d5a4c632a3b52d66435a
refs/heads/master
2021-01-16T18:22:04.078824
2017-08-07T22:11:05
2017-08-07T22:11:05
null
0
0
null
null
null
null
UTF-8
R
false
true
1,443
rd
bluemix2d-class.Rd
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/osconfig.R \docType{class} \name{bluemix2d-class} \alias{bluemix2d} \alias{bluemix2d-class} \title{sparkcontext: a SparkContext object.} \description{ credentials: a dictionary with the following required keys: auth_url project_id (or projectId) user_id (or userId) password region and optional key: name #[to be deprecated] The name of the configuration. name: string that identifies this configuration. You can use any string you like. This allows you to create multiple configurations to different Object Storage accounts. This is not required at the moment, since credentials['name'] is still supported. When using this from a IBM Spark service instance that is configured to connect to particular Bluemix object store instances, the values for these credentials can be obtained by clicking on the 'insert to code' link just below a data source. } \examples{ library(ibmos2spark) creds = list(name="XXXXX", auth_url="https://identity.open.softlayer.com", region="dallas", project_id = "XXXXX", user_id="XXXXX", password="XXXXX", public = FALSE) bmsc = bluemix(sparkcontext=sc, name=name, credentials = creds) data <- read.df(sqlContext, bmsc$url(name, space,object), source = "com.databricks.spark.csv", header = "true") head(data) }
d76689bdfee233f31e925d89b8c30a0d823912e0
bf9ad245511f9fb28c84ab9603d1a90c85a1f7c2
/R/utilities.R
aeace9bfa370d22f559f31f962809b1249e84c58
[]
no_license
dcava/wpp
20f28a14db5a26dd650431c30419767d68ccdeea
95dc77839caf5d1b48f1d3d93aa8df05f0c60aee
refs/heads/master
2022-12-10T01:46:23.392824
2020-09-15T06:41:41
2020-09-15T06:41:41
31,527,929
1
0
null
2020-09-15T05:45:12
2015-03-02T07:15:33
HTML
UTF-8
R
false
false
5,467
r
utilities.R
## Summarise a set of multiply imputed model fits (mice workflow):
## wraps the fits as a `mira` object, pools them, and summarises the pool.
spm <- function(impresults) {
  summary(pool(as.mira(impresults)))
}

## Build a tidy data frame of estimates and 95% CIs for plotting, from
## either a single Cox model (broom::tidy path) or pooled MI results
## (spm() path, where fmi = fraction of missing information).
plotdf <- function(impresult) {
  if (inherits(impresult, "coxph")) {  # was class(impresult) == "coxph"
    plotout <- tidy(impresult)
    plotout %<>% select(term, estimate, conf.low, conf.high,
                        std.error = robust.se, p.value)
    return(plotout)
  } else {
    plotout <- data.frame(spm(impresult))
    plotout %<>% transmute(term = rownames(plotout),
                           estimate = est,
                           conf.low = lo.95,
                           conf.high = hi.95,
                           std.error = se,
                           p.value = `Pr...t..`,
                           fmi = round(fmi, 2))
    return(plotout)
  }
}

## Format a pooled scalar survival estimate (e.g. 5-year OS) as
## "estimate (95% CI lower-upper)" using a t-based interval on the
## pooled estimate (qbar) and total variance (t).
mi.ci <- function(poolscalar) {
  # Take the pooled scalar values for survival and generate 95% CIs
  df.ps <- with(poolscalar, list(df = df, est = qbar, var = t))
  # Bug fix: qt(0.95) gives a 90% two-sided interval; a 95% CI (as the
  # output string claims, and as summary.MIresult below computes via
  # qt(alpha/2, lower.tail = FALSE)) needs the 0.975 quantile.
  mult <- qt(0.975, df = df.ps$df)
  upper <- round(df.ps$est + mult * sqrt(df.ps$var), 3)
  lower <- round(df.ps$est - mult * sqrt(df.ps$var), 3)
  return(str_c("5yr OS: ", round(df.ps$est, 3),
               " (95% CI ", lower, "-", upper, ")"))
}

## Pool the p-values of a one-sided test according to the Licht-Rubin
## method: pool on the z-score scale, then transform back to the 0-1
## scale. Licht C, Rubin DB (2011) unpublished.
lichtrubin <- function(fit) {
  if (!is.mira(fit)) stop("Argument 'fit' is not an object of class 'mira'.")
  fitlist <- fit$analyses
  if (!inherits(fitlist[[1]], "htest")) {
    stop("Object fit$analyses[[1]] is not an object of class 'htest'.")
  }
  # vapply replaces the index-loop fill of p; same values, type-safe
  p <- vapply(fitlist, function(a) a$p.value, numeric(1))
  z <- qnorm(p)                 # transform to z-scale
  num <- mean(z)
  den <- sqrt(1 + var(z))
  pnorm(num / den)              # average and transform back
}

## Pool Kaplan-Meier median / LCL / UCL across imputations by averaging
## the survfit summary tables; converts days to months (/365.25 * 12),
## rounded to 1 decimal.
pool.km <- function(km.mira.object) {
  results <- vector("list")
  for (i in c("median", "LCL", "UCL")) {
    km.mira.object %>%
      list.apply(summary) %>%
      list.map(table) %>%
      data.frame() %>%
      select(contains(i)) %>%
      rowMeans(na.rm = TRUE) %>%          # was na.rm = T
      { round(. / 365.25 * 12, 1) } -> results[[i]]
  }
  data.frame(results)
}

## Collect 5-year (1826.25-day) survival estimates and standard errors
## from each imputation's survfit object.
pool.5yr <- function(km.mira.object) {
  results <- vector("list")
  for (i in c("surv", "std.err")) {
    km.mira.object %>%
      list.apply(summary, times = 1826.25) %>%
      # NOTE(review): rlist::list.select uses NSE; confirm list.select(i)
      # picks the "surv"/"std.err" fields rather than a field named "i".
      list.select(i) %>%
      list.flatten() %>%
      list.rbind() %>%
      data.frame() -> results[[i]]
  }
  results
}

## Combine chi-squared statistics from multiply imputed analyses.
## My variant of the miceadds::micombine.chisquare function.
##   dk      = vector of chi-squared values (one per imputed data set)
##   df      = degrees of freedom of each statistic
##   display = print a human-readable summary via message()?
## Returns (invisibly) a named vector: D statistic, its p-value, the
## numerator/denominator df, and a chi-squared approximation with p.
cavcombine <- function(dk, df, display = TRUE) {
  M <- length(dk)
  mean.dk <- mean(dk)
  sdk.square <- var(sqrt(dk))
  Dval <- (mean.dk / df - (1 - 1 / M) * sdk.square) /
    (1 + (1 + 1 / M) * sdk.square)
  df2 <- (M - 1) / df^(3 / M) * (1 + M / (M + 1 / M) / sdk.square)^2
  pval <- pf(Dval, df1 = df, df2 = df2, lower.tail = FALSE)  # was F
  chisq.approx <- Dval * df
  p.approx <- 1 - pchisq(chisq.approx, df = df)
  res <- c(D = Dval, p = pval, df = df, df2 = df2,
           chisq.approx = chisq.approx, p.approx = p.approx)
  if (display) {
    message("Combination of Chi Square Statistics for Multiply Imputed Data\n")
    message(paste("Using", M, "Imputed Data Sets\n"))
    message(paste("F(", df, ",", round(df2, 2), ")", "=", round(Dval, 3),
                  " p=", round(pval, 5), sep = ""), "\n")
    message(paste("Chi Square Approximation Chi2(", df, ")", "=",
                  round(chisq.approx, 3), " p=", round(p.approx, 5),
                  sep = ""), "\n")
  }
  invisible(res)
}

## My version of the MIresult summary function, easy to use with knitr.
## Pools estimates with a t-interval at level 1 - alpha and reports the
## percent missing information. (`logeffect` is accepted for interface
## compatibility but unused, as in the original.)
summary.MIresult <- function(object, ..., alpha = 0.05, logeffect = FALSE) {
  out <- data.frame(results = coef(object), se = sqrt(diag(vcov(object))))
  crit <- qt(alpha / 2, object$df, lower.tail = FALSE)
  out$"(lower" <- out$results - crit * out$se
  out$"upper)" <- out$results + crit * out$se
  out$"missInfo" <- paste(round(100 * object$missinfo), "%")
  return(out)
}

## Extract stabilised propensity-score weights from a twang `ps` object
## (variant of twang::get.weights; the ATE weights are stabilised by the
## mean of the fitted propensity scores).
stable.weights <- function(ps1, stop.method = NULL, estimand = NULL) {
  if (inherits(ps1, "ps")) {   # was class(ps1) == "ps"
    if (is.null(estimand)) estimand <- ps1$estimand
    if (!(estimand %in% c("ATT", "ATE"))) {
      stop("estimand must be either \"ATT\" or \"ATE\".")
    }
    if (estimand != ps1$estimand) {
      warning("Estimand specified for get.weights() differs from the estimand used to fit the ps object.")
    }
    if (length(stop.method) > 1) {
      stop("More than one stop.method was selected.")
    }
    if (!is.null(stop.method)) {
      stop.method.long <- paste(stop.method, ps1$estimand, sep = ".")
      i <- match(stop.method.long, names(ps1$w))
      if (is.na(i)) {
        # Bug fix: the original message was garbled
        # ("... are not available. Please a stop.method and used when ...")
        stop("Weights for stop.method=", stop.method, " and estimand=",
             estimand, " are not available. Please specify a stop.method ",
             "and estimand used when fitting the ps object.")
      }
    } else {
      warning("No stop.method specified. Using ", names(ps1$ps)[1], "\n")
      i <- 1
    }
    if (estimand == "ATT") {
      w <- with(ps1, treat + (1 - treat) * ps[[i]] / (1 - ps[[i]]))
      return(w)
    } else if (estimand == "ATE") {
      w <- with(ps1, ifelse(treat == 1,
                            mean(ps[[i]]) / ps[[i]],
                            mean(1 - ps[[i]]) / (1 - ps[[i]])))
      return(w)
    }
  }
}
60fe96d1eb329055faeddb44e2af453f8ba32c54
fa19d30ddd68641f7baf190b3c192dbd4ac04911
/r-programming/assignment-2/cachematrix.R
e6512c3939165691e4fd0f8ec5b0d89e6049697b
[]
no_license
rawat-he/datasciencecoursera
6bb0b07b9e8a9520252388270571ade8549a4274
fd3d37f2deab6d61519d7a8a1c8ef57473d5bf86
refs/heads/master
2021-05-30T10:33:52.690449
2015-07-26T12:52:53
2015-07-26T12:52:53
null
0
0
null
null
null
null
UTF-8
R
false
false
1,988
r
cachematrix.R
## makeCacheMatrix: build a cache-aware wrapper around a matrix.
## Returns a list of four accessor closures that share one environment:
##   set(y)             - store a new matrix and drop any cached inverse
##   get()              - return the stored matrix
##   setinv(matrix.inv) - store a computed inverse in the cache
##   getinv()           - return the cached inverse (NULL until set)
## Usage: mat <- makeCacheMatrix(matrix(c(1, 2, 3, 4), nrow = 2, ncol = 2))
##        or mat <- makeCacheMatrix(); mat$set(...)
makeCacheMatrix <- function(x = matrix()) {
  cached_inverse <- NULL

  set <- function(y) {
    x <<- y
    cached_inverse <<- NULL  # a new matrix invalidates the old inverse
  }
  get <- function() x
  setinv <- function(matrix.inv) cached_inverse <<- matrix.inv
  getinv <- function() cached_inverse

  list(set = set,
       get = get,
       setinv = setinv,
       getinv = getinv)
}

## cacheSolve: return the inverse of the special "matrix" created by
## makeCacheMatrix. If the inverse was already computed (and the matrix
## has not changed since), the cached value is returned; otherwise it is
## computed with solve(), cached, and returned. Extra arguments in `...`
## are forwarded to solve().
cacheSolve <- function(x, ...) {
  cached <- x$getinv()
  if (!is.null(cached)) {
    message("getting matrix inverse from cache")
    return(cached)
  }
  fresh <- solve(x$get(), ...)
  message("set new matrix inverse to cache")
  x$setinv(fresh)
  fresh
}