blob_id
stringlengths
40
40
directory_id
stringlengths
40
40
path
stringlengths
2
327
content_id
stringlengths
40
40
detected_licenses
listlengths
0
91
license_type
stringclasses
2 values
repo_name
stringlengths
5
134
snapshot_id
stringlengths
40
40
revision_id
stringlengths
40
40
branch_name
stringclasses
46 values
visit_date
timestamp[us]date
2016-08-02 22:44:29
2023-09-06 08:39:28
revision_date
timestamp[us]date
1977-08-08 00:00:00
2023-09-05 12:13:49
committer_date
timestamp[us]date
1977-08-08 00:00:00
2023-09-05 12:13:49
github_id
int64
19.4k
671M
star_events_count
int64
0
40k
fork_events_count
int64
0
32.4k
gha_license_id
stringclasses
14 values
gha_event_created_at
timestamp[us]date
2012-06-21 16:39:19
2023-09-14 21:52:42
gha_created_at
timestamp[us]date
2008-05-25 01:21:32
2023-06-28 13:19:12
gha_language
stringclasses
60 values
src_encoding
stringclasses
24 values
language
stringclasses
1 value
is_vendor
bool
2 classes
is_generated
bool
2 classes
length_bytes
int64
7
9.18M
extension
stringclasses
20 values
filename
stringlengths
1
141
content
stringlengths
7
9.18M
9ae94990b07de39a247481095951213b41c1ccd1
d556d8044376ffd8bf959b42f02f696d28b40b8d
/regressao_linear.R
428bc020da0996089ac6dad5694dfaee22323f82
[]
no_license
cassioancn/Regress-o-Linear-Simples
8705fd8483d1d6b40b4c5824f706b3ac8b068f38
ff55a4b099479921d82f974386024f27b16528f3
refs/heads/main
2023-03-12T16:49:30.919820
2021-03-08T04:26:19
2021-03-08T04:26:19
345,529,355
0
0
null
null
null
null
ISO-8859-1
R
false
false
2,654
r
regressao_linear.R
# Simple linear regression walkthrough: house price (preco) as a function of
# area and of age (tempo), with correlation checks, outlier inspection,
# residual diagnostics and predictions.
library(readxl)

dados <- read_excel("~/1UNISINOS/ALURA/RSTUDIO/Regressão Linear/regressaolinear_r-master/dados.xlsx")
View(dados)

# Scatter plots and correlation tests ---------------------------------------
plot(dados$area, dados$preco,
     main = "Diagrama de Dispersão", xlab = "Área", ylab = "Preço",
     pch = 19, col = "blue")
cor(dados$area, dados$preco)
cor.test(dados$area, dados$preco)

plot(dados$tempo, dados$preco,
     main = "Diagrama de Dispersão", xlab = "Tempo", ylab = "Preço",
     pch = 10, col = "red")
cor(dados$tempo, dados$preco)
cor.test(dados$tempo, dados$preco)

# Boxplot of price ----------------------------------------------------------
boxplot(dados$preco)

# Identify which house is the outlier, using the car package
install.packages("car")
library(car)
Boxplot(dados$preco)

# Value of the outlier found above
dados$preco[79]

# Which houses are above the third quartile (the upper box)
which(dados$preco > quantile(dados$preco, 0.75))

# Regression line -----------------------------------------------------------
mod1 <- lm(preco ~ area, data = dados)
mod1

# Estimate the price from the area by hand, substituting the coefficients
# into the line equation: preco = b0 + b1 * area
preco_70 <- 502347 + 7851 * (70)
preco_70

# Same prediction using the fitted coefficients directly
mod1$coefficients[[1]] + mod1$coefficients[[2]] * 70

# Scatter plot with the fitted line.
# BUG FIX: the original chained `plot(...) + abline(...)`; base graphics calls
# are not ggplot layers and return NULL, so the `+` was meaningless — they are
# two separate statements.
plot(dados$area, dados$preco,
     main = "Diagrama e reta", xlab = "Área (m²)", ylab = "Preço (R$)",
     pch = 19, col = "blue")
abline(mod1, col = "red")

# Validate the model: does the area influence house prices?
summary(mod1)

# Another way to inspect the fit quality
names(summary(mod1))
summary(mod1)$r.squared

# Price as a function of the house's age
mod2 <- lm(preco ~ tempo, data = dados)
mod2
summary(mod2)

# Residual diagnostics -------------------------------------------------------
plot(mod1$residuals)
identify(mod1$residuals, n = 2)
mod1$residuals[82]

# Remove one chosen observation
dados_59 <- dados[-59, ]
# Remove two chosen observations
dados_59_82 <- dados[c(-59, -82), ]

# Independence (Durbin-Watson), homoscedasticity (Breusch-Pagan) and
# normality (QQ plot, Shapiro-Wilk) of the model errors
install.packages("lmtest")
library(lmtest)
dwtest(mod1)
plot(mod1$fitted.values, mod1$residuals)
bptest(mod1)
plot(mod1, 2)
shapiro.test(mod1$residuals)

# Predictions ----------------------------------------------------------------
dados_novos <- data.frame(area = c(60, 70))
# Point estimates of the price for the requested areas
predict(mod1, newdata = dados_novos)
# Prediction interval: fit = point estimate; lwr = lower bound; upr = upper bound
predict(mod1, newdata = dados_novos, interval = "prediction")
# Confidence interval for the mean response
predict(mod1, newdata = dados_novos, interval = "confidence")
629314643eb48983984502fa6de8d932e788d113
b482c98ff5065b055887827fb8dc86e3055c6438
/man/trawl_DExp.Rd
2f712f91a91aa8b1d25e5868dd413cbda765d8c9
[]
no_license
cran/trawl
bdb7d749804b73f5170d348e0614aff4d3d253fd
4013b1fa141f3ae3c0c13447488e45125a675b9c
refs/heads/master
2021-06-15T02:31:33.763015
2021-02-22T16:30:02
2021-02-22T16:30:02
145,911,822
0
0
null
null
null
null
UTF-8
R
false
true
883
rd
trawl_DExp.Rd
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/SimulateTrawl.R \name{trawl_DExp} \alias{trawl_DExp} \title{Evaluates the double exponential trawl function} \usage{ trawl_DExp(x, w, lambda1, lambda2) } \arguments{ \item{x}{the argument at which the double exponential trawl function will be evaluated} \item{w}{parameter in the double exponential trawl} \item{lambda1}{the parameter \eqn{\lambda_1} in the double exponential trawl} \item{lambda2}{the parameter \eqn{\lambda_2} in the double exponential trawl} } \value{ The double exponential trawl function evaluated at x } \description{ Evaluates the double exponential trawl function } \details{ The trawl function is parametrised by parameters \eqn{0\leq w\leq 1} and \eqn{\lambda_1, \lambda_2 > 0} as follows: \deqn{g(x) = w e^{\lambda_1 x}+(1-w) e^{\lambda_2 x}, \mbox{ for } x \le 0.} }
add1585f37917f937885e9d252d9ebe4c9335fb3
68c51b6b053872846d5e2bd273f3b172b0c1bbf7
/Various Regression Techniques/OrdinalRegression.R
7925c4c755cfa2f57ccfcf2990889181855c8373
[]
no_license
akshatshah14/Machine-Learning
62f158eafc20ce91668e225fe49d503773d9ed5c
fb4048a77f3ba15df91af5ae1d8c3d8e1bd5c0aa
refs/heads/master
2021-07-10T16:58:55.787972
2017-10-09T05:54:02
2017-10-09T05:54:02
106,143,613
1
0
null
null
null
null
UTF-8
R
false
false
1,973
r
OrdinalRegression.R
# Ordinal (proportional-odds) logistic regression on the low birth weight data.
# Recodes birth weight (BWT, grams) into a 4-level ordinal outcome WT, fits
# polr(), derives p values / odds ratios, and probes the parallel-slopes
# assumption with one binary logit per cut point.

birthweight <- read.csv("C:\\Users\\Akshat\\Dropbox\\Spring 2017- ML for Data Science\\Datasets\\lowbwt.csv")
colnames(birthweight)

# WT: 4 = lowest weight band (< 2500 g) ... 1 = heaviest band (>= 3500 g)
birthweight$WT <- 0
birthweight[birthweight$BWT > 0 & birthweight$BWT < 2500, ]$WT <- 4
birthweight[birthweight$BWT >= 2500 & birthweight$BWT < 3000, ]$WT <- 3
birthweight[birthweight$BWT >= 3000 & birthweight$BWT < 3500, ]$WT <- 2
birthweight[birthweight$BWT >= 3500, ]$WT <- 1

# FIX: library() errors on a missing package; require() only warns and
# returns FALSE, silently breaking everything downstream.
library(foreign)
library(ggplot2)
library(MASS)
library(Hmisc)
library(reshape2)

polr.fit <- polr(
  as.factor(WT) ~ AGE + LWT + as.factor(RACE) + as.factor(SMOKE) +
    as.factor(PTL) + as.factor(HT) + as.factor(UI) + as.factor(FTV),
  data = birthweight, Hess = TRUE
)
summary(polr.fit)

(ctable <- coef(summary(polr.fit)))

## calculate and store p values (two-sided, normal approximation on t values)
p <- pnorm(abs(ctable[, "t value"]), lower.tail = FALSE) * 2

## combined table
(ctable <- cbind(ctable, "p value" = p))

ci <- confint.default(polr.fit)

## odds ratios
exp(coef(polr.fit))

## OR and CI
exp(cbind(OR = coef(polr.fit), ci))

# Log cumulative odds of being at or above each outcome level.
sf <- function(y) {
  c('Y>=1' = qlogis(mean(y >= 1)),
    'Y>=2' = qlogis(mean(y >= 2)),
    'Y>=3' = qlogis(mean(y >= 3)),
    'Y>=4' = qlogis(mean(y >= 4)))
}

(s <- with(birthweight, summary(
  as.numeric(WT) ~ AGE + LWT + as.factor(RACE) + as.factor(SMOKE) +
    as.factor(PTL) + as.factor(HT) + as.factor(UI) + as.factor(FTV),
  fun = sf
)))

# Evaluate the parallel slopes assumption: one binary logit per cut point.
glm(I(as.numeric(WT) >= 2) ~ AGE + LWT + as.factor(RACE) + as.factor(SMOKE) +
      as.factor(PTL) + as.factor(HT) + as.factor(UI) + as.factor(FTV),
    family = "binomial", data = birthweight)
glm(I(as.numeric(WT) >= 3) ~ AGE + LWT + as.factor(RACE) + as.factor(SMOKE) +
      as.factor(PTL) + as.factor(HT) + as.factor(UI) + as.factor(FTV),
    family = "binomial", data = birthweight)
glm(I(as.numeric(WT) >= 4) ~ AGE + LWT + as.factor(RACE) + as.factor(SMOKE) +
      as.factor(PTL) + as.factor(HT) + as.factor(UI) + as.factor(FTV),
    family = "binomial", data = birthweight)

# Express the summary columns as differences between adjacent cut points
s[, 5] <- s[, 5] - s[, 4]
s[, 4] <- s[, 4] - s[, 3]
s[, 3] <- s[, 3] - s[, 3]
s
90bca9ff7672d771a14b0b0c190c706dd28e405c
1f6e9cc31324632eb8bfe1a1ef066369e48eb839
/run_analysis.R
550503ca63965affcdb1556cd823a18dd757b542
[]
no_license
grejpfrut/Getting-and-Cleaning-Data-Course-Project
54213f216fa0e0cbb5d2c9f86e2e49c1af377af8
2990ba1d5605ffded0f012ab7e55d50d146f0e4e
refs/heads/master
2020-03-27T06:43:48.492508
2018-08-25T21:55:44
2018-08-25T21:55:44
146,130,597
0
0
null
null
null
null
UTF-8
R
false
false
2,101
r
run_analysis.R
# run_analysis.R -- builds a tidy summary of the UCI HAR smartphone dataset:
# merges the test/train sets, keeps the mean/std features, labels activities,
# and averages every variable per subject/activity pair.
#
# BUG FIX: the original began with a dangling `run_analysis.R <-` followed by
# a comment, which silently assigned the first table to a stray object named
# `run_analysis.R`; removed.

## SUBJECT ids
subject_test <- read.table("test/subject_test.txt", sep = ",")
subject_train <- read.table("train/subject_train.txt", sep = ",")
subject <- rbind(subject_test, subject_train)
colnames(subject) <- c("subject")

## Y: activity codes
y_test <- read.table("test/y_test.txt", sep = ",")
y_train <- read.table("train/y_train.txt", sep = ",")
y_total <- rbind(y_test, y_train)
colnames(y_total) <- c("activities")

## X: feature measurements, named from features.txt
train_Xnames <- read.table("features.txt", sep = " ")
traincharnames <- as.character(train_Xnames$V2)
library(tidyr)
test_X <- read.table("test/X_test.txt", nrows = 2947, sep = "",
                     na.strings = c("", " ", "NA"))
train_X <- read.table("train/X_train.txt", sep = "",
                      na.strings = c("", " ", "NA"))
X_total <- rbind(test_X, train_X)
colnames(X_total) <- traincharnames

# NOTE(review): `table` shadows base::table() for the rest of the script.
table <- cbind(subject, y_total, X_total)

## Keep only the mean and std columns (plus subject/activities)
tableme <- grep("mean", colnames(table))
tablestd <- grep("std", colnames(table))
table <- table[, c(1:2, tableme, tablestd)]

## Replace numeric activity codes with descriptive names
table$activities <- as.character(table$activities)
table$activities[table$activities == "1"] <- "WALKING"
table$activities[table$activities == "2"] <- "WALKING_UPSTAIRS"
table$activities[table$activities == "3"] <- "WALKING_DOWNSTAIRS"
table$activities[table$activities == "4"] <- "SITTING"
table$activities[table$activities == "5"] <- "STANDING"
table$activities[table$activities == "6"] <- "LAYING"

## Give descriptive variable names
names(table) <- sub("\\()", "", names(table))
names(table) <- gsub("-", ".", names(table))
names(table) <- sub("^t", "time", names(table))
names(table) <- sub("^f", "freq", names(table))

## Create the new tidy data set
library(dplyr)
library(datasets)

# Convert empty strings to NA (factors are coerced to character first).
empty_as_na <- function(x) {
  if ("factor" %in% class(x)) x <- as.character(x)
  ifelse(as.character(x) != "", x, NA)
}

# FIX: mutate_all() accepts a bare function; the deprecated funs() wrapper
# is not needed.
table <- table %>% mutate_all(empty_as_na)
table$subject <- as.factor(table$subject)
table$activities <- as.factor(table$activities)
table <- tbl_df(table)
table <- group_by(table, subject, activities)
final <- summarise_all(table, mean, na.rm = TRUE)
View(final)
ba1091de78f8a317754085b408014b78d05f811c
711e4cc88c3d4302a5e8cb87a74bdfea187647c1
/R/little_helpers.R
fe85baf1d799a417e14d455a13650cc7fb59e9d9
[]
no_license
sib-swiss/dsSwissKnife
43dc22d6f19ea7f0162e2bdcb1ee19018e8c874d
b52dc57ccbc4ebaa58274d1736df3276dc5ca3df
refs/heads/master
2023-06-27T10:36:55.336669
2023-06-14T13:24:55
2023-06-14T13:24:55
311,978,958
3
1
null
null
null
null
UTF-8
R
false
false
9,053
r
little_helpers.R
#' @title Initialize the environment
#' @description Set a number of defaults for some of the functions.
#' These are:
#'
#' 1) hidden.fields - fields that must show no information (patient identifiers for instance)
#'
#' 2) allowed.functions - supplementary functions allowed in dssDeriveColumn (see the documentation for that function)
#'
#' 3) join.pivot.col - the default column to join (or pivot) by. In the absence of this default
#' the 'by.col' argument in dssJoin and dssPivot becomes mandatory
#'
#' 4) forbidden.options - a number of opal administrative options that are not allowed to be changed by dssSetOption
#'
#' In order to set/modify any of these options the node administrator must create an .Rprofile file
#' in the rserver conf directory (normally /var/lib/rserver/conf) and populate it with the necessary
#' R commands to set the above options (see example). Then the following line must be added to the file
#' /var/lib/rserver/conf/Rserv.conf:
#'
#' source /var/lib/rserver/conf/.Rprofile
#'
#' @examples
#' # content of the /var/lib/rserver/conf/.Rprofile file that adds as.POSIXct among
#' # the allowed functions in dssDeriveColumn:
#' options(allowed.functions='as.POSIXct')
#'
.init <- function() {
  # Package-level cache, deliberately created with <<- so the other helpers
  # can see it.
  .mycache <<- new.env(parent = .GlobalEnv)
  assign('hidden', getOption('hidden.fields'), envir = .mycache)
  allowed.funcs <- c('abs', 'round', 'ceiling', 'floor', 'signif', 'length',
                     'paste0', 'as.Date', 'as.character', 'as.numeric', 'egfr',
                     'as.factor', 'sub', 'gsub', 'grep', 'grepl', 'sqrt',
                     getOption('allowed.functions'))
  assign('allowed.funcs', allowed.funcs, envir = .mycache)
  if (!is.null(getOption('join.pivot.col'))) {
    assign('by.col', getOption('join.pivot.col'), envir = .mycache)
  }
  # Disallow changing critical options:
  forbidden.options <- c(grep('^default\\.|^datashield\\.', names(options()), value = TRUE),
                         'hidden.fields', 'allowed.functions', 'join.pivot.col',
                         'dsQueryLibrary.enforce_strict_privacy',
                         getOption('forbidden.options'))
  assign('forbidden.options', forbidden.options, envir = .mycache)
}

# Split a data frame into its numeric and non-numeric columns.
# If collist is supplied, only those columns are considered numeric candidates;
# hidden fields (see .trim_hidden_fields) are always excluded from the numeric
# part. Returns list(numerics = <data frame>, others = <data frame>).
.split.numerics <- function(x, collist = NULL) {
  nums <- sapply(x, is.numeric)
  nums <- nums[is.null(collist) | names(nums) %in% collist]  # only collist if supplied
  nums <- nums[nums == TRUE]
  nums <- nums[.trim_hidden_fields(names(nums))]  # no hidden fields
  list(numerics = x[, names(nums), drop = FALSE],
       others = x[, !(colnames(x) %in% names(nums)), drop = FALSE])
}

#' @title Decode from base64 and deserialize from json if necessary
#' @description Work around the restrictions imposed by the Opal server on function arguments
#' The Opal server is very picky as regards function arguments. The workaround is
#' to serialize and encode them on the client and strip the right padding.
#' @details It looks for the string 'base64' in the argument to determine if it's encoded
#' @param some.thing the thing to be decoded and deserialized from json if necessary
#' @return the decoded and deserialized argument
#'
.decode.arg <- function(some.thing, simplifyMatrix = FALSE) {
  if (length(some.thing) == 1 && grepl('base64', some.thing, ignore.case = TRUE)) {
    some.thing <- gsub('base64', '', some.thing, ignore.case = TRUE)
    serialized <- FALSE
    if (grepl('serialized', some.thing, ignore.case = TRUE)) {
      serialized <- TRUE
      some.thing <- gsub('serialized', '', some.thing, ignore.case = TRUE)
    }
    # Restore the characters that were masked on the client side
    my.dictionary <- c('-plus-' = '+', '-slash-' = '/', '-equals-' = '=')
    sapply(names(my.dictionary), function(x) {
      some.thing <<- gsub(x, my.dictionary[x], some.thing)
    })
    if (serialized) {
      some.thing <- jsonlite::unserializeJSON(RCurl::base64Decode(some.thing))
    } else {
      some.thing <- jsonlite::fromJSON(RCurl::base64Decode(some.thing),
                                       simplifyMatrix = simplifyMatrix)
    }
  }
  return(some.thing)
}

# Modified version of dsBase:::extract. Returns a list of (inputname = object);
# works for data frames embedded in lists. Each input is a '$'-separated path
# resolved with get(), starting either from start.env or from the caller.
.extract <- function(input, start.env = NULL) {
  input <- unlist(input)
  output <- list()
  for (i in input) {
    inputterms <- unlist(strsplit(i, "\\$", perl = TRUE))
    if (!is.null(start.env)) {
      # Drop the first path component (the environment itself)
      inputterms <- inputterms[2:length(inputterms)]
      inputterms <- inputterms[!is.na(inputterms)]
      obj <- as.environment(start.env)
    } else {
      obj <- parent.frame()
    }
    for (objname in inputterms) {
      this.env <- as.environment(obj)
      obj <- get(objname, envir = this.env)
    }
    output[[i]] <- obj
  }
  return(output)
}

# From dsBase: split each 'holder$element' string into its two parts.
# Returns list(holders = ..., elements = ...); a holder is NA when the input
# contains no '$'.
.dsBase_extract <- function(input) {
  input <- unlist(input)
  output1 <- c()
  output2 <- c()
  for (i in seq_along(input)) {
    inputterms <- unlist(strsplit(input[i], "\\$", perl = TRUE))
    if (length(inputterms) > 1) {
      obj1 <- strsplit(input[i], "\\$", perl = TRUE)[[1]][1]
      obj2 <- strsplit(input[i], "\\$", perl = TRUE)[[1]][2]
    } else {
      obj1 <- NA
      obj2 <- strsplit(input[i], "\\$", perl = TRUE)[[1]][1]
    }
    output1 <- append(output1, obj1)
    output2 <- append(output2, obj2)
  }
  list(holders = output1, elements = output2)
}

#'
#' @title returns all objects in all environments
#' @description helper function for dssSubset and dssPivot
#' @param start a character the environment name where to start (default .GlobalEnv)
#' @return a list of environment names and the respective objects defined in each environment
#'
.ls.all <- function(start = '.GlobalEnv') {
  envir <- get(start)
  objs <- ls(envir, all.names = TRUE)
  ret <- list()
  ret[[start]] <- objs
  more.envs <- names(which(sapply(objs, function(x) is.environment(get(x))) == TRUE))
  c(ret, sapply(more.envs, function(x) ls(get(x), all.names = TRUE),
                USE.NAMES = TRUE, simplify = FALSE))
}

#'
#' @title locks or unlocks bindings in environments
#' @description helper function for dssSubset and dssPivot
#' @param what a list of environments and their respective objects - the output of ls.all above
#' @param func a function, either lockBinding or unlockBinding
#'
.lock.unlock <- function(what, func) {
  stopifnot(deparse(substitute(func)) %in% c('lockBinding', 'unlockBinding'))
  invisible(lapply(names(what), function(x) {
    lapply(what[[x]], function(y) {
      func(y, get(x))
    })
  }))
}

#' @title removes objects from the current workspace
#' @description helper function for dssSubset and dssPivot
#' @param initial a list of environments and their respective objects - the output of a previous call to ls.all
#' @param start a character the environment name where to start (default .GlobalEnv)
#'
.cleanup <- function(initial, start = '.GlobalEnv') {
  objs <- .ls.all(start)
  # Environments created since `initial` was recorded
  new.envs <- setdiff(names(objs), names(initial))
  # BUG FIX: the original called `rm(get(x))`, which errors because rm() takes
  # object names, not values; remove the new environments by name instead and
  # drop them from the listing so they are not revisited below.
  Map(function(x) {
    rm(list = x, pos = get(start))
  }, new.envs)
  objs[new.envs] <- NULL
  # In the surviving environments, remove any objects added since `initial`
  invisible(Map(function(x) {
    new.objs <- setdiff(objs[[x]], initial[[x]])
    rm(list = new.objs, pos = get(x))
  }, names(objs)))
}

# Disclosure check adapted from dsBase: returns FALSE when an object is small
# enough (non-empty but below the privacy filter) to pose a disclosure risk,
# TRUE when it is safe, FALSE for unsupported types.
# BUG FIX: the original used `class(obj) == "..."` comparisons; since R 4.0 a
# matrix has class c("matrix", "array"), so those length-2 conditions break
# `if`. inherits() handles this correctly.
.dsBase_isValidDSS <- function(obj) {
  nfilter <- .dsBase_setFilterDSS()
  if (inherits(obj, c("character", "integer", "logical", "numeric"))) {
    if (length(obj) > 0 && length(obj) < nfilter) {
      return(FALSE)
    }
    return(TRUE)
  }
  if (inherits(obj, "factor")) {
    tt <- tabulate(obj)
    xx <- which(tt > 0 & tt < nfilter)
    # Any level with a small positive count is disclosive
    return(length(xx) == 0)
  }
  if (inherits(obj, c("data.frame", "matrix"))) {
    return(!(dim(obj)[1] > 0 && dim(obj)[1] < nfilter))
  }
  FALSE
}

# From dsBase: the privacy filter threshold (default 5).
.dsBase_setFilterDSS <- function(x = getOption("datashield.privacyLevel", default = 5)) {
  a <- as.numeric(as.character(x))
  return(a)
}

# From dsBase: number of NA entries in a vector.
.dsBase_numNADSS <- function(xvect) {
  out <- length(which(is.na(xvect)))
  return(out)
}

# Per-object memory usage (bytes) for everything in envir, recursing into
# nested environments.
.get_memory_usage <- function(envir = .GlobalEnv) {
  objnames <- ls(envir = envir, all.names = TRUE)
  sapply(objnames, function(x) {
    thisobj <- get(x, envir = envir)
    # is.environment() instead of class() comparison (see .dsBase_isValidDSS)
    if (is.environment(thisobj)) {
      unlist(.get_memory_usage(thisobj))
    } else {
      as.numeric(object.size(thisobj))
    }
  }, simplify = TRUE)
}

# Drop hidden column names: first the exact names in option 'hidden.fields',
# then anything matching the 'hidden.fields.regexes' patterns.
.trim_hidden_fields <- function(cols) {
  # first pass: exact names
  cols <- setdiff(cols, getOption('hidden.fields'))
  for (r in getOption('hidden.fields.regexes')) {
    cols <- grep(r, cols, value = TRUE, perl = TRUE, invert = TRUE)
  }
  cols
}

# Resolve an 'a$b$c' path starting from startEnv, descending with get().
.betterExtract <- function(what, startEnv = parent.frame()) {
  terms <- strsplit(what, '$', fixed = TRUE)[[1]]
  Reduce(function(x, y) {
    get(y, envir = as.environment(x))
  }, terms, init = startEnv)
}
6fad7247553ce5cf64ad5e4e74fc8e84c6a77cb4
fef507ac41bb7749840e7f5141ba87fde26d6f95
/code/spython/tangram_libd/code/03_nn_run/09_manual_figure.R
4a59418c545e5514b86479f306b0f8f293ebc2e9
[]
no_license
LieberInstitute/spatialDLPFC
84bc2f0b694473182d315b4d39ab3d18c2dd3536
840700ae86cdd414e024d9658b09dd11712ef470
refs/heads/main
2023-08-04T03:37:18.425698
2023-07-25T18:27:36
2023-07-25T18:27:36
314,001,778
6
0
null
null
null
null
UTF-8
R
false
false
4,304
r
09_manual_figure.R
# The "main tangram deconvolution figure" (Fig 4a from the paper) is hard to
# generate outside of squidpy, and requires cell centroid info, which doesn't
# seem critically important to the value of the figure. Here I explore if we
# can generate a similar figure that only requires cell counts per spot.

library("here")
library("ggplot2")
# BUG FIX: geom_circle() used below comes from ggforce, which was never loaded.
library("ggforce")
library("png")
library("spatialLIBD")
library("jaffelab")
library("rjson")
library("grid")

sample_id <- "151508"

img_path <- here(
  "tangram_libd", "raw-data", "03_nn_run", "hires_histology",
  paste0(sample_id, ".png")
)
clusters_path <- here(
  "tangram_libd", "processed-data", "03_nn_run", "tangram_out_DLPFC",
  sample_id, "clusters.csv"
)
scale_path <- file.path(
  "/dcl02/lieber/ajaffe/SpatialTranscriptomics/HumanPilot/10X",
  sample_id, "scalefactors_json.json"
)
plot_path <- here(
  "tangram_libd", "plots", "03_nn_run", "DLPFC",
  paste0("my_spot_deconvo_fig_", sample_id, ".pdf")
)

spatial_coords_names <- c("pxl_row_in_fullres", "pxl_col_in_fullres")

# Fetch and subset SPE to this sample
spe <- fetch_data(type = "spe")
spe <- spe[, spe$sample_id == sample_id]

# Read in image, cell counts, and image scale factors
img <- readPNG(img_path)
clusters <- read.csv(clusters_path)
scale_json <- fromJSON(file = scale_path)

# Add spatial coordinates to 'clusters', the data frame of cell counts per spot
clusters$barcode <- ss(clusters$key, "_")
stopifnot(all(clusters$barcode == rownames(spatialCoords(spe))))
clusters <- cbind(clusters, spatialCoords(spe))

# Infer the cell types used (every column that is not metadata)
cell_types <- colnames(clusters)[
  !(colnames(clusters) %in% c("key", "cell_count", "barcode", spatial_coords_names))
]

# Make a long data frame, where each row is one cell: a spot with k cells of a
# type contributes k rows for that type
df_list <- list()
i <- 1
for (barcode in rownames(clusters)) {
  for (cell_type in cell_types) {
    for (j in seq_len(clusters[barcode, cell_type])) {
      df_list[[i]] <- c(
        barcode,
        as.numeric(clusters[barcode, spatial_coords_names]),
        cell_type
      )
      i <- i + 1
    }
  }
}
df_long <- data.frame(do.call(rbind, df_list))
colnames(df_long) <- c("barcode", spatial_coords_names, "cell_type")

# Make sure spatialCoords are numeric, and scaled to represent
# high-resolution pixels
for (colname in spatial_coords_names) {
  df_long[, colname] <- scale_json$tissue_hires_scalef * as.numeric(df_long[, colname])
}

# Reverse y coord to agree with the coordinate system ggplot2 is using.
# NOTE(review): dim(img)[2] is the image's second dimension (columns); confirm
# it matches the axis used for pxl_col_in_fullres.
df_long[[spatial_coords_names[2]]] <- dim(img)[2] - df_long[[spatial_coords_names[2]]]

# Improve plotting speed and file size by keeping a copy of just the spots.
# Get spot radius in high-res pixels
spots <- df_long[match(unique(df_long$barcode), df_long$barcode), ]
spots$spot_radius <- scale_json$spot_diameter_fullres * scale_json$tissue_hires_scalef / 2

# Create the deconvolution figure
p <- ggplot() +
  # The high-res image as the plot's background
  annotation_custom(
    rasterGrob(img, width = unit(1, "npc"), height = unit(1, "npc")),
    -Inf, Inf, -Inf, Inf
  ) +
  # The axes should exactly span the pixel range of the image, and pixels
  # should be squares
  scale_x_continuous(limits = c(0, dim(img)[1]), expand = c(0, 0)) +
  scale_y_continuous(limits = c(0, dim(img)[2]), expand = c(0, 0)) +
  coord_fixed() +
  # Plot the individual cells, jittered within a spot
  geom_jitter(
    data = df_long,
    aes_string(
      x = spatial_coords_names[1],
      y = spatial_coords_names[2],
      fill = "cell_type"
    ),
    size = 0.45, width = 4, height = 4,
    color = "black", shape = 21, stroke = 0.05
  ) +
  # Plot empty circles for each spot (to show the spot grid); from ggforce
  geom_circle(
    data = spots,
    mapping = aes_string(
      x0 = spatial_coords_names[1],
      y0 = spatial_coords_names[2],
      r = "spot_radius"
    ),
    size = 0.1
  ) +
  # Brighten colors and improve legend
  scale_fill_hue(l = 80, name = "Cell type") +
  guides(fill = guide_legend(override.aes = list(size = 5)))

pdf(plot_path)
print(p)
dev.off()
c94465345fbe4d857ee135e4a23bf5d1e3161268
f643705ec1baf66a44a237c1cb144ed47c498445
/tests/testthat/test_base_convertMLBenchObjToTask.R
3e057ae1a107005540b0b635f55ce29019cbd05e
[]
no_license
HeidiSeibold/mlr
8046328a572604260d6b68add3e10b34ea8961f8
3b24e632b1cfa326147eae7abb0775e09ac26b93
refs/heads/master
2020-06-11T19:39:31.204273
2016-12-02T22:03:30
2016-12-02T22:03:30
75,626,955
1
0
null
2016-12-05T13:24:50
2016-12-05T13:24:50
null
UTF-8
R
false
false
830
r
test_base_convertMLBenchObjToTask.R
context("convertMLBenchObjToTask")

test_that("convertMLbenchObjToTask", {
  requirePackages("!mlbench")
  # All mlbench.* generator functions; mlbench.1spiral does not work.
  gen_names = ls("package:mlbench", pattern = "mlbench")
  n = 77L
  for (gen in setdiff(gen_names, c("mlbench.1spiral"))) {
    task = convertMLBenchObjToTask(gen, n = n)
    expect_is(task, "Task")
    # For some generators, n is not properly respected in mlbench
    if (gen %nin% c("mlbench.corners", "mlbench.hypercube", "mlbench.simplex")) {
      expect_equal(getTaskSize(task), n)
    }
  }
  # All mlbench datasets; HouseVotes84 and Ozone have NAs in the target column.
  ds_info = data(package = "mlbench")
  ds_names = ds_info$results[, "Item"]
  for (ds_name in setdiff(ds_names, c("HouseVotes84", "Ozone"))) {
    task = convertMLBenchObjToTask(ds_name, n = n)
    expect_is(task, "Task")
    expect_equal(getTaskId(task), ds_name)
  }
})
13df9c43ae6ac8ea486a760c547f29039accadb7
b5bee2eb1150fa5e8e60fa7a259cc2e8aaefb0ef
/man/loadLibraryGloballyAs.Rd
2d4dc621fa59c492d203a0a1a7032f5fe28c29d9
[ "Apache-2.0" ]
permissive
SimonSchafferer/short-ncrna-annotation
f5f71e44d9f0ba34ab583c0fdae1a4b5a45e9c34
0c69f1b01198ba9a065c0becf5739c1281f6f584
refs/heads/master
2021-05-16T02:04:29.189010
2015-02-10T14:00:47
2015-02-10T14:00:47
20,757,069
0
0
null
null
null
null
UTF-8
R
false
false
507
rd
loadLibraryGloballyAs.Rd
% Generated by roxygen2 (4.0.1.99): do not edit by hand \docType{methods} \name{loadLibraryGloballyAs} \alias{loadLibraryGloballyAs} \title{Loads an R object by a given name into the globalEnvironment} \usage{ loadLibraryGloballyAs(name, filename) } \arguments{ \item{name}{Object name one wants to define} \item{filename}{Path to file and name} \item{envir}{environment where the object should be loaded to} } \value{ TRUE } \description{ Loads an R object by a given name into the globalEnvironment }
7ef989db6bf225fd2937b5b9c0d893d00c2fd9bb
d410fdbad98532e294e51da43fbf798cbc04ebc5
/NaiveBayes/NaiveBayes.R
2bf1cb076c0940d7d587bf5e1600a58e839953e7
[]
no_license
muneerulhudha/Machine-Learning-Assignments
b5662745055a42c0fffb9b9fea45019b983657be
829e1cf2bf58e034f599a8a065fac95718d08859
refs/heads/master
2021-01-10T06:53:56.362437
2016-01-25T19:30:54
2016-01-25T19:30:54
50,374,518
0
0
null
null
null
null
UTF-8
R
false
false
730
r
NaiveBayes.R
# Naive Bayes on the Pima Indians diabetes data: ten random 90/10 splits,
# reporting the hold-out accuracy of each split and the overall mean.
library(caret)
library(e1071)

dataset <- read.table(file = "https://archive.ics.uci.edu/ml/machine-learning-databases/pima-indians-diabetes/pima-indians-diabetes.data",
                      header = FALSE, sep = ",")

# Fit a Naive Bayes model on a random 90% partition (stratified on V9, the
# outcome) and return the accuracy on the remaining 10%.
nb <- function() {
  accuracy <- 0
  split_idx <- createDataPartition(dataset$V9, p = .9, list = FALSE, times = 1)
  train_set <- dataset[split_idx, ]
  holdout_set <- dataset[-split_idx, ]
  fit <- naiveBayes(as.factor(V9) ~ ., data = train_set)
  predicted <- predict(fit, holdout_set)
  accuracy <- sum(holdout_set$V9 == predicted) / length(predicted)
  return(accuracy)
}

# Repeat the experiment ten times
acc <- c()
for (i in 1:10) {
  acc <- c(acc, nb())
}

print("Accuracy in each iteration: ")
print(acc)
overallAcc <- mean(acc)
print("Overall Accuracy: ")
print(overallAcc)
82471ea70ba0945fb98dd6da0abb02a88906a947
6ec9b39141936db1b7ad61c20465e6ff5c3988bb
/Data Mining/fold Cross Validation/cmatrixInfo.r
f78290126189d8408904809ffa5d6632c0bc7589
[]
no_license
Kalovelo/ihu-survival-lecture-notes
4806daafc9aea246be3d760ab66bb2b0f43cf59e
837dd9fe4395a9240e58feb16eb46aaf953f1a04
refs/heads/master
2021-05-22T14:44:36.418659
2020-09-07T12:39:11
2020-09-07T12:39:11
252,968,079
5
2
null
null
null
null
UTF-8
R
false
false
886
r
cmatrixInfo.r
# Confusion-matrix helpers. Convention used throughout: rows are actual
# classes, columns are predicted classes, with levels 'P' (positive/event)
# and 'N' (negative).

# Proportion of correct predictions among all predictions.
accuracy <- function(tp, tn, fp, fn) {
  correct <- tn + tp
  all <- tp + fp + tn + fn
  return(correct / all)
}

# Proportion of predicted events that really are events: tp / (tp + fp).
precision <- function(tp, fp) {
  predictions <- tp + fp
  return(tp / predictions)
}

# Proportion of actual events that were predicted as events: tp / (tp + fn).
recall <- function(tp, fn) {
  events <- tp + fn
  return(tp / events)
}

# Compute Accuracy/Precision/Recall from a 2x2 confusion matrix as produced
# by generateCMatrix(). Returns a 1x3 named matrix.
CMatrixStats <- function(cMatrix) {
  tp <- cMatrix['P', 'P']
  tn <- cMatrix['N', 'N']
  fn <- cMatrix['P', 'N']  # actual P predicted N
  fp <- cMatrix['N', 'P']  # actual N predicted P
  results <- matrix(0, nrow = 1, ncol = 3)
  colnames(results) <- c('Accuracy', 'Precision', 'Recall')
  # BUG FIX: the original called accuracy(tp, tn, fn, fp), swapping fp and fn
  # against the function's (tp, tn, fp, fn) signature. The sum happened to be
  # the same, but the call was a latent bug; named arguments make it safe.
  results[1, 'Accuracy'] <- accuracy(tp = tp, tn = tn, fp = fp, fn = fn)
  results[1, 'Precision'] <- precision(tp, fp)
  results[1, 'Recall'] <- recall(tp, fn)
  return(results)
}

# An empty 2x2 confusion matrix with 'P'/'N' row and column labels.
generateCMatrix <- function() {
  cMatrix <- matrix(0, nrow = 2, ncol = 2)
  colnames(cMatrix) <- c('P', 'N')
  rownames(cMatrix) <- c('P', 'N')
  return(cMatrix)
}
64650a9d5bbe783d6f0937954f15d264294161a7
cafc5841d654b41cac6d462b95c28ab16bee8a43
/BART_iter.R
91f42548debc41dd9511de0623b28acf2e996257
[ "MIT" ]
permissive
xylimeng/OIS
4caefb253f4c84f599d878653049d39c0f7d7b86
29164546c1a552ddd4eba9f8fe67b40f359ae3d4
refs/heads/main
2023-07-16T18:41:43.962066
2021-09-02T15:46:40
2021-09-02T15:46:40
376,896,853
0
0
null
null
null
null
UTF-8
R
false
false
3,712
r
BART_iter.R
# Run one BART variable-selection pass and merge the newly selected predictors
# into the running selection.
#
# Arguments:
#   X, y          : candidate predictor matrix and response
#   head, dimen   : column headers and (optional) dimensions parallel to X's columns
#   X_selected, head_selected, dimen_selected : previously selected columns (or NULL)
#   num_trees, num_burn_in, num_iterations_after_burn_in : bartMachine settings
#   num_reps_for_avg, num_permute_samples : permutation variable-selection settings
#   standardize   : scale X before fitting?
#   train_idx     : optional row indices used for training
#   seed          : optional RNG seed forwarded to bartMachine
#
# Returns list(X_selected, head_selected, dimen_selected); stops with an error
# when BART selects nothing and there is no prior selection to fall back on.
BART_iter <- function(X, y, head, dimen, X_selected = NULL, head_selected = NULL,
                      dimen_selected = NULL, num_trees = 20, num_burn_in = 10000,
                      num_iterations_after_burn_in = 5000, num_reps_for_avg = 10,
                      num_permute_samples = 50, standardize = TRUE,
                      train_idx = NULL, seed = NULL) {
  X <- as.matrix(X)
  if (!is.null(X_selected)) X_selected <- as.matrix(X_selected)

  if (is.null(train_idx)) {
    X_train <- X
    y_train <- y
  } else {
    # Training data
    X_train <- X[train_idx, ]
    y_train <- y[train_idx]
    # Testing data
    # NOTE(review): the held-out split is computed here but never used below.
    X_test <- X[-train_idx, ]
    y_test <- y[-train_idx]
  }

  # bartMachine only takes a data frame
  if (standardize) {
    X_train_scale <- as.data.frame(scale(X_train))
  } else {
    X_train_scale <- as.data.frame(X_train)
  }

  # FIX: build the call once — the two original branches duplicated the whole
  # bartMachine() call, differing only in the `seed` argument.
  bart_args <- list(X_train_scale, y_train,
                    num_trees = num_trees,
                    num_burn_in = num_burn_in,
                    num_iterations_after_burn_in = num_iterations_after_burn_in,
                    run_in_sample = FALSE, serialize = FALSE, verbose = FALSE)
  if (!is.null(seed)) bart_args$seed <- seed
  bart_machine <- do.call(bartMachine, bart_args)

  var_sel <- var_selection_by_permute(bart_machine,
                                      num_reps_for_avg = num_reps_for_avg,
                                      num_permute_samples = num_permute_samples,
                                      num_trees_for_permute = 20, plot = FALSE)
  pos_idx <- sort(var_sel$important_vars_global_se_col_nums)

  # Check if BART selected any variable
  if (is.null(X_selected) && length(pos_idx) == 0) {
    stop("BART did not select any variable, trying another seed...")
  }

  if (!is.null(X_selected)) {
    # Merge new selections into the running set
    X_selected <- cbind(X_selected, X[, pos_idx])
    head_selected <- c(head_selected, head[pos_idx])
    dimen_selected <- if (is.null(dimen)) NULL else c(dimen_selected, dimen[pos_idx])
    # Remove duplicated columns (rounded so near-identical columns match)
    temp <- round(X_selected, digits = 6)
    dup_index <- duplicated(temp, MARGIN = 2)
    if (any(dup_index)) {
      X_selected <- X_selected[, !dup_index]
      head_selected <- head_selected[!dup_index]
      dimen_selected <- if (is.null(dimen)) NULL else dimen_selected[!dup_index]
    }
  } else {
    X_selected <- X[, pos_idx]
    head_selected <- head[pos_idx]
    dimen_selected <- if (is.null(dimen)) NULL else dimen[pos_idx]
  }

  list(X_selected = X_selected,
       head_selected = head_selected,
       dimen_selected = dimen_selected)
}
6722cae7a3104ef364dc83d677e00426ff115435
7f186fa9c8dd406f6d9666b9ff65319fc377412b
/man/flowdata.Rd
1d7ba5bb15435da12a0dc1f9801a02c484122b6e
[]
no_license
garciadejalon/FlowRegEnvCost
5419a0f0a94555a065aec2d3a3e977fbb01a70ce
998ea71b15123c2eaa83816bb560680c82775365
refs/heads/master
2021-01-23T16:26:11.101896
2017-11-29T16:11:25
2017-11-29T16:11:25
102,742,161
2
0
null
2017-11-29T16:11:27
2017-09-07T13:45:32
R
UTF-8
R
false
true
693
rd
flowdata.Rd
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/data.R \docType{data} \name{flowdata} \alias{flowdata} \title{River water flow data.} \format{An example data frame with 17166 rows and 2 variables: \describe{ \item{Date}{Date (dd/mm/yyyy)} \item{Flow}{Water flow, in m^{3}/s} }} \source{ Source: \url{https://doi.org/10.1007/s11269-017-1663-0} } \usage{ flowdata } \description{ A dataset containing daily river water flow data for the Esla river at the Riaño dam (Northern Spain) from 01/10/1964 to 30/09/2011. The library adds missing days within the whole period automatically with NA flow values when you enter your own time series data. } \keyword{datasets}
3635bff44a4e3fe5c33fc498de3c1d333b62e711
42abe0fef0d12287d170fd2445864f9fb9aec74b
/man/neuprint_common_connectivity.Rd
fda6db905d6496f77793f06f5c2c869b372e0632
[]
no_license
natverse/neuprintr
45bf13ea5f351c7088ad0e9de36a737342e070f3
1bf392fb896710d64e1b03cdddb38ea05a54f5d1
refs/heads/master
2023-08-26T15:19:05.332544
2023-08-16T03:32:48
2023-08-16T03:32:48
176,496,271
3
2
null
2023-09-07T19:59:04
2019-03-19T11:22:52
R
UTF-8
R
false
true
2,002
rd
neuprint_common_connectivity.Rd
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/connectivity.R
\name{neuprint_common_connectivity}
\alias{neuprint_common_connectivity}
\title{Get the common synaptic partners for a set of neurons}
\usage{
neuprint_common_connectivity(
  bodyids,
  statuses = NULL,
  prepost = c("PRE", "POST"),
  all_segments = FALSE,
  dataset = NULL,
  conn = NULL,
  ...
)
}
\arguments{
\item{bodyids}{the cypher by which to make your search}

\item{statuses}{if not NULL, only bodies with the given status are
considered. Statuses include: Unimportant, 0.5assign, Leaves, Prelim Roughly
Traced, Anchor, Orphan.}

\item{prepost}{whether to look for partners presynaptic or postsynaptic to
the given bodyids. So when \code{prepost="PRE"} you will return inputs
(upstream partners) of your starting neurons.}

\item{all_segments}{if TRUE, all bodies are considered; if FALSE, only
'Neurons', i.e. bodies with a roughly traced status.}

\item{dataset}{optional, a dataset you want to query. If \code{NULL}, the
default specified by your R environ file is used or, failing that, the
current connection is used. See \code{\link{neuprint_login}} for details.}

\item{conn}{optional, a neuprintr connection object, which also specifies
the neuPrint server. If NULL, the defaults set in your
\code{\link[=Startup]{.Rprofile}} or \code{\link[=Startup]{.Renviron}} are
used. See \code{\link{neuprint_login}} for details.}

\item{...}{methods passed to \code{neuprint_login}}
}
\value{
an n x m matrix where n corresponds to the neurons that all connect to m
bodyids
}
\description{
Get the neurons that are shared synaptic partners for a set of given
bodyids, either upstream or downstream.
}
\examples{
\donttest{
da2s=neuprint_search('.*DA2.*')
da2conn = neuprint_common_connectivity(da2s$bodyid[1:2], prepost='PRE')
plot(t(da2conn))
head(cbind(t(da2conn), sum=colSums(da2conn)))
}
}
\seealso{
\code{\link{neuprint_simple_connectivity}},
\code{\link{neuprint_get_adjacency_matrix}}
}
5eaa5918324aff63c9434840a2eac652e3c09038
8b2f41a3710354f0778a84680eb6c72eed060312
/External Election Data.R
01b8bf05cbca4239ff7eb68aebd15d425316762d
[]
no_license
paviellehaines/citunited
5714431eac4f550a75142d07397603ae2c358a55
6adb07abfdeacd42faf1cb5079864ad6ea0c49ee
refs/heads/master
2021-05-08T21:14:31.350201
2018-01-31T04:24:22
2018-01-31T04:24:22
119,633,082
0
0
null
null
null
null
UTF-8
R
false
false
28,892
r
External Election Data.R
# READ IN AND CLEAN DATA --------------------------------------------------
# NOTE: plyr must be attached BEFORE dplyr. Loading plyr second masks
# dplyr's summarise/mutate/arrange/rename and silently breaks grouped
# operations (plyr itself warns: "You have loaded plyr after dplyr -
# this is likely to cause problems").
library(foreign)
library(data.table)
library(plyr)
library(dplyr)
library(tidyr)

elections1 <- read.dta("34297-0001-Data.dta")                  # ICPSR 1967-2010 returns
elections2 <- read.csv("SLERs2011to2012_only_2013_05_14.csv")  # 2011-2012 returns
col14 <- read.csv("2014GeneralPrecinctResults.csv")            # 2014 precinct returns

# ORGANIZE AND GENERATE MISSING DATA FOR 2011-2012 ------------------------
elections2 <- subset(elections2, V17 == 1)  # Remove non-final elections

# Unique district / election / candidate identifiers
elections2$V11a <- paste(elections2$V08, elections2$V09, elections2$V10, sep = ".")
elections2$election_id <- elections2 %>% group_indices(V02, V05, V11a, V16, V07)
elections2$candid <- elections2 %>% group_indices(election_id, V19)

# Sum total votes per election (computed BEFORE dropping write-ins, so
# write-in votes still count toward each election's total)
totvotes <- aggregate(elections2$V23, by = list(elections2$election_id), FUN = sum)
colnames(totvotes) <- c("election_id", "totalvote")
totvotes$election_id <- as.integer(totvotes$election_id)

elections2 <- subset(elections2, V19 != "WRITEIN")  # Remove write-ins for candidate count

# Sum total votes per candidate per election
candvotes <- aggregate(elections2$V23, by = list(elections2$candid), FUN = sum)
colnames(candvotes) <- c("candid", "candvotes")
candvotes$candid <- as.integer(candvotes$candid)

# Count duplicate candidates in each election.
# BUG FIX: data.frame(table(...)) returns a factor in column 1;
# as.integer(factor) yields the lexicographically ordered level CODES,
# not the candid values, which scrambles the merge below. Convert through
# as.character first (R FAQ 7.10).
duplicates <- data.frame(table(elections2$candid))
colnames(duplicates) <- c("candid", "duplicates")
duplicates$candid <- as.integer(as.character(duplicates$candid))
duplicates$duplicates <- as.integer(duplicates$duplicates)

# Prepare data for merge
totvotes <- data.table(totvotes)
candvotes <- data.table(candvotes)
duplicates <- data.table(duplicates)
elections2 <- data.table(elections2)

# Merge data
elections2 <- merge(elections2, totvotes, by = "election_id")
elections2 <- merge(elections2, candvotes, by = "candid")
elections2 <- merge(elections2, duplicates, by = "candid")
elections2$duplicates <- as.integer(elections2$duplicates)

elections2a <- subset(elections2, duplicates == 1)  # Candidates without duplicates

# Resolve duplicated candidate IDs: keep the record with the most votes
reconcile <- subset(elections2, duplicates > 1)
reconcile1 <- arrange(reconcile, candid, desc(V23))
reconcile2 <- aggregate(reconcile1$V23, by = list(reconcile1$candid), FUN = max)
colnames(reconcile2) <- c("candid", "V23")
reconcile2$max <- 1
reconcile3 <- merge(reconcile2, reconcile, by = c("candid", "V23"), all = TRUE)
reconcile4 <- subset(reconcile3, !is.na(reconcile3$max))
reconcile5 <- subset(reconcile4, select = -c(max))
elections2 <- rbind(elections2a, reconcile5)

# Count number of candidates in each election
# (same factor -> integer fix as for `duplicates` above)
candidatenum <- data.frame(table(elections2$election_id))
colnames(candidatenum) <- c("election_id", "numrow")
candidatenum$election_id <- as.integer(as.character(candidatenum$election_id))
candidatenum <- data.table(candidatenum)
elections2 <- merge(elections2, candidatenum, by = "election_id")

# Candidate vote share
elections2$candperc <- (elections2$candvotes / elections2$totalvote) * 100

# Candidates missing percentages are all single-candidate races with no
# runner-up: credit them with 100% (totalvote = 1000 is a placeholder).
missing <- subset(elections2, is.na(elections2$candperc))
elections2b <- subset(elections2, !is.na(elections2$candperc))
missing$candperc <- 100
missing$totalvote <- 1000
elections2 <- rbind(missing, elections2b)

# Add rank by race
elections2[, candrank := rank(-candvotes, ties.method = "first"), by = election_id]
data.frame(elections2)  # NOTE(review): result discarded; possibly meant elections2 <- data.frame(elections2) -- confirm

# Sanity check: every election should have at least one winner
nowinner <- aggregate(elections2$V24, by = list(elections2$election_id), FUN = sum)
nowinner <- subset(nowinner, x == 0)
nrow(nowinner)

# Margin of victory for elections with no runner-up
noru <- subset(elections2, numrow <= V13)
noru$ru <- ifelse(noru$candrank - noru$V13 == 1, 1, 0)
noru$ruvotes <- 0
noru$ruperc <- 0
noru$margin <- noru$candperc - noru$ruperc

# Margin of victory for elections with runner-ups
ru1 <- subset(elections2, numrow > V13)
ru1$ru <- ifelse(ru1$candrank - ru1$V13 == 1, 1, 0)
ruperc <- subset(ru1, ru == 1, select = c("candperc", "election_id", "candvotes"))
names(ruperc) <- c("ruperc", "election_id", "ruvotes")
ru <- merge(ru1, ruperc, by = "election_id", all = TRUE)
ru$margin <- ru$candperc - ru$ruperc
elections2 <- rbind(noru, ru)

# Keep only the winners, with readable variable names
elections2win <- subset(elections2, V24 == 1)
elections2win$V12 <- ifelse(elections2win$V13 > 1,
                            "Multimember District", "Single-Member District")
elections2win <- subset(elections2win,
                        select = c(V02, V05, V07, V08, V09, V10, V11a, V12, V13,
                                   numrow, V14, V16, V19, V21, V22, candvotes,
                                   ruvotes, totalvote, candperc, ruperc, margin))
names(elections2win) <- c("state", "year", "seat", "distname", "distnum",
                          "distpost", "distid", "distkind", "numwinners",
                          "numcands", "term", "electionkind", "candname",
                          "candparty", "incumbent", "candvotes", "ruvotes",
                          "totalvotes", "candperc", "ruperc", "margin")

# ORGANIZE AND GENERATE MISSING DATA FOR 1967-2010 ------------------------
elections1 <- subset(elections1, V17 == 1)     # Remove non-final elections
elections1 <- subset(elections1, V05 >= 1980)  # Remove early elections
elections1$V10new <- elections1$V10            # Create a unique district ID
elections1$V10new[is.na(elections1$V10new)] <- "."
elections1$V11a <- paste(elections1$V08, elections1$V09, elections1$V10new, sep = ".") elections1$election_id <- elections1 %>% group_indices(V02, V05, V11a, V16, V07)#Create unique election ID elections1$candid <- elections1 %>% group_indices(election_id, V19) #Create unique candidate ID totvotes <- aggregate(elections1$V23, by = list(elections1$election_id), FUN = sum) #Sum total votes per election colnames(totvotes) <- c("election_id", "totalvote") totvotes$election_id <- as.integer(totvotes$election_id) candvotes <- aggregate(elections1$V23, by = list(elections1$candid), FUN = sum) #Sum total votes per candidate per election colnames(candvotes) <- c("candid", "candvotes") candvotes$candid <- as.integer(candvotes$candid) duplicates <- data.frame(table(elections1$candid)) #Count duplicate candidates in each election colnames(duplicates) <- c("candid", "duplicates") duplicates$candid <- as.integer(duplicates$candid) duplicates$duplicates <- as.integer(duplicates$duplicates) totvotes <- data.table(totvotes) #Prepare data for merge candvotes <- data.table(candvotes) duplicates <- data.table(duplicates) elections1 <- data.table(elections1) elections1 <- merge(elections1, totvotes, by="election_id") #Merge data elections1 <- merge(elections1, candvotes, by="candid") elections1 <- merge(elections1, duplicates, by="candid") elections1a <- subset(elections1, duplicates == 1) #Subset candidates without duplictaes reconcile <- subset(elections1, duplicates > 1) #Remove and subset duplicated candidate ID's reconcile1 <- arrange(reconcile, candid, desc(V23)) reconcile2 <- aggregate(reconcile1$V23, by = list(reconcile1$candid), FUN = max) #Remove duplicates colnames(reconcile2) <- c("candid", "V23") reconcile2$max <- 1 reconcile3 <- merge(reconcile2,reconcile, by= c("candid", "V23"), all = TRUE) reconcile4 <- subset(reconcile3, !is.na(reconcile3$max)) reconcile5 <- subset(reconcile4, select = -c(max)) elections1 <- rbind(elections1a, reconcile5) elections1$numrow <- 
elections1$V25 #Number of candidats in the election elections1$candperc <- (elections1$candvotes/elections1$totalvote)*100 #Get candidates percentage missing <- subset(elections1, is.na(elections1$candperc)) #Subset candidates who are missing percentages, but can be deduced elections1b <- subset(elections1, !is.na(elections1$candperc)) missingnocomp <- subset(missing, V13 == 1 & V13 >= V25) #Identify which candidates were unchallenged and give them 100% of votes missingnocomp$candperc <- 100 missingnocomp$candvotes <- 1000 missingcomp <- subset(missing, V13 != 1 | V13 < V25) #Subset data that is missing and cannot be deduced missingcomp$problem <- "missing" problemdata <- subset(elections1, election_id == 480 | election_id == 27965 | election_id == 49449 | election_id == 49651 | election_id == 50794 | election_id == 52607 | election_id == 54025 | election_id == 54973 | election_id == 56072 | election_id == 56287 | election_id == 56580 | election_id == 57569 | election_id == 72767 | election_id == 75615 | election_id == 77018 | election_id == 85763 | election_id == 90869 | election_id == 90872 | election_id == 90926 | election_id == 90985 | election_id == 91071 | election_id == 91876 | election_id == 92180) problemdata$problem <- "wrongwinner" problems <- rbind(missingcomp, problemdata) write.csv(problems, "Missing Data.csv") elections1 <- rbind(missingnocomp, elections1b) #Put data back together elections1[,candrank:=rank(-candvotes,ties.method="first"),by= election_id] #Add rank by race data.frame(elections1) nowinner <- aggregate(elections1$V24, by = list(elections1$election_id), FUN = sum) #Identify elections with no winners and remove nowinner <- subset(nowinner, x == 0) #Notice that all elections have winners nrow(nowinner) noru <- subset(elections1, numrow <= V13) #Generate margin of victory for elections with no runnerups noru$ru <- ifelse(noru$candrank - noru$V13 == 1, 1, 0) noru$ruvotes <- 0 noru$ruperc <- 0 noru$margin <- noru$candperc - noru$ruperc ru1 
<- subset(elections1, numrow > V13) #Generate margin of victory of elections with runnerups ru1$ru <- ifelse(ru1$candrank - ru1$V13 == 1, 1, 0) ruperc <- subset(ru1, ru == 1, select = c("candperc", "election_id", "candvotes")) names(ruperc) <- c("ruperc", "election_id", "ruvotes") ru <- merge(ru1, ruperc, by = "election_id", all = TRUE) ru$margin <- ru$candperc - ru$ruperc elections1 <- rbind(noru, ru) #Put the data back together elections1win <- subset(elections1, V24 == 1) #Subset only the winners problems <- subset(elections1win, margin <= 0) elections1win <- subset(elections1win, election_id != 480 & election_id != 27965 & election_id != 49449 & election_id != 49651 & election_id != 50794 & election_id != 52607 & election_id != 54025 & election_id != 54973 & election_id != 56072 & election_id != 56287 & election_id != 56580 & election_id != 57569 & election_id != 72767 & election_id != 75615 & election_id != 77018 & election_id != 85763 & election_id != 90869 & election_id != 90872 & election_id != 90926 & election_id != 90985 & election_id != 91071 & election_id != 91876 & election_id != 92180) elections1win <- subset(elections1win, select = c(V02, V05, V07, V08, V09, V10, V11a, V12, V13, numrow, V14, V16, V19, V21, V22, candvotes, ruvotes, totalvote, candperc, ruperc, margin)) names(elections1win) <- c("state", "year", "seat", "distname", "distnum", "distpost", "distid", "distkind", "numwinners", "numcands", "term", "electionkind", "candname", "candparty", "incumbent", "candvotes", "ruvotes", "totalvotes", "candperc", "ruperc", "margin") # MERGE 1967-2010 AND 2011-2012 ------------------------------------------- elections <- rbind(elections1win, elections2win) #Combine ICPSR datasets elections <- data.frame(elections) elections$data <- "icpsr" write.csv(elections, "Final Organized Data.csv") #Write to a CSV file and combine with corrected and collected data in Excel # ELECTION ANALYSIS ------------------------------------------------------- elections1 <- 
read.csv("Final Organized Data Ver 3.csv") #Read in new CSV file head(elections1) elections1$electionkind1 <- ifelse(elections1$electionkind == "G" | elections1$electionkind == "GR", "G", elections1$electionkind) elections1$election_id <- elections1 %>% group_indices(state, seat, year, electionkind1) #Create unique election ID elections1$winner <- 1 #Create a variable showing that all candidates in data are winners to allow for counting up total candidates by election elections1$Rseats1 <- ifelse(elections1$candparty == "R", 1, 0) #Create new variables to calculate R seat and vote share elections1$Rvotes1 <- ifelse(elections1$candparty == "R", elections1$candvotes, 0) elections1$Pseats1 <- ifelse(elections1$candparty == "R" | elections1$candparty == "D", 1, 0) elections1$Pvotes1 <- ifelse(elections1$candparty == "R" | elections1$candparty == "D", elections1$totalvotes, 0) electseats <- aggregate(elections1$winner, by = list(elections1$election_id), FUN = sum) #Aggregate new variables electvotes <- aggregate(elections1$totalvotes, by = list(elections1$election_id), FUN = sum) Pseats <- aggregate(elections1$Pseats1, by = list(elections1$election_id), FUN = sum) Pvotes <- aggregate(elections1$Pvotes1, by = list(elections1$election_id), FUN = sum) Rseats <- aggregate(elections1$Rseats1, by = list(elections1$election_id), FUN = sum) Rvotes <- aggregate(elections1$Rvotes1, by = list(elections1$election_id), FUN = sum) names(electseats) <- c("election_id", "electseats") #Rename aggregations names(electvotes) <- c("election_id", "electvotes") names(Pseats) <- c("election_id", "Pseats2") names(Pvotes) <- c("election_id", "Pvotes2") names(Rseats) <- c("election_id", "Rseats2") names(Rvotes) <- c("election_id", "Rvotes2") elections2 <- merge(elections1, electseats, by = "election_id") #Merge aggregations back with original dataset elections2 <- merge(elections2, electvotes, by = "election_id") elections2 <- merge(elections2, Pseats, by = "election_id") elections2 <- 
merge(elections2, Pvotes, by = "election_id") elections2 <- merge(elections2, Rseats, by = "election_id") elections2 <- merge(elections2, Rvotes, by = "election_id") elections2$Rseats <- elections2$Rseats2/elections2$electseats*100 #Calculate R vote and seat share elections2$Rvotes <- elections2$Rvotes2/elections2$electvotes*100 elections2$PRseats <- elections2$Rseats2/elections2$Pseats2*100 elections2$PRvotes <- elections2$Rvotes2/elections2$Pvotes2*100 elections2$originaldat <- 1 legislate <- subset(elections2, select = -c(Rseats1, Rseats2, Rvotes1, Rvotes2, Pseats1, Pvotes1, Pseats2, Pvotes2)) #Remove uncessary variables # ORGANIZE THE DATA FOR ANALYSIS ------------------------------------------ legislate$nextelect <- legislate$year + legislate$term #Create a variable showing when the rep will next be up for reelection i <- 1990 #Create a loop to create a file that has all sitting reps for each legislative year #Run the loop while (i <= 2015){ legislate1 <- subset(legislate, year < i) legislate2 <- subset(legislate1, nextelect >= i) legislate2$legyear <- i data.frame(legislate2) filename <- as.character(paste("Legislative Data ", i, ".csv", sep = "")) write.csv(legislate2, filename) i <- i + 1 #Reset i for the next iteration of the loop } year90 <- read.csv("Legislative Data 1990.csv") #Read back in each legislative year file year91 <- read.csv("Legislative Data 1991.csv") year92 <- read.csv("Legislative Data 1992.csv") year93 <- read.csv("Legislative Data 1993.csv") year94 <- read.csv("Legislative Data 1994.csv") year95 <- read.csv("Legislative Data 1995.csv") year96 <- read.csv("Legislative Data 1996.csv") year97 <- read.csv("Legislative Data 1997.csv") year98 <- read.csv("Legislative Data 1998.csv") year99 <- read.csv("Legislative Data 1999.csv") year00 <- read.csv("Legislative Data 2000.csv") year01 <- read.csv("Legislative Data 2001.csv") year02 <- read.csv("Legislative Data 2002.csv") year03 <- read.csv("Legislative Data 2003.csv") year04 <- 
read.csv("Legislative Data 2004.csv") year05 <- read.csv("Legislative Data 2005.csv") year06 <- read.csv("Legislative Data 2006.csv") year07 <- read.csv("Legislative Data 2007.csv") year08 <- read.csv("Legislative Data 2008.csv") year09 <- read.csv("Legislative Data 2009.csv") year10 <- read.csv("Legislative Data 2010.csv") year11 <- read.csv("Legislative Data 2011.csv") year12 <- read.csv("Legislative Data 2012.csv") year13 <- read.csv("Legislative Data 2013.csv") year14 <- read.csv("Legislative Data 2014.csv") year15 <- read.csv("Legislative Data 2015.csv") leganalysis <- rbind(year90, year91, year92, year93, year94, year95, year96, year97, year98, year99, year00, year01, year02, year03, year04, year05, year06, year07, year08, year09, year10, year11, year12, year13, year14, year15) #Combine legislative years write.csv(leganalysis, "Final Legislative Analysis Data.csv") #Do light variable editing in excel to make sure all variables are is coded consistently # PARTISAN DOMINATION ----------------------------------------------------- legislate <- read.csv("Final Legislative Analysis Data.csv") #Read in new dataset legislate <- droplevels(subset(legislate, state != "NE" & state != "LA")) #Remove states with non-partisan elections legislate$legislative_id <- legislate %>% group_indices(state, seat, legyear) #Create unique legislative ID partyseats1 <- table(legislative_id = legislate$legislative_id, party = legislate$candparty) #Tabulate the number of seats held by each party for each election ID partyseats2 <- table(legislate$legislative_id, legislate$candparty) partyseats1 <- as.data.frame(partyseats1) partyseats2 <- as.data.frame.matrix(partyseats2) partyseats <- cbind(partyseats1$legislative_id, partyseats2, row.names = NULL) names(partyseats) <- c("legislative_id", "D", "I", "R") legislate1 <- merge(partyseats, legislate, by = "legislative_id") #Merge counts with full dataset legislate1 <- unique(legislate1) legislate1$majority1 <- ifelse(legislate1$D > 
legislate1$R, "D", ifelse(legislate1$D == legislate1$R, "T", "R")) #Identify the majority and minority party legislate1$minority1 <- ifelse(legislate1$majority == "T", "T", ifelse(legislate1$majority == "D", "R", "D")) legislate1$majority <- ifelse(legislate1$majority1 == "T", "D", legislate1$majority) legislate1$minority <- ifelse(legislate1$minority1 == "T", "R", legislate1$minority) legislate1$majorseatdiff <- abs(legislate1$D - legislate1$R) #Get difference in seats bewteen major party and minor party legislate1$Rseatdiff <- legislate1$R - legislate1$D #Get seat difference in seats between Republicans and Democrats legislate1$totseats <- legislate1$D + legislate1$I + legislate1$R #Count total seats legislate1$Dperc <- legislate1$D/legislate1$totseats*100 #Calculate percent of Democratic seats legislate1$Iperc <- legislate1$I/legislate1$totseats*100 #Calculate percent of independent seats legislate1$Rperc <- legislate1$R/legislate1$totseats*100 #Calcualte percent of Republican seats legislate1$majorpercdiff <- abs(legislate1$Dperc - legislate1$Rperc) #Calculate differnece in seat percentage between major and minor parties legislate1$Rpercdiff <- legislate1$Rperc - legislate1$Dperc #Calculate seat percentage difference between Republicans and Democrats legislate1$majorover50 <- ifelse(legislate1$majority == "D", legislate1$Dperc - 50, legislate1$Rperc - 50) #Calculate the percentage of majority party seats over 50% legislate1$Rover50 <- legislate1$Rperc - 50 #Calculate the percentage of Republican seats over or under 50% legislate1$nonminor <- ifelse(legislate1$candparty != legislate1$minority, 1, 0) #Identify non-minor reps legislate1$nonminorid <- legislate1 %>% group_indices(legislative_id, nonminor) #Create unique candidate ID legislate1 <- data.table(legislate1) #Rank candidates within each legislative year and chamber legislate1[,candranknonminor:=rank(margin,ties.method="first"),by = nonminorid] legislate2 <- data.frame(legislate1) head(legislate2) 
write.csv(legislate2, "Partisan Domination 1.csv") #Check data visually in excel and organize variables legislate3 <- read.csv("Partisan Domination 2.csv") #Read data back in legislate3a <- subset(legislate3, nonminor == 0) #Separate minor and non-minor party reps legislate3b <- subset(legislate3, nonminor == 1) legislate3b <- data.table(legislate3b) #Create rank by partisanship legislate3b[,nonminorpidrank:=rank(margin,ties.method="first"),by = c("legislative_id", "candparty")] legislate3b$D <- as.numeric(legislate3b$D) #Format data for if-then conditions legislate3b$I <- as.numeric(legislate3b$I) legislate3b$R <- as.numeric(legislate3b$R) legislate3b$newD1 <- legislate3b$D #Calculate the new number of Democratic seats if the minority party won a seat, starting with the weakest rep legislate3b$newD2 <- ifelse(legislate3b$minority == "D", legislate3b$D + legislate3b$candranknonminor, legislate3b$newD1) legislate3b$newD <- ifelse(legislate3b$candparty == "D", legislate3b$D - legislate3b$nonminorpidrank, legislate3b$newD2) legislate3b$newI1 <- legislate3b$I #Calculate the new number of indpendent seats if the minority party won a seat, starting with the weakest rep legislate3b$newI2 <- ifelse(legislate3b$minority == "I", legislate3b$I + legislate3b$candranknonminor, legislate3b$newI1) legislate3b$newI <- ifelse(legislate3b$candparty == "I", legislate3b$I - legislate3b$nonminorpidrank, legislate3b$newI2) legislate3b$newR1 <- legislate3b$R #Calculate the new number of Republican seats if the minority party won a seat, starting with the weakest rep legislate3b$newR2 <- ifelse(legislate3b$minority == "R", legislate3b$R + legislate3b$candranknonminor, legislate3b$newR1) legislate3b$newR <- ifelse(legislate3b$candparty == "R", legislate3b$R - legislate3b$nonminorpidrank, legislate3b$newR2) legislate3b <- subset(legislate3b, select = -c(newD1, newD2, newI1, newI2, newR1, newR2)) #Remove uncessary variables write.csv(legislate3b, "Partisan Shift.csv") #Visually check data 
and correct elections with independents in Excel legislate3bb <- read.csv("Partisan Shift 1.csv") #Read csv back in legislate3bb$newmajority <- ifelse(legislate3bb$newD > legislate3bb$newR, "D", ifelse(legislate3bb$newD == legislate3bb$newR, "T", "R")) #Identify who the new majority party would be with each newly one seat legislate3bb$controlshift <- ifelse(legislate3bb$newmajority != "T" & legislate3bb$newmajority != legislate3bb$majority, "Yes", "No") #Identify when the minor party would become the major party write.csv(legislate3bb, "Partisan Shift 2.csv") #Visually check data and calculate required seat and margin shift in excel legislate3bc <- read.csv("Partisan Shift 3.csv") #Read data back in shift <- subset(legislate3bc, select = c(legislative_id, rseatshift, rmarginshift)) #Isolate the newly calculated seat and margin shifts shift <- na.omit(shift) legislate3bd <- subset(legislate3bc, select = -c(nonminor, nonminorid, candranknonminor, nonminorpidrank, newD, newI, newR, newmajority, controlshift, rseatshift, rmarginshift)) #Remove uncessary variables legislate3ab <- subset(legislate3a, select = -c(nonminor, nonminorid, candranknonminor)) #Remov uncessary variables from the original dataset legislatefinal <- rbind(legislate3ab, legislate3bd) #Combine the original and newly calculted datasets legislatefinal <- unique(legislatefinal) #Remove duplicates legislatefinal <- merge(legislatefinal, shift, by = "legislative_id") #Add legislative shifts to the dataset summary(legislatefinal$margin) #View the data legislatefinal$percent5a <- ifelse(legislatefinal$margin <= 5, 1, 0) #Identify margins less than 5% legislatefinal$percent10a <- ifelse(legislatefinal$margin <= 10, 1, 0) #Identify margins less than 10% legislatefinal$percent15a <- ifelse(legislatefinal$margin <= 15, 1, 0) #Identify margins less than 15% percent5num <- aggregate(legislatefinal$percent5a, by = list(legislatefinal$legislative_id), FUN = sum) #Add margins less than 5% by legislature 
names(percent5num) <- c("legislative_id", "num5") percent5num <- data.frame(percent5num) percent5tot <- subset(legislatefinal, select = c(legislative_id, totseats)) percent5tot <- unique(percent5tot) percent5 <- merge(percent5num, percent5tot, by = "legislative_id") percent5$percent5 <- percent5$num5/percent5$totseats*100 percent5 <- subset(percent5, select = c(legislative_id, percent5)) percent10num <- aggregate(legislatefinal$percent10a, by = list(legislatefinal$legislative_id), FUN = sum) #Add margins less than 10% by legislature names(percent10num) <- c("legislative_id", "num10") percent10num <- data.frame(percent10num) percent10tot <- subset(legislatefinal, select = c(legislative_id, totseats)) percent10tot <- unique(percent10tot) percent10 <- merge(percent10num, percent10tot, by = "legislative_id") percent10$percent10 <- percent10$num10/percent10$totseats*100 percent10 <- subset(percent10, select = c(legislative_id, percent10)) percent15num <- aggregate(legislatefinal$percent15a, by = list(legislatefinal$legislative_id), FUN = sum) #Add margins less than 15% by legislature names(percent15num) <- c("legislative_id", "num15") percent15num <- data.frame(percent15num) percent15tot <- subset(legislatefinal, select = c(legislative_id, totseats)) percent15tot <- unique(percent15tot) percent15 <- merge(percent15num, percent15tot, by = "legislative_id") percent15$percent15 <- percent15$num15/percent15$totseats*100 percent15 <- subset(percent15, select = c(legislative_id, percent15)) legislatefinal <- merge(legislatefinal, percent5, by = "legislative_id") #Combine summed margins and original data legislatefinal <- merge(legislatefinal, percent10, by = "legislative_id") legislatefinal <- merge(legislatefinal, percent15, by = "legislative_id") legislatefinal <- subset(legislatefinal, select = -c(percent5a, percent10a, percent15a)) #Remove uncessary variables write.csv(legislatefinal, "Final Election and Partisan Domination Variables.csv") #Write new dataset # REPLICATION 
OF ABDUL-RAZZAK ET AL. ELECTORAL OUTCOMES ------------------- legislate <- leganalysis legislate$banstate <- ifelse(legislate$state == "WA" | legislate$state == "OR" |legislate$state == "ID" |legislate$state == "CA" |legislate$state == "NV" |legislate$state == "UT" |legislate$state == "NM" |legislate$state == "NE" |legislate$state == "KS" | legislate$state == "MO" |legislate$state == "AR" |legislate$state == "LA" |legislate$state == "IL" |legislate$state == "IN" | legislate$state == "MS" |legislate$state == "AL" |legislate$state == "GA" |legislate$state == "SC" |legislate$state == "FL" |legislate$state == "VA" |legislate$state == "MD" |legislate$state == "DE" |legislate$state == "NJ" |legislate$state == "NY" |legislate$state == "VT" |legislate$state == "ME"| legislate$state == "HI", 0, 1) legislate$deepsouth <- ifelse(legislate$state == "AL" | legislate$state == "GA" | legislate$state == "MS" | legislate$state == "SC" | legislate$state == "FL", 1, 0) legislate$postcit <- ifelse(legislate$year >= 2010, 1, 0) legislate <- droplevels(subset(legislate, state != "NE" & state != "LA")) legislate$newchamber <- ifelse(legislate$legyear - 1 == legislate$year, 1, 0) repseats <- subset(legislate, electionkind == "G" | electionkind == "SSG") #Remove irrelevant election data repseats <- subset(repseats, !duplicated(election_id)) repseats <- subset(repseats, year >= 1990) repseats$interaction <- repseats$postcit*repseats$banstate lower <- subset(repseats, seat == "House") #Count number of elections for Senate and House nrow(lower) upper <- subset(repseats, seat == "Senate") nrow(upper) library(lfe) fixedlower <- felm(Rseats ~ banstate:postcit + G(state) + G(year), data = lower, cluster = lower$state) #Run replication analyses (ignore warnings) summary(fixedlower) fixedupper <- felm(Rseats ~ banstate:postcit + G(state) + G(year), data = upper, cluster = upper$state) summary(fixedupper) fixedlower <- felm(Rvotes ~ banstate:postcit + G(state) + G(year), data = lower, cluster = 
lower$state) summary(fixedlower) fixedupper <- felm(PRvotes ~ banstate:postcit + G(state) + G(year), data = upper, cluster = upper$state) summary(fixedupper) fixedlower <- felm(Rseats ~ banstate:postcit + G(state) + G(year) + deepsouth:year, data = lower, cluster = lower$state) summary(fixedlower) fixedupper <- felm(Rseats ~ banstate:postcit + G(state) + G(year) + deepsouth:year, data = upper, cluster = upper$state) summary(fixedupper) fixedlower <- felm(Rvotes ~ banstate:postcit + G(state) + G(year) + deepsouth:year, data = lower, cluster = lower$state) summary(fixedlower) fixedupper <- felm(Rvotes ~ banstate:postcit + G(state) + G(year) + deepsouth:year, data = upper, cluster = upper$state) summary(fixedupper) fixedlower <- felm(Rseats ~ banstate:postcit + G(state) + G(year) + state:year, data = lower, cluster = lower$state) summary(fixedlower) fixedupper <- felm(Rseats ~ banstate:postcit + G(state) + G(year) + state:year, data = upper, cluster = upper$state) summary(fixedupper) fixedlower <- felm(Rvotes ~ banstate:postcit + G(state) + G(year) + state:year, data = lower, cluster = lower$state) summary(fixedlower) fixedupper <- felm(Rvotes ~ banstate:postcit + G(state) + G(year) + state:year, data = upper, cluster = upper$state) summary(fixedupper)
dbf5ba52ce15322fef75d1cdb5edd7433059162d
7cde1a2f5bf3161b5aab0bae82f57c7d8bfed631
/run_analysis.R
2c18b4af97c899095dac04d9e072cb49d09a9378
[]
no_license
davarix/Coursera-Getting-and-Cleaning-Data-Course-Project
50c1262220ca863185e59bb36c06432200ef090a
f12431b08c140586255b9eb5353860d808133c4a
refs/heads/master
2021-01-11T11:30:45.282467
2017-01-26T12:47:56
2017-01-26T12:47:56
80,111,535
0
0
null
null
null
null
UTF-8
R
false
false
3,750
r
run_analysis.R
# run_analysis.R -- Getting and Cleaning Data course project.
# Downloads the UCI HAR data set if absent, merges the training/test splits,
# keeps only mean/std measurements, attaches descriptive activity names, and
# writes a tidy per-subject / per-activity summary to "tidy_dataset.txt".

rm(list = ls())  # NOTE(review): clears the workspace; kept for script parity

# Load all necessary packages
library(dplyr)

# Read every *.txt file under the working directory into the global
# environment, one data.frame per file, named after the file (sans extension).
# (Was duplicated verbatim in both branches below.)
load_txt_files <- function() {
  files.path <- list.files(pattern = ".txt$", recursive = TRUE)
  fns <- file.path(getwd(), files.path)
  list2env(setNames(lapply(fns, read.table, fill = TRUE),
                    basename(tools::file_path_sans_ext(fns))),
           globalenv())
}

# Check whether the data is present in the current work directory;
# download and unzip the archive first when it is not.
data.dir.name <- "UCI HAR Dataset"
if (data.dir.name %in% list.files()) {
  load_txt_files()
} else {
  filename <- "getdata_dataset.zip"
  fileURL <- "https://d396qusza40orc.cloudfront.net/getdata%2Fprojectfiles%2FUCI%20HAR%20Dataset.zip"
  download.file(fileURL, filename, method = "curl")
  unzip(filename)
  load_txt_files()
}

# Check whether all required data sets were loaded into R
data.to.load <- c("activity_labels", "features", "subject_test",
                  "subject_train", "X_test", "X_train", "y_test", "y_train")
if (!all(data.to.load %in% ls())) {
  stop("Error loading data into R")
}

# Merges the training and the test sets to create one data set.
xdata <- tbl_df(rbind(X_test, X_train))

# Extracts only the measurements on the mean and standard deviation
# for each measurement.
testnames <- tbl_df(features)
colnames(testnames) <- c("n", "testname")
## Keep only the feature labels that name mean or std measurements
testnames_meanstd <- filter(testnames, grepl(".*[Mm]ean.*|.*[Ss]td.*", testname))
## Clean the labels: strip punctuation, lower-case everything
testnames_meanstd.names <- testnames_meanstd$testname
testnames_meanstd.names <- gsub("-mean", "Mean", testnames_meanstd.names)
testnames_meanstd.names <- gsub("-std", "Std", testnames_meanstd.names)
testnames_meanstd.names <- gsub("[()-]", "", testnames_meanstd.names)
testnames_meanstd.names <- tolower(testnames_meanstd.names)
## Select the matching columns from xdata by position
select_col <- as.numeric(testnames_meanstd$n)
colnames(xdata) <- sprintf("x%d", seq_len(ncol(xdata)))
xdatasel <- select(xdata, num_range("x", select_col))
## Relabel the selected columns with the cleaned descriptive names
rename_col <- as.vector(testnames_meanstd.names)
colnames(xdatasel) <- rename_col

# Uses descriptive activity names to name the activities in the data set
activity <- rbind(y_test, y_train)
colnames(activity) <- c("activity_id")
activity$id <- seq.int(nrow(activity))  # preserve row order across merge()
## Name activity_labels data frame
colnames(activity_labels) <- c("activity_id", "activity_name")
## Merge activity data and activity labels; merge() may reorder rows,
## so restore the original order by id afterwards.
activity2 <- merge(activity, activity_labels, by = "activity_id",
                   all = TRUE, sort = FALSE)
activity3 <- arrange(tbl_df(activity2), id)  # was a dead tbl_df() assignment
activity_label <- activity3$activity_name

# Merge subject_test and subject_train
subject <- rbind(subject_test, subject_train)
colnames(subject) <- c("subject")

# Appropriately labels the data set with descriptive variable names.
final_dataset <- cbind(subject, activity_label, xdatasel)

# From the final data set, creates a second, independent tidy data set with
# the average of each variable for each activity and each subject.
by_act_sub <- group_by(final_dataset, subject, activity_label)
tidy_groupped <- summarise_all(by_act_sub, mean)  # summarise_each() is defunct

# Output in the current working directory
output <- file.path(getwd(), "tidy_dataset.txt")
write.table(tidy_groupped, output, row.names = FALSE)
49dec84cbca1f61f0e5f03447cacaf84feec54eb
6d821c8a242005f093001cb50ffda26d98efc392
/R/choosethres.R
c3b99a49cd9207ce4b63332b172eee83982dee54
[]
no_license
cran/extremeIndex
961453a27a2cb87134123df160f33f9921bcda20
b2034ed3a729f86ae74dabceb16e3debbcfbb9d2
refs/heads/master
2021-11-30T22:58:05.392388
2021-11-24T14:30:02
2021-11-24T14:30:02
213,374,552
1
0
null
null
null
null
UTF-8
R
false
false
2,905
r
choosethres.R
#' Function for heuristically choosing the domain where extreme value theory can be applied
#'
#' @param data a numeric vector containing the observation used for verification
#' @param thresh vector of thresholds to try
#' @param guess starting values for GPD's sigma and xi (0<xi<1)
#' @param plots which parameter plots do you want
#' @param R number of bootstrap estimates for confidence intervals
#' @param ncpus if you want to make bootstrap on several cores
#' @return three plots summarizing the stability of the parameters to threshold. The starting threshold admits kappa=1 and its confidence interval ;
#' according Papastathopoulos & Tawn (2013)
#' @return a list with thresholds used, GP parameters and CIs, optimal threshold and xi.
#' @export
#'
#' @importFrom graphics abline arrows legend lines par plot points polygon title
choosethres <- function(data, thresh, guess = c(1, 0.1), plots = 1:3,
                        R = 200, ncpus = 1) {
  thresh <- sort(thresh)
  # Point estimates (kappa, sigma, xi) and their bootstrap CIs,
  # one row/slice per candidate threshold.
  pe <- matrix(0, ncol = 3, nrow = length(thresh))
  se <- array(0, dim = c(length(thresh), 3, 2))
  # Fit the extended GP model to the excesses over the smallest threshold.
  fi <- suppressWarnings(.gp.pw.fit(data[data > thresh[1]] - thresh[1],
                                    init = c(1, guess), CI = TRUE,
                                    R = R, ncpus = ncpus))
  pe[1, ] <- fi$fit$PWM
  se[1, , ] <- t(fi$CI$PWM)
  # Refit above each remaining threshold.  seq_along(thresh)[-1] is empty
  # when only one threshold is supplied (2:length(thresh) would wrongly
  # iterate over c(2, 1) in that case).
  for (i in seq_along(thresh)[-1]) {
    print(i)  # progress indicator: bootstrap fits can be slow
    fi <- suppressWarnings(.gp.pw.fit(data[data > thresh[i]] - thresh[i],
                                      init = c(1, guess), CI = TRUE,
                                      R = R, ncpus = ncpus))
    pe[i, ] <- fi$fit$PWM
    se[i, , ] <- t(fi$CI$PWM)
  }
  # First threshold whose kappa CI contains 1: the Pareto regime starts there.
  iopt <- which((se[, 1, 1] < 1) & (se[, 1, 2] > 1))[1]
  if (is.na(iopt)) {
    warning("A pareto approximation of data is not available here")
  }
  # plots = NA suppresses all plotting.
  if (!(length(plots) == 1 && is.na(plots))) {
    plots <- sort(unique(plots))
    if (!isTRUE(all(plots %in% 1:3))) {
      stop("Invalid plot selection. \nMust be a vector of integers containing indices 1, 2 or 3.")
    }
    old.par <- par(no.readonly = TRUE)
    par(mfrow = c(length(plots), 1), mar = c(4.5, 4.5, 3.1, 0.1))
    on.exit(par(old.par))
    for (i in plots) {
      # One stability plot per parameter: estimate, CI band, CI whiskers.
      ylims <- c(min(se[, i, ], pe[, i]), max(se[, i, ], pe[, i]))
      plot(x = thresh, y = pe[, i], pch = 20, xlab = "Threshold", bty = "l",
           ylab = switch(i, expression(kappa), expression(sigma), expression(xi)),
           ylim = ylims, type = "n")
      polygon(c(thresh, rev(thresh)), c(se[, i, 1], rev(se[, i, 2])),
              col = "gray57", border = FALSE)
      abline(v = thresh[iopt], lty = 2, col = "seagreen")
      if (i == min(plots)) {
        title("Parameter stability plots for EGP")
      }
      if (i == 1) {
        # Reference line at kappa = 1 (pure GPD).
        abline(h = 1, lwd = 0.5, col = "gray20", lty = 2)
      }
      arrows(x0 = thresh, y0 = se[, i, 1], y1 = se[, i, 2],
             length = 0.05, angle = 90, code = 3)
      points(x = thresh, y = pe[, i], type = "b", pch = 20)
    }
  }
  return(invisible(list(par = pe, CI = se, thresh = thresh,
                        opt = c(thresh[iopt], pe[iopt, 3]))))
}
a2524df1dcfdf55c9b1a7c2323b854e4118ea6c2
b5822b9c2a756f4e540c426e7e84af35dae8caec
/rockchalk/inst/examples/centeredRegression.R
a3d92f0a19202d17fdb28e0b7b164419bb4266b6
[]
no_license
pauljohn32/rockchalk
0c75b7a7bc142669efcfabbc70d511f60c3f47e0
fc2d3d04396bf89ef020e824f50db3c348e3e226
refs/heads/master
2022-08-20T02:49:56.898990
2022-07-26T01:20:12
2022-07-26T01:20:12
8,965,635
8
5
null
2022-07-18T00:36:58
2013-03-23T04:07:35
R
UTF-8
R
false
false
13,182
r
centeredRegression.R
## Paul Johnson  pauljohn@ku.edu  2012-03-09
##
## Demonstration (using rockchalk) that "mean centering" the predictors of a
## regression with an interaction changes nothing substantive: the centered
## and uncentered fits describe the same fitted surface, produce identical
## predictions, and the centered coefficients and standard errors can be
## reproduced exactly from the uncentered fit by evaluating the slope at
## (mean(x1), mean(x2)).

library(rockchalk)

set.seed(222233)
dat3 <- genCorrelatedData3(y ~ 0.1 + 0.2 * x1 + 0.3 * x2 - 0.2 * x1:x2,
                           N = 100000, rho = .31, stde = 80,
                           means = c(x1 = 50, x2 = 50), sds = c(10, 10))

## Additive model: every coefficient looks "significant".
m1 <- lm(y ~ x1 + x2, data = dat3)
summary(m1)
dev.new()
plotPlane(m1, plotx1="x1", plotx2="x2")

## Add the interaction: x1 and x2 now look "insignificant" -- the usual
## motivation for mean centering.
m2 <- lm(y ~ x1 * x2, data = dat3)
summary(m2)
dev.new()
plotPlane(m2, plotx1 = "x1", plotx2 = "x2", theta = -10)

## Mean-centered refit via rockchalk ...
m2mc <- meanCenter(m2)
summary(m2mc)

## ... and the same model fitted by hand with scale() to verify meanCenter().
dat3$x1c <- as.numeric(scale(dat3$x1, center = TRUE, scale = FALSE))
dat3$x2c <- as.numeric(scale(dat3$x2, center = TRUE, scale = FALSE))
m2manualmc <- lm(y ~ x1c*x2c, dat3)
summary(m2manualmc)
m2mcmanualpred <- fitted(m2manualmc)

## The centered and uncentered fits draw the same surface:
par(mfcol = c(1,2))
plotPlane(m2, "x1", "x2", plotPoints = FALSE, theta = -25,
          main = "Not Mean Centered", ticktype = "detailed")
plotPlane(m2mc, "x1", "x2", plotPoints = FALSE, theta = -25,
          main = "Mean Centered", ticktype = "detailed")
par(mfcol = c(1,1))

## Same point in 2D: the two sets of fitted values coincide exactly.
plot(fitted(m2), fitted(m2mc),
     xlab = "predicted from uncentered x",
     ylab = "predicted from centered x",
     main = "(Not)Centered Predictions Identical")

## Slope in the x1 direction at mean(x2), from the uncentered fit:
## b1 + b3 * mean(x2).  Matches the centered model's x1 coefficient.
coef(m2)["x1"] + coef(m2)["x1:x2"] * mean(dat3$x2, na.rm = TRUE)
## Slope in the x2 direction at mean(x1): b2 + b3 * mean(x1).
coef(m2)["x2"] + coef(m2)["x1:x2"] * mean(dat3$x1, na.rm = TRUE)

## Standard errors at the same point, from vcov(m2); these reproduce the
## centered model's reported standard errors exactly.
V <- vcov(m2)
sqrt(V["x1","x1"] + mean(dat3$x2)^2 * V["x1:x2","x1:x2"] +
       2 * mean(dat3$x2) * V["x1","x1:x2"])
sqrt(V["x2","x2"] + mean(dat3$x1)^2 * V["x1:x2","x1:x2"] +
       2 * mean(dat3$x1) * V["x2","x1:x2"])

## Residual centering: yet another parameterization of the same fit.
m2rc <- residualCenter(m2)
summary(m2rc)

op <- par(no.readonly = TRUE)
par(mar = c(2,2,2,1), mfcol = c(2,2))
plotPlane(m1, "x1", "x2", plotPoints = TRUE, theta = -25,
          main = "No Interaction", ticktype = "detailed")
plotPlane(m2, "x1", "x2", plotPoints = TRUE, theta = -25,
          main = "Not Centered", ticktype = "detailed")
plotPlane(m2rc, "x1", "x2", plotPoints = TRUE, theta = -25,
          main = "Residual Centered", ticktype = "detailed")
plotPlane(m2mc, "x1", "x2", plotPoints = TRUE, theta = -25,
          main = "Mean Centered", ticktype = "detailed")
par(mfcol = c(1,1))

## Double-check residualCenter() by doing the residual centering manually.
rcreg <- lm(I(x1 * x2) ~ x1 + x2, data = dat3)
rcx1x2 <- resid(rcreg)
m2manualrc <- lm(y ~ x1 + x2 + rcx1x2, data = dat3)
predm2manualrc <- predict(m2manualrc, newdata = dat3)

## Collect predictions from every variant and compare them.
m2 <- lm(y ~ x1 * x2, data = dat3)
m2mc <- meanCenter(m2)
m2rc <- residualCenter(m2)
m2pred <- predict(m2, newdata = dat3)
dat3mc <- dat3
dat3mc$x1 <- dat3mc$x1c
dat3mc$x2 <- dat3mc$x2c
m2mcpred <- predict(m2mc, newdata = dat3mc)
m2rcpred <- predict(m2rc, newdata = dat3)
dat4 <- data.frame("m2pred"= m2pred, "m2mcpred" = m2mcpred,
                   "m2mcmaunal"= m2mcmanualpred, "m2rcpred" = m2rcpred,
                   "m2rcpred2" = predm2manualrc)
head(dat4)
cor(dat4)

## All five prediction vectors are identical, and the interaction
## coefficient agrees across the uncentered, mean-centered, and
## residual-centered fits: they are the same model expressed in
## different coordinate systems.
5a4328fc439a8cf9efa53cbd8d953b4bf0178d72
92e5a14420bbae0b17db12e284710b2814c6de6f
/R/20201109/data_preparation_for_analysis.R
abc3d085db0d36cff52c3d5750b564498aa1b5ab
[]
no_license
jaspershen/chuchu
25e96e8026c0f56d7b69174d1c55f9d4c60cb8c3
e74fdd06e561ae807e81efbb52e7afa376cbba0a
refs/heads/master
2023-01-30T00:17:55.029860
2020-12-09T00:46:36
2020-12-09T00:46:36
283,093,351
1
0
null
null
null
null
UTF-8
R
false
false
5,821
r
data_preparation_for_analysis.R
# Prepare lipidomics data for downstream analysis:
#   1. locate internal standards (IS) in the positive/negative peak tables,
#   2. export relative expression tables,
#   3. convert intensities to absolute concentrations with cal_abs().
# NOTE(review): depends on objects restored by load() below
# (lipid_table_pos/neg and, presumably, peak_table_pos/neg) and on helpers
# sourced from R/tools.R (cal_abs) -- confirm against the project repository.
sxtTools::setwd_project()
rm(list = ls())
source("R/tools.R")

# load data
load("data/lipid20200727/lipidsearch_result/lipid_table")
is_table_pos <- readr::read_csv("data/lipid20200727/internal_standard_table/is_table_pos.csv")
is_table_neg <- readr::read_csv("data/lipid20200727/internal_standard_table/is_table_neg.csv")

setwd("data/lipid20200727/data_preparation_for_analysis/")

library(tidyverse)

## first, we should find the is (internal standards) in the peak table,
## matching on m/z (tol 25; units per sxtMTmatch, presumably ppm -- confirm)
## and retention time (tol 30, absolute error)
data1 <- is_table_pos[, c("mz", "rt")]
data2 <- peak_table_pos[, c("mz", "rt")]
match_result_pos <- sxtTools::sxtMTmatch(data1 = as.matrix(data1),
                                         data2 = as.matrix(data2),
                                         mz.tol = 25,
                                         rt.tol = 30,
                                         rt.error.type = "abs")

data1 <- is_table_neg[, c("mz", "rt")]
data2 <- peak_table_neg[, c("mz", "rt")]
match_result_neg <- sxtTools::sxtMTmatch(data1 = as.matrix(data1),
                                         data2 = as.matrix(data2),
                                         mz.tol = 25,
                                         rt.tol = 30,
                                         rt.error.type = "abs")

colnames(lipid_table_pos)
colnames(lipid_table_neg)

# Assemble IS annotation (matched rows of the IS table) plus the sample
# intensity columns (names containing "X") of the matched peaks.
is_tag_pos <- is_table_pos[match_result_pos[, 1], ]
is_data_pos <- peak_table_pos[match_result_pos[, 2], ] %>%
  dplyr::select(contains('X'))
is_data_pos <- cbind(is_tag_pos, is_data_pos)
write.csv(is_data_pos, "is_data_pos.csv", row.names = FALSE)

is_tag_neg <- is_table_neg[match_result_neg[, 1], ]
is_data_neg <- peak_table_neg[match_result_neg[, 2], ] %>%
  dplyr::select(contains('X'))
is_data_neg <- cbind(is_tag_neg, is_data_neg)
write.csv(is_data_neg, "is_data_neg.csv", row.names = FALSE)

save(is_data_pos, file = "is_data_pos")
save(is_data_neg, file = "is_data_neg")

## output relative
# Columns 1:16 are lipid annotation; the rest are sample intensities.
# QC and blank ("Blk") columns are dropped from the expression data.
expression_data_relative <- rbind(lipid_table_pos, lipid_table_neg)
variable_info_relative <- expression_data_relative[, c(1:16)]
expression_data_relative <- expression_data_relative[, -c(1:16)]
expression_data_relative <- expression_data_relative %>%
  dplyr::select(-contains("QC")) %>%
  dplyr::select(-contains("Blk"))
variable_info_relative <- variable_info_relative %>%
  dplyr::select(peak_name, name, everything())

save(expression_data_relative, file = "expression_data_relative")
save(variable_info_relative, file = "variable_info_relative")
write.csv(expression_data_relative, "expression_data_relative.csv", row.names = FALSE)
write.csv(variable_info_relative, "variable_info_relative.csv", row.names = FALSE)

### calculate concentration for each samples and lipid
## positive
is_tag_pos <- is_data_pos[, 1:9]
is_table_pos <- is_data_pos[, -c(1:9)] %>% dplyr::select(contains("X"))
lipid_tag_pos <- lipid_table_pos[, c(1:16)]
lipid_table_pos <- lipid_table_pos[, -c(1:16)] %>% dplyr::select(contains("X"))
colnames(lipid_table_pos) == colnames(is_table_pos)  # sanity check: same samples
lipid_tag_pos$Class %>% table()
is_tag_pos$name
# Map each lipid class to the row index/indices of the IS used to quantify
# it (NA = no matching IS for that class).
match_item_pos <- list(
  "AEA" = NA,
  "Cer" = NA,
  "ChE" = c(9, 15),
  "CL" = NA,
  "DG" = 11,
  "HexlCer" = NA,
  "LPC" = 7,
  "LPE" = 8,
  "MG" = NA,
  "PC" = 1,
  "PE" = 2,
  "PI" = 5,
  "PS" = 3,
  "SM" = 13,
  "SPH" = NA,
  "SPHP" = NA,
  "ST" = NA,
  "TG" = 12
)
# rt * 60 -- presumably minutes -> seconds to match the IS table; confirm.
lipid_tag_pos$rt <- lipid_tag_pos$rt * 60
expresion_data_abs_pos <- cal_abs(lipid_tag = lipid_tag_pos,
                                  lipid_table = lipid_table_pos,
                                  is_tag = is_tag_pos,
                                  is_table = is_table_pos,
                                  match_item = match_item_pos)
express_data_abs_pos1 <- expresion_data_abs_pos$express_data_abs1
express_data_abs_pos2 <- expresion_data_abs_pos$express_data_abs2
variable_info_abs_pos <- expresion_data_abs_pos$variable_info_abs

## negative
is_tag_neg <- is_data_neg[, 1:9]
is_table_neg <- is_data_neg[, -c(1:9)] %>% dplyr::select(contains("X"))
lipid_tag_neg <- lipid_table_neg[, c(1:16)]
lipid_table_neg <- lipid_table_neg[, -c(1:16)] %>% dplyr::select(contains("X"))
colnames(lipid_table_neg) == colnames(is_table_neg)  # sanity check: same samples
lipid_tag_neg$Class %>% table()
is_tag_neg$name
# Negative mode: only PE has internal standards (rows 1 and 3).
match_item_neg <- list(
  "AEA" = NA,
  "Cer" = NA,
  "ChE" = NA,
  "CL" = NA,
  "DG" = NA,
  "HexlCer" = NA,
  "LPC" = NA,
  "LPE" = NA,
  "MG" = NA,
  "PC" = NA,
  "PE" = c(1, 3),
  "PI" = NA,
  "PS" = NA,
  "SM" = NA,
  "SPH" = NA,
  "SPHP" = NA,
  "ST" = NA,
  "TG" = NA
)
# Same rt unit conversion as the positive mode above.
lipid_tag_neg$rt <- lipid_tag_neg$rt * 60
expresion_data_abs_neg <- cal_abs(lipid_tag = lipid_tag_neg,
                                  lipid_table = lipid_table_neg,
                                  is_tag = is_tag_neg,
                                  is_table = is_table_neg,
                                  match_item = match_item_neg)
express_data_abs_neg1 <- expresion_data_abs_neg$express_data_abs1
express_data_abs_neg2 <- expresion_data_abs_neg$express_data_abs2
variable_info_abs_neg <- expresion_data_abs_neg$variable_info_abs

### combine positive and negative
express_data_abs1 <- rbind(express_data_abs_pos1, express_data_abs_neg1)
express_data_abs2 <- rbind(express_data_abs_pos2, express_data_abs_neg2)
variable_info_abs <- rbind(variable_info_abs_pos, variable_info_abs_neg)

# Mass of each ion formula via Rdisop (spaces stripped first), used to put
# express_data_abs1 on a per-mass (molar -- presumably; confirm) scale.
mass <- stringr::str_replace_all(variable_info_abs$IonFormula, " ", "") %>%
  purrr::map(.f = function(x) {
    x %>%
      Rdisop::getMolecule() %>%
      Rdisop::getMass()
  })
mass <- unlist(mass)

variable_info_abs <- data.frame(variable_info_abs, mass, stringsAsFactors = FALSE)
# Divide each row (lipid) of the absolute table by its mass
# (standard column-major recycling of `mass` across sample columns).
express_data_abs1 <- express_data_abs1 %>% as.matrix() %>% `/`(mass)

save(express_data_abs1, file = "express_data_abs1")
save(express_data_abs2, file = "express_data_abs2")
save(variable_info_abs, file = "variable_info_abs")
write.csv(express_data_abs1, "expression_data_abs1.csv", row.names = FALSE)
write.csv(express_data_abs2, "expression_data_abs2.csv", row.names = FALSE)
write.csv(variable_info_abs, "variable_info_abs.csv", row.names = FALSE)
f464e9c174a236f7724e78b093e5d629ef2f68a1
9ee9e9842dd26cb37d1ac23de196745dc5d42ad4
/PracticeRHadoop.R
bbda4a25c5fd617035d7ffd3bad4580454491832
[]
no_license
raviranjan-innoplexus/R-Hadoop-Integration
46a1be9a23ed6fe1b94585b0935fa50ace969c20
d8ed84570bb9b54c2e42adbed44c61473cc10a18
refs/heads/master
2021-01-20T20:52:11.546221
2016-06-23T07:05:32
2016-06-23T07:05:32
61,781,549
0
1
null
null
null
null
UTF-8
R
false
false
1,824
r
PracticeRHadoop.R
# PracticeRHadoop.R -- scratch script exploring several ways to read an
# HDFS-hosted CSV (Rate_PUF.csv) into R via rhdfs and rmr2 (RHadoop).

# Point the RHadoop packages at the local Hadoop installation.
Sys.setenv(HADOOP_CMD = "/home/hadoopuser/hadoop/bin/hadoop")
Sys.setenv("HADOOP_STREAMING" = "~/hadoop/share/doc/hadoop/hadoop-mapreduce/hadoop-streaming-2.6.0.jar")

library(rJava)
library(rhdfs)
hdfs.init()

# Approach 1: raw byte read of the HDFS file, then parse the text.
# NOTE(review): hdfs.read() on a single call returns only a limited chunk,
# which is the likely cause of the "far less data" observation below --
# read in a loop or use from.dfs() instead; confirm.
gd <- hdfs.file("/user/hadoopuser/Rate_PUF.csv", "r")
fd <- hdfs.read(gd)
chardata <- rawToChar(fd)

library(rmr2)
data <- read.table(textConnection(chardata), quote = "", sep = ",",
                   header = TRUE, fill = TRUE)
dim(data)  # getting far less data than there is
str(data)
summary(data)
View(data)

# Approach 2: line-reader handle on the same file.
dat <- hdfs.line.reader("/user/hadoopuser/Rate_PUF.csv")
x <- dat$read()
class(x)

# Approach 3: rmr2 from.dfs() with a csv input format.
datarate <- from.dfs("/user/hadoopuser/Rate_PUF.csv",
                     format = make.input.format("csv", sep = ","))
library(plyr)
datafr <- as.data.frame(datarate$val)
dataldply <- ldply(datarate, data.frame)
#dataldply = as.data.frame(dataldply)
datacolrem <- dataldply[, -1]
# Promote the first data row to column names, then drop that row.
colnames(datacolrem) <- as.character(unlist(datacolrem[1, ]))
datacolrem <- datacolrem[-1, ]

#data1 = read.hdfs("/user/hadoopuser/Rate_PUF.csv")
#data11 = as.matrix(data1$val)
#dim(data11)
#data11 = data11[-1,]
#str(data11)
#colnames(data11) = data11[1,]
#str(data11)

Rate_PUF <- read.csv("~/Rate_PUF.csv")

#########http://htmlpreview.github.io/?https://github.com/andrie/RHadoop-tutorial/blob/master/2-Taxi-analysis-with-RHadoop.html#/8##########
# Tutorial approach: read everything as character first, then reread with
# the column classes inferred from the header row.
datarate <- from.dfs("/user/hadoopuser/Rate_PUF.csv",
                     format = make.input.format("csv", sep = ",",
                                                colClasses = "character",
                                                stringsAsFactors = FALSE))
str(datarate)
head(values(datarate))

# NOTE(review): read.csv() here is handed an HDFS-style path but reads from
# the local filesystem -- confirm the file also exists locally.
headerInfo <- read.csv("/user/hadoopuser/Rate_PUF.csv", stringsAsFactors = FALSE)
headerInfo
colClasses <- as.character(as.vector(headerInfo[1, ]))
names(headerInfo)
colClasses
dataratefinal <- from.dfs("/user/hadoopuser/Rate_PUF.csv",
                          format = make.input.format("csv", sep = ",",
                                                     col.names = names(headerInfo),
                                                     colClasses = colClasses,
                                                     stringsAsFactors = FALSE))
3b3b7f09866d1f87dba83209752b8378bd909875
32901a09be158774089cbf8960d042e23764ad8d
/man/sgpca.Rd
123b85d4457a0a3bcf49a7e2f50b989457ba9e80
[]
no_license
cran/sGPCA
470a4ef4651cde5d8a99ff01e0935468fe9e1f97
f095235014f6f11cdac7ad31e03127df2f6a31c1
refs/heads/master
2021-01-19T16:56:35.076446
2012-07-05T00:00:00
2012-07-05T00:00:00
null
0
0
null
null
null
null
UTF-8
R
false
false
6,770
rd
sgpca.Rd
\name{sgpca} \alias{sgpca} %- Also NEED an '\alias' for EACH other topic documented here. \title{ Sparse Generalized Principal Component Analysis } \description{ Computes the rank \code{K} sparse, sparse non-negative, two-way sparse, and two-way sparse non-negative GPCA solutions. } \usage{ sgpca(X, Q, R, K = 1, lamu = 0, lamvs = 0, posu = FALSE, posv = FALSE, threshold = 1e-07, maxit = 1000, full.path = FALSE) } %- maybe also 'usage' for other objects documented here. \arguments{ \item{X}{The \code{n x p} data matrix. \code{X} must be of class \code{matrix} with all numeric values. } \item{Q}{The row generalizing operator, an \code{n x n} matrix. \code{Q} can be of class \code{matrix} or class \code{dcGMatrix}, must be positive semi-definite, and have operator norm one. } \item{R}{The column generalizing operator, an \code{p x p} matrix. \code{R} can be of class \code{matrix} or class \code{dcGMatrix}, must be positive semi-definite, and have operator norm one. } \item{K}{The number of GPCA components to compute. The default value is one. } \item{lamu}{The regularization parameter that determines the sparsity level for the row factor, \code{U}. The default value is 0. If the data is oriented with rows as samples, non-zero \code{lamu} corresponds to two-way sparse methods. } \item{lamvs}{A scalar or vector of regularization parameters that determine the sparsity level for the column factor, \code{V}. The default is 0, with non-zero values corresponding to sparse or two-way sparse methods. If \code{lamvs} is a vector, then the BIC method is used to select the optimal sparsity level. Alternatively, if \code{full.path} is specified, then the solution at each value of \code{lamvs} is returned. } \item{posu}{Flag indicating whether the row factor, \code{U} should be constrained to be strictly positive. The default value is FALSE. } \item{posv}{Flag indicating whether the column factor, \code{V} should be constrained to be strictly positive. The default value is FALSE. 
} \item{threshold}{Sets the threshold for convergence. The default value is \code{1e-07}. } \item{maxit}{Sets the maximum number of iterations. The default value is \code{1000}. } \item{full.path}{Flag indicating whether the entire solution path, or the solution at each value of \code{lamvs}, should be returned. The default value is FALSE. } } \details{ The \code{sgpca} function has the flexibility to fit combinations of sparsity and/or non-negativity for both the row and column generalized PCs. Regularization is used to encourage sparsity in the GPCA factors by placing an L1 penalty on the GPC loadings, \code{V}, and/or the sample GPCs, \code{U}. Non-negativity constraints on \code{V} and/or \code{U} yield sparse non-negative and two-way non-negative GPCA. Generalizing operators as described for \code{\link{gpca}} can be used with this function and have the same properties. When \code{lamvs=0}, \code{lamu=0}, \code{posu=0}, and \code{posv=0}, the GPCA solution also given by \code{\link{gpca}} is returned. The magnitude of the regularization parameters, \code{lamvs} and \code{lamu}, determine the level of sparsity of the factors \code{U} and \code{V}, with higher regularization parameter values yielding sparser factors. If more than one regularization value \code{lamvs} is given, then \code{sgpca} finds the optimal regularization parameter \code{lamvs} by minimizing the BIC derived from the generalized least squares update for each factor. If \code{full.path = TRUE}, then the full path of solutions (\code{U}, \code{D}, and \code{V}) is returned for each value of \code{lamvs} given. This option is best used with 50 or 100 values of \code{lamvs} to well approximate the regularization paths. Numerically, the path begins with the GPCA solution, \code{lamvs=0}, and uses warm starts at each step as \code{lamvs} increases. Proximal gradient descent is used to compute each rank-one solution. Multiple components are calculated in a greedy manner via deflation. 
Each rank-one solution is solved by iteratively fitting generalized least squares problems with penalties or non-negativity constraints. These regression problems are solved by the Iterative Soft-Thresholding Algorithm (ISTA) or projected gradient descent. } \value{ \item{U}{The left sparse GPCA factors, an \code{n x K} matrix. If \code{full.path} is specified with \code{r} values of \code{lamvs}, then \code{U} is a \code{n x K x r} array.} \item{V}{The right sparse GPCA factors, a \code{p x K} matrix. If \code{full.path} is specified with \code{r} values of \code{lamvs}, then \code{V} is a \code{p x K x r} array.} \item{D}{A vector of the K sparse GPCA values. If \code{full.path} is specified with \code{r} values of \code{lamvs}, then \code{D} is a \code{K x r} matrix.} \item{cumulative.prop.var}{The cumulative proportion of variance explained by the components} \item{bics}{The BIC values computed for each value of \code{lamvs} and each of the \code{K} components.} \item{optlams}{Optimal regularization parameter as chosen by the BIC method for each of the \code{K} components.} } \references{ Genevera I. Allen, Logan Grosenick, and Jonathan Taylor, "A generalized least squares matrix decomposition", arXiv:1102.3074, 2011. Genevera I. Allen and Mirjana Maletic-Savatic, "Sparse Non-negative Generalized PCA with Applications to Metabolomics", Bioinformatics, 27:21, 3029-3035, 2011. 
} \author{ Frederick Campbell } \examples{ data(ozone2) ind = which(apply(is.na(ozone2$y),2,sum)==0) X = ozone2$y[,ind] n = nrow(X) p = ncol(X) #Generalizing Operators - Spatio-Temporal Smoothers R = Exp.cov(ozone2$lon.lat[ind,],theta=5) er = eigen(R,only.values=TRUE); R = R/max(er$values) Q = Exp.cov(c(1:n),c(1:n),theta=3) eq = eigen(Q,only.values=TRUE) Q = Q/max(eq$values) #Sparse GPCA fit = sgpca(X,Q,R,K=1,lamu=0,lamvs=c(.5,1)) fit$prop.var #proportion of variance explained fit$optlams #optimal regularization param chosen by BIC fit$bics #BIC values for each lambda #Sparse Non-negative GPCA fit = sgpca(X,Q,R,K=1,lamu=0,lamvs=1,posv=TRUE) #Two-way Sparse GPCA fit = sgpca(X,Q,R,K=1,lamu=1,lamvs=1) #Two-way Sparse Non-negative GPCA fit = sgpca(X,Q,R,K=1,lamu=1,lamvs=1,posu=TRUE,posv=TRUE) #Return full regularization paths for inputted lambda values fit = sgpca(X,Q,R,K=1,lamu=0,lamvs=c(.1,.5,1),full.path=TRUE) } \keyword{ PCA } \keyword{principal components analysis} \keyword{multivariate analysis} \keyword{matrix factorization} \keyword{Sparse PCA} \keyword{Non-negative Matrix Factorization} \keyword{Non-negative PCA}
dbb6930058401389fa780b187297e3b08479e5d8
56a3fc1ea8c5308031d6622e0969ad8fe2e85305
/AlignementScoreComparison/AlignmentScoreComparison_1stPass/100Kreads_AScompare.R
d183572a31ada3ed649aac1453f944296de7b305
[]
no_license
shellywanamaker/C_virginica
fd9b5d6a53e29ecc6c2a6bd0b45ed0e929d561d6
666f010186d73dab7412356d95d61ae19aae4b25
refs/heads/master
2023-01-03T22:41:05.521829
2019-07-23T23:09:51
2019-07-23T23:09:51
null
0
0
null
null
null
null
UTF-8
R
false
false
8,989
r
100Kreads_AScompare.R
#another place for install tips: (https://github.com/al2na/methylKit/blob/master/README.md) #steven's solution for proper install: (https://sr320.github.io/MethylKittens/) #from command line, change the permissions on R library folders to make them writable #sudo chmod -R 777 /Users/Shelly/Library/R/ #sudo chmod -R 777 /Library/Frameworks/ #these are steps suggested by the Bioconductor troubleshooting guide (https://bioconductor.org/install/#install-bioconductor-packages) remove.packages("BiocInstaller", lib=.libPaths()) source("http://bioconductor.org/biocLite.R") biocLite("methylKit") library(methylKit) file.list=list("/Volumes/web/metacarcinus/Cvirginica/Bismark_attempt2/zr2096_1_dedup.sorted.bam", "/Volumes/web/metacarcinus/Cvirginica/Bismark_attempt2/zr2096_2_dedup.sorted.bam", "/Volumes/web/metacarcinus/Cvirginica/Bismark_attempt2/zr2096_3_dedup.sorted.bam", "/Volumes/web/metacarcinus/Cvirginica/Bismark_attempt2/zr2096_4_dedup.sorted.bam", "/Volumes/web/metacarcinus/Cvirginica/Bismark_attempt2/zr2096_5_dedup.sorted.bam", "/Volumes/web/metacarcinus/Cvirginica/Bismark_attempt2/zr2096_6_dedup.sorted.bam", "/Volumes/web/metacarcinus/Cvirginica/Bismark_attempt2/zr2096_7_dedup.sorted.bam", "/Volumes/web/metacarcinus/Cvirginica/Bismark_attempt2/zr2096_8_dedup.sorted.bam", "/Volumes/web/metacarcinus/Cvirginica/Bismark_attempt2/zr2096_9_dedup.sorted.bam", "/Volumes/web/metacarcinus/Cvirginica/Bismark_attempt2/zr2096_10_dedup.sorted.bam") file.list2=list("/Volumes/web/metacarcinus/Cvirginica/Bismark_attempt3/zr2096_1_dedup.sorted.bam", "/Volumes/web/metacarcinus/Cvirginica/Bismark_attempt3/zr2096_2_dedup.sorted.bam", "/Volumes/web/metacarcinus/Cvirginica/Bismark_attempt3/zr2096_3_dedup.sorted.bam", "/Volumes/web/metacarcinus/Cvirginica/Bismark_attempt3/zr2096_4_dedup.sorted.bam", "/Volumes/web/metacarcinus/Cvirginica/Bismark_attempt3/zr2096_5_dedup.sorted.bam", "/Volumes/web/metacarcinus/Cvirginica/Bismark_attempt3/zr2096_6_dedup.sorted.bam", 
"/Volumes/web/metacarcinus/Cvirginica/Bismark_attempt3/zr2096_7_dedup.sorted.bam", "/Volumes/web/metacarcinus/Cvirginica/Bismark_attempt3/zr2096_8_dedup.sorted.bam", "/Volumes/web/metacarcinus/Cvirginica/Bismark_attempt3/zr2096_9_dedup.sorted.bam", "/Volumes/web/metacarcinus/Cvirginica/Bismark_attempt3/zr2096_10_dedup.sorted.bam") file.list3=list("/Volumes/web/metacarcinus/Cvirginica/Bismark_attempt4/zr2096_1_dedup.sorted.bam", "/Volumes/web/metacarcinus/Cvirginica/Bismark_attempt4/zr2096_2_dedup.sorted.bam", "/Volumes/web/metacarcinus/Cvirginica/Bismark_attempt4/zr2096_3_dedup.sorted.bam", "/Volumes/web/metacarcinus/Cvirginica/Bismark_attempt4/zr2096_4_dedup.sorted.bam", "/Volumes/web/metacarcinus/Cvirginica/Bismark_attempt4/zr2096_5_dedup.sorted.bam", "/Volumes/web/metacarcinus/Cvirginica/Bismark_attempt4/zr2096_6_dedup.sorted.bam", "/Volumes/web/metacarcinus/Cvirginica/Bismark_attempt4/zr2096_7_dedup.sorted.bam", "/Volumes/web/metacarcinus/Cvirginica/Bismark_attempt4/zr2096_8_dedup.sorted.bam", "/Volumes/web/metacarcinus/Cvirginica/Bismark_attempt4/zr2096_9_dedup.sorted.bam", "/Volumes/web/metacarcinus/Cvirginica/Bismark_attempt4/zr2096_10_dedup.sorted.bam") file.list4=list("/Volumes/web/metacarcinus/Cvirginica/Bismark_attempt5/zr2096_1_dedup.sorted.bam", "/Volumes/web/metacarcinus/Cvirginica/Bismark_attempt5/zr2096_2_dedup.sorted.bam", "/Volumes/web/metacarcinus/Cvirginica/Bismark_attempt5/zr2096_3_dedup.sorted.bam", "/Volumes/web/metacarcinus/Cvirginica/Bismark_attempt5/zr2096_4_dedup.sorted.bam", "/Volumes/web/metacarcinus/Cvirginica/Bismark_attempt5/zr2096_5_dedup.sorted.bam", "/Volumes/web/metacarcinus/Cvirginica/Bismark_attempt5/zr2096_6_dedup.sorted.bam", "/Volumes/web/metacarcinus/Cvirginica/Bismark_attempt5/zr2096_7_dedup.sorted.bam", "/Volumes/web/metacarcinus/Cvirginica/Bismark_attempt5/zr2096_8_dedup.sorted.bam", "/Volumes/web/metacarcinus/Cvirginica/Bismark_attempt5/zr2096_9_dedup.sorted.bam", 
"/Volumes/web/metacarcinus/Cvirginica/Bismark_attempt5/zr2096_10_dedup.sorted.bam") file.list5=list("/Volumes/web/metacarcinus/Cvirginica/Bismark_attempt6/zr2096_1_dedup.sorted.bam", "/Volumes/web/metacarcinus/Cvirginica/Bismark_attempt6/zr2096_2_dedup.sorted.bam", "/Volumes/web/metacarcinus/Cvirginica/Bismark_attempt6/zr2096_3_dedup.sorted.bam", "/Volumes/web/metacarcinus/Cvirginica/Bismark_attempt6/zr2096_4_dedup.sorted.bam", "/Volumes/web/metacarcinus/Cvirginica/Bismark_attempt6/zr2096_5_dedup.sorted.bam", "/Volumes/web/metacarcinus/Cvirginica/Bismark_attempt6/zr2096_6_dedup.sorted.bam", "/Volumes/web/metacarcinus/Cvirginica/Bismark_attempt6/zr2096_7_dedup.sorted.bam", "/Volumes/web/metacarcinus/Cvirginica/Bismark_attempt6/zr2096_8_dedup.sorted.bam", "/Volumes/web/metacarcinus/Cvirginica/Bismark_attempt6/zr2096_9_dedup.sorted.bam", "/Volumes/web/metacarcinus/Cvirginica/Bismark_attempt6/zr2096_10_dedup.sorted.bam") myobj <- processBismarkAln(location = file.list, sample.id = list("1","2","3","4","5","6","7","8","9","10"), assembly = "v3", read.context="CpG", mincov=3, treatment = c(0,0,0,0,0,1,1,1,1,1)) #For each analysis myobj2 <- processBismarkAln(location = file.list2, sample.id = list("1","2","3","4","5","6","7","8","9","10"), assembly = "v3", read.context="CpG", mincov=3, treatment = c(0,0,0,0,0,1,1,1,1,1)) myobj3 <- processBismarkAln(location = file.list3, sample.id = list("1","2","3","4","5","6","7","8","9","10"), assembly = "v3", read.context="CpG", mincov=3, treatment = c(0,0,0,0,0,1,1,1,1,1)) myobj4 <- processBismarkAln(location = file.list4, sample.id = list("1","2","3","4","5","6","7","8","9","10"), assembly = "v3", read.context="CpG", mincov=3, treatment = c(0,0,0,0,0,1,1,1,1,1)) myobj5 <- processBismarkAln(location = file.list5, sample.id = list("1","2","3","4","5","6","7","8","9","10"), assembly = "v3", read.context="CpG", mincov=3, treatment = c(0,0,0,0,0,1,1,1,1,1)) #Find region coverage tiles <- 
tileMethylCounts(myobj,win.size=1000,step.size=1000) tiles2 <- tileMethylCounts(myobj2,win.size=1000,step.size=1000) tiles3 <- tileMethylCounts(myobj3,win.size=1000,step.size=1000) tiles4 <- tileMethylCounts(myobj4,win.size=1000,step.size=1000) tiles5 <- tileMethylCounts(myobj5,win.size=1000,step.size=1000) #find regions covered mmeth <- unite(tiles, min.per.group = 1L) mmeth2 <- unite(tiles2, min.per.group = 1L) mmeth3 <- unite(tiles3, min.per.group = 1L) mmeth4 <- unite(tiles4, min.per.group = 1L) mmeth5 <- unite(tiles5, min.per.group = 1L) RegionsCovered <- getData(mmeth)[,1:3] RegionsCovered2 <- getData(mmeth2)[,1:3] RegionsCovered3 <- getData(mmeth3)[,1:3] RegionsCovered4 <- getData(mmeth4)[,1:3] RegionsCovered5 <- getData(mmeth5)[,1:3] #sum coverage across samples RegionsCovered$cov_sum <- rowSums(getData(mmeth)[,grep("coverage", colnames(getData(mmeth)))], na.rm = TRUE) RegionsCovered2$cov_sum <- rowSums(getData(mmeth2)[,grep("coverage", colnames(getData(mmeth2)))], na.rm = TRUE) RegionsCovered3$cov_sum <- rowSums(getData(mmeth3)[,grep("coverage", colnames(getData(mmeth3)))], na.rm = TRUE) RegionsCovered4$cov_sum <- rowSums(getData(mmeth4)[,grep("coverage", colnames(getData(mmeth4)))], na.rm = TRUE) RegionsCovered5$cov_sum <- rowSums(getData(mmeth5)[,grep("coverage", colnames(getData(mmeth5)))], na.rm = TRUE) RegionsCovered$par <- "AS1.2" RegionsCovered2$par <- "AS0.6" RegionsCovered3$par <- "AS2" RegionsCovered4$par <- "AS1.2I60" RegionsCovered5$par <- "AS1.2I150" allRegionsCovered <- rbind(RegionsCovered,RegionsCovered2, RegionsCovered3, RegionsCovered4, RegionsCovered5) #plot coverage ggplot(allRegionsCovered, aes(start, cov_sum)) + geom_point(aes(colour = par, shape = par), alpha = 0.7) + scale_color_manual(values = c("orange","green","blue", "magenta","darkgray")) + xlab("position") + ylab("coverage") + facet_wrap(~chr) filter_myobj <- filterByCoverage(myobj,lo.count=3,lo.perc=NULL, hi.count=NULL,hi.perc=99.9) meth_filter=unite(filter_myobj, 
destrand=TRUE) meth <-unite(filter_myobj) myDiff <- calculateDiffMeth(meth) mmeth_diff <- calculateDiffMeth(mmeth) getMethylationStats(tiles[[2]],plot = TRUE, both.strands = FALSE) getCoverageStats(tiles[[2]],plot = TRUE, both.strands = FALSE)
2498f9aea967f08d0a5db8db309265a56f1326e7
7889f55c00a87518344f2e86c6f4828618a37505
/man/examplemetaheur.Rd
5a62ef48c2e0a7ed3f878fd537a5b9859e1563f7
[]
no_license
mvattulainen/preprocomb
b034947015200511d96c1184443b8d1135444617
3898d40541632c4097028cbe7a79cf5ee3e74f91
refs/heads/master
2021-01-18T22:50:20.107393
2016-11-09T10:08:44
2016-11-09T10:08:44
42,282,758
3
0
null
null
null
null
UTF-8
R
false
true
366
rd
examplemetaheur.Rd
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/00Utils.R \docType{data} \name{examplemetaheur} \alias{examplemetaheur} \title{metaheur example} \format{A MetaheurClass object} \usage{ examplemetaheur } \description{ examplemetaheur <- metaheur(examplegrid, model="svmRadial", iterations = 30, nholdout = 400) \cr } \keyword{internal}
cbbd0d95fa5a630fbf434d9186cb9da3a0ced444
088cfbc6e9b5bff3ab028adbf137be835d7687e5
/Life_expectancy_chang_M.R
b1bbbcfb3b6847c0531f6aff1eed744ce0a5e3f9
[]
no_license
facarranza/shareR
a7cf84eda859d226857fce07c7e2df442b42b368
e641a79b939df5bc3b8e2d80cb2f7748acb595fe
refs/heads/master
2020-10-02T03:42:05.904714
2020-04-17T20:22:07
2020-04-17T20:22:07
227,693,229
1
1
null
null
null
null
UTF-8
R
false
false
5,573
r
Life_expectancy_chang_M.R
" *************************************************************************** life_expectancy_chang_M.R --------------------- Date : December 2019 Copyright : (C) 2019 by Felipe Carranza Email : fcarranza@protonmail.com *************************************************************************** * * * This program is free software; you can redistribute it and/or modify * * it under the terms of the MIT License * * * * * *************************************************************************** __author__ = 'Felipe Carranza' __date__ = 'December 2019' __copyright__ = '(C) 2019, Felipe Carranza' " "Real case applied to El Salvador microdata census 2007 for municipal research" library(foreign) #Read the census microdata more than 5 Millions of rows poblation <- read.dta("BaseTotalPer.dta",convert.dates = T,convert.factors = F) str(poblation) #Show the variables library(tibble) glimpse(poblation) #alternativ to str view(poblation) A=data.frame(head(poblation[1:10,])) library(tidyverse) #select the variables that you need, in this case is munic code, Age, Sex Studysection = poblation %>% group_by(munic) %>% select(munic,p02,p03a,p09) #saving write.csv(Studysection,"investigacion_demo2.csv") ############################################## #FInding population by munic code, Age, Sex Studysection <- read.csv("investigacion_demo2.csv") Studysection = data.frame(Studysection$munic,Studysection$p02, Studysection$p03a) vivos=NULL vivos = Studysection %>% group_by(Studysection$Studysection.munic,Studysection$Studysection.p02, Studysection$Studysection.p03a) %>% count(n()) Studysection=NULL write.csv(vivos, "vivos.csv") #Finding deaths by munic code, Age, Sex library(foreign) #reading microdata census of mortality section poblation <- read.dta("BaseTotalMorta.dta",convert.dates = T,convert.factors = F) library(tibble) glimpse(poblation) #alternati to str #selecting munic, age, sex Studysection = poblation %>% group_by(munic) %>% select(munic,d01,d02) #saving 
write.csv(Studysection,"investigacion_demo2_muertos.csv") poblation=NULL #validating NA cases Studysection <- Studysection[!is.na(Studysection$d01),] #counting table(Studysection$munic,Studysection$d01, Studysection$d02) #new dataframe Studysection = data.frame(Studysection$munic,Studysection$d01, Studysection$d02) muerto=NULL #counting death number by munix, age, sex muertos = Studysection %>% group_by(Studysection$Studysection.munic,Studysection$Studysection.d01, Studysection$Studysection.d02) %>% count(n()) #saving write.csv(muertos,"muertos.csv") #rename cols life people colnames(vivos)[colnames(vivos) == "n()"] <- "vivos_count" colnames(vivos)[colnames(vivos) == "n"] <- "vivos_coun_auxiliar" colnames(vivos)[colnames(vivos) == "Studysection$Studysection.p02"] <- "sexo" colnames(vivos)[colnames(vivos) == "Studysection$Studysection.p03a"] <- "edad" #rename cols death people colnames(muertos)[colnames(muertos) == "n()"] <- "muertos_count" colnames(muertos)[colnames(muertos) == "n"] <- "muertos_coun_auxiliar" colnames(muertos)[colnames(muertos) == "Studysection$Studysection.d02"] <- "sexo" colnames(muertos)[colnames(muertos) == "Studysection$Studysection.d01"] <- "edad" #joining vivos_muertos=NULL vivos_muertos = vivos %>% left_join(muertos) #saving write.csv(vivos_muertos,"vivos_muertos.csv") muertos=NULL poblation=NULL Studysection=NULL vivos=NULL #finding mortality life table by munic, sex #Munic man age vivos_muertos=data.frame(vivos_muertos) vivos_muertos_hombres = vivos_muertos %>% dplyr::filter(vivos_muertos$sexo == 1) write.csv(vivos_muertos_hombres,"vivos_muertos_hombres.csv") #Munic woman age vivos_muertos_mujeres = vivos_muertos %>% dplyr::filter(vivos_muertos$sexo == 2) write.csv(vivos_muertos_mujeres,"vivos_muertos_mujeres.csv") municipios = vivos_muertos$Studysection.Studysection.munic municipios = data.frame(municipios) municipios_list= municipios %>% group_by(municipios) %>% count() write.csv(municipios_list,"Municipios_list.csv") mun #expecting 
life, for man, repeat the same procedure for woman #NA issue #TODO: Adjust for ages wihtout mortality cases vivos_muertos_hombres = vivos_muertos_hombres %>% mutate_all(funs(replace(., is.na(.), 0))) #saving write.csv(vivos_muertos_hombres,"vivos_muertos_hombres.csv") municipios_list[1,1] municipios_list = data.frame(municipios_list) vivos_muertos_hombres =data.frame(vivos_muertos_hombres) data11 = vivos_muertos_hombres %>% filter(vivos_muertos_hombres$Studysection.Studysection.munic == municipios_list[10,1]) library(tidyverse) data11=data.frame(data11) dat11a = data11 %>% mutate(mdx = data11$muertos_count/ data11$vivos_count ) %>% mutate(qx = (2*mdx) / (2 + mdx)) %>% # death probability mutate(px=1-qx) #survival probability dat11a$lx=0 dat11a$lx[1]=100000 #hipotetical cohort for(i in 2:nrow(dat11a)){ #lx dat11a$lx[i] = dat11a$px[i-1]*dat11a$lx[i-1] } dat11a = dat11a %>% mutate(Lx= (0.5)*(lx + lead(lx)) ) %>% #Age persons lifed LX mutate(Lx= if_else(is.na(Lx),lx,Lx))%>% mutate(Tx= rev(cumsum(rev(Lx)))) %>% # Age for to live mutate(ex= Tx/lx)# %>% # Life expectancy # mutate(exsum= ex + x) %>% # Life expectancy other alternative # mutate(hx = (log(SupPcohortes) -log(lead(SupPcohortes) ))/n) #Hazard ratio if you need it
d22162d0699a632310cfc017bb19c9d2625d4f1c
47d321f1c911444704812e2c7aa403553163fe2f
/scripts/genSimData.R
2f23d9377ffedf5bfd80d163bec1598eccc71864
[]
no_license
jaredpark/cleanNLF
e57502fe5f15d6676f64fdb7a0c50266d4351f95
f7e129fa9f1a16215d69a99b756cf48db17333b7
refs/heads/master
2020-03-30T21:32:32.212472
2013-07-09T23:52:35
2013-07-09T23:52:35
11,297,064
0
0
null
null
null
null
UTF-8
R
false
false
1,044
r
genSimData.R
gameInfoDate = 'sim' mmDate = 'sim' nTargetRows = 1600 nGamesPerWeek = 14 firstWeek = 5 lastWeek = 16 nSeas = round(nTargetRows/(nGamesPerWeek*(lastWeek - firstWeek + 1)), 0) weeks = firstWeek:lastWeek gameWeeks = rep(weeks, rep(nGamesPerWeek, length(weeks))) weekColumn = rep(gameWeeks, nSeas) seasColumn = rep((2007-nSeas+1):2007, rep(nGamesPerWeek*(lastWeek-firstWeek+1), nSeas)) nRows = length(seasColumn) feat1 = abs(rnorm(nRows, 10, 2)) feat2 = abs(rnorm(nRows, 200, 40)) feat3 = abs(rnorm(nRows, 2, .25)) nNoiseFeat = 20 genNoise = function(n){ return(rnorm(n)) } noiseFeat = replicate(nNoiseFeat, genNoise(nRows)) response = 2*sqrt(feat1)+5.1*feat3^2+10*log(feat2) MM = data.frame(cbind('Season' = seasColumn, 'Week' = weekColumn, 'Home' = seq(1:nRows), 'Away' = seq(1:nRows), feat1, feat2, feat3, noiseFeat, 'Response' = response)) gameInfo = MM[,1:4] save(gameInfo, file = paste(gameInfoDir, '/gameInfo_', gameInfoDate, '.Rout', sep = '')) save(MM, file = paste(gameInfoDir, '/MM_', mmDate, '.Rout', sep = ''))
0664d4dec0ef58cf8108d8f33a0621fd38cfc3e9
f9a9f9e260498c0ff96809dee0fb9697c75b064e
/time_series_blog_1/timeseries/timeseries2.R
0bbb4a6ade70e6e7f7a6fec9911e077162609238
[]
no_license
kayfay/R_programming
4b94f9af3684589c0d29ac47a7f740bb313908a8
3f1b3325c2f16d3f578c5f672341b216879aa263
refs/heads/master
2021-04-24T20:54:29.637163
2019-07-07T17:09:26
2019-07-07T17:09:26
117,136,236
0
0
null
null
null
null
UTF-8
R
false
false
2,454
r
timeseries2.R
# Federal Reserve Economic Data - from Quandl FRED/A791RX0Q048SBEA Retailer Sales FRED/RETAILSMSA # Data ranges from Jan 1992 to Sep 2017 # Using the Quandl package to aquire a dataset from the Federal Reserve Economic Data library(forecast) library(Quandl) feddata <- Quandl("FRED/RETAILSMSA", type="ts") print("Rplot9") png(filename="Rplot9.png") plot.ts(feddata, main = "Federal Reserve Economic Data: Retail Sales", ylab = "Dollars in Thousands", xlab = "Years") dev.off() # autocorrelation function for differenced series print("Rplot10") png(filename="Rplot10%d.png") acf(diff(feddata), xaxp = c(0, 75, 4), lag.max=75, main="") pacf(diff(feddata), xaxp = c(0, 75, 4), lag.max=75, main="") dev.off() # autocorrelation function for an autoregressive integrated moving average arima_fit <- auto.arima(feddata) # inspecting arima_fit # The coefficients significantly different and are close to a # mean of zero standard deviations of the standard error # autocorrelation function for arima_fit looks good print("Rplot111 Rplot 112") png(filename="Rplot11%d.png") acf(arima_fit$residuals, main="") pacf(arima_fit$residuals, main="") dev.off() # our lag values also aren't crossing the significant zones print("Rplot121, Rplot122, Rplot123") png(filename="Rplot12%d.png") # plotting plot(arima_fit$residuals, main="", ylab = "Residuals", xlab="Time") abline(a=0, b=0) # histogram hist(arima_fit$residuals, main="", ylab="Frequency", xlab="Residuals") # qq plot qqnorm(arima_fit$residuals, ylab="Sample Quantiles", xlab="Theoretical Quantiles", main="") qqline(arima_fit$residuals) dev.off() plot(forecast(feddata, h=12), main = "Forecast into 2018 of Retailer Sales", xlab = "Time in Months", ylab = "Millions of Dollars",) # Graph png(filename="Rplot13%d.png") op <- par(cex.main = 1.5, mar = c(5, 6, 4, 5) + 0.6, mgp = c(3.5, 1, 0), cex.lab = 1.5, font.lab = 2, cex.axis = 1.5, bty = "n", las = 1) plot(forecast(feddata, h = 24), xlab = "", ylab = "", main = "Forecast into 2018 of Retailer 
Sales", type="l", axes = FALSE) axis(1) ; axis(2) mtext("Time in Months", side = 1, line = 2.5, cex = 1.5) mtext("Millions of Dollars", side = 2, line = 5, cex = 1.5, las = 0) dev.off() # lb/ub of forecast print("ub/lb of forecast") f.p <- forecast(feddata, h=12) abs(tail(feddata)[6] - tail(f.p$upper)[12]) / tail(feddata)[6] # [1] 0.1330501 abs(tail(feddata)[6] - tail(f.p$lower)[12]) / tail(feddata)[6] # [1] 0.04680049
bc7b52919706deb4a93a0aa4a7661937f1b82bef
ac15e81168f92c4bd7190703434d7a076fa5c6dc
/plot3.R
9f17332173f136c747ceec3ea427389a0e6b2a16
[]
no_license
suswaram/ExData_Plotting2
a7caaaf19a4ea0e3fc147b762470fd30121e2fe3
e3740609178e3517f7f7ce0a0d2b2231d12892bb
refs/heads/master
2016-09-06T14:21:27.025211
2014-05-12T21:52:24
2014-05-12T21:52:24
null
0
0
null
null
null
null
UTF-8
R
false
false
1,081
r
plot3.R
#install plyr package if not installed library(plyr) #install ggplot2 package if not already installed #install.packages("ggplot2") library(ggplot2) source("loadData.R") # Task3 Of the four types of sources indicated by the type (point, # nonpoint, onroad, nonroad) variable, which of these four sources # have seen decreases in emissions from 1999¨C2008 for Baltimore City? # Which have seen increases in emissions from 1999¨C2008? Use the # ggplot2 plotting system to make a plot for this assignment. # create subset only contain data about baltimore baltimore <- subset(NEI,NEI$fips == "24510") # calculating different sources' contribution to total emission pm25TypeBaltimore <- ddply(baltimore, .(year, type), summarise, totalEmissions = sum(Emissions)) # creating the plot png("plot3.png", width = 800, height = 600) g <- qplot(year, totalEmissions, data = pm25TypeBaltimore, facets = .~type) g + geom_point(aes(color = type), size = 4) + labs(y = "Total PM2.5 Emissions", title = "Sources of PM2.5 Emissions Per Year in Baltimore") ggsave('plot3.png') dev.off()
7335a55c4164bd9439615e2986155a26b0627373
a1bb4782472b046285e29132ea8faee76cecc6f8
/R/ch2.R
d9ffe8b005baf54cc8528f9655c82767336ec828
[]
no_license
cran/orth
bb155ab9fc0e26274f3c5925b71e0c4090b378df
cc72f36de41107427c5445a9700608bb40cf5e6c
refs/heads/master
2021-01-17T05:56:39.888184
2011-01-25T00:00:00
2011-01-25T00:00:00
null
0
0
null
null
null
null
UTF-8
R
false
false
52
r
ch2.R
`ch2` <- function(n) { return(n*(n-1)/2) }
342606e277a68aa904fd54aeb71889bb32dced25
f56d31e470b05737626c1c379b8277c66bfb078b
/R/coin_datacheck.R
7a87e710c1aeba5bd0b83595d79bd74fe7c3525a
[ "MIT" ]
permissive
Lucas-OlivCouto/COINr
81a242d6b0910b6d9286234b390cda4693a5263f
bda637b7fa25f1b09de730248b1eb263f0214ea0
refs/heads/master
2023-08-12T07:48:08.190793
2021-10-11T14:13:40
2021-10-11T14:13:40
null
0
0
null
null
null
null
UTF-8
R
false
false
8,490
r
coin_datacheck.R
#' Detailed unit data check and screener by data availability #' #' Gives detailed tables of data availability, and optionally screens units based on a data #' availability threshold and presence of zeros. Units can be optionally "forced" to be included or excluded, making #' exceptions for the data availability threshold. #' #' The two main criteria of interest are `NA` values, and zeros. The summary table gives percentages of #' `NA` values for each unit, across indicators, and percentage zero values (*as a percentage of non-`NA` values*). #' Each unit is flagged as having low data or too many zeros based on thresholds. #' #' This function currently only supports COINs as inputs, not data frames. #' #' @param COIN The COIN object #' @param dset The data set to be checked/screened #' @param ind_thresh A data availability threshold used for flagging low data and screening units if `unit_screen != "none"`. Default 0.66. Specify as a fraction. #' @param zero_thresh As ind_thresh but for non-zero values. Defaults to 0.05, i.e. it will flag any units with less than 5% non-zero values (equivalently more than 95% zero values). #' @param unit_screen Specifies whether and how to screen units based on data availability or zero values. #' * If set to `"none"` (default), does not screen any units. #' * If set to `"byNA"`, screens units with data availability below `ind_thresh` #' * If set to `"byzeros"`, screens units with non-zero values below `zero_thresh` #' * If set to `"byNAandzeros"`, screens units based on either of the previous two criteria being true. #' * If you simply want to force a unit or units to be excluded (without any other screening), use the `Force` argument and set `unit_screen = TRUE`. #' `unit_screen != "none"` outputs a new data set .$Data$Screened. #' @param Force A data frame with any additional countries to force inclusion or exclusion. First column is `"UnitCode"`. 
Second column `"Status"` either `"Include"` or `"Exclude"` for each country to force. #' @param out2 Where to output the results. If `"COIN"` (default for COIN input), appends to updated COIN, #' otherwise if `"list"` outputs to data frame. #' #' @importFrom dplyr select starts_with pull mutate filter #' #' @examples #' # build ASEM COIN #' ASEM <- assemble(IndData = ASEMIndData, IndMeta = ASEMIndMeta, AggMeta = ASEMAggMeta) #' # return stats to the COIN, plus screened data set, return to list #' ScreenedData <- checkData(ASEM, dset = "Raw", unit_screen = "byNA", #' ind_thresh = 0.9, out2 = "list") #' # See which units were removed #' print(ScreenedData$RemovedUnits) #' #' @return An updated COIN with data frames showing missing data in `.$Analysis`, and if `unit_screen != "none"` outputs a new data set .$Data$Screened. #' If `out2 = "list"` wraps missing data stats and screened data set into a list. #' #' @export checkData <- function(COIN, dset = NULL, ind_thresh = NULL, zero_thresh = NULL, unit_screen = "none", Force = NULL, out2 = "COIN"){ # Check input type. If not COIN, exit. if (!is.coin(COIN)){ # COIN obj stop("This function currently only supports COINs as inputs.") } # Check for dset. If not specified, exit. if (is.null(dset)){ stop("dset is NULL. 
Please specify which data set to operate on.") } ##----- SET DEFAULTS -------## # Done here because otherwise if we use regen, this input could be input as NULL if(is.null(ind_thresh)){ ind_thresh <- 2/3 } if(is.null(zero_thresh)){ zero_thresh <- 0.05 } if(is.null(unit_screen)){ unit_screen <- "none" } # Write function arguments to object, FTR COIN$Method$checkData$dset <- dset COIN$Method$checkData$ind_thresh <- ind_thresh COIN$Method$checkData$zero_thresh <- zero_thresh COIN$Method$checkData$unit_screen <- unit_screen COIN$Method$checkData$Force <- Force # Isolate indicator data out1 <- getIn(COIN, dset = dset) ind_data_only <- out1$ind_data_only #--- Check overall data availability nabyrow <- rowSums(is.na(ind_data_only)) # number of missing data by row zerobyrow <- rowSums(ind_data_only == 0, na.rm = TRUE) # number of zeros for each row nazerobyrow <- nabyrow + zerobyrow # number of zeros or NAs for each row Prc_avail = 1 - nabyrow/ncol(ind_data_only) # the percentage of data available Prc_nonzero = 1 - zerobyrow/(ncol(ind_data_only) - nabyrow) # the percentage of non zeros #Prc_avail_and_nonzero = 1 - nazerobyrow/ncol(ind_data_only) # the percentage of non zeros AND not NA data_avail <- data.frame(UnitCode = out1$UnitCodes, N_missing = nabyrow, N_zero = zerobyrow, N_miss_or_zero = nazerobyrow, PrcDataAll = Prc_avail*100, PrcNonZero = Prc_nonzero*100, #PrcDataAndNonZero = Prc_avail_and_nonzero*100, LowDataAll = Prc_avail < ind_thresh, ZeroFlag = Prc_nonzero < zero_thresh, LowDatOrZeroFlag = (Prc_avail < ind_thresh) | (Prc_nonzero < zero_thresh)) #--- Check data availability by group # the easiest way to do this is to loop over groups. 
Get first the index structure # (selects indicator codes plus all aggregation level columns/codes) agg_levels <- dplyr::select(COIN$Input$IndMeta, "IndCode" | dplyr::starts_with("Agg")) data_avail_bygroup <- data.frame("UnitCode" = out1$UnitCodes) for (ilev in 1:(ncol(agg_levels)-1)){ # loop over aggregation levels, except the last one agg1 <- dplyr::pull(agg_levels,1) # names of indicators agg2 <- dplyr::pull(agg_levels,ilev+1) # names of aggregation groups in the level above agg2_names <- unique(agg2) # only the names of agg level above (no repetitions) # pre-allocate a data frame for prc data availability d_avail_lev <- as.data.frame(matrix(NA, nrow = nrow(ind_data_only), ncol = length(agg2_names))) for (igroup in 1:length(agg2_names)){ # now looping over groups inside this level gname <- agg2_names[igroup] # select group name # get indicator codes belonging to group gcodes <- agg1[agg2 == gname] # get corresponding indicator columns ginds <- ind_data_only[gcodes] # now count prc data available and add to data frame d_avail_lev[,igroup] <- 100*rowSums(!is.na(ginds))/ncol(ginds) } # add column names (aggregation group names) to data availability table colnames(d_avail_lev) <- agg2_names # add to big table data_avail_bygroup <- cbind(data_avail_bygroup, d_avail_lev) } # Now add final column which says if country is included or not, if asked for if (unit_screen == "byNA"){ data_avail <- cbind(data_avail, Included = data_avail$LowDataAll == FALSE) } else if (unit_screen == "byzeros"){ data_avail <- cbind(data_avail, Included = data_avail$ZeroFlag == FALSE) } else if (unit_screen == "byNAandzeros"){ data_avail <- cbind(data_avail, Included = data_avail$LowDatOrZeroFlag == FALSE) } else { data_avail <- cbind(data_avail, Included = TRUE) } if (!is.null(Force)){ # if some countries to force include/exclude # convert to logical Force[2] <- Force[2]=="Include" # substitute in output table data_avail$Included[ data_avail$UnitCode %in% Force$UnitCode[Force$Status == TRUE] ] 
<- TRUE data_avail$Included[ data_avail$UnitCode %in% Force$UnitCode[Force$Status == FALSE] ] <- FALSE } if (unit_screen != "none"){ # create new data set which filters out the countries that didn't make the cut ScreenedData <- dplyr::filter(out1$ind_data, data_avail$Included) # units that are removed ScreenedUnits <- data_avail$UnitCode[!data_avail$Included] } if (out2 == "list"){ # write to a list return(list( MissDatSummary = data_avail, MissDatByGroup = data_avail_bygroup, ScreenedData = ScreenedData, RemovedUnits = ScreenedUnits )) } else if (out2 == "COIN") { # add summary tables to COIN eval(parse(text=paste0("COIN$Analysis$",dset,"$MissDatSummary<- data_avail"))) eval(parse(text=paste0("COIN$Analysis$",dset,"$MissDatByGroup<- data_avail_bygroup"))) if (unit_screen != "none"){ COIN$Data$Screened <- ScreenedData eval(parse(text=paste0("COIN$Analysis$",dset,"$RemovedUnits<- ScreenedUnits"))) } return(COIN) } else { stop("out2 not recognised, should be either COIN or list") } }
bbeb4881dd67593b1e4532e3ebb8ba1be902447e
ba9dc103b4a474fe3b64b501559017f329e9653f
/Scripts/Exercises_with_solutions.R
2afb7134d6b842b0711c91bf7eb6df990ef13c37
[]
no_license
edinkasia/Intro_DSHSC-Tutorial_week6
ffb534158066ded744a568ccbf13ce6dddba1838
8105b7e5bd9323dc83d741620ca6dde2eb84e8d4
refs/heads/main
2023-09-02T10:38:48.405000
2021-11-03T15:46:58
2021-11-03T15:46:58
424,154,352
0
0
null
2021-11-03T15:46:59
2021-11-03T09:01:43
R
UTF-8
R
false
false
9,867
r
Exercises_with_solutions.R
# You will need to load the required libraries first: tidyverse and here. library(tidyverse) library(here) # We will be using traumatic brain injury data from a Tidy Tuesday challenge # Learn more about the challenge and the data here (spend max 3 minutes looking at the linked page): # https://github.com/rfordatascience/tidytuesday/blob/master/data/2020/2020-03-24/readme.md # Let's start by loading in the data tbi_age <- read_csv('https://raw.githubusercontent.com/rfordatascience/tidytuesday/master/data/2020/2020-03-24/tbi_age.csv') tbi_year <- read_csv('https://raw.githubusercontent.com/rfordatascience/tidytuesday/master/data/2020/2020-03-24/tbi_year.csv') ### Task 1 # Save both datasets in the Raw_Data folder (use the 'here' library, as I showed in the demo) # Insert your code for Task 1 below this line: # Use a write_csv function, with the here() syntax # Remember, the folder name goes first, followed by file name write_csv(tbi_age, file = here("Raw_Data", "tbi_age.csv")) write_csv(tbi_year, file = here("Raw_Data", "tbi_year.csv")) ### Task 2 # We'll look at tbi_age first. Use your favourite functions for eyeballing data and write a couple of sentences about # this dataset (its dimensions, the variables inside, types of data, etc.) # Insert your code below this line: head(tbi_age) str(tbi_age) # Your description of the dataset # E.g. This dataset has 231 rows and 5 columns. It includes 5 variables: # age group, type of outcome, injury mechanism, estimated number of injuries # in total, and estimated rate per 100000 population. Estimated number and rate # are numeric, type of outcome and injury mechanism, are categorical nominal # variables stored as strings, and age is a categorical ordinal variable, # also stored as a string. # Task 3 # Are there any variables in this dataset that should be treated as factors? # Decide which ones, and turn them into factors. # Once you've done that, think about the order of factor levels. Does it need to be changed? 
If so, change it. # Insert your code below this line: # Solution: type, injury_mechanism and age_group are all categorical variables, # and so should be stored as factors. We can use the mutate() function to # change those variables, and the factor() function to turn them into factors. # No additional arguments are needed. tbi_age <- tbi_age %>% mutate(type = factor(type), injury_mechanism = factor(injury_mechanism), age_group = factor(age_group)) # When we used the default settings, the factor levels of age were a little # mixed up. We're able to sort them using the levels argument. # Notice how we've got overlapping age_group levels for children - # this would cause trouble if we just added different age groups up. tbi_age <- tbi_age %>% mutate(age_group = factor(age_group, levels = c("0-17", "0-4", "5-14", "15-24", "25-34", "35-44", "45-54", "55-64", "65-74", "75+", "Total"))) # Task 4 # Imagine that you are preparing a brief for the local health authority, and they have asked you to # answer the following questions. For each question, write a chunk of code that would provide the # answer in a clean and reproducible way. # Task 4.1 # Which injury mechanism caused the most deaths in total? # Insert your code below this line: # First, think about subsetting the data - which variables do you need? # You don't want to look at any particular age group, so you can # set age_group to be Total. You are interested in deaths, # so you can set type to be Deaths. Use the filter function, # with both of these criteria applied: tbi_age %>% filter(age_group == "Total" & type == "Deaths") # Using the line above, you already get a pretty small tibble, # from which you can read out the highest number. 
# If you wanted to make your life even easier, you can arrange # this tibble, with the highest number first: tbi_age %>% filter(age_group == "Total" & type == "Deaths") %>% arrange(desc(number_est)) # This solution is good for eyeballing, as we can see the number of injuries # in descending order. # Task 4.2 # Looking just at the totals, what is the number of ED visits, hospitalisations and deaths # associated with each injury mechanism? Present the results as a tibble. Save this tibble in your # Tables folder. # Insert your code below this line: # As before, start by thinking about filtering - you want all types of outcome, # but you're interested in the Total across age groups. So, you can set # age_group to "Total". You will want to save the tibble, so assign it into a new object. table_age <- tbi_age %>% filter(age_group == "Total") # If you wanted to, you could add the `arrange` function (like above), # just to ease interpretation. table_age <- tbi_age %>% filter(age_group == "Total") %>% arrange(desc(number_est)) # And then, for writing into a file, we use the write_csv function, # with the here helper, to put it into the required folder. write_csv(table_age, here("Tables", "table_age.csv")) # Task 4.3 # Focusing on the oldest age group (75+), draw a bar chart illustrating the number of hospitalisations # associated with each injury mechanism. Make sure that you add a title and appropriate axis labels. # Save this chart in your Plots folder. 
# Insert your code below this line: plot1 <- tbi_age %>% filter(age_group == "75+" & type == "Hospitalizations") %>% # the str_wrap function from stringr adds line breaks in our injury_mechanism # labels, so they look better in the plot mutate(injury_mechanism = str_wrap(injury_mechanism, width = 15)) %>% # reorder will sort the bars by height, so that the injury mechanism # with most cases is first ggplot(aes(x = reorder(injury_mechanism, desc(-number_est)), y = number_est)) + geom_bar(stat = "identity") + #or use geom_col labs(title = "Injuries, by mechanism, in the 75+ age group", x = "Injury mechanism", y = "Number of injuries") + coord_flip() # this swaps x and y coordinates, making the labels more visible # Note that the labels are very long, and there are different ways to deal with that: # https://stackoverflow.com/questions/1330989/rotating-and-spacing-axis-labels-in-ggplot2 ggsave(here("Plots", "Injuries_oldest_group.pdf")) # ggsave saves the most recent plot by default, but you can specify the plot # with the 'plot' argument ### Let's now look at tbi_year dataset. ### Task 5 # Use functions for eyeballing data and write a couple of sentences about # tbi_year dataset (its dimensions, the variables inside, types of data, etc.) # Insert your code below this line: head(tbi_year) str(tbi_year) # Your description of the dataset: # E.g. This dataset has 216 rows and 5 columns. It includes 5 variables: # injury mechanism, type of outcome, year, estimated number of injuries # in total, and estimated rate per 100000 population. Year, estimated number # and rate are numeric, type of outcome and injury mechanism, are categorical # nominal variables stored as strings. # Task 6 # Are there any variables in this dataset that should be treated as factors? # Decide which ones, and turn them into factors. # Once you've done that, think about the order of factor levels. Does it need to be changed? 
# Insert your code below this line: tbi_year <- tbi_year %>% mutate(injury_mechanism = factor(injury_mechanism), type = factor(type)) # We don't need to change the factor levels for any variable. # Task 7 intro # Imagine that you are preparing a brief for the local health authority, and they have asked you to # answer the following questions. For each question, write a chunk of code that would provide the # answer in a clean and reproducible way. # Task 7.1 # Plot a line chart that depicts the rate of deaths, per 100000 population, # from traumatic brain injury in years 2006-2014. # Save this chart in your Plots folder. # Insert your code below this line: plot2 <- tbi_year %>% filter(injury_mechanism == "Total" & type == "Deaths") %>% ggplot(aes(x = year, y = rate_est)) + geom_line() + geom_point() + # using two geoms makes the data points more visible labs(title = "Rate of deaths, per 100000 population, from traumatic brain injury", x = "Year", y = "Estimated rate") plot2 ggsave(here("Plots", "Total_death_rates.pdf")) # Task 7.2 # Go to this URL: https://www.cdc.gov/traumaticbraininjury/pdf/TBI-Surveillance-Report-FINAL_508.pdf # On page 14 of the report (page 15 of the pdf document), you'll find Figure 2A, # showing the rates, per 100000 population, of TBI-related ED visits, by year, # and mechanism of injury, 2006-2014. # Try and re-create this figure. # First, focus just on depicting the data (you can use the default settings of ggplot). # If you still have time, try reproducing this figure as closely as possible (colours, style, etc.). # Save the final version of your figure in your Plots folder. 
# Insert your code below this line: plot3 <- tbi_year %>% filter(type == "Emergency Department Visit" & injury_mechanism != "Total") %>% ggplot(aes(x = year, y = rate_est, colour = injury_mechanism)) + geom_line() + geom_point() + labs(title = "FIGURE 2A: ESTIMATED AGE-ADJUSTED RATES, PER 100,000 POPULATION,OF TRAUMATIC BRAIN INJURY– RELATED EMERGENCY DEPARTMENT VISITS, BY YEAR AND MECHANISM OF INJURY, 2006-2014", y = "Age-adjusted rates of TBI-related ED visits") + theme_bw() + theme(panel.border = element_blank(), panel.grid.major = element_blank(), panel.grid.minor = element_blank(), axis.line = element_line(colour = "black"), legend.position = "bottom", legend.title = element_blank()) plot3 ggsave(here("Plots", "TBI_related_ED_visits.pdf"), width=11, height=8.5)
30ebb438cd73a04dfc9996ae8e61eb7c56bf0a24
a1ec9c7e27a2ddae773164976f7a31072ceaffbc
/Plot2.R
bef7ee6f6afcca90928733f6c04962260418bdaf
[]
no_license
drg0919/ExData_Plotting1
00a722747687e4869af13afd47178c33bfb10e7c
41ccc5d7f46fbd19e66fa36ec6cf258d9f76c356
refs/heads/master
2021-01-08T18:29:28.290030
2020-02-21T09:57:44
2020-02-21T09:57:44
242,107,377
0
0
null
2020-02-21T09:55:56
2020-02-21T09:55:55
null
UTF-8
R
false
false
659
r
Plot2.R
my_data<-read.table("household_power_consumption.txt",sep=";",header=TRUE) my_subset<-subset(my_data,Date=="1/2/2007"|Date=="2/2/2007") my_subset$Date <- as.Date(my_subset$Date, format="%d/%m/%Y") my_subset$Time <- strptime(my_subset$Time, format="%H:%M:%S") my_subset[1:1440,"Time"] <- format(my_subset[1:1440,"Time"],"2007-02-01 %H:%M:%S") my_subset[1441:2880,"Time"] <- format(my_subset[1441:2880,"Time"],"2007-02-02 %H:%M:%S") png("Plot 2.png", height=480, width=480) plot(my_subset$Time,as.numeric(as.character(my_subset$Global_active_power)),type="l",xlab="",ylab="Global Active Power (kilowatts)") title(main="Global Active my_data Vs Time") dev.off()
96b078205e39a58658abd4b0e8a080406781c692
6c6235ddf3aee099cc429e6bf09c4f0453015993
/Text Processing bagian 1.R
a866255cb5654c81dd3a4c245469788900e5aebc
[]
no_license
GANGGAANURAGA/Text-Mining-di-R
6e9e55835ac165fe5d61ba15b2072ab2ab86abe5
a30f907d3f46fb54e8cb6134b60855d62aa75ce2
refs/heads/master
2022-12-10T21:23:36.167245
2020-09-13T13:41:49
2020-09-13T13:41:49
295,157,281
1
0
null
null
null
null
UTF-8
R
false
false
3,495
r
Text Processing bagian 1.R
# library yang digunakan library(tm) library(NLP) library(wordcloud) library(RColorBrewer) library(tokenizers) # data yang digunakan datatext = read.csv("DataLatihText.csv") dim(datatext) # membuat korpus / kumpulan dokumen corpusdc <- iconv(datatext$narasi, to = "UTF-8") # UTF koding karakter corpusdc <- Corpus(VectorSource(corpusdc)) inspect(corpusdc[1:6]) # memunculkan 6 dokumen teratas # pembersihan data text / cleaning text # set kalimat dalam bentuk huruf kecil semua corpusdc <- tm_map(corpusdc,tolower) inspect(corpusdc[1:6]) # punctuation period (full stop) ( . ) comma ( , ) question mark ( ? ) # exclamation mark ( ! ) colon ( : ) semicolon ( ; ) # single quotation marks ( ' ' ) double quotation marks ( " " ) corpusdc <- tm_map(corpusdc, removePunctuation) corpusdc <- tm_map(corpusdc, removeNumbers) inspect(corpusdc[1:6]) # stopword library(stopwords) stopwordID <- read.delim("ID-Stopwords.txt") ccStopwordID <- readline(stopwordID) # tokenize_words(corpusdc, stopwords = ccStopwordID) corpusdc <- tm_map(corpusdc, removeWords, ccStopwordID) inspect(corpusdc[1:6]) corpusdc <- tm_map(corpusdc, removeWords, c('dan','yg','yth','ini','dari','ada','tidak','itu', 'yang','untuk','dengan','dalam','dgn','bahwa', 'lagi','sebagai','jangan','bisa','satu', 'sudah','akan', 'kita', 'akan','dia','utk', 'telah','kepada','saya','kami','saat','jadi','tak','tdk','v', 'oleh','karena','mereka','pada','hanya','seperti','masih', 'anda','tersebut','adalah','the','orang','mau','atau','hari', 'semua','bukan','saja','para','tau','sdh','anak','jika', 'tahun','nya','kalau','banyak','masuk','gak','apa','belum', 'sampai','...','foto','lebih','seorang','tapi','baru', 'terjadi','dapat','info','ternyata','bagi','buat', 'juga','atas','tadi','kalo','bila','kali')) inspect(corpusdc[1:6]) corpusdc <- tm_map(corpusdc, stripWhitespace) inspect(corpusdc[1:6]) length(corpusdc) # pembobotan kata tf-idf dtmbdc = DocumentTermMatrix(corpusdc) # write.csv((as.matrix(dtmbdc)), "tfbdc.csv") dtmbdc.matrix = 
as.matrix(dtmbdc) inspect(dtmbdc) dtmtfidff <- weightTfIdf(dtmbdc, normalize = TRUE) dtmtfidff.matrix = as.matrix(dtmtfidff) inspect(dtmtfidff) # write.csv((as.matrix(dtmtfidff)), "tfidfbdc.csv") barplot <- rowSums(t(dtmbdc.matrix)) barplot barplot <- subset(barplot, barplot>=10) barplot barplot(barplot, las=2, col=rainbow(50), cex.names = 0.8) # wordcloud library(wordcloud) dtmbdcc = TermDocumentMatrix(corpusdc) dtmbdcc = as.matrix(dtmbdcc) bar <- sort(rowSums(dtmbdcc),decreasing = TRUE) set.seed(222) wordcloud(words = names(bar),freq = bar, max.words = 200, min.freq = 5, colors = brewer.pal(8,"Dark2"), scale = c(5,0,3), rot.per = 0.25) library(wordcloud2) g <- data.frame(names(bar), bar) colnames(g) <- c('narasi','freq') head(g) wordcloud2(g) wordcloud2(g,size = 0.8,shape = 'star',rotateRatio = 0.5,minSize = 1) # Sentimen analisis library(syuzhet) library(lubridate) library(ggplot2) library(scales) library(reshape2) library(dplyr) bdc <- iconv(corpusdc, to ='UTF-8') g1 <- get_nrc_sentiment(bdc) head(g1) barplot(colSums(g1), las=2, col=rainbow(10),main='Sentiment Analysis Untuk Berita Hoax')
456098a917e76d32b34c8982a2837bb0f828a6c2
c409ff3ea8b7c62efd962d37c83793d4fc0dc1bc
/tests/testthat.R
95928d8662da6d94d023551289976728b9df3982
[ "MIT" ]
permissive
markdly/conquestr
e225ecb1347957dc025c5c719d46624f56e01207
7994b3768e26acf1be4ac20821da66ba7f564deb
refs/heads/master
2021-04-30T10:41:43.513747
2018-09-12T05:46:02
2018-09-12T05:46:02
121,339,581
1
0
null
2018-09-12T05:46:03
2018-02-13T04:43:27
R
UTF-8
R
false
false
62
r
testthat.R
library(testthat) library(conquestr) test_check("conquestr")
2150ac614d8d2cabab7061b2eca65b54b80b0f82
bc0058818a923d1fc477fb55200365adfc1ea2e9
/plot1.R
9a311090bb314337a19cbe1466c740899432ebe2
[]
no_license
makoto/ExData_Plotting1
a149c2ee86afb448f5f6c8e7ae26a9bbfaaa961f
96a368f6a99c39a89fb78e4e2ca6b4e9f88f4270
refs/heads/master
2021-01-22T17:14:00.407784
2015-05-10T22:51:07
2015-05-10T22:51:07
35,388,928
0
0
null
2015-05-10T21:35:57
2015-05-10T21:35:56
null
UTF-8
R
false
false
367
r
plot1.R
png("plot1.png", width=4, height=4, units="in", res=300) power <- read.csv('./household_2002.txt', sep=";") power$Date <- as.Date(power$Date, "%d/%m/%Y") power$Time <- strptime(power$Time, '%H:%M:%S', tz = "UTC") hist(power$Global_active_power, breaks=20, col='red', main='Global Active Power', xlab='Global Active Power(kilowatts)') dev.off()
8e3a91f6470dc30eb1785f2ff57a77c11f1756d9
59004c819451c7f552159ec5b2ce500fa365d70d
/R/predict.QRNLMM.R
9be0521b7769e88d2f4d261dd894f319bc8a8706
[]
no_license
cran/qrNLMM
168ca22144c733fa192ef9bda53e93917a46279b
ac1d52d97a4f81205151895054cd553a3c5fd608
refs/heads/master
2022-09-05T00:17:55.503771
2022-08-18T11:40:05
2022-08-18T11:40:05
30,884,909
1
1
null
null
null
null
UTF-8
R
false
false
12,040
r
predict.QRNLMM.R
"predict.QRNLMM" = function(object,x = NULL,groups = NULL,covar = NULL,y = NULL,MC=1000,...){ thin = 5 if(!inherits(object,"QRNLMM")) stop("The object must be an object of the class QRNLMM.") if(is.null(x) & is.null(groups) & is.null(covar) & is.null(y)){ message("NOTE: No newdata provided. Returning fitted values for original dataset.") if(is.null(object$res$beta)){ # several quantiles nquant = length(object) p = as.numeric(lapply(object, function (x) x$res$p)) new.values = matrix(unlist(lapply(object, function (x) x$res$fitted.values)), ncol = nquant, byrow = FALSE) new.values = as.data.frame(new.values) colnames(new.values) = p }else{ # one quantile nquant = 1 new.values = data.frame(new.values = object$res$fitted.values) colnames(new.values) = object$res$p } return(new.values) } # Second case # no 'y' provided if(is.null(x) | is.null(groups)) stop("At least both 'x' and 'groups' must be provided.") if(any(is.na(x))) stop("There are some NA's values in x") if(any(is.na(groups)==TRUE)) stop("There are some NA's values in groups") if(nrow(as.matrix(x)) != length(groups)) stop("groups does not match with the provided data in x. (nrow(x) != length(groups))") if(is.null(object$res$beta)){ # many quantiles obj = object[[1]] p = as.numeric(lapply(object, function (x) x$res$p)) }else{ #one quantile obj = object p = object$res$p } dqnc = countall2( gsub(" ", "", paste0( deparse( obj$res$nlmodel), collapse = ""), fixed = TRUE) ) d = dqnc[1] q = dqnc[2] nc = dqnc[3] if(nc > 0){ # are there covariates? if(is.null(covar)){ stop("Covariates must be provided for the fitted model.") } if(any(is.na(covar))) stop("There are some NA's values in covar") if(nrow(as.matrix(covar)) != length(groups)) stop("'groups' does not match with the provided data in 'covar'. (nrow(covar) != length(groups))") } if(is.null(y)){ message("NOTE: response 'y' not provided. 
Population curves will be computed.") }else{ if(nrow(as.matrix(y)) != length(groups)) stop("NOTE: response y does not match with the provided data in groups. (length(y) != length(groups))") message("NOTE: response 'y' provided. Individual curves will be computed.") } groups = as.numeric(groups) #d,q,nc n = dim(obj$res$weights)[1] #subjects nquant = length(p) nj = c(as.data.frame(table(groups))[,2]) rand = rep(0,q) new.values = matrix(NA,sum(nj),nquant) # no covariates pb <- progress_bar$new( format = " Predicting... [:bar] :percent eta: :eta", total = length(nj)*nquant, clear = TRUE, width= 60, show_after = 0) pb$tick(0) #Sys.sleep(0.2) if(nc == 0){ # no covariates # one quantile if(nquant == 1){ for (j in 1:length(nj)){ pos = (sum(nj[1:j-1])+1):(sum(nj[1:j])) if(is.null(y)){ # sds = apply( # X = object$res$weights, # MARGIN = 2, # FUN = function(x) return( # fitdistrplus::fitdist( # x, # distr = "norm", # fix.arg = list(mean = 0))$estimate # ) # ) # # rand = qnorm(p = p,sd = sds) rand = apply(object$res$weights,2,quantile,probs = p) new.values[pos,1] = object$res$nlmodel(x = x[pos], fixed = object$res$beta, random = rand) pb$tick() }else{ chutebi0 = chutebi = rep(0,q) chutebibi0 = chutebibi = object$res$Psi # bibi^T bmetro = matrix(MHbi3(j=j,M=MC, y1 = y[pos],x1 = x[pos], cov1 = covar[pos,,drop = FALSE], bi=chutebi0, bibi=chutebibi0, d=d,q=q,p=p,nj=nj, beta=object$res$beta, sigmae=object$res$sigma, D=object$res$Psi, nlmodel=object$res$nlmodel),q,MC) rand_j = apply(bmetro[,seq(1,MC,by = thin)],1,mean) new.values[pos,1] = object$res$nlmodel(x = x[pos], fixed = object$res$beta, random = rand_j) pb$tick() } } #no covariates # several quantiles }else{ for (j in 1:length(nj)){ pos = (sum(nj[1:j-1])+1):(sum(nj[1:j])) for(k in 1:nquant){ if(is.null(y)){ # sds = apply( # X = object[[k]]$res$weights, # MARGIN = 2, # FUN = function(x) return( # fitdistrplus::fitdist( # x, # distr = "norm", # fix.arg = list(mean = 0))$estimate # ) # ) # # rand_k = qnorm(p = p[k],sd = sds) 
rand_k = apply(object[[k]]$res$weights,2,quantile,probs = p[k]) new.values[pos,1] = obj$res$nlmodel(x = x[pos], fixed = object[[k]]$res$beta, random = rand_k) pb$tick() }else{ chutebi0 = chutebi = rep(0,q) chutebibi0 = chutebibi = object[[k]]$res$Psi/n # bibi^T bmetro = matrix(MHbi3(j=j,M=MC, y1 = y[pos],x1 = x[pos], cov1 = covar[pos,,drop = FALSE], bi=chutebi0, bibi=chutebibi0, d=d,q=q,p=p[k],nj=nj, beta=object[[k]]$res$beta, sigmae=object[[k]]$res$sigma, D=object[[k]]$res$Psi, nlmodel=object[[k]]$res$nlmodel),q,MC) rand_j = apply(bmetro[,seq(1,MC,by = thin)],1,mean) new.values[pos,k] = obj$res$nlmodel(x = x[pos], fixed = object[[k]]$res$beta, random = rand_j) pb$tick() } } } } }else{ # with covariates # one quantile covar = as.matrix(covar) # one quantile if(nquant == 1){ for (j in 1:length(nj)){ pos = (sum(nj[1:j-1])+1):(sum(nj[1:j])) if(is.null(y)){ # sds = apply( # X = object$res$weights, # MARGIN = 2, # FUN = function(x) return( # fitdistrplus::fitdist( # x, # distr = "norm", # fix.arg = list(mean = 0))$estimate # ) # ) # # rand = qnorm(p = p,sd = sds) rand = apply(object$res$weights,2,quantile,probs = p) new.values[pos,1] = object$res$nlmodel(x = x[pos], fixed = object$res$beta, random = rand, covar=covar[pos,,drop=FALSE]) pb$tick() }else{ chutebi0 = chutebi = rep(0,q) chutebibi0 = chutebibi = object$res$Psi # bibi^T bmetro = matrix(MHbi3(j=j,M=MC, y1 = y[pos],x1 = x[pos], cov1 = covar[pos,,drop = FALSE], bi=chutebi0, bibi=chutebibi0, d=d,q=q,p=p,nj=nj, beta=object$res$beta, sigmae=object$res$sigma, D=object$res$Psi, nlmodel=object$res$nlmodel),q,MC) rand_j = apply(bmetro[,seq(1,MC,by = thin)],1,mean) pb$tick() new.values[pos,1] = object$res$nlmodel(x = x[pos], fixed = object$res$beta, random = rand_j, covar=covar[pos,,drop=FALSE]) } } # several quantiles }else{ for (j in 1:length(nj)){ pos = (sum(nj[1:j-1])+1):(sum(nj[1:j])) for(k in 1:nquant){ if(is.null(y)){ # sds = apply( # X = object[[k]]$res$weights, # MARGIN = 2, # FUN = function(x) return( # 
fitdistrplus::fitdist( # x, # distr = "norm", # fix.arg = list(mean = 0))$estimate # ) # ) # # rand_k = qnorm(p = p[k],sd = sds) rand_k = apply(object[[k]]$res$weights,2,quantile,probs = p[k]) new.values[pos,k] = obj$res$nlmodel(x = x[pos], fixed = object[[k]]$res$beta, random = rand_k, covar=covar[pos,,drop=FALSE]) pb$tick() }else{ chutebi0 = chutebi = rep(0,q) chutebibi0 = chutebibi = object[[k]]$res$Psi/n # bibi^T bmetro = matrix(MHbi3(j=j,M=MC, y1 = y[pos],x1 = x[pos], cov1 = covar[pos,,drop = FALSE], bi=chutebi0, bibi=chutebibi0, d=d,q=q,p=p[k],nj=nj, beta=object[[k]]$res$beta, sigmae=object[[k]]$res$sigma, D=object[[k]]$res$Psi, nlmodel=obj$res$nlmodel),q,MC) rand_j = apply(bmetro[,seq(1,MC,by = thin)],1,mean) pb$tick() new.values[pos,k] = obj$res$nlmodel(x = x[pos], fixed = object[[k]]$res$beta, random = rand_j, covar=covar[pos,,drop=FALSE]) } } } } } new.values = as.data.frame(new.values) colnames(new.values) = p return(new.values) }
7d318447df8c5484ba508c679661edf0adeaa68b
29585dff702209dd446c0ab52ceea046c58e384e
/saeSim/tests/testthat/test-select_cont.R
23b42fdd5dbfee2cc7785cf91d2a4a7ccb56aab5
[]
no_license
ingted/R-Examples
825440ce468ce608c4d73e2af4c0a0213b81c0fe
d0917dbaf698cb8bc0789db0c3ab07453016eab9
refs/heads/master
2020-04-14T12:29:22.336088
2016-07-21T14:01:14
2016-07-21T14:01:14
null
0
0
null
null
null
null
UTF-8
R
false
false
1,583
r
test-select_cont.R
context("select_cont") test_that(desc = "select_cont", { dat1 <- sim_base(base_id(nDomains=5, nUnits = 2)) %>% sim_gen_e() %>% as.data.frame expect_equal(sum(select_cont(dat1, 1L, "unit", "idD", TRUE)$e == 0), 5) expect_equal(sum(select_cont(dat1, 1L, "unit", "idD", FALSE)$e == 0), 5) expect_equal(sum(select_cont(dat1, 1L, "area", "idD", TRUE)$e == 0), 8) expect_equal(sum(select_cont(dat1, 1L, "area", "idD", FALSE)$e == 0), 8) expect_equal(sum(select_cont(dat1, 1L, "unit", NULL, TRUE)$e == 0), 9) expect_equal(sum(select_cont(dat1, 1L, "unit", NULL, FALSE)$e == 0), 9) expect_true(sum(select_cont(dat1, 0.5, "unit", "idD", TRUE)$e == 0) > 0) expect_true(sum(select_cont(dat1, 0.5, "area", "idD", TRUE)$e == 0) > 0) expect_true(sum(select_cont(dat1, 0.5, "unit", NULL, TRUE)$e == 0) > 0) dat2 <- sim_base(base_id(nDomains = 5, nUnits = 1:5)) %>% sim_gen_e() %>% as.data.frame expect_equal(sum(select_cont(dat2, 1L, "unit", "idD", TRUE)$e == 0), 10) expect_equal(sum(select_cont(dat2, 1L, "unit", "idD", FALSE)$e == 0), 10) expect_equal(sum(select_cont(dat2, 1L, "area", "idD", TRUE)$e == 0), 10) expect_equal(sum(select_cont(dat2, 1L, "unit", NULL, TRUE)$e == 0), 14) expect_equal(sum(select_cont(dat2, 1L, "unit", NULL, FALSE)$e == 0), 14) numberOfContObs <- (select_cont(dat2, nCont = c(0, 0, 0, 0.2, 1), "unit", "idD", TRUE)$idC %>% sum) (numberOfContObs >= 5 & numberOfContObs <= 9) %>% expect_true testthat::expect_equal(sum(select_cont(dat2, c(1, 4), "area", "idD", TRUE)$e == 0), 10) })
c416698e16116a7aa1c9701d86c406524a512d04
24a20d7690d8053c7f9e24091aca6446d758c0f1
/bus_setup.R
0c5fe6b7178e6f70b015fc72242a87d55a7c0d44
[]
no_license
brendanjodowd/bus
8d019e2aadeb22b69093456c2ff6dadc1dd89bad
e4525bada08a91f9d9b304ef5d8ed2f22fb34716
refs/heads/master
2020-04-22T01:50:47.162669
2019-02-10T21:35:49
2019-02-10T21:35:49
170,026,823
0
0
null
null
null
null
UTF-8
R
false
false
425
r
bus_setup.R
library(rjstat) library(rvest) library(dplyr) library(later) bus_url <- "http://rtpi.ie/Text/WebDisplay.aspx?stopRef=" interval_time <- 10 # 46a oconnell street 6059 # pearse street 346 my_bus_stop <- "346" supervalue_stop <- "4616" call_times <- function(stop_number){ paste0(bus_url,stop_number) %>% read_html() %>% html_node("table") %>% html_table() %>% data.frame() } times1 <- call_times(my_bus_stop)
f4594dc9105f190c12e7bbf0e54a5eb881b1c899
fecaed3dea0f640b38c43ea515a5fd615d127164
/OptimalControl/hamilSymTest/stateHiddenInput.R
f82ec418420ac1fa825bcc24eeafb487b59e23a3
[]
no_license
Dominik12345/R
57f272b1afb554090e4311a509c964f1bcc9b828
4843d80e9b3c4771df5dbefe32670a44773d7084
refs/heads/master
2018-09-03T08:35:56.354468
2018-06-04T05:32:39
2018-06-04T05:32:39
109,264,813
0
0
null
null
null
null
UTF-8
R
false
false
404
r
stateHiddenInput.R
hiddenInputState <- function(t, x, parameters, input) { with (as.list(parameters),{ w <- sapply(input, mapply, t) alpha1 = parameters[1] alpha2 = parameters[2] alpha3 = parameters[3] beta1 = parameters[4] beta2 = parameters[5] dx1= -alpha1 * x[1]+ t+ w[1] dx2= -alpha2 * x[2]+ beta1 * x[1]+ w[2] dx3= -alpha3 * x[3]+ beta2 * x[1]+ w[3] list(c(dx1,dx2,dx3)) }) }
b80dfe9ce29d79cfd7bd023f45d60681046f47ca
0c780d32356a4b3a2fefa9392ac545bd078de909
/R/sr-est.R
ebb361bbb1b03a0b3df5e6fc3b3ab30e34f6343e
[ "MIT" ]
permissive
heavywatal/futurervpa
80fdeb533017eba10478d760fd9e15c1cd5b34d5
8fc6cfa6e831c2b6e8245b4eb83d2a769cc508e9
refs/heads/master
2020-04-02T03:56:01.315555
2018-10-21T09:20:12
2018-10-21T09:20:12
null
0
0
null
null
null
null
UTF-8
R
false
false
31,644
r
sr-est.R
############ # RVPAの結果からMSYを計算する関数 # 主に使うのはSR.est(再生産関係をフィットし、MSYを計算)とSR.plot(フィットした結果をプロット) ############ ############ # 使い方 ############ if(0){ # マサバ太平洋のデータを読み込み; modelAはvpaの帰り値 modelA <- readRDS("modelA_res.Rdata") # MSY計算 res1 <- SR.est(modelA, what.est=c(TRUE,TRUE,TRUE), # HS,BH,RIのどれをフィットするか。 bref.year=2013:2015, # 生物パラメータを用いる期間 years=c(1970:2013), # 観測されたSR関係を用いる期間 er.log=TRUE, # 誤差。TRUEで対数正規誤差 fc.year=2013:2015, # MSY計算のさいに選択率を平均する期間 seed=1 # 乱数の種。この値を変えると乱数が変わるので結果も変わる ) res1$summary # 推定パラメータ、管理基準値の確認 # 再生産パラメータa,bはエクセルとほぼ一致するはずだが、管理基準値は確率的シミュレーションをもとに計算しているので、エクセルとは必ずしも一致しない。±5%くらいの違いはあるみたい # 結果のプロット(HSのみ) res1.pred <- plot.SR(res1,what.plot=c("hs")) # 結果のプロット(HS,BH,RIを全て) res1.pred <- plot.SR(res1,what.plot=c("hs","bh","ri")) allplot(res1) # 要約表・グラフの出力 } ############ # fit to S-R relationship & MSE estimation -- old version? ############ SR.est <- function(vpares,SSB.dat=NULL,R.dat=NULL,gamma1=0.0001,er.log=TRUE, years=as.numeric(colnames(vpares$naa)), # 親子関係に推定に使う年のベクトル bref.year=2011:2013,# B0やMSYを計算するさいの生物パラメータの範囲(2011-2013に変更、2016-06-06) fc.year=bref.year, # 将来予測をするときに仮定する選択率をとる年の範囲 seed=1,n.imputation=1, nyear=100, bias.correction=TRUE, # 確率的な将来予測を行うときにbias correctionをするかどうか eyear=0, # 将来予測の最後のeyear+1年分を平衡状態とする # FUN=median, # 漁獲量の何を最大化するか? FUN=mean, # 漁獲量の何を最大化するか? 
sigma=-1, #加入変動のCV。-1の場合にはobservedの値を使う N=1000, # stochastic計算するときの繰り返し回数 is.small=FALSE, # 将来予測の結果を返さない。 is.boot=1000,# 正の値であれば、SRフィットのノンパラメトリックブートストラップを行う is.Kobe=c(FALSE,FALSE,FALSE), # Kobeの計算をするかどうか。順番に、HS, BH, RIの順 is.5perlower=FALSE, # HSの折れ点を5%の確率で下回るときの親魚資源量 PGY=NULL, # PGY管理基準値を計算するかどうか。計算しない場合はNULLを、計算する場合はc(0.8,0.9,0.95)のように割合を入れる what.est=c(TRUE,TRUE,TRUE) # MSY等を推定するか。順番に、HS, BH, RIの順 ){ #####-------- 内部で使う関数の定義 HS <- function(p,R,SSB,gamma=gamma1,er.log=er.log,MLE=FALSE,a=NULL,b=NULL){ if(!is.null(a)) p[1] <- a if(!is.null(b)) p[2] <- b a <- exp(p[1]) b <- max(SSB)/(1+exp(-p[2])) if(isTRUE(MLE)) sigma <- exp(p[3]) Pred <- function(SSB) a*(SSB+sqrt(b^2+gamma^2/4)-sqrt((SSB-b)^2+gamma^2/4)) if(!isTRUE(MLE)){ if(er.log==FALSE) return(sum((R-Pred(SSB))^2)) else return(sum((log(R)-log(Pred(SSB)))^2)) } else{ if(er.log==FALSE){ obj <- length(R)*log(1/(sqrt(2*pi)*sigma))-1/2/sigma^2*sum( (R-Pred(SSB))^2 ) return(-obj) } else{ obj <- length(R)*log(1/(sqrt(2*pi)*sigma))-1/2/sigma^2*sum( (log(R)-log(Pred(SSB)))^2 ) return(-obj) } } } BH <- function(p,R,SSB,er.log=er.log,MLE=FALSE){ a <- exp(p[1]) b <- exp(p[2]) if(isTRUE(MLE)) sigma <- exp(p[3]) Pred <- function(SSB) a*SSB/(1+b*SSB) if(!isTRUE(MLE)){ if(er.log==FALSE) return(sum((R-Pred(SSB))^2)) else return(sum((log(R)-log(Pred(SSB)))^2)) } else{ if(er.log==FALSE){ obj <- length(R)*log(1/(sqrt(2*pi)*sigma))-1/2/sigma^2*sum( (R-Pred(SSB))^2 ) return(-obj) } else{ obj <- length(R)*log(1/(sqrt(2*pi)*sigma))-1/2/sigma^2*sum( (log(R)-log(Pred(SSB)))^2 ) return(-obj) } } } SL <- function(p,R,SSB,er.log=er.log,MLE=FALSE){ a <- exp(p[1]) # b <- exp(p[2]) if(isTRUE(MLE)) sigma <- exp(p[2]) Pred <- function(SSB) a*SSB if(!isTRUE(MLE)){ if(er.log==FALSE) return(sum((R-Pred(SSB))^2)) else return(sum((log(R)-log(Pred(SSB)))^2)) } else{ if(er.log==FALSE){ obj <- length(R)*log(1/(sqrt(2*pi)*sigma))-1/2/sigma^2*sum( (R-Pred(SSB))^2 ) return(-obj) } else{ obj <- length(R)*log(1/(sqrt(2*pi)*sigma))-1/2/sigma^2*sum( 
(log(R)-log(Pred(SSB)))^2 ) return(-obj) } } } RI <- function(p,R,SSB,er.log=er.log,MLE=FALSE){ a <- exp(p[1]) b <- exp(p[2]) if(isTRUE(MLE)) sigma <- exp(p[3]) Pred <- function(SSB) a*SSB*exp(-b*SSB) if(!isTRUE(MLE)){ if(er.log==FALSE) return(sum((R-Pred(SSB))^2)) else return(sum((log(R)-log(Pred(SSB)))^2)) } else{ if(er.log==FALSE){ obj <- length(R)*log(1/(sqrt(2*pi)*sigma))-1/2/sigma^2*sum( (R-Pred(SSB))^2 ) return(-obj) } else{ obj <- length(R)*log(1/(sqrt(2*pi)*sigma))-1/2/sigma^2*sum( (log(R)-log(Pred(SSB)))^2 ) return(-obj) } } } # HSを推定するための関数 get.HS <- function(R,SSB,er.log,gamma1,do.profile=TRUE){ reg0 <- lm(R~SSB-1) a0 <- reg0$coef b0 <- 0.9 # hockey-stick res.HS <- optim(c(log(a0),logit(b0)),HS,R=R,SSB=SSB,er.log=er.log,gamma=gamma1) s <- 1 for (j in seq(0.95,0.1,by=-0.05)){ res.HS0 <- optim(c(log(a0),logit(j)),HS,R=R,SSB=SSB,er.log=er.log,gamma=gamma1) if (res.HS0$value < res.HS$value) res.HS <- res.HS0 } res.HS <- optim(res.HS$par,HS,R=R,SSB=SSB,method="BFGS",er.log=er.log,gamma=gamma1) ofv.least.square <- res.HS$value res.HS <- optim(c(res.HS$par,log(sqrt(res.HS$value/length(R)))),HS,R=R,SSB=SSB,method="BFGS",er.log=er.log,gamma=gamma1,MLE=TRUE) a.HS <- exp(res.HS$par[1]) names(a.HS) <- NULL b.HS <- max(SSB)/(1+exp(-res.HS$par[2])) # 曲がるところのx軸(ssb_hs) # r0の計算 r0.HS <- pred.HS(b.HS,a=a.HS,b=b.HS,gamma=gamma1) # もし、b.HSが最大・最小SSBよりも大きい・小さかったら if(b.HS>max(SSB)|b.HS<min(SSB)){ b.HS <- ifelse(b.HS>max(SSB),max(SSB),b.HS) b.HS <- ifelse(b.HS<min(SSB),min(SSB),b.HS) tmpfunc <- function(x,r0,...) 
(pred.HS(a=x,...)-r0)^2 tmp <- optimize(tmpfunc,c(0,a.HS*10),b=b.HS,gamma=gamma1,SSB=b.HS,r0=r0.HS) a.HS <- tmp$minimum } # 尤度surfaceの計算 if(isTRUE(do.profile)){ a.grid <- c(seq(from=0.1,to=0.9,by=0.1),seq(from=0.91,to=1.09,by=0.02),seq(from=1.1,to=1.5,by=0.1)) * a.HS b.grid <- c(seq(from=0.1,to=0.9,by=0.1),seq(from=0.91,to=1.09,by=0.02),seq(from=1.1,to=1.5,by=0.1)) * b.HS b.grid <- b.grid[b.grid<max(SSB)] obj.data <- expand.grid(a=a.grid,b=b.grid) obj.data$obj <- NA obj.data$log.a <- log(obj.data$a) obj.data$conv.b <- -log(max(SSB)/obj.data$b-1) for(i in 1:nrow(obj.data)) { obj.data$obj[i] <- HS(c(obj.data$log.a[i],obj.data$conv.b[i]),R=R,SSB=SSB, MLE=FALSE,er.log=er.log,gamma=gamma1) } } else{ obj.data <- NA } return(list(a=a.HS,b=b.HS,r0=r0.HS,res=res.HS,obj.data=obj.data,ofv.least.square=ofv.least.square)) } # Beverton-Holt get.BH <- function(R,SSB,er.log){ reg0 <- lm(R~SSB-1) a0 <- reg0$coef b0 <- max(SSB) res.BH <- optim(c(log(a0),log(1/b0)),BH,R=R,SSB=SSB,method="BFGS",er.log=er.log) for (j in seq(0.9,0.1,by=-0.1)){ res.BH0 <- optim(c(log(a0),log(j/b0)),BH,R=R,SSB=SSB,er.log=er.log) if (res.BH0$value < res.BH$value) res.BH <- res.BH0 } # 最尤法で計算しなおしたもので上書き res.BH <- optim(c(res.BH$par,log(sqrt(res.BH$value/length(R)))),BH,R=R,SSB=SSB,method="BFGS",er.log=er.log,MLE=TRUE) a.BH <- exp(res.BH$par[1]) b.BH <- exp(res.BH$par[2]) return(list(a=a.BH,b=b.BH,res=res.BH)) } get.RI <- function(R,SSB,er.log){ reg0 <- lm(R~SSB-1) a0 <- reg0$coef b0 <- max(SSB) # Ricker res.RI <- optim(c(log(a0),log(1/b0)),RI,R=R,SSB=SSB,method="BFGS",er.log=er.log) for (j in seq(0.9,0.1,by=-0.1)){ res.RI0 <- optim(c(log(a0),log(j/b0)),RI,R=R,SSB=SSB,er.log=er.log) if (res.RI0$value < res.RI$value) res.RI <- res.RI0 } # 最尤法 res.RI <- optim(c(res.RI$par,log(sqrt(res.RI$value/length(R)))), RI,R=R,SSB=SSB,method="BFGS",er.log=er.log,MLE=TRUE) a.RI <- exp(res.RI$par[1]) b.RI <- exp(res.RI$par[2]) return(list(a=a.RI,b=b.RI,res=res.RI)) } ##### 関数定義終わり # R.datとSSB.datだけが与えられた場合、それを使ってシンプルにフィットする 
if(!is.null(R.dat) & !is.null(SSB.dat)){ dat <- data.frame(R=R.dat,SSB=SSB.dat,years=1:length(R.dat)) } else{ vpares$Fc.at.age <- rowMeans(vpares$faa[as.character(fc.year)]) # データの整形 n <- ncol(vpares$naa) L <- as.numeric(rownames(vpares$naa)[1]) dat <- list() dat$R <- as.numeric(vpares$naa[1,]) dat$SSB <- as.numeric(colSums(vpares$ssb)) dat$year <- as.numeric(colnames(vpares$ssb)) # 加入年齢分だけずらす dat$R <- dat$R[(L+1):n] dat$SSB <- dat$SSB[1:(n-L)] dat$year <- dat$year[(L+1):n] # データの抽出 dat <- as.data.frame(dat) dat <- dat[dat$year%in%years,] } R <- dat$R SSB <- dat$SSB # HS推定 # if(what.est[1]==TRUE){ tmp <- get.HS(R,SSB,er.log,gamma1) a.HS <- tmp$a; b.HS <- tmp$b ; r0.HS <- tmp$r0 sd.HS <- exp(tmp$res$par[3]) surface.HS <- tmp$obj.data ofv.least.square <- tmp$ofv.least.square res.HS <- tmp$res boot.HS <- matrix(NA,is.boot,3) jack.HS <- matrix(NA,length(R),3) colnames(boot.HS) <- colnames(jack.HS) <- c("a","b","r0") if(what.est[1]==TRUE&&is.boot>0){ # ブートストラップ for(i in 1:is.boot){ rand <- sample(length(R),size=length(R)*n.imputation,replace=TRUE) tmp <- get.HS(R[rand],SSB[rand],er.log,gamma1,do.profile=FALSE) boot.HS[i,] <- unlist(tmp[c("a","b","r0")]) } for(i in 1:length(R)){ tmp <- get.HS(R[-i],SSB[-i],er.log,gamma1,do.profile=FALSE) jack.HS[i,] <- unlist(tmp[c("a","b","r0")]) } # rownames(jack.HS) <- years } # 予測値 dat$pred.HS <- pred.HS(dat$SSB,a=a.HS,b=b.HS,gamma=gamma1) dat$log.resid.HS <- log(dat$R) - log(dat$pred.HS) # } if(0){ # 直線回帰 reg0 <- lm(R~SSB-1) a0 <- reg0$coef res.SL <- optimize(SL,c(0,log(a0)*10),R=R,SSB=SSB,er.log=er.log) res.SL <- optim(c(res.SL$minimum,log(sqrt(res.SL$objective/length(R)))), SL,R=R,SSB=SSB,er.log=er.log,MLE=TRUE) # res.SL$value <- res.SL$objective a.SL <- exp(res.SL$par[1]) boot.SL <- rep(NA,is.boot) jack.SL <- rep(NA,length(R)) if(is.boot>0){ for(i in 1:is.boot){ rand <- sample(length(R),replace=TRUE) tmp <- optimize(SL,c(0,log(a0)*10),R=R[rand],SSB=SSB[rand],er.log=er.log) boot.SL[i] <- exp(tmp$minimum[1]) } for(i in 
1:length(R)){ tmp <- optimize(SL,c(0,log(a0)*10),R=R[-i],SSB=SSB[-i],er.log=er.log) jack.SL[i] <- exp(tmp$minimum[1]) } rownames(jack.SL) <- years } } if(what.est[2]==TRUE){ # BH推定 tmp <- get.BH(R,SSB,er.log) a.BH <- tmp$a; b.BH <- tmp$b; res.BH <- tmp$res sd.BH <- exp(tmp$res$par[3]) boot.BH <- matrix(NA,is.boot,2) jack.BH <- matrix(NA,length(R),2) colnames(boot.BH) <- colnames(jack.BH) <- c("a","b") if(is.boot>0){ # ブートストラップ for(i in 1:is.boot){ rand <- sample(length(R),replace=TRUE) tmp <- get.BH(R[rand],SSB[rand],er.log) boot.BH[i,] <- unlist(tmp[c("a","b")]) } # ジャックナイフも for(i in 1:length(R)){ tmp <- get.BH(R[-i],SSB[-i],er.log) jack.BH[i,] <- unlist(tmp[c("a","b")]) } rownames(jack.BH) <- years } ## dat$pred.BH <- pred.BH(dat$SSB,a=a.BH,b=b.BH) dat$log.resid.BH <- log(dat$R) - log(dat$pred.BH) } if(what.est[3]==TRUE){ # RI推定 tmp <- get.RI(R,SSB,er.log) a.RI <- tmp$a ; b.RI <- tmp$b ; res.RI <- tmp$res sd.RI <- exp(tmp$res$par[3]) boot.RI <- matrix(NA,is.boot,2) jack.RI <- matrix(NA,length(R),2) colnames(boot.RI) <- colnames(jack.RI) <- c("a","b") if(is.boot>0){ # ブートストラップ for(i in 1:is.boot){ rand <- sample(length(R),replace=TRUE) tmp <- get.RI(R[rand],SSB[rand],er.log) boot.RI[i,] <- unlist(tmp[c("a","b")]) } # ジャックナイフも for(i in 1:length(R)){ tmp <- get.RI(R[-i],SSB[-i],er.log) jack.RI[i,] <- unlist(tmp[c("a","b")]) } rownames(jack.RI) <- years } ## dat$pred.RI <- pred.RI(dat$SSB,a=a.RI,b=b.RI) dat$log.resid.RI <- log(dat$R) - log(dat$pred.RI) } # 単に回帰だけする場合 if(!is.null(R.dat) & !is.null(SSB.dat)){ res <- list() paste2 <- function(x,...) 
paste(x,...,sep="") for(j in which(what.est)){ SR <- c("HS","BH","RI") xx <- c(get(paste2("a.",SR[j])), get(paste2("b.",SR[j])), get(paste2("sd.",SR[j])), get(paste2("res.",SR[j]))$value) names(xx) <- c("a","b","sd","value") res[[j]] <- list(parameter=xx, boot=get(paste2("boot.",SR[j])), jack=get(paste2("jack.",SR[j]))) } names(res) <- SR[what.est] return(res) } #-------------------- B0 & MSY for HS -------------------- # function to minimize # シミュレーション回数ぶんの漁獲量のFUN(mean, geomean, median)を最大化するFを選ぶ tmpfunc <- function(x,f.arg,FUN=FUN,eyear=eyear){ f.arg$multi <- x fout <- do.call(future.vpa2,f.arg) return(-FUN(fout$vwcaa[(nrow(fout$vwcaa)-eyear):nrow(fout$vwcaa),-1])) } tmpfunc2 <- function(x,f.arg,FUN=FUN,eyear=eyear,hsp=0){ f.arg$multi <- x fout <- do.call(future.vpa2,f.arg) tmp <- as.numeric(fout$vssb[(nrow(fout$vssb)-eyear):nrow(fout$vssb),-1]) lhs <- sum(tmp<hsp)/length(tmp) return( (lhs-0.05)^2 + as.numeric(lhs==0) + as.numeric(lhs==1) ) } get.Fhist <- function(farg,vpares,eyear,trace,hsp=0){ Fhist <- NULL original.sel <- farg$res0$Fc.at.age # original F for(j in 1:ncol(vpares$faa)){ farg$res0$Fc.at.age <- vpares$faa[,j] # change the selectivity farg$multi <- 1 tmp <- do.call(future.vpa2,farg) tmp2 <- get.stat(tmp,eyear=eyear,hsp=hsp) # browser() xx <- which.min(abs(trace$ssb.median-tmp2$ssb.median))+c(-1,1) range.tmp <- trace$fmulti[xx] if(is.na(range.tmp[2])) range.tmp[2] <- max(trace$fmulti)*2 if(xx[1]==0) range.tmp <- c(0,range.tmp[1]) tmpfunc <- function(x,farg,ssb.target,eyear){ farg$multi <- x return((get.stat(do.call(future.vpa2,farg),eyear=eyear,hsp=hsp)$ssb.mean-ssb.target)^2) } farg$res0$Fc.at.age <- original.sel # current Fをもとにもどす # originalな選択率のもとで、それを何倍にすればi年目のFで漁獲した時の親魚資源量と同じになるか ores <- optimize(tmpfunc,range.tmp,farg=farg,ssb.target=tmp2$ssb.mean,eyear=eyear) # farg$multi <- ores$minimum # tmp3 <- do.call(future.vpa2,farg) tmp2$fmulti <- ores$minimum Fhist <- rbind(Fhist,tmp2) } return(as.data.frame(Fhist)) } trace.func <- 
function(farg,eyear,hsp=0, fmulti=c(seq(from=0,to=0.9,by=0.1),1,seq(from=1.1,to=2,by=0.1),3:5,7,20,100)){ trace.res <- NULL farg$outtype <- "FULL" for(i in 1:length(fmulti)){ farg$multi <- fmulti[i] tmp <- do.call(future.vpa2,farg) # trace.res <- rbind(trace.res,get.stat(tmp,eyear=eyear,hsp=hsp)) tmp2 <- cbind(get.stat(tmp,eyear=eyear,hsp=hsp), get.stat2(tmp,eyear=eyear,hsp=hsp)) trace.res <- rbind(trace.res,tmp2) if(tmp2$"ssb.mean"<trace.res$"ssb.mean"[1]/1000){ fmulti <- fmulti[1:i] break() } } trace.res <- as.data.frame(trace.res) trace.res$fmulti <- fmulti return(trace.res) } b0.HS <- b0.BH <- b0.RI <- numeric() # B0 fout.HS <- fout.BH <- fout.RI <- list() fout0.HS <- fout0.BH <- fout0.RI <- list() trace.HS <- trace.BH <- trace.RI <- list() Fhist.HS <- Fhist.BH <- Fhist.RI <- list() fout.HS.5per <- list() for(kk in 1:length(sigma)){ ref.year <- as.numeric(rev(colnames(vpares$naa))[1]) if(sigma[kk]==-1){ if(isTRUE(what.est[1])){ sigma.tmp <- exp(res.HS$par[3]) } else{ if(isTRUE(what.est[2])) sigma.tmp <- exp(res.BH$par[3]) if(isTRUE(what.est[3])) sigma.tmp <- exp(res.RI$par[3]) } } else{ sigma.tmp <- sigma[kk] } #--------- Hockey stick fout0.HS[[kk]] <- future.vpa2(vpares,multi=0,nyear=nyear,start.year=ref.year, N=ifelse(sigma[kk]==0,1,N), ABC.year=ref.year+1,waa.year=bref.year,maa.year=bref.year, M.year=bref.year,is.plot=FALSE, recfunc=HS.rec,seed=seed,outtype="simple", rec.arg=list(a=a.HS,b=b.HS,gamma=gamma1,sd=sigma.tmp,bias.correction=bias.correction)) # b0.HS[kk] <- fout0$vssb[nrow(fout0$vssb),1] #static b0 farg.HS <- fout0.HS[[kk]]$input which.min2 <- function(x){ max(which(min(x)==x)) } if(isTRUE(what.est[1])){ trace.HS[[kk]] <- trace.func(farg.HS,eyear,hsp=b.HS) xx <- which.max(trace.HS[[kk]]$catch.median)+c(-1,1) range.tmp <- trace.HS[[kk]]$fmulti[xx] if(xx[1]==0) range.tmp <- c(0,range.tmp) # if(is.na(xx[2])) range.tmp[2] <- max(trace.HS[[kk]]$fmulti)*10 if(is.na(range.tmp[2])) range.tmp[2] <- max(trace.HS[[kk]]$fmulti)*10 tmp <- 
optimize(tmpfunc,range.tmp,f.arg=farg.HS,eyear=eyear,FUN=FUN) farg.HS$multi <- tmp$minimum # Fc.at.a * multiがFmsy fout.HS[[kk]] <- do.call(future.vpa2,farg.HS) Fmsy.HS <- tmp$minimum * farg.HS$res0$Fc.at.age ## ここでtraceを追加 trace.HS[[kk]] <- rbind(trace.HS[[kk]],trace.func(farg.HS,eyear,hsp=b.HS, fmulti=tmp$minimum+c(-0.025,-0.05,-0.075,0,0.025,0.05,0.075))) trace.HS[[kk]] <- trace.HS[[kk]][order(trace.HS[[kk]]$fmulti),] ### if(is.Kobe[1]) Fhist.HS[[kk]] <- get.Fhist(farg.HS,vpares,eyear=eyear,trace=trace.HS[[kk]]) if(is.5perlower){ xx <- which.min2((trace.HS[[kk]]$lower-0.05)^2)+c(-1,1) range.tmp <- trace.HS[[kk]]$fmulti[xx] if(xx[1]==0) range.tmp <- c(0,range.tmp) if(is.na(xx[2])) range.tmp[2] <- max(trace.HS[[kk]]$fmulti)*10 tmp <- optimize(tmpfunc2,range.tmp,f.arg=farg.HS,eyear=eyear,FUN=FUN,hsp=b.HS) farg.HS$multi <- tmp$minimum fout.HS.5per[[kk]] <- do.call(future.vpa2,farg.HS) } } #---------------------- calculation of MSY for BH if(isTRUE(what.est[2])){ if(sigma[kk]==-1){ sigma.tmp <- exp(res.BH$par[3]) } else{ sigma.tmp <- sigma[kk] } farg.BH <- farg.HS farg.BH$recfunc <- BH.rec farg.BH$rec.arg <- list(a=a.BH,b=b.BH,sd=sigma.tmp,bias.correction=bias.correction) farg.BH$multi <- 0 fout0.BH[[kk]] <- do.call(future.vpa2,farg.BH) # b0.BH[kk] <- fout0.BH$vssb[nrow(fout0$vssb),1] #static b0 trace.BH[[kk]] <- trace.func(farg.BH,eyear) # tmp <- optimize(tmpfunc,c(0,10),f.arg=farg.BH,eyear=eyear,FUN=FUN) xx <- which.max(trace.BH[[kk]]$catch.median)+c(-1,1) range.tmp <- trace.BH[[kk]]$fmulti[xx] if(xx[1]==0) range.tmp <- c(0,range.tmp) # if(is.na(xx[2])) range.tmp[2] <- max(trace.BH[[kk]]$fmulti)*10 if(is.na(range.tmp[2])) range.tmp[2] <- max(trace.BH[[kk]]$fmulti)*10 tmp <- optimize(tmpfunc,range.tmp,f.arg=farg.BH,eyear=eyear,FUN=FUN) farg.BH$multi <- tmp$minimum fout.BH[[kk]] <- do.call(future.vpa2,farg.BH) Fmsy.BH <- tmp$minimum * farg.BH$res0$Fc.at.age if(is.Kobe[2]) Fhist.BH[[kk]] <- get.Fhist(farg.BH,vpares,eyear=eyear,trace.BH[[kk]]) ## ここでtraceを追加 
trace.BH[[kk]] <- rbind(trace.BH[[kk]], trace.func(farg.BH,eyear,hsp=b.BH,fmulti=tmp$minimum+c(-0.025,-0.05,-0.075,0,0.025,0.05,0.075))) trace.BH[[kk]] <- trace.BH[[kk]][order(trace.BH[[kk]]$fmulti),] ### } #------------------- calculation of MSY for RI if(isTRUE(what.est[3])){ if(sigma[kk]==-1){ sigma.tmp <- exp(res.RI$par[3]) } else{ sigma.tmp <- sigma[kk] } farg.RI <- farg.HS farg.RI$recfunc <- RI.rec farg.RI$rec.arg <- list(a=a.RI,b=b.RI,sd=sigma.tmp,bias.correction=bias.correction) farg.RI$multi <- 0 fout0.RI[[kk]] <- do.call(future.vpa2,farg.RI) # b0.RI[kk] <- fout0$vssb[nrow(fout0$vssb),1] #static b0 trace.RI[[kk]] <- trace.func(farg.RI,eyear) xx <- which.max(trace.RI[[kk]]$catch.median)+c(-1,1) range.tmp <- trace.RI[[kk]]$fmulti[xx] if(xx[1]==0) range.tmp <- c(0,range.tmp) # if(is.na(xx[2])) range.tmp[2] <- max(trace.RI[[kk]]$fmulti)*10 if(is.na(range.tmp[2])) range.tmp[2] <- max(trace.RI[[kk]]$fmulti)*10 tmp <- optimize(tmpfunc,range.tmp,f.arg=farg.RI,eyear=eyear,FUN=FUN) farg.RI$multi <- tmp$minimum fout.RI[[kk]] <- do.call(future.vpa2,farg.RI) Fmsy.RI <- tmp$minimum * farg.RI$res0$Fc.at.age if(is.Kobe[3]) Fhist.RI[[kk]] <- get.Fhist(farg.RI,vpares,eyear=eyear,trace.RI[[kk]]) ## ここでtraceを追加 trace.RI[[kk]] <- rbind(trace.RI[[kk]], trace.func(farg.RI,eyear,hsp=b.RI, fmulti=tmp$minimum+c(-0.025,-0.05,-0.075,0,0.025,0.05,0.075))) trace.RI[[kk]] <- trace.RI[[kk]][order(trace.RI[[kk]]$fmulti),] ### } } #-------------------------------------- if(isTRUE(is.5perlower)){ tmp <- as.data.frame(t(sapply(fout.HS.5per,get.stat,eyear=eyear,hsp=b.HS))) tmp$f <- sapply(fout.HS.5per,function(x)x$multi) } else{ tmp <- NA } # 関数を返すとsaveしたときに異常にファイルサイズが大きくなる。原因は不明。 # とりあえず、関数を返すのをやめる output <- list(dat=dat,sigma=sigma,vpares=vpares) if(what.est[1]==TRUE) output$hs <- list(a=a.HS,b=b.HS,sd=sd.HS,gamma=gamma1,ofv=res.HS$value,ofv.least.square=ofv.least.square, res=res.HS,r0=r0.HS,Fhist=Fhist.HS, trace=trace.HS,boot=as.data.frame(boot.HS), 
jack=as.data.frame(jack.HS),farg=farg.HS, f.msy=sapply(fout.HS,function(x)x$multi), Fmsy=Fmsy.HS,surface=surface.HS, fout=fout.HS, # 最大化したときのFを使って将来予測したときのサマリーをMSYのreference pointとする MSY=as.data.frame(t(sapply(fout.HS,get.stat,eyear=eyear,hsp=b.HS))), B0=as.data.frame(t(sapply(fout0.HS,get.stat,eyear=eyear,hsp=b.HS))), per5=tmp) # sl=list(a=a.SL, # res=res.SL,jack=jack.SL,boot=boot.SL), if(what.est[2]==TRUE) output$bh <- list(a=a.BH,b=b.BH,sd=sd.BH, res=res.BH,r0=NA,#R0を入れないといけない Fhist=Fhist.BH,ofv=res.BH$value, trace=trace.BH,b0=b0.BH,boot=as.data.frame(boot.BH),jack=as.data.frame(jack.BH), f.msy=sapply(fout.BH,function(x)x$multi), fout=fout.BH, Fmsy=Fmsy.BH,farg=farg.BH, MSY=as.data.frame(t(sapply(fout.BH,get.stat,eyear=eyear))), B0=as.data.frame(t(sapply(fout0.BH,get.stat,eyear=eyear)))) if(what.est[3]==TRUE) output$ri <- list(a=a.RI,b=b.RI,sd=sd.RI,ofv=res.RI$value, res=res.RI,r0=NA,#R0を入れないといけない, Fhist=Fhist.RI,farg=farg.RI, trace=trace.RI,b0=b0.RI,boot=as.data.frame(boot.RI), jack=as.data.frame(jack.RI), fout=fout.RI, f.msy=sapply(fout.RI,function(x)x$multi), Fmsy=Fmsy.RI, MSY=as.data.frame(t(sapply(fout.RI,get.stat,eyear=eyear))), B0=as.data.frame(t(sapply(fout0.RI,get.stat,eyear=eyear)))) index <- c("a","b","R0","sd","MSY","B0","f.msy","Fmsy") tmp <- NULL if(what.est[1]==TRUE) tmp <- rbind(tmp,unlist(output$hs[index])) if(what.est[2]==TRUE) tmp <- rbind(tmp,unlist(output$bh[index])) if(what.est[3]==TRUE) tmp <- rbind(tmp,unlist(output$ri[index])) tmp <- as.data.frame(tmp) rownames(tmp) <- c("hs","bh","ri")[what.est] # tmp$nLL <- output$ofv output$summary0 <- tmp colnames(output$summary0)[1] <- "a" output$summary <- output$summary0[c("a","b","sd","MSY.ssb.mean.ssb.mean", "MSY.biom.mean.biom.mean", "MSY.U.mean.U.mean", "MSY.catch.mean.catch.mean", "B0.ssb.mean.ssb.mean", "B0.biom.mean.biom.mean","f.msy")] colnames(output$summary) <- c("a","b","sd","SSB_MSY","B_MSY","U_MSY","MSY","B0(SSB)","B0(Biomass)","FMSY/Fcurrent") output$summary <- 
cbind(output$summary,output$summary0[,substr(colnames(output$summary0),1,4)=="Fmsy"]) class(output) <- "SR" ##--- PGY管理基準値を計算する if(!is.null(PGY)){ k.tmp <- which(what.est) for(k in 1:length(k.tmp)){ fout.list2 <- list() s <- 1 for(j in 1:length(PGY)){ outtmp <- output[[which(names(output)==c("hs","bh","ri")[k.tmp[k]])[1]]] # outtmp$trace # frange.list <- list(c(output[[which(names(output)==c("hs","bh","ri")[1])[1]]]$f.msy,2), # c(0.01,output[[which(names(output)==c("hs","bh","ri")[1])[1]]]$f.msy)) ttmp <- outtmp$trace[[1]]$catch.mean-PGY[j]*output$summary$MSY[k] ttmp <- which(diff(sign(ttmp))!=0) frange.list <- list(outtmp$trace[[1]]$fmulti[ttmp[1]+0:1], outtmp$trace[[1]]$fmulti[ttmp[2]+0:1]) # browser() for(i in 1:2){ if(k.tmp[k]==1) farg.tmp <- farg.HS if(k.tmp[k]==2) farg.tmp <- farg.BH if(k.tmp[k]==3) farg.tmp <- farg.RI farg.tmp$outtype <- NULL farg.tmp$Frec <- list(stochastic=TRUE, future.year=rev(rownames(outtmp$fout[[1]]$vssb))[1], Blimit=PGY[j]*output$summary$MSY[k], scenario="catch.mean",Frange=frange.list[[i]]) fout.list2[[s]] <- do.call(future.vpa,farg.tmp) s <- s+1 }} PGY.biom <- as.data.frame(t(sapply(fout.list2,get.stat,eyear=eyear))) rownames(PGY.biom) <- paste("PGY",rep(PGY,each=2),rep(c("upper","lower"),length(PGY)),c("hs","bh","ri")[k.tmp[k]],sep="_") PGY.biom$target.catch <- rep(PGY*output$summary$MSY[k],each=2) if(k.tmp[k]==1) output$PGY.biom.hs <- PGY.biom if(k.tmp[k]==2) output$PGY.biom.bh <- PGY.biom if(k.tmp[k]==3) output$PGY.biom.ri <- PGY.biom } } ##--- if(isTRUE(is.small)){ output$hs$fout <- NULL output$bh$fout <- NULL output$ri$fout <- NULL } return(output) } pred.RI <- function(SSB,a,b) a*SSB*exp(-b*SSB) pred.BH <- function(SSB,a,b) a*SSB/(1+b*SSB) pred.HS <- function(SSB,a,b,gamma) a*(SSB+sqrt(b^2+gamma^2/4)-sqrt((SSB-b)^2+gamma^2/4)) pred.SL <- function(SSB,a) a*SSB
20eda438ad86c9c28581ecb71e81937c9db86292
b186a76a1f5535019bef4d6102832445a58a408f
/R/0_data_acquisition.R
206274cdae0ba7fee915c03a864819b50709da71
[]
no_license
brunomc-eco/covidmunBR
117fc9010b0eb70c9e1fccf8f3f41ce2e3dcad94
3a7b86204cfc82f10c57f949f80e0b1961c4ed49
refs/heads/master
2022-11-10T17:58:10.549667
2020-06-26T19:38:28
2020-06-26T19:38:28
267,056,229
0
0
null
null
null
null
UTF-8
R
false
false
827
r
0_data_acquisition.R
# COVID-19 Dashboard for IBGE # Bruno Carvalho, brunomc.eco@gmail.com # Downloading raw data from public sources library(readr) ## ocupacao de leitos, Ministério da Saúde ## https://gestaoleitos.saude.gov.br/app/kibana#/dashboard/18685ad0-81b4-11ea-9f02-bfa3f55b2dad # no final do dashboard, exportar arquivo "Formatted" # salvar na pasta "./data/leitos_MS" # renomear arquivo para "leitos_MS.csv" ## IDB Inter-American Development Bank ## Coronavirus Traffic Congestion Impact in Latin America with Waze Data ## https://www.iadb.org/en/topics-effectiveness-improving-lives/coronavirus-impact-dashboard daily <- read_csv('http://tiny.cc/idb-traffic-daily') metadata <- read_csv('http://tiny.cc/idb-traffic-metadata') write.csv(daily, file = "./data/idb/daily.csv") write.csv(metadata, file = "./data/idb/metadata.csv")
583f21233e35dcf906c3775a27c4d3da6e7becd9
7e503ed19de6efbd965a8d27a74b3b6f2728b3de
/R/simulate_baselers.R
7cbdd391304408aaecafe03ab99e222759885e40
[]
no_license
anhnguyendepocen/baselers
4bf80ae72cbb763c911ce3291fb638297acaf60f
702a2ab2cc7b91c79c269b324122ce3037b82c67
refs/heads/master
2020-04-14T23:59:40.080592
2018-06-29T10:40:37
2018-06-29T10:40:37
null
0
0
null
null
null
null
UTF-8
R
false
false
12,989
r
simulate_baselers.R
#' Simulate baselers dataset #' #' This function can be used to simulate nsim participants. It draws samples from #' a mulitvariate normal distribution with a prespecified correlation matrix (corMat_f #' for women and corMat_m for men). Most (but not all) of the means (but not the #' correlations) are based on actual data of the swiss population. Details can be #' found in the data helpfile you can call with ?baselers if the package is loaded. #' #' @param nsim integer. The number of participants to simulate (same number of men #' and women). If an uneven number is specified, the returned dataset will contain #' one additional row. #' @param corMat_f double matrix. A symmetric correlation matrix. If not specified #' the default matrix is used. #' @param corMat_m double matrix. A symmetric correlation matrix. If not specified #' the default matrix is used. #' @param seed integer. Is passed to set.seed, specify the same number to replicate #' results. If not provided, results will vary randomly. #' #' @export #' #' @return A tibble with nsim rows, containing continous and categorical simulated #' data of inhabitants of Basel. 
#' @import dplyr #' @importFrom stats runif simulate_baselers <- function(nsim = 1000, corMat_f = NULL, corMat_m = NULL, seed = NULL){ # some sources: # age: https://www.bfs.admin.ch/bfs/de/home/statistiken/kataloge-datenbanken/tabellen.assetdetail.3202980.html # weight, height: https://www.laenderdaten.info/durchschnittliche-koerpergroessen.php # tattoo: https://www.migrosmagazin.ch/tattoos-ohne-grenzen # income: https://www.srf.ch/news/schweiz/mit-7100-franken-pro-monat-ueberleben-oder-etwa-doch-weniger # consultations: https://www.krankenkasse-vergleich.ch/news/das-schweizer-gesundheitssystem-in-der-oecd-statistik, https://www.bfs.admin.ch/bfs/de/home/statistiken/kataloge-datenbanken/tabellen.assetdetail.250255.html if (!is.null(seed) && is.numeric(seed)){ set.seed(seed = seed) } # continuous variables: # age, income (per month), weight (kg), height (cm), children, # happiness (0 to 10), fitness (0 to 10), food, alcohol, tattoos, # rhine (no per month), datause (no of times phone is checked per day), # consultations, hiking # means and sds for females and males mu_f <- c(43.14, 7112, 64, 164, 1.54, 6, 5, 438, 25, 3, 4, 88, 4.5, 60) stddev_f <- c(20, 1200, 8, 10, 1.25, 2, 2, 80, 10, 2.8, 2.5, 20, 2, 20) var_f <- stddev_f ^ 2 mu_m <- c(41.01, 7112, 84.9, 178, 1.54, 6, 5, 438, 40, 3.8, 4, 88, 3, 60) stddev_m <- c(20, 1200, 12, 11, 1.25, 2, 2, 80, 15, 2.8, 2.5, 20, 1.5, 25) var_m <- stddev_m ^ 2 if (is.null(corMat_f)){ # correlation matrix females corMat_f <- matrix(c( var_f[1], .3, -.1, -.15, .2, .1, -.25, -.1, 0, -.45, -.15, -.23, .5, 0, .3, var_f[2], 0, 0, -.1, .15, 0, .5, .2, -.08, 0, 0, -.05, 0, -.1, 0, var_f[3], .6, 0, 0, -.3, 0, .15, 0, 0, 0, .15, -.15, -.15, 0, .6, var_f[4], 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, .2, -.1, 0, 0, var_f[5], 0, 0, .4, 0, 0, -.1, 0, 0, 0, .1, .15, 0, 0, 0, var_f[6], .15, .1, 0, 0, .2, -.3, -.15, .25, -.25, 0, -.3, 0, 0, .15, var_f[7], 0, -.05, .15, .2, 0, -.1, .3, -.1, .5, 0, 0, .4, .1, 0, var_f[8], 0, 0, 0, 0, 0, 0, 0, .2, .15, 0, 0, 0, 
-.05, 0, var_f[9], 0, 0, 0, .15, 0, -.45, -.08, 0, 0, 0, 0, .15, 0, 0, var_f[10], 0, 0, 0, 0, -.15, 0, 0, 0, -.1, .2, .2, 0, 0, 0, var_f[11], 0, 0, .1, -.23, 0, 0, 0, 0, -.3, 0, 0, 0, 0, 0, var_f[12], 0, 0, .5, -.05, .15, 0, 0, -.15, -.1, 0, .15, 0, 0, 0, var_f[13], -.15, 0, 0, -.15, 0, 0, .25, .3, 0, 0, 0, .1, 0, -.15, var_f[14]), ncol = 14) } if (is.null(corMat_m)){ # correlation matrix for males corMat_m <- matrix(c( var_m[1], .3, -.1, -.15, .2, .1, -.25, -.1, 0, -.25, -.15, -.23, .5, 0, .3, var_m[2], 0, 0, -.1, .15, 0, .5, .2, -.08, 0, 0, -.05, 0, -.1, 0, var_m[3], .6, 0, 0, -.3, 0, .15, 0, 0, 0, .15, -.15, -.15, 0, .6, var_m[4], 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, .2, -.1, 0, 0, var_m[5], 0, 0, .4, 0, 0, -.1, 0, 0, 0, .1, .15, 0, 0, 0, var_m[6], .15, .1, 0, 0, .2, -.3, -.15, .25, -.25, 0, -.3, 0, 0, .15, var_m[7], 0, -.05, .15, .2, 0, -.1, .3, -.1, .5, 0, 0, .4, .1, 0, var_m[8], 0, 0, 0, 0, 0, 0, 0, .2, .15, 0, 0, 0, -.05, 0, var_m[9], 0, 0, 0, .15, 0, -.25, -.08, 0, 0, 0, 0, .15, 0, 0, var_m[10], 0, 0, 0, 0, -.15, 0, 0, 0, -.1, .2, .2, 0, 0, 0, var_m[11], 0, 0, .1, -.23, 0, 0, 0, 0, -.3, 0, 0, 0, 0, 0, var_m[12], 0, 0, .5, -.05, .15, 0, 0, -.15, -.1, 0, .15, 0, 0, 0, var_m[13], -.15, 0, 0, -.15, 0, 0, .25, .3, 0, 0, 0, .1, 0, -.15, var_m[14]), ncol = 14) } # if matrices are not positive definite, force them to be if (!corpcor::is.positive.definite(corMat_f)){ corMat_f <- corpcor::make.positive.definite(corMat_f, tol=1e-3) } if (!corpcor::is.positive.definite(corMat_m)){ corMat_m <- corpcor::make.positive.definite(corMat_m, tol=1e-3) } # draw samples from multinormal distribution mat_f <- MASS::mvrnorm(n = round(nsim / 2), mu = mu_f, Sigma = corMat_f, empirical = TRUE) mat_m <- MASS::mvrnorm(n = round(nsim / 2), mu = mu_m, Sigma = corMat_m, empirical = TRUE) tib_f <- tibble::as_tibble(mat_f) names(tib_f) <- c("age", "income", "weight", "height", "children", "happiness", "fitness", "food", "alcohol", "tattoos", "rhine", "datause", "consultations", "hiking") tib_m <- 
tibble::as_tibble(mat_m) names(tib_m) <- c("age", "income", "weight", "height", "children", "happiness", "fitness", "food", "alcohol", "tattoos", "rhine", "datause", "consultations", "hiking") tib_f$sex <- "female" tib_m$sex <- "male" tib_f$education <- sample(c("obligatory_school", "apprenticeship", "SEK_II", "SEK_III"), size = nrow(tib_f), replace = TRUE, prob = c(.146, .447, .093, .314)) tib_m$education <- sample(c("obligatory_school", "apprenticeship", "SEK_II", "SEK_III"), size = nrow(tib_m), replace = TRUE, prob = c(.101, .38, .05, .469)) tib_f$confession <- ifelse(tib_f$education == "SEK_III", sample(c("confessionless", "muslim", "other", "catholic", "evangelical-reformed"), size = nrow(tib_f), replace = TRUE, prob = c(.353, .026, .049, .347, .225)), sample(c("confessionless", "muslim", "other", "catholic", "evangelical-reformed"), size = nrow(tib_f), replace = TRUE, prob = c(.253, .051, .074, .372, .25))) tib_m$confession <- ifelse(tib_m$education == "SEK_III", sample(c("confessionless", "muslim", "other", "catholic", "evangelical-reformed"), size = nrow(tib_m), replace = TRUE, prob = c(.353, .026, .049, .347, .225)), sample(c("confessionless", "muslim", "other", "catholic", "evangelical-reformed"), size = nrow(tib_m), replace = TRUE, prob = c(.253, .051, .074, .372, .25))) tib_f$fasnacht <- sample(c("yes", "no"), size = nrow(tib_f), replace = TRUE, prob = c(.02, .98)) tib_m$fasnacht <- sample(c("yes", "no"), size = nrow(tib_m), replace = TRUE, prob = c(.035, .965)) tib <- rbind(tib_f, tib_m) tib$alcohol <- tib$alcohol + ifelse(tib$fasnacht == "yes", runif(1, 0:40), 0) tib$eyecor <- sample(c("yes", "no"), size = nsim, replace = TRUE, prob = c(.66, .37)) tib <- tib[sample(1:nsim),] # id_scramble <- paste0("bas_", sapply(1:nsim, FUN = function(x) {paste(sample(LETTERS, size = 5, replace = TRUE), collapse = "")})) id_scramble <- 1:nsim tib <- tib %>% mutate(id = id_scramble, age = case_when(age < 18 | age > 105 ~ runif(1, 18, 85), TRUE ~ age), age = round(age, 
0), height = round(height, 1), # weight weight = round(weight, 1), ## make 10% of cases NA weight = case_when(runif(nsim) < .15 ~ NA_real_, TRUE ~ weight), ## make 15% of cases NA income = case_when(runif(nsim) < .15 ~ NA_real_, TRUE ~ income), children = case_when(children < 0 ~ runif(1, 0, 3), TRUE ~ children), children = round(children), happiness = case_when(happiness > 10 ~ runif(1, 6, 9), happiness < 5 & runif(1, 0, 1) < .35 ~ runif(1, 6, 9), TRUE ~ happiness), happiness = case_when(happiness < 1 ~ runif(1, 1, 10), TRUE ~ happiness), happiness = round(happiness), fitness = case_when(fitness < 1 | fitness > 10 ~ runif(1, 1, 10), TRUE ~ fitness), fitness = round(fitness), ### income # as a function of happiness, height, fitness, tattoos age income = income + -100 * happiness + 2 * height + 50 * fitness - 50 * tattoos + 150 * age + rnorm(nsim, mean = -150 * 40, sd = 200), income = round(income / 100, 0) * 100, income = case_when(income < 1000 ~ runif(1, 1000, 10000), TRUE ~ income), ### alcohol alcohol = case_when(alcohol < 0 ~ runif(1, 5, 50), TRUE ~ alcohol), alcohol = round(alcohol), ## make 15% of cases 0 alcohol = case_when(runif(nsim) < .15 ~ 0, TRUE ~ alcohol), ## Food as a function of happiness income and alcohol food = 50 * happiness + .1 * income + -10 * alcohol + rnorm(nsim, mean = 0, sd = 200), food = round(food / 10) * 10, ## Tattoos tattoos = case_when(tattoos < 0 ~ 0, TRUE ~ tattoos), tattoos = round(tattoos), ## make 50% of cases 0 tattoos = case_when(runif(nsim) < .5 ~ 0, TRUE ~ tattoos), rhine = case_when(rhine < 0 ~ 0, TRUE ~ rhine), rhine = round(rhine), datause = round(datause), consultations = case_when(consultations < 0 ~ runif(1, 0, 10), TRUE ~ consultations), consultations = round(consultations), hiking = case_when(hiking < 0 ~ 0, TRUE ~ hiking), hiking = round(hiking), # confession ## make 5% of cases NA confession = case_when(runif(nsim) < .15 ~ NA_character_, TRUE ~ confession) ) # Change order a bit tib <- tib %>% select(id, sex, 
age, height, weight, income, education, confession, everything()) tib }
adf469da8e155492bf7b8e14e2f523cbb712cc48
9dac5dc85beb8dc395fae36ea401a53cb4d1289e
/Visualizations/covariance_matrix_approximation.R
aabd4d7de5d4134f213fbbcc41a97ee6af43d456
[]
no_license
jamiecoops22/MISCADA_PROJECT
e60c491964b420e3901f9912bdcf946ab000b18a
a005d7930cf22bcac42c6c8773dee9e63262b254
refs/heads/main
2023-07-22T10:11:32.022856
2021-09-04T11:45:24
2021-09-04T11:45:24
403,041,747
0
0
null
null
null
null
UTF-8
R
false
false
10,041
r
covariance_matrix_approximation.R
library(spBayes) library(GpGp) library(matlib) # useful functions eucldist <- function(c1, c2){ d1 <- (c1[1] - c2[1]) d2 <- (c1[2] - c2[2]) d <- sqrt(d1**2 + d2**2) return(d) } vecMatch <- function(x, want) { if (length(x) == length(want)){ if (x[1]==want[1]&x[2]==want[2]&x[3]==want[3]){ return(1) } else { return(NULL) } } tfv <- apply(x, 1, function(x, want) isTRUE(all.equal(x, want)), want) which(tfv == TRUE) } hist_set <- function(m, ref_set, start_point){ times <- ref_set[,3] r <- length(times) sqrm <- sqrt(m) coords <- ref_set[,1:2] sind <- vecMatch(ref_set, start_point) #print(sind) start_index <- sind #print(start_index) if (length(start_index) == 0){ # start_point not in ref set spirs <- FALSE } else { spirs <- TRUE } #print(spirs) start_time <- start_point[3] history_set <- matrix(c(0,0,0),nrow=1, ncol=3) hist_times <- which(times < start_time) #print(hist_times) history_set <- rbind(history_set, ref_set[hist_times,]) current_times <- which(times == start_time) #print(current_times) if (spirs){ is <- c() for (i in current_times){ if (i < start_index){ is <- c(is, i) } } #print(is) if (length(is) != 0){ history_set <- rbind(history_set, ref_set[is,]) } } else { history_set <- rbind(history_set, ref_set[current_times,]) } #history_set <- rbind(history_set, ref_set[current_times,]) #print(history_set) return((history_set <- history_set[-1,])) } nn_from_hist <- function(m, history_set, start_point){ ### DEFINE TIME GAP #print(history_set) time_gap <- 1 if (length(history_set) == 3){ return(history_set) } h <- length(history_set[,3]) times <- history_set[,3] coords <- history_set[,1:2] rm <- sqrt(m) if (h <= m){ #print('here') return(unique.matrix(history_set)) } start_time <- start_point[3] nns <- matrix(c(0,0,0),nrow=1, ncol=3) # previous times for (i in (start_time-time_gap):(start_time-(rm+1)*time_gap)){ eds <- rep(1e6, h) for (j in seq(1, h)){ #print('times_j') #print(times[j]) #print('i') #print(i) if (format(round(times[j], 5), nsmall = 5) == 
format(round(i, 5), nsmall = 5)){ #print('YES') ed <- eucldist(start_point[1:2], coords[j,]) eds[j] <- ed } } cor_inds <- which(eds < 1e6) #print(cor_inds) if (length(cor_inds) != 0){ space_order_i <- order(eds)[1:min(length(cor_inds),rm)] #print(space_order_i) nearest_spatial_i <- history_set[space_order_i,] nns <- rbind(nns, (nearest_spatial_i)) } } nns <- nns[-1,] # current time current_times <- which(times == start_time) #print('current times') #print(current_times) if (length(current_times) != 0){ edst <- rep(1e6, length(current_times)) for (i in current_times){ ed <- eucldist(start_point[1:2], coords[i,]) edst[i] <- ed } tinds <- which(edst < 1e6) time_order <- order(edst)[1:min(length(tinds),rm)] nearest_times <- history_set[time_order,] nns <- rbind(nns, nearest_times) } return(nns) } # overall nearest neighbor function nnsfunc <- function(m, ref_set, start_point){ history_set <- hist_set(m, ref_set, start_point) nns <- nn_from_hist(m, history_set, start_point) #print(nns) if (length(nns) <= 3){ return(nns) } else { return(unique.matrix(nns)) } } # correlation function as of DNNGP article corrf <- function(c1, c2, sigma_squ, a, c, kap){ s1 <- c1[1:2] s2 <- c2[1:2] t1 <- c1[3] t2 <- c2[3] h <- eucldist(s1, s2) u <- abs(t1-t2) f <- sigma_squ/((a*(u**2) + 1)**kap) s <- exp((-c*h)/((a*(u**2)+1)**(kap/2))) return(f*s) } # covariance matrix for all location possibilities corrmat <- function(locs1, locs2, sigma_squ, a, c, kap){ locs1_test <- rbind(locs1, c(0, 0, 0)) locs2_test <- rbind(locs2, c(0, 0, 0)) if (length(locs1_test[,1]) == 2){ nrows <- 1 } else { nrows <- length(locs1[,1]) } if (length(locs2_test[,1]) == 2){ ncols <- 1 } else { ncols <- length(locs2[,1]) } cMat <- matrix(0, nrow = nrows, ncol = ncols) for (i in 1:nrows){ for (j in 1:ncols){ if (nrows == 1 & ncols == 1){ cMat[i,j] <- corrf(locs1, locs2, sigma_squ, a, c, kap) } else if (nrows == 1 & ncols != 1){ cMat[i,j] <- corrf(locs1, locs2[j,], sigma_squ, a, c, kap) } else if (nrows != 1 & ncols == 
1){ cMat[i,j] <- corrf(locs1[i,], locs2, sigma_squ, a, c, kap) } else { cMat[i,j] <- corrf(locs1[i,], locs2[j,], sigma_squ, a, c, kap) } } } return(cMat) } # function to compute a_N(li) a_vector <- function(l_i, nns, sigma_squ, a, c, kap){ C_NN <- corrmat(nns, nns, sigma_squ, a, c, kap) C_Nl <- corrmat(nns, l_i, sigma_squ, a, c, kap) if (length(C_NN) == 1){ iC <- 1/(C_NN) } else { # print('nns') # print(nns) # print('C_NN') # print(C_NN) iC <- inv(C_NN) } return(iC %*% C_Nl) } # function to compute diagonal element of matrix F i.e. f_li f_li <- function(l_i, nns, sigma_squ, a, c, kap){ corr <- corrf(l_i, l_i, sigma_squ, a, c, kap) C_lN <- corrmat(l_i, nns, sigma_squ, a, c, kap) C_NN <- corrmat(nns, nns, sigma_squ, a, c, kap) C_Nl <- corrmat(nns, l_i, sigma_squ, a, c, kap) #print((C_NN)) if (length(C_NN) == 1){ iC <- 1/(C_NN) } else { iC <- inv(C_NN) } return(corr - ((C_lN %*% iC) %*% C_Nl)) } # function to compute F matrix F_mat <- function(ref_set, m, sigma_squ, a, c, kap){ r <- length(ref_set[,1]) Fmatrix <- matrix(0, nrow = r, ncol = r) Fmatrix[1,1] <- corrf(ref_set[1,], ref_set[1,], sigma_squ, a, c, kap) for (i in 2:r){ l_i <- ref_set[i,] nns <- nnsfunc(m, ref_set, l_i) #print(nns) f <- f_li(l_i, nns, sigma_squ, a, c, kap) #print(f) Fmatrix[i,i] <- f } return(Fmatrix) } # function to compute V matrix V_mat <- function(ref_set, m, sigma_squ, a, c, kap){ r <- length(ref_set[,1]) V <- matrix(1e6, nrow = r, ncol = r) V[1,1] <- 1 for (i in 2:r){ V[i,1] <- 0 } for (j in 2:r){ nns_j <- nnsfunc(m, ref_set, ref_set[j,]) inds <- c() for (i in 1:r){ if (i == j){ V[i,j] <- 1 } else { ind_match <- vecMatch(nns_j, ref_set[i,]) if (length(ind_match) == 0){ # li not in N_lj V[i,j] <- 0 } else { inds <- c(inds, i) } } } a_Nlj <- a_vector(ref_set[j,], nns_j, sigma_squ, a, c, kap) for (i in 1:length(inds)){ V[inds[i],j] <- -a_Nlj[i] } } return(V) } ### K FUNCTION K_func <- function(ref_set, m, sigma_squ, a, c, kap){ V <- V_mat(ref_set, m, sigma_squ, a, c, kap) #view(V) F_matrix 
<- F_mat(ref_set, m, sigma_squ, a, c, kap) #view(F_matrix) iF <- inv(F_matrix) Vt <- t(V) iK <- (Vt %*% (iF %*% V)) #view(iK) return(iK) } K_func_test <- function(V, F_matrix){ iF <- inv(F_matrix) Vt <- t(V) iK <- (Vt %*% iF) %*% V return(iK) } C_func_K_known <- function(l_i, l_j, ref_set, K, m, sigma_squ, a, c, kap){ p <- vecMatch(ref_set, l_i) if (length(p) != 0){ i_in_rs <- TRUE } else { i_in_rs <- FALSE } q <- vecMatch(ref_set, l_j) if (length(q) != 0){ j_in_rs <- TRUE } else { j_in_rs <- FALSE } if (i_in_rs){ if (j_in_rs){ return(K[p,q]) } else { nns_lj <- nnsfunc(m, ref_set, l_j) a_lj <- a_vector(l_j, nns_lj, sigma_squ, a, c, kap) return((t(a_lj))%*%K[,p]) } } else { if (j_in_rs){ nns_li <- nnsfunc(m, ref_set, l_i) a_li <- a_vector(l_i, nns_li, sigma_squ, a, c, kap) return((t(a_li))%*%K[,q]) } else { nns_li <- nnsfunc(m, ref_set, l_i) a_li <- a_vector(l_i, nns_li, sigma_squ, a, c, kap) nns_lj <- nnsfunc(m, ref_set, l_j) a_lj <- a_vector(l_j, nns_lj, sigma_squ, a, c, kap) if (vecMatch(l_i, l_j) == 1){ extr <- f_li(l_i, nns_li, sigma_squ, a, c, kap) } else { extr <- 0 } main <- ((t(a_li))%*%K)%*%a_lj return(main + extr) } } } C_func <- function(l_i, l_j, ref_set, m, sigma_squ, a, c, kap){ K <- K_func(ref_set, m, sigma_squ, a, c, kap) C <- C_func_K_known(l_i, l_j, ref_set, K, m, sigma_squ, a, c, kap) return(C) } C_mat <- function(ref_set, m, sigma_squ, a, c, kap){ r <- length(ref_set[,1]) K <- K_func(ref_set, m, sigma_squ, a, c, kap) print('K found') C <- matrix(0, nrow = r, ncol = r) count <- 0 for (i in 1:r){ for (j in 1:r){ C[i,j] <- C_func_K_known(ref_set[i,], ref_set[j,], ref_set, K, m, sigma_squ, a, c, kap) count <- count + 1 print(count) } } return(C) } ### visualization of matrices sigma_squ <- 1 a <- 50 c <- 25 kap <- 0.75 tau_squ <- 0.1 # full cov mat is C cs <- cbind(synth_full$coords.V1, synth_full$coords.V2, synth_full$time) V_test <- V_mat(cs, 4, sigma_squ, a, c, kap) F_test <- F_mat(cs, 4, sigma_squ, a, c, kap) K_test <- K_func_test(V_test, 
F_test) iK <- K_test iC <- inv(C) C_plot_x <- c() C_plot_y <- c() K_plot_x <- c() K_plot_y <- c() for (i in 1:125){ for (j in 1:125){ if (iC[i,j] != 0){ C_plot_x <- c(C_plot_x, i) C_plot_y <- c(C_plot_y, -j) } if (iK[i,j] != 0){ K_plot_x <- c(K_plot_x, i) K_plot_y <- c(K_plot_y, -j) } } } par(mfrow = c(1,2)) plot(C_plot_x, C_plot_y, xaxt='n', yaxt='n', xlab="", ylab = "", main = expression(C^{-1})) plot(K_plot_x, K_plot_y,xaxt='n', yaxt='n', xlab="", ylab = "", main = expression(tilde(C)^{-1}))
71e58c1c2aaf16db22f337608624a825d1bc16c0
7a4f14629fbaa162e4cb9ecbf50442ba3ff86c5a
/load_data.R
f29551a17230e5a80cccb806915c629a21af82e2
[]
no_license
bryanzk/ExData_Plotting1
800dc2c9e693b380e0b24dbdf1d05a20b5669ab9
4a71ca4eb8c4315383c25343fec8b9a143735bdd
refs/heads/master
2021-01-15T18:14:00.104240
2015-03-13T06:08:21
2015-03-13T06:08:21
32,133,150
0
0
null
2015-03-13T06:05:29
2015-03-13T06:05:29
null
UTF-8
R
false
false
729
r
load_data.R
# load_data.R
# Read the UCI household power-consumption data set and keep only the two
# days of interest (1 and 2 Feb 2007), with every measurement column coerced
# to numeric. Produces the data.table `pdt` used by the plotting scripts.

# library() errors when the package is missing; require() (used previously)
# only warns and returns FALSE, letting the script fail later and less clearly.
library(data.table)

# "?" marks missing observations in the raw file.
dt <- fread("household_power_consumption.txt", na.strings = "?",
            stringsAsFactors = FALSE)

# Subset the two target days and stack them in date order.
tar1 <- dt[dt$Date == "1/2/2007", ]
tar2 <- dt[dt$Date == "2/2/2007", ]
pdt <- rbind(tar1, tar2)

# Parse Date, then rebuild Time as a full "YYYY-MM-DD HH:MM:SS" string.
pdt$Date <- as.Date(pdt$Date, "%d/%m/%Y")
pdt$Time <- as.character(strptime(paste(pdt$Date, pdt$Time),
                                  format = "%Y-%m-%d %H:%M:%S"))

# Coerce all measurement columns to numeric in one pass. The original mixed
# as.numeric(), as.numeric(as.character()) and as.double() inconsistently;
# fread() already returned character columns, so plain as.numeric() suffices
# (and as.double() is just an alias for as.numeric()).
measure_cols <- c("Global_active_power", "Global_reactive_power", "Voltage",
                  "Global_intensity", "Sub_metering_1", "Sub_metering_2",
                  "Sub_metering_3")
for (col in measure_cols) {
  pdt[[col]] <- as.numeric(pdt[[col]])
}
e096cb42514da2f4053f1c269c363d9ac9cf3048
12098bccebee4216aa474ddad99aa8b7c721ef9a
/Scripts/Bar Graph.R
9f769af5048d359e4b0ea4a086c1ac0db5fd86fa
[]
no_license
feltem/info-201-final-project
57cb7439a7a6922108e31c8ef83e552feeec11d4
c1775b97455822ae9f84acda70d78153682dcbe6
refs/heads/master
2020-12-30T12:24:20.306394
2017-06-01T02:45:31
2017-06-01T02:45:31
91,385,904
0
1
null
null
null
null
UTF-8
R
false
false
745
r
Bar Graph.R
# Setup ----
library(dplyr)
library(plotly)

# Build an interactive Plotly bar chart of marijuana usage and perceived risk.
#
# Args:
#   y.vars:     bar heights for the three categories (past month, past year,
#               perceived-risk average), in that order.
#   error.vars: error-bar magnitudes, one per bar.
#
# Returns: a plotly htmlwidget.
BuildBar <- function(y.vars, error.vars) {
  # Category labels and per-bar colors (risk average is shown in grey).
  categories <- c("Past Month", "Past Year", "Percieved Risk Average")
  bar_fill <- c('rgb(58,95,11)', 'rgb(58,95,11)', 'rgb(169,169,169)')

  fig <- plot_ly(
    x = categories,
    y = y.vars,
    type = "bar",
    error_y = ~list(value = error.vars, color = '#000000'),
    marker = list(
      color = bar_fill,
      width = 1.5,
      line = list(color = 'rgb(169,169,169)')
    )
  )

  fig <- layout(
    fig,
    title = "Marijuana Usage & Risk",
    xaxis = list(title = "Marijuana Usage"),
    yaxis = list(title = "Percent")
  )

  return(fig)
}
ce3ac16ed2ac37c35f75c1a4f039a8e63c7768d3
79d5106cad12c2a6a6afb2bf4a663f5ce50557d8
/R/convert_rfv.R
850f11cf9ec424aaa7a2b9577223afcf4f54ae8a
[]
no_license
yuluc/parseRPDR
6b55b2de21ddd2b5c43f8fd730a771168897bf16
db9c656a0cd98017b69c1822d95c8c8836932153
refs/heads/master
2023-08-02T13:51:27.927674
2021-09-10T11:10:02
2021-09-10T11:10:02
null
0
0
null
null
null
null
UTF-8
R
false
false
5,435
r
convert_rfv.R
#' @title Searches columns for given reason for visit defined by ERFV codes.
#' @export
#'
#' @description Analyzes reason for visit data loaded using \emph{load_rfv}.
#' If requested, the data.table is returned with new columns corresponding to boolean values, whether given group of ERFV are present in the given columns.
#' If \emph{collapse} is given, then the information is aggregated based-on the \emph{collapse} column and the earliest or latest time of the given diagnosis is provided.
#'
#' @param d data.table, database containing reason for visit information data loaded using the \emph{load_rfv} function.
#' @param code string vector, an array of column names to search.
#' @param codes_to_find list, a list of arrays corresponding to sets of ERFV codes. The function searches the columns in code and the name of each list element will be created.
#' These columns are indicators whether the given disease is present in the set of ERFV codes or not.
#' @param collapse string, a column name on which to collapse the data.table.
#' Used in case we wish to assess whether given ERFV are present within all the same instances of \emph{collapse}. See vignette for details.
#' @param code_time string, column name of the time column. Defaults to \emph{time_rfv_start}. Used in case collapse is present to provide the earliest or latest instance of diagnosing the given disease.
#' @param time_type string, if multiple diagnoses are present within the same case of \emph{collapse}, which timepoint to return. Supported are: "earliest" or "latest". Defaults to \emph{earliest}.
#' @param nThread integer, number of threads to use by \emph{dopar} for parallelization. If it is set to 1, then no parallel backends are created and the function is executed sequentially.
#' On windows machines sockets are used, while on other operating systems fork parallelization is used.
#'
#' @return data.table, with indicator columns if provided.
#' If \emph{collapse} is present, then only unique ID and the summary columns are returned.
#'
#' @encoding UTF-8
#'
#' @examples \dontrun{
#' #Parse reason for visit columns
#' #and create indicator variables for the following reasons and summarize per patient,
#' #whether there are any encounters where the given reasons were registered
#' reasons <- list(Pain = c("ERFV:160357", "ERFV:140012"), Visit = c("ERFV:501"))
#' data_rfv_disease <- convert_rfv(d = data_rfv, keep = FALSE,
#' codes_to_find = reasons, nThread = 2, collapse = "ID_MERGE")
#' }
convert_rfv <- function(d, code = "rfv_concept_id", codes_to_find = NULL, collapse = NULL, code_time = "time_rfv_start", time_type = "earliest", nThread = 4) {

  # Declare data.table/foreach symbols as NULL to silence R CMD check notes
  # about undefined global variables (standard data.table idiom).
  .SD=.N=.I=.GRP=.BY=.EACHI=..=..code=.SDcols=i=j=time_to_db=..which_ids_to=..which_ids_from=..collapse=. <- NULL

  # Initialize multicore backend. With one thread (or a single code set)
  # foreach runs sequentially via %do%; otherwise a PSOCK (Windows) or FORK
  # (Unix) cluster is registered and %dopar% is used.
  # NOTE(review): the conditions use vectorized `|`/`&` where scalar `||`/`&&`
  # would be conventional; works here because both sides are length-1.
  if(nThread == 1 | length(codes_to_find) == 1) {
    `%exec%` <- foreach::`%do%`
  } else {
    # Never spin up more workers than there are code sets to process.
    if(length(codes_to_find) > 0 & length(codes_to_find) < nThread) {nThread <- length(codes_to_find)}
    if(.Platform$OS.type == "windows") {
      cl <- parallel::makeCluster(nThread, outfile = "", type = "PSOCK", methods = FALSE, useXDR = FALSE)
    } else{
      cl <- parallel::makeCluster(nThread, outfile = "", type = "FORK", methods = FALSE, useXDR = FALSE)
    }
    doParallel::registerDoParallel(cl)
    `%exec%` <- foreach::`%dopar%`
  }

  # Find diagnoses if requested; otherwise return the input unchanged.
  if(!is.null(codes_to_find)) {
    message(paste0("Finding reasons within specified columns."))
    # One foreach iteration per named code set; results are cbind-ed.
    result <- foreach::foreach(i = 1:length(codes_to_find), .combine="cbind", .inorder=TRUE, .errorhandling = c("pass"), .verbose=FALSE) %exec% {
      if(is.null(collapse)) {
        # Row-wise indicator: does any searched column match the code set?
        diag_coll <- d[, any(.SD %in% unlist(codes_to_find[i])), .SDcols = code, by=1:nrow(d)]
        diag_coll$nrow <- NULL
        data.table::setnames(diag_coll, "V1", names(codes_to_find[i]))
        diag_coll
      } else {
        # Mark matching rows, then aggregate per `collapse` group.
        d[, names(codes_to_find[i]) := any(.SD %in% unlist(codes_to_find[i])), .SDcols = code, by=1:nrow(d)]
        ID_dt <- unique(d[, collapse, with = FALSE]) # Get the full set of IDs
        # Earliest or latest occurrence time per group.
        if(time_type == "earliest") {
          diag_coll <- d[, .(var_time = min(get(code_time))), by=c(collapse, names(codes_to_find[i]))]
        } else {
          diag_coll <- d[, .(var_time = max(get(code_time))), by=c(collapse, names(codes_to_find[i]))]
        }
        diag_coll <- diag_coll[get(names(codes_to_find[i]))] # Drop negative cases
        # Re-attach all IDs so groups without a match are kept (as NA -> FALSE).
        diag_coll <- data.table::merge.data.table(ID_dt, diag_coll, by = collapse, all.x = TRUE, all.y = FALSE)
        diag_coll[[names(codes_to_find[i])]][is.na(diag_coll[[names(codes_to_find[i])]])] <- FALSE
        data.table::setnames(diag_coll, "var_time", paste0("time_", names(codes_to_find[i])))
        diag_coll
      }
    }
    # Tear down the parallel backend if one was created.
    if(exists("cl") & nThread>1) {parallel::stopCluster(cl)}
    if(is.null(collapse)) { # Non-collapsed: append indicators to original data
      result <- cbind(d, result)
    }
    if(!is.null(collapse) & length(codes_to_find)>1) {
      # cbind repeated the ID column for every code set; every third column
      # starting at 4 is a duplicate ID and is dropped.
      result[, seq(4, dim(result)[2], 3)] <- NULL
    }
    return(result)
  } else { # No codes requested: nothing to compute
    if(exists("cl") & nThread>1) {parallel::stopCluster(cl)}
    return(d)
  }
}
f2de2bc0e4f646b4efd2e793b2a8170914d151de
0b1e68824e5f5ca9e4353a35c40843cf7827d4ea
/Modified/M3_005.R
53295cc7f3a76f953f558c72901a0a94d128c403
[ "MIT" ]
permissive
Gpower01/WQU_Econometrics
e199d365e1966db29ba3f7957a45178fce3c8813
f9a310c98f0665741b36d99dd3ce5aa8f85ab8a3
refs/heads/main
2023-04-13T12:28:08.392780
2021-04-26T16:27:17
2021-04-26T16:27:17
352,192,713
1
0
null
null
null
null
UTF-8
R
false
false
17,656
r
M3_005.R
# M3_005.R — Econometrics assignment script.
# Downloads US Treasury yields (FRED) and two ETFs (GLD gold, UKOG.L equity)
# for Oct-Nov 2019, then: computes log returns and summary statistics, fits a
# Nelson-Siegel yield curve, ARMA price models, GARCH(1,1) volatility models,
# and a Pearson correlation between the two ETFs.
# NOTE(review): install.packages() calls at the top should normally not live
# in a script; run once interactively instead.
install.packages("XML")
install.packages("YieldCurve")
library(quantmod)

# 1.1 Obtain Treasury yield data (FRED constant-maturity series)
t2yr = getSymbols(Symbols = "DGS2", src = "FRED", auto.assign = FALSE)
t2yr = t2yr["2019-10/2019-11"]
t2yr
# NOTE(review): this plot is duplicated and its title says "10-Year" while
# plotting the 2-year series — confirm intended title.
plot(x = index(t2yr), y = t2yr$DGS2, xlab = "Date", ylab = "Yield (%)", type = "l", col = "red", main = "10-Year US Treasury Yields")
plot(x = index(t2yr), y = t2yr$DGS2, xlab = "Date", ylab = "Yield (%)", type = "l", col = "red", main = "10-Year US Treasury Yields")
t3yr = getSymbols(Symbols = "DGS3", src = "FRED", auto.assign = FALSE)
t3yr = t3yr["2019-10/2019-11"]
t5yr = getSymbols(Symbols = "DGS5", src = "FRED", auto.assign = FALSE)
t5yr = t5yr["2019-10/2019-11"]
t7yr = getSymbols(Symbols = "DGS7", src = "FRED", auto.assign = FALSE)
t7yr = t7yr["2019-10/2019-11"]
t10yr = getSymbols(Symbols = "DGS10", src = "FRED", auto.assign = FALSE)
t10yr = t10yr["2019-10/2019-11"]
t30yr = getSymbols(Symbols = "DGS30", src = "FRED", auto.assign = FALSE)
t30yr = t30yr["2019-10/2019-11"]
plot( x = index( t3yr ), y = t3yr$DGS3 )
plot( x = index( t5yr ), y = t5yr$DGS5 )
plot( x = index( t7yr ), y = t7yr$DGS7 )
plot( x = index( t10yr ), y = t10yr$DGS10 )
plot( x = index( t30yr ), y = t30yr$DGS30 )

# 1.2 import GLD ETF (gold) daily prices from Yahoo
getSymbols(Symbols = "GLD", from = "2019-10-01", to = "2019-11-30" , src = "yahoo")
Gld_Prices <- GLD$GLD.Close
plot( x = index( GLD ), y = Gld_Prices )

# 1.3 import my ETF
# U.K Oil and Gas Investment PLC (UKOG.L)
getSymbols(Symbols = "UKOG.L", from = "2019-10-01", to = "2019-11-30" , src = "yahoo")
UKOG.L
Ukog_prices <- UKOG.L$UKOG.L.Close
plot( x = index( UKOG.L ), y = Ukog_prices )

# 2.1 calculate the log returns (diff of log prices; first value is NA and
# is dropped with [-1])
GLD$GLD.Close
gldlog = diff( log( GLD$GLD.Close ) )
gldlog
#gldlog2 = na.omit( gldlog )
gldlog2 = gldlog[-1]
gldlog2
plot(gldlog2)
UKOG.L$UKOG.L.Close
ukoglog = diff( log( UKOG.L$UKOG.L.Close ) )
ukoglog
#ukoglog2 = na.omit(ukoglog )
ukoglog2 = ukoglog[-1]
ukoglog2
plot(ukoglog2)

# 3.1 calculate benchmark security average yield for Oct and Nov
t2yr = na.omit( t2yr )
t2yravr1 = mean( t2yr$DGS2["2019-10"] )
t2yravr1
t2yravr2 = mean( t2yr$DGS2["2019-11"] )
t2yravr2
t3yr = na.omit( t3yr )
t3yravr1 = mean( t3yr$DGS3["2019-10"] )
t3yravr1
t3yravr2 = mean( t3yr$DGS3["2019-11"] )
t3yravr2
t5yr = na.omit( t5yr )
t5yravr1 = mean( t5yr$DGS5["2019-10"] )
t5yravr1
t5yravr2 = mean( t5yr$DGS5["2019-11"] )
t5yravr2
t7yr = na.omit( t7yr )
t7yravr1 = mean( t7yr$DGS7["2019-10"] )
t7yravr1
t7yravr2 = mean( t7yr$DGS7["2019-11"] )
t7yravr2
t10yr = na.omit( t10yr )
t10yravr1 = mean( t10yr$DGS10["2019-10"] )
t10yravr1
t10yravr2 = mean( t10yr$DGS10["2019-11"] )
t10yravr2
t30yr = na.omit( t30yr )
t30yravr1 = mean( t30yr$DGS30["2019-10"] )
t30yravr1
t30yravr2 = mean( t30yr$DGS30["2019-11"] )
t30yravr2

# 3.2 average price of gold ETF
gldavr1 = mean( GLD$GLD.Close["2019-10"])
gldavr1
gldavr2 = mean( GLD$GLD.Close["2019-11"])
gldavr2

# 3.3 average price of equity ETF
ukogavr1 = mean( UKOG.L$UKOG.L.Close["2019-10"])
ukogavr1
ukogavr2 = mean( UKOG.L$UKOG.L.Close["2019-11"])
ukogavr2

# 3.4 std of benchmark security yields for Oct and Nov
t2yr = na.omit( t2yr )
t2yrsd1 = sd( t2yr$DGS2["2019-10"] )
t2yrsd1
t2yrsd2 = sd( t2yr$DGS2["2019-11"] )
t2yrsd2
t3yr = na.omit( t3yr )
t3yrsd1 = sd( t3yr$DGS3["2019-10"] )
t3yrsd1
t3yrsd2 = sd( t3yr$DGS3["2019-11"] )
t3yrsd2
t5yr = na.omit( t5yr )
t5yrsd1 = sd( t5yr$DGS5["2019-10"] )
t5yrsd1
t5yrsd2 = sd( t5yr$DGS5["2019-11"] )
t5yrsd2
t7yr = na.omit( t7yr )
t7yrsd1 = sd( t7yr$DGS7["2019-10"] )
t7yrsd1
t7yrsd2 = sd( t7yr$DGS7["2019-11"] )
t7yrsd2
t10yr = na.omit( t10yr )
t10yrsd1 = sd( t10yr$DGS10["2019-10"] )
t10yrsd1
t10yrsd2 = sd( t10yr$DGS10["2019-11"] )
t10yrsd2
t30yr = na.omit( t30yr )
t30yrsd1 = sd( t30yr$DGS30["2019-10"] )
t30yrsd1
t30yrsd2 = sd( t30yr$DGS30["2019-11"] )
t30yrsd2

# 3.5 std price of gold ETF
gldsd1 = sd( GLD$GLD.Close["2019-10"])
gldsd1
gldsd2 = sd( GLD$GLD.Close["2019-11"])
gldsd2

# 3.6 std price of equity ETF
ukogsd1 = sd( UKOG.L$UKOG.L.Close["2019-10"])
ukogsd1
ukogsd2 = sd( UKOG.L$UKOG.L.Close["2019-11"])
ukogsd2

# 4.1 graph 6 benchmarks on one set of axes
plot( x = index( t2yr ), y = t2yr$DGS2, type = "l", col = "black", xlab = "Time", ylab = "US Treasury yields", ylim = c(0.5,2.5) )
lines( x = index( t3yr ), y = t3yr$DGS3, type = "l", col = "red")
lines( x = index( t5yr ), y = t5yr$DGS5, type = "l", col = "blue")
lines( x = index( t7yr ), y = t7yr$DGS7, type = "l", col = "gray")
lines( x = index( t10yr ), y = t10yr$DGS10, type = "l", col = "purple")
lines( x = index( t30yr ), y = t30yr$DGS30, type = "l", col = "green")
legend( "bottom", legend=c("2 year", "3 year", "5 year", "7 year", "10 year", "30 year"), lty=1, col=c("black","red", "blue","gray","purple","green"), ncol = 3)

# 4.2 graph gold ETF
plot( x = index( GLD ), y = GLD$GLD.Close, type = "l", col = "black", xlab = "Time", ylab = "GLD ETF price" )

# 4.3 graph equity ETF on the same plot (second y-axis on the right via
# par(new = TRUE) overlay)
plot( x = index( GLD ), y = GLD$GLD.Close, type = "l", col = "black", xlab = "Time", ylab = "GLD ETF price" )
par(mar=c(5, 4, 4, 6) + 0.1)
par(new=TRUE)
plot( x = index( UKOG.L ), y = UKOG.L$UKOG.L.Close, type = "l", col = "red", xlab = "", ylab = "", axes=FALSE)
mtext("UKOG.L ETF price",side=4,col="red",line=4)
axis(4, ylim=c(0,7000), col="red",col.axis="red",las=1)
legend("topright",legend=c("GLDF","UKOG.L ETF"), text.col=c("black","red"),col=c("black","red"), lty=1, cex = 0.75)

# 5.1 Nelson-Siegel yield-curve fit
# Equation : https://en.wikipedia.org/wiki/Fixed-income_attribution
# Equation : y_t(tau) = beta_{0t} + beta_{1t} (1 - exp(-lambda tau)) / (lambda tau)
#            + beta_{2t} ( (1 - exp(-lambda tau)) / (lambda tau) - exp(-lambda tau) )
library(xts)
library(zoo)
library(YieldCurve)
tyr = cbind( t2yr$DGS2, t3yr$DGS3, t5yr$DGS5, t7yr$DGS7, t10yr$DGS10, t30yr$DGS30)
tyr
maturity.tyr = c( 2, 3, 5, 7, 10, 30 )
NSParameters <- Nelson.Siegel( rate= tyr, maturity=maturity.tyr )
NSParameters
nsp_y = NSrates( NSParameters, maturity.tyr)
nsp_y

# 5.2 Fit yield curve for Oct: observed monthly-average yields (points)
# versus the fitted Nelson-Siegel curve (line)
tyr_avr1 = c( t2yravr1,t3yravr1,t5yravr1,t7yravr1,t10yravr1,t30yravr1 )
nsp_oct = c( mean(nsp_y$X2["2019-10"]),mean(nsp_y$X3["2019-10"]),mean(nsp_y$X5["2019-10"]),mean(nsp_y$X7["2019-10"]),mean(nsp_y$X10["2019-10"]),mean(nsp_y$X30["2019-10"]) )
plot( x = maturity.tyr, y = tyr_avr1, type = "b" , col = "black", xlab = "Time", ylab = "US Treasury yield" )
par(new=TRUE)
plot( x = maturity.tyr, y = nsp_oct, type = "l", col = "red", xlab = "Time", ylab = "US Treasury yield" )
legend("topleft",legend=c("US Treasury average Oct","Nelson-Siegel Fit"), text.col=c("black","red"),col=c("black","red"), lty=1, cex = 0.75)

# 5.3 Fit yield curve for Nov
tyr_avr2 = c( t2yravr2,t3yravr2,t5yravr2,t7yravr2,t10yravr2,t30yravr2 )
nsp_Nov = c( mean(nsp_y$X2["2019-11"]),mean(nsp_y$X3["2019-11"]),mean(nsp_y$X5["2019-11"]),mean(nsp_y$X7["2019-11"]),mean(nsp_y$X10["2019-11"]),mean(nsp_y$X30["2019-11"]) )
plot( x = maturity.tyr, y = tyr_avr2, type = "b" , col = "black", xlab = "Time", ylab = "US Treasury yield" )
par(new=TRUE)
plot( x = maturity.tyr, y = nsp_Nov, type = "l", col = "red", xlab = "Time", ylab = "US Treasury yield" )
legend("topleft",legend=c("US Treasury average Nov","Nelson-Siegel Fit"), text.col=c("black","red"),col=c("black","red"), lty=1, cex = 0.75)

# 5.4 Compare Oct and Nov fitted parameters (monthly means of beta0/1/2, lambda)
nsp_para_oct = c( mean( NSParameters$beta_0["2019-10"] ), mean(NSParameters$beta_1["2019-10"]), mean(NSParameters$beta_2["2019-10"]),mean(NSParameters$lambda["2019-10"]) )
nsp_para_nov = c( mean( NSParameters$beta_0["2019-11"] ), mean(NSParameters$beta_1["2019-11"]), mean(NSParameters$beta_2["2019-11"]),mean(NSParameters$lambda["2019-11"]) )
nsp_para_oct
nsp_para_nov
?Nelson.Siegel
# discussion
# b0 : is interpreted as the long run levels of interest rates (the loading is 1, it is a constant that does not decay).
# b1 : is the short-term component (it starts at 1, and decays monotonically and quickly to 0);
# b2 : is the medium-term component (it starts at 0, increases, then decays to zero);
# lambda : is the decay factor: large values produce slow decay and can better fit the curve at long maturities,
# while small values produce fast decay and can better fit the curve at short maturities;
# When we look at the overall yield curve, we can confirm that the yield curve is an increasing S curve.
# This means that overheating of the economy is expected and it will result in inflation.
# In the case of inflation, it is better to sell the US treasury.
# This is why the b1 and b2 are negative.
# The selling induces the increase of yield, this is why the yield curve is increasing.
# The fitting parameters of Nelson-Siegel show consistent expectations.
# b0 is much larger than b1 and b2, which means the long run interest rate is much larger than short, mid-term.
# But lambda is small (1/5) so that the fit is better for the short maturities.
# This can be explained by the data we collect. We picked many short maturities, yet the long maturities are only 10, 30 years.
# When we compare the Oct and Nov fit result, all of Oct b0,2 < Nov b0,2 although Oct b1 > Nov b1.
# This means that for short and long term interest rate, Oct is lower than Nov.
# But for mid term interest, Oct is higher than Nov.

# 6.0 modeling prices (ARMA fits on raw price levels)
library(tseries)
library(ggplot2)
# For separate months of October and November
# import GLD ETF October
getSymbols(Symbols = "GLD", from = "2019-10-01", to = "2019-10-31" , src = "yahoo")
Oct_Gld_Prices <- GLD$GLD.Close
plot( x = index( GLD ), y = Oct_Gld_Prices )
# import GLD ETF November
getSymbols(Symbols = "GLD", from = "2019-11-01", to = "2019-11-30" , src = "yahoo")
Nov_Gld_Prices <- GLD$GLD.Close
plot(x = index( GLD ), y = Nov_Gld_Prices )
# import my UKOG Equity ETF October
getSymbols(Symbols = "UKOG.L", from = "2019-10-01", to = "2019-10-31" , src = "yahoo")
UKOG.L
Oct_Ukog_prices <- UKOG.L$UKOG.L.Close
plot( x = index( UKOG.L ), y = Oct_Ukog_prices )
# import my UKOG Equity ETF November
getSymbols(Symbols = "UKOG.L", from = "2019-11-01", to = "2019-11-30" , src = "yahoo")
UKOG.L
Nov_Ukog_prices <- UKOG.L$UKOG.L.Close
plot( x = index( UKOG.L ), y = Nov_Ukog_prices )
# ARMA model for GLD prices October (Modeling prices)
# ACF & PACF tests for GLD October & November
acf(Oct_Gld_Prices)
pacf(Oct_Gld_Prices)
acf(Nov_Gld_Prices)
pacf(Nov_Gld_Prices)
# stationarity test (Augmented Dickey-Fuller)
adf.test(Oct_Gld_Prices)
adf.test(Nov_Gld_Prices)
# 6.1 ARMA model for GLD ETF prices for October (AR(1))
Gld_oct_prices <- arima(Oct_Gld_Prices, order = c(1,0,0))
summary(Gld_oct_prices)
Box.test(Gld_oct_prices$residuals, lag = 1)
# 6.2 ARMA model for GLD ETF prices for November
Gld_nov_prices <- arima(Nov_Gld_Prices, order = c(1, 0,0))
summary(Gld_nov_prices)
Box.test(Gld_nov_prices$residuals, lag = 1)
# ACF and PACF for UKOG Equity ETF October & November
acf(Oct_Ukog_prices)
pacf(Oct_Ukog_prices)
acf(Nov_Ukog_prices)
pacf(Nov_Ukog_prices)
# stationarity tests
adf.test(Oct_Ukog_prices)
adf.test(Nov_Ukog_prices)
# 6.3 ARMA model for UKOG ETF Prices for October
UKog_oct_prices <- arima(Oct_Ukog_prices, order = c(1,0,0))
summary(UKog_oct_prices)
Box.test(UKog_oct_prices$residuals, lag = 1)
# 6.4 ARMA model for UKOG ETF prices for November
UKog_nov_prices <- arima(Nov_Ukog_prices, order = c(1,0,0))
summary(UKog_nov_prices)
Box.test(UKog_nov_prices$residuals, lag = 1)
# 6.5 Which model performs best
# The results indicate that none of the models shows to be statistically significant implying that
# the model is not parsimonious. However, ARMA model estimation for the month of
# October appears to perform better than the ARMA models for the month of November
# for both GLD and UKOG prices. The coefficient for the month of October appears to be generally lower
# than the coefficient of November models. Also the AIC for the month of October are generally lower than
# AIC in November. So comparing both the coefficient and AIC, it suggests that the UKOG ETF ARMA October model
# appears to perform better with coefficient of 0.5435 and aic = -83.41.
# 6.6 What are the significant changes? if any
# The significant changes observed in the models include changes in the coefficient and aic. The GLD October ARMA model
# indicates a coefficient of 0.4101 and aic of 52.54, while the GLD ARMA model for November indicated
# a coefficient of 0.8936 and aic of 59.3. Similarly, the ARMA model of UKOG October prices indicates a coefficient of 0.5435
# and aic of 83.41 while the ARMA model of November UKOG prices indicated coefficient of 0.8851 and aic of 93.28.
# The ACF test indicates UKOG October prices autocorrelation of about 0.5 that rapidly decayed.

# 7.0 Modelling Volatility
# 7.1 GLD ETF high minus low for October
# NOTE(review): these are whole-month highs/lows (max/min of closes over the
# month), not daily high-minus-low ranges as the original comments suggested.
Gld_Oct_high <- max(Oct_Gld_Prices)
print(Gld_Oct_high)
Gld_Oct_low <- min(Oct_Gld_Prices)
print(Gld_Oct_low)
# October high minus low
Gld_oct_diff <- Gld_Oct_high - Gld_Oct_low
print(Gld_oct_diff)
# GLD ETF high minus low for November
Gld_nov_high <- max(Nov_Gld_Prices)
print(Gld_nov_high)
Gld_nov_low <- min(Nov_Gld_Prices)
print(Gld_nov_low)
# November high minus low
Gld_nov_diff <- Gld_nov_high - Gld_nov_low
print(Gld_nov_diff)
# GLD October average
Gld_Oct_avg <- mean(Oct_Gld_Prices)
print(Gld_Oct_avg)
# GLD November average
Gld_Nov_avg <- mean(Nov_Gld_Prices)
print(Gld_Nov_avg)
# 7.2 STD for GLD ETF Returns for October and November
# Log Returns for GLD October prices
gldlog_oct <- diff(log(Oct_Gld_Prices))
gldlog2_oct <- gldlog_oct[-1]
print(gldlog2_oct)
# Log Returns for GLD November prices
gldlog_nov <- diff(log(Nov_Gld_Prices))
gldlog2_nov <- gldlog_nov[-1]
print(gldlog2_nov)
# STD for GLD October and November log returns
gld_oct_sd = sd(gldlog2_oct)
print(gld_oct_sd)
gld_nov_sd = sd(gldlog2_nov)
print(gld_nov_sd)
# 7.3 Volatility GARCH (1,1) model for October GLD Return
library(rugarch)
gld_gurch <- ugarchspec(mean.model = list(armaOrder = c(0,0)), variance.model = list(model = "sGARCH"), distribution.model = 'norm')
gurcg_gld1 <- ugarchfit(spec =gld_gurch, data = gldlog2_oct)
print(gurcg_gld1)
# Volatility GARCH model for November GLD Return
gurcg_gld2 <- ugarchfit(spec =gld_gurch, data = gldlog2_nov)
print(gurcg_gld2)
# 7.4 What are the significant changes? if any
# GLD ETF GARCH model for October prices indicated a mean value of 0.000407
# and a p-value of 0.737075 while the GLD ETF GARCH model for November prices
# indicated a negative mean value of -0.001188 and a smaller p-value of 0.386699.

# 7.5 UKOG prices high minus low for October and November, average
plot(Oct_Ukog_prices)
plot(Nov_Ukog_prices)
UKog_oct_high <- max(Oct_Ukog_prices)
print(UKog_oct_high)
Ukog_oct_low <- min(Oct_Ukog_prices)
print(Ukog_oct_low)
# October UKOG High minus Low
UKog_oct_diff <- UKog_oct_high - Ukog_oct_low
print(UKog_oct_diff)
# Average October UKOG prices
Ukog_oct_avg <- mean(Oct_Ukog_prices)
print(Ukog_oct_avg)
# November UKOG high minus low
Ukog_nov_high <- max(Nov_Ukog_prices )
print(Ukog_nov_high)
Ukog_nov_low <- min(Nov_Ukog_prices )
print(Ukog_nov_low)
# High minus low UKOG Nov prices
Ukog_nov_diff <- Ukog_nov_high - Ukog_nov_low
print(Ukog_nov_diff)
# Average November UKOG prices
Ukog_nov_avg <- mean(Nov_Ukog_prices )
print(Ukog_nov_avg)
# 7.6 returns and STD for UKOG ETF October and November Prices
# log returns for UKOG ETF October prices
ukoglog_oct <- diff( log(Oct_Ukog_prices) )
print(ukoglog_oct)
#ukoglog2 = na.omit(ukoglog )
ukoglog2_oct = ukoglog_oct[-1]
print(ukoglog2_oct)
plot(ukoglog2_oct)
# log returns for UKOG ETF November prices
ukoglog_nov <- diff( log(Nov_Ukog_prices ) )
print(ukoglog_nov)
#ukoglog2 = na.omit(ukoglog )
ukoglog2_nov = ukoglog_nov[-1]
print(ukoglog2_nov)
plot(ukoglog2_nov)
# STD for UKOG October and November log returns
ukog_oct_sd = sd(ukoglog2_oct)
print(ukog_oct_sd)
ukog_nov_sd = sd(ukoglog2_nov)
print(ukog_nov_sd)
# 7.7 Volatility GARCH (1,1) model for October UKOG.L Return
ukog_gurch <- ugarchspec(mean.model = list(armaOrder = c(0,0)), variance.model = list(model = "sGARCH"), distribution.model = 'norm')
gurcg_ukog1 <- ugarchfit(spec =ukog_gurch , data = ukoglog2_oct)
print(gurcg_ukog1)
# Volatility GARCH model for November UKOG Return
gurcg_ukog2 <- ugarchfit(spec =ukog_gurch, data = ukoglog2_nov)
print(gurcg_ukog2)
# 7.8 What are the significant changes? if any
# The UKOG GARCH model for October prices indicated a mean value of 0.001098 and
# p-value of 0.86797 (suggesting that it is not statistically significant) while
# the GARCH model for November prices indicated a much better model with a mean value
# of -0.005280 and p-value of 0.25379.

# 8.0 Correlation
library(quantmod)
# 8.1 compute pearson correlation between gold and equity
getSymbols(Symbols = "GLD", from = "2019-09-30", to = "2019-11-30" , src = "yahoo")
GLD$GLD.Close
plot( x = index( GLD ), y = GLD$GLD.Close )
getSymbols(Symbols = "UKOG.L", from = "2019-09-30", to = "2019-11-30" , src = "yahoo")
UKOG.L$UKOG.L.Close
plot( x = index( UKOG.L ), y = UKOG.L$UKOG.L.Close )
gld_oct = GLD["2019-10"]
gld_oct
length( gld_oct$GLD.Close )
ukog_oct = UKOG.L["2019-10"]
length( ukog_oct$UKOG.L.Close )
cor.test(gld_oct$GLD.Close, ukog_oct$UKOG.L.Close, method=c("pearson"))
gld_nov = GLD["2019-11"]
gld_nov$GLD.Close
length( gld_nov$GLD.Close )
ukog_nov = UKOG.L["2019-11"]
ukog_nov$UKOG.L.Close
length( ukog_nov$UKOG.L.Close )
# NOTE(review): row 20 is dropped from ukog_nov to equalise series lengths
# (UK and US trading calendars differ) — confirm it drops the intended date.
length(ukog_nov[-20,]$UKOG.L.Close)
cor.test(gld_nov$GLD.Close, ukog_nov[-20,]$UKOG.L.Close, method=c("pearson"))
2a53c6212ec5b3bbdd40dc4cc08f5830610d3c6f
832669387fa12c1a02bad7d47de5d0aa57b032be
/tests/testthat/test_transform_log.R
9b96c9943c0c643ca43f0d0a9dff92f82a01adee
[]
no_license
zoltanszebenyi/datacleaner
5059db164c7970b9bc185ef4ebb9a37508a486ed
ed0e383809b4c1d1b182489ebcd36e0d9a87f7e1
refs/heads/master
2020-05-18T14:04:08.747851
2019-05-01T19:18:42
2019-05-01T19:18:42
184,459,696
0
0
null
2019-05-01T18:10:16
2019-05-01T18:10:16
null
UTF-8
R
false
false
174
r
test_transform_log.R
context("No negative values")
library(datacleaner)

# transform_log() must reject vectors containing negative values
# with the documented error message.
test_that("There are no negative values in input", {
  bad_input <- c(1, 2, -1)
  expect_error(transform_log(bad_input), "input can't be negative")
})
ac80d7bd7a339f7f47618e111a646214c09a2426
b86d6bf4662ad2bbe2083ea0915ff50f2f455e60
/R/İstatistikVeVeriBilimi/9-)Tidyverse/7-)Summarize.R
f64e398e3461d76a8b30a589067f28a6bd18e093
[]
no_license
DincerDogan/VeriBilimi
a8f04afcfd1a2419da0be72b6d780ffe058c42d6
1632b42e4d1ce744bb1382611022b79265c7896f
refs/heads/master
2023-06-06T13:59:35.726325
2021-06-17T11:43:46
2021-06-17T11:43:46
null
0
0
null
null
null
null
UTF-8
R
false
false
193
r
7-)Summarize.R
# Summarize Sepal.Length in the iris data: mean, median and standard
# deviation in a single summarise() call.
# Fix: refer to the column as bare `Sepal.Length` instead of
# `iris$Sepal.Length` — summarise() evaluates expressions inside the data
# (data masking), and re-reaching into the global `iris` would silently
# ignore any grouping or prior filtering of the piped-in data.
summarise(
  iris,
  ORTALAMA = mean(Sepal.Length),          # mean
  MEDYAN = median(Sepal.Length),          # median
  STANDART.SAPMA = sd(Sepal.Length)       # standard deviation
)
# summarise() lets us compute several named summary statistics at once.
89dc5b691460c859681a1be197a31cc163f5a0b5
3d7d08c45b1405297a895b74f543634eac13c2af
/base_scripts/nullnetworks.R
749ce8d2ea738db051c62aa14c07d5f369b79f09
[]
no_license
anshuman21111/ant-colony-networks
ed38d96cc907e81292a1701cc5d9995f8ddf14d1
40e1a46d66d7af78dd01590a31bef82c4393ce0a
refs/heads/master
2022-04-30T05:54:02.002730
2022-04-20T15:16:05
2022-04-20T15:16:05
246,055,637
0
0
null
null
null
null
UTF-8
R
false
false
3,499
r
nullnetworks.R
#null networks library("VertexSort") #strength Antsortstrength=list() for(i in 1:6){ set.seed(i) X11=paste("Colony", i, sep = "") print(X11) Antg=dpr(Antgr[[X11]][[1]], viteration_no=1000, vparallel = FALSE, vcpus = 1) Antgr1=Antg[[1]] A=strength(Antgr1, vids = V(Antgr1), loops = TRUE) X2=cbind(as.character(names(A)), as.numeric(A)) colnames(X2)=c("AntID", "Day1") X2=as.data.frame(X2) for (j in 2:41) { Antg=dpr(Antgr[[X11]][[j]], viteration_no=1000, vparallel = FALSE, vcpus = 1) Antgr1=Antg[[1]] A=strength(Antgr1, vids = V(Antgr1), loops = TRUE) X3=cbind(as.character(names(A)), as.numeric(A)) X3=as.data.frame(X3) colnames(X3)=c("AntID", paste("Day", j, sep = "")) X2=left_join(as.data.frame(X2), as.data.frame(X3), by="AntID") print(dim(X2)) } Antsortstrength[[X11]]=X2 X3= paste("~/Downloads/Ants/strength/sort/Colony",i,".csv",sep="") write.csv(X2, file=X3) } #betweenness Antsortbetween=list() for(i in 1:2){ set.seed(i) X11=paste("Colony", i, sep = "") print(X11) Antg=dpr(Antgr[[X11]][[1]], viteration_no=1000, vparallel = FALSE, vcpus = 1) Antgr1=Antg[[1]] A=betweenness(Antgr[[X11]][[1]], v = V(Antgr[[X11]][[1]]), directed = FALSE, nobigint = TRUE, normalized = FALSE) X2=cbind(as.character(names(A)), as.numeric(A)) colnames(X2)=c("AntID", "Day1") X2=as.data.frame(X2) for (j in 2:41) { Antg=dpr(Antgr[[X11]][[j]], viteration_no=1000, vparallel = FALSE, vcpus = 1) Antgr1=Antg[[1]] A=betweenness(Antgr[[X11]][[j]], v = V(Antgr[[X11]][[j]]), directed = FALSE, nobigint = TRUE, normalized = FALSE) X3=cbind(as.character(names(A)), as.numeric(A)) X3=as.data.frame(X3) colnames(X3)=c("AntID", paste("Day", j, sep = "")) X2=left_join(as.data.frame(X2), as.data.frame(X3), by="AntID") print(dim(X2)) } Antsortstrength[[X11]]=X2 X3= paste("~/Downloads/Ants/betweenness/sort/Colony",i,".csv",sep="") write.csv(X2, file=X3) } #bridgebetweenness Antsortbridbetween=list() for(i in 1:2){ set.seed(i) X11=paste("Colony", i, sep = "") print(X11) Antg=dpr(Antgr[[X11]][[1]], viteration_no=1000, 
vparallel = FALSE, vcpus = 1) Antgr1=Antg[[1]] library(networktools) nodea=list() for(i in 1:6){ X11=paste("Colony", i, sep = "") print(X11) communities00 = Antgr[[X11]][[1]] %>% cluster_louvain(weights = NULL) A00= bridge(Antgr[[X11]][[1]], communities00, useCommunities = "all",directed = NULL, nodes = NULL, normalize = FALSE) for (j in 2:41) { Antg=dpr(Antgr[[X11]][[j]], viteration_no=1000, vparallel = FALSE, vcpus = 1) Antgr1=Antg[[1]] for (j in 2:41) { communities = Antgr[[X11]][[j]] %>% cluster_louvain(weights = NULL) A= bridge(Antgr[[X11]][[j]], communities, useCommunities = "all",directed = NULL, nodes = NULL, normalize = FALSE) for (k in 1:5) { A00i=A00[k] Ai=A[k] namef=paste("A00i$`", namelist[k],"`", sep="") X2=cbind(as.character(names(A00i$namelist[k])), as.numeric(A00i)) colnames(X2)=c("AntID", "Day1") X2=as.data.frame(X2) X3=cbind(as.character(names(Ai)), as.numeric(Ai)) X3=as.data.frame(X3) colnames(X3)=c("AntID", paste("Day", j, sep = "")) nodea[[k]]=left_join(as.data.frame(X2), as.data.frame(X3), by="AntID") #print(dim(X2)) } } } Antsortstrength[[X11]]=X2 X3= paste("~/Downloads/Ants/betweennessbridge/sort/Colony",i,".csv",sep="") write.csv(X2, file=X3) }
ff72c70ad844d1c9b3647a3d5bb9790cbfc72e65
6c1ef8b6a83b51a360fc281d01c47f47376a2c3b
/Ver4_Text_Messsage_mining.R
269d000a147c243026c3a60b39b2da54613cd883
[]
no_license
aliz20/Text-Mining
1fbeb2be8c0924091823faffb584b554ee521cd0
8ca93034e3d9dd8499a41ae17bb002557bd2695d
refs/heads/master
2021-01-17T20:16:19.538829
2016-07-07T06:47:47
2016-07-07T06:47:47
62,772,181
0
0
null
2016-07-07T06:47:47
2016-07-07T03:31:52
R
UTF-8
R
false
false
9,839
r
Ver4_Text_Messsage_mining.R
# Ali Zahraei, 2016 #Text message mining. #THIS IS ANOTHER VERSION OF THIS CODE, MIGHT BE SLIGHTLY DIFFERENT THAN # WHAT I DISCUSSED.IT DOES ANALYTICS, GRAPHICS WAS DONE BY MATLAB. # Porbabilistic Model (Naive Bayes), and Deterministic Model (SVM- Linear & Radial Kernel] and Adaboost # This code reads a text file containg SMS txt messages, and # classify SMS to spam and NON-spam. # First column, data file is the classification ("Spams and Hams"). # Originally code was written for one of my project at UC-Irvine, # and it is majorly adapted recently for some new application. # Code has several major sections. #-------------------------------------------- cls <- function() cat(rep("\n",100)) cls() # ----Section 1------------------------------ #-------------------------------------------- #----Install and call library packages #-------------------------------------------- # Install required packages install.packages('e1071',dependencies=TRUE) library(e1071) #-------------------- install.packages("ada") library("ada") #-------------------- install.packages("tm") library("tm") #---Section 2----------------------------- #----------------------------------------- #----Function Definition #---------------------------------------- #--counter calculate frequency of each word within data set #Function definition , comparing each string with repeated words counter <- function(message,repeatedwords){ #convert to string splittedSMS<-strsplit(toString(message), "\\s+")[[1]] # vect is a seprate vector for each data entry, the length is equal to # number of repeated words vect = NULL vect <- c(rep(0,length(repeatedwords))) # counting frequency of the most repeated words # in the data poll for each SMS txt message. # Function will repeat seprately for each SMS. 
for (i in 1:length(repeatedwords)){ for (j in 1:length(splittedSMS)){ if (repeatedwords[i] == splittedSMS[j]){ vect[i] <- 1 # vect[i] <- vect[i]+1 } } } return(vect) } #--Define naive Bayes probability Function------------ prob <- function(x,stat) { probability=NULL; if (x==0) { probability[1] <- stat[1,1] probability[2] <- stat[1,2] } else { probability[1] <- stat[2,1] probability[2] <- stat[2,2]} return(probability) } #--Section 3---------------------------------- #--------------------------------------------- #---Loading data and few simple statistics #--------------------------------------------- print("Loading the SMS data - Spams and Hams") #Import the data from txt file / data downloaded from #---Downloaded from http://archive.ics.uci.edu/ml/datasets/SMS+Spam+Collection #SMSdata <-read.csv("C:/AliZ/Code/R/Sample_Project/data/SMSSpamCollection.txt", header = TRUE, sep = "\t") SMSdata <-read.csv("SMSSpamCollection.txt", header = TRUE, sep = "\t") colnames(SMSdata) <- c("Spam_OR_Ham","SMS_Content") #tempdata<-SMSdata # Replace ham and Spam with 0 and 1 (logical array) #tempdata$Spam_OR_Ham[tempdata$Spam_OR_Ham=="spam"]<- 0 #tempdata$Spam_OR_Ham[tempdata$Spam_OR_Ham=="ham"]<- 1 #tempdata$Class=="ham" #Simple Statistics Extracted from Original Data Set count <- as.numeric(SMSdata$Spam_OR_Ham=="ham") print ("The ratio of NON-SPAM SMS %"); 100*mean(count) print ("The Varianc of data"); var(count) # Section 4-------------------------------- #------------------------------------------ #---Sampling & Subsetting --> Training and Testing Data Set (two-fold cross validation)---- #------------------------------------------ print ("Sampling & Make two sets of data, Trainig 70%, and Testing 30%"); # tempdata<-SMSdata n<- nrow(tempdata) #set.seed(100) # ind <- sample(1:n) trainval <- ceiling(n* .7) testval<- ceiling(n * .3) # trainData = NULL; trainData <- tempdata[ind[1:trainval],] testData =NULL; testData <- tempdata[ind[(trainval+1):n],] print("Training Data Set Size 
is"); dim(trainData); print("Testing Data Set Size is"); dim(testData); # Section 5--------------------------------- #------------------------------------------- #--Feature extraction from text message (tm toolbox)---- #------------------------------------------ #(tm) Package # Words with highest frequencies. # We know highly used words, we use this frequency to # train vector space model # corpus(x, reader), VectorSource: identify the character entity of data trsmses<-Corpus(VectorSource(trainData[,2])) #transformation :How to modify corpus #Extra whitespace is eliminated by trsmses<-tm_map(trsmses, stripWhitespace) #Conversion to lower case by trsmses<-tm_map(trsmses, tolower) #Removal of stopwords by trsmses<-tm_map(trsmses, removeWords, stopwords("english")) dtm <- DocumentTermMatrix(trsmses)  # find words with min frequency of (80) repeatedwords<-findFreqTerms(dtm, 80) repeatedwords <- repeatedwords[4:length(repeatedwords)] #Section 6---------------------------------- #------------------------------------------- #----How many of popular words are repeated whithin each text message?---- #------------------------------------------ #vectorized training data set trdata=NULL #vectorized test data set tedata=NULL #Count the number of repeated words in any data instance #train data for (i in 1:length(trainData[,2])){ if (trainData[i,1] == "ham") { trdata = rbind(trdata,c(1,counter(trainData[i,2],repeatedwords))) } else { trdata = rbind(trdata,c(0,counter(trainData[i,2],repeatedwords))) } } #test data for (i in 1:length(testData[,2])){ if (testData[i,1] == "ham") { tedata = rbind(tedata,c(1,counter(testData[i,2],repeatedwords))) } else { tedata = rbind(tedata,c(0,counter(testData[i,2],repeatedwords))) } } #------------------------------------------- #------------------------------------------- #---PREDICTIVE MODELS----------------------- #_____-----_____----------____________------ #---Section 7------------------------------- #------------Naive 
Bayes-------------------- #-------Lebel Identification ------------- lable.sum <- unique(sort(trdata[,1])) #----------Training Model--------------- stat.data.class0=NULL;stat.data.class1=NULL # for (i in 1:length(lable.sum)){ #Binary labels 1 is first label such=successs for 1 data.class0 <- trdata[trdata[,1]==lable.sum[1],] stat.data.class0 <- (colSums(data.class0[,2:(dim(data.class0)[2])]==lable.sum[1]))/nrow(data.class0) #nrow(trdata) stat.data.class0 <- rbind(stat.data.class0,(colSums(data.class0[,2:(dim(data.class0)[2])]==lable.sum[2]))/nrow(data.class0)) data.class1 <- trdata[trdata[,1]==lable.sum[2],] stat.data.class1 <- (colSums(data.class1[,2:(dim(data.class1)[2])]==lable.sum[1]))/nrow(data.class1) stat.data.class1 <- rbind(stat.data.class1,(colSums(data.class1[,2:(dim(data.class1)[2])]==lable.sum[2]))/nrow(data.class1)) #--Testing-------------------- #--Prior Calculation---------- prior.p.set<- NULL for (i in 1:length(lable.sum)){ prior.p.set[i] <- length(trdata[trdata[,1]==lable.sum[i],1])/dim(trdata)[1] } #-------Posterior Calculation- likelihood.group0 = NULL;likelihood.group1= NULL;assigned.label=NULL; for (i in 1:nrow(tedata)){ for (j in 2:(ncol(tedata)-1)) { likelihood.group0 [j-1] <- prob(tedata[i,j],t(rbind(stat.data.class0[,j-1],stat.data.class1[,j-1])))[1] likelihood.group1 [j-1] <- prob(tedata[i,j],t(rbind(stat.data.class0[,j-1],stat.data.class1[,j-1])))[2] } evidence <- prior.p.set[1]*prod(likelihood.group0)+ prior.p.set[2]*prod(likelihood.group1) posterior <- NULL posterior[1] <- (prior.p.set[1]*prod(likelihood.group0))/evidence posterior[2] <- (prior.p.set[2]*prod(likelihood.group1))/evidence assigned.label[i] <- which.max(posterior[])-1 } #Contigency table tab0 <- table(assigned.label, true=tedata[,1]) print("Naive Bayes Contigency Table") classAgreement(tab0,match.names=FALSE) #---Section 8------------------------------- #----Adaboost------------------------------- adaptiveboost<-ada(x=trdata[,-1],y=trdata[,1],test.x=tedata[,-1], 
test.y=tedata[,1], loss="logistic", type="gentle", iter=100) summary(adaptiveboost) varplot(adaptiveboost) #------------------------------------------- #---Section 9------------------------------- #----SVM with Radial Kernel------------------ #Tunning SVM C & Gamma parameters----------- #Two main parameters, 0.1 <c < 10; 10e-6 < gamma < 0.1 tuned <- tune.svm (x=trdata[,-1], y=trdata[,1],gamma=10^(-6:-1),cost=10^(-1:1)) summary(tuned) # model1 <- svm(x=trdata[,-1], y=trdata[,1], kernel="radial",gamma=0.01,cost=1, type= "C") summary(model1) prediction1 <- predict(model1,tedata[,-1]) tab1 <- table(pred1=prediction1, true=tedata[,1]) print(" Radial kernel, gamma=0.1,cost=10") classAgreement(tab1) #---Section 10------------------------------- #----SVM with Linear Kernel------------------ #Tunning SVM C & Gamma parameters----------- #----------------------------------------------- model2 <- svm(x=trdata[,-1], y=trdata[,1], kernel="linear",gamma=0.01,cost=1, type= "C") summary(model2) prediction2 <- predict(model2,tedata[,-1]) tab2 <- table(pred2=prediction2, true=tedata[,1]) print(" Linear kernel, gamma=0.1,cost=10") classAgreement(tab2,match.names=FALSE)
0640a2e311fb0e081f9228aff723c5810e89e7ff
358d9bbf2cbc4ba049c293b6ccec1b7f93df9954
/fitting/mifSearch1_TmeanB.R
c5ef211d59a71a77217ef9c0c7b35445c2d15fd3
[]
no_license
pascualgroup/Malaria-highlands
de9c55d695c3e9bdb743a1167be83fd78eba2bd1
c8ae3fa8c76c31530add4562dc2ff103220b9c60
refs/heads/main
2023-02-27T12:25:55.546119
2021-01-27T15:45:39
2021-01-27T15:45:39
329,729,585
0
1
null
null
null
null
UTF-8
R
false
false
2,287
r
mifSearch1_TmeanB.R
rm(list=ls()) require(pomp) require(plyr) dat <- read.csv("DataCovar_1975-2000.csv",header=T) # read in data file with the malaria data and covariates x <- as.numeric(Sys.getenv("PBS_ARRAYID")) seqP <- seq(from = 1, to = 50000, by = 100) now.num <- seqP[x] y <- read.csv("parameters_TmeanB.csv") param <- as.numeric(y[now.num,]) param.names <- colnames(y) names(param) <- param.names source("poObject_TmeanB.R") for(cont in now.num:(now.num+99)){ for(i in 1:3){ seed <- ceiling(runif(1,min=1,max=2^30)) set.seed(seed) param <- as.numeric(y[cont,]) names(param) <- param.names cat(cont, i, "\n") tryCatch(mif2( po, Nmif = 50, #change from 10 to 50 start = param, Np = 1000, #change from 1000 to 15000 cooling.type="hyperbolic", cooling.fraction.50 = 0.5, rw.sd = rw.sd(muEI = 0.03, muIQ = 0.03, muIS = 0.03, muQS = 0.03, sigPRO = 0.03, sigOBS = 0.03, tau = 0.03, rho = 0.03, betaOUT = 0.03, bT4 = 0.03, bT6 = 0.03, b1 = 0.03, b2 = 0.03, b3 = 0.03, b4 = 0.03, b5 = 0.03, b6 = 0.03, q0 = 0.03, S_0 = 0.03, E_0 = 0.03, I_0 = 0.03, Q_0 = 0.03 , K_0 = 0.03, F_0 = 0.03), transform=TRUE), error = function(e) e) -> mifout if(length(coef(mifout)) > 0){ loglik.mif <- replicate(n=5,logLik(pfilter(po, params=coef(mifout),Np=1000,max.fail=500))) bl <- logmeanexp(loglik.mif,se=TRUE) loglik.mif.est <- bl[1] loglik.mif.se <- bl[2] cat(cont,loglik.mif.est,"\n") ### SAVE OUTPUT if(is.finite(loglik.mif.est)){ par.out <- coef(mifout) names(par.out) <- param.names if(file.exists("mifOutput_TmeanB.csv")){ write.table(t(as.matrix(c(cont,seed,par.out,loglik.mif.est,loglik.mif.se))), "mifOutput_TmeanB.csv", append = T, col.names=F, row.names=F, sep = ",") } else{ write.table(t(as.matrix(c(cont,seed,par.out,loglik.mif.est,loglik.mif.se))), "mifOutput_TmeanB.csv", append = T, col.names=c("run","seed", param.names, "loglik", "loglik.se"), row.names=F, sep = ",") } } } } }
7ec1d2f0c9ee81a26861a286bacfc1c3abe5a5cf
422e5b55c4a07e0c489d1eb9ef708345f67d641f
/scripts/samnet_models.R
a1e22dc0bea45ac1b60b3825400b984c271c55bd
[]
no_license
dev-vw/samnet
5ced6e3e379abce0f2c138bbdf61ec43796aeb1d
bc301f87c23a50ef0ea03920ecc5f1a0cd75d837
refs/heads/master
2021-04-05T23:41:35.070795
2018-03-20T20:49:19
2018-03-20T20:49:19
124,447,566
0
0
null
2018-03-08T21:00:14
2018-03-08T21:00:14
null
UTF-8
R
false
false
2,716
r
samnet_models.R
#### -------------------- #### PRELIMINARIES #### -------------------- # import the statnet library library(foreign) library(tidyverse) library(statnet) require(foreign) # sets the current working directory setwd("/Users/vaniawang/Google Drive/grad school/PhD UCSB/projects/msm_seattle/raw data/") # importing the .csv file and converting it to a tibble msmdata <- as.tibble(read.dta('msm_data.dta')) #### -------------------- #### DATA CLEANING #### -------------------- # makes a new race variable that combines race___1 to 99, and then # removes all race variables except "race" from the dataset ## msmdata <- ## mutate(msmdata, race = case_when( ## msmdata$race___1 == 'Checked' ~ 1, ## msmdata$race___2 == 'Checked' ~ 2, ## msmdata$race___3 == 'Checked' ~ 3, ## msmdata$race___4 == 'Checked' ~ 4, ## msmdata$race___5 == 'Checked' ~ 5, ## msmdata$race___6 == 'Checked' ~ 6, ## msmdata$race___88 == 'Checked' ~ 88, ## msmdata$race___99 == 'Checked' ~ 99, ## TRUE ~ 0)) %>% select(-matches('race___')) # removes the date_today and agecheck variable msmdata <- select(msmdata, -date_today) # removes the agecheck variable msmdata <- select(msmdata, -agecheck) # removes the age_incorrect variable msmdata <- select(msmdata, -age_incorrect) # making subsets of data ## msmdata_het <- msmdata %>% ## select(-matches(race___)) #### -------------------- #### Descriptive Statistics #### -------------------- # vector degree list of lifetime sexual partnerships with women, f_rel.life <- msmdata$lifetime_women[!is.na(msmdata$lifetime_women)] # number of msm who have ever had sex with women f_rel.msm_num <- sum(!is.na(msmdata$lifetime_women)) # number of women msm have ever had sex with, assuming no duplicates f_rel.f_num #### -------------------- #### MODELS #### -------------------- ## ----- ## Model 1: ERGM model for LIFETIME sexual relationships with women ## ----- # 146 is the number of men who have ever had sex with women in their # lifetime het.net <- network.initialize(272, directed = 
FALSE) het.net %v% 'sex' <- c(rep(0, 146), rep(1, 126)) het.deg <- msmdata$lifetime_women[!is.na(msmdata$lifetime_women)] het.mixmat <- c(0, 275)/2 het.edges <- sum(het.mixmat) het.rel <- het.mixmat[1] het.target.stats <- c(het.edges, het.rel) het.fit <- ergm( het.net ~ edges + nodematch('sex'), target.stats = het.target.stats) summary <- (het.fit) het.sim1 <- simulate(het.fit) ## ----- ## MODEL 2: ERGM model for LIFETIME sexual relationships with men ## -----
6ae2e539f21db3467dfbb3efac297a2b3f626625
a6617bd28254e45d0f948c84d1d26d1e17a7689e
/inst/scripts/ROPOS2019.R
a344e50f63f001fdee62b510b928313cc151eeb4
[ "MIT" ]
permissive
BigelowLab/fvcom
37dba77b265e664920f9b68af699a445c5061e79
68566ac2c161bde1f549d6157dc69cee2fc8e704
refs/heads/master
2023-03-06T14:58:59.850621
2023-02-28T14:49:22
2023-02-28T14:49:22
204,050,423
3
0
null
null
null
null
UTF-8
R
false
false
1,164
r
ROPOS2019.R
suppressPackageStartupMessages({ library(ncdf4) library(readr) library(dplyr) library(fvcom) library(sf) }) #devtools::load_all("/mnt/ecocast/corecode/R/fvcom") read_start <- function(filename = "ROPOS2019.geojson", crs = NULL){ x <- sf::read_sf(filename) if (!is.null(crs)) x <- sf::st_transform(x, crs) x } BASE_URL <- 'http://www.smast.umassd.edu:8080/thredds/dodsC/models/fvcom/NECOFS/Archive/NECOFS_GOM' URL <- file.path(BASE_URL,'2019/gom4_201906.nc') X <- fvcom::FVCOMPhysics(URL) P0 <- read_start("/mnt/ecocast/projects/fvcom/ROPOS2019/ROPOS2019.geojson", crs = "+init=nad83:1802") tstep = 60 * 15 tmax = 60 * 60 * 24 * 30 reverse = TRUE verbose = FALSE show_progress = TRUE drag = c(0,0,0) fixed_z = TRUE clip_z = TRUE pp <- particle_track(X, P0 = P0, tstep = tstep, # 15 minutes tmax = tmax, # 30 days reverse = reverse, verbose = verbose, clip_z = clip_z, show_progress = show_progress, filename = "ROPOS2019-06-zclip-track.geojson") X = 0
67926c9228a40df9a915014cd7abb85a56d316eb
871b42e4ff1fa7efc21d8ccdd265e300cb283408
/man/shaidyRegisterRepoAPI.Rd
935d710bf1c467721421145f91c3e6f9d415d415
[]
no_license
MD-Anderson-Bioinformatics/NGCHM-R
2abef86ba0c4b8befaa803b03f37a495e9d86d6b
fdc3519bad51f1de2631d45d897fdcc2357edf90
refs/heads/main
2023-08-31T23:22:28.187163
2023-08-15T20:28:01
2023-08-15T20:28:01
76,884,228
8
5
null
2023-08-17T22:28:11
2016-12-19T17:51:24
R
UTF-8
R
false
true
417
rd
shaidyRegisterRepoAPI.Rd
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/shaidy.R \name{shaidyRegisterRepoAPI} \alias{shaidyRegisterRepoAPI} \title{Set the methods for the repository API called api} \usage{ shaidyRegisterRepoAPI(api, methods) } \arguments{ \item{api}{The name of a repository API} \item{methods}{A list of repository methods} } \description{ Set the methods for the repository API called api }
cc5063f698ba8520b5edef27c6d49ea7ee189562
f210aaf6dc2e9e331bcc0a19e151587daa0cb5c5
/working_files/shiny/shiny-jorgen-search.R
032241db22b61e79f664e4da6659b6fbf3b65493
[]
no_license
meakulpa/rubasic
fcbbd388c6a4f037c7fd3e80f43d8462e4815b92
eeeb3aa4624c461c4f4746848d309b2a8ad2c1f7
refs/heads/master
2020-06-05T20:24:18.539452
2019-02-13T21:05:07
2019-02-13T21:05:07
null
0
0
null
null
null
null
UTF-8
R
false
false
1,216
r
shiny-jorgen-search.R
library(shiny) library(spotifyr) library(stringr) library(DT) ui <- shinyUI(fluidPage( titlePanel("Search Spotify"), sidebarLayout( sidebarPanel( textInput("artist", "1. Name an artist:"), htmlOutput("artistImage") ), mainPanel( h3("Artists matching your search"), tableOutput("artistTable") ) ) )) server <- shinyServer(function(input, output) { # Get access token Sys.setenv(SPOTIFY_CLIENT_ID = 'a98864ad510b4af6851331638eec170f') Sys.setenv(SPOTIFY_CLIENT_SECRET = '6445326414dd4bf381afbc779b182223') access_token <- get_spotify_access_token() artists <- reactive({ req(input$artist) get_artists(artist_name = input$artist, access_token = access_token) }) output$artistTable <- renderTable({ artists()$artist_name }, colnames = FALSE) output$artistImage <- renderUI({ image_url <- artists()$artist_img[1] tags$img(src = image_url, height = 200, width = 200) }) }) # Run the application shinyApp(ui = ui, server = server)
1fea88414a93eab7b1d4a799c85f5c21f76a7518
e37e15a714185d736f6ec2d5988d344f561bba44
/R/positions_stat.R
9464ef41e73c0f3c82d24b3a7193cc0a6e3f9c4d
[]
no_license
kovarsky/metagenome_old
add01ca8a005ecd182b1a7a2734853a1fa79d374
1b474ace40fa29d18df0c8de98bca82cbfe96101
refs/heads/master
2016-09-16T04:36:20.633171
2013-07-17T16:40:34
2013-07-17T16:40:34
null
0
0
null
null
null
null
UTF-8
R
false
false
10,885
r
positions_stat.R
stat_table <- function(dir,prefix){ file <- paste(dir,prefix,'varpos_variants.txt',sep='') data <- read.csv(file,header=F,sep='\t') names(data) <- c('chrom','pos','A','T','G','C') norm_func <- function(v){ return(sqrt(sum(v*v))) } data$norm <- apply(data[,3:6],1,norm_func) data$sum <- apply(data[,3:6],1,sum) #data$diff <- data$sum - data$max #data$frac <- data$diff/data$sum data$flag <- prefix print (prefix) print ('--done!--') return(data) } filter_data <- function(data,covtresh=0.5,fractresh=0.1){ maxcov <- max(data$sum) data <- data[-which(is.na(data$frac)),] data <- data[which(data$frac > fractresh),] data <- data[which((data$sum/maxcov) > covtresh),] return(data) } merge_data <- function(data_list){ len <- length(data_list) if (len < 2){ return(data_list[[1]]) } else{ data <- data_list[[1]] for (i in 2:len){ data <- rbind(data,data_list[[i]]) } return(data) } } read_all_tables <- function(dir,prefixes){ data_list <- list() for (prefix in prefixes){ data_list[[prefix]] <- stat_table(dir,prefix) } return(data_list) } pos_processing <- function(data_list,covtresh,fractresh){ len <- length(data_list) for (i in 1:len){ data_list[[i]] <- filter_data(data_list[[i]],covtresh,fractresh) } res <- merge_data(data_list) } comm_pos <- function(data1,data2){ label1 = data1$flag[1] label2 = data2$flag[1] len <- dim(data1)[1] res <- data.frame(res=rep(0, len)) for (i in 3:6){ buff <- data1[,i] * data2[,i] #print(buff) res$res <- res$res + buff #res <- cbind(res,data1[,i]) #res <- cbind(res,data2[,i]) #print(res) } norm <- data1$norm * data2$norm res$res <- sqrt(res$res/norm) #res$norm <- norm res$pos <- data1$pos res[,label1] <- data1$sum res[,label2] <- data2$sum res$chrom <- data1$chrom ind <- which(res$res < 0.5) print(length(ind)) res <- res[ind,] ind <- which(res[,label1]>10 & res[,label2]>10) print(length(ind)) return(res[ind,c("chrom","pos")]) } whole_comm_pos <- function(data_list, name = '/data1/bio/runs-kovarskiy/additional_pileup_files/temp_pos.list'){ len <- 
length(data_list) combs <- combn(len,2) num <- dim(combs)[2] for (i in 1:num){ buff <- comm_pos(data_list[[combs[1,i]]], data_list[[combs[2,i]]]) if (i > 1){ res <- merge(res,buff,by.x=c('chrom','pos'),by.y=c('chrom','pos'),all.x=T,all.y=T) } else{ res <- buff } } write.table(x=res,file=name,sep='\t',col.names=F,row.names=F,quote=F) return(res) } read_new_tables <- function(dir,prefix) { filename <- paste(dir,prefix,'newvarpos_variants.txt',sep='/') data <- read.csv(filename,sep='\t',header=F) names(data) <- c('chroms','pos','A','T','G','C','sum','diff','max_alt') covfilename <- paste(dir,prefix,'common_coverage_summary.list',sep='/') covdata <- read.csv(covfilename,sep='\t',header=F) names(covdata) <- c('sum','times') return(list(data,covdata)) } read_cov_table <- function(dir,prefix) { covfilename <- paste(dir,prefix,'cover_summ.list',sep='/') covdata <- read.csv(covfilename,sep='\t',header=F) names(covdata) <- c('sum','times') return(covdata) } plot_cov_stat <- function(strainnums){ len <- length(strainnums) j = 0 for (i in strainnums){ print(i) setnum <- as.character(strain_meta.data$set[i]) prefix <- strain_meta.data$str_subset[i] strainname <- paste(strain_meta.data$short_descript[i],as.character(i),sep='_') dir <- paste(CONSTDIR,setnum,sep='') covdata <- read_cov_table(dir,prefix) covdata$strain_num <- i covdata$strainname <- strainname if (j == 0){ res <- covdata } else{ res <- rbind(res,covdata) } j <- j + 1 } return(res) } get_cdf <- function(datalist, max_alt_flag=F){ #maxes <- apply(data[,c('A','T','G','C')],1,max) #data$diff <- data$sum - maxes data <- datalist[[1]] covdata <- datalist[[2]] data$ind <- 1 sum_aggr <- aggregate(data$ind, by=list(data$sum),sum) names(sum_aggr) <- c('sum','total_num') if (max_alt_flag){ sum_diff_aggr <- aggregate(data$ind,by=list(data$max_alt,data$sum),sum) } else{ sum_diff_aggr <- aggregate(data$ind,by=list(data$diff,data$sum),sum) } names(sum_diff_aggr) <- c('diff','sum','num') merged_data <- 
merge(sum_diff_aggr,covdata,by.x=c('sum'),by.y=c('sum'),all.x=T,all.y=F) merged_data$freq <- merged_data$num/merged_data$times merged_data$variab <- merged_data$diff/merged_data$sum merged_data <- merged_data[which(merged_data$diff != 0),] return(merged_data) #ecdf_data$freq <- ecdf_data$num/ecdf_data$total_num #return(data) } aggregation <- function(data, tresholds){ agg <- function(tresh){ temp <- data[data$variab > tresh,] res <- aggregate(temp$freq,by=list(temp$sum),sum) freqname<-paste('freq',as.character(tresh),sep='') names(res) <- c('sum',freqname) return(res) } len <- length(tresholds) for (i in 1:len){ tresh <-tresholds[i] print(i) if (i==1){ res <- agg(tresh) } else{ res <- merge(res,agg(tresh),by.x=c('sum'),by.y=c('sum'),all.x=F,all.y=F) } } return(res) } aggregation_by_diff <- function(data, tresholds){ agg <- function(tresh){ temp <- data[data$diff > tresh,] res <- aggregate(temp$freq,by=list(temp$sum),sum) numname<-paste('more',as.character(tresh),sep='') names(res) <- c('sum',numname) return(res) } len <- length(tresholds) for (i in 1:len){ tresh <-tresholds[i] print(i) if (i==1){ res <- agg(tresh) } else{ res <- merge(res,agg(tresh),by.x=c('sum'),by.y=c('sum'),all.x=F,all.y=F) } } return(res) } CONSTDIR <- '/data1/bio/runs-kovarskiy/metagenome_data/' plot_position_stat <- function(strainnums, by.diffs = F, max_alt_flag=F){ len <- length(strainnums) j = 0 for (i in strainnums){ print(i) num <- which(strain_meta.data$set == i) setnum <- as.character(strain_meta.data$set[num]) #prefix <- strain_meta.data$str_subset[num] strainname <- paste(strain_meta.data$short_descript[num],num,sep='_') #dir <- paste(CONSTDIR,setnum,sep='') datalist <- read_new_tables(CONSTDIR,setnum) merged_data <- get_cdf(datalist,max_alt_flag) if (by.diffs){ agg <- aggregation_by_diff(merged_data,c(0,1,2,5)) } else{ agg <- aggregation(merged_data,c(0.0,0.01,0.02,0.05)) } agg$strain_num <- i agg$strainname <- strainname if (j == 0){ res <- agg } else{ res <- rbind(res,agg) } j <- 
j + 1 } return(res) } cover_stat <- function(setnum){ path <- '/data1/bio/runs-kovarskiy/metagenome_data/' statfilename <- paste(path, setnum, sep='','/total_sample_coverages.tsv') statdata <- read.csv(statfilename, sep=' ', head=F) namesfile <- paste('/data1/bio/runs-kovarskiy/metagenome_data',setnum,'pileups.list',sep='/') snames <- read.csv(namesfile, sep='', header=F) snames <- snames[,1] names(statdata) <- c('names','covered_positions','sum_of_cover_depth') statdata$names <- snames statdata <- merge(x=statdata,y=data.sample_names2,by.x=c('names'),by.y=c('names'),all.x=T,all.y=F) return(statdata) } cover_stats <- function(setnums){ len <- length(setnums) for (i in 1:len){ data <- try(cover_stat(setnums[i])) if (typeof(data)!='character'){ summn <- sum(as.numeric(data$sum_of_cover_depth)) summc <- sum(as.numeric(data$covered_positions)) temp <- data.frame(setnum=setnums[i],summn=summn,summc=summc) print(temp) if (i == 1){ res <- temp } else{ res <- rbind(res,temp) } } } return(res) } finstat_plot <- function(position_stat_data){ strain_num_ind <- which(names(position_stat_data)=='strain_num') temp <- melt(position_stat_data[,-strain_num_ind],id.vars=c('sum','strainname')) p <- ggplot(temp,aes(x=sum,y=value,group=variable,fill=variable))+geom_density(stat="identity",alpha=0.5,size=0.3)+facet_wrap(~strainname)+scale_y_continuous(limits=c(0,0.5)) return(p) } snp_rate_stat <- function(setnums, treshs){ len <- length(setnums) for (i in 1:len){ setnum <- setnums[[i]] snp_rate <- snp_rate_load(setnum, treshs) if (i==1){ res <- snp_rate } else{ res <- rbind(res, snp_rate) } } return(res) } snp_rate_load <- function(setnum, treshs=c(5)){ print(setnum) snp_stat_file <- paste('/data1/bio/runs-kovarskiy/metagenome_data',setnum,'new_snp_stat2.tsv',sep='/') namesfile <- paste('/data1/bio/runs-kovarskiy/metagenome_data',setnum,'pileups.list',sep='/') snames <- read.csv(namesfile, sep='', header=F) snames <- snames[,1] #snames <- rep(snames, rep(length(treshs),length(snames))) 
new_snp_stat <- read.csv(snp_stat_file, sep='', header=F) names(new_snp_stat) <- c('names','varpos_cov','snps') new_snp_stat$treshs <- treshs cov_file <- paste('/data1/bio/runs-kovarskiy/metagenome_data',setnum,'total_sample_coverages.tsv',sep='/') cov_stat <- read.csv(cov_file, sep='', header=F) names(cov_stat) <- c('tcov','tnts') cov_stat$names <- snames res <- merge(new_snp_stat, cov_stat, by.x=c('names'), by.y=c('names'), all.x=F, all.y=F) res$frac <- res$snps/res$tcov res <- merge(res, data.sample_names2, by.x=c('names'), by.y=c('names'),all.x=F,all.y=F) res$setnum <- setnum return(res) } snp_rate_plot <- function(setnums,treshold, covtreshold=10^6){ cumul_cov <- get_cumul_cov(setnums) res <- snp_rate_stat(setnums,c(0.005, 0.01, 0.02, 0.05)) rate_stat <- res res <- res[which(res$treshs==treshold),] res <- res[which(res$tcov>covtreshold),] agg <- aggregate(res$frac,by=list(res$setnum),mean) res$set <- res$setnum res <- res[,-11] res <- merge(res,cumul_cov,by.x=c("set"),by.y=c("set"),all.x=T,all.y=T) #names(agg) <- c("set",'snpfrac') #agg <- merge(agg,cumul_cov,by.x=c("set"),by.y=c("set"),all.x=T,all.y=T) return(list(res, rate_stat)) } get_cumul_cov <- function(setnums){ res <- data.frame(set=c(), possum=c(), ntsum=c(), samplnum=c()) for (setnum in setnums){ fname <- paste('/data1/bio/runs-kovarskiy/metagenome_data',setnum,'total_sample_coverages.tsv',sep='/') data <- read.csv(fname, sep=' ', header=F) possum <- sum(as.numeric(data[,2]),na.rm=T) ntsum <- sum(as.numeric(data[,3]),na.rm=T) if (is.na(ntsum)){ print(data) } samplnum <- length(which(!is.na(data[,3]))) temp <- data.frame(set=setnum, possum=possum, ntsum=ntsum, samplnum=samplnum) res <- rbind(res, temp) } res <- merge(res,strain_meta.data[,c('set','length','short_descript')],by.x=c("set"),by.y=c("set"),all.x=F,all.y=F) return(res) } #plot: ggplot(res,aes(x=flag,ymin=pos,ymax=pos+100,size=50,color=flag)) + geom_linerange() + coord_flip() + scale_size(range = c(1, 50)) + scale_alpha(range=c(0.1,0.5))
c747db7d7cb7ee1fbdb9be29dd8d70e8f4c185c6
78eb8c3af3ca1d5e88ba0a32fb85a6f13adba1b7
/Review_xPassModel.R
19192360aa33bd2336f63e8da4a4c18f0c1a48ea
[]
no_license
tabosch/ASAShootingApp
473f4f114665749f03f9cba858af10e478498b96
c17c6607909ae1b0d69efbf7195e747e641b41e4
refs/heads/master
2020-03-16T19:51:20.368727
2018-05-02T21:22:44
2018-05-02T21:22:44
null
0
0
null
null
null
null
UTF-8
R
false
false
3,083
r
Review_xPassModel.R
# Review xPass model library(dplyr) library(gbm) library(stringr) library(ggplot2) library(scales) passes <- readRDS("IgnoreList/AllPassingData.rds") smod <- readRDS("IgnoreList/xPassModel.rds") smod16 <- readRDS("IgnoreList/xPassModel_2016.rds") passes[["success.pred.16"]] <- predict(smod16, passes, type = "response", n.trees = 1000) # Partial plots #### plot(smod, i.var = "angle", return.grid = T, type = "response", continuous.resolution = 25) %>% mutate(angle = -180*angle/pi) %>% filter(abs(angle) <= 160) %>% ggplot(aes(x = angle, y = y)) + geom_line(size = 2) + xlab("Angle (degrees)") + ylab("Completion probability") + ggtitle("Predicted pass success by angle") + scale_y_continuous(labels = scales::percent, limits = c(0, 1)) + labs(caption = "0 degrees implies forward; positive angles imply the passer's right") + theme(plot.caption = element_text(hjust = 0.5)) xy.tab <- plot(smod, i.var = c("x", "y"), return.grid = T, type = "response", continuous.resolution = 12) names(xy.tab) <- c("x", "y", "pred") xy.tab %>% ggplot(aes(x = x, y = y)) + geom_tile(aes(fill = pred)) + scale_fill_gradient2(low = "red", mid = "yellow", high = "forestgreen", midpoint = .7) + xlab("X coordinate (yards from own goal)") + ylab("Y coordinate (yards from right sideline)") + ggtitle("Predicted pass success by field position") # Playerdiff # Make table of indicators with observed/predicted rates # Validation #### # By position pos.ae <- passes %>% filter(year >= 2017) %>% group_by(season.pos) %>% summarize(Passes = n(), Act = sum(success)/Passes, Exp = sum(success.pred.16)/Passes, `A-E` = Act - Exp) pos.ae %>% ggplot(aes(x = season.pos, y = `A-E`)) + geom_bar(stat = "identity") + geom_line(aes(y = 0)) + xlab("Position") + ggtitle("Actual vs. 
expected passing by player position") # By gamestate gs.ae <- passes %>% filter(year >= 2017) %>% group_by(Gamestate = pmax(-4, pmin(4, ifelse(home == 1, hscore - ascore, ascore - hscore)))) %>% summarize(Passes = n(), Act = sum(success)/Passes, Exp = sum(success.pred.16)/Passes, `A-E` = Act - Exp) gs.ae %>% ggplot(mapping = aes(Gamestate, `A-E`)) + geom_line(size = 2) + geom_line(aes(y = 0)) + ggtitle("Actual vs. expected passing by gamestate") # By coordinates xy.ae <- passes %>% filter(year >= 2017) %>% group_by(x = cut(x, 4), y = cut(y, 4)) %>% summarize(Passes = n(), Act = sum(success)/Passes, Exp = sum(success.pred.16)/Passes, `A-E` = Act - Exp) xy.ae %>% ggplot(aes(x = x, y = y)) + geom_tile(aes(fill = `A-E`)) + scale_fill_gradient2(low = "red", mid = "forestgreen", high = "red", midpoint = 0) + xlab("X coordinate (yards from own goal)") + ylab("Y coordinate (yards from right sideline)") + ggtitle("Actual vs. expected passing by zone") chi.test.stat <- sum((xy.ae$Act - xy.ae$Exp)^2*xy.ae$Passes/xy.ae$Exp) (p.value <- pchisq(chi.test.stat, 16, lower.tail = F)) # By field zone # By general direction/field zone
e4bf3cd49dc1fcdaea9b346bc9eab153e1c3a621
3fde134c2cee356e5a432ae0a71a96423f76074c
/man/get.pb.umi.Rd
b757b9ecf3c67f637e3aba91ff7b12b2e8144c69
[]
no_license
klprint/snCerFun
7ec3e0d751bd1f4c849f7696843696b68cd1b1ed
17ba9d9b1b1d34c03b4c207b30e4e23e65f7f4e5
refs/heads/master
2020-12-23T17:11:29.468163
2020-01-30T13:06:38
2020-01-30T13:07:15
237,214,006
0
0
null
null
null
null
UTF-8
R
false
true
718
rd
get.pb.umi.Rd
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/functions.R \name{get.pb.umi} \alias{get.pb.umi} \title{Creates a pseudobulk using a grouped tibble} \usage{ get.pb.umi(umi, df.grouped, cell.col = "cell_id", min.cells = 50, n.cores = 1) } \arguments{ \item{umi}{UMI count matrix (genes x cells)} \item{df.grouped}{Grouped tibble. The grouping should only be based on a single column} \item{cell.col}{Column in the tibble, where the cell names are stored.} \item{min.cells}{Minimal number of cells which should be merged to a pseudobulk} \item{n.cores}{Number of cores to run in parallel.} } \value{ Matrix of pseudobulks } \description{ Creates a pseudobulk using a grouped tibble }
deb63056a5b1e9fd2551699bda15462cc2019225
03bd83293b953b58002c3748370c3eacbcee880f
/legacy/printOneSolution.R
c225e61d481f3f0d5c6b1d69b92ed9120adc9d77
[]
no_license
davgutavi/trlab-R-utilities
34f0c25a95461aed482f393d7379dd82412edad7
cf3edbcb4a1cea70c3a2bea4e0e6e28301e7b032
refs/heads/master
2023-07-21T01:31:09.827263
2023-07-13T12:10:29
2023-07-13T12:10:29
107,287,411
1
0
null
null
null
null
UTF-8
R
false
false
4,341
r
printOneSolution.R
source("configuration.R") source("SolUtilities.R") require(lattice) ##***********************************************inputs input <- "/Users/davgutavi/Desktop/soluciones_paper/elu/elu_msl_02.sol" ##***********************************************inputs ##***Lectura del fichero .sol props <- read.properties(input) ##***Obtener información del dataset datasetInfo <- getDataset(props$dataset) show(paste("selected dataset =", datasetInfo$.attrs["name"])) #****Obtener los paths de las listas de genes, condiciones y tiempos paths <- getGSTtags(datasetInfo) show(paths) #****Obtener los genes, condiciones y tiempos en forma de vector genesL <- as.vector(read.table(paths["genes"],sep = "\n")$V1) samplesL <- as.vector(read.table(paths["samples"],sep = "\n")$V1) timesL <- as.vector(read.table(paths["times"],sep = "\n")$V1) #****Obtener valores del dataset dataset <- getDatasetValues(datasetInfo) #****Obtener los puntos [gen,condición,tiempo,expresión génica] de cada solución solutions <-getTriclusters(props,dataset) aux1 <- paste0(unlist(strsplit(input, "/"))) aux2 <- aux1[-length(aux1)] aux3 <-paste0(aux2,collapse="/") out1 <- paste0(aux3, "/graphs/") out2 <- paste0(aux3, "/tri_02") solution<-solutions[[1]] outPath<-out2 ####***************************************************************************************************************************** ####***************************************************************************************************************************** ####***************************************************************************************************************************** ####***************************************************************************************************************************** w<-8 h<-8 fsizeXaxis<-0.7 fsizeYaxis<-0.7 fsizeBoxes<-1.0 color=TRUE axis <-c(fsizeYaxis,fsizeXaxis) boxes<-list (cex = fsizeBoxes) gforctag <- "Genes for each Condition" tforctag <- "Times for each Condition" cforttag <- "Conditions for each Time" 
eltag <- "expression levels" timetag <- "times" gentag <- "genes" g <- solution$g s <- solution$s t <- solution$t el <- solution$el fg<-factor(g) fs<-factor(s) ft<-factor(t) distance <- 2 left <- distance div <- ceiling(length(levels(fg))/2) right<-length(levels(fg))-distance at<-c(left,div,right) labels<-c(g[left],g[div],g[right]) genes <- c() for (gi in levels(fg)){ genes<-append(genes, as.character(genesL[as.numeric(gi)+1])) } conditions <- c() for (si in levels(fs)){ conditions<-append(conditions, as.character(samplesL[as.numeric(si)+1])) } times <- c() for (ti in levels(ft)){ times<-append(times, as.character(timesL[as.numeric(ti)+1])) } out_gct <-paste0(outPath, "_gct.eps") out_gtc <-paste0(outPath, "_gtc.eps") out_tgc <-paste0(outPath, "_tgc.eps") # x=genes, o=conditions p=times xyplot(el ~ fg | ft,solution, main= cforttag, xlab= gentag, ylab= eltag, groups = s, type = "a", font = "arial", scales = list(x = list(at=at,labels=genes),cex=axis), layout = c(1, nlevels(ft)), strip=strip.custom(factor.levels=times,par.strip.text = boxes)) dev.copy2eps(file = out_gct, width=w, height=h) # x=genes, o=times p=conditions xyplot(el ~ fg | fs, solution, main=tforctag, xlab= gentag, ylab= eltag, groups = t, type = "a", font = "arial", scales = list(x = list(at=at,labels=genes),cex=axis), layout = c(1, nlevels(fs)), strip=strip.custom(factor.levels=conditions,par.strip.text = boxes) ) dev.copy2eps(file = out_gtc, width=w, height=h) # x=times, o=genes p=conditions xyplot(el ~ ft | fs, solution, main=gforctag, xlab= timetag, ylab= eltag, groups = g, type = "a", font = "arial", layout = c(1, nlevels(fs)), scales = list(x = list(labels=times),cex=axis), strip=strip.custom(factor.levels=conditions,par.strip.text = boxes) ) dev.copy2eps(file = out_tgc, width=w, height=h)
be843ab1fecd5b40cf03f236606cc45fdae541c7
7dfa69da69b9a064ae6ec41822751337c65fefad
/r/auto_r/templates/ALL_ssz_as_factor_recovered.r
199fcc402d113e74fb8a7de28ccd9bd2d7070482
[ "MIT" ]
permissive
CESEL/WarningsGuruPaper
1c019743bc4f20306a4c3ec1eceef797008c7753
f40ded3ccd3b818e9a69262c62e9aee38f54f02e
refs/heads/master
2022-04-26T18:10:12.376211
2020-04-10T15:08:41
2020-04-10T15:08:41
254,658,006
0
0
null
2020-04-10T14:42:04
2020-04-10T14:42:04
null
UTF-8
R
false
false
2,448
r
ALL_ssz_as_factor_recovered.r
require("RPostgreSQL") # Used to obtain deviance explained require("modEvA") # used for multi-colinearity | vif require("car") con <- dbConnect(dbDriver("PostgreSQL"), dbname="cas_exp", host="localhost") newWarnings <- dbGetQuery(con, "SELECT new_warnings from commit_warning_summary as w where w.new_warnings > 0") avgDevDelta <- dbGetQuery(con, "SELECT c.fix = 'True' as fix, c.contains_bug = 'TRUE' as contains_bug, repo_name, nd, la, ld, @lt as lt, ndev, @age as age, exp, @rexp as rexp, jlint_warnings, new_jlint_warnings, findbugs_warnings, new_findbugs_warnings, security_warnings, new_security_warnings, build != 'BUILD' as build_failed, warnings > 0 as w_bool from commits as c, commit_warning_recovered_summary as w where c.repository_id = w.repo and c.commit_hash = w.commit") #detach(avgDevDelta) attach(avgDevDelta) sink("{summary_path}/recovered/bugs/models_as_factor_all_repo_logged_recovered.txt") summary(newWarnings) summary(avgDevDelta) print("model 1") mb1 = glm(contains_bug ~ log2(1+nd) + log2(1+la) + log2(1+ld) + log2(1+lt) + fix + log2(1+ndev) + log2(1+age) + log2(1+exp) + log2(1+rexp) + as.factor(repo_name), family=binomial(), control = list(maxit = 50)) summary(mb1) vif(mb1) print(round(exp(coef(mb1)),digits=2)) print(paste("d2 = ", Dsquared(mb1))) # Add the new warning counts print("model 2 - just warnings") mb2 = glm(contains_bug ~ log2(1+new_security_warnings) + log2(1+security_warnings) + log2(1+new_findbugs_warnings) + log2(1+new_jlint_warnings) + log2(1+findbugs_warnings) + log2(1+jlint_warnings) + build_failed + as.factor(repo_name), family=binomial(), control = list(maxit = 50)) summary(mb2) #vif(mb2) print(round(exp(coef(mb2)),digits=2)) print(paste("d2 = ", Dsquared(mb2))) # Combined model print("combined model") mb3 = glm(contains_bug ~ log2(1+nd) + log2(1+la) + log2(1+ld) + log2(1+lt) + fix + log2(1+ndev) + log2(1+age) + log2(1+exp) + log2(1+rexp) + log2(1+new_security_warnings) + log2(1+security_warnings) + log2(1+new_findbugs_warnings) + 
log2(1+new_jlint_warnings) + log2(1+findbugs_warnings) + log2(1+jlint_warnings) + build_failed + as.factor(repo_name), family=binomial(), control = list(maxit = 50)) summary(mb3) #vif(mb3) print(round(exp(coef(mb3)),digits=2)) print(paste("d2 = ", Dsquared(mb3))) print("original with combined") md1 = anova(mb1, mb3) print(md1) summary(md1) print("warnings with combined") md2 = anova(mb2, mb3) print(md2) summary(md2) sink()
d577ff00695c8d8eaeccba200ddd4ca3dd48ef9e
c758fb1c8ba3238f7e3a9b049a42765796652c53
/make_concoct_cov.r
94083014be88e344a7ff623dfb6cd6e52ac5d191
[]
no_license
microgenomics/MAGnifico
5032e75e223df4c402e69b71d1ff4b272d83e716
e3e293f24c77e523dfa47267a634dfb5f4f4e4e6
refs/heads/master
2020-06-03T03:42:49.489797
2019-06-11T18:29:56
2019-06-11T18:29:56
191,423,525
1
1
null
null
null
null
UTF-8
R
false
false
2,489
r
make_concoct_cov.r
#get directory name #get directory names dir <-commandArgs(trailingOnly = TRUE) ##rhizosphere_2017 ##define sample list and number of samples #samples <- c("G1-1","G1-2","G1-3","G2-1","G2-2","G2-3") #sediments mar de cortes #define sample list and number of samples samples<-c("PRO1748_Plate1_C1", "PRO1748_Plate1_D1", "PRO1748_Plate1_B2", "PRO1748_Plate1_H2", "PRO1748_Plate1_B3", "PRO1748_Plate1_D3", "PRO1748_Plate1_F3", "PRO1748_Plate1_H3", "PRO1748_Plate1_D4", "PRO1748_Plate1_E4", "PRO1748_Plate1_A5", "PRO1748_Plate1_F5", "PRO1748_Plate2_A1", "PRO1748_Plate2_B1", "PRO1748_Plate2_C1", "PRO1748_Plate2_D1", "PRO1748_Plate2_E1", "PRO1748_Plate2_F1", "PRO1748_Plate2_G1", "PRO1748_Plate2_H1", "PRO1748_Plate2_A2", "PRO1748_Plate2_B2", "PRO1748_Plate2_C2", "PRO1748_Plate2_D2", "PRO1748_Plate2_E2", "PRO1748_Plate2_F2", "PRO1748_Plate2_G2", "PRO1748_Plate2_H2", "PRO1748_Plate2_A3", "PRO1748_Plate2_C3", "PRO1748_Plate2_D3", "PRO1748_Plate2_E3", "PRO1748_Plate2_F3", "PRO1748_Plate2_G3", "PRO1748_Plate2_H3", "PRO1748_Plate2_A4", "PRO1748_Plate2_B4", "PRO1748_Plate2_C4", "PRO1748_Plate2_D4", "PRO1748_Plate2_E4", "PRO1748_Plate2_F4", "PRO1748_Plate2_G4", "PRO1748_Plate2_H4", "PRO1748_Plate2_A5", "PRO1748_Plate2_B5", "PRO1748_Plate2_C5", "PRO1748_Plate2_D5", "PRO1748_Plate2_E5", "PRO1748_Plate2_F5", "PRO1748_Plate2_G5", "PRO1748_Plate2_H5", "PRO1748_Plate2_A6", "PRO1748_Plate2_B6", "PRO1748_Plate2_C6") smpl_nb <- length(samples) for (i in 1:smpl_nb ) { #read table of coverage for each samples path <- paste (dir,samples[i] , ".coverage", sep='' ) table_cov <- read.table (path , header = F) #normalize read count to contig length and add a 10000 factor for workable values table_cov$V2 <- table_cov$V3*10000/table_cov$V2 #make a subtable with values of interest table_cov <- table_cov[,c(1,2)] colnames(table_cov) <- c("contig",samples[i]) if (i == 1) { #prepare the general table table_all <- table_cov } #rename sample table cur_sample <- samples[i] assign(paste (cur_sample , "_cov", 
sep = ""), table_cov) } for (i in 2:smpl_nb ) { #add remaining samples to the general table cur_sample <- paste (samples[i]) current <- get (paste (cur_sample , "_cov" , sep = "")) table_all <- merge (table_all, current, by.table_all = contig, by.current = contig ) } #write the output table path <- paste (dir, "concoct_all.coverage", sep='' ) write.table(table_all[c(2:length(table_all$contig)),] , path , row.names = F , col.names = F , quote = F , sep = "\t")
76ee5c34c790335491f2bb3a3e0809c75991abd2
8a31a89ddd235833c7ab6e50b855810a499d41c7
/transgen/mort_data.R
a0a1c7a1c022fb13e641c36c85b3f300950ab62f
[]
no_license
janejpark/niehs
3030ce0c0672d1180d780cfc2e97b0e7ba2a7e5e
875fae998d9125173d0118cb76d03783063b9e26
refs/heads/master
2021-01-20T15:49:46.075310
2020-04-04T22:42:50
2020-04-04T22:42:50
64,373,948
0
2
null
null
null
null
UTF-8
R
false
false
4,311
r
mort_data.R
## Mortality Data ## 2019 08 12 library(readxl) library(tidyverse) library(ggplot2) library(RColorBrewer) library(gridExtra) setwd("~/Documents/NIEHS_LSU_UCD/niehs/transgen/DAVID/") ## Subset data by 00% and 56% samples at day 15 rawdata <- read_excel("~/Documents/NIEHS_LSU_UCD/Exp1-2/Phenotype/EmbryoMorts/190812_ars_morts.xlsx") morts <- rawdata[rawdata$`Embryo Treatment`=="0" | rawdata$`Embryo Treatment`=="56",] morts$trt <- paste(morts$`Parent Treatment`, morts$`Embryo Treatment`) mortsub <- morts[morts$Day=="15",] # also transferred total start values from day 0 to the mortsub df # e.g. # mortsub[mortsub$trt == "Exposed 56",]$`Total Start` <- morts[morts$trt=="Exposed 56" & morts$Day=="1",]$`Total Start` ## Can't use ANOVA on proportion or count data, need to use poisson regression (glm) mortsub$`Embryo Treatment` <- factor(mortsub$`Embryo Treatment`) mortsub$`Parent Treatment` <- factor(mortsub$`Parent Treatment`) m.glm <- glm(`Percent_Dead` ~ `Parent Treatment` * `Embryo Treatment` + offset(log(`Total Start`)), data=mortsub[mortsub$Gen=="1",]) #m.glm2 <- glm(`Percent_Dead` ~ `Parent Treatment` * `Embryo Treatment` + offset(log(`Total Start`)), family="binomial", # data=mortsub[mortsub$Gen=="1",]) summary(m.glm) anova(m.glm, m.glm2) m <- lm(`Percent_Dead` ~ `Parent Treatment` * `Embryo Treatment` * Gen, data=mortsub[mortsub$Gen=="1",]) ma <- anova(m) # Run the functions length, mean, and sd on the value of "change" for each group, # broken down by parent and embryo treatments library(plyr) cdata <- ddply(mortsub, c("`Parent Treatment`", "`Embryo Treatment`", "Gen"), summarise, N = length(`Percent_Dead`), mean = mean(`Percent_Dead`), sd = sd(`Percent_Dead`), se = sd / sqrt(N) ) cdata ## Line graphs of mortality between 00% and 56% embryonic treatment, control and exposed parent treatments cdata$`Embryo Treatment` <- factor(cdata$`Embryo Treatment`) pd <- position_dodge(0.1) labels <- c(`1` = "F1", `2` = "F2") md <- ggplot(cdata, aes(x=factor(`Embryo 
Treatment`), y=mean, group=`Parent Treatment`, color=`Parent Treatment`)) + geom_errorbar(aes(ymin=mean-se, ymax=mean+se), width=.2, position=pd, size=1) + geom_line(position=pd, size=1) + geom_point(position=pd, size=3, shape=21, fill="white") + scale_colour_manual(values = c("#56B4E9", "#E69F00")) + scale_x_discrete(labels=c("0" = "0%", "56" = "56%")) + xlab("Embryo WAF Treatment (%)") + ylab("Proportion Dead") + facet_grid(. ~ Gen, labeller=labeller(Gen = labels)) + ggtitle("Cumulative Mortality of ARS Embryos by Experimental Day 15") + theme_light() ## Heart rate data hearts <- read_excel("~/Documents/NIEHS_LSU_UCD/niehs/transgen/190814_ARS_hr.xlsx") heart <- hearts[hearts$`WAF %`=="0" | hearts$`WAF %` == "0.56",] hdata <- ddply(heart, c("`WAF %`", "Parent_Treatment", "Gen"), summarise, N = length(BPM), mean = mean(BPM), sd = sd(BPM), se = sd / sqrt(N) ) hdata hdata$`WAF %` <- factor(hdata$`WAF %`) hdata$Parent_Treatment <- factor(hdata$Parent_Treatment) ## Line graphs for heart rates pd <- position_dodge(0.1) labels <- c(`0.00` = "0%", `0.56` = "56%") hr <- ggplot(hdata, aes(x=`WAF %`, y=mean, group=Parent_Treatment, color=Parent_Treatment)) + geom_errorbar(aes(ymin=mean-se, ymax=mean+se), width=.2, position=pd, size=1) + geom_line(position=pd, size=1) + geom_point(position=pd, size=3, shape=21, fill="white") + scale_colour_manual(values = c("#56B4E9", "#E69F00")) + scale_x_discrete(labels=c("0" = "0%", "0.56" = "56%")) + xlab("Embryo WAF Treatment (%)") + ylab("Beats per minute (BPM)") + facet_grid(. 
~ Gen) + ggtitle("Heart rates of ARS embryos") + theme_light() ##glm and aov for heartrate data l.f1 <- lm(formula=BPM ~ `WAF %` * `Parent_Treatment`, data=heart[heart$Gen == "F1",]) a.f1 <- anova(l.f1) l.f2 <- lm(formula=BPM ~ `WAF %` * `Parent_Treatment`, data=heart[heart$Gen == "F2",]) a.f2 <- anova(l.f2) l <- lm(formula=BPM ~ `WAF %` * `Parent_Treatment` * Gen, data=heart) a <- anova(l) glm(`Percent_Dead` ~ `Parent Treatment` * `Embryo Treatment` * Gen, family="binomial", data=mortsub) require(gridExtra) phen <- grid.arrange(md, hr, ncol=1)
4ea27b36ef1d24b9c0d93284269e5f52ff0a1d86
2618a34654330482c04f44a233daf3486914b184
/plot2.R
e030c2216e8cbee9fafd941ede9e76a4d197d520
[]
no_license
vgarzon/ExData_Plotting1
864ae421bb0b22dfd7dcda2ab6e42c25d3c0d83a
c614862169188727994651f0677101d14e5ef256
refs/heads/master
2021-01-15T21:02:06.175761
2016-01-29T17:56:26
2016-01-29T17:56:26
50,666,238
0
0
null
2016-01-29T14:09:39
2016-01-29T14:09:39
null
UTF-8
R
false
false
898
r
plot2.R
# Coursera Data Science Specialization # Course 4: Exploratory data analysis # Week 1 assignment - plot2.R # 2016-01-29 library(lubridate) # Read data file, mind header, separator and "NA" hpc_dfr <- read.table("household_power_consumption.txt", header = TRUE, sep = ";", na.strings = "?", stringsAsFactors = FALSE) # Convert combined date, time strings to POSIXct and add column to data frame hpc_dfr$DateTime <- dmy_hms(paste(hpc_dfr$Date, hpc_dfr$Time)) # Time interval datInt <- interval(ymd("2007-02-01"), ymd("2007-02-03")) # Subsetting data for plotting hpc <- hpc_dfr[hpc_dfr$DateTime %within% datInt, ] # Initialize PNG device, 480x480 pixels png(filename = "plot2.png", width = 480, height = 480) # Plot time series plot(Global_active_power ~ DateTime, hpc, type = 'l', ylab = "Global Active Power (kilowatts)", xlab = "") # Close PNG device dev.off()
6e450d84fbfdb2c32131796d0f62d9ec96fc578a
d8f5550dbbe4696c170d391e09d00738714c5dd0
/man/attachContext.Rd
245e267c1ba01ea7398149fb0a0f64e18b2b8ee6
[]
no_license
dami82/mutSignatures
19e044eb99ee8c65c4123c131fa978bd5bd9d93c
ad406018266f80a3048ae6219bf6c09cf6f69134
refs/heads/master
2023-01-24T00:39:46.026623
2023-01-18T21:22:55
2023-01-18T21:22:55
123,127,851
12
0
null
null
null
null
UTF-8
R
false
true
2,169
rd
attachContext.Rd
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/s03_all_functions.R \name{attachContext} \alias{attachContext} \title{Attach Nucleotide Context.} \usage{ attachContext( mutData, BSGenomeDb, chr_colName = "chr", start_colName = "start_position", end_colName = "end_position", nucl_contextN = 3, context_colName = "context", skip_seqName_check = FALSE ) } \arguments{ \item{mutData}{data.frame storing mutation data} \item{BSGenomeDb}{a BSGenomeDb-class object, storing info about the genome of interest} \item{chr_colName}{string, name of the column storing seqNames. Defaults to "chr"} \item{start_colName}{string, name of the column storing start positions. Defaults to "start_position"} \item{end_colName}{string, name of the column storing end positions. Defaults to "end_position"} \item{nucl_contextN}{integer, the span of nucleotides to be retrieved around the variant. Defaults to 3} \item{context_colName}{string, name of the column that will be storing the nucleotide context. Defaults to "context"} \item{skip_seqName_check}{logical, shall seqNames be checked to remove non-official chromosomes. Defaults to FALSE} } \value{ a modified data.frame including the nucleotide context in a new column } \description{ Retrieve the nucleotide context around each DNA variant based on the genomic coordinates of the variant and a reference BSGenome database. } \details{ This function is part of the user-interface set of tools included in mutSignatures. This is an exported function. 
} \references{ More information and examples about mutational signature analysis can be found here: \enumerate{ \item \bold{GitHub Repo}: \url{https://github.com/dami82/mutSignatures/} \item \bold{More info and examples} about the mutSignatures R library: \url{https://www.data-pulse.com/dev_site/mutsignatures/} \item \bold{Sci Rep paper}, introducing mutS: \url{https://www.nature.com/articles/s41598-020-75062-0/} \item \bold{Oncogene paper}, Mutational Signatures Operative in Bladder Cancer: \url{https://www.nature.com/articles/s41388-017-0099-6} } } \author{ Damiano Fantini, \email{damiano.fantini@gmail.com} }
ae9be7bbab9aba1a4128c9156836aa9c2815ef86
8ac6fd04fa2f9d62cf23cce35f2a3bbc787debbd
/server.R
c10d47c93749509cce40f3ed4e16db4762a4cd85
[]
no_license
charliejhadley/idn_family-policies-timeline
dee918b9793c6a5c1aee15793dc84fa8a5cb5d5a
b16d201b5c00995aad707246ae8827af31ae9054
refs/heads/master
2021-07-21T18:51:32.735861
2017-10-31T10:41:41
2017-10-31T10:41:41
null
0
0
null
null
null
null
UTF-8
R
false
false
8,354
r
server.R
## =============================== License ======================================== ## ================================================================================ ## This work is distributed under the MIT license, included in the parent directory ## Copyright Owner: University of Oxford ## Date of Authorship: 2016 ## Author: Martin John Hadley (orcid.org/0000-0002-3039-6849) ## Academic Contact: Mireia Borrell-Porta (orcid.org/0000-0003-2328-1258) ## Data Source: local file ## ================================================================================ library("plyr") library("tidyverse") library("shiny") library("DT") library("lubridate") library("plotly") library("shinyBS") library("scales") library("rfigshare") library("readxl") library("forcats") ## =========================== Beautification =================================== ## ============================================================================== gantt_labeler <- function(start_date = NA, end_date = NA, y_axis = NA, color = NA) { paste0( "Policy Name: ", y_axis, "</br>", "Enforcement Period: ", as.Date(start_date), " to ", as.Date(end_date), "</br>", "Policy Type: ", color ) } new_lines_to_p_tags <- function(text) { gsub(pattern = "\n", replacement = "<br />", text) } ## =========================== Shiny Server Fn =================================== ## ============================================================================== source("data-processing.R", local = T) # source("figshare.R", local = T) source("long-colnames-replacements.R", local = T) shinyServer(function(input, output, session) { output$timeline_selected_cols_UI <- renderUI({ selectInput( "timeline_selected_cols", label = "Columns to show: ", choices = long_colnames_replacements, selected = initial_columns, multiple = TRUE, width = "100%" ) }) output$timeline <- renderPlotly({ timeline_data$type.of.policy <- gsub("allowances policy", "</br>allowances policy", timeline_data$type.of.policy) cutoff_timeline_data <- 
timeline_data cutoff_timeline_data$valid.from.b[cutoff_timeline_data$valid.from.b < as.Date("1997/01/01")] <- as.Date("1997/01/01") policy_separators <- cutoff_timeline_data %>% filter(valid.from.b > as.Date("1997/01/01") & valid.until.c > as.Date("1997/01/01")) timeline_ggplot <- ggplot( cutoff_timeline_data, aes( x = valid.from.b, xend = valid.until.c, y = name.of.policy, yend = name.of.policy, colour = type.of.policy ) ) + geom_segment( size = 4, aes( # x = Valid.from.b. + 60*60*24*10*3, # xend = Valid.until.c. - 60*60*24*10*3, x = valid.from.b, xend = valid.until.c, # Draw tooltipped geom_segments over everything, make almost invisible y = name.of.policy, yend = name.of.policy, text = NULL ) ) + geom_segment( data = policy_separators, size = 4, aes( # x = Valid.from.b. - 60*60*24*10*3, # xend = Valid.from.b. + 60*60*24*10*3, x = valid.from.b - 18, xend = valid.from.b + 18, y = name.of.policy, yend = name.of.policy, text = NULL ), color = "black" ) + geom_segment( size = 4, show.legend = F, aes( x = valid.from.b, xend = valid.until.c, y = name.of.policy, yend = name.of.policy, text = gantt_labeler( start_date = valid.from.b, end_date = valid.until.c, y_axis = name.of.policy, color = type.of.policy ), alpha = 0.001 # Draw tooltipped geom_segments over everything, make almost invisible ) ) + scale_x_date( breaks = seq(as.Date("1997/01/01"), as.Date(paste0( year(max( cutoff_timeline_data$valid.until.c )) + 1, "-01-01" )), "years"), labels = date_format("%Y"), limits = c(as.Date("1997/01/01"), as.Date( max(cutoff_timeline_data$valid.until.c) )) ) + xlab("") + ylab("") + scale_colour_brewer(name = "Type of Policy", type = "qual", palette = "Dark2") + theme(axis.text.x = element_text(angle = 45, hjust = 1), plot.margin = unit(c(0, 0, 1, 1), "cm")) ggplotly(timeline_ggplot, tooltip = "text", source = "timeline") }) output$timeline_selected_Policy_Table <- DT::renderDataTable({ event_data <- event_data("plotly_click", source = "timeline") selected_Policy <- 
levels(timeline_data$name.of.policy)[event_data$y] data_to_show <- timeline_data %>% filter(as.character(name.of.policy) == selected_Policy) data_to_show <- data_to_show[, input$timeline_selected_cols] colnames(data_to_show) <- mapvalues( colnames(data_to_show), from = long_colnames_replacements %>% as.character(), to = long_colnames_replacements %>% names(), warn_missing = F ) data_to_show }, extensions = c("FixedHeader", "Buttons"), rownames = FALSE, escape = FALSE, class = 'cell-border stripe', options = list( autoWidth = FALSE, scrollX = TRUE, paging = FALSE, dom = 'Bfrtip', buttons = list( list( extend = "excel", text = "Download current view of data", filename = "Filtered Policies", exportOptions = list(modifier = list(selected = FALSE)) ) ), defer = TRUE, fixedHeader = list(header = TRUE), columnDefs = list(list(width = "200px", targets = 0)) # fixedColumns = list(leftColumns = 2, rightColumns = 1) )) output$plain_datatable_selected_cols_UI <- renderUI({ selectInput( "plain_datatable_selected_cols", label = "Columns to show: ", choices = long_colnames_replacements, multiple = TRUE, selected = initial_columns, width = "100%" ) }) output$plain_datatable_selected_Policy_Table <- DT::renderDataTable({ data_to_show <- timeline_data[, input$plain_datatable_selected_cols] colnames(data_to_show) <- mapvalues( colnames(data_to_show), from = long_colnames_replacements %>% as.character(), to = long_colnames_replacements %>% names(), warn_missing = F ) data_to_show }, extensions = c("FixedHeader", "Buttons"), rownames = FALSE, filter = 'top', escape = FALSE, class = 'cell-border stripe', options = list( autoWidth = FALSE, paging = FALSE, scrollX = TRUE, dom = 'Bfrtip', fixedHeader = list(header = TRUE), buttons = list( list( extend = "excel", text = "Download current view of data", filename = "Filtered Policies", exportOptions = list(modifier = list(selected = FALSE)) ) ), columnDefs = list(list(width = "200px", targets = 0)) # fixedColumns = list(leftColumns = 2, 
rightColumns = 1) )) output$timeline_selected_Policy_UI <- renderUI({ event_data <- event_data("plotly_click", source = "timeline") if (is.null(event_data)) { wellPanel("Select an event in the timeline to view more details about the policy.") } else { fluidRow(column( uiOutput("timeline_selected_cols_UI"), bsTooltip( "timeline_selected_cols_UI", "Add/remove columns to the table by typing/removing names from here", "top", options = list(container = "body") ), uiOutput("type_of_details"), DT::dataTableOutput("timeline_selected_Policy_Table", width = "100%"), width = 12 )) } }) # output$download_spreadsheet <- downloadHandler( # filename = "policies.xlsx", # # desired file name on client # content = function(con) { # file.copy("data/policies.xlsx", con) # } # ) # })
82fd8f8995003eb1f65fb1b5131a08bcdf3f3aff
e6a13b677387eb24ad0bc4e3b4527445d5912b37
/Reading data in R.R
45a051b232ed71a511ea4f311ad409140d7fab29
[]
no_license
Hrithik1/R-learning
c9af0d1c4c4d3e93eebc801cc9194f64cba61a7f
a03ff0358609aea3df9df0b2d9a242e0fc921bbc
refs/heads/main
2023-04-12T10:55:25.972300
2021-05-14T16:55:35
2021-05-14T16:55:35
326,401,985
1
1
null
2021-01-06T13:58:14
2021-01-03T12:29:12
R
UTF-8
R
false
false
83
r
Reading data in R.R
x<-url("https://www.google.com","r") y<-readLines(x) head(y) #read.table #read.csv
fa670e32a81fe244575e31616e02dd9d9feb11db
bff6f874ddadce8109260ac9c36a8e1f76bc5536
/ndnd_functions_temp/cpgs_hd.R
b76ab4b5d53612cf99b32b5906069a16eb4d0cc9
[]
no_license
ElliotSivel/NDND
b45d06e5b8f0ea9b81e8793ab6f203426b7e4b75
88ef12fe46ffb7ac3ead95dfbe4ab1c50aa7dacb
refs/heads/master
2021-06-22T00:16:58.066969
2021-01-12T17:03:31
2021-01-12T17:03:31
171,261,910
0
0
null
null
null
null
UTF-8
R
false
false
3,438
r
cpgs_hd.R
# library(RcppEigen) # library(Rcpp) # library(inline) #creation de la fonction CPP cpgsCPP='#include <Eigen/Cholesky> using Eigen::Map; using Eigen::MatrixXd; using Eigen::VectorXd; const Map<MatrixXd>A(as<Map<MatrixXd> >(AA)); const Map<VectorXd>b(as<Map<VectorXd> >(bb)); const Map<VectorXd>x0(as<Map<VectorXd> >(xx0)); int N=Rcpp::as<int>(NN); int p=A.cols(); int m=A.rows(); int runup,discard=100; // Check input arguments if (m < (p+1)){ throw std::range_error("dimensions mismatch"); } // Initialisation MatrixXd X(N+runup+discard,p); int n=0; MatrixXd x(p,1); MatrixXd y(p,1); x=x0; // Initialize variables for keeping track of sample mean, covariance // and isotropic transform. MatrixXd M(p,1); M.setZero(); MatrixXd S2(p,p); S2.setZero(); // outer products. MatrixXd S(p,p); MatrixXd S0(p,p); S.setIdentity(); MatrixXd T1(p,p); T1.setIdentity(); MatrixXd W(m,p); W = A; MatrixXd d(m,1); MatrixXd delta0(p,1); MatrixXd delta1(p,1); MatrixXd z(m,1); while (n < (N+runup+discard)){ //sampling loop y=x; // compute approximate stochastic transformation if (n == runup-1){ T1=S.transpose().llt().matrixU(); W = A*T1; } y=T1.inverse()*y; // choose p new components int i=0; for (int i=0;i<p;++i){ //Find points where the line with the (p-1) components x_i //fixed intersects the bounding polytope. 
VectorXd e(p);e.setZero(); e(i)= 1; z = (W*e); //prevent any divisions by 0 d=(b - W*y); d=d.cwiseQuotient(z); double tmin=-9e9; double tmax=9e9; for (int j=0;j<m;++j){ if (z(j)<0 && tmin<d(j)) tmin=d(j); if (z(j)>0 && tmax>d(j)) tmax=d(j); } y(i)+=(tmin+(tmax-tmin)*rand()/(double)RAND_MAX); } x=T1*y; X.row(n)= x.col(0); ++n; // Incremental mean and covariance updates delta0 = x - M; // delta new point wrt old mean M+= delta0/(double)n; // sample mean delta1= x - M; // delta new point wrt new mean if (n > 1){ S2 +=(n-1)/(double)(n*n)*(delta0*delta0.transpose())+(delta1*delta1.transpose()); S0 = S; S = S2/(double)(n-1); // sample covariance } else { S.setIdentity(); } } return wrap(X.bottomRows(N));' #compilation (à ne faire qu'une seule fois) cpgs2<-cxxfunction(signature(NN="integer",AA="matrix",bb="vector",xx0="vector"),cpgsCPP,plugin="RcppEigen") # #jeu de test # A=matrix(c(1,0,0,1,-1,0,0,-1,1,1),ncol=2,byrow=TRUE) # b=c(50,50,0,0,75) # x0=c(25,10) # # #test de la fonction # debut=Sys.time() # res=cpgs2(150000,A,b,x0) # end=Sys.time() # duree=end-debut # print(duree) # # #test de l'ancienne fonction # debut=Sys.time() # res=cpgs(150000,A,b,x0) # end=Sys.time() # duree=end-debut # print(duree) # # Sys.getenv() # Sys.setenv(PATH = # "C:/Users/a22073/Documents/Rtools/bin/; # C:/Users/a2207/Documents/R-3.6.0/bin/x64; # C:/Program Files (x86)/Common Files/Oracle/Java/javapath; # C:/WINDOWS/system32; # C:/WINDOWS; # C:/WINDOWS/System32/Wbem; # C:/WINDOWS/System32/WindowsPowerShell/v1.0/; # C:/Program Files/Intel/WiFi/bin/; # C:/Program Files/Common Files/Intel/WirelessCommon/; # C:/Program Files/Git/cmd; # C:/Users/Administrator/AppData/Local/Microsoft/WindowsApps; # C:/Users/Administrator/AppData/Local/Box/Box Edit/") # Sys.setenv(BINPREF = "C:/Rtools/mingw_$(WIN)/bin/")
3c5769bb1a06531af4d107e3c34f4e62b3aec9c9
770a778f89aa210f199c30180a6e0365d3baaf3b
/Data/generate_r.R
57d5808f8632f3fc779b0e6da3e98ca17121d6ec
[]
no_license
alizeeguyot/METARET
38e0ced2a09a220c9ab45d310bc614efc4dfe10d
d41ea476e57fdcc5ee7eb5f5ff58c81aea91b715
refs/heads/master
2023-06-06T05:18:00.633568
2021-06-27T08:03:18
2021-06-27T08:03:18
null
0
0
null
null
null
null
UTF-8
R
false
false
8,699
r
generate_r.R
## function that translates choices into each different task ## into an 'r' risk parameter of a CRRA x utility funtion get_r <- function(bibkey, task, choice) { if (bibkey == "Charness2019") { if (task == "EG") { if (choice == 1) { out <- -2.45 } if (choice == 2) { out <- runif(1, min = -2.45, max = -0.16)} if (choice == 3) { out <- runif(1, min = -0.16, max = 0.29)} if (choice == 4) { out <- runif(1, min = 0.29, max = 0.50)} if (choice == 5) { out <- runif(1, min = 0.50, max = 1)} if (choice == 6) { out <- 1} } if (task == "HL") { if (choice == 10) { out <- 2.71 } if (choice == 9) { out <- runif(1, min = 1.95, max = 2.71) } if (choice == 8) { out <- runif(1, min = 1.49, max = 1.95) } if (choice == 7) { out <- runif(1, min = 1.15, max = 1.49) } if (choice == 6) { out <- runif(1, min = 0.85, max = 1.15) } if (choice == 5) { out <- runif(1, min = 0.59, max = 0.85) } if (choice == 4) { out <- runif(1, min = 0.32, max = 0.59) } if (choice == 3) { out <- runif(1, min = 0.03, max = 0.32) } if (choice == 2) { out <- runif(1, min = -0.37, max = 0.03) } if (choice == 1) { out <- -0.37 } if (choice == 0) { out <- -0.37 } } ## TODO: how to deal with the long left tail?? 
if (task == "IG") { if (choice == 8) { out <- 1 } if (choice == 0) { out <- -1 } if (choice != 8 & choice != 0) { out <- (-0.405465 -log(8 - choice) + log(8 + 1.5*choice))/(-log(8-choice) + log(8 + 1.5*choice)) } } if (task != "HL" & task != "EG" & task != "IG") { out <- NA } } if (bibkey == "Crosetto2016") { if (task == "EG") { if (choice == 1) { out <- -1 } if (choice == 2) { out <- runif(1, min = -1, max = 0.33)} if (choice == 3) { out <- runif(1, min = 0.33, max = 0.62)} if (choice == 4) { out <- runif(1, min = 0.62, max = 0.80)} if (choice == 5) { out <- 1} } if (task == "HL") { if (choice == 10) { out <- 2.71 } if (choice == 9) { out <- runif(1, min = 1.95, max = 2.71) } if (choice == 8) { out <- runif(1, min = 1.49, max = 1.95) } if (choice == 7) { out <- runif(1, min = 1.15, max = 1.49) } if (choice == 6) { out <- runif(1, min = 0.85, max = 1.15) } if (choice == 5) { out <- runif(1, min = 0.59, max = 0.85) } if (choice == 4) { out <- runif(1, min = 0.32, max = 0.59) } if (choice == 3) { out <- runif(1, min = 0.03, max = 0.32) } if (choice == 2) { out <- runif(1, min = -0.37, max = 0.03) } if (choice == 1) { out <- -0.37 } if (choice == 0) { out <- -0.37 } } if (task == "BART") { out <- choice/(100-choice) } if (task == "IG") { if (choice == 4) { out <- 1 } if (choice != 4) { out <- (log(4-choice) - log(8+3*choice) + log(3))/(log(4-choice) +log(2) - log(8+3*choice)) } } } if (bibkey == "Meta2016") { if (task == "HL") { if (choice == 10) { out <- 2.71 } if (choice == 9) {out <- runif(1, min = 1.95, max = 2.71) } if (choice == 8) { out <- runif(1, min = 1.49, max = 1.95) } if (choice == 7) { out <- runif(1, min = 1.15, max = 1.49) } if (choice == 6) { out <- runif(1, min = 0.85, max = 1.15) } if (choice == 5) { out <- runif(1, min = 0.59, max = 0.85) } if (choice == 4) { out <- runif(1, min = 0.32, max = 0.59) } if (choice == 3) { out <- runif(1, min = 0.03, max = 0.32) } if (choice == 2) { out <- runif(1, min = -0.37, max = 0.03) } if (choice == 1) { out <- 
-0.37 } if (choice == 0) { out <- -0.37 } } } if (bibkey == "Menkhoff_Sakha_2017") { if (task == "EG") { if (choice == 1) { out <- -1 } if (choice == 2) { out <- runif(1, min = -1, max = 0.33)} if (choice == 3) { out <- runif(1, min = 0.33, max = 0.62)} if (choice == 4) { out <- runif(1, min = 0.62, max = 0.80)} if (choice == 5) { out <- 1} } ## this is a bit iffy, to be changed if (task == "EG_loss") { if (choice == 1) { out <- -0.25 } if (choice == 2) { out <- runif(1, min = -0.25, max = 0.60)} if (choice == 3) { out <- runif(1, min = 0.60, max = 0.82)} if (choice == 4) { out <- runif(1, min = 0.82, max = 1)} if (choice == 5) { out <- 1} } if (task == "IG") { if (choice == 100) { out <- 1 } if (choice == 0) { out <- -1 } if (choice != 100 & choice != 0) { out <- (log(1/(2*(100-choice))) + log(100 + 2*choice))/(-log(100-choice) + log(100 + 2*choice)) } } if (task == "CEPL") { if (choice == 20) { out <- 1.52 } if (choice == 19) { out <- runif(1, min = 1.36, max = 1.52) } if (choice == 18) { out <- runif(1, min = 1.22, max = 1.36) } if (choice == 17) { out <- runif(1, min = 1.1, max = 1.22) } if (choice == 16) { out <- runif(1, min = 1, max = 1.1) } if (choice == 15) { out <- runif(1, min = 0.91, max = 1) } if (choice == 14) { out <- runif(1, min = 0.83, max = 0.91) } if (choice == 13) { out <- runif(1, min = 0.76, max = 0.83) } if (choice == 12) { out <- runif(1, min = 0.69, max = 0.76) } if (choice == 11) { out <- runif(1, min = 0.63, max = 0.69) } if (choice == 10) { out <- runif(1, min = 0.57, max = 0.63) } if (choice == 9) { out <- runif(1, min = 0.52, max = 0.57) } if (choice == 8) { out <- runif(1, min = 0.48, max = 0.52) } if (choice == 7) { out <- runif(1, min = 0.43, max = 0.48) } if (choice == 6) { out <- runif(1, min = 0.39, max = 0.43) } if (choice == 5) { out <- runif(1, min = 0.34, max = 0.39) } if (choice == 4) { out <- runif(1, min = 0.3, max = 0.34) } if (choice == 3) { out <- runif(1, min = 0.26, max = 0.3) } if (choice == 2) { out <- runif(1, min 
= 0.2, max = 0.26) } if (choice == 1) { out <- runif(1, min = -1, max = 0.2) } if (choice == 0) { out <- -1 } } } if (bibkey == "Holzmeister2019") { if (task == "HL") { if (choice == 10) { out <- 2.71 } if (choice == 9) { out <- runif(1, min = 1.95, max = 2.71) } if (choice == 8) { out <- runif(1, min = 1.49, max = 1.95) } if (choice == 7) { out <- runif(1, min = 1.15, max = 1.49) } if (choice == 6) { out <- runif(1, min = 0.85, max = 1.15) } if (choice == 5) { out <- runif(1, min = 0.59, max = 0.85) } if (choice == 4) { out <- runif(1, min = 0.32, max = 0.59) } if (choice == 3) { out <- runif(1, min = 0.03, max = 0.32) } if (choice == 2) { out <- runif(1, min = -0.37, max = 0.03) } if (choice == 1) { out <- -0.37 } if (choice == 0) { out <- -0.37 } } if (task == "BRET") { if (choice == 100) { out <- 10 } if (choice != 100) { out <- choice/(100-choice) } } if (task == "EG") { if (choice == 1) { out <- -1.97 } if (choice == 2) { out <- runif(1, min = -1.97, max = 0)} if (choice == 3) { out <- runif(1, min = 0, max = 0.39)} if (choice == 4) { out <- runif(1, min = 0.39, max = 0.58)} if (choice == 5) { out <- runif(1, min = 0.58, max = 1)} if (choice == 6) { out <- 1} } if (task == "CEPL") { if (choice == 9) { out <- 7.96 } if (choice == 8) { out <- runif(1, min = 3.71, max = 7.96) } if (choice == 7) { out <- runif(1, min = 2.06, max = 3.71) } if (choice == 6) { out <- runif(1, min = 1, max = 2.06) } if (choice == 5) { out <- runif(1, min = 0.07, max = 1) } if (choice == 4) { out <- runif(1, min = -1, max = 0.07) } if (choice == 3) { out <- runif(1, min = -2.93, max = -1) } if (choice == 2) { out <- runif(1, min = -2.93) } if (choice == 1) { out <- -2.93 } } } if (bibkey == "Frey2017") { if (task == "BART") { out <- choice/(128-choice) } if (task == "HL"){ if (choice == 10) { out <- 2.71 } if (choice == 9) { out <- runif(1, min = 1.95, max = 2.71) } if (choice == 8) { out <- runif(1, min = 1.49, max = 1.95) } if (choice == 7) { out <- runif(1, min = 1.15, max = 1.49) 
} if (choice == 6) { out <- runif(1, min = 0.85, max = 1.15) } if (choice == 5) { out <- runif(1, min = 0.59, max = 0.85) } if (choice == 4) { out <- runif(1, min = 0.32, max = 0.59) } if (choice == 3) { out <- runif(1, min = 0.03, max = 0.32) } if (choice == 2) { out <- runif(1, min = -0.37, max = 0.03) } if (choice == 1) { out <- -0.37 } if (choice == 0) { out <- -0.37 } } } out }
f6f894031a3b15e63d0dd84af440c7b4cf6a0697
54d6ea7251681b5a49ab139a0bf332d06da5439a
/plot/EndOfApril-multipleCorrectionMade/scalable_single_item.R
1aa08f41d011398f9cc4578e48fe6270a02d64fb
[]
no_license
wsgan001/MasterThesis-3
d558b3dcf5b302ef3a4e1e3ffc412ffd67ca29cc
a3dc8797242e3f9b282a8621fced57981fc6bdae
refs/heads/master
2020-04-07T00:31:18.669699
2017-06-03T00:41:58
2017-06-03T00:41:58
null
0
0
null
null
null
null
UTF-8
R
false
false
5,002
r
scalable_single_item.R
tiff("scalable_single_item.png", width=9, height=9, units='in', res=120) par(mfrow=c(3,3), cex.lab=1.5, cex.axis=1.5, cex.main=1.5, cex.sub=1.5) #BIBLE #DATA x <- c(0.01, 0.009, 0.008, 0.007, 0.006, 0.005, 0.004, 0.003, 0.002, 0.001)*100 zero <- c(28247, 23874, 27147, 28035, 30995, 37511, 43290, 55725, 77181, 176405)/1000 one <- c(26462, 19830, 18776, 22199, 23191, 25965, 31464, 36128, 51794, 133619)/1000 #PLOT plot(x, zero, log = "xy", type="o", pch=0, xlab="Minsup(%)",ylab="Time (s logscale)", col="blue", xlim = rev(range(x)), ylim=c(18, 176)) lines(x, one, col="red", type="o", pch=1) title(main="BIBLE", cex.lab=1.5, cex.axis=1.5, cex.main=1.5, cex.sub=1.5) grid() legend("bottomright", legend=c("reference", "spark_singleItem"), lty=c(1,1),lwd=c(2,2), col=c("blue", "red"), cex = 1) #protein #DATA x <- c(0.9999, 0.99988, 0.99987, 0.99986)*100 zero <- c(316549, 446706, 554299, 686114)/1000 one <- c(330404, 385173, 417496, 446156)/1000 #PLOT plot(x, zero, log = "xy", type="o", pch=0, xlab="Minsup(%)",ylab="Time (s logscale)", col="blue", xlim = rev(range(x)), ylim=c(316, 686)) lines(x, one, col="red", type="o", pch=1) title(main="protein", cex.lab=1.5, cex.axis=1.5, cex.main=1.5, cex.sub=1.5) grid() legend("bottomright", legend=c("reference", "spark_singleItem"), lty=c(1,1),lwd=c(2,2), col=c("blue", "red"), cex = 1) #Kosarak-70 #DATA x <- c(0.003, 0.0028, 0.0026, 0.0024, 0.0022, 0.002)*100 zero <- c(15922, 16873, 18065, 19459, 21731, 24384)/1000 one <- c(13030, 13860, 14687, 17677, 17885, 18931)/1000 #PLOT plot(x, zero, log = "xy", type="o", pch=0, xlab="Minsup(%)",ylab="Time (s logscale)", col="blue", xlim = rev(range(x)), ylim=c(13, 24)) lines(x, one, col="red", type="o", pch=1) title(main="Kosarak-70", cex.lab=1.5, cex.axis=1.5, cex.main=1.5, cex.sub=1.5) grid() legend("bottomright", legend=c("reference", "spark_singleItem"), lty=c(1,1),lwd=c(2,2), col=c("blue", "red"), cex = 1) #LEVIATHAN #DATA x <- c(0.1, 0.09, 0.08, 0.07, 0.06, 0.05, 0.04, 0.03, 0.02, 0.01)*100 
zero <- c(3170, 2933, 3398, 4253, 3470, 3782, 4780, 5159, 6189, 10052)/1000 one <- c(2515, 2563, 2887, 2826, 2935, 3285, 3486, 3942, 4755, 7643)/1000 #PLOT plot(x, zero, log = "xy", type="o", pch=0, xlab="Minsup(%)",ylab="Time (s logscale)", col="blue", xlim = rev(range(x)), ylim=c(2, 10)) lines(x, one, col="red", type="o", pch=1) title(main="LEVIATHAN", cex.lab=1.5, cex.axis=1.5, cex.main=1.5, cex.sub=1.5) grid() legend("bottomright", legend=c("reference", "spark_singleItem"), lty=c(1,1),lwd=c(2,2), col=c("blue", "red"), cex = 1) #PubMed #DATA x <- c(0.05, 0.04, 0.03, 0.02, 0.01, 0.005)*100 zero <- c(9556, 10489, 10974, 12881, 20003, 30201)/1000 one <- c(7966, 8307, 8904, 10346, 14256, 21949)/1000 #PLOT plot(x, zero, log = "xy", type="o", pch=0, xlab="Minsup(%)",ylab="Time (s logscale)", col="blue", xlim = rev(range(x)), ylim=c(7, 30)) lines(x, one, col="red", type="o", pch=1) title(main="PubMed", cex.lab=1.5, cex.axis=1.5, cex.main=1.5, cex.sub=1.5) grid() legend("bottomright", legend=c("reference", "spark_singleItem"), lty=c(1,1),lwd=c(2,2), col=c("blue", "red"), cex = 1) #FIFA #DATA x <- c(0.14, 0.12, 0.1, 0.09, 0.08, 0.07, 0.06, 0.05)*100 zero <- c(26572, 28193, 35703, 44324, 48639, 57630, 72229, 117669)/1000 one <- c(22003, 23806, 28856, 33836, 41424, 50408, 65124, 105365)/1000 #PLOT plot(x, zero, log = "xy", type="o", pch=0, xlab="Minsup(%)",ylab="Time (s logscale)", col="blue", xlim = rev(range(x)), ylim=c(22, 117)) lines(x, one, col="red", type="o", pch=1) title(main="FIFA", cex.lab=1.5, cex.axis=1.5, cex.main=1.5, cex.sub=1.5) grid() legend("bottomright", legend=c("reference", "spark_singleItem"), lty=c(1,1),lwd=c(2,2), col=c("blue", "red"), cex = 1) #slen1 #DATA x <- c(0.0017, 0.00165, 0.0016, 0.00155, 0.0015, 0.00145, 0.0014, 0.00135, 0.0013)*100 zero <- c(15077, 16317, 18549, 18766, 19779, 20306, 21739, 23757, 26922)/1000 one <- c(13078, 18296, 22978, 24689, 23803, 27311, 28748, 27514, 29418)/1000 #PLOT plot(x, zero, log = "xy", type="o", pch=0, 
xlab="Minsup(%)",ylab="Time (s logscale)", col="blue", xlim = rev(range(x)), ylim=c(13, 29)) lines(x, one, col="red", type="o", pch=1) title(main="slen1", cex.lab=1.5, cex.axis=1.5, cex.main=1.5, cex.sub=1.5) grid() legend("bottomright", legend=c("reference", "spark_singleItem"), lty=c(1,1),lwd=c(2,2), col=c("blue", "red"), cex = 1) #slen2 #DATA x <- c(0.0015, 0.00145, 0.0014, 0.00135, 0.0013, 0.00125, 0.0012, 0.00115, 0.0011)*100 zero <- c(13005, 13462, 15471, 20308, 19068, 20138, 23366, 26772, 27830)/1000 one <- c(13452, 14307, 16422, 20270, 22787, 23525, 24869, 29102, 32317)/1000 #PLOT plot(x, zero, log = "xy", type="o", pch=0, xlab="Minsup(%)",ylab="Time (s logscale)", col="blue", xlim = rev(range(x)), ylim=c(13, 32)) lines(x, one, col="red", type="o", pch=1) title(main="slen2", cex.lab=1.5, cex.axis=1.5, cex.main=1.5, cex.sub=1.5) grid() legend("bottomright", legend=c("reference", "spark_singleItem"), lty=c(1,1),lwd=c(2,2), col=c("blue", "red"), cex = 1) dev.off()
a49a219680d89478bb412dc2c2b336eb1d2658af
4f4ba6d34b2411350abc91cc44b7bd1875970584
/R/data_capitales_prov.R
2dd2aa84d154f1299fdb08af5428578ad6326627
[ "LicenseRef-scancode-warranty-disclaimer", "MIT", "CC-BY-4.0" ]
permissive
perezp44/spanishRentidadesIGN
a2129e25e0251fc9d5ae97abc979d25c41fc3b59
0686da61ddaa47b80a6bf8801214c4539c9c05de
refs/heads/master
2021-05-02T13:20:16.023342
2018-02-09T19:15:20
2018-02-09T19:15:20
120,757,308
0
0
null
null
null
null
UTF-8
R
false
false
696
r
data_capitales_prov.R
#------------------------------ This file documents el df capitales_prov #-- Simplemente tiene las 52 capitales de provincia #- #' Capitales de las 52 "Provincias" de Spain #' #' df con los 52 municipios Spain que son capitales de provincia (+ Ceuta y Melilla) #' #' @source \url{http://www.ign.es/web/ign/portal/rcc-nomenclator-nacional} #' #' @format A data frame with 52 rows y 4 variables: #' \itemize{ #' \item INECodMuni: Código municipio más usado (5 digitos) #' \item NOMBRE_ACTUAL: Nombre del municipio #' \item COD_PROV: Código de la provincia. #' \item PROVINCIA: Nombre de la provincia. #' } #' #' @examples #' \dontrun{ #' df <- capitales_prov #' } #' "capitales_prov"
75c7060db002bf23cd7bb114667d7da20f69e073
a9dec343915b76a49ad1a7e57a53f3690bc40f5a
/R/utils.R
e724dee952723ef03eb2498354b3c564c72e7741
[]
no_license
LongBeachInnovationTeam/CLB-Business-Licensing-Trend-Analysis
8d7f834c5eacf1bed97edfd274774cbe17cbcd19
589b5c9a9f9d8cf1067b40bd7fac3b8f4daaf71b
refs/heads/master
2020-03-13T10:32:49.570402
2017-03-11T09:39:49
2017-03-11T09:39:49
null
0
0
null
null
null
null
UTF-8
R
false
false
5,680
r
utils.R
#' Merge history and forecast for plotting. #' #' @param m Prophet object. #' @param fcst Data frame returned by prophet predict. #' #' @importFrom dplyr "%>%" df_for_plotting <- function(m, fcst) { # Make sure there is no y in fcst fcst$y <- NULL df <- m$history %>% dplyr::select(ds, y) %>% dplyr::full_join(fcst, by = "ds") %>% dplyr::arrange(ds) return(df) } #' Plot the prophet forecast. #' #' @param x Prophet object. #' @param fcst Data frame returned by predict(m, df). #' @param uncertainty Boolean indicating if the uncertainty interval for yhat #' should be plotted. Must be present in fcst as yhat_lower and yhat_upper. #' @param xlabel Optional label for x-axis #' @param ylabel Optional label for y-axis #' @param ... additional arguments #' #' @return A ggplot2 plot. #' #' @examples #' \dontrun{ #' history <- data.frame(ds = seq(as.Date('2015-01-01'), as.Date('2016-01-01'), by = 'd'), #' y = sin(1:366/200) + rnorm(366)/10) #' m <- prophet(history) #' future <- make_future_dataframe(m, periods = 365) #' forecast <- predict(m, future) #' plot_forecast(m, forecast) #' } #' #' @export plot_forecast <- function(x, fcst, uncertainty = TRUE, xlabel = 'ds', ylabel = 'y', ...) { df <- df_for_plotting(x, fcst) forecast.color <- "#0072B2" gg <- ggplot2::ggplot(df, ggplot2::aes(x = ds, y = y)) + ggplot2::labs(x = xlabel, y = ylabel) if (exists('cap', where = df)) { gg <- gg + ggplot2::geom_line( ggplot2::aes(y = cap), linetype = 'dashed', na.rm = TRUE) } if (uncertainty && exists('yhat_lower', where = df)) { gg <- gg + ggplot2::geom_ribbon(ggplot2::aes(ymin = yhat_lower, ymax = yhat_upper), alpha = 0.2, fill = forecast.color, na.rm = TRUE) } gg <- gg + ggplot2::geom_point(na.rm=TRUE) + ggplot2::geom_line(ggplot2::aes(y = yhat), color = forecast.color, na.rm = TRUE) + ggplot2::theme(aspect.ratio = 3 / 5) return(gg) } #' Plot the trend component of a prophet forecast. 
#' Prints a ggplot2 with panels for trend, weekly and yearly seasonalities if #' present, and holidays if present. #' #' @param m Prophet object. #' @param fcst Data frame returned by predict(m, df). #' @param uncertainty Boolean indicating if the uncertainty interval should be #' plotted for the trend, from fcst columns trend_lower and trend_upper. #' @param xlabel Optional label for x-axis #' @param ylabel Optional label for y-axis #' #' @return A ggplot2 plot. #' #' @export #' @importFrom dplyr "%>%" prophet_plot_trend_component <- function(m, fcst, uncertainty = TRUE, xlabel = 'Day of year', ylabel = '') { df <- df_for_plotting(m, fcst) forecast.color <- "#0072B2" # Plot the trend gg.trend <- ggplot2::ggplot(df, ggplot2::aes(x = ds, y = trend)) + ggplot2::geom_line(color = forecast.color, na.rm = TRUE) if (exists('cap', where = df)) { gg.trend <- gg.trend + ggplot2::geom_line(ggplot2::aes(y = cap), linetype = 'dashed', na.rm = TRUE) } if (uncertainty) { gg.trend <- gg.trend + ggplot2::geom_ribbon(ggplot2::aes(ymin = trend_lower, ymax = trend_upper), alpha = 0.2, fill = forecast.color, na.rm = TRUE) } gg.trend <- gg.trend + ggplot2::labs(x = xlabel, y = ylabel) return (gg.trend) } #' Plot the yearly component of a prophet forecast. #' Prints a ggplot2 with panels for trend, weekly and yearly seasonalities if #' present, and holidays if present. #' #' @param m Prophet object. #' @param fcst Data frame returned by predict(m, df). #' @param uncertainty Boolean indicating if the uncertainty interval should be #' plotted for the trend, from fcst columns trend_lower and trend_upper. #' @param xlabel Optional label for x-axis #' @param ylabel Optional label for y-axis #' #' @return A ggplot2 plot. 
#' #' @export #' @importFrom dplyr "%>%" prophet_plot_yearly_component <- function(m, fcst, uncertainty = TRUE, xlabel = 'Day of year', ylabel = '') { df <- df_for_plotting(m, fcst) forecast.color <- "#0072B2" # Plot yearly seasonality, if present if ("yearly" %in% colnames(df)) { # Drop year from the dates df.s <- df %>% dplyr::mutate(doy = strftime(ds, format = "2000-%m-%d")) %>% dplyr::group_by(doy) %>% dplyr::slice(1) %>% dplyr::ungroup() %>% dplyr::mutate(doy = zoo::as.Date(doy)) %>% dplyr::arrange(doy) gg.yearly <- ggplot2::ggplot(df.s, ggplot2::aes(x = doy, y = yearly, group = 1)) + ggplot2::geom_line(color = forecast.color, na.rm = TRUE) + ggplot2::scale_x_date(labels = scales::date_format('%B %d')) + ggplot2::labs(x = xlabel, y = ylabel) if (uncertainty) { gg.yearly <- gg.yearly + ggplot2::geom_ribbon(ggplot2::aes(ymin = yearly_lower, ymax = yearly_upper), alpha = 0.2, fill = forecast.color, na.rm = TRUE) } return (gg.yearly) } } export_df_as_csv <- function(df, filename) { write_csv(df, filename, na = "") } #' @importFrom dplyr "%>%" df_daily_count <- function(df, d) { return ( df %>% mutate(ds = lubridate::as_date(d)) %>% group_by(ds) %>% filter(!is.na(ds)) %>% summarise( y = log(n()) ) %>% select(ds, y) ) }
ecc9b852d19374db7161dbe6390b239a18688619
3ece7008ae659e4604da95e3936300eab19732f4
/man/plotEIC.xsAnnotate.Rd
0a41ffa2fc712d142172b1ac5272c4251e26bbe3
[]
no_license
sneumann/CAMERA
14600dee1054917c03c2a48ec24092d7ba5e3416
89bd1e8d7e720bae6532eef5328e602acb38e82a
refs/heads/master
2023-04-12T19:41:01.256040
2022-02-08T08:48:02
2022-02-08T08:48:02
18,191,106
10
24
null
2021-09-27T08:09:11
2014-03-27T20:55:17
R
UTF-8
R
false
false
960
rd
plotEIC.xsAnnotate.Rd
\name{plotEICs-methods} \docType{methods} \alias{plotEICs.xsAnnotate} \alias{plotEICs} \alias{plotEICs,xsAnnotate-method} \title{Plot extracted ion chromatograms from (multiple) Pseudospectra} \description{ Batch plot a list of extracted ion chromatograms to the current graphics device. } \section{Methods}{ \describe{ \item{object = "xsAnnotate"}{ \code{ plotEICs(object, xraw, pspec=1:length(object@pspectra), maxlabel=0, sleep=0)} } } } \value{ None. } \arguments{ \item{object}{the \code{xsAnnotate} object} \item{xraw}{\code{xcmsRaw} object underlying the the xsAnnotate} \item{maxlabel}{How many m/z labels to print} \item{sleep}{seconds to pause between plotting EICs} \item{...}{other graphical parameters} } \author{Steffen Neumann, \email{sneumann@ipb-halle.de}} \seealso{ \code{\link{xsAnnotate-class}}, \code{\link{png}}, \code{\link{pdf}}, \code{\link{postscript}}, } \keyword{methods} \keyword{hplot}
7da0d8aebe1df0e903a54a8468bd37e99344870b
a7458ddecee95867d027d83193fa0423575e171f
/plotMetabolites.R
c55a7dfc069586ed2854ba9d1b5dd6eff0e36253
[]
no_license
riinbre-bioinfo/Ghonem_Metabolites
5a3e20f3ee00ed7fb3f5b582a52d35fc62fff4c2
fb43a47df101e4363936be9c8f7372a723630a5f
refs/heads/master
2020-05-23T05:00:33.946983
2019-05-14T15:05:52
2019-05-14T15:05:52
186,644,305
1
0
null
null
null
null
UTF-8
R
false
false
3,800
r
plotMetabolites.R
# script to create strip plots for the metabolite data, run ANOVA for the metabolite data and create bar plots for enzyme data metabsStripPlot <- function(data_meta.df, data_log.df, xvar, shape, color, grouped = FALSE, shapes = FALSE, colors = FALSE, brewer = FALSE, legend = TRUE) { metabNames = names(data_log.df) data.df = cbind(data_meta.df, data_log.df) stripPlot <- function(metab, data, xvar, shape, color, grouped, shapes, colors, brewer) { p = ggplot(data, aes_(x = as.name(xvar), y = as.name(metab), shape = as.name(shape), color = as.name(color))) + theme_bw() if (legend == FALSE) { p = p + theme(legend.position = "none") } if (is.vector(shapes)) { p = p + scale_shape_manual(values = shapes) } if (is.character(colors) && brewer == FALSE) { p = p + scale_color_manual(values = colors) } else if (is.character(colors) && brewer == TRUE) { p = p + scale_color_brewer(palette = colors) } else { p = p + scale_color_brewer(palette="Dark2") } if (grouped == TRUE) { p = p + geom_boxplot(position = position_dodge(0.5)) + geom_jitter(position = position_dodge(0.5), size = 3) } else { p = p + geom_jitter(position = position_jitter(0.2), size = 3) } p = p + stat_summary(fun.data = mean_se, geom = "errorbar", color = "black", width = 0.5, size = 1.2) + stat_summary(fun.y=mean, geom = "point", color = "black", size = 4, pch = 1) + theme(text = element_text(face = "bold", size = 18), axis.text = element_text(face = "bold", size = 14)) } lapply(metabNames, stripPlot, data = data.df, xvar = xvar, shape = shape, color = color, grouped = grouped, shapes = shapes, colors = colors, brewer = brewer) } metabANOVA <- function(data_meta.df, data_log.df, fixed, random) { # run ANOVA and multicomp Tukey on all metabolites metabNames = names(data_log.df) data.df = cbind(data_meta.df, data_log.df) runANOVA <- function(metab, data, fixed, random) { form = as.formula(paste(metab, paste(fixed, random, sep = "+"), sep = "~")) # construct formula data.aov = aov(form, data = data) # run anova args 
= list("Tukey") # next commands pass "Treatment" to mcp names(args) = fixed cmp = do.call(mcp, args) data.glht = glht(data.aov, linfct = cmp) # run glht summary(data.glht) } summaries = lapply(metabNames, runANOVA, data = data.df, fixed = fixed, random = random) # return list of anova summaries names(summaries) <- names(data_log.df) summaries } enzymesBarPlot <- function(enzyme, data.df, group) { df.summ <- data.df %>% group_by(get(group)) %>% summarize(Mean = mean(get(enzyme)), se = sd(get(enzyme))/sqrt(n())) names(df.summ)[1] <- group df.summ # p <- ggplot(df.summ, aes(x = get(group), y = Mean, fill = get(group))) + # geom_bar(stat = "identity") + # stat_summary(fun.data = mean_se, geom = "errorbar", color = "black", width = 0.25) # p <- ggplot(df.summ, aes(x = TreatCohort, y = Mean, ymin = Min, ymax = Max, fill = TreatCohort)) + # geom_bar(stat = "identity") # enz_summaries <- lapply(enzymes, function(x) tapply(data.df[, x], data.df[, group], mean)) # names(enz_summaries) <- enzymes # means.df <- rownames_to_column(as.data.frame(enz_summaries), var = group) # barPlot <- function(enzyme, data, xvar) { # p = ggplot(data.df[, enzymes], aes_(x = as.name(xvar), y = as.name(enzyme))) + theme_bw() # p = p + geom_bar(stat("identity")) # p = p + stat_summary(fun.data = mean_se, geom = "errorbar", color = "black", width = 0.25) # } # lapply(enzymes, barPlot, data = data.df, xvar = "TreatCohort") # means.df }
09386f9af5d77f52612eda89fd3f4d899122f933
29585dff702209dd446c0ab52ceea046c58e384e
/TreePar/R/Fderifuncshifth.R
efac414b4b210761ff62d7c2e1fa124650f099f8
[]
no_license
ingted/R-Examples
825440ce468ce608c4d73e2af4c0a0213b81c0fe
d0917dbaf698cb8bc0789db0c3ab07453016eab9
refs/heads/master
2020-04-14T12:29:22.336088
2016-07-21T14:01:14
2016-07-21T14:01:14
null
0
0
null
null
null
null
UTF-8
R
false
false
464
r
Fderifuncshifth.R
Fderifuncshifth<-function(time,t,lambda,mu,k) { i <- inter(time,t) if (time==0){ res<-0 } else { tnew<-c(t[1:i],time) res<-lambda[i]/(lambda[i]-mu[i]) if (i>1) { for (j in 1:(i-1)) { res<-res*exp((lambda[j]-mu[j])*(tnew[j+1]-tnew[j]))}} # if (i<k){ # res<-res*(exp((lambda[i]-mu[i])*(tnew[i+1]-tnew[i]))-1) # } else { res<-res*((lambda[i]-mu[i])*exp((lambda[i]-mu[i])*(tnew[i+1]-tnew[i]))) } #res<-res+Ffuncshifth(t[i],t,lambda,mu) #} res }
a4fb656d85b481004c804e01b776a4169cdb4e8f
c647ec9c91130d4ecf2d7f1cce97827a833516bd
/man/dem.Rd
9a7075ea2a22d7d883378c9827fb9d8a3b3856ee
[ "MIT" ]
permissive
sysmedlab/ehR
1f8e75ed9101bf1416e5cd2106de1cfb6fb978d5
41c219b88c76f2432aa61df2b959d235d71ed0a1
refs/heads/master
2021-09-08T02:01:13.250539
2018-02-23T21:58:27
2018-02-23T21:58:27
103,680,558
4
3
MIT
2018-03-05T15:56:34
2017-09-15T16:52:49
R
UTF-8
R
false
true
464
rd
dem.Rd
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/dem_data.R \docType{data} \name{dem} \alias{dem} \title{Simulated EHR data - Demographics.} \format{An object of class \code{data.table} (inherits from \code{data.frame}) with 400 rows and 9 columns.} \usage{ data(dem) } \description{ Simulated EHR data (no PHI constraints) featuring demographics data for ~500 patients. } \details{ Maintained by: Clara Marquardt } \keyword{datasets}
4176af58a85909ab57c2554da2abd7ad3dbb7889
2da2406aff1f6318cba7453db555c7ed4d2ea0d3
/inst/snippet/mvn-marginal03.R
ab6d560886777b242e9cb7cab6a7dace96c08c4d
[]
no_license
rpruim/fastR2
4efe9742f56fe7fcee0ede1c1ec1203abb312f34
d0fe0464ea6a6258b2414e4fcd59166eaf3103f8
refs/heads/main
2022-05-05T23:24:55.024994
2022-03-15T23:06:08
2022-03-15T23:06:08
3,821,177
11
8
null
null
null
null
UTF-8
R
false
false
364
r
mvn-marginal03.R
# simulate 3 independent Norm(0,1) vars Z1 <- rnorm(100000); Z2 <- rnorm(100000); Z3 <- rnorm(100000) # create the X's from the Z's X1 <- 1 + Z1; X2 <- 2 + Z1 + Z2; X3 <- 3 + Z1 + 2 * Z2 + Z3 data.frame( `E(X1)` = mean(X1), `E(X2)` = mean(X2), `Var(X1)` = var(X1), `Var(X2)` = var(X2), `Cov(X1, X2)` = cov(X1, X2), check.names = FALSE)
e8efe19c9ff802b900f3bb72cbe64a17bb793437
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
/data/genthat_extracted_code/stream/vignettes/stream.R
50879b7baebb0b75805b090f5d43d0c62f3a242e
[]
no_license
surayaaramli/typeRrh
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
66e6996f31961bc8b9aafe1a6a6098327b66bf71
refs/heads/master
2023-05-05T04:05:31.617869
2019-04-25T22:10:06
2019-04-25T22:10:06
null
0
0
null
null
null
null
UTF-8
R
false
false
17,013
r
stream.R
### R code from vignette source 'stream.Rnw' ### Encoding: UTF-8 ################################################### ### code chunk number 1: stream.Rnw:138-139 ################################################### options(width = 75, digits = 3, prompt = 'R> ', scipen = 3) ################################################### ### code chunk number 2: stream.Rnw:654-657 ################################################### library("stream") set.seed(1000) stream <- DSD_Gaussians(k = 3, d = 2) ################################################### ### code chunk number 3: stream.Rnw:666-668 ################################################### dstream <- DSC_DStream(gridsize = .1, Cm = 1.2) update(dstream, stream, n = 500) ################################################### ### code chunk number 4: initial_example ################################################### km <- DSC_Kmeans(k = 3) recluster(km, dstream) plot(km, stream, type = "both") ################################################### ### code chunk number 5: stream.Rnw:884-889 ################################################### library("stream") set.seed(1000) stream <- DSD_Gaussians(k = 3, d = 3, noise = .05, p = c(.5, .3, .1)) stream ################################################### ### code chunk number 6: stream.Rnw:910-912 ################################################### p <- get_points(stream, n = 5) p ################################################### ### code chunk number 7: stream.Rnw:923-925 ################################################### p <- get_points(stream, n = 100, class = TRUE) head(p, n = 10) ################################################### ### code chunk number 8: static ################################################### plot(stream, n = 500) ################################################### ### code chunk number 9: static_pc ################################################### plot(stream, n = 500, method = "pc") ################################################### ### code chunk 
number 10: moa1 ################################################### set.seed(1000) stream <- DSD_Benchmark(1) stream ################################################### ### code chunk number 11: stream.Rnw:985-989 (eval = FALSE) ################################################### ## for(i in 1:4) { ## plot(stream, 250, xlim = c(0, 1), ylim = c(0, 1)) ## tmp <- get_points(stream, n = 1400) ## } ################################################### ### code chunk number 12: moa1 ################################################### plot(stream, 250, xlim = c(0, 1), ylim = c(0, 1)) arrows(.15, .85, .85, .15, col = rgb(.8, .8, .8, .6), lwd = 10) arrows(.15, .15, .85, .85, col = rgb(.8, .8, .8, .6), lwd = 10) tmp <- get_points(stream, n = 1400) ################################################### ### code chunk number 13: moa2 ################################################### plot(stream, 250, xlim = c(0, 1), ylim = c(0, 1)) arrows(.15, .85, .85, .15, col = rgb(.8, .8, .8, .6), lwd = 10) arrows(.15, .15, .85, .85, col = rgb(.8, .8, .8, .6), lwd = 10) tmp <- get_points(stream, n=1400) ################################################### ### code chunk number 14: moa3 ################################################### plot(stream, 250, xlim = c(0, 1), ylim = c(0, 1)) arrows(.15,.85,.85,.15, col=rgb(.8,.8,.8,.6), lwd=10) arrows(.15,.15,.85,.85, col=rgb(.8,.8,.8,.6), lwd=10) tmp <- get_points(stream, n=1400) ################################################### ### code chunk number 15: moa4 ################################################### plot(stream, 250, xlim=c(0,1), ylim=c(0,1)) arrows(.15,.85,.85,.15, col=rgb(.8,.8,.8,.6), lwd=10) arrows(.15,.15,.85,.85, col=rgb(.8,.8,.8,.6), lwd=10) ################################################### ### code chunk number 16: stream.Rnw:1041-1044 (eval = FALSE) ################################################### ## reset_stream(stream) ## animate_data(stream, n = 10000, horizon = 100, ## xlim = c(0, 1), ylim = c(0, 1)) 
################################################### ### code chunk number 17: stream.Rnw:1050-1053 (eval = FALSE) ################################################### ## library("animation") ## animation::ani.options(interval = .1) ## ani.replay() ################################################### ### code chunk number 18: stream.Rnw:1060-1062 (eval = FALSE) ################################################### ## saveHTML(ani.replay()) ## saveGIF(ani.replay()) ################################################### ### code chunk number 19: stream.Rnw:1086-1089 ################################################### library("stream") set.seed(1000) stream <- DSD_Gaussians(k = 3, d = 5) ################################################### ### code chunk number 20: stream.Rnw:1094-1095 (eval = FALSE) ################################################### ## write_stream(stream, "data.csv", n = 100, sep = ",") ################################################### ### code chunk number 21: stream.Rnw:1125-1129 ################################################### file <- system.file("examples", "kddcup10000.data.gz", package = "stream") stream_file <- DSD_ReadCSV(gzfile(file), take = c(1, 5, 6, 8:11, 13:20, 23:42), class = 42, k = 7) stream_file ################################################### ### code chunk number 22: stream.Rnw:1141-1142 ################################################### get_points(stream_file, n = 5) ################################################### ### code chunk number 23: stream.Rnw:1149-1151 ################################################### stream_scaled <- DSD_ScaleStream(stream_file, center = TRUE, scale = TRUE) get_points(stream_scaled, n = 5) ################################################### ### code chunk number 24: stream.Rnw:1182-1184 ################################################### data("EuStockMarkets", package = "datasets") head(EuStockMarkets) ################################################### ### code chunk number 25: 
stream.Rnw:1191-1193 ################################################### replayer <- DSD_Memory(EuStockMarkets, k = NA) replayer ################################################### ### code chunk number 26: stream.Rnw:1199-1201 ################################################### get_points(replayer, n = 5) replayer ################################################### ### code chunk number 27: stream.Rnw:1209-1210 (eval = FALSE) ################################################### ## get_points(replayer, n = 2000) ################################################### ### code chunk number 28: stream.Rnw:1212-1214 ################################################### err <- try(get_points(replayer, n = 2000)) cat(err) ################################################### ### code chunk number 29: stream.Rnw:1228-1230 ################################################### reset_stream(replayer, pos = 100) replayer ################################################### ### code chunk number 30: stream.Rnw:1536-1539 ################################################### library("stream") set.seed(1000) stream <- DSD_Gaussians(k = 3, d = 2, noise = .05) ################################################### ### code chunk number 31: stream.Rnw:1547-1549 ################################################### dstream <- DSC_DStream(gridsize = .1, Cm = 1.2) dstream ################################################### ### code chunk number 32: stream.Rnw:1557-1559 ################################################### update(dstream, stream, n = 500) dstream ################################################### ### code chunk number 33: stream.Rnw:1568-1569 ################################################### head(get_centers(dstream)) ################################################### ### code chunk number 34: cluster ################################################### plot(dstream, stream) ################################################### ### code chunk number 35: cluster-grid 
################################################### plot(dstream, stream, grid = TRUE) ################################################### ### code chunk number 36: stream.Rnw:1889-1893 ################################################### library("stream") stream <- DSD_Gaussians(k = 3, d = 2, noise = .05) dstream <- DSC_DStream(gridsize = .1) update(dstream, stream, n = 2000) ################################################### ### code chunk number 37: stream.Rnw:1901-1902 ################################################### evaluate(dstream, stream, n = 100) ################################################### ### code chunk number 38: stream.Rnw:1913-1914 ################################################### evaluate(dstream, stream, measure = c("purity", "crand"), n = 500) ################################################### ### code chunk number 39: stream.Rnw:1955-1961 ################################################### set.seed(1000) stream <- DSD_Benchmark(1) dstream <- DSC_DStream(gridsize = .05, lambda = .01) ev <- evaluate_cluster(dstream, stream, measure = c("numMicroClusters", "purity"), n = 5000, horizon = 100) head(ev) ################################################### ### code chunk number 40: evaluation ################################################### plot(ev[ , "points"], ev[ , "purity"], type = "l", ylab = "Avg. 
Purity", xlab = "Points") ################################################### ### code chunk number 41: stream.Rnw:1994-1999 (eval = FALSE) ################################################### ## set.seed(1000) ## stream <- DSD_Benchmark(1) ## dstream <- DSC_DStream(gridsize = .05, lambda = .01) ## r <- animate_cluster(dstream, stream, horizon = 100, n = 5000, ## measure = "purity", plot.args = list(xlim = c(0, 1), ylim = c(0, 1))) ################################################### ### code chunk number 42: stream.Rnw:2029-2036 ################################################### library("stream") set.seed(1000) stream <- DSD_Gaussians(k = 3, d = 2, noise = .05) dstream <- DSC_DStream(gridsize = .05, Cm = 1.5) update(dstream, stream, n = 1000) dstream ################################################### ### code chunk number 43: recluster ################################################### plot(dstream, stream, type = "both") ################################################### ### code chunk number 44: recluster2 ################################################### km <- DSC_Kmeans(k = 3, weighted = TRUE) recluster(km, dstream) km plot(km, stream, type = "both") ################################################### ### code chunk number 45: stream.Rnw:2097-2098 ################################################### evaluate(km, stream, measure = c("purity", "crand", "SSQ"), n = 1000) ################################################### ### code chunk number 46: stream.Rnw:2103-2105 ################################################### evaluate(km, stream, c(measure = "purity", "crand", "SSQ"), n = 1000, assign = "macro") ################################################### ### code chunk number 47: stream.Rnw:2128-2131 ################################################### points <- get_points(stream, n = 100) assignment <- get_assignment(dstream, points, type = "macro") assignment ################################################### ### code chunk number 48: silhouette 
################################################### assignment[is.na(assignment)] <- 0L library("cluster") plot(silhouette(assignment, dist = dist(points))) ################################################### ### code chunk number 49: data_bng ################################################### set.seed(1000) library("stream") stream <- DSD_Memory(DSD_BarsAndGaussians(noise = .05), n = 1500) stream plot(stream) ################################################### ### code chunk number 50: stream.Rnw:2356-2364 ################################################### algorithms <- list( 'Sample' = DSC_TwoStage(micro = DSC_Sample(k = 100), macro = DSC_Kmeans(k = 4)), 'Window' = DSC_TwoStage(micro = DSC_Window(horizon = 100), macro = DSC_Kmeans(k = 4)), 'D-Stream' = DSC_DStream(gridsize = .7, Cm = 1.5), 'DBSTREAM' = DSC_DBSTREAM(r = .45) ) ################################################### ### code chunk number 51: stream.Rnw:2374-2378 ################################################### for(a in algorithms) { reset_stream(stream) update(a, stream, n = 1000) } ################################################### ### code chunk number 52: stream.Rnw:2383-2384 ################################################### sapply(algorithms, nclusters, type = "micro") ################################################### ### code chunk number 53: microclusters ################################################### op <- par(no.readonly = TRUE) layout(mat = matrix(1:length(algorithms), ncol = 2)) for(a in algorithms) { reset_stream(stream) plot(a, stream, main = description(a), type = "micro") } par(op) ################################################### ### code chunk number 54: microclusters_assignment ################################################### op <- par(no.readonly = TRUE) layout(mat = matrix(1:length(algorithms), ncol = 2)) for(a in algorithms) { reset_stream(stream) plot(a, stream, main = description(a), assignment = TRUE, weight = FALSE, type = "micro") } par(op) 
################################################### ### code chunk number 55: stream.Rnw:2462-2469 ################################################### sapply(algorithms, FUN=function(a) { reset_stream(stream, pos = 1001) evaluate(a, stream, measure = c("numMicroClusters", "purity"), type = "micro", n = 500) }) ################################################### ### code chunk number 56: macroclusters ################################################### op <- par(no.readonly = TRUE) layout(mat=matrix(1:length(algorithms), ncol = 2)) for(a in algorithms) { reset_stream(stream) plot(a, stream, main = description(a), type = "both") } par(op) ################################################### ### code chunk number 57: stream.Rnw:2513-2519 ################################################### sapply(algorithms, FUN = function(a) { reset_stream(stream, pos = 1001) evaluate(a, stream, measure = c("numMacroClusters", "purity", "SSQ", "cRand", "silhouette"), n = 500, assign = "micro", type = "macro") }) ################################################### ### code chunk number 58: stream.Rnw:2541-2543 ################################################### set.seed(0) stream <- DSD_Memory(DSD_Benchmark(1), n = 5000) ################################################### ### code chunk number 59: stream.Rnw:2553-2561 ################################################### algorithms <- list( 'Sample' = DSC_TwoStage(micro = DSC_Sample(k = 100, biased = TRUE), macro = DSC_Kmeans(k = 2)), 'Window' = DSC_TwoStage(micro = DSC_Window(horizon = 100, lambda = .01), macro = DSC_Kmeans(k = 2)), 'D-Stream' = DSC_DStream(gridsize = .1, lambda = .01), 'DBSTREAM' = DSC_DBSTREAM(r = .05, lambda = .01) ) ################################################### ### code chunk number 60: stream.Rnw:2572-2577 ################################################### evaluation <- lapply(algorithms, FUN = function(a) { reset_stream(stream) evaluate_cluster(a, stream, horizon = 100, n = 5000, measure = "crand", type = 
"macro", assign = "micro") }) ################################################### ### code chunk number 61: stream.Rnw:2593-2596 ################################################### Position <- evaluation[[1]][ , "points"] cRand <- sapply(evaluation, FUN = function(x) x[ , "cRand"]) head(cRand) ################################################### ### code chunk number 62: dynamic ################################################### matplot(Position, cRand, type = "l", lwd = 2) legend("bottomleft", legend = names(evaluation), col = 1:6, lty = 1:6, bty = "n", lwd = 2) ################################################### ### code chunk number 63: dynamic_box ################################################### boxplot(cRand, las = 2, cex.axis = .8) ################################################### ### code chunk number 64: stream.Rnw:2654-2662 (eval = FALSE) ################################################### ## library("stream") ## con <- gzcon( ## url(paste("http://archive.ics.uci.edu/ml/machine-learning-databases/", ## "kddcup99-mld/kddcup.data.gz", sep=""))) ## ## stream <- DSD_ReadCSV(con, take=c(1, 5, 6, 8:11, 13:20, 23:42), ## class=42, k=7) ## stream2 <- DSD_ScaleStream(stream, n=1000) ################################################### ### code chunk number 65: stream.Rnw:2668-2670 (eval = FALSE) ################################################### ## dstream <- DSC_DStream(gridsize = .5, gaptime = 10000L, lambda = .01) ## update(dstream, stream2, n = 4000000, verbose = TRUE)
ce983931c4bab5ec37816b88b1fea7fe19fc28d4
b9f0c8a42d1da0aa40fa46197d3c4174648fe6e1
/src/analyze.R
9770a0cdae4f092c358497a7d1e7df738d08f61b
[ "BSD-2-Clause" ]
permissive
danlooo/correlation-workshop
e8f78a27e96bf7c65346a04d600cab1e81003e82
a9040dfcf2c432d8ba79596d615600e19ad79d27
refs/heads/main
2023-03-28T14:36:28.264046
2021-03-26T15:45:25
2021-03-26T15:45:25
null
0
0
null
null
null
null
UTF-8
R
false
false
730
r
analyze.R
#!/usr/bin/env Rscript # # Setup and configure drake # source(here::here("src/setup.R")) cache <- drake::new_cache(".drake") drake::drake_cache(here::here(".drake"))$unlock() make( # plan plan = plan, parallelism = "clustermq", # allow multiple works to access global env at the same time lock_envir = FALSE, recoverable = TRUE, recover = TRUE, # caching cache = cache, garbage_collection = TRUE, memory_strategy = "preclean", # config seed = 1337, keep_going = TRUE, jobs = jobs, # output verbose = 1, format = "qs", log_make = "log/drake.log" ) rmarkdown::render( input = "src/report.Rmd", knit_root_dir = ".", output_dir = ".", output_file = "report.html", )
2b3a2b72d023953c6f92945bea646afd13a28a39
38d127b3e63855d9897eb82f76f9def1fc381e6e
/R/getSubaccounts.R
0905a84e6513cf231636824d1a7f0625a08473d9
[]
no_license
erikpal/bRush
ed2ac601050d82eac15211dacc3047923df413a4
42f47fe87ee982e9207b2f9ec6577daa0a01e764
refs/heads/master
2023-05-11T17:36:19.160006
2023-05-02T15:57:42
2023-05-02T15:57:42
88,922,475
15
2
null
null
null
null
UTF-8
R
false
false
834
r
getSubaccounts.R
#' Get Sub-accounts #' #' Get details of sub-accounts for a provided account. #' @param accountID Integer of the account ID to find sub-accounts for #' @param recursive Boolean to search returned subaccounts for subaccounts #' @param server The base url of a Canvas installation #' @param ... Optional page options to pass to processRequest #' @export getSubaccounts <- function(accountID, recursive = FALSE, server = "test", ...) { url <- loadURL(server) url$path <- "api/v1/accounts/accountID/sub_accounts" url$path <- sub("accountID", accountID, url$path) if (recursive == TRUE) { url$query <- list(recursive = TRUE) } ##Pass the url to the request processor results <- processRequest(url, ...) return(results) }
08bb1d8e8c80acf9ac1ebcdbff1ad04604703bb7
c242f642570f4124c986687a362d46684b058e76
/R/multilevelPSA.demo.R
952c854168a78677123fca23ff5024daac165c4d
[]
no_license
cityofsmiles87/Dissertation
dc07bb3afdf3e7fbe28b9b0030378dbacf7c5e83
8aa6bbad542f4e4a0f777587415e9f1d4736616f
refs/heads/master
2020-09-14T14:25:53.132408
2014-12-16T14:53:29
2014-12-16T14:53:29
null
0
0
null
null
null
null
UTF-8
R
false
false
1,877
r
multilevelPSA.demo.R
options(digits=3) options(width=80) install.packages('multilevelPSA', repos='http://cran.r-project.org') require('multilevelPSA') data(pisana) data(pisa.psa.cols) pisana$MathScore <- apply(pisana[,paste0('PV', 1:5, 'MATH')], 1, sum) / 5 pisana <- pisana[,!names(pisana) %in% c(paste0('PV', 1:5, 'MATH'),paste0('PV', 1:5, 'SCIE'),paste0('PV', 1:5, 'READ'))] mlpsa <- mlpsa.ctree(pisana[,c('CNT','PUBPRIV',pisa.psa.cols)], formula=PUBPRIV ~ ., level2='CNT') mlpsa.df <- getStrata(mlpsa, pisana, level2='CNT') names(mlpsa.df) mlpsa.lr <- mlpsa.logistic(pisana[,c('CNT','PUBPRIV',pisa.psa.cols)], formula=PUBPRIV ~ ., level2='CNT') mlpsa.lr.df <- getPropensityScores(mlpsa.lr, nStrata=5) cv.bal <- covariate.balance(covariates=student[,pisa.psa.cols], treatment=student$PUBPRIV, level2=student$CNT, strata=mlpsa.df$strata) plot(cv.bal) + theme(axis.text.y=element_text(size=5)) ggsave('~/Dropbox/School/Dissertation/Figures/pisabalance.pdf', width=6, height=8.5) mlpsa.df$PUBPRIV <- factor(as.character(mlpsa.df$PUBPRIV), levels=c('Public','Private')) table(mlpsa.df$PUBPRIV) results.psa.math <- mlpsa(response=mlpsa.df$MathScore, treatment=mlpsa.df$PUBPRIV, strata=mlpsa.df$strata, level2=mlpsa.df$CNT) summary(results.psa.math) pdf('Figures/pisamlpsa.pdf', width=6, height=6) plot(results.psa.math) dev.off() mlpsa.circ.plot(results.psa.math, level2.label=FALSE) ggsave('Figures/PISACircPlot.pdf', width=7, height=7) mlpsa.difference.plot(results.psa.math, sd=mean(mlpsa.df$MathScore, na.rm=TRUE), xlim=c(-.2, .05)) ggsave('Figures/pisadiffplot.pdf', width=8, height=3) results.psa.lr.math <- mlpsa(response=pisana$MathScore, treatment=pisana$PUBPRIV, strata=mlpsa.lr.df$strata, level2=mlpsa.lr.df$level2) summary(results.psa.lr.math) plot(results.psa.lr.math)
e0261fce2db31e01c6b1e7be958fa80305e1aeb1
b83cfb6a045040319338cf5e16c0392887993ee0
/RCode/ROracle.r
65dff1360dd10dc769c420104cb8e78dbbf1e60f
[]
no_license
xiaojiezhou/OftenUsed
75360c9318158e24045a646e97d61fbf9f02b44e
d016df176fa24b9763da0ce4aa408184f533273e
refs/heads/master
2022-06-22T03:52:56.112580
2020-05-06T14:42:37
2020-05-06T14:42:37
260,552,757
0
0
null
null
null
null
UTF-8
R
false
false
376
r
ROracle.r
# install.packages('ROracle') require(ROracle) exaDrv = dbDriver("Oracle") exaCon = dbConnect(exaDrv, username="[AN_RT_WS95]", dbname="exa_uskrgprdh") statsdat <- dbGetQuery(exaCon, " SELECT * FROM HHSPEND_MONTH_TEST_V2 where rownum <= 100 " ) dbDisconnect(exaCon) str(statsdat)
309f4b10158a7980384b46910d9981f50f9f32a5
f23b7b75a87ea3280c9e984a3b275a6fd83e135f
/Source/man/read.run.file.Rd
d8fc3ce0f5d41e7258e79e2d6b64cd579127ad14
[]
no_license
HumanExposure/SHEDSHTRPackage
1d830b5d8211e3e7ae000239cec7c17a44af9952
c3d6c45557e77c29592218d7056e0d110e55e5ee
refs/heads/master
2023-07-20T18:44:16.311968
2021-09-03T17:01:52
2021-09-03T17:01:52
113,353,498
4
3
null
null
null
null
UTF-8
R
false
true
510
rd
read.run.file.Rd
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/ReadData.R \name{read.run.file} \alias{read.run.file} \title{read.run.file} \usage{ read.run.file(run.file = "run_test.txt") } \description{ Each SHEDS run has its own "run.file" that the user prepares before the run. This file contains all the settings and file references needed for the run. Read.run.file occurs at the start of each SHEDS run. The contents of the run.file are examined, and stored in the R object "specs". }
368f1e9deea069c7c6b0f41543909f73766c9120
57aba6b27797866b9c6721d3d1237bba95052659
/Data/PCA.R
44c37fef94331b0d96ed2eb1e5c6b0a7af5f62a8
[]
no_license
efwaring/autumnalTemps
8c179b35ce2c9bd1fa2c464a5c4568549a2e5ea3
3c7021a6f304a7150330b49e454d4675fe69dedb
refs/heads/master
2021-09-06T23:55:56.915291
2018-02-13T18:41:52
2018-02-13T18:41:52
59,041,442
0
0
null
null
null
null
UTF-8
R
false
false
883
r
PCA.R
# PCA for growth chamber data library(ggfortify) allPCA <- read.csv("GCdata.csv") allPCA = allPCA %>% mutate(climate=temp+phoper) %>% filter(climate!=35) %>% select(spp, climate, nTreatment, aamb,dr,netqe,jmax,vcmax,Nitrogen) allPCA$dr = allPCA$dr * -1 log.allPCA <- log(allPCA[c(4, 5, 6, 7, 8, 9)]) log.allPCA[33,]$dr <-0 log.allPCA[31,]$dr <-0 log.allPCA <- na.omit(log.allPCA) log.allPCA$Rd <- log.allPCA$dr log.allPCA$dr<-NULL pr.allPCA <- prcomp(log.allPCA, scale=T, center=T) plot(pr.allPCA) print(pr.allPCA) cumsum((pr.allPCA$sdev)^2) / sum(pr.allPCA$sdev^2) autoplot(pr.allPCA, data = allPCA, fill='spp', shape="nTreatment", loadings = TRUE, loadings.colour = 'black', loadings.label = TRUE, loadings.label.size =4) + scale_shape_manual(values=c(23,21,22))+ scale_fill_manual(values=c("black", NA)) + theme(legend.position="none")
dadb1f8712078326271b59f90d1d4e5eeb427fef
3e508d7cd0798f70f70242a837f99123397fc1c4
/tests/test-sim3way.R
3ae9f99f07d72cbd97d74daaa971e90897343876
[]
no_license
jlivsey/Dvar
f0d142df9866a4232947d114891111097c4c5851
824cdcc822829d655b905bfeca54a40b82f16e36
refs/heads/master
2023-07-26T19:17:28.531305
2023-07-06T13:55:28
2023-07-06T13:55:28
237,249,747
2
0
null
null
null
null
UTF-8
R
false
false
2,270
r
test-sim3way.R
library(L1pack) library(Dvar) options(warn = -1) #' Lets look at simulation for same dimension as Age x sex x blockGroup #' First set the dimensionality and simulate histogram mydim = c(6, 2, 10) A <- array(sample(1:10, size = prod(mydim), replace = TRUE), mydim) Nrep <- 2000 bpar <- 1/3 #' Run simulation with only total. I use only the total because the simulation #' function is not setup to take no margins. res <- Sim3Way( Nrep = Nrep, intab = A, bpar = bpar, marPack = list(list(0,0,0)) ) #' Look at the first 10 estimates, the true values and the rmse x <- cbind( c(A)[1:10], round(res$param[1:10], 4), round(res$rmse[1:10], 4) ) colnames(x) <- c("true", "estimate", "rmse") print(x) #' Construct marPack with all possible margins and total. This is ugly at the #' moment because the simulation function expects a list of lists. eg <- expand.grid(0:mydim[1], 0:mydim[2], 0:mydim[3]) # which rows have all none zero entries (not valid margins) allNotZero <- (eg[,1] != 0) & (eg[, 2] != 0) & (eg[, 3] != 0) marPackMatrix <- eg[!allNotZero, ] marPack = list() for(i in 1:dim(marPackMatrix)[1]){ marPack[[i]] <- list() marPack[[i]][[1]] <- marPackMatrix[i, 1] marPack[[i]][[2]] <- marPackMatrix[i, 2] marPack[[i]][[3]] <- marPackMatrix[i, 3] } #' Run simulation with full margins and total resFull <- Sim3Way( Nrep = Nrep, intab = A, bpar = bpar, marPack = marPack ) #' Add full margins sim results to output table x <- cbind( c(A)[1:10], round(res$param[1:10], 4), round(res$rmse[1:10], 4), round(resFull$param[1:10], 4), round(resFull$rmse[1:10], 4) ) colnames(x) <- c("true", "estimate-none", "rmse-none", "estimate-full", "rmse-full") print(x) #' Interestingly some of the estimates are not better with the full margins. #' Let's look at the average rmse of full vs no margins mean(res$rmse) mean(resFull$rmse) #' Much Better! 
hist(res$rmse, breaks = 30, main = "rmse with no margins") hist(resFull$rmse, breaks = 30, main = "rmse with full margins") #' Test to see if weight vector is working weightVec <- rep(1, prod(mydim) + length(marPack))
73ffc25acbddb3ee43e0f80709f6221590b371be
3df5e0d9cd46d3d86fe6879d85c96a3f8829fbd3
/man/Scorecard_gen_code_sas.Rd
447898d5f7a329f4808f05656aed1749e01fbcca
[]
no_license
gravesee/rubbish
d69e1de8a8c6be9084941eeb0b0d39c391a3fb74
fce7aba82bbfead3a28bf16bc2972491d6a1991b
refs/heads/master
2021-06-12T23:16:47.411331
2017-04-10T00:21:33
2017-04-10T00:21:33
null
0
0
null
null
null
null
UTF-8
R
false
true
942
rd
Scorecard_gen_code_sas.Rd
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/scorecard_class.R \name{Scorecard_gen_code_sas} \alias{Scorecard_gen_code_sas} \title{Generate SAS code for Scorecard object} \arguments{ \item{pfx}{character prefix to prepend to variable names by the Scorecard model object. Defaults to 1.} \item{method}{method used for calculating the reference level for adverse action codes. Three possible choices: \itemize{ \item{"min" }{Calculate difference from minimum of perf values - default} \item{"max" }{Calculate difference from maximum of perf values} \item{"neutral" }{Calculate difference from zero}#' }} } \value{ a character vector of SAS code } \description{ generate SAS code represenation of the Scorecard object. The SAS code that is genereated calculates the score, adverse action code distances, and provides a set of macro assignments for assigning adverse action codes to particular bin levels. }
f8a68740b6fc37b66b72e15e9bfc432fd34df920
cf63cb42a6b26550f9b6270434e68b64d383ba3e
/R_file/lab3.R
2ebf3f8202045e1cffe7a97f4e1308233984b577
[]
no_license
zyz293/Statistical_Methods_for_Data_Mining
b8f84ad3bfcf1711fabb6272d8391b58bdc0a7cf
3eda7095c8bf61095b92b65e4386c73363e66d79
refs/heads/master
2020-04-21T11:49:22.774238
2015-07-02T22:07:19
2015-07-02T22:07:19
38,459,218
0
0
null
null
null
null
UTF-8
R
false
false
1,078
r
lab3.R
setwd('C:/Users/DaTui/Desktop/winter_quarter/IEME304/lab/3') LAB3 = read.csv("Lab3.csv") CVInd = function(n,K) { #n is sample size; K is number of parts; returns K-length list of indices for each part m = floor(n/K) #approximate size of each part r = n-m*K I = sample(n,n) #random reordering of the indices Ind = list() #will be list of indices for all K parts length(Ind) = K for (k in 1:K) { if (k <= r) kpart = ((m+1)*(k-1)+1):((m+1)*k) else kpart = ((m+1)*r+m*(k-r-1)+1):((m+1)*r+m*(k-r)) Ind[[k]] = I[kpart] #indices for kth part of data } return (Ind) } n=79;K=10 Ind = CVInd(n=n,K=K) y = LAB3[[1]] yhat = y for (k in 1:K) { out = lm(y~.,data=LAB3[-Ind[[k]],]) yhat[Ind[[k]]] = cbind(1,as.matrix(LAB3[Ind[[k]], 2:9]))%*% as.matrix(out$coef) } CVSSE1 = sum((y-yhat)^2) #CV SSE #now compare to a different shrinkage parameter yhat = y for (k in 1:K) { out = lm(y~x1+x2,data=LAB3[-Ind[[k]],]) yhat[Ind[[k]]] = cbind(1,as.matrix(LAB3[Ind[[k]], 2:3]))%*% as.matrix(out$coef) } CVSSE2 = sum((y-yhat)^2) #CV SSE c(CVSSE1,CVSSE2)
ce0850060d362d99bcce056b3d47c01355d35748
774c97f4ceeebbfa7db0efd09a07c318763c530b
/R/Gen_x_z.R
21e25b3b70f6210f7cc63687902358b0b3d9f858
[]
no_license
BIG-S2/GFPLVCM
3c709126c81b4fe3dba3fc533d46c5860a96e40a
47075a7e8dd9bcec21892f3278d8babbd9d35ca8
refs/heads/master
2020-05-03T12:03:59.954202
2019-03-30T21:54:38
2019-03-30T21:54:38
178,615,183
1
0
null
null
null
null
UTF-8
R
false
false
1,879
r
Gen_x_z.R
Gen_x_z<-function(time_number, time_point, u_time, len_k){ ## generate x and z for one subject phi=function(t,k){ if(k==1) return(1) else return(sqrt(2)*cos((k-1)*pi*t) ); } ################################################################### ### generate the correlation relationship within the subject ###### ################################################################### # sigma_z_temp=diag( time_number ) # for(j in 1: time_number ){ # for(k in j: time_number ){ # sigma_z_temp[j, k] = exp(- abs ( time_point[j] - time_point[k] ) ) # sigma_z_temp[k, j]=sigma_z_temp[j, k] # } # } sigma_z_temp = exp( -abs( outer(time_point, time_point, "-") ) ) z_temp_value = MASS::mvrnorm( n=1,rep(0,time_number), sigma_z_temp ) ###################################### ### generate x for a subject ######### ###################################### kk=matrix(rep(c( 1:len_k ), u_time ), len_k, u_time ); u=matrix(rep(seq(0,1,length=u_time),len_k ), len_k, u_time, byrow=T); phi_u=matrix(mapply(function(t,k)phi(t,k),u,kk), len_k, u_time); M = MASS::mvrnorm( n=len_k,rep(0,time_number), sigma_z_temp ) ## len_k*length of S x_temp_value=0 for(k in 1: len_k){ x_temp_value=x_temp_value + as.matrix( M[k, ] )%*%phi_u[k, ]*(-1)^(k+1)/k } ################################# ### generate beta ############### ################################# kk_s=matrix(rep(c( 1:len_k ), time_number ), len_k, time_number ); s=matrix(rep(time_point ,len_k ), len_k, time_number, byrow=T); phi_s=matrix(mapply(function(t,k)phi(t,k),s,kk_s), len_k, time_number); beta_temp_value=0 for(k in 1: len_k){ beta_temp_value=beta_temp_value + as.matrix( phi_s[k, ] )%*%phi_u[k, ]*(-1)^(k+1)/(k^2) ## s*u } list(z_temp_value=z_temp_value, x_temp_value=x_temp_value, beta_temp_value=beta_temp_value) }
0c51c9606b972582e704df6f033f33a4809e9146
3098b0bd52cee12201a1e932e42a0be939526e87
/cachematrix.R
b1d6006349a213e37acc1634c49c4fba267a598e
[]
no_license
pjs83/ProgrammingAssignment2
e094c5cd97861962f1a0065b29ce6e75f752ae40
c42e5b3d5fc87bff130844b0841584d545afae3d
refs/heads/master
2020-05-31T00:09:45.134618
2014-09-13T18:50:26
2014-09-13T18:50:26
null
0
0
null
null
null
null
UTF-8
R
false
false
992
r
cachematrix.R
## Matrix inversion using previously calculated inverse matrices if available to speed up the process and avoid recalculating. ## Creates a speial matrix object that can cache its inverse. makeCacheMatrix <- function(x = matrix()) { A <- NULL set <- function(y) { x <<- y A <<- NULL } get <- function() x setsolve <- function(solve) A <<- solve getsolve <- function() A list(set = set, get = get, setsolve = setsolve, getsolve = getsolve) } ## Computes the inverse of the special matrix returned bz makeCacheMatrix. If the inverse has already been computed, then the function ## retrieves the inverse from the cache. cacheSolve <- function(x, ...) { A <- x$getsolve() if(!is.null(A)) { message("getting cached data") return(A) } data <- x$get() A <- solve(data, ...) x$setsolve(A) A }
6150193427becf2545dde1801443d02bfa68214d
c7fddad890f1e87f6ca12685f5777aa39cf320f6
/plot1.R
969cfd765041277ec17916ddafdecd57ad24ed1c
[]
no_license
garydeinert/ExplData-Analysis-Course-Project-1
6c24acc5e7b10f26efc6c10c50f2d7ff0915e932
dad8efb485ecbcf96c4e08751cf9180b49b80d39
refs/heads/master
2020-05-19T11:19:44.980615
2015-03-08T19:45:49
2015-03-08T19:45:49
31,861,703
0
0
null
null
null
null
UTF-8
R
false
false
1,346
r
plot1.R
## ======== set directory and filename here. dir <- "C:/Users/Gary/Desktop/Data Science Specialzation/Exploratory Data Course Proj 1" setwd(dir) filenm <- "household_power_consumption.txt" ## ========= end set directory and filename my.data <- as.data.frame(read.table(filenm, header=TRUE, sep = ";", na.strings = "?")) my.data$Date <- as.Date(my.data$Date,"%d/%m/%Y") my.data$Time <- strptime(my.data$Time,"%H:%M:%S") ## Date: Date in format dd/mm/yyyy, Time in format hh:mm:ss ## Global_active_power: household global minute-averaged active power (in kilowatt) ## Global_reactive_power: household global minute-averaged reactive power (in kilowatt) ## Voltage: minute-averaged voltage (in volt) ## Global_intensity: household global minute-averaged current intensity (in ampere) ## --- All sub-metering in watt-hours of active energy. ## Sub_metering_1: kitchen - dishwasher, oven, microwave ## Sub_metering_2: laundry room - washing-machine,dryer, refrigerator, light. ## Sub_metering_3: an electric water-heater and an air-conditioner. my.subset <- my.data[my.data$Date=="2007-02-01" | my.data$Date == "2007-02-02",] png(filename = "plot1.png", width = 480, height = 480) hist(my.subset$Global_active_power,main="Global Active Power", col="Red",xlab = "Global Active Power (Kilowatts)") dev.off()
607f3e8109d7f96cfc70d73617a0faca82b89e97
21bea3d65527823f2c05a0651902d0493e87fdec
/R/mathOps.R
94399bbdba0c6586df938639db07ec18d1d00395
[]
no_license
cran/fSeries
b75f581bf81a53a6121ade5325f3397c21382a9a
e7b134694da3344e8c8863330be35c6c1dd464b0
refs/heads/master
2021-03-12T22:56:17.873727
2009-05-25T00:00:00
2009-05-25T00:00:00
17,718,720
1
1
null
null
null
null
UTF-8
R
false
false
8,720
r
mathOps.R
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Library General Public
# License as published by the Free Software Foundation; either
# version 2 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Library General Public License for more details.
#
# You should have received a copy of the GNU Library General
# Public License along with this library; if not, write to the
# Free Foundation, Inc., 59 Temple Place, Suite 330, Boston,
# MA 02111-1307 USA

# Copyrights (C)
# for this R-port:
#   1999 - 2007, Diethelm Wuertz, GPL
#   Diethelm Wuertz <wuertz@itp.phys.ethz.ch>
#   info@rmetrics.org
#   www.rmetrics.org
# for the code accessed (or partly included) from other R-ports:
#   see R's copyright and license files
# for the code accessed (or partly included) from contributed R-ports
# and other sources
#   see Rmetrics's copyright file


################################################################################
# METHOD:               MATHEMATICAL OPERATIONS ON DATA:
#  Ops.timeSeries        Returns group 'Ops' for a 'timeSeries' object
#  abs.timeSeries        Returns absolute values of a 'timeSeries' object
#  sqrt.timeSeries       Returns sqrt values of a 'timeSeries' object
#  exp.timeSeries        Returns exponentials of a 'timeSeries' object
#  log.timeSeries        Returns logarithms of a 'timeSeries' object
#  sign.timeSeries       Returns the signs of a 'timeSeries' object
#  diff.timeSeries       Differences a 'timeSeries' object
#  scale.timeSeries      Centers and/or scales a 'timeSeries' object
#  quantile.timeSeries   Returns quantiles of an univariate 'timeSeries'
################################################################################


Ops.timeSeries =
function(e1, e2 = 1)
{
    # A function implemented by Diethelm Wuertz

    # Description:
    #   Uses group 'Ops' generic functions for 'timeSeries' objects.
    #   Either operand (or both) may be a 'timeSeries'; the arithmetic is
    #   delegated to the matrix method via NextMethod() and the result is
    #   wrapped back into a 'timeSeries'.

    # Arguments:
    #   e1, e2 - two objects of class 'timeSeries' (or one of them a
    #       plain numeric; e2 defaults to 1 for unary operators).

    # Value:
    #   Returns an object of class 'timeSeries'.

    # FUNCTION:

    # Save the original operands so slots (positions, units, recordIDs)
    # can be restored after the numeric computation:
    s1 = e1
    s2 = e2

    # Which one is a 'timeSeries' object?
    i1 = inherits(e1, "timeSeries")
    i2 = inherits(e2, "timeSeries")

    # When both are series their positions and financial centers must agree:
    if (i1 && i2) {
        if (!identical(as.vector(e1@positions), as.vector(e2@positions)))
            stop("positions slot must match")
        if (!identical(e1@FinCenter, e2@FinCenter))
            stop("FinCenter slot must match")
    }

    # Extract Data Slot so NextMethod() dispatches on plain matrices:
    if (i1) e1 = e1@Data
    if (i2) e2 = e2@Data

    # Compute with the underlying (matrix) method of the called operator:
    s = NextMethod(.Generic)

    # Make timeSeries: put the numeric result back into one of the saved
    # series objects; units are combined when both operands were series.
    if ( i1) { s1@Data = s; s = s1 }
    if (!i1 && i2) { s2@Data = s; s = s2 }
    if ( i1 && !i2) s@units = s1@units
    if (!i1 &&  i2) s@units = s2@units
    if ( i1 &&  i2) s@units = paste(s1@units, "_", s2@units, sep = "")
    colnames(s@Data) = s@units

    # Carry over recordIDs; when both operands have them, they are
    # column-bound into a single data.frame.
    df = data.frame()
    if (i1) {
        if (dim(s1@recordIDs)[1] > 0) df = s1@recordIDs
    }
    if (i2) {
        if (dim(s2@recordIDs)[1] > 0) df = s2@recordIDs
    }
    if (i1 & i2) {
        if (dim(s1@recordIDs)[1] > 0 & dim(s2@recordIDs)[1] > 0)
            df = data.frame(s1@recordIDs, s2@recordIDs)
    }
    s@recordIDs = df

    # Return Value:
    s
}


# ------------------------------------------------------------------------------


abs.timeSeries =
function(x)
{
    # A function implemented by Diethelm Wuertz

    # Description:
    #   Returns absolute values of a 'timeSeries' object

    # Arguments:
    #   x - an uni- or multivariate return series of class 'timeSeries'.

    # Note:
    #   abs is .Primitive

    # FUNCTION:

    # Transform the data slot in place; all other slots are kept:
    x@Data = abs(x@Data)

    # Return Value:
    x
}


# ------------------------------------------------------------------------------


sqrt.timeSeries =
function(x)
{
    # A function implemented by Diethelm Wuertz

    # Description:
    #   Returns square-root values of a 'timeSeries' object

    # Arguments:
    #   x - an uni- or multivariate return series of class 'timeSeries'.

    # Note:
    #   sqrt is .Primitive

    # FUNCTION:

    # Transform the data slot in place; all other slots are kept:
    x@Data = sqrt(x@Data)

    # Return Value:
    x
}


# ------------------------------------------------------------------------------


exp.timeSeries =
function(x)
{
    # A function implemented by Diethelm Wuertz

    # Description:
    #   Returns exponential values of a 'timeSeries' object

    # Arguments:
    #   x - an uni- or multivariate return series of class 'timeSeries'.

    # Note:
    #   exp is .Primitive

    # FUNCTION:

    # Transform the data slot in place; all other slots are kept:
    x@Data = exp(x@Data)

    # Return Value:
    x
}


# ------------------------------------------------------------------------------


log.timeSeries =
function(x, base = exp(1))
{
    # A function implemented by Diethelm Wuertz

    # Description:
    #   Returns logarithmic values of a 'timeSeries' object

    # Arguments:
    #   x - an uni- or multivariate return series of class 'timeSeries'.
    #   base - the base of the logarithm, natural log by default.

    # Note:
    #   log is .Primitive

    # FUNCTION:

    # Transform the data slot in place; all other slots are kept:
    x@Data = log(x@Data, base = base)

    # Return Value:
    x
}


# ------------------------------------------------------------------------------


sign.timeSeries =
function(x)
{
    # A function implemented by Diethelm Wuertz

    # Description:
    #   Returns the signs of a 'timeSeries' object

    # Arguments:
    #   x - an uni- or multivariate return series of class 'timeSeries'.

    # Note:
    #   sign is .Primitive

    # FUNCTION:

    # Transform the data slot in place; all other slots are kept:
    x@Data = sign(x@Data)

    # Return Value;
    x
}


# ------------------------------------------------------------------------------


diff.timeSeries =
function(x, lag = 1, diff = 1, trim = FALSE, pad = NA, ...)
{
    # A function implemented by Diethelm Wuertz

    # Description:
    #   Difference 'timeSeries' objects.

    # Arguments:
    #   x - a 'timeSeries' object.
    #   lag - an integer indicating which lag to use.
    #       By default 1.
    #   diff - an integer indicating the order of the difference.
    #       By default 1.
    #   trim - a logical. Should NAs at the beginning of the
    #       series be removed?
    #   pad - a numeric value with which NAs should be replaced
    #       at the beginning of the series (used when trim = FALSE).

    # Value:
    #   Returns a differenced object of class 'timeSeries'.

    # FUNCTION:

    # Convert to a plain matrix so base diff() is used on the data:
    y = as.matrix(x)

    # Check NAs:
    # if (any(is.na(y))) stop("NAs are not allowed in time series")

    # Difference:
    z = diff(y, lag = lag, difference = diff)

    # Trim is FALSE: re-grow to the original length by padding the rows
    # lost to differencing at the beginning with 'pad':
    if (!trim) {
        diffNums = dim(y)[1] - dim(z)[1]
        # 0*y[...] + pad keeps the original column layout while filling
        # every cell with the pad value:
        zpad = matrix(0*y[1:diffNums, ] + pad, nrow = diffNums)
        rownames(zpad) = rownames(y)[1:diffNums]
        z = rbind(zpad, z)
    }

    # Record IDs: when trimming, drop the leading rows of recordIDs so
    # they stay aligned with the shortened series:
    df = x@recordIDs
    if (trim) {
        if (sum(dim(df)) > 0) {
            TRIM = dim(df)[1] - dim(z)[1]
            df = df[-(1:TRIM), ]
        }
    }

    # Return Value:
    timeSeries(data = z, charvec = rownames(z), units = colnames(z),
        format = x@format, zone = x@FinCenter, FinCenter = x@FinCenter,
        recordIDs = df, title = x@title, documentation = x@documentation)
}


# ------------------------------------------------------------------------------


scale.timeSeries =
function(x, center = TRUE, scale = TRUE)
{
    # A function implemented by Diethelm Wuertz

    # Description:
    #   Centers and/or scales a 'timeSeries' object.

    # Arguments:
    #   x - a 'timeSeries' object.
    #   center, scale - logicals passed on to base scale(); column-wise
    #       centering/scaling of the data slot.

    # FUNCTION:

    # Scale:
    x@Data = scale(x = x@Data, center = center, scale = scale)

    # Return Value:
    x
}


# ------------------------------------------------------------------------------


quantile.timeSeries =
function(x, ...)
{
    # A function implemented by Diethelm Wuertz

    # Description:
    #   Returns quantiles of an univariate 'timeSeries'

    # Arguments:
    #   x - an univariate 'timeSeries' object.
    #   ... - further arguments passed to stats::quantile(), e.g. probs.

    # FUNCTION:

    # Check that the series is univariate:
    stopifnot(NCOL(x) == 1)

    # Quantiles (returned as a plain named numeric vector, not a series):
    ans = quantile(x = as.vector(x), ...)

    # Return Value:
    ans
}


################################################################################
ad29ab583d57c46115c768679d1e6c2746d19da2
be236873f4cd08b8e4a2b0b7a228385da2645abe
/run_analysis.R
58dcc54f313e9b9b1dcb92616d6ea66d0aa1fe60
[]
no_license
gitbeckywong/Coursera-3-Project
f17585b4b22a31a68672b83818924587406b3c12
6b2bdf2339d077da614ddb742d18c3f44b1c1fb7
refs/heads/master
2020-06-09T04:47:06.838179
2014-10-24T06:24:03
2014-10-24T06:24:03
null
0
0
null
null
null
null
UTF-8
R
false
false
3,374
r
run_analysis.R
## run_analysis.R
## Builds a tidy summary of the UCI HAR (Samsung accelerometer) data set:
## merges test/train sets, keeps mean/std measurements, labels activities,
## renames variables, and writes per-subject/per-activity averages.

## Load the "dplyr" package. (It is assumed that the "dplyr" package has already been installed.)
library(dplyr)

## Before proceeding, ensure all folders and data files are contained in the working folder.
## Specifically, the folders "test" and "train" should be in the working folder.

## Read all required files into R. Use relative file paths as needed.
## Note - "Inertial Signals" folder is ignored, since it contains no mean or std data.
## features.txt holds the 561 measurement names used as column names below.
features <- tbl_df(read.table("features.txt", sep = "", header = FALSE,
                              stringsAsFactors = FALSE))
subject_test <- tbl_df(read.table("./test/subject_test.txt", sep = "",
                                  header = FALSE, col.names = "Subject"))
X_test <- tbl_df(read.table("./test/X_test.txt", sep = "", header = FALSE,
                            col.names = features[[2]]))
Y_test <- tbl_df(read.table("./test/Y_test.txt", sep = "", header = FALSE,
                            col.names = "Activity"))
subject_train <- tbl_df(read.table("./train/subject_train.txt", sep = "",
                                   header = FALSE, col.names = "Subject"))
X_train <- tbl_df(read.table("./train/X_train.txt", sep = "", header = FALSE,
                             col.names = features[[2]]))
Y_train <- tbl_df(read.table("./train/Y_train.txt", sep = "", header = FALSE,
                             col.names = "Activity"))
activity_labels <- tbl_df(read.table("activity_labels.txt", sep ="",
                                     header = FALSE))

## Step 1:
## Use cbind and rbind to "merge" all files into one complete data set
## (columns: Subject, Activity, then the 561 measurements).
test_all <- tbl_df(cbind(subject_test, Y_test, X_test))
train_all <- tbl_df(cbind(subject_train, Y_train, X_train))
completedata <- tbl_df(rbind(test_all, train_all))

## Step 2:
## Use select and contains to extract subject, activity, and any mean or std measurements,
## as well as any other variables that may contain the string "mean" or "std".
meanstddata_withFreq <- select(completedata, Subject, Activity,
                               contains("mean"), contains("std"))
## Remove any columns with "meanFreq" or "angle" from this dataset, as they are not what the question is asking for.
meanstddata <- select(meanstddata_withFreq, -contains("meanFreq"), -contains("angle"))

## Step 3:
## Convert Activity column from integers to factors, then change the levels to the
## activity_labels (e.g. 1 -> WALKING). Relies on the integer codes matching the
## row order of activity_labels.txt.
fact <- as.factor(meanstddata$Activity)
levels(fact) <- activity_labels[[2]]
meanstddata$Activity <- fact

## Step 4:
## Rename all variables to be more descriptive.
## Due to variables' lengths, camelCase is used for easier understanding.
## Use gsub() function to more easily edit the variable names (rather than having to rename each one individually).
tablenames <- names(meanstddata)
newnames <- gsub("\\.","",tablenames)          # drop the dots read.table inserted
newnames <- gsub("BodyBody","Body",newnames)   # fix duplicated token in raw names
newnames <- gsub("mean","Mean",newnames)
newnames <- gsub("std","Std",newnames)
newnames <- gsub("Acc","Acceleration",newnames)
newnames <- gsub("Gyro","Gyroscope",newnames)
newnames <- gsub("Mag","Magnitude",newnames)
newnames <- gsub("tBody","TimeBody",newnames)
newnames <- gsub("tGravity","TimeGravity",newnames)
newnames <- gsub("fBody","FrequencyBody",newnames)
names(meanstddata) <- newnames

## Step 5: Create a new independent tidy data set with the average of each variable
## for each activity and each subject. (Chaining used to simplify code.)
finaltidydata <- meanstddata %>%
    group_by(Subject, Activity) %>%
    summarise_each(funs(mean))

## Create a .txt file containing the final tidy data.
write.table(finaltidydata, file = "tidydata.txt", row.name = FALSE)
d9bb0de74830281bf794e3bc272e7844372024a8
762d865864484193149b4e0902304064324c6ac0
/DevelopingDataProductsFinalProject/server.R
02701458f671184743124f1a658edbd2a4a975a6
[]
no_license
sairampraneeth/Developing-Data-Products-Final-Project
8c70fa19b147795f10671821ca38f5f4d7d85813
43864c1cf3ad4d2eff392deac554cab70a3ba9c7
refs/heads/master
2021-01-10T23:39:23.327578
2016-10-10T04:31:43
2016-10-10T04:31:43
70,410,309
0
0
null
null
null
null
UTF-8
R
false
false
1,874
r
server.R
library(shiny) data(mtcars) Dset <- mtcars PredictionModel <- lm(mpg ~ .,data = Dset) checker <- TRUE shinyServer( function(input,output){ printEvent <- eventReactive(input$button1,{ test <- data.frame(cyl = input$id1, disp = input$id2, hp = input$id3, drat = input$id4, wt = input$id5, qsec = input$id6, vs = switch(input$id7,V = 0,S = 1), am = switch(input$id8,A = 0,M = 1), gear = input$id9, carb = input$id10 ) predmod <<- PredictionModel print(paste("The Predicted Mileage is",predict(predmod,test),".The current prediction model has a variability coverage of ",round(summary(predmod)$r.squared * 100,2),"Percent")) }) observeEvent(input$button2,{ test <- data.frame(cyl = input$id1, disp = input$id2, hp = input$id3, drat = input$id4, wt = input$id5, qsec = input$id6, vs = switch(input$id7,V = 0,S = 1), am = switch(input$id8,A = 0,M = 1), gear = input$id9, carb = input$id10 ) predmod <<- PredictionModel pred <- predict(predmod,test) newRecord <- data.frame(mpg = pred,test) Dset <<- rbind(Dset,newRecord) PredictionModel <<- lm(mpg ~ .,data = Dset) }) output$predictedvalue <- renderText({ printEvent() }) actionDone <- eventReactive(input$button2,{ print(paste("New Record Added!! Total Number of Records = ",nrow(Dset))) }) output$ActionDone <- renderText({ actionDone() }) } )
568db051443a0a7129f7664793e9d9d83ed7b5c5
387b669bd2f9a5b9695d529b8f3f4e71ef4d4f35
/lab/course1/Robust sumaries.R
b3b4ab7159fba7989be4acb76f5d4dcc5c4b673d
[]
no_license
chadchouGitHub/GenomeNote
c6bda81db246ee4d487b2be6ccdeb3c71e92d5ac
383565129447440d3a4319821951a26b27872d8d
refs/heads/master
2020-04-04T18:21:40.816236
2015-05-11T23:41:36
2015-05-11T23:41:36
30,615,246
0
0
null
null
null
null
UTF-8
R
false
false
2,865
r
Robust sumaries.R
# Robust summaries and log transformation
# Teaching script: shows how a single outlier distorts the mean, sd, and
# Pearson correlation, and how robust alternatives (median absolute
# deviation, Spearman correlation) and log scales behave better.
# All these functions are in the base package (plots use rafalib helpers).

set.seed(1)
x=c(rnorm(100,0,1))    ## real distribution: 100 standard-normal draws
x[23] <- 100           ## mistake made in the 23rd measurement (outlier)
boxplot(x)
mean(x)
sd(x)
#> mean(x)
#[1] 1.108142
## Draws are standard normal (mean 0), yet the sample mean exceeds 1:
## the single outlier "100" is big enough to mess up the mean.
#> sd(x)
#[1] 10.02938
## The sd is distorted the same way.

#---- The Median Absolute Deviation -- mad() ---------------------------------#
# The median absolute deviation (MAD) is a robust summary for the standard
# deviation, defined by computing the differences between each point and the
# median and taking the median of their absolute values:
#   1.4826 * median{ |Xi - median(Xi)| }
# The 1.4826 is a scale factor that makes the MAD consistent with the sd
# under normality.
mad(x)
# [1] 0.8857141  -- close to the true sd of 1 despite the outlier.

#-------------------------- Spearman correlation -----------------------------#
# Same experiment for correlation: an outlier shared by x and y fabricates
# a near-perfect Pearson correlation.
set.seed(1)
x=c(rnorm(100,0,1))    ## real distribution
x[23] <- 100           ## mistake made in the 23rd measurement
y=c(rnorm(100,0,1))    ## real distribution
y[23] <- 84            ## similar mistake made in the 23rd measurement
library(rafalib)
mypar(1,1)
plot(x,y,main=paste0("correlation=",round(cor(x,y),3)),pch=21,bg=1,xlim=c(-3,100),ylim=c(-3,100))
abline(0,1)
# The title shows correlation = 0.99 from cor(x,y), but the true correlation
# is near 0: a single outlier pair drives the estimate.

set.seed(1)
x1=c(rnorm(100,0,1))   ## real distribution, no outlier
y1=c(rnorm(100,0,1))   ## real distribution, no outlier
library(rafalib)
mypar(1,1)
plot(x1,y1,main=paste0("correlation=",round(cor(x1,y1),3)),pch=21,bg=1,xlim=c(-3,3),ylim=c(-3,3))
abline(0,1)
# Without the outlier the correlation is about 0.001, as expected.

# Spearman correlation works on ranks, so the outlier pair cannot dominate.
# To use it, pass method="spearman" to cor().
mypar(1,2)
plot(x,y,main=paste0("correlation=",round(cor(x,y),3)),pch=21,bg=1,xlim=c(-3,100),ylim=c(-3,100))
plot(rank(x),rank(y),main=paste0("correlation=",round(cor(x,y,method="spearman"),3)),pch=21,bg=1,xlim=c(-3,100),ylim=c(-3,100))
abline(0,1)
# With the outlier, Pearson says 0.99 while Spearman says 0.066 — the robust
# estimate is far closer to the truth.

# NOTE(review): `ratios` is not defined anywhere in this script — this line
# errors as-is; the original author flagged the missing data set too.
hist(ratios)

# Log transformation: log(x/y) = log(x) - log(y) = -(log(y) - log(x)) = log(y/x)
x <- 2^(-5:5)   ## this is 1/32, 1/16, 1/8, ..., 1, 2, ..., 32
mypar2(1,2)
plot(x)
abline(h=1)
plot(log2(x))
abline(h=0)
# On the raw scale the small ratios are squashed near 0; on the log2 scale
# x-fold increases and decreases are symmetric around 0.

#------------------------------------------------------
# Same idea shown with labelled fold-change values:
x=2^seq(1,5)
y=c(rev(1/x),1,x)
Names=c(paste0("1/",rev(x)),1,x)
mypar(1,1)
plot(seq(along=y),y,xlab="",ylab="",type="n",xaxt="n")
text(seq(along=y),y,Names,cex=1.5)
abline(h=1)

#------------------------------------------------------
# Same procedure on a log y-axis: fold changes become equally spaced.
plot(seq(along=y),y,xlab="",ylab="",type="n",log="y",xaxt="n")
text(seq(along=y),y,Names,cex=1.5)
abline(h=1)
1328aba9f03508de495306e22f10a2383b90ff47
b7b0b1dc4b412fa31026b5807f9d3fa1ef73f3ea
/man/funkmeans03A.Rd
08aa1dd410abc5494574eee8ebdcdd9a71c6fce6
[ "MIT" ]
permissive
kisungyou/T4cluster
efc268fb199ee0ce085ccd4872a04bd215986045
cf15db529209023c0409a70ebfc1b721d8b785b0
refs/heads/master
2023-07-31T18:15:41.016081
2021-09-23T04:42:04
2021-09-23T04:42:04
295,601,655
3
1
null
null
null
null
UTF-8
R
false
true
2,550
rd
funkmeans03A.Rd
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/functional_kmeans03A.R \name{funkmeans03A} \alias{funkmeans03A} \title{Functional K-Means Clustering by Abraham et al. (2003)} \usage{ funkmeans03A(fdobj, k = 2, ...) } \arguments{ \item{fdobj}{a \code{'fd'} functional data object of \eqn{N} curves by the \pkg{fda} package.} \item{k}{the number of clusters (default: 2).} \item{...}{extra parameters including \describe{ \item{maxiter}{the maximum number of iterations (default: 10).} \item{nstart}{the number of random initializations (default: 5).} }} } \value{ a named list of S3 class \code{T4cluster} containing \describe{ \item{cluster}{a length-\eqn{N} vector of class labels (from \eqn{1:k}).} \item{mean}{a \code{'fd'} object of \eqn{k} mean curves.} \item{algorithm}{name of the algorithm.} } } \description{ Given \eqn{N} curves \eqn{\gamma_1 (t), \gamma_2 (t), \ldots, \gamma_N (t) : I \rightarrow \mathbf{R}}, perform \eqn{k}-means clustering on the coefficients from the functional data expanded by B-spline basis. Note that in the original paper, authors used B-splines as the choice of basis due to nice properties. However, we allow other types of basis as well for convenience. 
} \examples{ # ------------------------------------------------------------- # two types of curves # # type 1 : sin(x) + perturbation; 20 OF THESE ON [0, 2*PI] # type 2 : cos(x) + perturbation; 20 OF THESE ON [0, 2*PI] # type 3 : sin(x) + cos(0.5x) ; 20 OF THESE ON [0, 2*PI] # ------------------------------------------------------------- ## PREPARE : USE 'fda' PACKAGE # Generate Raw Data datx = seq(from=0, to=2*pi, length.out=100) daty = array(0,c(100, 60)) for (i in 1:20){ daty[,i] = sin(datx) + rnorm(100, sd=0.5) daty[,i+20] = cos(datx) + rnorm(100, sd=0.5) daty[,i+40] = sin(datx) + cos(0.5*datx) + rnorm(100, sd=0.5) } # Wrap as 'fd' object mybasis <- fda::create.bspline.basis(c(0,2*pi), nbasis=10) myfdobj <- fda::smooth.basis(datx, daty, mybasis)$fd ## RUN THE ALGORITHM WITH K=2,3,4 fk2 = funkmeans03A(myfdobj, k=2) fk3 = funkmeans03A(myfdobj, k=3) fk4 = funkmeans03A(myfdobj, k=4) ## FUNCTIONAL PCA FOR VISUALIZATION embed = fda::pca.fd(myfdobj, nharm=2)$score ## VISUALIZE opar <- par(no.readonly=TRUE) par(mfrow=c(1,3)) plot(embed, col=fk2$cluster, pch=19, main="K=2") plot(embed, col=fk3$cluster, pch=19, main="K=3") plot(embed, col=fk4$cluster, pch=19, main="K=4") par(opar) } \references{ \insertRef{abraham_unsupervised_2003}{T4cluster} } \concept{functional}
386aee9e8a3d0e1f9ef282c832534725811c97d9
d71f89f90448ca3dae2efe3cfd2212e59d41eec4
/R/Chunks.R
14d8bc18e02ea85b4e9999c67231c4638b9dd41e
[]
no_license
warnbergg/regone
ce2b5f9db882b40c025fc8a669e9d65f5404e3a8
851be8e37259b1b0f599038e08ae14c27365b454
refs/heads/master
2023-08-22T02:18:39.488804
2021-10-13T14:11:07
2021-10-13T14:11:07
null
0
0
null
null
null
null
UTF-8
R
false
false
626
r
Chunks.R
#' Chunks #' #' Splitting vector into specified n.chunks. #' @param vec Any vector. Vector to split into chunks. No default. #' @param n.chunks Integer vector of length 1. Number of chunks to split the vector into. No default. #' @param exclude Character vector. Names of elements to remove from vector before splitting. Particularly relevant for use in package regone. Defaults to c("residuals", "predicted"). #' @export Chunks <- function(vec, n.chunks, exclude = c("residuals", "predicted")) { vec <- vec[!vec %in% exclude] chunks <- split(vec, cut(seq_along(vec), n.chunks, labels = FALSE)) return (chunks) }
204a3c0ad05d3279c25de83c6bf6d389af323be5
72450ba3ff4cf6e5d28252a4af1d1d17ea07a7e1
/R/dataVision.R
c3b0de9707ae1b0169798441a36e58f5b8947fb9
[ "MIT" ]
permissive
aikinman/dataVision
a214287848635520520b790b82505e527dd0fd37
ddfbb0ef9d41924be02d503f9d5c6c1ba7338bea
refs/heads/main
2023-01-19T17:25:42.552921
2020-11-19T05:47:16
2020-11-19T05:47:16
314,026,789
0
0
null
null
null
null
UTF-8
R
false
false
587
r
dataVision.R
#' dataVision #' #' @param x #' @param y #' @param df #' @param verbose #' @param na.rm #' #' @return a plot visualizing your x and y data from your specified dataset #' @export #' #' @examples #' plotFunction('Sepal.Width', 'Petal.Width', iris) #' dataVision <- function(x, y, df, verbose = TRUE, na.rm =FALSE) { if(verbose) message("Plotting your data...stay tuned!") if(x == class(logical) | x == class(integer) | y == class(logical) | y == class(integer)) warning('x and y must be either class = factor or numeric to use this function!') plot(df[,y]~df[,x], pch=1, cex=0.8) }
21631ebc6ddcf7f518bd8652316589ff2c2fbf47
b1d31f04054b32b1dbca0d8ce7406dd63b4130f1
/R/utils.R
d33e0e55e5d795bbd9dacfd8fa85e107d2200257
[]
no_license
lianos/GenomicCache
15d711415e47c1080ffadb1a558a60d3e3bc24d9
dc89e1e7b29d5702e00e50ec5926b74012a68886
refs/heads/master
2020-04-17T15:15:23.388012
2019-01-02T19:06:01
2019-01-02T19:06:01
816,175
0
1
null
null
null
null
UTF-8
R
false
false
5,598
r
utils.R
##' Checks that a directory exists and will create it if not.
##'
##' If the directory does not exist, and the caller does not want to create it
##' an error will be thrown
##'
##' @export
##' @author Steve Lianoglou \email{slianoglou@@gmail.com}
##'
##' @param path The path to the directory to check.
##' @param create A logical indicating whether or not the directory should be
##' created if it doesn't exist
##' @param verbose Let us know what's going on
##' @param raise.error A logical; when \code{FALSE}, failures return
##' \code{FALSE} instead of raising an error.
##'
##' @return \code{path} if everything is kosher, otherwise an error is thrown
##' (or \code{FALSE} when \code{raise.error=FALSE}).
checkOrCreateDirectory <- function(path, create=FALSE, verbose=TRUE,
                                   raise.error=TRUE) {
  if (!dir.exists(path)) {
    if (!create) {
      if (raise.error) {
        ## NOTE(review): stop() has no `sep` argument, so " " is appended to
        ## the message here rather than used as a separator — confirm intent.
        stop("Directory", path, "does not exist", sep=" ")
      } else {
        return(FALSE)
      }
    } else {
      if (verbose) cat("Creating directory", path, "...\n")
      if (!dir.create(path)) {
        if (raise.error) {
          stop("Error! Check permissions? Parent directory exists?")
        } else {
          return(FALSE)
        }
      }
    }
  }
  path
}

##' Convenience method to sets \code{NA}'s in a logical vector to \code{FALSE}.
##'
##' @export
##' @author Steve Lianoglou \email{slianoglou@@gmail.com}
##'
##' @param the.logical A logical vector/Rle
##' @return A \code{logical} with \code{NA} values set to \code{FALSE}
na.logical <- function(the.logical) {
  # as.logical also coerces Rle (and other coercible) inputs to plain logical.
  the.logical <- as.logical(the.logical)
  the.logical[is.na(the.logical)] <- FALSE
  the.logical
}

##' Convert NA values in vectors and data.frames to a default value
##'
##' Vectors/factors are filled element-wise; data.frame-like objects are
##' converted column by column (recursively calling this function).
##'
##' @param wut The thing to convert
##' @param to The value convert NA to. The special string ".defaults." maps
##' each basic type (logical/numeric/integer/character/factor) to a
##' conventional fill value.
##' @return The same type as \code{wut}
convert.na <- function(wut, to=".defaults.") {
  if (is.character(to) && to[1] == ".defaults.") {
    to <- list(logical=FALSE, numeric=0, integer=0L, character="", factor="")
  }
  if (is.vector(wut) || is.factor(wut)) {
    wut.type <- is(wut)[1]
    if (is.list(to)) {
      if (!wut.type %in% names(to)) {
        stop("Unknown default conversion value for", wut.type, sep=" ")
      }
      to <- to[[wut.type]]
    }
    if (wut.type == 'factor') {
      # The replacement value must be a legal level before assignment.
      levels(wut) <- c(levels(wut), to)
    }
    wut[is.na(wut)] <- to
  } else if (inherits(wut, 'data.frame') || inherits(wut, 'DataFrame')) {
    cols <- 1:ncol(wut)
    if (is(wut, 'data.table')) {
      ## Don't change key columns
      cols <- setdiff(cols, which(colnames(wut) %in% key(wut)))
    }
    for (idx in cols) {
      wut[[idx]] <- convert.na(wut[[idx]], to=to)
    }
  }
  wut
}

##' Keeps only the elements of a GRangesList-like object whose ranges all lie
##' on \code{which.chr}; \code{which.chr=NULL} is a no-op.
filterByChr <- function(grl, which.chr=NULL) {
  if (!is.null(which.chr)) {
    keep <- sapply(grl, function(g) {
      length(g) > 0 && all(as.logical(seqnames(g) == which.chr))
    })
    grl <- grl[keep]
  }
  grl
}

##' Pulls a `verbose` argument out of `...`, falling back to
##' \code{options()$verbose} when none was supplied.
checkVerbose <- function(...) {
  verbose <- list(...)$verbose
  if (is.null(verbose)) verbose <- options()$verbose
  verbose
}

## Loads one item from an rda/rds file. If `what` is NULL, the first object
## stored in the file is returned.
load.it <- function(rda.file, what=NULL) {
  if (!file.exists(rda.file)) {
    stop("Can't find data file ", rda.file)
  }
  # .rds files hold a single unnamed object; short-circuit through readRDS.
  if (substring(rda.file, nchar(rda.file) - 3L) == '.rds') {
    return(readRDS(rda.file))
  }
  # Load into a private environment so nothing leaks into the caller's scope.
  e <- new.env()
  vars <- load(rda.file, e)
  if (length(vars) == 0L) {
    stop("No objects found in ", rda.file)
  }
  if (is.null(what)) {
    what <- vars[1]
  }
  if (!what %in% vars) {
    stop("Object `", what, "` not found in ", rda.file)
  }
  get(what, e, inherits=FALSE)
}

##' Returns the bioconductor annotation package name for the given genome.
##'
##' @param from A character string naming the genome, ie. hg18, mm9, etc.
##' The function also checks to see if it is the name of the package itself.
##' @param package Passed through to the \code{\link{annotationPackage}}
##' function.
getAnnoPackageName <- function(from, package=NULL) {
  ## NOTE(review): this evaluates length(grep(...) == 1L), i.e. the length of
  ## a comparison vector, not whether exactly one match was found — it is
  ## truthy whenever grep() matches at all; confirm intent before changing.
  is.anno.package <- length(grep('^org\\..*\\.db$', from) == 1L)
  if (is.anno.package) {
    ## this is probably the package name itself
    if (!require(from, character.only=TRUE)) {
      stop("Unknown package: ", from)
    }
    from
  } else {
    ## probably the genome
    annotationPackage(from, package=package)
  }
}

## NOTE(review): shadows base::dir.exists (available since R 3.2) with a
## vectorized variant built on file.info().
dir.exists <- function(path) {
  path <- as.character(path)
  sapply(file.info(path)$isdir, isTRUE)
}

## Raises an error unless `path` is an existing directory.
assert.dir.exists <- function(path) {
  if (!dir.exists(path)) stop("Can't access directory: ", path)
}

##' Returns an object of type \code{type} from a list, this is most useful
##' when \code{the.list} has one object of \code{type} in it.
##'
##' Primarily used to get arguments out of function calls with \code{(...)}.
##'
##' @param the.list The list to search.
##' @param type The class name to look for (tested with \code{inherits}).
##' @param multi Controls behavior when several elements match: \code{FALSE}
##' warns and returns \code{NULL}; a numeric vector selects those matches by
##' position among the matches.
##' @param index When \code{TRUE}, return the position(s) of the match(es)
##' rather than the element(s) themselves.
##'
##' If this object isn't found, or other error, returns \code{NULL}
takeFromListByType <- function(the.list, type, multi=FALSE, index=FALSE) {
  take <- which(sapply(the.list, function(arg) inherits(arg, type)))
  if (length(take) == 0L) {
    return(NULL)
  }
  if (length(take) > 1) {
    if (is.logical(multi[1])) {
      if (!multi) {
        warning("Multiple objects of type ", type, " found.")
        # Sentinel meaning "give up": the subscript below yields NULL.
        take <- '..NOTHING..'
      }
    } else if (is.numeric(multi)) {
      if (any(multi > length(take)) || any(multi < 0L)) {
        warning("multi take subscript(s) out of bounds")
        take <- '..NOTHING..'
      } else {
        take <- take[multi]
      }
    } else {
      warning("Illegal type of multi argument: ", is(multi)[1])
      take <- '..NOTHING..'
    }
  }
  if (index) {
    ret.val <- take
  } else {
    ret.val <- if (length(take) > 1) the.list[take] else the.list[[take]]
  }
  ret.val
}
c98de8feb3e162686abe321b097a0c2f91976400
ec4e37d127bbe0b7ce3a3aad97730f2c5658616d
/R/get_restricted.R
b7f3a2dae5a4c5e3baa8bd52af94061b361fc424
[]
no_license
EmilNejst/Rcivecm
7d5f02f5a54c9ae107c31ab18a80127108972811
fb7cb123b732673b8f989d2175c6bbe095098af2
refs/heads/master
2021-01-22T10:57:05.921639
2017-02-26T16:53:55
2017-02-26T16:53:55
82,055,001
0
0
null
null
null
null
UTF-8
R
false
false
611
r
get_restricted.R
#' @title Get exogenous variables restricted to the cointegration vector #' #' @param data is an xts object with the naming conventions of #' Rcivecm. #' #' @return an xts object with the exogenous variables that #' are restricted to the cointegration space get_restricted <- function(data) { variable_names <- colnames(data) if(sum(str_sub(variable_names,1,2) == 'xr_')==0) { restricted_names = NULL } else { restricted_names <- variable_names[(str_sub(variable_names,1,2) == 'xr_')] restricted_variables <- data[,restricted_names] } return(restricted_variables) }
ffefdda02b3915ce7e273b8cfeb241b49ec64377
e23185c2b0e2f37beb090832feafaf647cd8f19d
/scripts/TidyAustenWellsBrontes.R
5e3a658db25ce30bdb7eb58933bbe497819ffc4a
[]
no_license
Jonlittle27/Text-Mining-Projects
9a9122f2be02e718091fce10c5b298409cc662e5
5eca661c2809c1390cc4d018b8bd0c17739d6305
refs/heads/master
2022-11-24T19:28:19.361906
2020-07-28T18:22:23
2020-07-28T18:22:23
271,325,521
0
0
null
null
null
null
UTF-8
R
false
false
586
r
TidyAustenWellsBrontes.R
##Libraries library(gutenbergr) library(tidyverse) library(tidytext) #Gutenberg book downloads ##Import Austen Books ja_books <- gutenberg_download(c(161,1342,141,158,121,105)) ##Import Wells Books hgw_books <- gutenberg_download(c(35,36,159,5230)) ##Import Bronte Books bs_books <- gutenberg_download(c(767,768,969,1260,9182)) #Basic text tidying ##Tidy Austen Books ja_tidy <- ja_books %>% unnest_tokens(word,text) ##Tidy Wells Books hgw_tidy <- hgw_books %>% unnest_tokens(word,text) ##Tidy Bronte Sisters Books bs_tidy <- bs_books %>% unnest_tokens(word,text)
9eb3d75a9b4885e8d4f9cf65c78cdbb5eec677d0
fe487c83debd4b17e05641122c82c0b771d4b11d
/man/lines3DDataFrame.Rd
7ac9876a366e4e3f8f3763ba043d7aa047292bf9
[]
no_license
CloudComputer/geomod3D
5a21c673048cfef147d8bc68c901ea599f49b638
fd00f8f3188977a07028b08e1e6a7f055322bc60
refs/heads/master
2021-08-14T05:59:02.322040
2017-11-14T17:41:02
2017-11-14T17:41:02
114,229,121
1
0
null
2017-12-14T09:20:03
2017-12-14T09:20:02
null
UTF-8
R
false
true
2,562
rd
lines3DDataFrame.Rd
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/lines3DDataFrame.R \name{lines3DDataFrame} \alias{lines3DDataFrame} \alias{lines3DDataFrame} \title{Drillhole database} \usage{ lines3DDataFrame(lines_list = NULL, df = NULL, collar = NULL, assay = NULL, survey = NULL, holeid = "HOLEID", from = "FROM", to = "TO", X = "X", Y = "Y", Z = "Z") lines3DDataFrame(lines_list = NULL, df = NULL, collar = NULL, assay = NULL, survey = NULL, holeid = "HOLEID", from = "FROM", to = "TO", X = "X", Y = "Y", Z = "Z") } \arguments{ \item{lines_list}{A \code{list} containing the line segments' coordinates.} \item{df}{A \code{data.frame} with the line segments' attributes.} \item{collar, assay, survey}{Data frames with drillhole data.} \item{holeid, from, to, X, Y, Z}{Column names from which to draw the parameters.} } \description{ Drillhole database represented as line segments. Extends the \code{spatial3DdataFrame} class. Drillhole database represented as line segments. } \details{ The \code{"HOLEID"} column is used by this object's methods. Do not remove or rename it. There are two ways to build a \code{lines3DDataFrame} object: either providing the data in the format that is seen in the final object (less common) or by providing collar and assay data (more common). In the first mode, both \code{lines_list} and \code{df} must be given. \code{lines_list} is a \code{list} where each entry is a numeric vector of length 6 containing the X, Y, and Z coordinates for both ends of a line segment. \code{df} is a data frame containing the attributes for each segment. In the second mode, both \code{collar} and \code{assay} must be given. \code{collar} contains the X, Y, and Z coordinates for each drillhole's collar. \code{assay} contains the lithological description, chemical analysis, etc. in intervals delimimted by the \code{from} and \code{to} columns. \code{survey} contains the dipmeter measurements for each interval. 
If it is missing, the holes are assumed vertical. The \code{holeid} column must be present in all three data frames. } \section{Slots}{ \describe{ \item{\code{coords}}{A \code{list} containing the coordinates for each data unit.} \item{\code{data}}{A \code{data.frame} with each data unit's attributes.} \item{\code{bbox}}{A \code{matrix} containing the coordinates of two opposing edges of the object's bounding box.} }} \seealso{ \code{\link{lines3DDataFrame-init}}, \code{\link{spatial3DDataFrame-class}} \code{\link{spatial3DDataFrame-class}}, \code{\link{lines3DDataFrame-class}} }
885a3de210ed0a67e4f45402b654a75c394c28c1
44adaf73180ccb431abe3f0464968d3d3832c8f9
/R/sim-fmri-2covar.R
980296edd663b20173a2f0f17795fd55e78ea6aa
[]
no_license
bioimaginggroup/adaptsmoFMRI
7f3870ff27ccdeb9c3a1a50a5a1c42bb683906ae
60c0272cccd2d2cf3ee2cefda3049699dd5b0b01
refs/heads/master
2021-06-05T15:42:41.654811
2021-04-20T09:59:08
2021-04-20T09:59:08
147,805,013
1
0
null
2018-09-07T09:51:43
2018-09-07T09:51:43
null
UTF-8
R
false
false
3,758
r
sim-fmri-2covar.R
#' This function returns the synthetic spatiotemporal data set resembling functional MR Images (fMRI) data.
#'
#' The returned data is simulated on a 20 x 20 grid.
#'
#' @name sim.fmri2COVAR
#' @aliases sim.fmri2COVAR
#' @title Simulate FMRI Data
#' @usage sim.fmri2COVAR(hrf, beta.Var1, beta.Var2)
#' @param hrf haemodynamic response function, needs to be a matrix with \code{T} rows and 2 columns.
#' @param beta.Var1 scalar, defines the height of the activated area, in form of a cylinder of the first grid.
#' @param beta.Var2 scalar, defines the height of the activated area, in form of a cylinder of the second grid.
#' @return a list with components \code{fmri} (20 x 20 x T array of
#'   simulated signals), \code{hrf}, \code{coeff1}/\code{coeff2} (length-400
#'   activation maps), \code{resid} (noise array) and \code{sigma}
#'   (length-400 pixel noise variances).
#' @author Max Hughes
#' @import Matrix
#' @note This function is solely for two covariates.
#' @examples
#' # non-transformed hr-function
#' T <- 180
#' seq.length <- T*3
#' index <- seq(3, T*3, by = 3)
#' vis <- rep(c(-0.5, 0.5), each=30, times=ceiling(T/30*1.5))
#' vis <- as.matrix(vis[index])
#' aud <- rep(c(-0.5, 0.5), each=45, times=ceiling(T/30*1.5))
#' aud <- as.matrix(aud[index])
#' hrf <- cbind(vis,aud)
#' # define height of activation area
#' beta.Var1 <- beta.Var2 <- 3
#' # use function to obtain fmri data
#' data <- sim.fmri2COVAR(hrf, beta.Var1, beta.Var2)$fmri
sim.fmri2COVAR <- function(hrf, beta.Var1, beta.Var2) {
  if (any(is.na(hrf))) stop("\nNAs in hr function.\n")

  ## The two-covariate model requires exactly two design columns.
  p <- dim(hrf)[2]
  if (p != 2)
    stop("Haemodynamic response function needs to be a matrix with column dimension of 2.")

  I <- 400                       # number of pixels on the 20 x 20 grid
  Z <- as.matrix(hrf)            # design matrix for a single pixel
  T <- dim(Z)[1]
  Z.Var1 <- as.matrix(Z[, 1])
  Z.Var2 <- as.matrix(Z[, 2])

  ## Block-diagonal (sparse) designs: one copy of each covariate per pixel,
  ## so IZ %*% beta stacks the per-pixel signals into one long vector.
  IZ.Var1 <- kronecker(as(diag(1, nrow = I, ncol = I), "sparseMatrix"), Z.Var1)
  IZ.Var2 <- kronecker(as(diag(1, nrow = I, ncol = I), "sparseMatrix"), Z.Var2)

  ## Pixel-wise noise variances, randomly scattered around 25.
  ## (Loop kept so the RNG draw order matches the original implementation.)
  sigma.sq <- numeric(I)
  for (i in seq_len(I)) sigma.sq[i] <- 25 + 2 * rnorm(1)

  ## Gaussian noise per pixel and time point, sd varying by pixel.
  eps <- matrix(nrow = T, ncol = I)
  for (i in seq_len(I)) eps[, i] <- rnorm(T, mean = 0, sd = sqrt(sigma.sq[i]))
  eps <- as.vector(eps)

  ## Activation maps: identical "cylinder" layout, different heights.
  ## (Previously built twice via ~30 duplicated hard-coded assignments.)
  beta.sim.Var1 <- .sim.activation.grid(beta.Var1)
  beta.sim.Var2 <- .sim.activation.grid(beta.Var2)

  ## Simulated observations: signal from both covariates plus noise.
  y <- IZ.Var1 %*% beta.sim.Var1 + IZ.Var2 %*% beta.sim.Var2 + eps

  ## Reshape back to (nrow x ncol x T) arrays so the output matches the
  ## layout of real data; aperm swaps the first two array subscripts.
  y <- t(matrix(nrow = T, ncol = I, data = y))
  y <- array(y, dim = c(20, 20, T))
  y <- aperm(y, c(2, 1, 3))

  ## Same reshaping for the noise.
  eps <- t(matrix(nrow = T, ncol = I, data = eps))
  eps <- array(eps, dim = c(20, 20, T))
  eps <- aperm(eps, c(2, 1, 3))

  return(list("fmri" = y, "hrf" = Z, "coeff1" = beta.sim.Var1,
              "coeff2" = beta.sim.Var2, "resid" = eps, "sigma" = sigma.sq))
}

#' Build one activation map as a length-n*n vector (column-major).
#'
#' The activated area is the square \code{active} x \code{active} set to
#' \code{height}, with three cells blanked at each corner so the region
#' approximates a cylinder cross-section (matches the original hard-coded
#' zero pattern exactly).
#'
#' @param height scalar activation height.
#' @param n side length of the square pixel grid.
#' @param active indices of the activated rows/columns.
#' @keywords internal
.sim.activation.grid <- function(height, n = 20, active = 8:14) {
  grid <- matrix(0, nrow = n, ncol = n)
  grid[active, active] <- height
  lo <- min(active)
  hi <- max(active)
  ## Round off the square: at each of the four corners, zero the corner
  ## cell and its two in-square neighbours.
  for (r in c(lo, hi)) {
    for (cl in c(lo, hi)) {
      grid[r, cl] <- 0
      grid[if (r == lo) r + 1L else r - 1L, cl] <- 0
      grid[r, if (cl == lo) cl + 1L else cl - 1L] <- 0
    }
  }
  as.vector(grid)
}