blob_id
stringlengths
40
40
directory_id
stringlengths
40
40
path
stringlengths
2
327
content_id
stringlengths
40
40
detected_licenses
listlengths
0
91
license_type
stringclasses
2 values
repo_name
stringlengths
5
134
snapshot_id
stringlengths
40
40
revision_id
stringlengths
40
40
branch_name
stringclasses
46 values
visit_date
timestamp[us]date
2016-08-02 22:44:29
2023-09-06 08:39:28
revision_date
timestamp[us]date
1977-08-08 00:00:00
2023-09-05 12:13:49
committer_date
timestamp[us]date
1977-08-08 00:00:00
2023-09-05 12:13:49
github_id
int64
19.4k
671M
star_events_count
int64
0
40k
fork_events_count
int64
0
32.4k
gha_license_id
stringclasses
14 values
gha_event_created_at
timestamp[us]date
2012-06-21 16:39:19
2023-09-14 21:52:42
gha_created_at
timestamp[us]date
2008-05-25 01:21:32
2023-06-28 13:19:12
gha_language
stringclasses
60 values
src_encoding
stringclasses
24 values
language
stringclasses
1 value
is_vendor
bool
2 classes
is_generated
bool
2 classes
length_bytes
int64
7
9.18M
extension
stringclasses
20 values
filename
stringlengths
1
141
content
stringlengths
7
9.18M
ce32cc0da45f210233177859bc4bf210c9fec021
966ad13abda2543348f0b0a28fe6069d7c3e0609
/Otros años/Laboratorio/PECL3 FCD/Parte 1/2_regresion.r
ca61dc88877c03cbc8b7e774026210ebbf1a3401
[]
no_license
YoInterneto/FCDLaboratorio
28ecae0b4428b9edb9efbddfd4a206713402aef6
b85666d8ac9d38d140cc04bba954b23b92c820ff
refs/heads/master
2023-02-14T20:20:57.329008
2021-01-12T19:41:00
2021-01-12T19:41:00
307,632,622
0
0
null
null
null
null
UTF-8
R
false
false
324
r
2_regresion.r
# Set the working directory to the folder containing this R script.
# (parent.frame(2)$ofile is only populated when the file is run via source().)
script_dir <- dirname(parent.frame(2)$ofile)
setwd(script_dir)

# Load the planet data set from the script's directory.
planetas <- read.table("./planetas.txt")
print(planetas)

# Fit and report the linear regression of D on R.
regresion <- lm(D ~ R, data = planetas)
print(regresion)
5e8f75e00d363ae1d4b06ea20845ab3bd6db588d
eb8922cb386fe3d5f73c199ab4c79a7b3b668f48
/R/ICCbare.R
164d73a715fd0ca1530fc2eb63526681929c7023
[]
no_license
matthewwolak/ICC
460163f31a4dfc70a64bb3ec3744b338e0a67946
6b19771940648e21ba9775b309a91e15259601bd
refs/heads/master
2022-05-29T02:35:30.756411
2022-05-20T13:06:21
2022-05-20T13:06:21
37,179,987
7
4
null
2022-05-19T15:07:43
2015-06-10T06:38:22
R
UTF-8
R
false
false
3,059
r
ICCbare.R
#' @rdname ICCest
#' @export
#' @importFrom stats aggregate anova aov formula na.omit replications
ICCbare <- function(x, y, data = NULL){
  # Capture the unevaluated arguments so that 'x' (grouping variable) and 'y'
  # (response) can be supplied as bare column names of 'data', as objects, or
  # -- deprecated -- as character strings.
  icall <- list(y = substitute(y), x = substitute(x))

  # Resolve 'y' to a vector of values.
  if(is.character(icall$y)){
    warning("passing a character string to 'y' is deprecated since ICC vesion 2.3.0 and will not be supported in future versions. The argument to 'y' should either be an unquoted column name of 'data' or an object")
    if(missing(data)) stop("Supply either the unquoted name of the object containing 'y' or supply both 'data' and then 'y' as an unquoted column name to 'data'")
    icall$y <- eval(as.name(y), data, parent.frame())
  }
  if(is.name(icall$y)) icall$y <- eval(icall$y, data, parent.frame())
  if(is.call(icall$y)) icall$y <- eval(icall$y, data, parent.frame())
  # A character value at this point is treated as the name of an object.
  if(is.character(icall$y)) icall$y <- eval(as.name(icall$y), data, parent.frame())

  # Resolve 'x' the same way.
  if(is.character(icall$x)){
    warning("passing a character string to 'x' is deprecated since ICC vesion 2.3.0 and will not be supported in future versions. 
The argument to 'x' should either be an unquoted column name of 'data' or an object")
    if(missing(data)) stop("Supply either the unquoted name of the object containing 'x' or supply both 'data' and then 'x' as an unquoted column name to 'data'")
    icall$x <- eval(as.name(x), data, parent.frame())
  }
  if(is.name(icall$x)) icall$x <- eval(icall$x, data, parent.frame())
  if(is.call(icall$x)) icall$x <- eval(icall$x, data, parent.frame())
  if(is.character(icall$x) && length(icall$x) == 1) icall$x <- eval(as.name(icall$x), data, parent.frame())

  # Assemble a two-column data frame (y, x) and drop incomplete rows.
  tdata <- data.frame(icall)
  tdata <- na.omit(tdata)
  # Number of groups observed after NA removal.
  a <- length(unique(tdata$x))
  if(!is.null(attributes(tdata)$na.action)){
    # NOTE(review): cat() prints the row numbers but returns NULL, so
    # warning() receives NULL here -- this looks like it was meant to be
    # warning(paste(...)); confirm against ICCest in the same package.
    warning(cat("NAs removed from rows:\n", unclass(attributes(tdata)$na.action), "\n"))
  }

  # Ensure the grouping variable is a factor carrying only observed levels.
  if(!is.factor(tdata$x)){
    warning("'x' has been coerced to a factor")
    tdata$x <- as.factor(tdata$x)
  } else{
    if(length(levels(tdata$x)) > a){
      tdata$x <- factor(as.character(tdata$x), levels = unique(tdata$x))
      warning("Missing levels of 'x' have been removed")
    }
  }

  # Model formula (y ~ x) derived from the data frame's columns.
  fmla <- formula(tdata)

  if (!is.list(replications(fmla, tdata))){
    # Balanced design (replications() returns a vector): compute the one-way
    # ANOVA mean squares directly.
    tmp1 <- aggregate(tdata[, 1], list(tdata[, 2]), FUN = mean)    # group means
    tmp2 <- aggregate(tdata[, 1], list(tdata[, 2]), FUN = length)  # group sizes
    ord.data <- tdata[order(tdata[, 2]),]
    Treat.m <- rep(tmp1$x, tmp2$x)
    # Squared deviations: group mean vs grand mean, and observation vs group mean.
    Among <- (Treat.m - rep(mean(tdata[, 1]), nrow(tdata)))^2
    Within <- (ord.data[, 1] - Treat.m)^2
    # Mean squares among / within; all group sizes equal tmp2$x[1] here.
    MS <- c(sum(Among), sum(Within)) / c(length(tmp2$x) - 1, length(tmp2$x) * (tmp2$x[1]-1))
    var.a <- (MS[1] - MS[2]) / tmp2$x[1]
    # Intraclass correlation = among-group variance / total variance.
    return(var.a / (var.a + MS[2]))
  } else{
    # Unbalanced design: fit via aov() and correct for unequal group sizes.
    tmpbb <- anova(aov(fmla, data = tdata))
    MSa <- tmpbb[3][1, 1]
    tmp.outj <- aggregate(y ~ x, data = tdata, FUN = length)$y
    var.a <- (MSa - tmpbb[3][2, 1]) /((1 / (a - 1)) * (sum(tmp.outj) - (sum(tmp.outj^2) / sum(tmp.outj))))
    return(var.a / (tmpbb[3][2,1] + var.a))
  }
}
eceaa148afd50e097058b087dba4ba6a98fe8f02
6acb4a76ec6af83585d131571b0ea5afa12b79b5
/utils/conversion.R
132f2eabb80fd47edee72bdecbf64cd698ca66ee
[ "CC-BY-4.0" ]
permissive
anhnguyendepocen/StatCompLab
f59e400330e2b9734b769899e779caa9fe8cfb8e
92e8b5afd3464782d1ca2e58af5799bf176d54cb
refs/heads/main
2023-04-03T00:32:42.266266
2021-03-28T21:38:24
2021-03-28T21:38:24
null
0
0
null
null
null
null
UTF-8
R
false
false
4,304
r
conversion.R
# Convert an R Markdown document between learnr-tutorial form and
# html_vignette form, optionally toggling a "solutions" variant.
#
# input_filename:  path to the source .Rmd file.
# type:            target format, "vignette" or "tutorial" (match.arg'd).
# solutions:       whether the output should be the solutions variant.
# output_filename: destination path; if NULL, a conventional path is derived
#                  ("inst/tutorials/<name>/<name>.Rmd" or "vignettes/<name>.Rmd").
# Returns invisible(NULL); called for its file-writing side effects.
convert_rmd <- function(input_filename, type = NULL, solutions = FALSE,
                        output_filename = NULL) {
  new_type <- match.arg(type, c("vignette", "tutorial"))
  orig_name <- basename(input_filename)
  orig_name <- gsub(pattern = "\\.Rmd", replacement = "", x = orig_name)
  # Parse input file: split on "---" so element 2 is the YAML front matter.
  rmd <- readr::read_file(input_filename)
  rmd_split <- strsplit(rmd, "---")
  config <- yaml::read_yaml(text = rmd_split[[1]][2])
  # Detect output type from the YAML "output" field.
  if (identical(config[["output"]], "learnr::tutorial")) {
    orig_type <- "tutorial"
  } else if (identical(config[["output"]], "rmarkdown::html_vignette")) {
    orig_type <- "vignette"
  } else {
    stop("Unknown original type")
  }
  # Detect .tutorial and .solutions flags embedded in the document body.
  orig_tutorial <- grepl(pattern = "\\n\\.tutorial <- TRUE\\n", rmd)
  orig_solutions <- grepl(pattern = "\\n\\.solutions <- TRUE\\n", rmd)
  if (identical(new_type, orig_type) && identical(solutions, orig_solutions)) {
    warning("Input of same type as output. Nothing to do.")
  }
  # Construct output filename when the caller did not supply one.
  if (is.null(output_filename)) {
    new_name <- paste0(
      orig_name,
      if (solutions) { "Solutions" } else { NULL }
    )
    if (new_type == "tutorial") {
      output_filename <- file.path(
        "inst/tutorials", new_name, paste0(new_name, ".Rmd")
      )
    }
    if (new_type == "vignette") {
      output_filename <- file.path(
        "vignettes", paste0(new_name, ".Rmd")
      )
    }
  }
  # NOTE(review): new_dir is computed but never used below; confirm whether
  # it was meant to feed the dir.create() call near the end.
  new_dir <- dirname(output_filename)
  new_name <- basename(output_filename)
  new_name <- gsub(pattern = "\\.Rmd", replacement = "", x = new_name)
  # Append " (solutions)" to the title only when turning a non-solutions
  # document into a solutions one.
  config$title <- paste0(
    config[["title"]],
    if (solutions && !orig_solutions) { " (solutions)" } else { NULL }
  )
  # Rewrite format-specific YAML fields.
  if (new_type == "tutorial") {
    config$output <- "learnr::tutorial"
    config$runtime <- "shiny_prerendered"
    if (is.null(config$tutorial)) {
      config$tutorial <- list()
    }
    config$tutorial$id = paste0(
      "shinyapps.finnlindgren.StatComp", orig_name
    )
  } else {
    config$runtime <- NULL
  }
  if (new_type == "vignette") {
    config$output <- "rmarkdown::html_vignette"
    config$vignette <- paste0("%\\VignetteIndexEntry{", config[["title"]], "}\n",
                              "%\\VignetteEngine{knitr::rmarkdown}\n",
                              "%\\VignetteEncoding{UTF-8}\n")
  } else {
    config$vignette <- NULL
  }
  # Re-serialise the YAML block and stitch the document back together.
  rmd_split[[1]][2] <- yaml::as.yaml(config)
  new_rmd <- paste0(rmd_split[[1]], collapse = "---\n")
  # Update the in-document .tutorial / .solutions flags to match the output.
  new_rmd <- sub(pattern = "\\n\\.tutorial <- [^\\n]*\\n",
                 replacement = paste0("\n.tutorial <- ",
                                      identical(new_type, "tutorial"), "\n"),
                 new_rmd)
  new_rmd <- sub(pattern = "\\n\\.solutions <- [^\\n]*\\n",
                 replacement = paste0("\n.solutions <- ", solutions, "\n"),
                 new_rmd)
  if (!dir.exists(dirname(output_filename))) {
    dir.create(dirname(output_filename))
  }
  write(new_rmd, file = output_filename)
  # Copy resources (e.g. images/) that live next to a tutorial source.
  if (identical(orig_type, "tutorial")) {
    resource_dirs <- c("images")
    for (res_dir in resource_dirs) {
      in_dir <- file.path(
        dirname(input_filename), res_dir)
      out_dir <- file.path(
        dirname(output_filename), res_dir)
      if (dir.exists(in_dir)) {
        if (!dir.exists(out_dir)) {
          dir.create(out_dir)
        }
        for (file in list.files(in_dir)) {
          file.copy(file.path(in_dir, file),
                    file.path(out_dir, file))
        }
      }
    }
  }
  invisible(NULL)
}

# Convenience wrapper: from one tutorial source, generate the tutorial
# solutions variant plus both vignette variants.
convert_tutorial_rmd <- function(input_filename) {
  convert_rmd(input_filename, type = "tutorial", solutions = TRUE)
  convert_rmd(input_filename, type = "vignette", solutions = FALSE)
  convert_rmd(input_filename, type = "vignette", solutions = TRUE)
}
2b65a72d13dac9cb965633eb3e134be812b0df7e
36d6419049d37f98ddd1bb567ecb7761c7a4183e
/R/Bayes_Ord_Design_PO.R
57aa12230cac0a40fb592ddb1b318cd1d580a4f4
[]
no_license
alicechen310/BayesOrdDesign
9fc919854d47a05530fde7dc0aefe29c619c4d08
728b58471a472b069fc166154ab8427cea2485d5
refs/heads/master
2023-09-04T23:32:08.504393
2021-09-21T12:40:02
2021-09-21T12:40:02
null
0
0
null
null
null
null
UTF-8
R
false
false
4,126
r
Bayes_Ord_Design_PO.R
#' Determine the sample size for Bayesian two-stage trial design
#' of ordinal endpoints with proportional odds assumption
#'
#' @description
#' Obtain estimated sample size based on user-specified type I
#' error, power and effect size defined by the odds ratio between
#' the treatment and control groups, under the proportional
#' odds (PO) assumption.
#'
#' @param or_alt effect size to be detected (under H_1)
#' in terms of odds ratio
#' @param pro_ctr distribution of clinical categories for the
#' control group
#' @param alpha the desirable type I error rate to be controlled
#' @param power the desirable power to be achieved
#' @param nmax the maximum sample size for searching to get the desirable power
#' @param ntrial the number of simulated trials
#' @param method whether the statistical test for interim/final analysis is
#' Bayesian or Frequentist. method = "Frequentist" for Frequentist approach;
#' method = "Bayesian" for Bayesian approach
#'
#' @return ss_po() returns recommended sample size for each of
#' two groups for the interim and final stages, by assuming 1:1 equal
#' randomization for the two groups at each stage; and the corresponding power.
#' @export
#' @details
#' Grid search of sample size is used to guarantee a desirable type I error
#' rate. The upper limitation is 200, and the lower limitation default is a
#' sample size of 50 for the control and treatment groups at each stage. The
#' default increment of the sequence is 50.
#'
#' For parameter estimation, two options are available and can be selected
#' using the method argument: (i) method = "Frequentist", (ii) method =
#' "Bayesian". If method = "Frequentist", parameters are estimated via package
#' ordinal, which is based on a frequentist method; if method = "Bayesian",
#' parameters are estimated through a Bayesian model.
#'
#' Please note, in our example, argument ntrial = 5 is for time-saving
#' purposes.
#'
#' @examples
#' ss_po(or_alt = 1.5, pro_ctr = c(0.58,0.05,0.17,0.03,0.04,0.13), alpha = 0.05,
#' power = 0.8, nmax = 100, ntrial = 5, method ="Frequentist")
#'
## sample size calculator
ss_po = function(or_alt, pro_ctr, alpha, power, nmax, ntrial,method){

  N = 200 # maximum sample size

  # Stage 1: under the null (odds ratio = 1), grid-search the futility (cf)
  # and superiority (threshold) cut-offs that best control the type I error.
  cf_grid = seq(0.6, 0.7, by=0.1)
  threshold_grid = seq(0.85, 0.99, by=0.05)

  # Per-trial odds ratios simulated around log(1), i.e. the null.
  log_or = rnorm(ntrial, log(1), sd = 0.2)
  or = exp(log_or)
  # One row per simulated trial, one column per cumulative-logit cut-point.
  or.mat = matrix(rep(or, each = length(pro_ctr)-1, times=1),ncol = length(pro_ctr)-1,byrow = TRUE)

  output = c()
  for (cf in cf_grid){
    for (threshold in threshold_grid){
      out = multiple_trial_po(sim_runs = ntrial, or.mat, pro_ctr = pro_ctr,
                              n = N, cf = cf, threshold = threshold,
                              method = method)
      rr = c(cf, threshold, out)
      output = rbind(output, rr)
      colnames(output) = c("cf", "threshold", "PET(%)", "alpha", "avgss")
      results = as.data.frame(output)
    }
  }

  # Pick the (cf, threshold) pair whose simulated type I error is closest to
  # the requested alpha; ties are broken by taking the first such row.
  index = min(which(abs(results$alpha-alpha)==min(abs(results$alpha-alpha))))
  vec = c(results[index,c(1,2)])
  thrsh = c(vec$cf, vec$threshold)
  # NOTE(review): "superority" is a typo for "superiority", but it is part of
  # the returned object's names, so correcting it would change public output.
  names(thrsh) = c("futility", "superority")

  # Stage 2: under the alternative (odds ratio = or_alt), grid-search the
  # per-stage sample size achieving the requested power at those cut-offs.
  output = c()
  log_or = rnorm(ntrial, log(or_alt), sd = 0.2)
  or = exp(log_or)
  or.mat = matrix(rep(or, each = length(pro_ctr)-1, times=1),
                  ncol = length(pro_ctr)-1,byrow = TRUE)
  n_grid = seq(50, nmax, by = 50)
  for (n in n_grid){
    out = multiple_trial_po(sim_runs = ntrial, or.mat, pro_ctr = pro_ctr, n,
                            cf = vec$cf, threshold = vec$threshold,
                            method = method)
    rr = c(n, out)
    output = rbind(output, rr)
    colnames(output) = c("sample size", "PET(%)", "Power", "avgss")
  }

  # Report the sample size whose simulated power is closest to the target.
  results = list()
  index = min(which(abs(output[,3]-power)==min(abs(output[,3]-power))))
  results$sample_size = output[index, 1]
  results$power = output[index, 3]
  results$threshold = thrsh
  return(results)
}
73423b0791a0e4f1c8f61144d12a16f857c74ffe
3c38d8cbe00ffb6d1150682ea1f3c79acfc33d96
/R/CG_inflator.R
31e0202e7d63330fb155bc8a2d18dc079f2f659e
[]
no_license
HughParsonage/grattan
c0dddf3253fc91511d122870a65e65cc918db910
cc3e37e1377ace729f73eb1c93df307a58c9f162
refs/heads/master
2023-08-28T00:12:35.729050
2023-08-25T08:02:25
2023-08-25T08:02:25
30,398,321
26
11
null
2022-06-26T15:44:27
2015-02-06T06:18:13
R
UTF-8
R
false
false
3,012
r
CG_inflator.R
#' Forecasting capital gains
#'
#' @name CG_population_inflator
#' @param x To be inflated.
#' @param from_fy,to_fy Financial years designating the inflation period.
#' @param forecast.series One of \code{"mean"}, \code{"lower"}, \code{"upper"}. What estimator to use in forecasts. \code{"lower"} and \code{"upper"} give the lower and upper boundaries of the 95\% prediction interval.
#' @param cg.series (Not implemented.)
#' @return For \code{CG_population_inflator}, the number of individuals estimated to incur capital gains in \code{fy_year}.
#' For \code{CG_inflator}, an estimate of the nominal value of (total) capital gains in \code{to_fy} relative to the nominal value in \code{from_fy}.

CG_population_inflator <- function(x = 1,
                                   from_fy,
                                   to_fy,
                                   forecast.series = "mean",
                                   cg.series){
  # Normalise the financial-year inputs (e.g. "2015-16").
  from_fy <- validate_fys_permitted(from_fy)
  to_fy <- validate_fys_permitted(to_fy)
  stopifnot(forecast.series %in% c("mean", "lower", "upper", "custom"))
  # NOTE(review): last_fy / last_year are computed but not used below;
  # confirm whether they were meant to bound the lookup table.
  last_fy <- max(from_fy, to_fy)
  last_year <- fy2yr(last_fy)
  input <- data.table(x = x, from_fy = from_fy, to_fy = to_fy)
  # Plain variable so data.table scoping does not confuse the column
  # 'forecast.series' with the function argument of the same name.
  nse_forecast_series <- forecast.series
  out_tbl <-
    cg_inflators_1516 %>%
    copy %>%
    .[forecast.series == nse_forecast_series]
  # Look up the estimated CG population in both years and scale x by their
  # ratio (merge keeps the input row order via sort = FALSE).
  input %>%
    merge(out_tbl, by.x = "from_fy", by.y = "fy_year",
          all.x = TRUE, sort = FALSE) %>%
    setnames("n_CG", "n_CG_from") %>%
    merge(out_tbl, by.x = "to_fy", by.y = "fy_year",
          all.x = TRUE, sort = FALSE) %>%
    setnames("n_CG", "n_CG_to") %$%
    {
      x * n_CG_to / n_CG_from
    }
}

#' @rdname CG_population_inflator
CG_inflator <- function(x = 1, from_fy, to_fy, forecast.series = "mean"){
  # x, from_fy, to_fy must have compatible lengths (no silent recycling).
  prohibit_vector_recycling(x, from_fy, to_fy)
  stopifnot(is.numeric(x))
  # Only years present in both inflator tables are permitted.
  cg_fys <- union(cg_inflators_1213[["fy_year"]],
                  cg_inflators_1617[["fy_year"]])
  from_fy <- validate_fys_permitted(from_fy, permitted_fys = cg_fys)
  to_fy <- validate_fys_permitted(to_fy, permitted_fys = cg_fys)
  nse_forecast_series <- forecast.series
  cg_inflators_tbl <- cg_inflators_1516[forecast.series == nse_forecast_series]

  # CRAN Note avoidance
  ordering <-
    NULL

  # 'ordering' records input row order so it can be restored after merges.
  input <-
    data.table(x = x, from_fy = from_fy, to_fy = to_fy) %>%
    .[, ordering := 1:.N]

  # Ratio of total (zero-discount) net capital gains between the two years.
  raw_out <-
    input %>%
    merge(cg_inflators_tbl, by.y = "fy_year", by.x = "from_fy",
          all.x = TRUE) %>%
    setnames("zero_discount_Net_CG_total", "from_cg") %>%
    merge(cg_inflators_tbl, by.y = "fy_year", by.x = "to_fy",
          all.x = TRUE) %>%
    setnames("zero_discount_Net_CG_total", "to_cg") %>%
    setorderv("ordering") %$%
    {
      x * to_cg / from_cg
    }

  # The tax expenditures reflect totals, not means, so we need to adjust for
  # totals.
  raw_out / CG_population_inflator(1, from_fy = from_fy, to_fy = to_fy)
}

# CGT discount rate by (calendar) year: 50% from 1985 onward, else 0.
# NOTE(review): originally commented "# A constant", but the value depends
# on yr via if_else.
CGT_DISCOUNT <- function(yr) {
  # A constant
  if_else(yr >= 1985L, 0.5, 0)
}
60d9143b1adfcaafb96b5737738ba50b8a466b34
25db982bcae8baab147439622b38c5c882b21d38
/man/base_regioes_saude.Rd
9182c9ad2da1f34816be5017dbccb33715955726
[]
no_license
Glauco1990/AnaliseCOVID19Brasil
f3eab5e29e5471face3c9d3e40ac80281b8d8fe6
de31dc3cad0a5972284376961db9c6726ba4c675
refs/heads/master
2022-12-12T10:25:51.523173
2020-09-15T08:28:24
2020-09-15T08:28:24
null
0
0
null
null
null
null
UTF-8
R
false
true
685
rd
base_regioes_saude.Rd
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/subfuncoes.R \name{base_regioes_saude} \alias{base_regioes_saude} \title{Gera a Base Derivada ao nivel de Regioes de Saude} \usage{ base_regioes_saude() } \value{ Base Derivada ao nivel de Regioes de Saude. } \description{ Gera a Base Derivada ao nivel de Regioes de Saude com diversos indicadores normalizados para as analises graficas (medias moveis de 7 dias, taxa de mortalidade de casos detectados, taxas por 100 mil habitantes, indicadores normalizados por populacao, indicadores normalizados por area, indicadores normalizados por densidade populacional e indicadores normalizados com logaritmos). }
6be9f95b1f9761f95dad8ed9f7f477aa58f11317
b64c3840bc62fb63c5455c4158254638b42ef139
/PA1.R
61eb2afc6977677c1638ca298fdaab7c343044d7
[]
no_license
chtongueek/RepData_PeerAssessment1
4211901e0c9d9e33b88e5e00186a3a19c72465b5
97eef9583a0158d2d84140aa92395165d009f611
refs/heads/master
2020-12-03T09:28:59.764347
2015-06-11T18:19:34
2015-06-11T18:19:34
37,091,086
0
0
null
2015-06-08T20:30:41
2015-06-08T20:30:41
null
UTF-8
R
false
false
2,710
r
PA1.R
#### Loading and preprocessing ####
# Analysis of the "activity" step-count data set. Expects ./activity.csv in
# the working directory; produces histograms, a time-series plot and a
# weekday/weekend panel plot.
graphics.off()

# 1) Read in data
data <- read.csv("./activity.csv", header = TRUE, stringsAsFactors = FALSE)

# 2) Pre-processing: making number of steps a numeric
data$steps <- as.numeric(data$steps)

#### What is the mean total number of steps taken per day ####
# 1) Calculate total number of steps per day
steps1 <- aggregate(data$steps, by = list(data$date), FUN = sum, na.rm = TRUE)

# 2) Make a histogram
hist(steps1[, 2], 10, main = "Steps Frequency", xlab = "Steps")

# 3) Calculate the mean and median number of steps for each day
steps2 <- aggregate(data$steps, by = list(data$date), FUN = mean, na.rm = TRUE)
steps3 <- aggregate(data$steps, by = list(data$date), FUN = median, na.rm = TRUE)
totals <- cbind(steps2, steps3[, 2])
names(totals) <- c("Date", "Average Steps", "Median Steps")

#### What is the average daily activity pattern ####
# Get the mean number of steps per interval
pat <- aggregate(data$steps, by = list(data$interval), FUN = mean, na.rm = TRUE)

# 1) Plot the mean number of steps per interval
plot(pat, type = "l", main = "Average Steps", xlab = "Interval", ylab = "Steps")

# 2) Get the interval with the maximum mean number of steps.
# BUG FIX: the original indexed with max(pat[, 2]) -- the maximum VALUE --
# instead of which.max(pat[, 2]), the row where that value occurs.
maxInt <- pat[which.max(pat[, 2]), 1]

#### Imputing missing values ####
# 1) Count the total number of NAs
NAcount <- sum(is.na(data))

# 2) & 3)
# Create second data frame equal to the first
data2 <- data
# Find where NAs occur
subs <- which(is.na(data2[, 1]))
# Replace NAs with the mean corresponding to their interval tag.
# BUG FIX: the original looked up pat by data2$interval[i], but i indexes the
# 'subs' vector, not 'data2'; the correct row of data2 is subs[i].
for (i in seq_along(subs)) {
  data2[subs[i], 1] <- pat[data2$interval[subs[i]] == pat[, 1], 2]
}

# 4) Find the total, mean, and median number of steps per day
rep1 <- aggregate(data2$steps, by = list(data2$date), FUN = sum, na.rm = TRUE)
rep2 <- aggregate(data2$steps, by = list(data2$date), FUN = mean, na.rm = TRUE)
rep3 <- aggregate(data2$steps, by = list(data2$date), FUN = median, na.rm = TRUE)
hist(rep1[, 2], 10, main = "Step Frequency (NA replaced)", xlab = "Steps")

#### Differences in activity patterns ####
## Date 1, 2012-10-01 is a Monday ##
# 1)
# Convert date to date type
data2$date <- as.Date(data2$date)
# Take the difference of each date and the first date, mod 7
datediff <- as.numeric(data2$date - data2$date[1]) %% 7
# The first date is a Monday, so datediffs from 0-4 will be Monday thru Friday
# datediffs 5 and 6 will be Saturday and Sunday
data2$wkdy[datediff %in% 0:4] <- "weekday"
data2$wkdy[datediff %in% 5:6] <- "weekend"
wkdy <- aggregate(data2$steps, by = list(data2$interval, data2$wkdy),
                  FUN = mean)
names(wkdy) <- c("interval", "weekday", "steps")

# 2) Panel plot: average steps per interval, weekday vs weekend
library(lattice)
xyplot(wkdy$steps ~ wkdy$interval | wkdy$weekday, layout = c(1, 2), type = "l",
       xlab = "Interval", ylab = "Average Steps")
80c5e71385d86b98923251ed1a0a9de207c73f39
a3864f60b8dc3a3b9af5a42547c25470e38f7bf6
/man/PE.Rd
2af31a001b75f31c80779a025853bf826c0079c3
[]
no_license
cwmiller21/mvdalab
c29ffb52a41a969a1c2701ccab77901afed2fbd9
e9f1bea960cdf7dd2d0472581f9fe97c30bd6d4f
refs/heads/master
2021-01-18T05:24:46.133643
2016-02-29T10:54:45
2016-02-29T10:54:45
52,802,389
1
0
null
2016-02-29T15:38:22
2016-02-29T15:38:22
null
UTF-8
R
false
false
724
rd
PE.Rd
\name{PE} \alias{PE} \title{Percent Explained Variation of X} \description{This function provides both the cumulative and individual percent explained for the X-block for an \code{mvdareg} and \code{mvdapca} objects.} \usage{PE(object)} \arguments{ \item{object}{ an object of class \code{mvdareg} or \code{mvdapca} objects. } } \details{ This function provides both the cumulative and individual percent explained for the X-block for an \code{mvdareg} or \code{mvdapca} objects. } \author{Nelson Lee Afanador (\email{nelson.afanador@mvdalab.com})} \examples{ mod1 <- plsFit(log.RAI ~., scale = TRUE, data = Penta[, -1], ncomp = 3, contr = "contr.none", method = "bidiagpls", validation = "oob") PE(mod1) }
47a2ec3bbf8de660c0c945e72f4541d17bdaa057
77bbd565e1da3809dbc758dcf75d90ab9b31c855
/man/group_by.fduper.Rd
84cc63c63428db051a36cccc8ccbab2a2c8ab03a
[]
no_license
gmyrland/fduper
21c285e7a7282e41854aafbd084e665c6cccf249
8faba6c9fca284a0016bcdc57f946286080772f7
refs/heads/master
2021-08-14T20:03:03.558646
2017-11-16T16:32:46
2017-11-16T16:32:46
110,996,016
0
0
null
null
null
null
UTF-8
R
false
true
370
rd
group_by.fduper.Rd
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/methods-dplyr.R \name{group_by.fduper} \alias{group_by.fduper} \title{fduper wrapper for dplyr group_by} \usage{ \method{group_by}{fduper}(.data, ...) } \arguments{ \item{.data}{A fduper object} } \value{ The fduper object after processing } \description{ fduper wrapper for dplyr group_by }
e051c42a16fe2d95526d0f2b576f2f54b7acc5bd
af91bcce4d7fa8311f6319ae7e0f1644cc38ee0b
/R/05-mlr-project.R
3a7c0bb25e16630fa5df2253bed15bad360036e9
[ "MIT" ]
permissive
johnmorehouse/2019-feature-selection
c3d67a30dd01f6e51dce839914865ac267e98cb0
5f87df3c80c6cc94826699f15bf89427e7571369
refs/heads/master
2022-01-26T19:02:46.167275
2019-06-15T13:48:25
2019-06-15T13:48:25
null
0
0
null
null
null
null
UTF-8
R
false
false
1,264
r
05-mlr-project.R
# Build an mlrMBO-backed tuning control.
#
# propose.points: number of points proposed per MBO iteration.
# iters:          number of MBO iterations before termination.
# n:              size of the initial latin-hypercube design.
# param_set:      ParamHelpers parameter set to tune over.
# Returns a TuneControlMBO object configured with expected improvement.
tune_ctrl_wrapper <- function(propose.points, iters, n, param_set) {
  ctrl <- makeMBOControl(propose.points = propose.points)
  ctrl <- setMBOControlTermination(ctrl, iters = iters)
  # Expected improvement as the infill criterion.
  ctrl <- setMBOControlInfill(ctrl, crit = crit.ei)
  tune.ctrl <- makeTuneControlMBO(
    mbo.control = ctrl,
    mbo.design = generateDesign(n = n, par.set = param_set))
  # FIX: return the control explicitly; the original function ended on an
  # assignment, so the value was returned only invisibly.
  tune.ctrl
}

# Run a (reproducible, parallelised) hyperparameter tuning with mlr.
#
# learner/task/resampling/measure/par.set/tune.control: standard mlr objects.
# level/cpus: parallelMap level and worker count.
# show.info:  verbosity flag forwarded to tuneParams.
# Returns the TuneResult from tuneParams.
# NOTE(review): ignore() wrapping cpus/show.info is presumably
# drake::ignore() to keep these out of dependency hashing -- confirm.
tune_wrapper <- function(learner, level, cpus, tune.control, show.info,
                         par.set, resampling, measure, task) {
  configureMlr(on.learner.error = "warn", on.error.dump = TRUE)
  parallelStart(mode = "multicore", level = level, cpus = ignore(cpus),
                mc.set.seed = TRUE)
  # Reproducible parallel RNG.
  set.seed(12345, kind = "L'Ecuyer-CMRG")
  xgboost_tuned <- tuneParams(learner, task = task,
                              resampling = resampling,
                              par.set = par.set,
                              control = tune.control,
                              show.info = ignore(show.info),
                              measure = measure)
  parallelStop()
  return(xgboost_tuned)
}

# Train a learner on a task using the hyperparameters found by tuning.
#
# learner:     learner id (character) passed to makeLearner().
# tune_object: TuneResult whose $x holds the tuned hyperparameters.
# task:        mlr task to train on.
# Returns the fitted WrappedModel.
train_wrapper <- function(learner, tune_object, task) {
  lrn_xgboost <- setHyperPars(makeLearner(learner), par.vals = tune_object$x)
  m <- train(lrn_xgboost, task)
  return(m)
}
50b6590798f9e64d124e13284a1826deeedd54eb
73fba0a3dd94cf4da335b3879a63e3c4f4300608
/man/plot.oneway.Rd
3b23f116f0a94c892df7bcd57555079afaa87c23
[]
no_license
kostask84/npar
d5643ac00a4dfa84e10da912cfe53161e43dfdb5
ea298b9a626da0402c577fdabf8a43566917d9ac
refs/heads/master
2021-05-09T17:23:35.633064
2017-02-06T07:41:03
2017-02-06T07:41:03
null
0
0
null
null
null
null
UTF-8
R
false
true
923
rd
plot.oneway.Rd
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/plot.R \name{plot.oneway} \alias{plot.oneway} \title{Plot nonparametric group comparisons} \usage{ \method{plot}{oneway}(x, ...) } \arguments{ \item{x}{an object of class \code{oneway}.} \item{...}{additional arguments passed to the \code{\link{boxplot}} function.} } \description{ \code{plot.oneway} plots nonparametric group comparisons. } \details{ This function plots nonparametric group comparisons created by the \code{\link{oneway}} function using annotated side by side boxplots. Medians and sample sizes are placed at the top of the chart. The overall median is represented by a horizontal dashed line. } \examples{ results <- oneway(hlef ~ region, life) plot(results, col="lightblue", main="Multiple Comparisons", xlab="US Region", ylab="Healthy Life Expectancy at Age 65") } \author{ Rob Kabacoff <rkabacoff@statmethods.net> }
e5556cd5dc07e079807576b7639d4fb2fe656bcc
6d707693f800b611e41b70e1fc42e63b135b566c
/tests/testthat/test-hello.R
f5712ccc540d8e3f82b5f7da02749c75d8b152f6
[]
no_license
gergness/ttjtest
c3136f4b6a6ea10865a8809c93cdb3affa59db3f
408981622846862ca1f8898998cf3a679fed0a11
refs/heads/master
2021-03-04T22:55:33.289240
2020-03-09T15:31:24
2020-03-09T15:31:24
246,073,464
0
0
null
null
null
null
UTF-8
R
false
false
51
r
test-hello.R
# testthat unit test: hello() (defined in the package under test) is
# expected to return 0.
test_that("hello", {
  expect_equal(hello(), 0)
})
83a02ddf786bebea79623b0a60d0b827778448ea
1ef7b4ff34df2633d699d60c0353969c187e5eeb
/clustering.R
20e9de144aecf189ddeab41e8a940468fe1da467
[]
no_license
estefsaenz/MSc_Foundations_of_DataAnalytics
c977b13ba8e4e32de96829425f909e128b3e409c
62243309811afdfa70e39191868ba91f3ea50f44
refs/heads/master
2020-08-21T19:56:55.913183
2019-10-19T16:18:38
2019-10-19T16:18:38
216,234,805
0
0
null
null
null
null
UTF-8
R
false
false
1,903
r
clustering.R
# Clustering analysis of London wards / housing / demographics (CS910 project).
# NOTE(review): the hard-coded absolute setwd() makes this script
# non-portable; kept for fidelity, but consider relative paths instead.
setwd("C:/Users/estef/OneDrive/Warwick - MSc in Data Analytics/CS910 Foundations of Data Analytics/Final Project")

library(ggplot2)
library(plyr)
library(gridExtra)
library(Hmisc)
library(factoextra)

tema <- theme_bw()
theme_set(tema)

# Load the three data sets.
data <- read.csv('london_wards.csv')
housing <- read.csv('london_housing.csv')
demo <- read.csv('london_demo.csv')

# Variables of interest: everything after the first four id columns.
vars.int <- names(data)[-c(1:4)]
lapply(data, class)
# Coerce the analysis columns to numeric (they load as factors/characters).
for (var in vars.int) {
  data[, var] <- as.numeric(as.character(data[, var]))
}
vars.h <- names(housing)[-c(1:4)]
for (var in vars.h) {
  housing[, var] <- as.numeric(as.character(housing[, var]))
}
vars.d <- names(demo)[-c(1:4)]
for (var in vars.d) {
  demo[, var] <- as.numeric(as.character(demo[, var]))
}

# =====================
# SCATTER-PLOT MATRIX
# =====================
# BUG FIX: the original built an expression with parse() but then passed the
# literal formula ~formula to pairs(), so the constructed variable list was
# never used. reformulate() builds the intended ~ var1 + var2 + ... directly.
pairs(reformulate(vars.int), data = data, main = "Simple Scatterplot Matrix")

########
# PCA  (section header said "SVM" in the original, but this is princomp PCA)
########
comps <- princomp(data[, vars.int])
summary(comps)
biplot(comps)
#biplot(comps, choices=c(2,3))

comps.h <- princomp(housing[, vars.h])
summary(comps.h)
biplot(comps.h)
#biplot(comps.h, choices=c(2,3))

comps.d <- princomp(demo[, vars.d])
summary(comps.d)
biplot(comps.d)
biplot(comps.d, choices = c(2, 3))

# ===========
# CLUSTERING
# ===========
# Hierarchical clustering with four linkage methods; five clusters each.
hclust_methods <- c("ward.D", "single", "complete", "average")
list_plot <- list()
for (i in seq_along(hclust_methods)) {
  hc <- hclust(dist(data[, vars.int], method = "euclidean"),
               method = hclust_methods[i])
  #if(i==4){cut=4} else{cut=2}
  grp <- cutree(hc, k = 5)
  list_plot[[i]] <- fviz_cluster(list(data = data[, vars.int], cluster = grp)) +
    ggtitle(paste0('method :', hclust_methods[i]))
}
grid.arrange(list_plot[[1]], list_plot[[2]],
             list_plot[[3]], list_plot[[4]], ncol = 2)

# BUG FIX: method = 'ward' is no longer a valid hclust method (renamed to
# "ward.D"/"ward.D2" in R >= 3.1.0); the partial match is ambiguous and errors.
# NOTE(review): base.seg and vars.int.1 are not defined anywhere in this
# script -- presumably created in another session; confirm before running.
clust.1 <- hclust(dist(base.seg[, vars.int.1]), method = "ward.D")
0eafc693600790598ccd3a5e23d11df6bfca1e43
2a7e77565c33e6b5d92ce6702b4a5fd96f80d7d0
/fuzzedpackages/IceCast/man/get_init_month.Rd
5f5f9585b0390cf3bcbfc3bb484d3a57103bdaed
[]
no_license
akhikolla/testpackages
62ccaeed866e2194652b65e7360987b3b20df7e7
01259c3543febc89955ea5b79f3a08d3afe57e95
refs/heads/master
2023-02-18T03:50:28.288006
2021-01-18T13:23:32
2021-01-18T13:23:32
329,981,898
7
1
null
null
null
null
UTF-8
R
false
true
862
rd
get_init_month.Rd
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/misc.R \name{get_init_month} \alias{get_init_month} \title{Get initialization month} \usage{ get_init_month(month, lag) } \arguments{ \item{month}{forecast month (integer from 1 to 12 corresponding to month of year)} \item{lag}{months in advance prediction is being made (integer from 1 to 11).} } \value{ integer corresponding to the initialization month } \description{ Determine initialization month based on month being forecast and lag. Considers lags up to 11 months in advance. } \details{ Note that this calculation assumes that the prediction for a month is on its first day. This differs from the labeling used in Director et al. (2017) which rounds up to the nearest full month. } \examples{ init_month <- get_init_month(month = 10, lag = 4) init_month }
c805245d4331fabb23e5e92270d4df11f1b14a00
24415cd8d6f4d92af14d07e905cb11f007c0ebfd
/man/meta_ghap.Rd
550190ccc80ac11993cb5685d48e73734940ff45
[]
no_license
yonicd/shinyghap
428d2edeccfbbee276d376d81dfed221fd76eded
e27cc1b42bb0d68ff0fe9f724151856ed17bcd58
refs/heads/master
2021-07-14T13:19:33.398017
2017-10-18T17:03:55
2017-10-18T17:03:55
107,418,933
0
0
null
null
null
null
UTF-8
R
false
true
1,865
rd
meta_ghap.Rd
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/meta_ghap-data.R \docType{data} \name{meta_ghap} \alias{meta_ghap} \title{Meta data that search_ghap is run against.} \format{A data frame with 9209 rows and 23 variables: \describe{ \item{\code{DOMAIN}}{character COLUMN_DESCRIPTION} \item{\code{STUDY_ID}}{character COLUMN_DESCRIPTION} \item{\code{STUDY_VARIABLE}}{character COLUMN_DESCRIPTION} \item{\code{STUDY_VARIABLE_DESCRIPTION}}{character COLUMN_DESCRIPTION} \item{\code{DOMAIN_DESCRIPTION}}{character COLUMN_DESCRIPTION} \item{\code{DOMAIN_STRUCTURE}}{character COLUMN_DESCRIPTION} \item{\code{STUDY_ID_SHORT}}{character COLUMN_DESCRIPTION} \item{\code{STUDY_DESCRIPTION_SHORT}}{character COLUMN_DESCRIPTION} \item{\code{STUDY_DESCRIPTION}}{character COLUMN_DESCRIPTION} \item{\code{STUDY_ID_ALTERNATE}}{character COLUMN_DESCRIPTION} \item{\code{STUDY_SUBJECT_COUNT}}{integer COLUMN_DESCRIPTION} \item{\code{STUDY_COUNTRY}}{character COLUMN_DESCRIPTION} \item{\code{REPOSITORY_DATA_STATUS}}{character COLUMN_DESCRIPTION} \item{\code{STUDY_TYPE}}{character COLUMN_DESCRIPTION} \item{\code{STUDY_INTERVENTION_TYPE}}{character COLUMN_DESCRIPTION} \item{\code{STUDY_AGE_LOWER_LIMIT}}{character COLUMN_DESCRIPTION} \item{\code{STUDY_AGE_UPPER_LIMIT}}{character COLUMN_DESCRIPTION} \item{\code{STUDY_START_YEAR}}{integer COLUMN_DESCRIPTION} \item{\code{STUDY_STOP_YEAR}}{integer COLUMN_DESCRIPTION} \item{\code{STUDY_POPULATION}}{character COLUMN_DESCRIPTION} \item{\code{STUDY_REPOSITORY_NAME}}{character COLUMN_DESCRIPTION} \item{\code{REPOSITORY_SUBFOLDER}}{character COLUMN_DESCRIPTION} \item{\code{STUDY_URL}}{character COLUMN_DESCRIPTION} }} \usage{ meta_ghap } \description{ Meta data that is built from cross_sectional and longitudinal repositories on ghap } \keyword{datasets}
6547d4b7cb486c1e2e45678e5ebea3b53e600561
d0039bdf133f9027a5fa06e5a5b82cbc16b62791
/R/remove_model.R
774d6e541615870c289f6d4ac206283b9ad7fad1
[]
no_license
smaakage85/modelgrid
1fca104d8ffa147e3708a03c42990cbfd26b3ec2
c392d558e77f5526b6285e47c9a7222370a435d1
refs/heads/master
2021-07-23T11:02:39.940768
2018-11-04T07:47:41
2018-11-04T07:47:41
136,794,655
26
6
null
2018-07-05T19:45:37
2018-06-10T09:12:55
R
UTF-8
R
false
false
1,656
r
remove_model.R
#' Remove model from model grid
#'
#' Removes an individual model specification from a model grid. If the model
#' has been trained, the fitted model will also be deleted.
#'
#' @param model_grid \code{model_grid}
#' @param model_name \code{character}, the unique name (as set by the user) of
#' the model, which will be removed from a model grid.
#'
#' @return \code{model_grid}
#' @export
#'
#' @examples
#' library(magrittr)
#'
#' # Pre-allocate empty model grid.
#' mg <- model_grid()
#'
#' # Add random forest model.
#' mg <-
#'   mg %>%
#'   add_model(model_name = "Random Forest Test", method = "rf", tuneLength = 5)
#'
#' # Remove random forest model again.
#' remove_model(mg, model_name = "Random Forest Test")
remove_model <- function(model_grid, model_name) {

  # Validate the inputs before modifying anything.
  if (!inherits(model_grid, "model_grid")) {
    stop("The 'model_grid' must inherit from the 'model_grid' class.")
  }
  if (length(model_grid$models) == 0) {
    stop("no model specifications have been defined within the model grid.")
  }
  # The requested model must actually be present in the grid.
  if (!(model_name %in% names(model_grid[["models"]]))) {
    stop("there is no model with that name in the model_grid.")
  }

  # Drop the model specification by name.
  keep <- names(model_grid$models) != model_name
  model_grid$models <- model_grid$models[keep]

  # If a trained fit is stored under the same name, drop it too and tell
  # the user.
  if (model_name %in% names(model_grid[["model_fits"]])) {
    model_grid$model_fits <-
      model_grid$model_fits[names(model_grid$model_fits) != model_name]
    message("Model fit for ", model_name, " has been deleted.")
  }

  # Hand back the updated grid.
  model_grid
}
c9fa5dd7df62eea9c4ef7ce76ec8acd09f244553
e3d23d0a9087e89edf22e61f5d8de0a79b4d8ccb
/EU_Scandinavia_Demographics/Git_012_src_cd_densityEU.R
c04ec5e824fbda64abe939dc39fbf4d7e5b19940
[]
no_license
benksam/Dashboard_Sample_Collection
cd4326fcee94e68b8aa93f4eedbc4c62371b3b3a
78270851055012e5f6ebfec54cb00cd514ccab9b
refs/heads/master
2020-08-28T22:32:52.518043
2019-10-28T09:41:39
2019-10-28T09:41:39
217,840,348
0
0
null
null
null
null
UTF-8
R
false
false
5,353
r
Git_012_src_cd_densityEU.R
demo_r_d3dens = read.csv("1_RawData/demo_r_d3dens.csv") demo_r_d3dens_EU17 = demo_r_d3dens %>% filter(time == 2017) %>% filter(geo %in% EU_all) %>% select(-time, unit) demo_r_d3dens_EU17r = demo_r_d3dens_EU17 %>% filter(geo %in% EU_id) rdensity_EU = nuts03g %>% right_join(demo_r_d3dens_EU17r, by = c("id" = "geo")) %>% select(id, NUTS_NAME,values) # Extra data wrangling for COUNTRY CENTROIDS joined to Regional Density data demo_r_d3dens_EU17c = demo_r_d3dens_EU17 %>% filter(geo %in% countries) rdensity_EUc = nuts03g %>% right_join(demo_r_d3dens_EU17c, by = c("id" = "geo")) %>% select(id, NUTS_NAME, values) # Country CENTROIDS ccoord = rdensity_EUc %>% st_centroid() %>% select(geometry) %>% as_data_frame() %>% separate(geometry, into = c("clng","clat", "void"), sep = " ") %>% select(1,2) %>% mutate(clng = parse_number(clng), clat = parse_number(clat)) cmergid = bind_cols(as_tibble(rdensity_EUc$id), ccoord) %>% rename(id = value) rdensity_EUc = rdensity_EUc %>% left_join(cmergid, by = "id") # Merging centroid clng/clat of Country-NUTS0 with NUTS3 data nuts3gEU = nuts3gEU %>% left_join(rdensity_EUc %>% st_drop_geometry() %>% select(id, clng, clat), by = c("CNTR_CODE" = "id")) dfnuts3gEU = nuts3gEU %>% st_drop_geometry() %>% select(id, CNTR_CODE, clng, clat) rdensity_EUaug = rdensity_EU %>% left_join(dfnuts3gEU, by= "id") %>% mutate(CNTR_CODE = as_factor(CNTR_CODE)) # glimpse(rdensity_EUaug) ### Summary density data Europe_sum = rdensity_EUaug %>% st_drop_geometry() %>% select(ID = id, Region = NUTS_NAME, Density = values) %>% summarise(Average = mean(Density), Median = median(Density), Minimum = min(Density), Maximum = max(Density), Range = diff(range(Density)), Standard_Deviation = sd(Density)) row.names(Europe_sum) = "Europe" Scandinavia_sum = rdensity_EUaug %>% st_drop_geometry() %>% select(ID = id, Region = NUTS_NAME, Density = values, Country = CNTR_CODE) %>% filter(Country %in% sk) %>% summarise(Average = mean(Density, na.rm = TRUE), Median = median(Density, 
na.rm = TRUE), Minimum = min(Density, na.rm = TRUE), Maximum = max(Density, na.rm = TRUE), Range = diff(range(Density, na.rm = TRUE)), Standard_Deviation = sd(Density, na.rm = TRUE)) row.names(Scandinavia_sum) = "Scandinavia" Sweden_sum = rdensity_EUaug %>% st_drop_geometry() %>% select(ID = id, Region = NUTS_NAME, Density = values, Country = CNTR_CODE) %>% filter(Country %in% "SE") %>% summarise(Average = mean(Density, na.rm = TRUE), Median = median(Density, na.rm = TRUE), Minimum = min(Density, na.rm = TRUE), Maximum = max(Density, na.rm = TRUE), Range = diff(range(Density, na.rm = TRUE)), Standard_Deviation = sd(Density, na.rm = TRUE)) row.names(Sweden_sum) = "Sweden" Norway_sum = rdensity_EUaug %>% st_drop_geometry() %>% select(ID = id, Region = NUTS_NAME, Density = values, Country = CNTR_CODE) %>% filter(Country %in% "NO") %>% summarise(Average = mean(Density, na.rm = TRUE), Median = median(Density, na.rm = TRUE), Minimum = min(Density, na.rm = TRUE), Maximum = max(Density, na.rm = TRUE), Range = diff(range(Density, na.rm = TRUE)), Standard_Deviation = sd(Density, na.rm = TRUE)) row.names(Norway_sum) = "Norway" Denmark_sum = rdensity_EUaug %>% st_drop_geometry() %>% select(ID = id, Region = NUTS_NAME, Density = values, Country = CNTR_CODE) %>% filter(Country %in% "DK") %>% summarise(Average = mean(Density, na.rm = TRUE), Median = median(Density, na.rm = TRUE), Minimum = min(Density, na.rm = TRUE), Maximum = max(Density, na.rm = TRUE), Range = diff(range(Density, na.rm = TRUE)), Standard_Deviation = sd(Density, na.rm = TRUE)) row.names(Denmark_sum) = "Denmark" Finland_sum = rdensity_EUaug %>% st_drop_geometry() %>% select(ID = id, Region = NUTS_NAME, Density = values, Country = CNTR_CODE) %>% filter(Country %in% "FI") %>% summarise(Average = mean(Density, na.rm = TRUE), Median = median(Density, na.rm = TRUE), Minimum = min(Density, na.rm = TRUE), Maximum = max(Density, na.rm = TRUE), Range = diff(range(Density, na.rm = TRUE)), Standard_Deviation = sd(Density, 
na.rm = TRUE)) row.names(Finland_sum) = "Finland" Iceland_sum = rdensity_EUaug %>% st_drop_geometry() %>% select(ID = id, Region = NUTS_NAME, Density = values, Country = CNTR_CODE) %>% filter(Country %in% "IS") %>% summarise(Average = mean(Density, na.rm = TRUE), Median = median(Density, na.rm = TRUE), Minimum = min(Density, na.rm = TRUE), Maximum = max(Density, na.rm = TRUE), Range = diff(range(Density, na.rm = TRUE)), Standard_Deviation = sd(Density, na.rm = TRUE)) row.names(Iceland_sum) = "Iceland" EUDensity_Sum = rbind(Europe_sum, Scandinavia_sum, Sweden_sum, Norway_sum, Denmark_sum, Finland_sum, Iceland_sum) EUDensity_Sum = EUDensity_Sum %>% select(1:4,6,5) %>% rownames_to_column(var = "Country")
541122493d2f6f30abc387a4766192c769ad5efe
cc753024b88f646602a35732754cf8d41f71f4a5
/man/score.prior.Rd
6a6084abf1158a539962a92e35be8f5337a50766
[]
no_license
jeeyoungmoon0115/QTLnet.prior
b930eb1654b91a1f63b184973558d4798e7e26fd
490984dca16b78fb813b75f25ac251ee271953e2
refs/heads/master
2016-08-07T12:15:42.936211
2014-11-27T04:28:39
2014-11-27T04:28:39
27,209,158
0
0
null
null
null
null
UTF-8
R
false
false
1,975
rd
score.prior.Rd
\name{score.prior} \alias{score.prior} %- Also NEED an '\alias' for EACH other topic documented here. \title{ Log of the numerator in prior probability of phenotype networks } \description{ The prior probablity of phenotype networks is defined to be \eqn{P(G_Y | B, beta) \propto exp(-\sum_j beta[j] * |B[[j]]-M|)}. The normalizing term (denominator) for this probability is called a partition function. This function \code{score.prior} calculates the log of the numerator and \code{\link[qtlnet.prior]{partition.eff}} approximately calculates the log of the denominator. } \usage{ score.prior(M, beta, B) } %- maybe also 'usage' for other objects documented here. \arguments{ \item{M}{ Adjacency matrix. } \item{beta}{ A vector of beta values. \code{beta[j]} corresponds to \code{B[[j]]}. } \item{B}{ A list of matrices to encode biologicla knowledge. Each matrix (\code{B[[j]]}) corresponds to one type of biological knowledge. } } \details{ If beta is close to 0, the prior is close to a uniform distribution and hence the contribution of biological knowledge is negligible. If \eqn{beta -> infinity}, the prior puts the most of probability on the structure closest to B. } \value{ It returns a numeric value : \eqn{-1 * \sum_j beta[j] * |M - B[[j]]|}. } \author{ Jee Young Moon and Brian S. Yandell } \note{ %% ~~further notes~~ } %% ~Make other sections like Warning with \section{Warning }{....} ~ \references{ Werhli, A. V. and Husmeier, D. (2007) Reconstructing gene regulatory networks with Bayesian networks by combining expression data with multiple sources of prior knowledge. \emph{Stat. Appl. Genet. Molec.} \bold{6}:15. } \seealso{ \code{\link[qtlnet.prior]{partition.eff}} } \examples{ M.new <- diag(5) M.new[1,2] <- M.new[2,3] <- 1 diag(M.new) <- 1 beta <- 1.2 B <- M.new B[1,2] <- 0.8 B[2,3] <- 0.7 B <- list(B) M <- diag(5) M[3,4] <- M[4,5] <- 1 diag(M) <- 0 score.prior(M.new, beta, B) score.prior(M, beta, B) }
4749b3e128eaac813407eb3f4002ec7eeb10be63
600c3810ffef12cdb2080dff2ef4449b11241202
/run_analysis.R
918e2dc7af8e236d381a48b148c535840cbf577e
[]
no_license
JPCarroll17/GetCleanDataProj
3a7f2f7c21e5980d7a72e322cf471084e138922d
41ea66976ff5a83e5bcbcf1ac2593285cb0c68d0
refs/heads/master
2021-01-22T11:12:01.909962
2017-05-28T18:33:19
2017-05-28T18:33:19
92,675,830
0
0
null
null
null
null
UTF-8
R
false
false
2,558
r
run_analysis.R
##JCarroll ##Assignment Coursera Getting and Cleaning Data ##Read in, merge, clean and aggregate data from multiple users of activity monitors. ##5/28/2017 ##Set working directory to location of data downloaded from: ##https://d396qusza40orc.cloudfront.net/getdata%2Fprojectfiles%2FUCI%20HAR%20Dataset.zip setwd("/Users/JPCarroll/Desktop/Coursera/Data_Science_Coursera/GetCleanData/Project/Data") #import necessary libraries library(plyr) library(stringr) #Read in data labels Features=read.table("./UCI HAR Dataset/features.txt", col.names = c("Feat_Num","Feature" )) Act_Labels=read.table("./UCI HAR Dataset/activity_labels.txt", col.names = c("Activity","Activity_Label" )) #read in testing data XTest=read.table("./UCI HAR Dataset/test/X_test.txt", header = FALSE) YTest=read.table("./UCI HAR Dataset/test/Y_test.txt", col.names = "Activity"); SubTest=read.table("./UCI HAR Dataset/test/subject_test.txt", header = FALSE, col.names = "Subject"); #read in training data XTrain=read.table("./UCI HAR Dataset/Train/X_Train.txt", header = FALSE) YTrain=read.table("./UCI HAR Dataset/Train/Y_Train.txt", header = FALSE, col.names = "Activity"); SubTrain=read.table("./UCI HAR Dataset/Train/subject_Train.txt", header = FALSE, col.names = "Subject"); #Combine Training and Testing data XALL<-rbind(XTest,XTrain) YALL<-rbind(YTest,YTrain) SubALL<-rbind(SubTest,SubTrain) Combined_ALL<-cbind(SubALL,YALL,XALL) #Remove datasets that are no longer needed rm(XTest,YTest,XTrain,YTrain,SubTest,SubTrain,SubALL,YALL,XALL) #Update column and row names names(Combined_ALL)[3:length(Combined_ALL)[1]]<-t(Features[2]) row.names(Combined_ALL)<-1:dim(Combined_ALL)[1] #Apply activity labels to data Combined_ALL$Activity<-factor(Combined_ALL$Activity,levels=Act_Labels$Activity, labels=Act_Labels$Activity_Label) #Keep only mean and Standard Deviation records Final<-Combined_ALL[grepl("Subject|Activity|[mM]ean\\(\\)|[sS]td\\(\\)",names(Combined_ALL))]#includes only mean and std records #Remove datasets that are 
no longer needed rm(Act_Labels, Features, Combined_ALL) #Give appropriate names to variables names(Final) <- gsub('^t',"Time_",names(Final)) names(Final) <- gsub('^f',"Frequency_",names(Final)) names(Final) <- gsub('-',"_",names(Final)) names(Final) <- gsub('\\(\\)',"",names(Final)) #Aggregate data by Subject and Activity using mean Tidy_data<-aggregate(. ~Subject + Activity, Final, mean) #write out aggregated data to working directory write.table(Tidy_data, file = "Tidy.txt", append = FALSE, quote = TRUE, sep = " ",row.names = FALSE)
51f85967ce548929080b2522015fbed7f665a273
e3ce3ad557ebd51429ed7acfea936723149a8d4c
/man/snof.Rd
abdc7ae599c9795ef023ebfab07f614802c5591d
[]
permissive
jakobbossek/smoof
87512da9d488acfe3a7cc62aa3539a99e82d52ba
d65247258fab57d08a5a76df858329a25c0bb1b8
refs/heads/master
2023-03-20T02:05:12.632661
2023-03-08T13:59:27
2023-03-08T13:59:27
22,465,741
32
27
BSD-2-Clause
2022-01-21T10:02:19
2014-07-31T10:39:43
R
UTF-8
R
false
true
4,578
rd
snof.Rd
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/snof.R \name{snof} \alias{snof} \title{Helper function to create numeric single-objective optimization test function.} \usage{ snof( name = NULL, id = NULL, par.len = NULL, par.id = "x", par.lower = NULL, par.upper = NULL, description = NULL, fn, vectorized = FALSE, noisy = FALSE, fn.mean = NULL, minimize = TRUE, constraint.fn = NULL, tags = character(0), global.opt.params = NULL, global.opt.value = NULL, local.opt.params = NULL, local.opt.values = NULL ) } \arguments{ \item{name}{[\code{character(1)}]\cr Function name. Used for the title of plots for example.} \item{id}{[\code{character(1)} | \code{NULL}]\cr Optional short function identifier. If provided, this should be a short name without whitespaces and now special characters beside the underscore. Default is \code{NULL}, which means no ID at all.} \item{par.len}{[\code{integer(1)}]\cr Length of parameter vector.} \item{par.id}{[\code{character(1)}]\cr Optional name of parameter vector. Default is \dQuote{x}.} \item{par.lower}{[\code{numeric}]\cr Vector of lower bounds. A single value of length 1 is automatically replicated to \code{n.pars}. Default is -Inf.} \item{par.upper}{[\code{numeric}]\cr Vector of upper bounds. A singe value of length 1 is automatically replicated to \code{n.pars}. Default is Inf.} \item{description}{[\code{character(1)} | \code{NULL}]\cr Optional function description.} \item{fn}{[\code{function}]\cr Objective function.} \item{vectorized}{[\code{logical(1)}]\cr Can the objective function handle \dQuote{vector} input, i.~e., does it accept matrix of parameters? Default is \code{FALSE}.} \item{noisy}{[\code{logical(1)}]\cr Is the function noisy? Defaults to \code{FALSE}.} \item{fn.mean}{[\code{function}]\cr Optional true mean function in case of a noisy objective function. 
This functions should have the same mean as \code{fn}.} \item{minimize}{[\code{logical(1)}]\cr Set this to \code{TRUE} if the function should be minimized and to \code{FALSE} otherwise. The default is \code{TRUE}.} \item{constraint.fn}{[\code{function | NULL}]\cr Function which returns a logical vector indicating whether certain conditions are met or not. Default is \code{NULL}, which means, that there are no constraints beside possible box constraints defined via the \code{par.set} argument.} \item{tags}{[\code{character}]\cr Optional character vector of tags or keywords which characterize the function, e.~g. \dQuote{unimodal}, \dQuote{separable}. See \code{\link{getAvailableTags}} for a character vector of allowed tags.} \item{global.opt.params}{[\code{list} | \code{numeric} | \code{data.frame} | \code{matrix} | \code{NULL}]\cr Default is \code{NULL} which means unknown. Passing a \code{numeric} vector will be the most frequent case (numeric only functions). In this case there is only a single global optimum. If there are multiple global optima, passing a numeric \code{matrix} is the best choice. Passing a \code{list} or a \code{data.frame} is necessary if your function is mixed, e.g., it expects both numeric and discrete parameters. Internally, however, each representation is casted to a \code{data.frame} for reasons of consistency.} \item{global.opt.value}{[\code{numeric(1)} | \code{NULL}]\cr Global optimum value if known. Default is \code{NULL}, which means unknown. If only the \code{global.opt.params} are passed, the value is computed automatically.} \item{local.opt.params}{[\code{list} | \code{numeric} | \code{data.frame} | \code{matrix} | \code{NULL}]\cr Default is \code{NULL}, which means the function has no local optima or they are unknown. For details see the description of \code{global.opt.params}.} \item{local.opt.values}{[\code{numeric} | \code{NULL}]\cr Value(s) of local optima. Default is \code{NULL}, which means unknown. 
If only the \code{local.opt.params} are passed, the values are computed automatically.} } \description{ This is a simplifying wrapper around \code{\link{makeSingleObjectiveFunction}}. It can be used if the function to generte is purely numeric to save some lines of code. } \examples{ # first we generate the 10d sphere function the long way fn = makeSingleObjectiveFunction( name = "Testfun", fn = function(x) sum(x^2), par.set = makeNumericParamSet( len = 10L, id = "a", lower = rep(-1.5, 10L), upper = rep(1.5, 10L) ) ) # ... and now the short way fn = snof( name = "Testfun", fn = function(x) sum(x^2), par.len = 10L, par.id = "a", par.lower = -1.5, par.upper = 1.5 ) }
6a2cfaf8533c9c558f18e19cee126f2353d673ae
0b55ce5f483e63126661e7d54f48912074ff62ca
/cap05/arrange03.R
ddf7dbbace4c2a44fba8c98d9a6d79987b412130
[ "CC0-1.0" ]
permissive
vcwild/r4ds
645233bbb4a366e6537184669d5e59841691992d
b916782637d5940d35b16e1dfa1eaf961cab34b0
refs/heads/master
2022-06-28T01:48:44.703215
2020-05-07T02:26:01
2020-05-07T02:26:01
255,899,071
0
0
null
null
null
null
UTF-8
R
false
false
30
r
arrange03.R
(arrange(flights, dep_delay))
3a1ec653a7e4b9a17748520088e5aa2d85b20ed9
bd934086a13e48de72fb819051537ba6e9ed3842
/man/hybrid.Rd
5c4543ae67ad1a41dff29ca0b181c8618b778002
[]
no_license
cran/endogenous
5a35f5fc443db36dd56dedb5f1988a0af0a70ea5
bc4d32242ab79ef0a0e92d906210aaa9f902c1cd
refs/heads/master
2021-06-06T10:07:19.044862
2016-10-29T10:48:20
2016-10-29T10:48:20
72,274,716
0
0
null
null
null
null
UTF-8
R
false
false
5,674
rd
hybrid.Rd
\name{hybrid} \alias{hybrid} \alias{print.hybrid} \alias{summary.hybrid} \title{ Hybrid model with structural shift (permits covariate-specific treatment effects) } \description{ James Heckman's Hybrid Model with Structural Shift (also known as the Treatment Effects Model). Jointly models outcome regression model and endogenous variable probit model (e.g., outcome associations in the presence of endogenous treatment in observational data). Can handle clustered data. Accommodates treatment effect modification based on observable covariates. } \usage{ ## S3 method for class "hybrid" hybrid(outcome, probit, modifiers = NULL, init = NULL, id = NULL, se = "R") } %- maybe also 'usage' for other objects documented here. \arguments{ \item{outcome}{ an object of class "formula" with a numeric vector on the left hand side, and predictors of interest on the right hand side. } \item{probit}{ an object of class "formula" with a binary \code{(0/1)} numeric vector on the left hand side (1 indicating medication use), and predictors of medication use on the right hand side (right hand side permitted to contain variables on the right hand side of the outcome equation). } \item{modifiers}{ an object of class "formula" with a binary numeric vector indicating medication use on the left hand side, and treatment effect modifiers on the right hand side. If effect modifiers are treatment group specific (e.g., medication dose), set the effect modifier variables to zero for the untreated observations. If any other numeric values are used, they will ultimately be zet to zero. If \code{NULL}, the average treatment effect will be estimated under the assumption of no effect modification. } \item{init}{ a vector of initial values. The ordering of subparameters is: \code{alpha} (probit model parameters), \code{beta} (outcome model parameters), \code{eta} (an intercept, with or without effect mofidier paramters), \code{sigmay} (outcome error standard deviation), \code{rho} (error correlation). 
If \code{NULL}, an initial value will be chosen through OLS linear regression and probit-link GLM without regard to endogeneity. } \item{id}{ a numeric vector indicating subject IDs if data are clustered. In the absence of clustered data, this can be left blank (defaults to \code{NULL}). } \item{se}{ a string, either \code{"M"} for model-based standard errors (based on inverse observed Fisher information), or \code{"R"} for robust standard errors (based on methods of Huber and White). Defaults to \code{"R"}. If \code{id} is provided for clustered data, the cluster-robust variance estimator (with working independence) will be used even if the user specifies type \code{"M"}. } } \details{ The model is evaluated with numerical minimization of the negative log-likelihood (the \code{BFGS} is used). The probit model and error correlation parameters are weakly identified and hence the error variance is set at unity. The data must be complete (no missing values) and numeric, with the exception of factors, which may be used on the right hand side of equations. } \value{ \code{hybrid} prints a summary of the coefficient estimates, standard errors, Wald-based confidence intervals, and p-values for the outcome model, the treatment effects (and potentially effect modifiers), and the medication use probit model. 
\item{alpha}{estimate of the medication use probit model parameters.} \item{beta}{estimate of the outcome model parameters.} \item{eta}{estimate of the treatment effect, with or without effect modifier parameters.} \item{sigma}{estimate of the standard deviation of the outcome error.} \item{rho}{estimate of the correlation between the errors.} \item{vcov}{entire estimated variance-covariance matrix, provided if the user wishes to perform any more specific hypothesis tests.} \item{init}{initial value ultimately used, whether specified by the user or generated through the default approach.} \item{fitted}{vector of fitted outcome values.} \item{call}{the matched call.} \item{out.form}{the formula used for the outcome model.} \item{prob.form}{the formula used for the medication use probit model.} \item{tx.form}{the formula used for the treatment effects model (potentially with effect modifiers).} \item{sterr}{the choice of the variance estimate procedure (either model-based or robust).} \item{labels}{labels for predictors to be passed into output.} } \author{ Andrew J. Spieker, Ph.D. } \references{ Heckman JJ. Dummy endogenous variables in a simultaneous equation system. \emph{Econometrica} 46(4), 931-959. Maddala GS. \emph{Limited-dependent and qualitative variables in econometrics.} Cambridgeshire: Cambridge University Press; 1983. Spieker AJ, Delaney JAC, and McClelland RL. Evaluating the treatment effects model for estimation of cross-sectional associations between risk factors and cardiovascular biomarkers influenced by medication use. \emph{Pharmacoepidemiology and Drug Safety} 24(12), 1286-1296. 
} \examples{ #- Generate Data -# require(mvtnorm) set.seed(1) N <- 2000 X1 <- rnorm(N, 0, 1); X2 <- rnorm(N, 0, 1); X3 <- rnorm(N, 0, 1); errors <- rmvnorm(N, sigma = 50*matrix(c(1, 0.5, 0.5, 1), nrow = 2)) Y <- 50 + X1 + X2 + errors[,1] Z <- rep(0, N) Z[(-5 + X1 + X3 + errors[,2]) > 0] <- 1 Y[Z == 1] <- Y[Z == 1] - 0.5*X1[Z == 1] #- Estimate Model with No Effect Modification -# hybrid(Y ~ X1 + X2, probit = Z ~ X1 + X3) #- Estimate Model with Effect Modification -# hybrid(Y ~ X1 + X2, probit = Z ~ X1 + X3, modifiers = Z ~ X1) #- Estimate Model with Effect Modification and Model-Based Variance -# hybrid(Y ~ X1 + X2, probit = Z ~ X1 + X3, modifiers = Z ~ X1, se = "M") }
694fced135e0dc6ad65e95c1f89bebcc587dfc75
553992ae66d19695a240b2c8df4357b09f99bb69
/Misc/R_RStudio_Workflow/R/ExampleScripts/5_VisualizeData.R
0bc4ba31dfc7b7571cb4bcd080e365e61deb1589
[]
no_license
Alfiew/Workshops
839ec14d5c4b95cd39474044e9bdb2946d2dece9
4ac40823e13ed285bcabc44eb4449d4d1be4cd05
refs/heads/master
2023-04-19T05:48:15.096172
2021-04-27T01:12:07
2021-04-27T01:12:07
null
0
0
null
null
null
null
UTF-8
R
false
false
1,606
r
5_VisualizeData.R
# visualize three ways ## first a simple scatter plot in three ways ## second we'll do delays (x) by the categories ## third is a multivariate analysis of counts ## simple scatter plot plot(ttc_streetcar_delays_Jan2019$Delay, ttc_streetcar_delays_Jan2019$Gap) ### this makes much more sense than just a correlation ## ggplot of gap vs. delay ttc_streetcar_delays_Jan2019 %>% ggplot(aes(x = Delay, y = Gap)) + geom_point() ttc_streetcar_delays_Jan2019 %>% ggplot(aes(x = Delay, y = Gap, color = Day)) + geom_point() ttc_streetcar_delays_Jan2019 %>% ggplot(aes(x = Delay, y = Gap, color = Day)) + geom_point() + facet_wrap(~Day) ## ggplot of gap vs. delay faceted by day of the week ## now some dot plots ttc_streetcar_delays_Jan2019 %>% ggplot(aes(x = Delay, y = Route)) + geom_point() ttc_streetcar_delays_Jan2019 %>% ggplot(aes(x = Delay, y = Day)) + geom_point() ttc_streetcar_delays_Jan2019 %>% ggplot(aes(x = Delay, y = Incident)) + geom_point() ### not a particularly good idea but shows the flexibility ttc_streetcar_delays_Jan2019 %>% ggplot(aes(x = Delay, y = Route, color = Day)) + geom_point() + facet_wrap(~Incident) ## Correspondence Analysis of route x day route_by_day_counts <- table(ttc_streetcar_delays_Jan2019$Route,ttc_streetcar_delays_Jan2019$Day) ## we won't use ExPosition's graphing utility expo_ca_results <- ExPosition::epCA(route_by_day_counts, graphs = F) ## instead we'll use factoextra factoextra::fviz_ca_biplot(expo_ca_results) ## don't ride the 506 on Thursdays, nor the 512 on Fridays ## Tuesdays: a mess.
f1c7427abd5afc1de846dce0f7f20c41dc409c6c
dd4cece7e7b3626c2c511acbfa21d64f15f9ae4f
/run_analysis.R
f37da4781f9ed183986a3b4c4b31bd107aeb0891
[]
no_license
jo07/getting-and-cleaning-data-coursre-project
13e42f382a3ae831ecf4cd5dcd5893f0fd91e443
ed6bc3a44285ae7bd2ef1a89155e6032be45671c
refs/heads/master
2021-01-19T22:28:51.241016
2014-10-26T04:00:33
2014-10-26T04:00:33
null
0
0
null
null
null
null
UTF-8
R
false
false
4,282
r
run_analysis.R
# this program only invokes one function, run(), which in turn invokes other functions in order to complete a series of steps. Its end result is a tidy dataset called tidy_data.txt # this function takes a set of file paths, using them to read a feature dataset (training or testing), gives the columns descriptive names by using the features dataset, adds in associated activities as a column from the activities dataset, adds another column with the associated activity labels from the activities label dataset, adds the associated subjects as a column from the activities dataset, and then return the related dataset build_related_data_set <- function(data_file_path, features_file_path, activities_file_path, activity_labels_file_path, subjects_file_path) { # Read data data <- read.table(data_file_path) # Read features features <- read.table(features_file_path, col.names = c("feature_id", "feature")) # Name data columns with features colnames(data) <- features$feature # Read activities activities <- read.table(activities_file_path, col.names = c("activity")) # Add activity class labels column to data data$activity <- as.factor(activities$activity) # Read activity labels activity_labels <- read.table(activity_labels_file_path, col.names = c("activity", "activity_label")) # Add activity name labels column to data data_with_activity_labels <- join(x = data, y = activity_labels, by = "activity") # Read subjects subjects <- read.table(subjects_file_path, col.names = c("subject")) # Make subject a factor subjects$subject <- as.factor(subjects$subject) # Add subjects column to data cbind(subjects, data_with_activity_labels) } # this function takes the training and testing sets and returns a merged dataset merge_data_sets <- function(training_data, testing_data) { # Bind the two data sets on row rbind(training_data, testing_data) } # This function takes a dataset, extracts all the variables related to mean and standard deviation, and returns the filtered dataset. 
extract_mean_and_std_variables <- function(data, features_file_path) { # Read features features <- read.table(features_file_path, col.names = c("feature_id", "feature")) # Create a logical vector of variables that contain "mean" or "std" (as well as the columns added by this program) mean_and_std_variables <- grepl("subject|activity|activity_label|mean\\(\\)|std\\(\\)", colnames(data)) # Subset data with logical vector data[,mean_and_std_variables] } # this function takes a dataset, groups all the subjects together, then groups all the activities together, calculates a mean across all variables, and return this dataset produce_data_with_mean_per_feature_per_activity_per_subject <- function(trimmed_data) { library(plyr) ddply(trimmed_data, .(subject, activity, activity_label), numcolwise(mean)) } # this function downloads the raw data (if it hasn't been downloaded already), builds the related testing and training datasets, merge them, extract the mean and standard deviation variables, calculate a mean across all features per subject per activity, and write the resulting data set to a file called tidy_data.txt run <- function() { if (!file.exists("./UCI HAR Dataset")) { # download the data file_url <- "https://d396qusza40orc.cloudfront.net/getdata%2Fprojectfiles%2FUCI%20HAR%20Dataset.zip" file_name <- "UCI HAR Dataset.zip" download.file(file_url, file_name, method = "curl") # unzip file unzip(file_name) } training_data <-build_related_data_set("./UCI HAR Dataset/train/X_train.txt", "./UCI HAR Dataset/features.txt", "./UCI HAR Dataset/train/y_train.txt", "./UCI HAR Dataset/activity_labels.txt", "./UCI HAR Dataset/train/subject_train.txt") testing_data <- build_related_data_set("./UCI HAR Dataset/test/X_test.txt", "./UCI HAR Dataset/features.txt", "./UCI HAR Dataset/test/y_test.txt", "./UCI HAR Dataset/activity_labels.txt", "./UCI HAR Dataset/test/subject_test.txt") merged_data <- merge_data_sets(training_data, testing_data) trimmed_data <- 
extract_mean_and_std_variables(merged_data, "./UCI HAR Dataset/features.txt") data_with_mean_per_feature_per_activity_per_subject <- produce_data_with_mean_per_feature_per_activity_per_subject(trimmed_data) write.table(data_with_mean_per_feature_per_activity_per_subject, "./tidy_data.txt") } # invokes the main function run();
0dbb689290934843ba1191ef17d0cccb002e6410
fc24092b78fdea4c04058cd2618f74fc1ff8b107
/man/diseases.Rd
d4247f9b6e4d6bd1ca9fefa08c771e94e599a123
[]
no_license
viparc/viparc
b721459244c5af42a6b63bd707f86ac3a2d5aba0
0f39110e1140b2b41cb4ba6c9bf17f3024db82bd
refs/heads/master
2021-10-10T13:04:54.888469
2019-01-11T07:57:18
2019-01-11T07:57:18
null
0
0
null
null
null
null
UTF-8
R
false
true
1,529
rd
diseases.Rd
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/data.R \docType{data} \name{diseases} \alias{diseases} \title{Diseases data} \format{A data frame with 4271 rows and 17 variables: \describe{ \item{FarmID}{ID of farm.} \item{FlockID}{ID of flock, combining ID of farm with ID of cycle.} \item{Cycle}{ID of cycle.} \item{WEEK}{Week number.} \item{WeekID}{ID of week, combining ID of flock with ID of week.} \item{RESPIRATORY}{Presence / absence of respiratory symptoms.} \item{DIARRHOEA}{Presence / absence of diarrhoea symptoms.} \item{CNS}{Presence / absence of central nervous system infection symptoms.} \item{MALAISE}{Presence / absence of malaise.} \item{LEGLESIONS}{Presence / absence of leg lesions.} \item{SUDDENDEATH}{Presence / absence of sudden death.} \item{NoSign}{Presence / absence of symptoms.} \item{Sick_yes}{Presence / absence of symptoms (any symptom).} \item{OTHDISEASE}{Presence / absence of other disease.} \item{CHICKENSICKNO}{Number of sick chicken.} \item{CHICKENDISEASEDEATH}{Number of chicken dead, excluding sudden death.} \item{CHICKENSUDDENDEATH}{Number of chicken dying from sudden death.} }} \source{ \url{http://viparc.org} } \usage{ diseases } \description{ Data collected in the ViParc project (http://viparc.org). } \details{ Generated from \code{diseases <- read.csv2(system.file("extdata", "DISEASE_GridEvent.csv", package = "viparc"), sep=",", na.strings = "na", header = TRUE, stringsAsFactors = FALSE)} } \keyword{datasets}
80bb83b247545f2545e11a38ec049a860f80c3a4
84a16740225507247eee8a759444cfe4b61de3ae
/ideas.R
d9271f89160bd049383880d07f04db2099ea4a83
[]
no_license
jean9208/Song-Separator
f85e67fe42f2767434c2e9abeb76ccf66f9f2e61
f9dbfe9e21f87eabb9bfa9bedfe2ad3cc05bd980
refs/heads/master
2022-04-19T18:18:15.953492
2020-04-18T20:58:46
2020-04-18T20:58:46
114,084,821
0
0
null
null
null
null
UTF-8
R
false
false
740
r
ideas.R
## Initial ideas for separating music by instrument ## # Author: Jean Michel Arreola Trapala # Date: 13/12/2017 # Contact: jean.arreola@yahoo.com.mx #Initial tests #Load packages library(tuneR) library(fastICA) library(seewave) #load song song <- readmp3("demo.mp3") #Start with a small subset of the song test <- matrix(c(song@left[5000001:6000000], song@right[5000001:6000000]), 1000000,2) #Apply independent component analysis result <- fastICA(test,2) #Create Wave object from matrix output <- Wave(result$S, bit = 16) #Save .wav savewav(output, filename = "output.wav") #Create and save .wav with subset of original song (for comparison purpouses) original <- Wave(test) savewav(original , filename = "original.wav")
24c133d6d3fbacb45eabd02c44b7821eccacc2a2
cf8326cf2ffe34465a9519325ccea010f55ecd2c
/CWS/CW5.R
03fb08a04851ce8c6af67302c74cb535f9bb186a
[]
no_license
flying-bear/R_2018
727a456d4e291140994df6f120f7265cd05ab7fa
c479bc097c75c623472f3231fc21ecd90587dff4
refs/heads/master
2020-03-30T12:34:42.859031
2018-11-21T18:30:41
2018-11-21T18:30:41
151,230,480
1
0
null
null
null
null
UTF-8
R
false
false
4,342
r
CW5.R
# gutenbergr install.packages('gutenbergr') library(gutenbergr) library(tidyverse) gutenberg_metadata # data on contents of the project (book number, title, author etc) gutenberg_works() # data on contents with text availible gutenberg_authors %>% # data on authors slice(grep('Austen', author)) gutenberg_works(author == 'Austen, Jane') %>% select(gutenberg_id) -> ids p_and_p_text <- gutenberg_download(1342) gutenberg_works() %>% distinct(language) austen_text <- gutenberg_download(ids) gutenberg_metadata %>% count(language) %>% arrange(desc(n)) # tidytext install.packages('tidytext') library(tidytext) p_and_p_text %>% unnest_tokens(word, text) -> tidy_p_and_p # unnest_tokens(new column name, used column name, to_lower = FALSE) # the last argument is responsible for keeping rgister austen_text %>% unnest_tokens(word, text) -> tidy_austen tidy_p_and_p %>% count(word) %>% arrange(desc(n)) # stop words stop_words tidy_p_and_p %>% count(word) %>% arrange(desc(n)) %>% anti_join(stop_words) %>% anti_join(data.frame(word = c('miss', 'sir', 'mr', 'mrs'))) %>% slice(1:30) %>% ggplot(aes(word, n))+ geom_col()+ coord_flip() # tf-idf and ngrams austen_text %>% unnest_tokens(bigrams, text, token = 'ngrams', n = 2) %>% count(bigrams, gutenberg_id) %>% bind_tf_idf(term = bigrams, document = gutenberg_id, n) -> austen_tf_idf austen_tf_idf %>% mutate(gutenberg_id = factor(gutenberg_id)) %>% arrange(desc(tf_idf)) %>% slice(1:50) %>% ggplot(aes(bigrams, tf_idf, fill = gutenberg_id))+ geom_col()+ coord_flip()+ facet_wrap(~gutenberg_id, scales = 'free')+ labs(x = '', y = 'tf_idf') # udpipe install.packages('udpipe') library(udpipe) udmodel <- udpipe_download_model(language = 'dutch') udmodel <-udpipe_load_model(file = udmodel$file_model) x <- udpipe_annotate(udmodel, x = 'Nå, jeg sprang og skyndte mig efter kaninen, døde af nysgerrighed og blev næsten fanget') x <- as.data.frame(x) # lingtypology from Glottolog install.packages('lingtypology') library(lingtypology) glottolog.modified # 
contents of glottolog database lang.aff('Slavic') # what we want . what we know aff.lang('Mam') long.lang('Mam') lat.lang('Mam') lang.country('Russia') gltc.lang('Mam') country.lang(c('Adyghe', 'Russian'), intersection = TRUE) # where both are spoken aff.lang('Arabic') # has a built-in spell-checker aff.lang('Standard Arabic') aff.lang('Standard Arabic', glottolog.source = 'original') # maps map.feature(c('Adyghe', 'Russian', 'Mam', 'Modern Hebrew'), features = c('feature1', 'feature2', 'feature1', 'feature1')) map.feature(c('Adyghe', 'Russian', 'Mam', 'Modern Hebrew'), features = c('feature1', 'feature2', 'feature1', 'feature1'), map.orientation = 'Atlantic') map.feature(lang.aff('Slavic')) map.feature(lang.aff('Sign'), map.orientation = 'Atlantic') map.feature(lang.aff('Sign'), label = lang.aff('Sign')) # labels without clik needed ejective_and_n_consonants # built-in dataset map.feature(languages = ejective_and_n_consonants, features = ejective_and_n_consonants$consonants) map.feature(languages = ejective_and_n_consonants, features = ejective_and_n_consonants$ejectives) # API for linguists # WALS (the World Atas of Language Structures) wals.feature('1a') -> w # don't forget to cite things properly! map.feature(language = w$language, # consonant inventory features = w$`1a`, longitude = w$longitude, latitude = w$latitude, label = w$language) wals.feature(c('1a', '2a')) -> w map.feature(language = w$language, # vowel inventory features = w$`2a`, longitude = w$longitude, latitude = w$latitude, label = w$language) # autotyp a <- autotyp.feature(c('Gender', 'Numeral classifiers')) # don't forget to cite things properly! a %>% select(language, Gender.n) %>% na.omit() -> a2 map.feature(languages = a2$language, features = a2$Gender.n, label = a2$language) a_big <- autotyp.feature(c('Gender', 'Numeral classifiers'), na.rm = FALSE) # don't throw away those languages that are absent in lingtypology package
6243817c8f642d29a541878ff142f944e2f99280
403bc5dce93f12ac5e148cd1c5abfae7ccfaaaf2
/R_code/pe_interpolate_outdoor_values.R
4e7d0affe9b9988cb7961171c280ef47a689bca2
[ "NIST-PD" ]
permissive
usnistgov/SLP_data_code
01d18a649ca5621bcce26becc6edd22c22d8d22e
2d99e9d29c8cb0101a089124346a6d89c936b34a
refs/heads/master
2020-05-16T11:38:03.091393
2019-08-12T19:36:34
2019-08-12T19:36:34
183,022,683
2
0
null
null
null
null
UTF-8
R
false
false
3,209
r
pe_interpolate_outdoor_values.R
library(tidyverse) library(readxl) library(rstan) library(lubridate) library(stringr) read_excel(path = '../data/PE_Florida_ambient_plus_panel_temps.xlsx', skip = 1, col_names = FALSE) %>% select(X__1, X__2, X__4, X__7, X__10) %>% rename(date = X__1, time = X__2, intensity = X__4, temp = X__7, panel_temp = X__10) -> ambient_conditions ambient_conditions <- mutate(ambient_conditions, date_time = ymd_hms(paste(date, str_sub(time, start = 12, end = 20)))) ambient_conditions <- select(ambient_conditions, intensity, panel_temp, date_time, temp) ambient_conditions <- mutate(ambient_conditions, dose = intensity*60/1e6, panel_temp = 0.1*(panel_temp - 30) - 1) -> ambient_conditions all_date_time <- tibble(date_time = seq(first(ambient_conditions$date_time), last(ambient_conditions$date_time), by = '1 hour')) all_date_time <- mutate(all_date_time, hour_of_day = hour(date_time)) ambient_conditions <- left_join(all_date_time, ambient_conditions, by = 'date_time') predFun <- function(y, date_time) { predict(lm(y ~ poly(date_time, deg = 3)), newdata = data.frame(date_time = date_time)) } residFun <- function(y, date_time) { residuals(lm(y ~ poly(date_time, deg = 3), na.action = na.exclude)) } ambient_conditions <- mutate(group_by(ambient_conditions, hour_of_day), pred_panel_temp = predFun(panel_temp, date_time), resid_panel_temp = residFun(panel_temp, date_time), pred_dose = predFun(dose, date_time), resid_dose = residFun(dose, date_time)) ambient_conditions <- ungroup(ambient_conditions) ambient_conditions <- mutate(ambient_conditions, panel_temp_use = ifelse(is.na(panel_temp), pred_panel_temp, panel_temp), dose_use = pmax((ifelse(is.na(dose), pred_dose, dose)), 0)) write_csv(ambient_conditions, '../data/PE_FL_ambient_conditions_after_interp.csv') ggplot(data = ambient_conditions, mapping = aes(x = date_time, y = panel_temp)) + geom_line() + geom_line(mapping = aes(y = pred_panel_temp), color = 'red') + facet_wrap(~hour_of_day, labeller = 'label_both') ggplot(data = 
ambient_conditions, mapping = aes(x = date_time, y = dose)) + geom_line() + geom_line(mapping = aes(y = pred_dose), color = 'red') + facet_wrap(~hour_of_day, labeller = 'label_both') ggplot(data = ambient_conditions, mapping = aes(x = date_time, y = resid_panel_temp)) + geom_point() + geom_hline(yintercept = 0, color = 'red') + facet_wrap(~hour_of_day) ggplot(data = ambient_conditions, mapping = aes(x = date_time, y = resid_dose)) + geom_point() + geom_hline(yintercept = 0, color = 'red') + facet_wrap(~hour_of_day) ggplot(data = ambient_conditions, mapping = aes(x = date_time, y = panel_temp_use)) + geom_line() + geom_line(mapping = aes(y = panel_temp), color = 'red') ggplot(data = ambient_conditions, mapping = aes(x = date_time, y = dose_use)) + geom_line()
3c73c1c46b4be6b25b5da6e606959a7954aae900
293786cd8c761df0cdb12357e64c730e8169efa7
/inst/doc/fully-customed.R
aeca75c7aced3c14d8f7a858bf3b417c984b09a3
[]
no_license
cran/ggVennDiagram
91ef96d05cf95cca1d69a3457fe0b0f30da7c9c0
cacf410b5a23b6b9e12c51d1798e8232d015bb65
refs/heads/master
2023-08-16T21:39:09.269120
2023-08-14T11:20:13
2023-08-14T12:45:03
236,605,754
0
0
null
null
null
null
UTF-8
R
false
false
2,748
r
fully-customed.R
## ---- include = FALSE--------------------------------------------------------- knitr::opts_chunk$set( collapse = TRUE, comment = "#>" ) ## ----setup-------------------------------------------------------------------- library(ggVennDiagram) ## ----------------------------------------------------------------------------- genes <- paste0("gene",1:1000) set.seed(20210302) gene_list <- list(A = sample(genes,100), B = sample(genes,200), C = sample(genes,300), D = sample(genes,200)) library(ggVennDiagram) library(ggplot2) ## ----------------------------------------------------------------------------- venn <- Venn(gene_list) data <- process_data(venn) ggplot() + # 1. region count layer geom_sf(aes(fill = count), data = venn_region(data)) + # 2. set edge layer geom_sf(aes(color = id), data = venn_setedge(data), show.legend = FALSE) + # 3. set label layer geom_sf_text(aes(label = name), data = venn_setlabel(data)) + # 4. region label layer geom_sf_label(aes(label = count), data = venn_region(data)) + theme_void() ## ----------------------------------------------------------------------------- data ## ----------------------------------------------------------------------------- ggplot() + # change mapping of color filling geom_sf(aes(fill = id), data = venn_region(data), show.legend = FALSE) + # adjust edge size and color geom_sf(color="grey", size = 3, data = venn_setedge(data), show.legend = FALSE) + # show set label in bold geom_sf_text(aes(label = name), fontface = "bold", data = venn_setlabel(data)) + # add a alternative region name geom_sf_label(aes(label = name), data = venn_region(data), alpha = 0.5) + theme_void() ## ----------------------------------------------------------------------------- venn <- Venn(gene_list) data <- process_data(venn) items <- venn_region(data) %>% dplyr::rowwise() %>% dplyr::mutate(text = yulab.utils::str_wrap(paste0(.data$item, collapse = " "), width = 40)) %>% sf::st_as_sf() label_coord = sf::st_centroid(items$geometry) %>% 
sf::st_coordinates() p <- ggplot(items) + geom_sf(aes_string(fill="count")) + geom_sf_text(aes_string(label = "name"), data = data@setLabel, inherit.aes = F) + geom_text(aes_string(label = "count", text = "text"), x = label_coord[,1], y = label_coord[,2], show.legend = FALSE) + theme_void() + scale_fill_distiller(palette = "RdBu") ax <- list( showline = FALSE ) plotly::ggplotly(p, tooltip = c("text")) %>% plotly::layout(xaxis = ax, yaxis = ax)
e9b55770213e571ebf8c9691ecc517980076eec5
e09d229dd1ad18879fb051e4cb7d97c1475f49aa
/R/get_file_hash.R
f63f79858d875fd2417e39f14f096512c5dbf425
[ "MIT" ]
permissive
hamishgibbs/rtrackr
15bc922c8f8dfb765ee5b5da80df66b84eb16b16
2a353b73f8507e96c71c32c1ea557cfc04f9c0b2
refs/heads/master
2022-11-11T17:35:52.513669
2020-06-20T12:19:33
2020-06-20T12:19:33
271,510,902
1
0
NOASSERTION
2020-06-12T14:45:06
2020-06-11T09:54:51
R
UTF-8
R
false
false
778
r
get_file_hash.R
# get_file_hash # # @param hash_string data.frame, dataframe of old and new hashes with colnames hash and trackr_old_hash # # @importFrom digest sha1 # # @return character, hash of current file row hashes get_file_hash <- function(dataframe, tstamp){ if (!is.data.frame(dataframe)) stop("hash_string must be a data.frame") hash_ref <- dataframe %>% tidyr::unite(col = "hash_string", colnames(dataframe)[!colnames(dataframe) %in% c('trackr_id', 'trackr_old_hash')], sep = '') hash_ref <- hash_ref %>% dplyr::mutate(hash = lapply(hash_ref$hash_string, digest::sha1) %>% unlist()) file_hash <- digest::sha1(paste0(tstamp, paste0(hash_ref$hash, collapse = ''))) if(length(file_hash) > 1){stop('more than one file hash returned')} return(file_hash) }
94f0811e1396ec0906207ab94b887eb49bec1ca1
cbce5e4314d1edadf7d5c4f153a87d70c642a9ea
/db/man/get.trait.data.Rd
11046eecf1ae3a388d2a14bd45e1cd842f076b30
[ "NCSA", "LicenseRef-scancode-unknown-license-reference" ]
permissive
kemball/pecan
644662398835fe6d286d82d4494530f4a2bebd58
23074a812cdc50c86e2227a89a1cc1e685ec2747
refs/heads/master
2020-12-25T09:47:18.695082
2013-08-06T15:39:17
2013-08-06T15:39:17
null
0
0
null
null
null
null
UTF-8
R
false
false
210
rd
get.trait.data.Rd
\name{get.trait.data} \alias{get.trait.data} \title{Gets trait data from the database} \usage{ get.trait.data() } \description{ Gets trait data from the database } \author{ David LeBauer, Shawn Serbin }
e4d3f521a309a78be0199ee3386ac009f9d9b726
9661290402c7023f91f3fd4638b2295ff2627bb9
/ExampleTrainings/iRODS-User-training/exampleRules/conditionalhello.r
4ec787f7456362b9f4e63675bbf6baa4edacc457
[ "MIT" ]
permissive
stefan-wolfsheimer/B2SAFE-B2STAGE-Training
36aaae7e14b0407bb0464c68893b7a2b8011d89c
db9e6a8b0adc42a789272bc6f024fe64e98d03ff
refs/heads/master
2020-03-19T06:05:18.631884
2018-06-12T10:07:48
2018-06-12T10:07:48
135,989,466
0
0
null
2018-06-04T07:47:12
2018-06-04T07:47:12
null
UTF-8
R
false
false
203
r
conditionalhello.r
conditionalhello{ if(*name!="Your Name"){ writeLine("stdout", "Hello *name!"); } else { writeLine("stdout", "Hello world!"); } } INPUT *name="Your Name" OUTPUT ruleExecOut, *name
70ecabbdcab9e9ea6411c9a623d93c99aee5ac09
0ff5853af9fd557f6591979a834d9fe82708d234
/R/vcov.drc.R
8e573f44f13e2979d41ae7f9e19d680009449a27
[]
no_license
csetraynor/drc
4c6deed9b783802852cfd3ed60c87dec6afc0ce5
8719d43a09711250cd791d7d2bc965558d3e62a6
refs/heads/master
2023-04-11T03:15:00.881506
2021-05-04T02:42:06
2021-05-04T02:42:06
null
0
0
null
null
null
null
UTF-8
R
false
false
3,986
r
vcov.drc.R
"vcov.drc" <- function(object, ..., corr = FALSE, od = FALSE, pool = TRUE, unscaled = FALSE) { ## Defining function for calculating variance-covariance matrix contData <- identical(object$"type", "continuous") || identical(object$"type", "standard") if (contData) { vcovfct <- vcCont } else { vcovfct <- vcDisc } ## Retrieving the estimated variance-covariance matrix for the parameter estimates if (!corr) { # summary(object)$"varMat" if (!is.null(object$"objList")) { # require(magic, quietly = TRUE) if ((contData) && (pool)) { vcovfct <- function(x){vcCont(x) / (2 * rse(x, TRUE))} # no individual scaling, only for continuous data vcMat <- do.call("adiag", lapply(object$"objList", vcovfct)) if (!unscaled) { vcMat <- vcMat * (2 * (object$"minval" / df.residual(object))) # scaling based on all fits } } else { vcMat <- do.call("adiag", lapply(object$"objList", vcovfct)) } # do.call("adiag", lapply(object$"objList", object$"estMethod"$"vcovfct")) # vcMat <- do.call("adiag", lapply(object$"objList", vcovfct)) # if (contPool) # { # vcMat <- vcMat * (2 * (object$"minval" / df.residual(object))) # # scaling based on all fits # } return(vcMat) } else { if ((contData) && (unscaled)) { return(vcovfct(object) / (2 * rse(object, TRUE))) } else { varMat <- vcovfct(object) ## Adjusting for over-dispersion using the Pearson statistic if ((identical(object$"type", "binomial")) && (od)) { gof <- drmLOFbinomial()$"gofTest"(object$"dataList"$"resp", weights(object), fitted(object), df.residual(object)) varMat <- varMat * (gof[1] / gof[2]) } return(varMat) } } } else { ## Calculating correlation matrix corrFct <- function(object) { vcMat <- (object$"estMethod")$"vcovfct"(object) diage <- sqrt(diag(vcMat)) vcMat / (outer(diage, diage)) } if (!is.null(object$"objList")) { # require(magic, quietly = TRUE) do.call("adiag", lapply(object$"objList", corrFct)) } else { corrFct(object) } } } "vcCont" <- function(object) { # scaledH <- (object$"fit"$"hessian") / (2 * rvfct(object)) scaledH <- 
(object$"fit"$"hessian") / (2 * rse(object, TRUE)) invMat <- try(solve(scaledH), silent = TRUE) if (inherits(invMat, "try-error")) { # cat("Note: Variance-covariance matrix regularized\n") ## More stable than 'solve' (suggested by Nicholas Lewin-Koh - 2007-02-12) ch <- try(chol(scaledH), silent = TRUE) ## "silent" argument added after report by Xuesong Yu - 2010-03-09 if (inherits(ch, "try-error")) { # ch <- try(chol(0.99 * object$fit$hessian + 0.01 * diag(dim(object$fit$hessian)[1])), silent = TRUE) ch <- try(chol(0.99 * scaledH + 0.01 * diag(dim(scaledH)[1])), silent = TRUE) # 2012-06-22 } ## Try regularizing if the varcov is unstable if (!inherits(ch, "try-error")) { return(chol2inv(ch)) } else { numRows <- dim(scaledH)[1] return(matrix(NA, numRows, numRows)) } } else { return(invMat) } } "vcDisc" <- function(object) { solve(object$fit$hessian) }
c3ef2a28945481c46ab11f91353cd1a01b07e1f1
8893ba18785558773985e5dcf2049ddc01375e6b
/man/yoda_set.Rd
6c991aa76fb909f314abc034dd5b7efd02991514
[]
no_license
cran/Arothron
a122b82cdfc75dc2be94115917c24c228445c8cd
79c614f1b17f54a72648fbcd4d924907a768f755
refs/heads/master
2023-02-07T17:29:56.204584
2023-02-01T11:40:08
2023-02-01T11:40:08
127,958,255
0
1
null
null
null
null
UTF-8
R
false
true
352
rd
yoda_set.Rd
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/yoda_set.R \docType{data} \name{yoda_set} \alias{yoda_set} \title{example dataset} \usage{ data(yoda_set) } \description{ Landmark set on Yoda } \author{ Antonio Profico, Costantino Buzi, Marina Melchionna, Paolo Piras, Pasquale Raia, Alessio Veneziano } \keyword{Arothron}
d1cc24be906539632d359ab701ed375209c57462
609ca5bd45edb5d1055b4a38efbcdfe2adfe7d63
/man/summary.learnIQ1var.Rd
c40d0ab9de673e097a3f76aeae93494dca8ecacb
[]
no_license
kalinn/iqLearn
e2c7939aa7eb058c7e48dd98e9d6464d01b65a19
97bfba5f7dbe0a9f6d8ed333c5aac6422af28b5d
refs/heads/master
2022-08-09T11:48:39.147582
2022-08-01T17:27:09
2022-08-01T17:27:09
30,946,670
2
0
null
null
null
null
UTF-8
R
false
false
2,502
rd
summary.learnIQ1var.Rd
\name{summary.learnIQ1var} \alias{summary.learnIQ1var} \alias{print.summary.learnIQ1var} \title{ IQ-learning: contrast variance modeling summary } \description{ Output from the contrast function variance modeling in IQ-learning. } \usage{ \method{summary}{learnIQ1var}(object, ...) } \arguments{ \item{object }{ object of type \code{learnIQ1var} } \item{... }{ additional arguments to be passed to \code{summary()} } } \details{ When \code{method="homo"} returns the standard deviation from the constant fit. When \code{method="hetero"} returns regression output and other summary statistics from the contrast function log-linear variance model. See \code{summary.lm} for more details. } \value{ When \code{method="hetero"} computes and returns multiple summary statistics from the log-linear model in \code{object}. See \code{summary.lm} for a list of available summary statistics. Otherwise, when \code{method="homo"} returns only the standard deviation from the constant variance fit. } \references{ Linn, K. A., Laber, E. B., Stefanski, L. A. (2015) "iqLearn: Interactive Q-Learning in R", Journal of Statistical Software, 64(1), 1--25. Laber, E. B., Linn, K. A., and Stefanski, L. A. (2014) "Interactive model building for Q-learning", Biometrika, 101(4), 831-847. } \author{ Kristin A. Linn <kalinn@ncsu.edu>, Eric B. Laber, Leonard A. 
Stefanski } \seealso{ \code{\link{learnIQ1var}} } \examples{ ## load in two-stage BMI data data (bmiData) bmiData$A1[which (bmiData$A1=="MR")] = 1 bmiData$A1[which (bmiData$A1=="CD")] = -1 bmiData$A2[which (bmiData$A2=="MR")] = 1 bmiData$A2[which (bmiData$A2=="CD")] = -1 bmiData$A1 = as.numeric (bmiData$A1) bmiData$A2 = as.numeric (bmiData$A2) s1vars = bmiData[,1:4] s2vars = bmiData[,c (1, 3, 5)] a1 = bmiData[,7] a2 = bmiData[,8] ## define response y to be the negative 12 month change in BMI from ## baseline y = -(bmiData[,6] - bmiData[,4])/bmiData[,4] fitIQ2 = learnIQ2 (y ~ gender + parent_BMI + month4_BMI + A2*(parent_BMI + month4_BMI), data=bmiData, "A2", c("parent_BMI", "month4_BMI")) fitIQ1cm = learnIQ1cm (~ gender + race + parent_BMI + baseline_BMI + A1*(gender + parent_BMI + baseline_BMI), data=bmiData, "A1", c ("gender", "parent_BMI", "baseline_BMI"), fitIQ2) fitIQ1var = learnIQ1var (fitIQ1cm) summary (fitIQ1var) fitIQ1var = learnIQ1var (~ gender + race + parent_BMI + baseline_BMI + A1*(parent_BMI), data=bmiData, "A1", c ("parent_BMI"), "hetero", fitIQ1cm) summary (fitIQ1var) }
811a102d4fc87b1c53addfa0890de3cb73354c31
a17b617826c6be240732d257e944f596f96899a7
/complex/timeseries.R
799ff51818fbd28e40a3de1c2cd03996a5f7b820
[]
no_license
mrquincle/crownstone-bash
246b899adae37f77fdf4075724e4bef3b6a5e0bf
c65746401a7684e7e5449d7b5e4d410cb0168ef3
refs/heads/master
2023-02-20T11:08:49.419508
2023-02-09T13:13:57
2023-02-09T13:13:57
102,087,361
0
0
null
null
null
null
UTF-8
R
false
false
341
r
timeseries.R
library(ggplot2) x <- read.csv("../output/timeseries.log", sep="\t") power <- x[,1] time <- x[,2] power_usage <- data.frame(y=power) power_usage$x <- as.POSIXct(time, format="%Y-%m-%dT%H:%M:%OS") #print(head(power_usage)) dev.new() p <- ggplot(power_usage, aes(x,y) ) + geom_point() + ggtitle("Time series of power data") print(p)
c675c2028626c33f190d46cea2df4865433be17b
c8b6f055acd3d2725c76174439532c9eb245e98e
/codes/L01(2020).R
72ac0f06605371f5a7013fe77d639241e5b1730f
[ "CC0-1.0" ]
permissive
sumitrmishra/intro-r-workshop
995dc9854dda65c9f1d4eca5b10f08083939dd2f
2b04310c0daa8672b6e3b252d45e880b86aacd2c
refs/heads/master
2022-12-26T14:02:00.325091
2020-10-11T14:32:40
2020-10-11T14:32:40
null
0
0
null
null
null
null
UTF-8
R
false
false
1,080
r
L01(2020).R
#your very first code in R # In this lecture, you learn how to: # a) navigate through R Studio # b) create an object in R # c) do simple math operations in R # R as a calculator 1 + 2 # I will write my favourite Beatles song and expect R to save it. strawberry fields forever # oops, i ran into trouble. i see the following error #* Error: unexpected symbol in "strawberry fields" * #' #' To have R save my favourite Beatles song, #' i will create something that R understands. #' And, that something is an R object. #' NOTE: the way to create an object is to write #' object <- value #' Let's do that. beatles.song <- "strawberry fields forever" print(beatles.song) #prints an object ## Class Example (23 July 2020) age.sumit <- 40 age.shruti <- 20 age.sumit + age.shruti print(age.sumit + age.shruti) # Thanks to Rahul K. ## simple math operation a <- 1 #define an object called a which takes value equals 1 b <- 3 a + b #adds b to a b - a #subtract a from b c <- 5 b*c #product of b and c sum <- a + b + c #define a new object using existing objects print(sum)
9205fa31bbdaa9b45fd275a5deb484c85bbd8bbf
79ed53a11a7b7cb020030ce3c1e42e761cad3e9a
/transcription_profiling.R
ef471edac3153c19e58a64d58f6d32f15cd5ac19
[]
no_license
dabrau/extract_tcga
6a92a9ddb1a25c7d94ea4ea7269f3ee91988fb61
ce143043fcb8b42234710394089dbf8c797df91f
refs/heads/master
2021-05-11T08:29:59.986931
2018-01-25T00:46:56
2018-01-25T00:46:56
118,053,079
0
0
null
null
null
null
UTF-8
R
false
false
342
r
transcription_profiling.R
library(tidyverse) source("./utils.R") GetTranscriptionProfileData <- function(proj, workflow.types = c("HTSeq - Counts", "HTSeq - FPKM")) { results <- workflow.types %>% map(function(workflow.type) QueryData(proj, "Transcriptome Profiling", workflow.type)) %>% map(Prepare) names(results) <- workflow.types return(results) }
866f4434f37afa39748f0b87827ef8d901bc2441
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
/data/genthat_extracted_code/RImpact/examples/GeometricMean.Rd.R
fc0e2147c2a4fbe6434300ab94c1c1a488a95882
[]
no_license
surayaaramli/typeRrh
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
66e6996f31961bc8b9aafe1a6a6098327b66bf71
refs/heads/master
2023-05-05T04:05:31.617869
2019-04-25T22:10:06
2019-04-25T22:10:06
null
0
0
null
null
null
null
UTF-8
R
false
false
150
r
GeometricMean.Rd.R
library(RImpact) ### Name: GeometricMean ### Title: GeometricMean ### Aliases: GeometricMean ### ** Examples GeometricMean(c(1, 3, 9, 27, 81))
d064b9834167ec8f7316531cae5ee684918bd240
fe3b6cf5bf1fc041355fd5c4d0f2e94406255caf
/arima/exploratory.R
1de3d525e2a0f6220007ec64ec81c42d6566437e
[]
no_license
ShabnamEzatzadeh/bigbrother
50292693a1079b4366cb7488245bb914b3323f7a
02c74d18dab33ba73d4e28ca293e72b0f7a93546
refs/heads/master
2021-01-18T21:04:10.088962
2014-12-05T04:22:51
2014-12-05T04:22:51
null
0
0
null
null
null
null
UTF-8
R
false
false
3,571
r
exploratory.R
rm(list=ls()) setwd("~/Documents/bigbrother/data/traffic/counts/") library(fda) data=read.table(file="006G283P.txt") dim(data) day=factor(x=data[,2]) date=factor(x=data[,1]) day2 = factor(x=data[,2]) counts=matrix(0,nrow=24*dim(data)[1],ncol=1) for(i in 1:dim(data)[1]){ counts[(i*24-23):(i*24)]=data[i,3:26] day[(i*24-23):(i*24)]=rep(data[i,2],24) date[(i*24-23):(i*24)]=rep(data[i,1],24) } counts=as.numeric(counts) #boxplot(counts~day) hour=rep(1:24,dim(data)[1]) week.day=rep(1,dim(data)[1]*24) #Codes a 1 for weekday and 0 for weekend in.day=which(day=="Sat"|day=="Sun") week.day[in.day]=rep(0,length(in.day)) #weekend.day = which(day2=="Sat"|day2=="Sun") weekend.day = which(day2=="Sun") weekend.counts = matrix(0, length(weekend.day), 24) weekend.counts = data[weekend.day, 3:26] weekday.day = which(day2=="Mon"|day2=="Tue"|day2=="Wed"|day2=="Thu"|day2=="Fri") weekday.counts = matrix(0, length(weekday.day), 24) weekday.counts = data[weekday.day, 3:26] counts.weekend = matrix(0, nrow=24*dim(weekend.counts)[1], ncol=1) counts.weekday = matrix(0, nrow=24*dim(weekday.counts)[1], ncol=1) cat("Here") for (i in 1:dim(weekend.counts)[1]) { for (j in 1:24) { counts.weekend[(i-1)*24 + j] = weekend.counts[i, j] } } for (i in 1:dim(weekday.counts)[1]) { for (j in 1:24) { counts.weekday[(i-1)*24 + j] = weekday.counts[i, j] } } counts.weekday = as.numeric(counts.weekday) counts.weekend = as.numeric(counts.weekend) plot(1:24,1:24,ylim=c(min(counts),max(counts)),type="n") for(i in 1:dim(data)[1]){ lines(1:24,data[i,3:26],col=rgb(0,0,0,.15)) } plot(1:24,1:24,ylim=c(min(counts),max(counts)),type="n") for(i in 1:dim(weekend.counts)[1]){ lines(1:24,weekend.counts[i,1:24],col=rgb(0,0,0,.15)) } # plot(1:24,1:24,ylim=c(min(counts),max(counts)),type="n") # for(i in 1:dim(weekday.counts)[1]){ # lines(1:24,weekday.counts[i,1:24],col=rgb(0,0,0,.15)) # } #mod=lm(counts~day) #anova(mod) #par(mfrow=c(2,1)) #acf(counts,lag.max=180) #pacf(counts,lag.max=120) # mod4.0=arima(counts,order=c(4,0,0)) # 
mod5.0=arima(counts,order=c(5,0,0)) # mod6.0=arima(counts,order=c(6,0,0)) # mod7.0=arima(counts,order=c(7,0,0)) # mod8.0=arima(counts,order=c(8,0,0)) # mod24.0=arima(counts,order=c(24,0,0)) # mod4.0$aic-131000 # mod5.0$aic-131000 # mod6.0$aic-131000 # mod7.0$aic-131000 # mod8.0$aic-131000 # mod24.0$aic-131000 #mod=arima(counts.weekend,order=c(1,0,1))#,seasonal=list(order=c(),period=NA)) #mod_predict = predict(mod, n.ahead=24) #plot(1:24, mod_predict$pred) # get.best.arima <- function(x.ts, maxord = c(1,1,1,1,1,1)) { # best.aic <- 1e8 # n <- length(x.ts) # for (p in 0:maxord[1]) for(d in 0:maxord[2]) for(q in 0:maxord[3]) # for (P in 0:maxord[4]) for(D in 0:maxord[5]) for(Q in 0:maxord[6]) { # fit <- arima(x.ts, order = c(p,d,q), seas = list(order = c(P,D,Q), # frequency(x.ts)), method = "CSS") # fit.aic <- -2 * fit$loglik + (log(n) + 1) * length(fit$coef) # if (fit.aic < best.aic) { # best.aic <- fit.aic # best.fit <- fit # best.model <- c(p,d,q,P,D,Q) # } # } # list(best.aic, best.fit, best.model) # } # best.arima <- get.best.arima(counts, maxord = c(rep(2,6))) # best.fit=best.arima[[2]] # best.arima # acf(resid(best.fit)) # pacf(resid(best.fit)) #fbplot(data[,3:26],ylim=c(min(counts),max(counts))) # best.arima.res <- get.best.arima(resid(mod), maxord = c(2,2,2,2,2,2)) # best.fit.res=best.arima.res[[2]] # best.arima.res # par(mfrow=c(1,3)) # acf(counts,lag.max=24*10) # acf(resid(mod),lag.max=24*10) # acf(resid(best.fit.res),lag.max=24*10) # pacf(resid(best.fit.res))
c0546d024c6fa6bc0383722b3193df512dbfbb53
602980a2b335336d9bac17d1a924ddc690449691
/R/EpivizServer-class.R
fedad94fda4441cdf36dd97f50bdea461e5c03e6
[]
no_license
epiviz/epivizr-release
55211407cb8bf781ce8c5706479299d81ad3a5f8
798b350442b74334fdf3ac834c65a878e0d436e0
refs/heads/master
2021-01-13T01:40:53.888146
2015-06-09T17:08:33
2015-06-09T17:08:33
18,971,179
1
0
null
null
null
null
UTF-8
R
false
false
7,409
r
EpivizServer-class.R
.dummyTestPage=function(req) { wsUrl = paste(sep='', '"', "ws://", ifelse(is.null(req$HTTP_HOST), req$SERVER_NAME, req$HTTP_HOST), '"') list( status = 200L, headers = list( 'Content-Type' = 'text/html' ), body = paste( sep = "\r\n", "<!DOCTYPE html>", "<html>", "<head>", '<style type="text/css">', 'body { font-family: Helvetica; }', 'pre { margin: 0 }', '</style>', "<script>", sprintf("var ws = new WebSocket(%s);", wsUrl), "ws.onmessage = function(msg) {", ' var req = JSON.parse(msg.data)', ' msgDiv = document.createElement("pre");', ' msgDiv.innerHTML = req.data.msg.replace(/&/g, "&amp;").replace(/\\</g, "&lt;");', ' document.getElementById("output").appendChild(msgDiv);', ' ws.send(JSON.stringify({type: "response", requestId: req.requestId, data: {msg: "that msg"}}));', "}", "function sendInput() {", " var input = document.getElementById('input');", " ws.send(JSON.stringify({type: 'request', requestId: 0, data: {action: 'getAllData', measurements: {}, chr: input.value, start: 0, end: 0}}));", " input.value = '';", "}", "</script>", "</head>", "<body>", '<h3>Send Message</h3>', '<form action="" onsubmit="sendInput(); return false">', '<input type="text" id="input"/>', '<h3>Received</h3>', '<div id="output"/>', '</form>', "</body>", "</html>" ) ) } .standalonePage <- function(path="") { if (path == "") { filePath <- system.file("www", package="epivizr") } else { filePath <- path } epivizrMsg("loading standalone from ", filePath) staticHandler(filePath) } EpivizServer <- setRefClass("EpivizServer", fields=list( port="integer", websocket="ANY", server="ANY", interrupted="logical", socketConnected="logical", verbose="logical", msgCallback="function", requestQueue="Queue", requestWaiting="logical", tryPorts="logical", daemonized="logical", standalone="logical", staticSitePath="character", startServerFn="function", stopServerFn="function" ), methods=list( initialize=function(port=7312L, tryPorts=FALSE, daemonized=NULL, standalone=NULL, verbose=FALSE, staticSitePath="", 
...) { port <<- port interrupted <<- FALSE socketConnected <<- FALSE server <<- NULL tryPorts <<- tryPorts requestWaiting <<- FALSE daemonized <<- .epivizrCanDaemonize() && isTRUE(daemonized) startServerFn <<- if (.self$daemonized) httpuv::startDaemonizedServer else httpuv::startServer stopServerFn <<- if (.self$daemonized) httpuv::stopDaemonizedServer else httpuv::stopServer standalone <<- isTRUE(standalone) staticSitePath <<- staticSitePath verbose <<- verbose callSuper(...) }, finalize=function() { stopServer() }, tryMorePorts=function(callbacks,minPort=7000L, maxPort=7999L) { success <- FALSE port <<- minPort while(!success && port <= maxPort) { tryCatch({ cat(".") server <<- startServerFn("0.0.0.0", port, callbacks) success <- TRUE }, error=function(e) { port <<- port + 1L }) } invisible(NULL) }, show=function() { cat(sprintf("<EpivizServer> port: %d, %s", port, ifelse(socketConnected,"connected","not connected")),"\n") invisible(NULL) }, makeCallbacks=function() { wsHandler <- function(ws) { if (verbose) epivizrMsg("WS opened") websocket <<- ws socketConnected <<- TRUE websocket$onMessage(.self$msgCallback) websocket$onClose(function() { socketConnected <<- FALSE invisible() }) popRequest() invisible() } if (standalone) { httpHandler <- .standalonePage(staticSitePath) } else { httpHandler <- .dummyTestPage } handlerMgr <- HandlerManager$new() handlerMgr$addHandler(httpHandler, 'static') handlerMgr$addWSHandler(wsHandler, 'ws') handlerMgr$createHttpuvApp() }, startServer=function(...) 
{ 'start the websocket server' callbacks <- makeCallbacks() tryCatch({ server <<- startServerFn("0.0.0.0", port, callbacks) }, error=function(e) { if (!tryPorts) stop(sprintf("Error starting epivizServer, likely because port %d is in use.\nTry a different port number or setting tryPorts=TRUE (see ?startEpiviz).",port)) tryMorePorts(callbacks) }) invisible() }, stopServer=function() { interrupted <<- TRUE if (!isClosed()) { stopServerFn(server) } server <<- NULL socketConnected <<- FALSE interrupted <<- TRUE invisible() }, service=function(nonInteractive=FALSE) { if (isClosed()) { stop("Can't listen, socket is closed") } if (daemonized) return(invisible(TRUE)) if (nonInteractive) { # run service loop once httpuv::service() return(invisible(TRUE)) } interrupted <<- FALSE while(!interrupted) { httpuv::service() Sys.sleep(0.001) } invisible(TRUE) }, stopService=function() { interrupted <<- TRUE invisible() }, runServer=function(...) { startServer(...) on.exit(stopServer()) service() }, isClosed=function() { is.null(server) }, bindManager=function(mgr) { msgCallback <<- function(binary, msg) { if (binary) { msg <- rawToChar(msg) } if (verbose) { epivizrMsg("RCVD: ", msg) } msg = fromJSON(msg) if (msg$type == "request") { out=list(type="response", requestId=msg$requestId) msgData=msg$data action=msgData$action # request handling # defined here: http://epiviz.github.io/dataprovider-plugins.html out$data=NULL if (action == "getAllData") { out$data <- list(msg=msgData$chr) } else { out$data <- mgr$handle(action, msgData) } response=toJSON(out) if (verbose) { epivizrMsg("SEND: ", response) } websocket$send(response) } else if (msg$type == "response") { # TODO: check response success callback = mgr$callbackArray$get(msg$requestId) if (!is.null(callback)) { callback(msg$data) } popRequest() } } invisible() }, sendRequest=function(request) { requestQueue$push(request) if (!requestWaiting) popRequest() invisible() }, popRequest=function() { if (!socketConnected) { 
return(invisible()) } request <- requestQueue$pop() if (is.null(request)) { requestWaiting <<- FALSE stopService() return(invisible()) } request <- toJSON(request) if (verbose) epivizrMsg("SEND: ", request) websocket$send(request) requestWaiting <<- TRUE service() }, emptyRequestQueue=function() { requestQueue$empty() inivisible() } ) )
86c9555b75995e3ab2323d029e287e955c25acf3
7a95abd73d1ab9826e7f2bd7762f31c98bd0274f
/mcga/inst/testfiles/ByteVectorToDoubles/libFuzzer_ByteVectorToDoubles/ByteVectorToDoubles_valgrind_files/1612761270-test.R
3825dfb1467bc3539ef04333c2bf4af9534b1a7e
[]
no_license
akhikolla/updatedatatype-list3
536d4e126d14ffb84bb655b8551ed5bc9b16d2c5
d1505cabc5bea8badb599bf1ed44efad5306636c
refs/heads/master
2023-03-25T09:44:15.112369
2021-03-20T15:57:10
2021-03-20T15:57:10
349,770,001
0
0
null
null
null
null
UTF-8
R
false
false
380
r
1612761270-test.R
testlist <- list(b = c(1852730990L, 1852730990L, 1852730990L, 1852730990L, 1852730990L, 1852730990L, 1852730990L, 1852730990L, 1852730990L, 1852730990L, 1852730990L, 1852730990L, 1852730990L, 1852730990L, 1852730990L, 1852730990L, 1852730990L, 1852730990L, 1852730990L, 1852730990L, 1852730990L, 1852730990L)) result <- do.call(mcga:::ByteVectorToDoubles,testlist) str(result)
5097a543ad3aef9c8d147ad9a59136830a1b2c77
da103fb81c35cf05c055d7692853d225b93ed181
/man/Description.Rd
9e2192bc841839caa15b483914960677151eb89a
[]
no_license
jundoll/jmisc
ba60fed0c91a4585e5772a22069d2ce9a9be8468
097056ed08c09eb5bbb97141fb7d94256a521f2e
refs/heads/master
2021-01-01T05:40:02.181006
2017-01-15T08:58:41
2017-01-15T08:58:41
38,819,996
1
0
null
null
null
null
UTF-8
R
false
true
582
rd
Description.Rd
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/Description.R \name{Description} \alias{Description} \title{Create the header templete of the file} \usage{ Description(desc = "This is a test message.", symbol = "#", sym_len = 50, ...) } \arguments{ \item{desc}{A character that is the description} \item{symbol}{The symbol that is consists of the header} \item{sym_len}{The width of the header} \item{...}{Another item} } \value{ The header templete of the file } \description{ Create the header templete of the file } \examples{ Description() }
72ec19b6e5707ed36df92614b1c6dad4e047ff2b
bb04b62a93cbe2d18d4d5b85254a3b1106635f69
/man/PlotFunctionStack.Rd
23cadade79ddf82f6f96ab083b9833d7cceccf88
[]
no_license
xia-lab/MicrobiomeAnalystR
331725a764aa97099c0ca56668428bce63785bed
97ced67a4ac04e4804d263ec3cde0fe10fd752aa
refs/heads/master
2023-08-08T13:52:28.383786
2023-08-04T16:28:34
2023-08-04T16:28:34
184,415,141
97
36
null
2020-02-25T05:39:50
2019-05-01T12:48:01
R
UTF-8
R
false
true
592
rd
PlotFunctionStack.Rd
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/sdp_utils.R \name{PlotFunctionStack} \alias{PlotFunctionStack} \title{Function to plot stacked bar chart of functional data.} \usage{ PlotFunctionStack( mbSetObj, summaryplot, functionlvl, abundcal, geneidtype, metadata, colpalopt, format = "png", dpi = 72 ) } \arguments{ \item{mbSetObj}{Input the name of the mbSetObj.} } \description{ This function plots stacked bar charts of functional data. } \author{ Jeff Xia \email{jeff.xia@mcgill.ca} McGill University, Canada License: GNU GPL (>= 2) }
381fa477462a1748279bb3464207a2fd4c9c305a
0bc66cf962bdab8d2e36db95cfcedd14f0f6f6b9
/plot3.R
8ad4b2aa03329335b922d512e29b81a6aaf2cd7f
[]
no_license
xnoamix/ExData_Plotting1
ec4f074623bb6642c05e4b55bcda8d2a052ccba1
1a77d2cde4861fa7b7e57e353916957950e766f3
refs/heads/master
2021-01-20T16:41:20.739055
2014-12-03T18:34:31
2014-12-03T18:34:31
null
0
0
null
null
null
null
UTF-8
R
false
false
936
r
plot3.R
##preparing the data unzip("exdata_data_household_power_consumption.zip") data <- read.table("household_power_consumption.txt", header=F, sep=";", skip=66637, nrow=2880) ## attaching a header to the data, combining date and time to timestamp header <- readLines("household_power_consumption.txt", n=1) header2 <- unlist(strsplit(header, split=";")) colnames(data) <- header2 data$Date <- as.Date(data$Date, format = "%d/%m/%Y") data$ts <- strptime(paste(data$Date, data$Time), "%Y-%m-%d %H:%M:%S") ## writing the plot into a png file png("plot3.png", width = 480, height = 480, units = "px") plot(data$ts, data$Sub_metering_1, col="black", type="l", ylab="Energy sub metering", xlab="") lines(data$ts, data$Sub_metering_2, col="red", type="l") lines(data$ts, data$Sub_metering_3, col="blue", type="l") legend("topright", col=c("black", "red", "blue"), legend=c("Sub_metering_1", "Sub_metering_2", "Sub_metering_3"), lwd=2) dev.off()
db6c3fdf043dc321f4fb9bb7dd33241fa0b8a4e2
b3bc093f56f7a211e54cda4a289688d2bb545500
/R/sdefit.R
e4d140ad04c26d095b23c61a507d3d881e36381d
[]
no_license
ogarciav/resde
730499e376a6a66a7f87e1047cc554324a70f50f
776449729e833359250d13c83a1cb81b18a070e8
refs/heads/master
2023-06-10T13:28:04.436257
2023-05-20T01:27:59
2023-05-20T01:29:13
296,492,518
1
1
null
null
null
null
UTF-8
R
false
false
12,980
r
sdefit.R
# File contents: trfuns, sdefit, str2fun_theta # Environment for passing transformation functions: trfuns <- new.env() # The functions for computing phi and its derivative phiprime are generated and # stored here by sdefit(). They are read from this environment inside uvector(). # A regular assignment within sdefit() does not work, because nls() and nlme() # somehow alter the environments when processing functions given in formulas. # Passing the functions as arguments to uvector() does not work either, nls() # gets confused and it crashes. Simply placing the functions in the global # environment with <<- or assign() works fine without any changes to uvector(), # but global variables are often frowned upon, e.g., by CRAN. # Stop the R checker complaining about phi and phiprime in uvector(): if(getRversion() >= "2.15.1") utils::globalVariables(c("phi", "phiprime")) #' Fit SDE model #' #' ML estimation of parameters for a reducible SDE #' #' @param model Model specification, as produced by \code{\link{sdemodel}()}. #' @param x,t Vectors with variables, or names of columns in data frame. #' @param unit If applicable, unit id vector, or name of its column in data frame. #' @param data Data frame, if data not given directly in \code{x}, \code{t}, \code{unit}. #' @param start Named vector or named list with starting parameter values #' for non-hierarchical models. They can also be given #' in global. #' @param global Named vector or list of global parameters and their starting #' values for hierarchical models. Can also contain #' starting values for non-hierarchical models. #' @param local Named vector or list of local parameters and their starting values for #' hierarchical models. The value can be a vector with #' values for each unit, or a single scalar that #' applies to all the units. #' @param known Named vector or list with any parameters that should be fixed at given values. #' @param method \code{'nls'} for non-hierarchical models (default). 
For #' hierarchical models it can be \code{'nls'}, for fixed #' locals, or \code{'nlme'} for mixed effects. #' @param control Optional control list for \code{nls()} or \code{nlme()}. #' @param phi Optional transformation function. If \code{NULL} (default), it is automatically generated. #' @param phiprime Optional derivative function. If \code{NULL} (default), it is automatically generated. #' @return List with two components: a list \code{fit} containing the output from #' the optimizer (\code{nls} or \code{nlme}), and a list \code{more} containing #' sigma estimates, log-likelihood, AIC and BIC. Note that in \code{fit}, "residual sum-of-squares" #' corresponds to \code{uvector}, not to \code{x} or \code{y}. Same for \code{nls} and \code{nlme} #' methods like \code{fitted} or \code{residuals} applied to \code{fit}. #' @export #' @import stats #' #' @examples #' m <- sdemodel(phi=~x^c, beta0=~b*a^c, beta1=~-b) #' mod1 <- sdefit(m, "height", "age", data=Loblolly[Loblolly$Seed=="301",], #' start=c(a=70, b=0.1, c=1)) #' mod2 <- sdefit(m, "height", "age", "Seed", Loblolly, global=c(b=0.1, c=0.5), #' local=c(a=72)) #' @usage sdefit(model, x, t, unit=NULL, data=NULL, start=NULL, #' global=NULL, local=NULL, known=NULL, method="nls", #' control=NULL, phi=NULL, phiprime=NULL) sdefit <- function(model, x, t, unit=NULL, data=NULL, start=NULL, global=NULL, local=NULL, known=NULL, method="nls", control=NULL, phi=NULL, phiprime=NULL) { # Convert and check arguments m <- model$m pars <- sort(model$pars) lambda <- model$lambda if(length(local) != 0 && length(start) != 0) warning("Hierarchical model with parameters in 'start'. 
Taken as global.", call.=FALSE) global <- as.list(c(start, global)) # ignore start for now local <- as.list(local) known <- as.list(known) datasrc <- substitute(data) # call value if(is.null(unit) && length(local) > 0) stop("Local parameters without unit ids", call.=FALSE) if(!is.null(unit) && length(local) == 0){ warning("Unit ids but no local parameters", call.=FALSE) local <- NULL } u <- known u$eta <- NULL if(!identical(pars, sort(c(names(global),names(local),names(u))))){ stop("Missing or repeated parameters, should have values for ", paste0(pars, collapse=", "), call.=FALSE) } if(!identical(method, "nls") && !identical(method, "nlme")) stop("Unknown method ", method, call.=FALSE) # Make data frame if data given in x and t if(is.null(data)){ data <- data.frame(x=x, t=t) x <- "x" t <- "t" if(!is.null(unit)){ # unit ids given data$unit <- unit unit <- "unit" } } # Make sure that the data is sorted on increasing t if(is.null(unit)){ data <- data[order(data[,t]), ] } else { for(u in unique(data[, unit])){ i <- data[, unit] == u d <- data[i,] data[i,] <- d[order(d[,t]),] } } # Figure-out eta if(!is.null(known$eta) && (identical(m$mup, "0") || identical(m$mum, "0"))){ stop("If a known eta is given, mup and mum cannot be 0", call.=FALSE) } if(identical(m$mum, "0")){ eta <- 0 } else if(identical(m$mup, "0")){ eta <- 1 } else if(!is.null(known$eta)){ eta <- known$eta known$eta <- NULL } else { global$eta <- 0.5 eta <- "eta" } # And eta0 if(identical(m$mu0, "0")){ eta0 <- 0 } else { global$eta0 <- 0.5 eta0 <- "eta0" } # Transformation and derivative if(is.null(phi)) phi <- str2fun_theta(m$phi) if(is.null(phiprime)) phiprime <- str2fun_theta(m$phiprime) # Store them in the trfuns environment, see comments at the top of this file assign("phi", phi, trfuns) # or trfuns$phi <- phi assign("phiprime", phiprime, trfuns) # Build formula using uvector() if(is.null(unit)) u <- "NULL" else u <- unit uargs <- c("x", "t", "unit", "beta0", "beta1", "eta", "eta0", "x0", "t0", 
"lambda", "mum", "mu0", "mup", "sorted") lambda <- paste0("list(", paste(lambda, lambda, sep="=", collapse=","), ")") uvals <- with (m, c(x, t, u, beta0, beta1, eta, eta0, x0, t0, lambda, mum, mu0, mup, TRUE)) frml <- as.formula(paste0("0~uvector(", paste(uargs, uvals, sep="=", collapse=", "), ")")) if(!is.null(known)) frml <- as.formula(do.call("substitute", list(frml, known))) # fill-in known values # Replicate locals to one for each unit if necessary if(!is.null(unit)){ nunits <- length(unique(data[, unit])) if(!is.null(local)){ for(i in seq_along(local)){ if(length(local[[i]]) == 1) local[[i]] <- rep(local[[i]], nunits) else if(length(local[[i]]) != nunits) stop("Length of local ", names(local)[i], " should be 1 or ", nunits, call.=FALSE) } local <- as.data.frame(local) } } # Fit with nlme if(identical(method, "nlme")){ if(length(local) == 0) # do we have locals? stop("No locals for method 'nlme'", call.=FALSE) fixed <- global fixed$eta <- NULL # exclude eta, if present fixed <- c(unlist(fixed), colMeans(local)) # random <- as.matrix(local - colMeans(local)) start <- fixed # list(fixed=fixed, random=random) fixed <- as.formula(paste0(paste0(names(fixed), collapse="+"), "~1")) random <- as.formula(paste0(paste0(colnames(local), collapse="+"), "~1")) groups <- as.formula(paste0("~", unit)) # Run nlme if(is.numeric(eta)){ # no free eta parameter, simple call fit <- nlme::nlme(frml, data, fixed=fixed, random=random, groups=groups, start=start, control=control) } else { # estimate eta between 0 and 1, nested call # Use function factory tricks, see # https://adv-r.hadley.nz/function-factories.html fit <- NULL # prepare to receive nlme result factory <- function(start){ # function factory start <- start # store start parameters for next iteration here function(eta){ # function to be manufactured frml_eta <- as.formula(do.call("substitute", list(frml, list(eta=eta)))) # substitute eta value in formula fit <<- nlme::nlme(frml_eta, data, fixed=fixed, random=random, 
groups=groups, start=start, control=control) # stored outside start <<- nlme::fixef(fit) # next start # start <<- list(fixed = nlme::fixef(fit), # random = as.matrix(nlme::randef(fit))) # next start return(logLik(fit)) } # end ef returned function } f <- factory(start) # generate function of eta to be optimized eta <- optimize(f, c(0, 1), maximum=TRUE, tol=1e-8)[["maximum"]] # Older simpler code # f <- function(eta){ # frml_eta <- as.formula(do.call("substitute", list(frml, # list(eta=eta)))) # substitute eta value in formula # fit <- nlme::nlme(frml_eta, data, fixed=fixed, random=random, # groups=groups, start=start, control=control) # return(logLik(fit)) # } # eta <- optimize(f, c(0, 1), maximum=TRUE, tol=1e-8)[["maximum"]] # frml_eta <- as.formula(do.call("substitute", list(frml, # list(eta=eta)))) # substitute eta value in formula # fit <- nlme::nlme(frml_eta, data, fixed=fixed, random=random, # groups=groups, start=start, control=control) } # Grab parameter estimates cf <- coef(fit) global <- c(cf[1, ], eta=eta)[names(global)] local <- cf[!names(cf) %in% names(global)] npar <- length(global) + length(local)*(length(local)+3)/2 + 1 } # done with nlme # Append [unit] to local names in formula if(!is.null(unit) && !is.null(local)){ nms <- names(local) u <- paste0("[", unit, "]") e <- lapply(paste0(nms, u), str2lang) names(e) <- nms frml <- as.formula(do.call("substitute", list(frml, e))) } # Fit with nls if(method == "nls"){ start <- c(global, local) if(is.null(global$eta)){ # no free eta parameter, unconstrained fit fit <- nls(frml, data, start=start, control=control) } else { # constrained to 0 <= eta <= 1 lo <- up <- unlist(start) lo[] <- -Inf lo["eta"] <- 0 up[] <- Inf up["eta"] <- 1 fit <- nls(frml, data, start=start, control=control, algorithm="port", lower=lo, upper=up) } fit$data <- datasrc # call value, usually data set name # Grab parameter estimates cf <- coef(fit) global <- cf[names(global)] cf <- cf[!(names(cf) %in% names(global))] if(!is.null(unit)) 
local <- matrix(cf, nrow=nunits, dimnames=list(NULL, names(local))) npar <- length(global) + length(local) + 1 # number of parameters if(!is.null(unit)) local <- as.data.frame(local) } # done with nls # Additional fit statistics # String with uvector call adding final=TRUE s <- as.character(frml[3]) # exclude '0 ~' s <- paste0(substr(s, 1, nchar(s)-1), ", final=TRUE)") # Execute it more <- eval(str2lang(s), envir=c(global, local, data)) # alternative: more <- with(c(global, local, known, data), eval(str2lang(s))) names(more) <- c("sigma_p", "sigma_m", "sigma_0", "logLik") more <- more[c(m$mup, m$mum, m$mu0, "x") != "0"] # drop unused if(identical(method, "nlme")){ more$logLik <- logLik(fit) } else if(isFALSE(all.equal(more$logLik, logLik(fit)))){ stop("A bug! Something wrong with the logLikelihood computation") } more$AIC <- 2 * (npar - more$logLik) more$BIC <- log(nrow(data)) * npar - 2 * more$logLik if(isFALSE(all.equal(more[c("AIC", "BIC")], c(AIC(fit) + 2*exists("f", mode="function"), BIC(fit) + log(nrow(data))*exists("f", mode="function"))))){ stop("A bug! Something wrong with the AIC or BIC computation") } # Chck for boundary solutions (zero sigmas) if(!is.null(more$sigma_p) && !is.null(more$sigma_m)) { msg <- "Solution at boundary, it may be a local optimum. Compare logLik with " if (abs(more$sigma_p) < 1e-5) warning(msg, "mum = 0", call.=FALSE) else if (abs(more$sigma_m) < 1e-5) warning(msg, "mup = 0", call.=FALSE) } return(list(fit=fit, more=unlist(more))) } #' String to function, with parameters in theta #' #' Normally not called by the user directly, used by \code{\link{sdefit}()}. #' Converts an expression, in a character string, to a function. #' #' @param s String representation of a function of \code{x} and parameters #' #' @return Function of \code{x} and \code{theta}, \code{theta} being a named vector or list of parameters. 
#' @export #' #' @examples str2fun_theta("x^c / a") str2fun_theta <- function(s){ t <- paste("alist(x=, theta=, with(theta, ", s, "))") return(as.function(eval(str2lang(t)))) }
8632fd29db459592a038afa9642c75135a087d68
42ac78fed8e8494cc54a533e6cb9b4c18ca51369
/branches/Matrix-for-R-2.3.x/man/sparseLU-class.Rd
1a54289e49ff827adc63af1bbd677ff46379e453
[]
no_license
LTLA/Matrix
8a79cac905cdb820f95190e99352cd9d8f267558
2b80087cfebc9f673e345000aeaf2170fc15b506
refs/heads/master
2020-08-07T20:22:12.075155
2019-09-28T21:21:10
2019-09-28T21:21:10
213,576,484
0
1
null
2019-10-13T00:56:38
2019-10-08T07:30:49
C
UTF-8
R
false
false
1,732
rd
sparseLU-class.Rd
\name{sparseLU-class} \docType{class} \alias{sparseLU-class} \title{Sparse LU decomposition of a square sparse matrix} \description{Objects of this class contain the components of the LU decomposition of a sparse square matrix.} \section{Objects from the Class}{ Objects can be created by calls of the form \code{new("sparseLU", ...)} but are more commonly created by function \code{\link{lu}} applied to a sparse matrix, such as a matrix of class \code{\linkS4class{dgCMatrix}}.} } \section{Slots}{ \describe{ \item{\code{L}:}{Object of class \code{"\linkS4class{dgCMatrix}"} The lower triangular factor from the left.} \item{\code{U}:}{Object of class \code{"\linkS4class{dgCMatrix}"} The upper triangular factor from the right.} \item{\code{p}:}{Object of class \code{"integer"} Permutation applied from the left. } \item{\code{q}:}{Object of class \code{"integer"} Permutation applied from the right.} \item{\code{Dim}:}{the dimension of the original matrix; inherited from class \code{\linkS4class{MatrixFactorization}}.} } } \section{Extends}{ Class \code{"\linkS4class{LU}"}, directly. Class \code{"\linkS4class{MatrixFactorization}"}, by class \code{"LU"}. } \section{Methods}{ No methods defined with class "sparseLU" in the signature. } %\references{} %\author{} \note{ The decomposition is of the form \deqn{A = PLUQ}{A = PLUQ} where all matrices are sparse and of size \eqn{n\times n}{n by n}. The matrices \eqn{P} and \eqn{Q} are permutation matrices, \eqn{L} is lower triangular and \eqn{U} is upper triangular. } \seealso{ \code{\link{lu}}, \code{\link[base]{solve}}, \code{\linkS4class{dgCMatrix}} } \examples{ } \keyword{classes}
92ffd9dbbbf830d904f53998d64b309d50a77c5d
2a7e77565c33e6b5d92ce6702b4a5fd96f80d7d0
/fuzzedpackages/drf/man/split_frequencies.Rd
1d1af256e0845b050ae36c676f985be703f0b232
[]
no_license
akhikolla/testpackages
62ccaeed866e2194652b65e7360987b3b20df7e7
01259c3543febc89955ea5b79f3a08d3afe57e95
refs/heads/master
2023-02-18T03:50:28.288006
2021-01-18T13:23:32
2021-01-18T13:23:32
329,981,898
7
1
null
null
null
null
UTF-8
R
false
true
841
rd
split_frequencies.Rd
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/analysis_tools.R \name{split_frequencies} \alias{split_frequencies} \title{Calculate which features the forest split on at each depth.} \usage{ split_frequencies(forest, max.depth = 4) } \arguments{ \item{forest}{The trained forest.} \item{max.depth}{Maximum depth of splits to consider.} } \value{ A matrix of split depth by feature index, where each value is the number of times the feature was split on at that depth. } \description{ Calculate which features the forest split on at each depth. } \examples{ n <- 50 p <- 2 X <- matrix(rnorm(n * p), n, p) Y <- X + matrix(rnorm(n * p), ncol=p) drf.forest <- drf(X = X, Y = Y, splitting.rule = "FourierMMD", num.features = 10) # Calculate the split frequencies for this forest. split_frequencies(drf.forest) }
fe3e4099715c0e1b1beb58e476ddf3585858c482
b9b114e93485d6a525a89a349a614f0cc7297250
/app/Schinria/My_Shiny_ADS/Bar.R
2da89783bb8acd957a7b590c8b06344430ed7e89
[]
no_license
xiaoyu211/project2-cycle2-8
aeabb9902a1f17fb77a77f3191aa1cc975104090
eafa2954d0ad2b6897f4c661a8e92ac4a949afa8
refs/heads/master
2021-06-07T07:35:47.630927
2016-02-24T01:00:26
2016-02-24T01:00:26
null
0
0
null
null
null
null
UTF-8
R
false
false
1,404
r
Bar.R
setwd("~/ADS PROJECT 2 - My stuff/My_Shiny_ADS") library(datasets) library(shiny) library(data.table) library(dplyr) ##################### sub_orig <- readRDS("data_5.rds") shinyvars <- c("Resolution.Description","Borough", "Date") shiny1 <- sub_orig[shinyvars] shiny1$Date <- as.Date(shiny1$Date, "%Y-%m-%d") shiny1$Date <- format(shiny1$Date, '%Y') shiny1 <- na.omit(shiny1) shiny1$Duplicate <- ifelse(grepl("duplicate", shiny1$Resolution.Description),1,0) shiny1 <- filter(shiny1, shiny1$Duplicate == 1) shiny1 <- shiny1[, c(2,3,4)] shiny.df <- aggregate(shiny1$Duplicate, list(Borough = shiny1$Borough, Year = shiny1$Date),sum) colnames(shiny.df)[3] <- "Frequency" View(shiny.df) #Final data frame prep for shiny final_shiny <- data.frame(c('17867','31241'), c('11240','25408'),c('12210','23468'), c('5710','10402'), c('283','541')) rownames(final_shiny) <- c("2014","2015") colnames(final_shiny) <- c("Bronx","Brooklyn","Manhattan","Queens","Staten Island") final_shiny$Bronx <- as.numeric(paste(final_shiny$Bronx)) final_shiny$Brooklyn <- as.numeric(paste(final_shiny$Brooklyn)) final_shiny$Manhattan <- as.numeric(paste(final_shiny$Manhattan)) final_shiny$Queens <- as.numeric(paste(final_shiny$Queens)) final_shiny$`Staten Island` <- as.numeric(paste(final_shiny$`Staten Island`)) final_shiny <- as.matrix(final_shiny) saveRDS(final_shiny, file="final_shiny.Rds")
728e002d117db61c987bd41f11d2dc721510d7a4
bc3bbd6c6e4878e27612b26bf51208b8537808d4
/code/package-functions-vectorization.R
7cb82f348bcefeda725644e9aab29528ff7b5e50
[]
no_license
quinlan-lab/sllobs-biostats
e01d955c76a0be723bf43ec83de17bfd64aef088
212635f4ddb79c722df38d4697d438105bcd8230
refs/heads/master
2022-02-15T02:47:04.330973
2022-01-27T15:29:26
2022-01-27T15:29:26
177,178,442
78
16
null
2019-06-19T12:59:28
2019-03-22T16:55:02
null
UTF-8
R
false
false
10,952
r
package-functions-vectorization.R
###Week 4 R ###Data types, Packages, Functions and Vectorization ##Example of Vectorization (slide 2) x=c(1,2,3,4,5) x ##Slow - using a for loop x = 1 for (i in seq(2,5)){ x[i] = 1 + i-1 } x ##DataFrame (Slide 8) # Create the data frame. emp.data <- data.frame( emp_id = c (1:5), emp_name = c("Rick","Dan","Michelle","Ryan","Gary"), salary = c(623.3,515.2,611.0,729.0,843.25), start_date = as.Date(c("2012-01-01", "2013-09-23", "2014-11-15", "2014-05-11", "2015-03-27")), stringsAsFactors = FALSE ) # Print the data frame. print(emp.data) # Get the structure of the data frame. str(emp.data) summary(emp.data) ### Logical vectors used in mathematical operations ##Slide 9 x = c(seq_along(1:10)) a = x > 5 a ##Conditions using logical operators #& = and #| = or #! = not ##Create condition that outputs logical statements of the queried vector ##In this case the query is: ##are the values in the column emp_id larger than 2 AND smaller than 5? emp.data$emp_id >2 & emp.data$emp_id <5 ##We can then use the condition to extract only the rows that met the ##condition (TRUE) suba = emp.data[emp.data$emp_id >2 & emp.data$emp_id <5,] suba ##Factors (Slide 10) ##Random vector 100 random state names ?state.name set.seed(5) ## Set seed is useful for creating simulations or random objects that can be reproduced. dis_loc = sample(state.name, 100, replace = TRUE) ##Convert to Factor dis_loc = factor(dis_loc) dis_loc ##How many levels.. 43 levels ##What are the levels (states)?? levels(dis_loc) ##get the frequency of states sort(table(dis_loc),decreasing = T) ##let's generate 100 random values -- Number of people per sighting dis_pat_num = sample(100,100) ##100 samples of size 100 each ##put these vectors in a data frame dis_df = data.frame(dis_loc,dis_pat_num) str(dis_df) ##What if we want to get the mean number of cases per state? 
##This is one example of the use of a member of apply family ?tapply ##apply function (mean) to each unique combination of levels incmeans <- tapply(dis_pat_num, dis_loc, mean) incmeans ##Quick plots par(mar = c(7, 4, 2, 2) + 0.2) ##Increase margins of plot barplot(incmeans,las=2, cex.names = 0.8,xlab ="", ylab = ("Sighting counts")) ##Better plot -- more informative plot(dis_df$dis_pat_num ~ dis_df$dis_loc, las = 2, cex.axis= 0.8,xlab ="", ylab = ("Sighting counts")) ####Lists### ##Slide 12 #Create list Lst <- list(name="Fred", wife="Mary", no.children=3, child.ages=c(4,7,9)) ##call elements of list Lst[[1]] Lst$name Lst[2,4] ##Add elements to list -- let's create a matrix and add it to the List (slide 13) Mat = matrix(c(1:10),2,5) Lst[5] <- list(matrix=Mat) Lst str(Lst) ##create a new list with three elements list1 <- list(1,2,3) ##merge lists List_f = c(Lst,list1) ##Convert a list to a vector (slide 14) v1 <- unlist(List_f) class(v1) ##Arrays (slide 15) ##create vector with 1500 elements z = runif(1500) ##transform to array by changing the dimensions dim(z) <- c(3,5,100) z z[,,40] ##Create array using the function array (slide 16) vec= runif(24) ##runif is an important function to random sample from the ##Uniform distribution Arr = array(vec, dim=c(3,4,2)) Arr ##Available Packages in R from CRAN (slide 19) a <- available.packages() head(rownames(a), 3) head(a) ### Install packages in R ##remove.packages("ggplot2") install.packages("ggplot2") library(ggplot2) install.packages(c("e1071", "ggplot2")) ###Install Bioconductor (Slide 24) ##the next if statement is needed only once to install bioconductor if (!requireNamespace("BiocManager", quietly = TRUE)) install.packages("BiocManager") ##Install one or multiple bioconductor packages BiocManager::install(c("GenomicFeatures", "AnnotationDbi")) ##Add gene names to rnaseq dataset that contains ensembl gene IDS (slide 25) BiocManager::install("biomaRt") ###Set Directory ### setwd("/your/directory/here/") ##read document 
rnaseq = read.table("airway_scaledcounts.subset.euro.fixed.tsv",header = T, dec = ",") ##subset to only 300 observations, ALL columns rnaseq_small = rnaseq[c(1:300),] ## call library Biomart library(biomaRt) ##Create MART object that has the database information grch37 = useMart(biomart="ENSEMBL_MART_ENSEMBL", host="grch37.ensembl.org", path="/biomart/martservice", dataset="hsapiens_gene_ensembl") ##query Mart with a set of values (gene names), to obtain attributes geneID = getBM( filters= "ensembl_gene_id", attributes= c("ensembl_gene_id","external_gene_name","chromosome_name","start_position"), values= rnaseq_small$ensgene, mart= grch37) head(geneID) ##Possible filters and attributes for query filters_37 = listFilters(grch37) attr_37 = listAttributes(grch37) ###Merge data frames rnaseq_names = merge(rnaseq_small,geneID,by.x = "ensgene", by.y = "ensembl_gene_id") ##reshuffle columns to make it more organized rnaseq_names = rnaseq_names[,c(1,6,7,8,2:5)] ##Functions ##For loop (Slide 31) for(i in seq_along(1:10)){ print(log(i)) } #Vectorized a = c(1:10) log(a) ##while loops (slide 32) a = 0; b =1 print(a) while (b < 50) { print(b) temp <- a + b a <- b b <- temp } ##If loop (slide 33) ##this if statement was not explained in class but it is a good example ##of how to deal with time and dates in R ##How long do I have left for the class?? ##Create a variable that stores the time for the end of the class time2 = strptime(c("10:00"), format= "%H:%M") ##How long before the end of the class? 
time2 - Sys.time() ##if there is no time left add message if there is time left tell me how much is left if (time2 - Sys.time() < 0 ){ print("Time's up") }else{ print(paste("we have",time2 - Sys.time(),"hours to how")) } ##Writing functions (slide 35) doubleVal = function(n){ doub = 2 * n return(doub) } doubleVal(c(5,6,4,5,6)) ##Vectorized form f <- function(x = 1) x * 2 f(c(5,6,4,5,6)) ## functions (slide 37) fahrenheit_to_kelvin <- function(temp_F) { temp_K <- ((temp_F - 32) * (5 / 9)) + 273.15 return(temp_K) } #(slide 38) kelvin_to_celsius <- function(temp_K) { temp_C <- temp_K - 273.15 return(temp_C) } kelvin_to_celsius(0) fahrenheit_to_celsius <- function(temp_F) { temp_K <- fahrenheit_to_kelvin(temp_F) temp_C <- kelvin_to_celsius(temp_K) return(temp_C) } fahrenheit_to_celsius(32.0) ##Nested functions (slide 39) kelvin_to_celsius(fahrenheit_to_kelvin(32.0)) f <- function(x = 1) x * 2 f(4) ##Random Walk (slide 43) ##First function that contains for loops rw2d1A = function(n) { xpos = numeric(n) truefalse = c(TRUE, FALSE) plusminus1 = c(1, -1) for(i in 2:n) # Decide whether we are moving horizontally # or vertically. if (sample(truefalse, 1)) { xpos[i] = xpos[i-1] + sample(plusminus1, 1) } else { xpos[i] = xpos[i-1] - sample(plusminus1, 1) } #rw1Plot = plot(seq_along(1:n),xpos) list(x = xpos) } system.time(rw2d1A(100000)) plot(seq_along(1:100),rw2d1A(100)[[1]]) ##Vectorized form that initializes the vector and uses the cumsum function to expand ##the vector rw2d5A = # Sample from 4 directions, not horizontally and verticallly # separately. function(n = 100000) { xsteps = c(-1, 1, 0, 0) dir = sample(1:4, n - 1, replace = TRUE) xpos = c(0, cumsum(xsteps[dir])) list(x = xpos) } system.time(rw2d5A(100000)) plot(seq_along(1:100),rw2d5A(100)[[1]]) ####Apply Family (slide 45) apply(rnaseq_names[,c(4:7)], 2, mean,na.rm=TRUE) ###Homework answers ##Two dimensional Random Walk multiple versions, version 5 is ultra vectorized. 
##Example taken from https://www.stat.auckland.ac.nz/~ihaka/downloads/Taupo-handouts.pdf rw2d1 = function(n) { xpos = ypos = numeric(n) truefalse = c(TRUE, FALSE) plusminus1 = c(1, -1) for(i in 2:n) # Decide whether we are moving horizontally # or vertically. if (sample(truefalse, 1)) { xpos[i] = xpos[i-1] + sample(plusminus1, 1) ypos[i] = ypos[i-1] } else { xpos[i] = xpos[i-1] ypos[i] = ypos[i-1] + sample(plusminus1, 1) } list(x = xpos, y = ypos) } rw2d2 = # Replace sample with runif() function(n) { xpos = ypos = numeric(n) for(i in 2:n) { if (runif(1) > .5) { xpos[i] = xpos[i-1] + 2 * (runif(1) > .5) - 1 ypos[i] = ypos[i-1] } else { xpos[i] = xpos[i-1] ypos[i] = ypos[i-1] + 2 * (runif(1) > .5) - 1 } } list(x = xpos, y = ypos) } rw2d2.5 = # Vectorize the runif() to generate all n in one go. function(n) { xpos = ypos = numeric(n) horOrVert = runif(n) > .5 delta = 2 * (runif(n) > .5) - 1 for(i in 2:n) { if (horOrVert[i]) { xpos[i] = xpos[i-1] + delta[i] ypos[i] = ypos[i-1] } else { xpos[i] = xpos[i-1] ypos[i] = ypos[i-1] + delta[i] } } list(x = xpos, y = ypos) } rw2d3 = # # Here we get rid of the loop. # We generate a collection of +1 and -1 values at random # Then we generate a collection of TRUE and FALSE values # indicating whether this is a horizontal or vertical move. # And we get rid of the loop by using the function cumsum() # But we can't just cumsum the values in steps. We have to # do this conditional on xdir. But this is where the vectorized # function ifelse() is very powerful. function(n) { steps = 2 * (runif(n - 1) > .5) - 1 xdir = runif(n - 1) > .5 xpos = c(0, cumsum(ifelse(xdir, steps, 0))) ypos = c(0, cumsum(ifelse(xdir, 0, steps))) list(x = xpos, y = ypos) } rw2d4 = # Bring sample back! 
function(n) { steps = sample(c(-1, 1), n - 1, replace = TRUE) xdir = sample(c(TRUE, FALSE), n - 1, replace = TRUE) xpos = c(0, cumsum(ifelse(xdir, steps, 0))) ypos = c(0, cumsum(ifelse(xdir, 0, steps))) list(x = xpos, y = ypos) } n=10 rw2d5 = # Sample from 4 directions, not horizontally and verticallly # separately. function(n) { xsteps = c(-1, 1, 0, 0) ysteps = c( 0, 0, -1, 1) dir = sample(1:4, n - 1, replace = TRUE) xpos = c(0, cumsum(xsteps[dir])) ypos = c(0, cumsum(ysteps[dir])) list(x = xpos, y = ypos) } system.time(rw2d1(100000)) system.time(rw2d2(100000)) system.time(rw2d2.5(100000)) system.time(rw2d3(100000)) system.time(rw2d4(100000)) system.time(rw2d5(100000))
5b3b349628be055d6f1a88ef2e886ac562b68d4d
d6f93471ac349c795b91f09850ae72266f790287
/ParetoFunction.R
af320fe3f09b5a16c83c379c85aa29345245f374
[]
no_license
Diversity-ParetoOptimal/ParetoR
e7d0c848423741c020ffcc6729400211df4ce813
492e9b8e3bed82ca59ab1f8f70e5583857989a1d
refs/heads/master
2021-01-02T09:01:17.933522
2019-04-30T16:52:04
2019-04-30T16:52:04
78,620,598
9
5
null
null
null
null
UTF-8
R
false
false
16,657
r
ParetoFunction.R
# Pareto-Optimization via Normal Boundary Intersection
# Developer: Q. Chelsea Song
# Contact: qianqisong@gmail.com
# Last Update: 11/17/2016
#
# NOTE(review): the functions in this file communicate through `<<-` globals
# (g_Weight, g_Normal, g_StartF, Weights, Formers, Layer, ...) and depend on
# package-level data objects (d_ParetoR, R_ParetoR, prop_ParetoR, sr_ParetoR)
# that are defined elsewhere in the package.

####################################### NBI Main Function ####################################

#' NBI Main Function
#'
#' Main function for obtaining pareto-optimal solution via normal boundary intersection.
#' @param X0 Initial input for preditor weight vector
#' @param Spac Number of Pareto spaces (i.e., number of Pareto points minus one)
#' @param Fnum Number of criterions
#' @param VLB Lower boundary for weight vector estimation
#' @param VUB Upper boundary for weight vector estimation
#' @param TolX Tolerance index for estimating weight vector, default is 1e-4
#' @param TolF Tolerance index for estimating criterion, default is 1e-4
#' @param TolCon Tolerance index for constraint conditions, default is 1e-7
#' @param graph If TRUE, plots will be generated for Pareto-optimal curve and predictor weights
#' @import nloptr
#' @return Pareto-Optimal solutions
#' @export
NBI = function(X0,Spac,Fnum,VLB=vector(),VUB=vector(),TolX=1e-4,TolF=1e-4,TolCon=1e-7,graph=TRUE){

  cat('\n Estimating Pareto-Optimal Solution ... \n')

  #------------------------------Initialize Options-------------------------------#

  X0 = assert_col_vec(X0)
  VLB = assert_col_vec(VLB)
  VUB = assert_col_vec(VUB)

  # Number of variables
  nvars = length(X0)

  # Set options
  # algorithm: sequential (least-squares) quadratic programming algorithm
  # (SQP is algorithm for nonlinearly constrained, gradient-based optimization,
  # supporting both equality and inequality constraints.)
  # maxeval: Max Iterations
  # xtol_abs: Tolerance for X
  # ftol_abs: Tolerance for F
  # tol_constraints_ineq/eq: Tolerance for inequal/equal constraints
  # (for reference) MATLAB constraints:
  # options = optimoptions('fmincon','Algorithm','sqp','MaxIter',(nvars+1)*1000,'TolFun',TolF,'TolX',TolX,'TolCon',TolCon,'Display','off')
  nloptr::nl.opts(optlist = list(
    maxeval = (nvars+1)*1000
    ,xtol_rel = TolX
    ,ftol_rel = TolF
  ))

  # Initialize PHI (the pay-off matrix of the shadow solutions)
  PHI = matrix(0,Fnum,Fnum)

  #------------------------------Shadow Point-------------------------------#
  # Minimize each objective individually from several random restarts.
  # cat('\n ----Step 1: find shadow minimum---- \n')

  ShadowF = matrix(0,Fnum)
  ShadowX = matrix(0,nvars,Fnum)
  xstart = X0

  out = WeightsFun(Fnum,Spac)
  Weight = out$Weights
  Near = out$Formers
  rm(out)
  Weight = Weight/Spac

  for(i in 1:Fnum){
    temp = c(1,dim(Weight)[2])
    j = temp[i]
    # g_Weight is read by myLinCom() during the slsqp() call below.
    g_Weight <<- Weight[,j]
    fmin = 9999
    ntr = nvars-1   # number of random restarts
    fminv = matrix(0,ntr,1)
    fminx = matrix(0,nvars,ntr)
    for(k in 1:ntr){
      # NOTE(review): xstart is re-randomized here but slsqp() is started
      # from X0 on every restart — confirm whether x0 = xstart was intended.
      xstart = runif(length(X0))
      out = nloptr::slsqp(x0 = X0, fn = myLinCom
                          ,lower = VLB, upper = VUB
                          ,hin = myCon_ineq
                          ,heq = myCon_eq
      )
      x = out$par
      f = out$value
      rm(out)
      fminv[k] = f
      fminx[,k] = x
      # keep the best restart
      if(f <= fmin){
        fmin = f
        reps = k
      }
    }
    x = fminx[,reps]
    # normalize the predictor weights x[2..nvars] (x[1] is the cutoff)
    som = 0
    for(k in 2:nvars){
      som = som + x[k]
    }
    for(k in 2:nvars){
      x[k] = x[k]/som
    } # to make sum of x = 1
    ShadowX[,i] = x
    ShadowX = round(ShadowX,4)
    tempF = -myFM(x)
    ShadowF[i] = round(tempF[i],4)
  }

  # cat( '\n Shadow Minimum-F: \n')
  # print(round(ShadowF,3))
  # cat('\n Shadow Minimum--X(column) \n')
  # print(round(ShadowX,3))

  #------------------------------Matrix PHI-------------------------------#
  # cat('\n ----Step 2: find PHI---- \n')

  for(i in 1:Fnum){
    PHI[,i] = myFM(ShadowX[,i]) + ShadowF
    PHI[i,i] = 0
  }
  # print(round(PHI,3))

  # Check to make sure that QPP is n-1 dimensional
  if(rcond(PHI) < 1e-8){stop(' Phi matrix singular, aborting.')}

  #------------------------------Quasi-Normal Direction-------------------------------#
  # cat('\n ----Step 3: find Quasi-Normal---- \n')

  # g_Normal is read by myTCon_eq() in the subproblems below.
  g_Normal <<- -PHI%*%matrix(1,Fnum,1)

  #------------------------------weights-------------------------------#
  # cat('\n ----Step 4: create weights---- \n')

  out = WeightsFun(Fnum,Spac)
  # NOTE(review): `out$Weight` relies on partial matching of `$` against the
  # `Weights` element returned by WeightsFun() — confirm intentional.
  Weight = out$Weight
  Near = out$Formers
  Weight = Weight/Spac
  num_w = dimFun(Weight)[2]

  # cat('\n Weights in row: \n')
  # print(round(Weight,3))

  #------------------------------NBI Subproblems-------------------------------#
  # cat('\n ----Step 5: solve NBI sub-problems---- \n')

  # Starting point for first NBI subproblem is the minimizer of f_1(x)
  xstart = c(ShadowX[,1],0)

  Pareto_Fmat = vector()   # Pareto Optima in F-space
  Pareto_Xmat = vector()   # Pareto Optima in X-space
  X_Near = vector()

  # solve NBI subproblems
  for(k in 1:num_w){
    w = Weight[,k]

    # Solve problem only if it is not minimizing one of the individual objectives
    indiv_fn_index = which(w == 1)
    # the boundary solution which has been solved
    if(length(indiv_fn_index) != 0){
      # w has a 1 in indiv_fn_index th component, zero in rest
      # Just read in solution from shadow data
      Pareto_Fmat = cbind(Pareto_Fmat, (-PHI[,indiv_fn_index] + ShadowF))
      Pareto_Xmat = cbind(Pareto_Xmat, ShadowX[,indiv_fn_index])
      X_Near = cbind(X_Near, c(ShadowX[,indiv_fn_index],0))
      # print(Pareto_Fmat)
    }else{
      w = rev(w)
      if(Near[k] > 0){
        xstart = X_Near[,Near[k]]   # start X is the previous weight-order's X
      }
      # start point in F-space; read by myTCon_eq()
      g_StartF <<- PHI%*%w + ShadowF

      # SOLVE NBI SUBPROBLEM
      out = nloptr::slsqp(x0 = xstart, fn = myT
                          ,lower = c(VLB,-Inf)
                          ,upper = c(VUB,Inf)
                          ,hin = myCon_ineq
                          ,heq = myTCon_eq)
      x_trial = out$par
      f = out$value
      rm(out)

      # success
      # if(fiasco >= 0){
      Pareto_Fmat = cbind(Pareto_Fmat, -myFM(x_trial[1:nvars]))   # Pareto optima in F-space
      # NOTE(review): the inner loops below reuse `k`, clobbering the outer
      # subproblem index (harmless in R's for(), but easy to misread).
      som = 0
      for(k in 2:nvars){som = som + x_trial[k]}
      for(k in 2:nvars){x_trial[k] = x_trial[k]/som}
      Pareto_Xmat = cbind(Pareto_Xmat, x_trial[1:nvars])   # Pareto optima in X-space
      X_Near = cbind(X_Near,x_trial)
      # }else{
      #
      #   # unsuccess
      #
      #   num_fiascos = num_fiascos + 1
      #   PHI2 = matrix(0,Fnum,Fnum)
      #
      #   for(i in 1:Fnum){
      #
      #     PHI2[,i] = myFM(x_trial) - ShadowF
      #     PHI2[i,i] = 0
      #   }
      #
      #   g_Normal2 = -PHI2%*%matrix(1,Fnum,1)
      #   g_Normal2 = g_Normal2/norm(g_Normal2,type='2') # specifies 2-norm, or Euclidean length the vector
      #   Pareto_Fmat = c(Pareto_Fmat, (g_StartF + xstart[dimFun(xstart)[1]] %*% g_Normal2)) # Pareto optima in F-space
      #   X_Near = c(X_Near,x_trial)
      #   giveup = readline('Give up ? (0/1) ')
      #   if(giveup == 1){break}
      #
      # }
    }
  }

  #------------------------------Plot Solutions-------------------------------#
  # cat('\n ----Step 6: plot---- \n')

  if(graph==TRUE){plotPareto(Pareto_Fmat, Pareto_Xmat)}

  #------------------------------Output Solutions-------------------------------#

  # Output Solution: transpose to points-in-rows and drop the cutoff row.
  Pareto_Fmat = t(Pareto_Fmat)
  Pareto_Xmat = t(Pareto_Xmat[2:nrow(Pareto_Xmat),])
  colnames(Pareto_Fmat) = c("AI.ratio","Criterion.Validity")
  colnames(Pareto_Xmat) = c(paste0(rep("P",(nvars-1)),1:(nvars-1)))

  # solution = round(cbind(t(Pareto_Fmat),t(Pareto_Xmat[2:nrow(Pareto_Xmat),])),3)
  # colnames(solution) = c("AI.ratio","Criterion.Validity", paste0(rep("P",(nvars-1)),1:(nvars-1)))
  # cat("\n Pareto-Optimal Solution \n \n")
  # print(solution)

  return(list(Pareto_Fmat = round(Pareto_Fmat,3), Pareto_Xmat = round(Pareto_Xmat,3)))
}

########################### Supporting Functions (A) ########################
# User-Defined Input for NBI.r - Pareto-Optimization via Normal Boundary Intersection
# Input:
## 1) Population correlation matrix (R): criterion & predictor inter-correlation
## 2) Population subgroup difference (d): criterion & predictor mean difference
##    between minority and majority subgroups
## 3) Proportion of minority applicants (prop):
##    prop = (# of minority applicants)/(total # of applicants)
## 4) Selection ratio (sr): sr = (# of selected applicants)/(total # of applicants)
# Related functions:
# myFM
# myCon

###### myFM() ######

#' myFM
#'
#' Supporting function, defines criterion space
#' @param x Input predictor weight vector
#' @return f Criterion vector
#' @export
myFM = function(x){

  # Obtain within-package 'global' variables
  d <- d_ParetoR
  R <- R_ParetoR

  R_u = R[-nrow(R),-ncol(R)]   # predictor inter-correlations only
  b = x[-1]                    # predictor weights (x[1] is the cutoff)

  # variance of minority and majority applicant weighted predictor
  # composite (P) distribution (DeCorte, 1999)
  sigma_p = sqrt(t(b)%*%R_u%*%b)
  # mean of minority weighted predictor composite distribution (DeCorte, 1999)
  p_i_bar = 0
  # mean of majority weighted predictor composite distribution (DeCorte, 1999)
  p_a_bar = d%*%x[-1]/sigma_p

  # minority group selection ratio (denoted as h_i in DeCorte et al., 1999)
  SR_B = 1 - pnorm(x[1], p_i_bar)
  # majority group selection ratio (denoted as h_i in DeCorte et al., 1999)
  SR_W = 1 - pnorm(x[1], p_a_bar)

  # AIratio a_g (DeCorte et al., 2007)
  a_g = SR_B/SR_W
  # Composite Validity R_xy
  R_xy = t(c(t(b),0)%*%R%*%c(t(matrix(0,dimFun(R_u)[1],1)),1))/sqrt(t(b)%*%R_u%*%b)   # DeCorte et al., 2007

  # both objectives are negated because NBI minimizes
  f = matrix(1,2,1)
  f[1,] = -a_g
  f[2,] = -R_xy

  return(f)
}

####### myCon_ineq() ######
# Nonlinear inequalities at x

#' myCon_ineq
#'
#' Support function, defines inequal constraint condition
#' @param x Input predictor weight vector
#' @return Inequal constraint condition for use in NBI()
#' @export
myCon_ineq = function(x){return(vector())}

####### myCon_eq() ######
# Nonlinear equalities at x

#' myCon_eq
#'
#' Support function, defines equal constraint condition
#' @param x Input predictor weight vector
#' @return Equal constraint condition for use in NBI()
#' @export
myCon_eq = function(x){

  # Obtain within-package 'global' variable
  prop <- prop_ParetoR
  sr <- sr_ParetoR
  d <- d_ParetoR
  R <- R_ParetoR

  R_u = R[-nrow(R),-ncol(R)]
  b = x[-1]

  # variance of minority and majority applicant weighted predictor
  # composite (P) distribution (DeCorte, 1999)
  sigma_p = sqrt(t(b)%*%R_u%*%b)
  # mean of minority weighted predictor composite distribution (DeCorte, 1999)
  p_i_bar = 0
  # mean of majority weighted predictor composite distribution (DeCorte, 1999)
  p_a_bar = d%*%x[-1]/sigma_p
  # p_a_bar = (x[2]*1.00+x[3]*0.23+x[4]*0.09+x[5]*0.33)/sigma_p

  # minority group selection ratio (denoted as h_i in DeCorte et al., 1999)
  SR_B = 1 - pnorm(x[1], p_i_bar)
  # majority group selection ratio (denoted as h_i in DeCorte et al., 1999)
  SR_W = 1 - pnorm(x[1], p_a_bar)

  # Nonlinear equalities at x:
  # (1) overall selection ratio matches sr; (2) composite variance is 1.
  ceq = matrix(1,2,1)
  ceq[1,] = SR_B*prop + SR_W*(1-prop) - sr   # DeCorte et al. (2007)
  ceq[2,] = (t(b)%*%R_u%*%b) - 1

  return(ceq)
}

########################### Supporting Functions (B) ########################
# Supplementary Functions for NBI.r - Pareto-Optimization via Normal Boundary Intersection
# Function List
## assert_col_vec
## dimFun
## WeightsFun
## Weight_Generate
## myLinCom
## myT
## myTCon_eq
## plotPareto

###### assert_col_vec() ######

#' assert_col_vec
#'
#' Support function, refines intermediate variable for use in NBI()
#' @param v Intermediate variable v
#' @return Refined variable v
#' @export
assert_col_vec = function(v){
  # NOTE(review): dimFun() never returns NULL (it returns c(0,0)), so the
  # first branch is effectively dead; and `t(t)` below transposes the base
  # transpose function rather than `v` — looks like a typo for t(v). Confirm.
  if(is.null(dimFun(v))){
    v=v
  }else if(dimFun(v)[1] < dimFun(v)[2]){v = t(t)}
  return(v)}

###### dimFun() ######

#' dimFun
#'
#' Support function, checks input predictor weight vector x
#' @param x Input predictor weight vector
#' @return x Checked and refined input predictor weight vector
#' @export
dimFun = function(x){
  # like dim(), but returns c(0,0) instead of NULL for dimensionless objects
  if(is.null(dim(x))){
    return(c(0,0))
  }else(return(dim(x)))
}

###### WeightsFun() ######

#' WeightsFun
#'
#' Support function, generates all possible weights for NBI subproblems
#' @param n Number of objects (i.e., number of predictor and criterion)
#' @param k Number of Pareto points
#' @return Weights All possible weights for NBI subproblem
#' @export
WeightsFun = function(n, k){

  # global variables
  # weight, Weights, Formers, Layer, lastone, currentone
  #
  # Generates all possible weights for NBI subproblems given:
  # n, the number of objectives
  # 1/k, the uniform spacing between two w_i (k integral)
  # This is essentially all the possible integral partitions
  # of integer k into n parts.

  # reset the `<<-` state consumed by the recursive Weight_Generate()
  WeightSub <<- matrix(0,1,n)
  Weights <<- vector()
  # assign("Formers", vector(), envir = .GlobalEnv)
  Formers <<- vector()
  # assign("Layer", n, envir = .GlobalEnv)
  Layer <<- n
  # assign("lastone", vector(), envir = .GlobalEnv)
  lastone <<- -1
  # assign("currentone", -1, envir = .GlobalEnv)
  currentone <<- -1

  Weight_Generate(1, k)

  return(list(Weights = Weights, Formers = Formers))
}

###### Weight_Generate() ######

#' Weight_Generate
#'
#' Function intended to test the weight generation scheme for NBI for > 2 objectives
#' @param n Number of objects (i.e., number of predictor and criterion)
#' @param k Number of Pareto points
#' @return Weight_Generate
#' @export
Weight_Generate = function(n, k){

  # global variables:
  # weight Weights Formers Layer lastone currentone
  # wtgener_test(n,k)
  #
  # Intended to test the weight generation scheme for NBI for > 2 objectives
  # n is the number of objectives
  # 1/k is the uniform spacing between two w_i (k integral)

  if(n == Layer){
    if(currentone >= 0){
      Formers <<- c(Formers,lastone)
      lastone <<- currentone
      currentone <<- -1
    }else{
      num = dimFun(Weights)[2]
      Formers <<- c(Formers,num)
    }
    WeightSub[(Layer - n + 1)] <<- k
    Weights <<- cbind(Weights,t(WeightSub))
  }else{
    # recurse over every way to give i of the remaining k units to level n
    for(i in 0:k){
      if(n == (Layer - 2)){
        num = dimFun(Weights)[2]
        currentone <<- num+1
      }
      WeightSub[(Layer - n + 1)] <<- i
      Weight_Generate(n+1, k-i)
    }
  }
}

###### myLinCom() ######

#' myLincom
#'
#' Support function; weighted combination of the objectives using the
#' package-global g_Weight set by NBI().
#' @param x Input predictor weight vector
#' @return f Criterion vector
#' @export
myLinCom = function(x){
  # global variable: g_Weight
  F = myFM(x)
  f = t(g_Weight)%*%F
  return(f)
}

###### myT() ######

#' myT
#'
#' Support function, define criterion space for intermediate step in NBI()
#' (the objective of each NBI subproblem is simply the last element t).
#' @param x_t Temporary input weight vector
#' @return f Temporary criterion space
#' @export
myT = function(x_t){
  f = x_t[length(x_t)]
  return(f)
}

###### myTCon_eq() ######

#' myTCon_eq
#'
#' Support function, define constraint condition for intermediate step in NBI()
#' (uses package-globals g_Normal and g_StartF set by NBI()).
#' @param x_t Temporary input weight vector
#' @return ceq Temporary constraint condition
#' @export
myTCon_eq = function(x_t){

  # global variables:
  # g_Normal g_StartF

  t = x_t[length(x_t)]
  x = x_t[1:(length(x_t)-1)]

  # normal-boundary intersection constraint: F(x) must lie on the line
  # through g_StartF in direction g_Normal at distance t
  fe = -myFM(x) - g_StartF - t * g_Normal

  # c = myCon_ineq(x)
  ceq1 = myCon_eq(x)
  ceq = c(ceq1,fe)

  return(ceq)
}

###### plotPareto() ######

#' plotPareto
#'
#' Function for plotting Pareto-optimal curve and predictor weights
#' @param Pareto_Fmat Pareto-Optimal criterion solution
#' @param Pareto_Xmat Pareto-Optimal predictor weight solution
#' @return Plot of Pareto-optimal curve and plot of predictor weights
#' @export
plotPareto = function(Pareto_Fmat, Pareto_Xmat){

  par(mfrow=c(1,2))

  AIratio = t(Pareto_Fmat[1,])
  Criterion = t(Pareto_Fmat[2,])
  X = t(Pareto_Xmat[2:nrow(Pareto_Xmat),])   # drop the cutoff row

  # AI ratio - Composite Validity trade-off
  plot(AIratio, Criterion, xlim = c(min(AIratio),max(AIratio)),
       main = "Composite Validity -- AI ratio trade-off",
       xlab = "AI ratio", ylab = "Composite Validity",
       type='c',col='blue')
  points(AIratio, Criterion, pch=8,col='red')

  # Predictor weights
  plot(AIratio,X[,1], xlim=c(min(AIratio),max(AIratio)),ylim=c(0,1),
       main = "Predictor weights trade-off function",
       xlab = "AI ratio", ylab = "Predictor weight",
       type='c',col='red')
  points(AIratio,X[,1],pch=8, col=rainbow(1))
  for(i in 2:ncol(X)){
    lines(AIratio,X[,i],type='c', col=rainbow(1, start=((1/ncol(X))*(i-1)), alpha=1))
    points(AIratio,X[,i],pch=8, col=rainbow(1, start=((1/ncol(X))*(i-1)), alpha=1))
  }
  legend('topleft', legend=c(paste0('Predictor ',1:ncol(X))),
         lty=c(rep(2,ncol(X))),lwd=c(rep(2,ncol(X))),
         col=rainbow(ncol(X)))
}
cc8039fc49ac07d54809f2933b17e7a569bf1d4f
963b75306674956433ce16a562816de9bd4f9393
/R/PlusMinus.PIimp.R
9a30b57ec54cb412aac9cc637db752ce5aa493eb
[]
no_license
cran/LogicForest
cd68de6f2bfa89b6cfc9fe49410b0ce20fa94f29
0619287ba164198feec015682d110a1bcdce58da
refs/heads/master
2021-01-20T04:32:33.071237
2014-09-18T00:00:00
2014-09-18T00:00:00
17,717,763
0
1
null
null
null
null
UTF-8
R
false
false
2,487
r
PlusMinus.PIimp.R
# Plus/minus importance of prime implicants (PIs) in a Logic Forest fit.
#
# For each cross-validation fold and each PI this measures how the weighted
# ensemble's misclassification count changes when the PI is added to every
# tree that lacks it ("plus", APIchange) and removed from every tree that
# contains it ("minus", SPIchange).  Positive entries mean the PI helps.
#
# resp       list (one element per fold) of 0/1 responses
# Xs         list (one element per fold) of predictor data
# fullPIdata matrix of PI values for all observations; columns are PIs
# mod        list of per-fold fits; each must carry $AllFits and $PI.TSdata
# wts        list of per-fold tree weights (alphas)
# ntrees     number of trees per forest
#
# Depends on package-internal helpers CV.ids() and ada.pred().
# Returns (invisibly, since the last expression is an assignment) a list
# with APIimp, APIchange, SPIimp, SPIchange, and the combined Imp.
PlusMinus.PIimp <- function(resp, Xs, fullPIdata, mod, wts, ntrees)
{
 All.PInms<-colnames(fullPIdata)
 n.obs<-nrow(fullPIdata)
 nCV<-length(mod)
 # fold membership for observations and for tree slots
 test.ids<-CV.ids(n=n.obs, kfold=nCV)
 loc.ids<-CV.ids(n=ntrees, kfold=nCV)
 # per-fold change in miss count for each PI: Add (plus) and Subtract (minus)
 APIchange<-matrix(0, nrow=nCV, ncol=length(All.PInms))
 SPIchange<-matrix(0, nrow=nCV, ncol=length(All.PInms))
 colnames(APIchange)<-All.PInms
 colnames(SPIchange)<-All.PInms
 for(i in 1:nCV)
   {
   ids<-test.ids[[i]]
   # NOTE(review): chg.ids is assigned but never used below — confirm intent.
   chg.ids<-loc.ids[[i]]
   TR.resp<-resp[[i]]
   TR.Xs<-Xs[[i]]
   fits<-mod[[i]]$AllFits
   alphas<-wts[[i]]
   # baseline ensemble prediction and misclassification count for this fold
   Orig.pred<-ada.pred(fits=fits, alphas=alphas, Xs=TR.Xs)
   Orig.miss<-sum(abs(Orig.pred-TR.resp))
   tPIdata<-fullPIdata[ids,]
   c.PIdat<-mod[[i]]$PI.TSdata
   for(j in 1:length(All.PInms))
     {
     PI<-All.PInms[j]
     # per-tree weighted votes with the PI forced in (A) / forced out (S)
     Anewpred.mat<-matrix(0, nrow=nrow(TR.Xs), ncol=length(c.PIdat))
     Snewpred.mat<-matrix(0, nrow=nrow(TR.Xs), ncol=length(c.PIdat))
     for(k in 1:length(c.PIdat))
       {
       tree.PIdat<-c.PIdat[[k]]
       # original weighted vote of tree k (a tree with one PI is a vector)
       if(is.vector(tree.PIdat)){Otree.pred<-alphas[k]*ifelse(tree.PIdat==1, 1, -1)}
       if(is.matrix(tree.PIdat)){Otree.pred<-alphas[k]*ifelse(rowSums(tree.PIdat)>0, 1, -1)}
       # colnames() is NULL for a vector, so the %in% test below is FALSE then
       tree.PInms<-colnames(tree.PIdat)
       if(PI%in%tree.PInms==TRUE)
         {
         # tree already contains PI: "plus" vote is unchanged,
         # "minus" vote is recomputed with the PI column dropped
         Anewpred.mat[,k]<-Otree.pred
         loc<-which(tree.PInms%in%PI)
         new.treePIdat<-as.matrix(tree.PIdat[,-loc])
         # a tree left with no PIs always votes -1
         if(dim(new.treePIdat)[2]==0) {Ntree.pred<-alphas[k]*rep(-1, length(TR.resp))}
         if(dim(new.treePIdat)[2]>0) {Ntree.pred<-alphas[k]*ifelse(rowSums(new.treePIdat)>0, 1, -1)}
         Snewpred.mat[,k]<-Ntree.pred
         }
       if(PI%in%tree.PInms==FALSE)
         {
         # tree lacks PI: "minus" vote is unchanged,
         # "plus" vote is recomputed with the PI column appended
         Snewpred.mat[,k]<-Otree.pred
         new.treePIdat<-cbind(tree.PIdat, tPIdata[,j])
         Ntree.pred<-alphas[k]*ifelse(rowSums(new.treePIdat)>0, 1, -1)
         Anewpred.mat[,k]<-Ntree.pred
         }
       }
     # majority (weighted-sum) vote of the modified ensembles
     Apred<-ifelse(rowSums(Anewpred.mat)>0, 1, 0)
     Spred<-ifelse(rowSums(Snewpred.mat)>0, 1, 0)
     Amiss<-sum(abs(Apred-TR.resp))
     Smiss<-sum(abs(Spred-TR.resp))
     # improvement from adding PI / degradation from removing PI
     Adiff<-Orig.miss-Amiss
     Sdiff<-Smiss-Orig.miss
     APIchange[i,j]<-Adiff
     SPIchange[i,j]<-Sdiff
     }
   }
 APIimp<-colSums(APIchange)#/nrow(APIchange)
 SPIimp<-colSums(SPIchange)#/nrow(SPIchange)
 Imp<-rowSums(cbind(APIimp, SPIimp))
 ans<-list(APIimp=APIimp, APIchange=APIchange, SPIimp=SPIimp, SPIchange=SPIchange, Imp=Imp)
}
c27fe1bb3a43bfb00c911d87219ba6c7bc96b12f
e8bf9ee8cd6ff243f10ca6ad8f41d9b327e34155
/R/to_plumber.R
94fb42723bd0d06e842f4192d19f76aac5de59d1
[ "MIT" ]
permissive
ColinFay/craneur
3ef416ac64028b6557542e49bcf562c5aa102621
92e1573e4a5d944507da8e75869649e06e2fbaae
refs/heads/master
2020-03-19T07:41:11.103266
2018-11-22T07:16:23
2018-11-22T07:16:23
136,139,398
33
5
null
2018-07-20T06:21:11
2018-06-05T07:43:02
R
UTF-8
R
false
false
1,664
r
to_plumber.R
#' Create a plumberfile that simulates a RAN
#'
#' Mirrors the `src/` tree under `to` and generates a `plumber.R` exposing
#' one GET endpoint for `src/contrib/PACKAGES` plus one per tarball found in
#' `src/contrib`.
#'
#' @param from the `src/` folder
#' @param to where to write the plumber folder
#'
#' @return writes a folder with a plumber.R and the src
#' @export
#'
#' @examples
#' \dontrun{
#' to_plumber(from = "inst/src/", to = "inst/plumb")
#' }
to_plumber <- function(from, to){

  # Every *.tar.gz under src/contrib gets its own endpoint below.
  l_f <- list.files(file.path(from, "contrib"), pattern = "tar.gz$")

  if (!dir.exists(to)) {
    dir.create(to)
  }
  file.copy(from, to, recursive = TRUE)
  file.create(file.path(to, "plumber.R"))

  # Appends one line (plus trailing newline) to the generated plumber.R.
  write_here <- function(...){
    write(..., file = file.path(to, "plumber.R"), append = TRUE)
  }

  # Static preamble: library call, API title, and the PACKAGES endpoint.
  preamble <- c(
    "library(plumber)",
    "\n",
    "#* @apiTitle Plumber RAN",
    "\n",
    '#* @serializer contentType list(type="text/plain")',
    "#* @get /src/contrib/PACKAGES",
    "\n",
    "function(){",
    " tmp <- tempfile()",
    ' PACKAGE <- readLines("src/contrib/PACKAGES")',
    " for (i in seq_along(PACKAGE)){",
    " write(PACKAGE[i], tmp, append = TRUE)",
    " }",
    ' readBin(tmp, "raw", n=file.info(tmp)$size)',
    "}",
    "\n")
  for (line in preamble) {
    write_here(line)
  }

  # One raw-download endpoint per tarball.
  for (i in seq_along(l_f)){
    write_here('#* @serializer contentType list(type="application/x-tar")')
    write_here(glue('#* @get /src/contrib/{l_f[i]}'))
    write_here('function(){')
    write_here(' tmp <- tempfile()')
    write_here(glue(' file.copy(normalizePath("src/contrib/{l_f[i]}"), tmp)'))
    write_here(glue(' readBin(tmp, "raw", n=file.info(tmp)$size)'))
    write_here('}')
    write_here('\n')
  }

  return(to)
}
0548d38fbc70eb564e104f570ccfd1b162429892
4cee6dec70875ca85f20dd738932be86f361a63e
/pkg/man/contaminate.Rd
b5f245fb6fe0ef5558a1b88b99ec87b4a3f8dcd7
[]
no_license
dieterich-lab/pulseR
9b7114769b48a305ba0a11357226e8f774b73a20
1323b378e95b483c8bda99d6c71befccd45c810f
refs/heads/master
2021-01-18T20:40:00.474158
2018-10-26T10:45:32
2018-10-26T10:45:32
72,013,067
2
4
null
null
null
null
UTF-8
R
false
true
1,096
rd
contaminate.Rd
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/formulas.R
\name{contaminate}
\alias{contaminate}
\title{Generate a new formula as a mixture of two}
\usage{
contaminate(formulas, target_condition, contaminant_condition, coef_name)
}
\arguments{
\item{formulas}{list, generated by \code{\link{MeanFormulas}}}

\item{target_condition}{character; a condition for which a new formula is
to be generated}

\item{contaminant_condition}{character; a condition whose reads can
contaminate the fraction corresponding to \code{target_condition}}

\item{coef_name}{character; the name of the cross-contamination rate}
}
\value{
unevaluated call, as from the \code{\link{quote}} function
}
\description{
If condition A can be contaminated with reads from condition B, its
expected mean read number will be \eqn{(1-p)r_a + p r_b}, where \eqn{p}
is the cross-contamination rate and \eqn{r_a,r_b} are the expected mean
read numbers for conditions A and B, correspondingly.
}
\examples{
\dontrun{
forms <- MeanFormulas(A=r_a, B=r_b)
forms[["A"]] <- contaminate(forms, "A", "B", "p")
}
}
36a7a3f783eb80e42fd52215fdc3544ef84fe4c5
e2fb32fa53018aa039dd34656f7150ffd9c640e6
/Week 4/W4A22.R
4cef5840d88fe534415ddae5071b66d1abedda37
[]
no_license
PozzettiAndrea/15.071x-ONGOING-
a1b5301d31a658062b5d1c34a486f06b68a87cfa
9d8307b95c4cbcac681a709f31667dfcac885f07
refs/heads/master
2021-05-25T21:24:48.893321
2020-04-15T21:57:07
2020-04-15T21:57:07
253,925,416
0
0
null
null
null
null
UTF-8
R
false
false
1,297
r
W4A22.R
# MITx 15.071x (The Analytics Edge), Week 4: letter recognition with CART
# and random forests on the LETTERS_ABPR data.
#
# Fix: sample.split() (caTools) and rpart() (rpart) were used before any
# library() call loaded them, so the script failed in a fresh session.
# All dependencies are now loaded up front.
library(caTools)       # sample.split()
library(rpart)         # rpart()
library(randomForest)  # randomForest()

#1.1
# Note: this data frame masks base R's built-in LETTERS constant.
LETTERS = read.csv("LETTERS_ABPR.csv")
str(LETTERS)

# Binary outcome: is the letter a "B"?
LETTERS$isB = as.factor(LETTERS$letter == "B")

set.seed(1000)
spl = sample.split(LETTERS$isB,SplitRatio=0.5)
train = subset(LETTERS,spl==TRUE)
test = subset(LETTERS,spl==FALSE)
table(test$isB)

#1.2
# CART model predicting isB from everything except the raw letter.
CARTb = rpart(isB ~ . - letter, data=train, method="class")
predictions = predict(CARTb, newdata=test, type="class")
table(test$isB,predictions)

#1.3
set.seed(1000)
# The first fit (explicit predictor list) is immediately overwritten by the
# equivalent ". - letter" formula; it is kept so the RNG stream — and hence
# the graded answers — stays identical to the original script.
RFb = randomForest(isB ~ xbox + ybox + width + height + onpix + xbar + ybar + x2bar + y2bar + xybar + x2ybar + xy2bar + xedge + xedgeycor + yedge + yedgexcor, data=train)
RFb = randomForest(isB ~ . - letter, data=train)
predictions = predict(RFb, newdata=test)
table(test$isB,predictions)

#2.1
# Multi-class problem: predict the letter itself (A, B, P, or R).
LETTERS$letter = as.factor( LETTERS$letter )
set.seed(2000)
spl = sample.split(LETTERS$letter, SplitRatio = 0.5)
train2 = subset(LETTERS, spl == TRUE)
test2 = subset(LETTERS, spl == FALSE)
table(train2$letter)
table(test2$letter)

#2.2
CARTletter = rpart(letter ~ . - isB, data=train2, method="class")
predictLetter = predict(CARTletter, newdata=test2, type="class")
table(test2$letter, predictLetter)

#2.3
set.seed(1000)
RFletter = randomForest(letter ~ . - isB, data=train2)
predictLetter = predict(RFletter, newdata=test2)
table(test2$letter, predictLetter)
7e6342d334f66e4fae0d12b8b0fb8c792ddac6d6
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
/data/genthat_extracted_code/mice/examples/make.predictorMatrix.Rd.R
0a3e473fc596408103e35230e65d3f36fc6ad904
[]
no_license
surayaaramli/typeRrh
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
66e6996f31961bc8b9aafe1a6a6098327b66bf71
refs/heads/master
2023-05-05T04:05:31.617869
2019-04-25T22:10:06
2019-04-25T22:10:06
null
0
0
null
null
null
null
UTF-8
R
false
false
249
r
make.predictorMatrix.Rd.R
# Auto-generated example script for mice::make.predictorMatrix.
library(mice)

### Name: make.predictorMatrix
### Title: Creates a 'predictorMatrix' argument
### Aliases: make.predictorMatrix

### ** Examples

# Default predictor matrix for the built-in nhanes data.
make.predictorMatrix(nhanes)
# Same data, with all variables collected into a single block.
make.predictorMatrix(nhanes, blocks = make.blocks(nhanes, "collect"))
7f27c439e6cb35f8a009bfe47fa2872675e2193f
54394dc51347bf0a034c1ece9cd751e2b7b4fa5d
/.Rprofile
d44cc362b55b72195d60c07f2b1ea9dae824b6fc
[]
no_license
jarfa/dotfiles
7a0cb25efdfacd74cba9537064e2f03fd4505691
ae7a943c2f55af9ae60d139a40e9b5b2a43396bf
refs/heads/master
2021-01-20T12:13:20.314476
2017-02-28T03:33:25
2017-02-28T03:33:25
25,318,493
0
0
null
null
null
null
UTF-8
R
false
false
645
rprofile
.Rprofile
# ~/.Rprofile: interactive-session conveniences.

# Runs once at session start: greet, then install a task callback that
# refreshes the prompt with the current time after every top-level call.
.First = function() {
  cat("Successfully loaded .Rprofile at", date(), "\n")
  updatePrompt <- function(...) {
    options(prompt = format(Sys.time(), "%H:%M:%S> "))
    return(TRUE)  # TRUE keeps the callback registered
  }
  addTaskCallback(updatePrompt)
}

options(repos = c("http://cran.stat.ucla.edu"))

# Helpers live in their own environment so rm(list = ls()) cannot remove them.
.startup <- new.env()

# hh: peek at the top-left nr x nc corner of a matrix or data frame.
# Fix: the original used stopifnot(class(d) == 'matrix' | class(d) == 'data.frame');
# since R 4.0 a matrix has class c("matrix", "array"), so that comparison
# returns a length-2 logical and stopifnot() rejected plain matrices.
# inherits() handles both the old and new class vectors correctly.
assign("hh", function(d, nr = 6, nc = NULL) {
  stopifnot(inherits(d, "matrix") || inherits(d, "data.frame"))
  if (is.null(nc)) nc <- nr
  r <- min(nr, nrow(d))
  c <- min(nc, ncol(d))
  d[1:r, 1:c]
}, envir = .startup)  # envir= spelled out (original relied on partial matching of env=)

# qq: quit without saving the workspace unless told otherwise.
assign("qq", function(save1 = "no") {
  base::q(save = save1)
}, envir = .startup)

attach(.startup)
eaa8c87a3fd18c79ecf0c19b293c1e0251b63fd5
9aafde089eb3d8bba05aec912e61fbd9fb84bd49
/codeml_files/newick_trees_processed/1893_0/rinput.R
2528d10bc3fa239b750fe00cd30d9ab4687c420f
[]
no_license
DaniBoo/cyanobacteria_project
6a816bb0ccf285842b61bfd3612c176f5877a1fb
be08ff723284b0c38f9c758d3e250c664bbfbf3b
refs/heads/master
2021-01-25T05:28:00.686474
2013-03-23T15:09:39
2013-03-23T15:09:39
null
0
0
null
null
null
null
UTF-8
R
false
false
135
r
rinput.R
# Unroot the Newick tree in 1893_0.txt and write the result to
# 1893_0_unrooted.txt (codeml input preparation).
library(ape)

testtree <- read.tree("1893_0.txt")
unrooted_tr <- unroot(testtree)
write.tree(unrooted_tr, file="1893_0_unrooted.txt")
ec1745047d81532c7c0ad3acff7aea11abe8e11b
25fb5a89d973ce5191272251fe8a30c36139887c
/R/plotqq.R
5d331163980fbc055e16e7010cbf7bfcb8f1dafd
[]
no_license
cran/plotmo
a89b6d6e71583528642674b9a81b94ebb24d2c94
e61d4f166dfb9b54dfc6c269ce2771d25518503e
refs/heads/master
2022-07-17T04:53:09.281256
2022-05-21T18:30:02
2022-05-21T18:30:02
17,698,578
0
3
null
null
null
null
UTF-8
R
false
false
5,756
r
plotqq.R
# plotqq.R
#
# Draw the residual QQ-plot panel for plotres().
# Relies on plotmo-internal helpers defined elsewhere in the package:
# dota() (dot-argument lookup with DEF default / EX / NEW flags), call.plot(),
# get.id.indices(), draw.density.along.the.bottom(), pt.cex(), fix.lim(),
# is.specified(), repl(), sprint().  rinfo is plotmo's residual-info list;
# this function reads rinfo$scale, rinfo$resids, rinfo$name, and rinfo$labs.

plotmo_qq <- function(rinfo, info, nfigs, grid.col, smooth.col,
                      id.n, iresids, npoints, force.auto.resids.ylim, ...)
{
    # use a square plotting region; restore the old setting on exit
    old.pty <- par("pty")
    par(pty="s") # square
    on.exit(par(pty=old.pty))
    # we figure out the shape of the qq line with all resids but
    # plot only npoints points (selecting them with iresids)
    resids <- rinfo$scale * rinfo$resids
    # qqnorm sets NAs in trans.resids (leverage==1) to NA in
    # qq$x and qq$y, and thus NAs don't get plotted (R PR#3750)
    main <- dota("main", DEF=sprint("%s QQ", rinfo$name), ...)
    qq <- qqnorm(resids, main=main, plot.it=FALSE)
    id.indices <- get.id.indices(resids, id.n)
    xlim <- NULL
    ylim <- NULL
    if(nfigs == 1) # user can set xlim only if this is the only figure
        xlim <- dota("xlim", DEF=xlim, ...)
    if(!force.auto.resids.ylim)
        ylim <- dota("ylim", DEF=ylim, ...)
    # qq.xlim / qq.ylim (if passed in the dots) override the generic limits
    xlim <- dota("qq.xlim", DEF=xlim, ...)
    ylim <- dota("qq.ylim", DEF=ylim, ...)
    if(!is.specified(xlim) && !is.null(id.indices)) { # extra space for point labs?
        min <- min(qq$x, na.rm=TRUE)
        max <- max(qq$x, na.rm=TRUE)
        xlim <- c(min - .1 * (max - min), max + .1 * (max - min))
    }
    if(!is.specified(ylim)) {
        min <- min(qq$y, na.rm=TRUE)
        max <- max(qq$y, na.rm=TRUE)
        ylim <- c(min, max)
        if(!is.null(id.indices)) # extra space for point labs?
            ylim <- c(min - .05 * (max - min), max + .05 * (max - min))
        if(info) # extra space for density plot?
            ylim[1] <- ylim[1] - .1 * (max - min)
    }
    xlim <- fix.lim(xlim)
    ylim <- fix.lim(ylim)
    # resolve point color/size/symbol from the dots, most specific name last
    # allow col.response as an argname for compat with old plotmo
    pt.col <- dota("col.response col.resp", DEF=1, ...)
    pt.col <- dota("pt.col col.points col.point col.residuals col.resid col",
                   EX=c(0,1,1,1,1,1), DEF=pt.col, NEW=1, ...)
    pt.col <- dota("qq.col col.residuals col.resid col",
                   EX=c(0,1,1,1), DEF=pt.col, NEW=1, ...)
    # recycle
    pt.col <- repl(pt.col, length(resids))
    pt.cex <- dota("response.cex cex.response", DEF=1, ...)
    pt.cex <- dota("pt.cex cex.points cex.point cex",
                   EX=c(0,1,1,1), DEF=pt.cex, NEW=1, ...)
    pt.cex <- dota("qq.cex cex.qq cex.residuals",
                   EX=c(0,1,1), DEF=pt.cex, NEW=1, ...)
    pt.cex <- pt.cex * pt.cex(length(resids), npoints)
    pt.cex <- repl(pt.cex, length(resids))
    pt.pch <- dota("response.pch pch.response", DEF=20, ...)
    pt.pch <- dota(
        "qq.pch pt.pch pch.points pch.point pch.residuals pch",
        EX=c(1,0,0,1,1,1), DEF=pt.pch, NEW=1, ...)
    pt.pch <- repl(pt.pch, length(resids))
    ylab <- rinfo$name
    ylab <- sprint("%s Quantiles", ylab)
    # wrapper that captures the qqline.* dot args in named defaults so they
    # are not forwarded to the plot call itself
    drop.line.col <- function(..., qqline.col=NA, qqline.lwd=NA, qqline.lty=NA)
    {
        call.plot(graphics::plot, PREFIX="qq.",
                  force.x    = qq$x[iresids],
                  force.y    = qq$y[iresids],
                  force.col  = pt.col[iresids],
                  force.cex  = pt.cex[iresids],
                  force.pch  = pt.pch[iresids],
                  force.main = main,
                  force.xlab = "Normal Quantiles",
                  force.ylab = ylab,
                  force.xlim = xlim,
                  force.ylim = ylim,
                  ...)
    }
    drop.line.col(...)
    if(is.specified(grid.col))
        grid(col=grid.col, lty=1)
    qqline.col <- dota("qqline.col", DEF=1, ...)
    qqline.lwd <- dota("qqline.lwd", DEF=1, ...)
    qqline.lty <- dota("qqline.lty", DEF=3, ...)
    if(is.specified(qqline.col) && is.specified(qqline.lwd) && is.specified(qqline.lty))
        call.plot(qqline, force.y=resids,
                  force.col=qqline.col, force.lwd=qqline.lwd, force.lty=qqline.lty, ...)
    if(info) {
        # draw actual and theoretical density along the bottom
        usr <- par("usr") # xmin, xmax, ymin, ymax
        scale <- .1 * (usr[4] - usr[3]) / (max(qq$y) - min(qq$y))
        draw.density.along.the.bottom(qq$x, den.col=smooth.col, scale=scale, ...)
        draw.density.along.the.bottom(
            resids / sd(resids, na.rm=TRUE), # TODO correct?
            scale=scale, ...)
        legend("bottomright", inset=c(0,.06), legend=c("actual", "normal"),
               cex=.8, lty=1, col=c("gray57", smooth.col),
               box.col="white", bg="white", x.intersp=.2, seg.len=1.5)
    }
    if(is.specified(grid.col) || is.specified(qqline.col) || info) {
        # replot box and points because they may have been obscured
        box()
        drop.line.col <- function(..., qqline.col=NA, qqline.lwd=NA, qqline.lty=NA)
        {
            call.plot(graphics::points, PREFIX="qq.",
                      force.x   = qq$x[iresids],
                      force.y   = qq$y[iresids],
                      force.col = pt.col[iresids],
                      force.cex = pt.cex[iresids],
                      force.pch = pt.pch[iresids],
                      ...)
        }
        drop.line.col()
    }
    if(!is.null(id.indices)) # label the id.n most extreme points
        plotrix::thigmophobe.labels(
            x      = qq$x[id.indices],
            y      = qq$y[id.indices],
            labels = rinfo$labs[id.indices],
            offset = .33, xpd=NA,
            font   = dota("label.font", DEF=1, ...)[1],
            cex    = .8 * dota("label.cex", DEF=1, ...)[1],
            col    = dota("label.col",
                          DEF=if(is.specified(smooth.col)) smooth.col else 2,
                          ...)[1])
}
90e0faaf9f818f787f721fca6be5bc82424f78fd
f2f8779f5f49160746fea2be85f32fd9e1ea6b2c
/plot4.R
6af36667a1ca5ae4da1db7e05100e5e56ba04ea1
[]
no_license
HariSriraman/Exploratory_data_analysis
0f24f9d39b7b6c4071c69f0c60e6cc31241add1e
8371a59f0bc3024ad0d4a2e37f450d89f12a9ebb
refs/heads/master
2021-01-02T08:49:27.526594
2017-08-02T15:16:29
2017-08-02T15:16:29
99,071,542
0
0
null
null
null
null
UTF-8
R
false
false
1,867
r
plot4.R
## Coursera Exploratory Data Analysis, plot 4: a 2x2 panel of household
## power-consumption line plots for 2007-02-01 and 2007-02-02, written
## to plot4.png.

## Work from the project folder (kept from the original script; note that a
## hard-coded setwd() makes the script machine-specific).
setwd("C:/Users/hsrirama/Documents/Personal/Coursera/Course 4 - Exploratory Data Analysis/Project")

## Unzip the raw data into 'data/' only when it is not already present.
if (!file.exists("./data/household_power_consumption.txt")) {
  unzip("exdata%2Fdata%2Fhousehold_power_consumption.zip", exdir = "data")
}

## Read the full dataset (';'-separated, '.' as the decimal mark).
my_data <- read.table("data/household_power_consumption.txt",
                      sep = ";", header = TRUE, dec = ".",
                      stringsAsFactors = FALSE)

## lubridate supplies dmy() for the date filtering below.
library(lubridate)

## Keep only the two target days.
req_data <- subset(my_data,
                   dmy(Date) >= dmy("01-02-2007") & dmy(Date) <= dmy("02-02-2007"))

## Coerce the measurement columns to numeric and build POSIX timestamps.
gap     <- as.numeric(req_data$Global_active_power)
esm1    <- as.numeric(req_data$Sub_metering_1)
esm2    <- as.numeric(req_data$Sub_metering_2)
esm3    <- as.numeric(req_data$Sub_metering_3)
voltage <- as.numeric(req_data$Voltage)
grd     <- as.numeric(req_data$Global_reactive_power)
dt      <- strptime(paste(req_data$Date, req_data$Time, sep = " "),
                    "%d/%m/%Y %H:%M:%S")

## Open the PNG device and lay out a 2x2 grid of panels.
png("plot4.png", width = 480, height = 480)
par(mfrow = c(2, 2))

## Panel 1: global active power over time.
plot(dt, gap, type = "l", ylab = "Global Active Power", xlab = "", cex = 0.2)

## Panel 2: voltage over time.
plot(dt, voltage, type = "l", ylab = "Voltage", xlab = "datetime")

## Panel 3: the three sub-metering series plus a legend.
plot(dt, esm1, type = "l", ylab = "Energy sub metering", xlab = "")
lines(dt, esm2, type = "l", col = "red")
lines(dt, esm3, type = "l", col = "blue")
legend("topright", c("Sub_metering_1", "Sub_metering_2", "Sub_metering_3"),
       lty = 1, lwd = 2.5, col = c("black", "red", "blue"), bty = "o")

## Panel 4: global reactive power over time.
plot(dt, grd, type = "l", ylab = "Global_reactive_power", xlab = "datetime")

## Close the device so the file is flushed to disk.
dev.off()
40111db2445c8919bb51c0afc95c912fd053ef0c
118d91758cfe209f5e477e23abfb04b91f0d0586
/inst/DVHshiny/app_ui_tab_metrics.R
58f960195d817e8d65aa3393ad670b06e53a7e55
[]
no_license
cran/DVHmetrics
5668244accbcf2ba712f5d318191f137cac4e930
1420f5f5bb683ee7c58a85ccd3763b0654eb8e66
refs/heads/master
2022-05-01T16:13:48.056003
2022-03-23T10:40:02
2022-03-23T10:40:02
30,032,803
0
0
null
null
null
null
UTF-8
R
false
false
3,436
r
app_ui_tab_metrics.R
fluidPage( fluidRow( box(title="Define metrics", width=4, textInput("metrInput", "Metric(s):", value=c("DMEAN, D1cc, V10%")), #tags$textarea(id="defMetricsMult", rows=2, cols=10, ""), #actionButton('clearText_button','Clear metrics'), #radioButtons("metrInterp", label=h5("DVH interpolation"), # list("Linear"=1, # "Monotone spline"=2, # "Local polynomial"=3)), checkboxInput("metrEUDparam", "Show EUD params ...", FALSE), conditionalPanel(condition="input.metrEUDparam == true", textInput("metrEUDa", h5("exponent a"), value=""), textInput("metrEUDfd", h5("fraction dose"), value=""), textInput("metrEUDab", h5("alpha/beta ratio"), value="") ), checkboxInput("metrNTCPparam", "Show (N)TCP params ...", FALSE), conditionalPanel(condition="input.metrNTCPparam == true", radioButtons("metrNTCPtype", h5("(N)TCP Model"), list("Probit (Lyman KB)"=1, "Logit (Niemierko)"=2, "Poisson (Kaellman)"=3, "Relative Seriality"=4)), textInput("metrNTCPtd50", h5("T(C)D50"), value=""), textInput("metrNTCPn", h5("n (=1 / EUD-a)"), value=""), conditionalPanel(condition="input.metrNTCPtype == '1'", textInput("metrNTCPm", h5("Lyman m"), value="") ), conditionalPanel(condition="input.metrNTCPtype != '1'", textInput("metrNTCPgamma50", h5("Logit/Poisson gamma50"), value="") ), conditionalPanel(condition="input.metrNTCPtype == '4'", textInput("metrNTCPs", h5("Relative Seriality s"), value="") ) ), uiOutput("metrSelPat"), actionButton("metrSelPatAll", label="(De)Select All"), uiOutput("metrSelStruct"), actionButton("metrSelStructAll", label="(De)Select All"), selectizeInput("metrSortBy", label=h5("Sort output table by:"), choices=c("Value"=1, "Structure"=2, "Metric"=3, "Patient ID"=4), multiple=TRUE)#, #options=c(placeholder='Click to select variables')) ), box(title="DVH metrics", width=8, DT::dataTableOutput("metrics"), downloadButton("saveMetrics", "Save as text file"), inputPanel( radioButtons("saveMetrDec", "Decimal separator:", list("."=1, ","=2)), radioButtons("saveMetrSep", "Column separator:", 
list("\\t (tab)"=1, "' ' (space)"=2, ", (comma)"=3, "; (semicolon)"=4))) ) ) )
2d6504721e993bc8de015a8960ee0910db5556f1
749827f781183063003f79362eb0e1e7157cabef
/R/getDF.R
6a5aa606288d33251315c15582a6768ee613a844
[]
no_license
OpenSILEX/opensilex-datavis-rapp-demo
6bd4f8a5e0ef7fe2c00a1b4d0a63ba89ebc94a93
1101050290c12b4c828a78386e4556495b90e4c3
refs/heads/master
2021-06-12T00:18:20.068554
2021-04-20T07:20:16
2021-04-20T07:20:16
177,541,853
0
2
null
2019-07-03T15:10:49
2019-03-25T08:08:19
JavaScript
UTF-8
R
false
false
3,536
r
getDF.R
#------------------------------------------------------------------------------- # Program: getDF.R # Objective: functions to get the incertitude data related to environment measures # Authors: Chourrout Elise # Creation: 15/02/2019 # Update: #------------------------------------------------------------------------------- #' @title getDF #' #' @description get the dataset of incertitudes for the data selected #' #' @param varURI uri of the variable to plot, from the \code{\link{variableList}} function or the web service directly #' @param smoothing logical, smoothing of the data, returns the incertitudes from the smoothing #' @param sensor character, uri of a sensor to filter with #' @param endDate date, date from which to filter data, format "\%Y-\%m-\%dT\%H:\%M:\%S" #' @param startDate date, date to which filter the data, format "\%Y-\%m-\%dT\%H:\%M:\%S" #' #' @param wsUrl url of the webservice # #' @return data.frame #' @export #' #' @examples #' \donttest{ #' connectToPHISWS(apiID="ws_2_public","guest@opensilex.org","guest") #' vars <- variableList( wsUrl = "www.opensilex.org/openSilexAPI/rest/") #' vars #' getDF( varURI = list("http://www.opensilex.org/demo/id/variables/v004", #' "http://www.opensilex.org/demo/id/variables/v007")) #' } getDF <- function(varURI, smoothing = FALSE, sensor = NULL, endDate = NULL, startDate = NULL, wsUrl = "http://www.opensilex.org:8080/openSilexSandBoxAPI/rest/"){ phisWSClientR::connectToPHISWS(apiID="ws_2_public","guest@opensilex.org","guest", url=wsUrl) ## Data recuperation # Variable's information variableList <- variableList( wsUrl = wsUrl) variableList <- variableList[ variableList$uri %in% varURI, ] # Data Data = lapply(varURI, FUN = function(uri){ enviroData <- getDataVar(varURI = uri, variableList = variableList)$enviroData yVar <- enviroData$value # Casting Date in the right format xVar <- as.POSIXct(enviroData$date, tz = "UTC", format = "%Y-%m-%dT%H:%M:%S") DataX <- data.frame(date = xVar, value = yVar) ## Data 
filtering # Chosen dates if(!is.null(startDate)){ startDate <- as.POSIXct(startDate, tz = "UTC", format = "%Y-%m-%d") DataX <- DataX[which(DataX$date >= startDate),] } if (!is.null(endDate)){ endDate <- as.POSIXct(endDate, tz = "UTC", format = "%Y-%m-%d") DataX <- DataX[which(DataX$date <= endDate),] } # Chosen sensor if(!is.null(sensor)){ if(length(grep(sensor, enviroData$sensorUri)) != 0){ DataX <- DataX[which(enviroData$sensorUri == sensor),] }else{ warning("This variable is not measured by the sensor.") } } ##Smoothing if(smoothing == TRUE){ if(length(DataX$date) > 20){ df = 20 } else { df <- length(DataX$date)-1 } varSpline <- gam::gam(value~s(date, df = df), data = DataX) varPred <- stats::predict(varSpline, se.fit = TRUE) dist <- abs(as.numeric(DataX$value) - as.numeric(varPred$fit)) DataX <- cbind(DataX, dist) names(DataX)[length(DataX)-1] <- paste("Value of ",variableList[which(variableList$uri == uri),"name"], sep = "") names(DataX)[length(DataX)] <- paste("Distance of ",variableList[which(variableList$uri == uri),"name"], " from smoothed curve", sep = "") }else{ names(DataX)[length(DataX)] <- paste("Value of ",variableList[which(variableList$uri == uri),"name"], sep = "") } return(DataX) }) names(Data)=variableList$name return(Data) }
0c9c8dfb7eedf32bc9baca022969d0a4b8fc09a2
8195e8b515ea2758a7bfc0bd435a4d9f9923f6e9
/R/get_bozukyol.R
a78f97f7b0508765db28d3cc742041e33d14c134
[]
no_license
atakzltn/MAKibb
0526538416d7a63b357bd72077c955727a3e97e8
f778c739ecbea959126c98054d22fbc81614bc24
refs/heads/master
2022-10-06T12:31:23.789902
2020-06-01T20:41:47
2020-06-01T20:41:47
267,534,917
1
0
null
null
null
null
UTF-8
R
false
false
2,147
r
get_bozukyol.R
#' @title get_bozukyol Function #' #' @description You can reach instant bad road information. See \url{https://data.ibb.gov.tr/organization/iett-genel-mudurlugu} for more information. #' @export #' #' @param busdoornumber This parameter is not mandatory. This information is the information that comes to the bus driver screens. For this reason, you can also query bad roads by the bus door number. #' #' @return NULL #' @importFrom RCurl basicTextGatherer #' @importFrom RCurl curlPerform #' @importFrom xml2 as_xml_document #' @importFrom xml2 xml_find_all #' @importFrom xml2 xml_text #' @importFrom dplyr filter #' @examples get_bozukyol(busdoornumber="B4042") #' #' @export get_bozukyol <- function(busdoornumber) { headerFields = c(Accept = "text/xml", Accept = "multipart/*", 'Content-Type' = "text/xml; charset=utf-8", SOAPAction = "http://tempuri.org/GetBozukSatih_XML") body = '<Envelope xmlns="http://schemas.xmlsoap.org/soap/envelope/"> <Body> <GetBozukSatih_XML xmlns="http://tempuri.org/"/> </Body> </Envelope>' reader = basicTextGatherer() curlPerform( url = "https://api.ibb.gov.tr/iett/FiloDurum/SeferGerceklesme.asmx?wsdl", httpheader = headerFields, postfields = body, writefunction = reader$update ) xml <- reader$value() xml3 <- as_xml_document(xml) # get all the <record>s NBOYLAM <-xml_find_all(xml3, "//NBOYLAM") NENLEM <-xml_find_all(xml3, "//NENLEM") SKAPINUMARASI <-xml_find_all(xml3, "//SKAPINUMARASI") boylam <- trimws(xml_text(NBOYLAM)) enlem <- trimws(xml_text(NENLEM)) kapinumarasi <- trimws(xml_text(SKAPINUMARASI)) boylam <- as.data.frame(boylam) enlem <- as.data.frame(enlem) kapinumarasi <- as.data.frame(kapinumarasi) bozuk_haritasi <- data.frame(boylam,enlem,kapinumarasi) bozuk_haritasi$boylam <- as.numeric(trimws(bozuk_haritasi$boylam)) bozuk_haritasi$enlem <- as.numeric(trimws(bozuk_haritasi$enlem)) bozuk_haritasi<-as.data.frame(bozuk_haritasi) ifelse(missing(busdoornumber),return(bozuk_haritasi),return(filter(bozuk_haritasi,bozuk_haritasi$kapinumarasi 
%in% busdoornumber))) }
7b45edd8b9ab9b11b5f5e103d425dcabfc463a03
05a79f6b42fb32cac5dd113d4b9e4e2b46bf726c
/R/erah.R
9e1756580b61930a0edf0bfd5e9a6f7e8f3fac11
[]
no_license
cran/erah
f7694555847f4d0077c95b4c2af6ad49bb89f5a5
1e64f83223ca1a744bfe73770593e673a3d46919
refs/heads/master
2021-07-04T19:31:22.570330
2021-05-11T03:20:17
2021-05-11T03:20:17
56,294,465
0
1
null
null
null
null
UTF-8
R
false
false
15,071
r
erah.R
globalVariables("mslib") ## MetaboSet Class Definition: setClass(Class = "eRahSoftParameters", representation = representation(algorithm="character", min.peak.width = "numeric", min.peak.height = "numeric", noise.threshold = "numeric", avoid.processing.mz = "vector",compression.coef = "numeric", analysis.time="vector")) setClass(Class = "eRahAlParameters", representation = representation(algorithm="character", min.spectra.cor="numeric", max.time.dist="numeric", mz.range="vector", method="character")) setClass(Class = "eRahIdParameters", representation = representation(algorithm="character", database="character", compare.only.mz="vector")) setClass(Class = "MetaData", representation = representation(Instrumental = "data.frame", Phenotype = "data.frame", DataDirectory="character")) setClass(Class = "Statistics", representation = representation(Univariate="data.frame", Multivariate="data.frame")) setClass(Class="MSResultsParameters", representation = representation(Alignment = "list", Identification = "list")) setClass(Class="Data", representation = representation(FeatureList = "list", FactorList = "list", Parameters = "list")) setClass(Class = "Results", representation = representation(Parameters="MSResultsParameters", Alignment = "data.frame", Identification="data.frame", Statistics="Statistics")) setClass(Class="MetaboSet",representation= representation(Info = "character", Data="Data", MetaData="MetaData", Results = "Results")) ## Intern Classes for eRah: setClass(Class = "eRah_DB", representation = representation(name="character", version="character", info="character", database="list")) setClass(Class = "RawDataParameters", representation = representation(data = "matrix", min.mz = "numeric", max.mz = "numeric", start.time = "numeric", mz.resolution = "numeric", scans.per.second = "numeric", avoid.processing.mz = "vector", min.peak.width = "numeric", min.peak.height = "numeric", noise.threshold = "numeric", compression.coef = "numeric")) setMethod("show", 
"MetaboSet", function(object){ cat("A \"MetaboSet\" object containing", length(object@Data@FactorList), "samples \n \n" ,sep=" ") cat("Data processed with", object@Data@Parameters$algorithm, "\n" ,sep=" ") cat("Info attached to this experiment: \n", object@Info) }) metaData <- function(object) {object@MetaData@Instrumental} phenoData <- function(object) {object@MetaData@Phenotype} #setClass(Class="expClasses",representation= representation(classes.type = "character", classes.summary = "data.frame")) # setMethod("show", "expClasses", function(object) { # classes.string <- paste(object@classes.type, collapse=", ") # cat("Experiment containing ", nrow(object@classes.summary), " samples in ", length(object@classes.type), " different type of classes named: ",classes.string, ". \n \n", sep="") # print(object@classes.summary) # }) # setGeneric("metaData", function(object) standardGeneric("metaData")) # setMethod("metaData", "MetaboSet", function(object) object@MetaData@Instrumental) # setGeneric("phenoData", function(object) standardGeneric("phenoData")) # setMethod("phenoData", "MetaboSet", function(object) object@MetaData@Phenotype) ## Main Software functions: setDecPar <- function(min.peak.width, min.peak.height=2500, noise.threshold=500, avoid.processing.mz=c(73:75,147:149), compression.coef=2, analysis.time=0) { softPar <- new("eRahSoftParameters",algorithm="eRah-OSD", min.peak.width = min.peak.width/60, min.peak.height = min.peak.height, noise.threshold = noise.threshold, avoid.processing.mz = avoid.processing.mz, compression.coef = compression.coef, analysis.time=analysis.time) softPar } setAlPar <- function(min.spectra.cor, max.time.dist, mz.range=c(70:600)) { alPar <- new("eRahAlParameters", algorithm="eRah", min.spectra.cor=min.spectra.cor, max.time.dist=max.time.dist/60, mz.range = mz.range, method="eRah") alPar } newExp <- function(instrumental, phenotype=NULL, info=character()) { #IF es un path: #IF path.dir== path.dir <- strsplit(instrumental, 
split="/")[[1]] path.dir <- paste(path.dir[-length(path.dir)], collapse="/") instrumental.dataframe <- suppressWarnings(try(read.csv(instrumental, sep=";"), silent=T)) if(class(instrumental.dataframe)=="try-error") stop(attributes(instrumental.dataframe)$condition) delete.rows <- apply(instrumental.dataframe,1,function(x) if(x["sampleID"]==""){TRUE}else{FALSE}) if(any(delete.rows)) instrumental.dataframe <- instrumental.dataframe[-which(delete.rows==T),] if(is.null(phenotype)) { phenotype.dataframe = as.data.frame(NULL) warning("No phenotype data have been attached to this experiment.") }else{ phenotype.dataframe <- suppressWarnings(try(read.csv(phenotype, sep=";"), silent=T)) if(class(phenotype.dataframe)=="try-error") stop(attributes(phenotype.dataframe)$condition) ## Comprobar almenys que estigui la columna que la relaciona amb la instrumental sampleID } factors.list <- lapply(1:nrow(instrumental.dataframe), function(x){as.data.frame(NULL)}) names(factors.list) <- as.vector(instrumental.dataframe$sampleID) ident.list <- as.data.frame(matrix(0,ncol=7, dimnames=list(row=0,col= c("AlignID", "tmean", "Name", "MatchFactor", "CAS", "Formula", "DB.Id")))) uni.stats <- as.data.frame(matrix(0,ncol=3, dimnames=list(row=0,col= c("Id", "FoldChangue", "pvalue")))) multi.stats <- as.data.frame(matrix(0,ncol=3, dimnames=list(row=0,col= c("Id", "CompoundsInvolved", "pvalue")))) al.par <- list() id.par <- list() soft.par <- list() stat.parameters <- new("MSResultsParameters", Alignment=al.par, Identification=id.par) statistics <- new("Statistics", Univariate = uni.stats, Multivariate = multi.stats) MS.Results <- new("Results", Parameters = stat.parameters, Identification = ident.list, Statistics = statistics ) MS.Data <- new("Data", FeatureList = list(NULL), FactorList = factors.list, Parameters = list(NULL)) MS.MetaData <- new("MetaData", Instrumental = instrumental.dataframe, Phenotype = phenotype.dataframe, DataDirectory=path.dir) # Instrumental Slots validation: col.correct 
<- c("sampleID","filename","date","time") for(i in 1:length(col.correct)) if(length(apply(as.matrix(colnames(MS.MetaData@Instrumental)),1,function(x) grep(col.correct[i],x)))==0) stop("Invalid instrumental file. The file must contain at least the following columns: ", paste(col.correct, collapse=", ")) # Phenotype Slots validation: if(nrow(MS.MetaData@Phenotype)!=0){ col.correct <- c("sampleID","class") for(i in 1:length(col.correct)) if(length(apply(as.matrix(colnames(MS.MetaData@Phenotype)),1,function(x) grep(col.correct[i],x)))==0) stop("Invalid phenotype file. The file must contain at least the following columns: ", paste(col.correct, collapse=", ")) } sample.container <- new("MetaboSet", Info = info, Data = MS.Data, MetaData = MS.MetaData, Results = MS.Results) sample.container } deconvolveComp <- function(Experiment, decParameters, samples.to.process=NULL, down.sample=FALSE, virtual.scans.ps=NULL) { plotting=FALSE Number.of.Samples <- nrow(Experiment@MetaData@Instrumental) if(is.null(samples.to.process)) samples.to.process <- 1:Number.of.Samples stopifnot(samples.to.process>=1, max(samples.to.process)<=Number.of.Samples, length(samples.to.process)<=Number.of.Samples) soft.par <- list(min.peak.width = decParameters@min.peak.width, min.peak.height = decParameters@min.peak.height, noise.threshold = decParameters@noise.threshold, avoid.processing.mz = decParameters@avoid.processing.mz, compression.coef = decParameters@compression.coef, analysis.time = decParameters@analysis.time) Experiment@Data@Parameters <- soft.par k <- 1 for(index in samples.to.process) { cat("\n Deconvolving compounds from",as.character(Experiment@MetaData@Instrumental$filename[index]),"... 
Processing", k,"/",length(samples.to.process),"\n") Experiment <- processSample(Experiment, index, plotting, down.sample, virtual.scans.ps) k <- k + 1 } cat("\n Compounds deconvolved \n") Experiment } alignComp <- function(Experiment, alParameters, blocks.size=NULL) { al.par <- list(alignment.algorithm=alParameters@algorithm, min.spectra.cor=alParameters@min.spectra.cor, max.time.dist=alParameters@max.time.dist, mz.range=alParameters@mz.range) Experiment@Results@Parameters@Alignment <- al.par min.spectra.cor <- Experiment@Results@Parameters@Alignment$min.spectra.cor max.time.dist <- Experiment@Results@Parameters@Alignment$max.time.dist mz.range <- Experiment@Results@Parameters@Alignment$mz.range maxMZ <- max(mz.range) # Experiment@Data@FactorList <- align.factors(Experiment@Data@FactorList, min.spectra.cor, max.time.dist, maxMZ, mz.range) # Experiment@Results@Alignment <- create.factorlist.table(Experiment) if(is.null(blocks.size)) { Experiment@Data@FactorList <- align.factors(Experiment@Data@FactorList, min.spectra.cor, max.time.dist, maxMZ, mz.range) Experiment@Results@Alignment <- create.factorlist.table(Experiment) #return(Experiment) }else{ #blocks.size <- 15 max.mz <- maxMZ Itrt <- length(Experiment@Data@FactorList)/blocks.size sequs <- trunc(seq(1, length(Experiment@Data@FactorList), length.out=Itrt)) sequs[1] <- 0 corresponding.list <- list() block.list <- list() #i <- 1 for(i in 1:(length(sequs)-1)) { cat("Aligning block ", i, " of ", length(sequs)-1, "... 
\n", sep="") ghost.object <- Experiment ghost.object@Data@FactorList <- Experiment@Data@FactorList[(sequs[i]+1):sequs[(i+1)]] factors.list <- ghost.object@Data@FactorList ghost.object@Data@FactorList <- align.factors(factors.list, min.spectra.cor, max.time.dist, max.mz, mz.range) ghost.factors.list <- create.factorlist.table(ghost.object) block.list[[i]] <- data.frame(ID=ghost.factors.list$AlignID, RT=ghost.factors.list$tmean, Spectra=ghost.factors.list$Spectra) corresponding.list <- c(corresponding.list,lapply(ghost.object@Data@FactorList, function(x) x$AlignID)) } cat("Aligning factors across blocks... \n") full.factorlist <- align.factors(block.list, min.spectra.cor, max.time.dist, max.mz, mz.range) #MaxALID <- max(unlist(lapply(full.factorlist, function(x) x$AlignID))) factors.list <- Experiment@Data@FactorList if(!(any(unlist(lapply(factors.list,function(x) {is.null(x$AlignID)}))==FALSE))) { factors.list <- lapply(factors.list, function(x){ outp <- cbind(x,matrix(0,nrow=length(x$ID))) colnames(outp)[ncol(outp)] <- "AlignID" outp }) }else{ factors.list <- lapply(factors.list, function(x){ x$AlignID <- rep(0,length(x$ID)) x }) } Experiment@Data@FactorList <- factors.list free.aligned.slots <- list() for(i in 1:length(full.factorlist)) { for(j in (sequs[i]+1):sequs[(i+1)]) { ID.vct <- sapply(full.factorlist[[i]]$ID, function(x) {x.num <- which(corresponding.list[[j]]==x) if(length(x.num)==0) x.num=0 x.num }) #full.factorlist[[i]]$AlignID[which(ID.vct!=0)] #ID.vct[which(ID.vct!=0)] Experiment@Data@FactorList[[j]]$AlignID[ID.vct[which(ID.vct!=0)]] <- full.factorlist[[i]]$AlignID[which(ID.vct!=0)] free.aligned.slots[[j]] <- which(full.factorlist[[i]]$AlignID[which(ID.vct!=0)]==0) } } MaxALID <- max(unlist(lapply(Experiment@Data@FactorList, function(x) x$AlignID))) Alid.counter <- MaxALID + 1 for(i in 1:length(free.aligned.slots)) { Experiment@Data@FactorList[[i]]$AlignID[free.aligned.slots[[i]]] <- seq(Alid.counter, Alid.counter + (length(free.aligned.slots[[i]])-1) 
) Alid.counter <- Alid.counter + length(free.aligned.slots[[i]]) } cat("Constructing Factor List Table... (This may take a while...)\n") Experiment@Results@Alignment <- create.factorlist.table(Experiment) } Experiment } identifyComp <- function(Experiment, id.database=mslib, mz.range=NULL, n.putative=3) { #if(!(any(unlist(lapply(Experiment@Data@FactorList,function(x) {is.null(x$AlignID)} ))==FALSE))) stop("Factors must be aligned first") if(is.null(Experiment@Results@Parameters@Alignment$mz.range) && is.null(mz.range)) stop("A mz.range has to be specified") if(is.null(mz.range)) compare.only.mz <- min(Experiment@Results@Parameters@Alignment$mz.range):max(Experiment@Results@Parameters@Alignment$mz.range) if(!is.null(mz.range)) compare.only.mz <- mz.range id.par <- list(database.name = id.database@name, compare.only.mz = compare.only.mz, n.putative = n.putative) Experiment@Results@Parameters@Identification <- id.par avoid.processing.mz <- Experiment@Data@Parameters$avoid.processing.mz maxMZ <- max(compare.only.mz) Experiment@Results@Identification <- identify.factors(Experiment, maxMZ, compare.only.mz, avoid.processing.mz, id.database@database, n.putative) Experiment } processSample <- function(Experiment, index, plotting, down.sample, virtual.scans.ps) { if(Experiment@MetaData@DataDirectory=="") {filename <- as.character(Experiment@MetaData@Instrumental$filename[index]) }else{filename <- paste(Experiment@MetaData@DataDirectory,"/",Experiment@MetaData@Instrumental$filename[index], sep="")} sampleObject <- NULL sampleObject <- load.file(filename) # file.extension <- strsplit(as.character(Experiment@MetaData@Instrumental$filename[index]), split="\\.")[[1]] # file.type <- file.extension[length(file.extension)] # if(file.type=="cdf") sampleObject <- load.ncdf(filename) # if(file.type=="mzXML" || file.type=="xml") sampleObject <- load.xml(filename) # if(file.type=="MetaboSet") # { # load(filename) # sampleObject <- new("RawDataParameters", data = sampleRD@data, min.mz = 
sampleRD@min.mz, max.mz = sampleRD@max.mz, start.time = sampleRD@start.time, mz.resolution = 1) # } Experiment@Data@Parameters$scans.per.second <- sampleObject@scans.per.second sampleObject@avoid.processing.mz <- Experiment@Data@Parameters$avoid.processing.mz sampleObject@min.peak.width <- Experiment@Data@Parameters$min.peak.width*Experiment@Data@Parameters$scans.per.second*60 sampleObject@min.peak.height <- Experiment@Data@Parameters$min.peak.height sampleObject@noise.threshold <- Experiment@Data@Parameters$noise.threshold #sampleObject@moving.window.length <- Experiment@Data@Parameters$moving.window.length*Experiment@Data@Parameters$scans.per.second*60 #sampleObject@moving.window.overlap <- Experiment@Data@Parameters$moving.window.overlap sampleObject@compression.coef <- Experiment@Data@Parameters$compression.coef #sampleObject@factor.minimum.sd <- Experiment@Data@Parameters$factor.minimum.sd #sampleObject@filter.matrix <- get.filter.matrix(sampleObject) sampleObject <- avoid.processing(sampleObject) factor.list <- try(get.factor.list(sampleObject, analysis.window=Experiment@Data@Parameters$analysis.time, plotting, down.sample, virtual.scans.ps), silent=F) if(class(factor.list)=="try-error") {factor.list <- as.data.frame(NULL); warning("Unable to extract factors from ", Experiment@MetaData@Instrumental$filename[index], ". Data may be corrupted.", sep="")} Experiment@Data@FactorList[[index]] <- factor.list Experiment }
b7ba484a4236d69d91f83f04088a39e12e826693
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
/data/genthat_extracted_code/Rmixmod/examples/Mixmod-class.Rd.R
66e34fd87f014d0f9b6481fa21e101de20085b4c
[]
no_license
surayaaramli/typeRrh
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
66e6996f31961bc8b9aafe1a6a6098327b66bf71
refs/heads/master
2023-05-05T04:05:31.617869
2019-04-25T22:10:06
2019-04-25T22:10:06
null
0
0
null
null
null
null
UTF-8
R
false
false
154
r
Mixmod-class.Rd.R
library(Rmixmod) ### Name: Mixmod-class ### Title: Constructor of ['Mixmod'] class ### Aliases: Mixmod-class ### ** Examples getSlots("Mixmod")
1b9af763a8daa157b6a239f539de12c93aceebaa
66c35bb07d3aa3322cfcb69b0adbd4ef8ea59f4f
/Working_collection_of_scripts/Basic_plotting_ggplot2.R
780f0abc5998eeed47a434c5c1dc0fedd59a731e
[]
no_license
jlhanson5/R_snippets
de42646e40033a57cb9f6b3699c25d09eda2c6fd
782e6f4fc49c3872a26ad917af877467d70281f0
refs/heads/master
2021-01-01T06:51:37.902718
2017-07-17T23:36:31
2017-07-17T23:36:31
97,530,925
0
0
null
null
null
null
UTF-8
R
false
false
179
r
Basic_plotting_ggplot2.R
library(ggplot2) ggplot(df, aes(x=Independent_Variable, y=Dependent_Variable)) +geom_point(shape=16, size=3,color = 'black',alpha = 0.5) + geom_smooth(method=lm, color = 'red')
356a4751a2cb5609c27095c9391b6e9fb7187e81
a0aa511780814e67695203dec46022eda1c75467
/man/clearALL.Rd
66ef0cb637417205db298a81788c91fb83a2a4d7
[]
no_license
leonpheng/xptdefine
ab031216806bbb22e8fbf7b5b56ac5fcfba726a3
62388d9927436fac5df9a05305bf6bffc2124e56
refs/heads/master
2020-04-05T02:29:19.060784
2019-11-19T15:39:29
2019-11-19T15:39:29
114,052,833
0
0
null
null
null
null
UTF-8
R
false
true
235
rd
clearALL.Rd
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/package.R \name{clearALL} \alias{clearALL} \title{clearALL} \usage{ clearALL(...) } \description{ Internal use. } \examples{ clearALL() } \keyword{clearALL}
c6b113947f5717de1c4c2a0f24678f7accbbe40b
b27fd936626536a5bba6c99e395284c6c451d376
/CountElements.R
f345cd2a81c3ad7ddccf54a3c948476e1ddfb967
[ "MIT" ]
permissive
sckaiser/BlindDate
f3db4f38e80440028b49fb2d594006e2aa776388
e8b9645ae838aad70fd1544f0928f66c9f84e230
refs/heads/master
2021-06-12T01:27:41.830690
2017-04-21T15:29:05
2017-04-21T15:29:05
33,501,969
2
0
null
null
null
null
UTF-8
R
false
false
507
r
CountElements.R
CountElements <- function(x) { # Counts the number of elements in a character string. An element is defined # as a sequence of adjacent numbers or characters, exluduing punctuation and # spaces. # Args: # x, a character vector # Returns: # y, an integer: the most frequent number of elements found. x <- TokenizeDt(x) # tokenize to a list of character vectors (elements) y <- mclapply(x, length) # count elements in each x y <- unlist(y) TrueMode(y) # return the most common. }
e56e9d046280886d6ff28d2ea00d934ade88b073
3b3b10d85d9eaba1b19e88cf63b97f5071f165e9
/man/umccr_tidy.Rd
6acb8f65d4e9af4478ac326a7ad587754dde7109
[ "MIT" ]
permissive
pdiakumis/dracarys
e416dcb9c7cc597490872a4564c8a5ac1e466fba
25fd7860440548d914a474a41017848a5fab40c8
refs/heads/main
2023-07-22T08:55:15.045965
2023-07-05T12:42:04
2023-07-05T12:42:04
237,552,776
0
1
null
null
null
null
UTF-8
R
false
true
1,601
rd
umccr_tidy.Rd
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/tidy.R \name{umccr_tidy} \alias{umccr_tidy} \title{Tidy UMCCR Results} \usage{ umccr_tidy( in_dir = NULL, out_dir = NULL, prefix = NULL, gds_local_dir = NULL, out_format = "tsv", dryrun = FALSE, token = Sys.getenv("ICA_ACCESS_TOKEN"), pattern = NULL ) } \arguments{ \item{in_dir}{Directory path to UMCCR workflow results (can be GDS or local).} \item{out_dir}{Output directory.} \item{prefix}{Prefix of output file(s).} \item{gds_local_dir}{If \code{indir} is a GDS directory, 'recognisable' files will be first downloaded to this directory.} \item{out_format}{Format of output (tsv, parquet, both) (def: tsv).} \item{dryrun}{Just list the files that will be downloaded (def: FALSE).} \item{token}{ICA access token (by default uses $ICA_ACCESS_TOKEN env var).} \item{pattern}{Pattern to further filter the returned file type tibble (see \code{name} column in the \code{FILE_REGEX} tibble).} } \value{ Tibble with path to input file and the resultant tidy object. } \description{ Tidies UMCCR workflow results into a list of tibbles and writes individual tibbles to TSV and/or Parquet format. } \examples{ \dontrun{ in_dir <- paste0( "gds://production/analysis_data/SBJ02858/tso_ctdna_tumor_only/", "20221104b7ad0b38/L2201560/Results/PRJ222206_L2201560/" ) in_dir <- here::here(glue("nogit/tso/2022-12-13/SBJ02858/dracarys_gds_sync")) out_dir <- file.path(in_dir, "../out") gds_local_dir <- NULL prefix <- "SBJ02858" dryrun <- F umccr_tidy(in_dir = in_dir, out_dir = out_dir, prefix = prefix) } }
8c7bfee10b8eafacbc3da1bad967784dd5a34f5f
ef40ccc2ab54a5ba93fbea8f5910b8374bbd5dc3
/R/taxonomic-coverage-element.R
fd4b3abfe46ee53b51f043102a02ef4a897fb95d
[ "CC0-1.0", "LicenseRef-scancode-unknown-license-reference" ]
permissive
ErinCain/EDIutils
278899ef3e3f1d97ab04d28b1f0cee5c1a794145
2eca0760679cbedf86c0a90694986bd22e4f715b
refs/heads/v0.1
2023-03-22T22:18:32.546511
2021-02-26T16:37:11
2021-02-26T16:37:11
319,430,518
0
0
CC0-1.0
2020-12-16T18:12:46
2020-12-07T20:01:58
null
UTF-8
R
false
false
8,652
r
taxonomic-coverage-element.R
#' @title Add Taxonomic Coverage
#' @description Adds the taxonomic coverage information of a dataset based off of
#' EML standards. The addition of taxonomic coverage is optional, however defaults
#' of chinook, sturgeon, smelt, and steelhead are provided. While single or
#' multiple taxonomies can be applied, the full set of information must be
#' provided if chosen to be included.
#' @param CVPIA_common_species Use one of the following from the helper data
#' \code{\link{CVPIA_common_species}}: "chinook", "delta_smelt",
#' "white_sturgeon", "green_sturgeon", or "steelhead" to get pre-selected
#' information from ITIS.
#' @param kingdom Kingdom level present.
#' @param kingdom_value The kingdom name.
#' @param phylum Phylum level present.
#' @param phylum_value The phylum name.
#' @param class Class level present.
#' @param class_value The class level name.
#' @param order Order level present.
#' @param order_value The order level name.
#' @param family Family level present.
#' @param family_value The family level name.
#' @param genus Genus level present.
#' @param genus_value The genus level name.
#' @param species Species level present.
#' @param species_value The species level name.
#' @param common_name The common name of the organism.
#' @param taxon_id Optional. The taxonomic serial number provided by ITIS.
#' @section CVPIA Common Species:
#' The following frequently cited species are available for convenience:
#' * EDIutils::CVPIA_common_species$chinook - Oncorhynchus tshawytscha (ITIS: 161980)
#' * EDIutils::CVPIA_common_species$steelhead - Oncorhynchus mykiss (ITIS: 161989)
#' * EDIutils::CVPIA_common_species$delta_smelt - Hypomesus transpacificus (ITIS: 162032)
#' * EDIutils::CVPIA_common_species$white_sturgeon - Acipenser transmontanus (ITIS: 161068)
#' * EDIutils::CVPIA_common_species$green_sturgeon - Acipenser medirostris (ITIS: 161067)
#'
#' For further taxonomic coverage (i.e. subkingdom, infrakingdom, etc.) on any of
#' these species, you can visit: \href{https://www.itis.gov/}{ITIS}'s webpage for
#' full coverage information.
#'
#' @return Taxonomic coverage information. The function should be assigned to the
#' name taxonomic_coverage to append it to the dataset or project. Example of how
#' to incorporate it into the add_coverage function is seen below.
#' @examples
#' taxonomic_coverage <- add_taxonomic_coverage(CVPIA_common_species = "chinook")
#'
#' taxonomic_coverage <- add_taxonomic_coverage(kingdom_value = "Animalia",
#'                                              phylum_value = "Chordata",
#'                                              class_value = "Mammalia",
#'                                              order_value = "Carnivora",
#'                                              family_value = "Felidae",
#'                                              genus_value = "Panthera",
#'                                              species_value = "Panthera Leo",
#'                                              common_name = "Lion",
#'                                              taxon_id = "183803")
#'
#' # To append this information to the dataset or project:
#' add_coverage(parent_element = list(), geographic_description = "Description",
#'              west_bounding_coordinate = "-160.594000",
#'              east_bounding_coordinate = "-134.104800",
#'              north_bounding_coordinate = "71.238300",
#'              south_bounding_coordinate = "67.865000",
#'              begin_date = "1980-01-01",
#'              end_date = "2010-12-31",
#'              taxonomic_coverage = taxonomic_coverage)
#' @export
add_taxonomic_coverage <- function(CVPIA_common_species = NULL,
                                   kingdom = "kingdom", kingdom_value,
                                   phylum = "phylum", phylum_value,
                                   class = "class", class_value,
                                   order = "order", order_value,
                                   family = "family", family_value,
                                   genus = "genus", genus_value,
                                   species = "species", species_value,
                                   common_name,
                                   taxon_id = NULL) {

  if (is.null(CVPIA_common_species)) {
    # Custom taxonomy: every rank value plus the common name is required.
    required_arguments <- c("kingdom_value", "phylum_value", "class_value",
                            "order_value", "family_value", "genus_value",
                            "species_value", "common_name")
    missing_argument_index <- which(c(missing(kingdom_value), missing(phylum_value),
                                      missing(class_value), missing(order_value),
                                      missing(family_value), missing(genus_value),
                                      missing(species_value), missing(common_name)))
    if (length(missing_argument_index) > 0) {
      # Report only the first missing argument with a friendly message.
      tax_error <- required_arguments[missing_argument_index][1]
      tax_error_message <- switch(tax_error,
                                  kingdom_value = "Please provide a kingdom.",
                                  phylum_value = "Please provide a phylum.",
                                  class_value = "Please provide a class.",
                                  order_value = "Please provide an order.",
                                  family_value = "Please provide a family.",
                                  genus_value = "Please provide a genus.",
                                  species_value = "Please provide a species.",
                                  common_name = "Please provide a common name.")
      stop(tax_error_message, call. = FALSE)
    }
  }

  common_species_index <- which(c(CVPIA_common_species == "chinook",
                                  CVPIA_common_species == "steelhead",
                                  CVPIA_common_species == "delta_smelt",
                                  CVPIA_common_species == "white_sturgeon",
                                  CVPIA_common_species == "green_sturgeon"))

  # FIX: previously an unrecognized CVPIA_common_species value fell through
  # to the custom branch and failed with a confusing "argument missing" error.
  if (!is.null(CVPIA_common_species) && length(common_species_index) == 0) {
    stop('CVPIA_common_species must be one of "chinook", "steelhead", "delta_smelt", "white_sturgeon", or "green_sturgeon".',
         call. = FALSE)
  }

  if (length(common_species_index) > 0) {
    # NOTE(review): assumes EDIutils::CVPIA_common_species is ordered
    # chinook, steelhead, delta_smelt, white_sturgeon, green_sturgeon --
    # confirm against the package data.
    taxonomicCoverage <- EDIutils::CVPIA_common_species[[common_species_index]]
  } else {
    # FIX: kingdom_value and phylum_value were previously overwritten here
    # with "Animalia"/"Chordata", silently discarding user-supplied values.
    # The nested list mirrors EML's kingdom > phylum > ... > species ranks.
    taxonomicCoverage <-
      list(taxonomicClassification =
             list(taxonRankName = kingdom,
                  taxonRankValue = kingdom_value,
                  taxonomicClassification =
                    list(taxonRankName = phylum,
                         taxonRankValue = phylum_value,
                         taxonomicClassification =
                           list(taxonRankName = class,
                                taxonRankValue = class_value,
                                taxonomicClassification =
                                  list(taxonRankName = order,
                                       taxonRankValue = order_value,
                                       taxonomicClassification =
                                         list(taxonRankName = family,
                                              taxonRankValue = family_value,
                                              taxonomicClassification =
                                                list(taxonRankName = genus,
                                                     taxonRankValue = genus_value,
                                                     taxonomicClassification =
                                                       list(taxonRankName = species,
                                                            taxonRankValue = species_value,
                                                            commonName = common_name))))))))
  }

  if (is.null(taxon_id)) {
    message("No taxon id has been provided. This number can be found at ITIS.gov if you wish to append it.")
  } else {
    # Append the ITIS taxon id to the innermost (species-level) node.
    taxonomicCoverage$taxonomicClassification$taxonomicClassification$taxonomicClassification$taxonomicClassification$taxonomicClassification$taxonomicClassification$taxonomicClassification$taxonId <-
      list("provider" = "https://itis.gov",
           taxonId = taxon_id)
  }

  return(taxonomicCoverage)
}
c4216f668fcf508d3f3fc83d0bc711308b116389
26ad45b7c8da382a3153bdbf5a6c50160f7b7608
/Data Mining/rxNaiveBayes.R
9dae10af8b70dd427330f872f36b1c3fe43355da
[]
no_license
hjanime/RevoEnhancements
c884aa96bdae127d8d4c3a1b1fa4a48a24a5b664
2e60942ab6884d08c7f51e20bd461146b317f5f9
refs/heads/master
2021-01-15T19:50:00.574867
2013-04-09T21:08:10
2013-04-09T21:08:10
null
0
0
null
null
null
null
UTF-8
R
false
false
2,392
r
rxNaiveBayes.R
# Train a Naive Bayes classifier on big data via RevoScaleR summary
# functions (rxGetVarInfo / rxCrossTabs / rxSummary). The returned object
# mimics e1071::naiveBayes so its predict() method can be reused.
#
# Args:
#   formula: model formula, e.g. Class ~ . or Class ~ a + b.
#   data:    a data source accepted by the RevoScaleR rx* functions.
#   laplace: additive (Laplace) smoothing constant for categorical predictors.
#   ...:     currently unused; kept for interface compatibility.
rxNaiveBayes <- function (formula, data, laplace = 0, ...) {
  require(e1071)
  call <- match.call()
  vars <- all.vars(formula)
  Yname <- vars[1]
  x <- vars[-1]
  varInfo <- rxGetVarInfo(data)
  # FIX: `x` is a character vector; `if (x == ".")` errors for
  # multi-predictor formulas (length > 1 condition, R >= 4.2).
  if (length(x) == 1L && x == ".") {
    # "Y ~ ." means: use every variable except the response.
    x <- names(varInfo)
    x <- x[!x %in% Yname]
  }
  origOrder <- x
  # Identify categorical predictors and order them first.
  catVars <- (sapply(varInfo, "[[", "varType") == "factor")[x]
  catVars <- catVars[order(catVars, decreasing = TRUE)]
  x <- names(catVars)
  catLength <- sapply(varInfo[names(which(catVars))],
                      function(x) length(x$levels))
  sumVars <- list(categorical = x[catVars], numeric = x[!catVars])
  # Estimate per-predictor summaries: conditional probability tables for
  # categorical predictors, per-class mean/sd for numeric predictors.
  est <- function(vars) {
    catSum <- numSum <- NULL
    # FIX: guard on length, not is.null() -- an empty character vector is
    # not NULL, so the old guard never skipped an empty group, and building
    # a formula from character(0) would fail for the numeric branch.
    if (length(vars[["categorical"]]) > 0L) {
      catFun <- function(x) {
        form <- as.formula(paste("~", paste(Yname, x, sep = ":")))
        tab <- rxCrossTabs(form, data, returnXtabs = TRUE)
        class(tab) <- "table"
        attr(tab, "call") <- NULL
        # Row-normalized conditional probabilities with Laplace smoothing.
        (tab + laplace)/(rowSums(tab) + laplace * catLength[x])
      }
      catSum <- lapply(vars[["categorical"]], catFun)
    }
    if (length(vars[["numeric"]]) > 0L) {
      form <- as.formula(paste("~", paste(vars[["numeric"]], Yname,
                                          sep = ":", collapse = "+")))
      numVars <- rxSummary(form, data)$categorical
      numFun <- function(x) {
        ret <- as.matrix(x[, c("Means", "StdDev")])
        myNames <- vector("list", 2)
        myNames[[1]] <- x[, 2]
        dimnames(ret) <- myNames
        return(ret)
      }
      numSum <- lapply(numVars, numFun)
    }
    ret <- c(catSum, numSum)
  }
  # Class prior counts.
  form <- as.formula(paste("~", Yname))
  apriori <- rxCrossTabs(form, data, returnXtabs = TRUE)
  class(apriori) <- "table"
  attr(apriori, "call") <- NULL
  tables <- est(sumVars)
  names(tables) <- x
  for (i in 1:length(tables)) names(dimnames(tables[[i]])) <- c("Y", x[i])
  names(dimnames(apriori)) <- "Y"
  structure(list(apriori = apriori, tables = tables,
                 levels = varInfo[[Yname]][["levels"]], call = call),
            class = c("rxNaiveBayes", "naiveBayes"))
}

#data(HouseVotes84, package = "mlbench")
#x <- HouseVotes84
#x$V17 <- rnorm(nrow(x), mean = c(-3, 5)[as.numeric(x$Class)], sd = c(.5, 2)[as.numeric(x$Class)])
#x$V18 <- rnorm(nrow(x), mean = c(2, 15)[as.numeric(x$Class)], sd = c(4, 1)[as.numeric(x$Class)])
#model <- naiveBayes(Class ~ ., data = x)
#model2 <- rxNaiveBayes(Class ~ ., data = x)
89f5b5d4f1ae45e0811cfcffa8a293ecc80687f3
e80ffb7bfb546b42354e29dd9d0c2633e3743ca9
/man/writetessa.Rd
79a1fc8942fb9d920777a9e6b6c92351ade3d61e
[]
no_license
shearwavesplitter/MFASTR
5c417f2499dcbb1df8e56786106e8ebdaa7eeb5e
a533f527cd6a4d2472ff7305f63b7f7c85467ceb
refs/heads/master
2021-01-21T06:55:13.317019
2020-02-17T14:43:59
2020-02-17T14:43:59
84,286,263
3
0
null
null
null
null
UTF-8
R
false
true
639
rd
writetessa.Rd
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/fn_writetessa.R \name{writetessa} \alias{writetessa} \title{Write TESSA .summ file} \usage{ writetessa(summ, name) } \arguments{ \item{summ}{Dataframe containing the summary file of measurements to be run in TESSA} \item{name}{Name of the file including path and .summ suffix (defaults to current working directory)} } \description{ Writes out a .summ file in the format required for TESSA } \examples{ # Create a .summ file for TESSA from all F1, F2 and F3 graded measurements cz <- summ.cz("~/path/to/summfiles") writetessa(cz,"~/TESSA/summfiles/cz.summ") }
c8bd6df04cb7a91e2aec879b8ac8c1436a0394c4
6074beaaa23e6dd8c2c2a1265da5562816f90ebc
/analysis/scripts/plotChrom.R
6f0bf932560619d9377962946ba2083b5c8663ab
[ "BSD-3-Clause" ]
permissive
msproteomicstools/msproteomicstools
935402d7984295785b36afb6a0f2fccfcba9dbe6
84e0eb00c2b4b0a26b99a916de554fdb46c4b108
refs/heads/master
2021-11-19T18:25:06.289924
2021-10-12T11:34:54
2021-10-12T11:34:54
32,137,230
47
51
NOASSERTION
2021-10-12T11:34:55
2015-03-13T07:03:21
Python
UTF-8
R
false
false
2,920
r
plotChrom.R
library(lattice)

# Sum the intensities (INT) of all points in `thisdata` recorded at retention
# time `t` whose m/z falls within `extraction_window` centred on `productmz`.
extract_chrom <- function(t, thisdata, productmz, extraction_window=0.05) {
  this_spectrum = subset(thisdata, SEC == t)
  return(sum(subset(this_spectrum,
                    MZ > productmz-(extraction_window/2) &
                    MZ < productmz+(extraction_window/2))$INT))
}

# Keep only rows of `allmx` with MZ > 400 whose label is in `xxp`; MZ becomes
# a factor so lattice can group by it. NOTE(review): not called in this script.
graphme <- function(xxp,allmx){
  xxp <- xxp[length(xxp):1]            # reverse the requested label order
  allmx <- allmx[allmx$MZ > 400,]
  sum(is.element(allmx$label,xxp))     # NOTE(review): result unused
  allmx <- allmx[is.element(allmx$label,xxp),]
  print(dim(allmx))
  allmx$MZ <- as.factor(allmx$MZ)
  return(allmx)
}

# Linear conversion from iRT (normalized retention time) to RT: m*x + c.
# Default slope/intercept are overridden per background in plotgraph().
irt2rt <- function(x,c=2148.68,m=33.87) {
  return(m*x+c)
}

# Read all *<background>*._chrom.mzML.dta2d chromatogram exports from the
# working directory, stack them with dilution labels x512..x001, and write a
# per-dilution lattice chromatogram plot to "<assay>_<background>.pdf".
plotgraph <- function(assay_irt,background,rt_extraction_window=180) {
  txtfiles <- dir(pattern=glob2rx(paste("*",background,"*","._chrom.mzML.dta2d",sep="")))
  rawdata <- list()
  for(i in 1:length(txtfiles))
  {
    # Tab-separated dump with columns: retention time, m/z, intensity.
    rawdata[[i]] <- read.csv(txtfiles[i], sep="\t")
    names(rawdata[[i]])<-c("SEC","MZ","INT")
  }
  # use this code to extract chromatograms
  # data <- list()
  # for(i in 1:length(txtfiles))
  # {
  #   df<-data.frame()
  #   for(j in 1:length(productmz)) {
  #     dfj <- data.frame("INT" = sapply( unique(rawdata[[i]]$SEC), extract_chrom, thisdata=rawdata[[i]], productmz=productmz[j]), "SEC"=unique(rawdata[[i]]$SEC))
  #     dfj$MZ <- rep(productmz[j],dim(dfj)[1])
  #     df<-rbind(df,dfj)
  #   }
  #   data[[i]] = df
  # }
  data<-rawdata
  # Dilution-series labels; assumes exactly 10 input files -- TODO confirm.
  xx <- c("x512","x256","x128","x064","x032","x016","x008","x004","x002","x001")
  length(xx)
  # Stack all runs into one frame with a per-run dilution label.
  allm <- NULL
  label <- NULL
  for(i in 1:10){
    allm <- rbind(allm,data[[i]])
    labelt <- rep(xx[i],dim(data[[i]])[1])
    label <- c(label, labelt)
  }
  allm <- cbind(label, allm)
  allm <- data.frame(as.factor(allm$label), as.numeric(allm$SEC),
                     as.numeric(allm$MZ), as.numeric(allm$INT))
  colnames(allm) <- c("label","SEC","MZ","INT")
  colnames(allm)
  allm$label[1:10]
  xxs <- c("x512","x256","x128","x064","x032","x016","x008","x004","x002","x001")
  allmx <- allm
  # Background-specific iRT->RT calibration (slope/intercept per matrix).
  if (background=="human") {
    irt<-irt2rt(assay_irt[[1]],1687.64,33.61)
  } else if (background=="yeast") {
    irt<-irt2rt(assay_irt[[1]],2105.2,34.27)
  } else if (background=="no_background") {
    irt<-irt2rt(assay_irt[[1]],2150.32,35.05)
  }
  # One panel per dilution, traces grouped by transition m/z, window of
  # +/- rt_extraction_window seconds around the expected RT.
  pdf(file=paste(names(assay_irt)[[1]],"_",background,".pdf",sep=""),
      width=6, height=length(xxs)*1.5)
  print(xyplot(INT ~ SEC | label,
               data=subset(allmx, SEC >= irt-rt_extraction_window & SEC <= irt+rt_extraction_window),
               type="l",
               xlim=c(irt-rt_extraction_window,irt+rt_extraction_window),
               scales=list(y=list(relation="free", cex=0.7,rot=45)),
               groups=MZ,
               layout=c(1,length(xxs)),
               xlab="RT [s]", ylab="INT",
               as.table=TRUE))
  dev.off()
}

# Map of plot label -> background token used in the file-name pattern.
background<-list("water"="no_background","yeast"="yeast","human"="human")
# Assay peptides with their library iRT values.
assays<-list("VGDTVLYGK"=3.7,"IADIQLEGLR"=49.4,"TGGDEFDEAIIK"=40.8,"LITVEGPDGAGK"=10.9,"LVDEEGNDVTPEK"=-5.1)
# The current working directory name selects which assay to plot.
assay_irt<-assays[tail(strsplit(getwd(),"/")[[1]],n=1)]
for(j in 1:length(background)) {
  plotgraph(assay_irt,background[[j]])
}
4872fc1d1bcdd012d9c9f788f7f266786161b0f1
440ca1ee319392290b318dc28069b70f4d84213e
/sandbox/test_aggRviz_join.R
050daef02fa637ab6c650ea9c5c255e0d2e8a42c
[ "MIT" ]
permissive
amygoldlist/aggRviz
6a4f6cca2490d1df373d135760a3580eaf527126
08006d52ae1d1d4bb077534f2b170c3194e6f6bd
refs/heads/master
2020-03-13T00:48:18.316031
2018-06-28T19:10:15
2018-06-28T19:10:15
130,893,468
0
1
null
2018-06-28T19:10:16
2018-04-24T17:53:37
R
UTF-8
R
false
false
736
r
test_aggRviz_join.R
context("testing aggRviz_join") load("testdata/yummy.Rda") df1 <- dat_1 %>% dplyr::filter(Dessert == "") %>% dplyr::select(-Dessert) df2 <- dat_2 %>% dplyr::filter(Sweet_or_Salty=="") %>% dplyr::select(-Sweet_or_Salty) df <- dplyr::inner_join(df1,df2) test_that('AggRviz_join basic functionality', { expect_equal(aggRviz_join(dat_1,dat_2), df) expect_is(aggRviz_join(dat_1,dat_2),'data.frame') #expect_is(filter_blanks(df_blanks),'data.frame') #expect_equal(filter_blanks(df_noblanks), df_noblanks) #expect_equivalent(filter_blanks(df_blanks), df_blanks_filtered) #expect_error(filter_blanks(5), "data should be a dataframe!") #expect_error(filter_blanks(c(4,65)), "Error: data should be a dataframe!") })
1dda59846674c0f5f18cd7d6869c72ff16fede93
8e1f9c5b510459b3a89a62814990c5e8675ae541
/OrdinalRecommenders_1.R
b5cf58f742b79877381d5e78c37fe1f85a5654bd
[]
no_license
harpreetSinghGuller/MDA
2062b1c79cd184c8cf302757eee2a14420e448d0
b686bc45e06dcc42e282df755fd17532b77bbf27
refs/heads/master
2021-10-02T05:05:46.134811
2018-11-29T07:33:45
2018-11-29T07:33:45
98,188,754
0
0
null
null
null
null
UTF-8
R
false
false
7,477
r
OrdinalRecommenders_1.R
### --------------------------------------------------
### --- Recommender Systems: Ordinal Logistic Regression hello
### --- Goran S. Milovanović, PhD
### --- Data Kolektiv, Belgrade, Serbia
### --- Developed for: SmartCat, Novi Sad, Serbia
### --- 25 February 2017.
### --- MovieLens 100K Data Set
### --------------------------------------------------
### --------------------------------------------------
### --- The MovieLens 100K Dataset:
### --- F. Maxwell Harper and Joseph A. Konstan. 2015.
### --- The MovieLens Datasets: History and Context.
### --- ACM Transactions on Interactive Intelligent Systems
### --- (TiiS) 5, 4, Article 19 (December 2015), 19 pages.
### --- DOI=http://dx.doi.org/10.1145/2827872
### --------------------------------------------------
### --------------------------------------------------
### --- Part 1A: Import Data + export CSV
### --------------------------------------------------
# NOTE(review): clearing the workspace inside a script is discouraged.
rm(list = ls())
library(readr)
library(dplyr)
library(tidyr)
library(Matrix)
library(text2vec)

### --- ratings data
# NOTE(review): this first setwd() path looks like a placeholder left in by
# mistake ("its wrong need to changes") -- confirm the intended directory.
setwd('./its wrong need to changes')
setwd('./data100K')
# u.data: tab-separated ratings (user, movie, rating, timestamp).
ratingsData <- read_delim('u.data',
                          col_names = F,
                          delim = '\t')
colnames(ratingsData) <- c('UserID', 'MovieID','Rating', 'Timestamp')
setwd('../outputs100K')
write_csv(ratingsData,
          path = paste0(getwd(),'/ratings.csv'),
          append = F, col_names = T)

### --- user data
setwd('../data100K')
usersData <- read_delim('u.user',
                        col_names = F,
                        delim = '|',
                        col_types = list(col_integer(),
                                         col_integer(),
                                         col_character(),
                                         col_character(),
                                         col_character()))
colnames(usersData) <- c('UserID', 'Age', 'Gender', 'Occupation', 'Zip-code')
setwd('../outputs100K')
write_csv(usersData,
          path = paste0(getwd(),'/users.csv'),
          append = F, col_names = T)

### --- movies data
setwd('../data100K')
moviesData <- read_delim('u.item',
                         col_names = F,
                         delim = '|')
# Columns 4 and 5 of u.item are dropped before naming the rest.
moviesData$X4 <- NULL
moviesData$X5 <- NULL
colnames(moviesData) <- c('MovieID', 'Title', 'Date',
                          'unknown', 'Action', 'Adventure' , 'Animation',
                          'Children\'s', 'Comedy', 'Crime', 'Documentary',
                          'Drama', 'Fantasy', 'Film-Noir', 'Horror',
                          'Musical', 'Mystery', 'Romance', 'Sci-Fi',
                          'Thriller', 'War', 'Western')
setwd('../outputs100K')
write_csv(moviesData,
          path = paste0(getwd(),'/movies.csv'),
          append = F, col_names = T)

### --------------------------------------------------
### --- Part 1B: Feature Engineering
### --------------------------------------------------
rm(list = ls())

### --- load data
ratingsData <- read_csv("ratings.csv", col_names = T)
usersData <- read_csv("users.csv", col_names = T)
moviesData <- read_csv("movies.csv", col_names = T)

### -- clean-up a bit (one movie is 'unknown'):
w <- which(moviesData$Title == 'unknown')
unknownID <- moviesData$MovieID[w]
# - fix for the 'unknown' movie in moviesData
# NOTE(review): indexing rows by -unknownID assumes MovieID equals the row
# number at this point -- confirm.
moviesData <- moviesData[-unknownID, ]
# - fix ID numbers after removing the 'unknown' movie
moviesData$MovieID[moviesData$MovieID > unknownID] <-
  moviesData$MovieID[moviesData$MovieID > unknownID] - 1
# - fix for the 'unknown' movie in ratingsData
w <- which(ratingsData$MovieID == unknownID)
ratingsData <- ratingsData[-w, ]
# - fix ID numbers after removing the 'unknown' movie in ratingsData
ratingsData$MovieID[ratingsData$MovieID > unknownID] <-
  ratingsData$MovieID[ratingsData$MovieID > unknownID] - 1
# - save ratingsData without the 'unknown' movie: model version
write.csv(ratingsData, "ratingsData_Model.csv")

### --- Compute moviesDistance w. Jaccard {text2vec} from movie genres
moviesData <- moviesData %>%
  separate(col = Date, into = c('Day', 'Month','Year'), sep = "-")
moviesData$Day <- NULL
moviesData$Month <- NULL
moviesData$Year <- as.numeric(moviesData$Year)
range(moviesData$Year)
# - that would be: [1] 1922 1998
# - Introduce Movie Decade in place of Year:
decadeBoundary <- seq(1920, 2000, by = 10)
# Map each year to the lower bound of its decade; years before 1920 map to 1.
moviesData$Year <- sapply(moviesData$Year, function(x) {
  wL <- x < decadeBoundary
  wU <- x >= decadeBoundary
  if (sum(wL) == length(decadeBoundary)) {
    return(1)
  } else if (sum(wU) == length(decadeBoundary)) {
    decadeBoundary[length(decadeBoundary)]
  } else {
    decadeBoundary[max(which(wL-wU == -1))]
  }
})
# - Match moviesData$Year with ratingsData:
mD <- moviesData %>% select(MovieID, Year)
ratingsData <- merge(ratingsData, mD, by = 'MovieID')
# - Movie Year (now Decade) as binary:
moviesData <- moviesData %>%
  spread(key = Year, value = Year, fill = 0, sep = "_")
# - compute moviesDistance:
moviesDistance <- moviesData[, 3:ncol(moviesData)]
w <- which(moviesDistance > 0, arr.ind = T)
moviesDistance[w] <- 1
# NOTE(review): the binarized matrix built just above is immediately
# discarded -- dist2() runs on moviesData[, 4:ncol(moviesData)], whose
# Year_* columns still hold raw decade values, and the column offset
# differs (4 vs 3). Likely a bug; confirm which matrix was intended.
moviesDistance <- dist2(Matrix(as.matrix(moviesData[, 4:ncol(moviesData)])),
                        method = "jaccard")
moviesDistance <- as.matrix(moviesDistance)
rm(moviesData); gc()
# - save objects and clear:
numMovies <- length(unique(ratingsData$MovieID))
write_csv(as.data.frame(moviesDistance),
          path = paste0(getwd(),'/moviesDistance.csv'),
          append = F, col_names = T)
rm(moviesDistance); gc()

### --- produce binary User-Item Matrix (who rated what only):
userItemMat <- matrix(rep(0, dim(usersData)[1]*numMovies),
                      nrow = dim(usersData)[1],
                      ncol = numMovies)
# Mark each (UserID, MovieID) pair that has a rating.
userItemMat[as.matrix(ratingsData[c('UserID', 'MovieID')])] <- 1
rm('w', 'ratingsData', 'usersData'); gc()

### --- Compute userDistance w. Jaccard {text2vec}
userItemMat <- Matrix(userItemMat)
usersDistance <- dist2(userItemMat, method = "jaccard")
rm(userItemMat); gc()
usersDistance <- as.matrix(usersDistance)
write_csv(as.data.frame(usersDistance),
          path = paste0(getwd(),'/usersDistance.csv'),
          append = F, col_names = T)
rm(usersDistance); gc()

### --- Compute User-User and Item-Item Ratings Similarity Matrices
ratingsData <- read_csv("ratingsData_Model.csv", col_names = T)
ratingsData$X1 <- NULL
# - User-Item Ratings Matrix
ratingsData$Timestamp <- NULL
ratingsData <- ratingsData %>%
  spread(key = MovieID, value = Rating, sep = "_") %>%
  arrange(UserID)
# - Pearson Correlations: User-User Sim Matrix
UserUserSim <- ratingsData %>%
  select(starts_with("Movie"))
UserUserSim <- t(UserUserSim)
UserUserSim <- cor(UserUserSim, use = 'pairwise.complete.obs')
UserUserSim <- as.data.frame(UserUserSim)
write_csv(UserUserSim,
          path = paste0(getwd(),'/UserUserSim.csv'),
          append = F, col_names = T)
rm(UserUserSim); gc()
# - Pearson Correlations: Item-Item Sim Matrix
ItemItemSim <- ratingsData %>%
  select(starts_with("Movie"))
rm(ratingsData); gc()
ItemItemSim <- cor(ItemItemSim, use = 'pairwise.complete.obs')
ItemItemSim <- as.data.frame(as.matrix(ItemItemSim))
write_csv(ItemItemSim,
          path = paste0(getwd(),'/ItemItemSim.csv'),
          append = F, col_names = T)
rm(ItemItemSim); gc()
d9c3ff76a9a0469205ac646b480d810b1f499583
401771109480da6da170bbbd0ecb2b4bad42ab30
/man/fullMultiFit.Rd
0a59897a2ae77e0974779fe935f83c74eb629f11
[ "LicenseRef-scancode-warranty-disclaimer", "LicenseRef-scancode-public-domain" ]
permissive
usnistgov/potMax
8ab9612aeab1998ce304f0968ab81a29c50af733
a7271d3a12f4053edea0ac2004e5efb1dc41382c
refs/heads/master
2020-04-04T09:48:12.671567
2019-01-24T19:26:42
2019-01-24T19:26:42
52,827,981
1
0
null
2018-04-20T21:34:55
2016-02-29T22:00:25
R
UTF-8
R
false
true
2,668
rd
fullMultiFit.Rd
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/multi_threshold_fits.R \name{fullMultiFit} \alias{fullMultiFit} \alias{fullMultiFit.declustered_series} \alias{fullMultiFit.default} \title{Maximum Likelihood Estimation of the Full Model for Many Thresholds} \usage{ fullMultiFit(x, lt, n_min, n_max, weight_scale, n_starts, progress_tf) \method{fullMultiFit}{declustered_series}(x, lt, n_min, n_max, weight_scale, n_starts, progress_tf = TRUE) \method{fullMultiFit}{default}(x, lt, n_min, n_max, weight_scale, n_starts, progress_tf = TRUE) } \arguments{ \item{x}{An S3 object of class \code{declustered_series} or a numeric vector. If the latter, the values to be thresholded and used in fitting.} \item{lt}{(numeric scalar) The length of the time series in units of time (seconds, minutes, hours, etc.).} \item{n_min}{(numeric scalar) The minimum number of thresholded observations to include} \item{n_max}{(numeric scalar) The maximum number of thresholded observations to include} \item{weight_scale}{(numeric scalar) The value of \eqn{\tau}} \item{n_starts}{(numeric scalar) The number of random starts to use in the search for the maximum} \item{progress_tf}{(logical scalar) Display a progress bar if TRUE, else not.} } \value{ An S3 object of class \code{full_multi_fit} with elements \describe{ \item{\code{$all_fits}}{An object of type \code{full_pot_fit} for each threshold} \item{\code{$thresholds}}{The thresholds for the fits} \item{\code{$weights}}{The weights associated with the fitted model for each threshold} \item{\code{$lt}}{The value of the \code{lt} argument} \item{\code{$n_min}}{The value of the \code{n_min} argument} \item{\code{$n_max}}{The value of the \code{n_max} argument} \item{\code{$weight_scale}}{The value of the \code{weight_scale} argument} } } \description{ Fit the full 2D extremal Poisson process for many thresholds } \details{ \code{fullMLE} and \code{fullWPlot} are called for a sequence of thresholds. 
Weights associated with each fit are also calculated. Suppose that for threshold \eqn{u_i} the maximum vertical distance from a point on the W plot to the \eqn{45^\circ} line is \eqn{\delta_i} such that the \eqn{\delta_i} are scaled to the unit interval. The weight associated with threshold \eqn{u_i} is then \deqn{\frac{\exp\{-\tau\delta_i\}}{\sum\exp\{-\tau\delta_i\}}} } \section{Methods (by class)}{ \itemize{ \item \code{declustered_series}: \item \code{default}: }} \examples{ \dontrun{ ddat <- decluster(-jp1tap813wind315$value) multi_est <- fullMultiFit(x = ddat, lt = 100, n_min = 10, n_max = 50, weight_scale = 5) } }
6731c6ca000df89a681d1f9efc24e82abab00235
7db57346716cd9684617c7477c9268049af0f1f2
/R/utils.R
d542e08db1d673cadbcad26b601fef316a6ffd11
[]
no_license
trafficonese/hutilscpp
6e9695056f9d7ac7a1b3343c8f2d714d558f1f47
2f57360a1dea10fc87688539f816e4e11d35ad3c
refs/heads/master
2020-07-08T22:45:01.021156
2019-08-22T11:27:46
2019-08-22T11:27:46
null
0
0
null
null
null
null
UTF-8
R
false
false
2,299
r
utils.R
# Validate that `x` is a length-one, non-NA logical (TRUE or FALSE).
# Returns NULL invisibly on success; stops with a message naming the
# caller's expression (via deparse/substitute) otherwise.
check_TF <- function(x) {
  if (is.logical(x) && length(x) == 1L) {
    if (anyNA(x)) {
      xc <- deparse(substitute(x))
      stop("`", xc, " = NA` but must be TRUE or FALSE. ",
           "Change `", xc, "` to be TRUE or FALSE.")
    } else {
      return(NULL)
    }
  } else {
    xc <- deparse(substitute(x))
    if (length(x) != 1L) {
      stop("`", xc, "` had length ", length(x), " but must be length-one. ",
           "Change `", xc, "` to be TRUE or FALSE.")
    } else {
      stop("`", xc, "` was type ", typeof(x), " but must be logical. ",
           "Change `", xc, "` to be TRUE or FALSE.")
    }
  }
}

# Test whether `a` fails to be a valid scalar number. Returns FALSE when `a`
# is acceptable; otherwise returns TRUE carrying an "ErrorMessage" attribute
# describing the first failed check (type, length, NA, or non-finite).
isnt_number <- function(a, na.bad = TRUE, infinite.bad = TRUE) {
  if (!is.numeric(a)) {
    o <- TRUE
    ac <- deparse(substitute(a))
    attr(o, "ErrorMessage") <-
      paste0("`", ac, "` was a ", class(a), ", but must be numeric.")
    return(o)
  }
  if (length(a) != 1L) {
    o <- TRUE
    ac <- deparse(substitute(a))
    attr(o, "ErrorMessage") <-
      paste0("`", ac, "` had length ", length(a), ", but must be length-one.")
    return(o)
  }
  if (na.bad && is.na(a)) {
    o <- TRUE
    ac <- deparse(substitute(a))
    attr(o, "ErrorMessage") <-
      paste0("`", ac, "= NA`, but this is not permitted.")
    return(o)
  }
  if (infinite.bad && is.infinite(a)) {
    o <- TRUE
    ac <- deparse(substitute(a))
    attr(o, "ErrorMessage") <-
      paste0("`", ac, "` was not finite, but this is not permitted.")
    return(o)
  }
  FALSE
}

# Word aliases for the scalar boolean operators.
AND <- `&&`
OR <- `||`

# Numeric tolerance used for integerish comparisons.
epsilon <- function() {
  sqrt(.Machine$double.eps)
}

#' @noRd
#' @param xi integer version of \code{x}. May be cheaper if already known
# Return the 1-based position of the first element of `x` that is not
# (within epsilon) a whole number, or 0L if all are integerish.
# do_range_dbl is presumably a compiled (C++) helper returning
# c(min, max, which.min, which.max) -- TODO confirm against src/.
which_isnt_integerish <- function(x, xi = as.integer(x)) {
  if (is.integer(x)) {
    return(0L)
  }
  e <- epsilon()
  # slower to use -e, e when *validating* data,
  # which should be the benchmark, since it
  # doesn't matter how fast you are when you
  # are about to error.
  d_r <- do_range_dbl(x - xi)
  if (d_r[2L] > e) {
    return(as.integer(d_r[4L]))
  }
  if (d_r[1L] < -e) {
    return(as.integer(d_r[3L]))
  }
  0L
}

# Backport of base::isFALSE (added in R 3.5): scalar, non-NA, logical FALSE.
isFALSE <- function(x) {
  is.logical(x) && length(x) == 1L && !anyNA(x) && !x
}

# Dispatch to the compiled radix helper matching the storage type of `x`.
firstNonNegativeRadix <- function(x, ...) {
  if (is.double(x)) {
    do_firstNonNegativeRadix_dbl(x, ...)
  } else {
    do_firstNonNegativeRadix_int(x, ...)
  }
}
246b9b7724d4a23433a877fc01286ae516f0df93
b153a1175385e6c77771a21d44269ff6309af38f
/assignment regression2.R
896e574e9c371495cb560dbf1d8433d6d1765b0d
[]
no_license
liuzanzan/The-Assignment-2-Liu-Zanzan
034507117b50fd521dd3ba627a244071dd3e6345
bcd757981acf464647a2604c120e4d3bc2bbf30f
refs/heads/master
2021-08-23T06:33:33.425114
2017-12-03T22:50:45
2017-12-03T22:50:45
112,970,516
0
0
null
null
null
null
UTF-8
R
false
false
4,328
r
assignment regression2.R
#########################
#  backward regression  #
#########################
# Script: fit a regression model of pain, run diagnostics, perform backward
# elimination, and compare the resulting model with a theory-based model on
# a held-out sample.
library(psych)	# for describe
library(car)	# for residualPlots, vif, pairs.panels, ncvTest
library(ggplot2)	# for ggplot
library(cAIC4)	# for cAIC
library(r2glmm)	# for r2beta
library(influence.ME)	# for influence
library(lattice)	# for qqmath
library(reshape2)	# for melt function

# data
mydata = read.csv("https://raw.githubusercontent.com/kekecsz/PSYP13_Data_analysis_class/master/home_sample_1.csv")
### check data set for invalid data (e.g. coding errors)
# descriptive statistics
summary(mydata)
# according to the summary, there is a participant whose sex is "3", and a
# participant whose mindfulness value is negative.
# use which() to find these participants and get rid of them
which(mydata$sex=="3")
which(mydata$mindfulness<0)
# NOTE(review): rows 15 and 24 are hard-coded from the which() output above.
mydata1=mydata[-c(15,24),]
summary(mydata1)
# model 3
mod_pain3 <- lm(pain ~ sex + age + STAI_trait + pain_cat + cortisol_serum + mindfulness + weight, data = mydata1)
summary(mod_pain3)
AIC(mod_pain3)

### Model diagnostics
# Fit the final model (refit of the same formula as above)
mod_pain3 = lm(pain ~ sex + age + STAI_trait + pain_cat + cortisol_serum + mindfulness + weight , data = mydata1)
# checking for influential outliers
plot(pain ~ factor(sex), data = mydata1)
plot(pain ~ age, data = mydata1)
abline(lm(pain ~ age, data = mydata1))
plot(mod_pain3, which = 4)   # Cook's distance
plot(mod_pain3, which = 5)   # residuals vs leverage
### checking assumptions
# normality assumption
# QQ plot
plot(mod_pain3, which = 2)
# skew and kurtosis
describe(residuals(mod_pain3))
# histogram
hist(residuals(mod_pain3), breaks = 20)
# linearity assumption
# predicted values against actual values
pred <- predict( object = mod_pain3 )
plot( x = pred, y = mydata1$pain, xlab = "Fitted Values", ylab = "Observed Values")
# predicted values against residuals
plot(mod_pain3, which = 1)
# residual plot for each predictor from the car package, returning the result of linearity tests
residualPlots(mod_pain3)
# homoscedasticity assumption (homogeneity of variance)
plot(mod_pain3, which = 3)
ncvTest(mod_pain3)

##############################
#    Backward regression     #
##############################
### which is the best predictor
names(mydata1)
# there is some multicollinearity
vif(mod_pain3)
# stepwise backward elimination by AIC
mod_pain3_back = step(mod_pain3, direction = "backward")
summary(mod_pain3_back)
###############################################################################################
backward_model=lm(formula = pain ~ age + pain_cat + cortisol_serum + mindfulness, data = mydata1)
theory_based_model = lm(pain ~ sex + age + STAI_trait + pain_cat + cortisol_serum + cortisol_saliva + mindfulness , data = mydata1)
summary(backward_model)
summary(theory_based_model)
# the 95% CI
confint(backward_model)
# beta
# NOTE(review): lm.beta() is not provided by any library() call above
# (it lives in QuantPsyc / lm.beta) -- confirm the intended package.
lm.beta(backward_model)

### Comparing models
## first, compare the initial model with the backward model
AIC(mod_pain3)
AIC(backward_model)
anova(backward_model, mod_pain3)
# compare with AIC
AIC(backward_model)
AIC(theory_based_model)
# anova
anova(backward_model, theory_based_model)
# training set 1
backward_model_train = lm(pain ~ age + pain_cat + cortisol_serum + mindfulness, data = mydata1)
summary(backward_model_train)
# training set 2
theory_based_model_train = lm(pain ~ sex + age + STAI_trait + pain_cat + cortisol_serum + cortisol_saliva + mindfulness , data = mydata1)
summary(theory_based_model_train)
pred_backward_train = predict(backward_model_train)
pred_theory_train= predict(theory_based_model_train)
# NOTE(review): row counts 1:158 / 1:160 below are hard-coded sample sizes.
RSS_backward_train = sum((mydata1[1:158,"pain"] - pred_backward_train)^2)
RSS_theory_train = sum((mydata1[1:158,"pain"] - pred_theory_train)^2)
RSS_backward_train
RSS_theory_train
# check model performance on the test set
test_data=read.csv("https://raw.githubusercontent.com/kekecsz/PSYP13_Data_analysis_class/master/home_sample_2.csv")
pred_backward_test <- predict(backward_model_train, test_data)
pred_theory_test <- predict(theory_based_model_train, test_data)
# calculate the sum of squared residuals
RSS_backward_test = sum((test_data[1:160,"pain"] - pred_backward_test)^2)
RSS_theory_test = sum((test_data[1:160,"pain"] - pred_theory_test)^2)
RSS_backward_test
RSS_theory_test
05075e19bae9a52594671d9d46ce5b096d03f299
1e99edaf168955eff672209c33227d499fcc310a
/分拆.R
d19e8429024b4f21454438d389a229453fe67d7d
[]
no_license
GuitarHero-Eric/BOCCFC-supporting
d9ab0f2a33abc0fef97aefbd61a9b5de1c43f0cf
65d6a9733e34d25982b1b9e8270d288906f318ab
refs/heads/master
2021-03-30T21:28:09.951217
2018-03-09T09:36:39
2018-03-09T09:36:39
124,482,520
0
0
null
null
null
null
GB18030
R
false
false
352
r
分拆.R
a<-read.csv("kaishu.csv") q<-as.data.frame(table(a$区域中心)) # a<-subset(a,管理团队=="首逾团队") dir.create(paste("kaishu",sep = ""), showWarnings = FALSE) for (x in 1:nrow(q)) { x1<-subset(a,区域中心==q[x,1]) wd<-as.character(q[x,1]) wd<-gsub(" ","",wd) write.csv(x1,paste("E:/R路径/kaishu/",wd,".csv",sep="")) }
6096dd9520fc56b6f6032c3b113ca48bd064812b
119df04eb38038e570e7edbcf1d3ea3a233f99de
/task5-Text Analytics/Code/e10.r
13416bf0a5253597f8d2b8e814962d5769c11fb7
[]
no_license
AlexGithubbb/Data-Analysis-using-R
f751e8d59a08e222324c94b9f1cb4c8278b2f389
7048744fbf31bc908cdc99f5cadc560d2fbd1e8b
refs/heads/master
2020-04-12T20:31:59.255144
2018-12-21T17:32:38
2018-12-21T17:32:38
162,739,098
0
0
null
null
null
null
UTF-8
R
false
false
529
r
e10.r
#========================================= # Task5 Exercise 10: use wordcloud library to visualize both training dtm and testing dtm install.packages("wordcloud") library("wordcloud") #the training dtm visualization wordcloud(corpus_train, max.words=100, # look at the 100 most common words scale=c(4, 0.5, 7)) # the testing dtm visualization wordcloud(corpus_test, max.words=100, # look at the 100 most common words scale=c(4, 0.5, 7)) #=========================================
b7893c42f2a52eaf7397c8ffb522b8aefd66082c
d55710d4a1d5612c59a1558ce3e8edcbefcc0b54
/man/GFQ.Rd
d8996b8d129a5a798fd2a6cc5017614f4cf37fa6
[]
no_license
elamrireda/FunctQuant
35fdb7315c09e9e42a6b12ca039eb64774040e45
45794620febfe0eb90d580f37d7006fbdd4c59fe
refs/heads/master
2023-02-05T11:11:55.729805
2020-12-31T11:31:15
2020-12-31T11:31:15
324,865,397
0
0
null
null
null
null
UTF-8
R
false
false
1,429
rd
GFQ.Rd
\name{GFQ} \alias{GFQ} %- Also NEED an '\alias' for EACH other topic documented here. \title{Greedy Functional Quantization} \description{Data-driven greedy functional quantization based on the distortion error or the maximin (a space-filling design criterion) } \usage{GFQ(data,mKL,size,method,deepstart=TRUE)} %- maybe also 'usage' for other objects documented here. \arguments{ \item{data}{matrix that we want to quantize.} \item{mKL}{truncation argument for the dimension reduction.} \item{size}{size of the quantization grids.} \item{method}{"L2" or "maximin".} \item{deepstart}{maximin, if TRUE: the quantization is started by the central curve.} } \value{ \item{data}{the input matrix.} \item{quantizer}{the quantizer grid (curves are chosen among the input data).} \item{weights}{the associated weight of each curve (calculated using the input matrix).} } \seealso{ \code{\link{CVT}} and \code{\link{StochGradient}} } \examples{ ##### function to generate realizations of BM BM <- function(N=1000,M=1,x0=0,t0=0,T=1,Dt=NULL) { Dt <- (T - t0)/N t <- seq(t0, T, by=Dt) res <- data.frame(sapply(1:M,function(i) c(0,cumsum(rnorm(N,mean =0,sd=sqrt(Dt)))))) names(res) <- paste("X",1:M,sep="") X <- ts(res, start = t0, deltat = Dt) return(X) } data <- t(BM(N=200-1,M=200)) mKL <- 2 size <- 10 method <- "maximin" quant <- GFQ(data,mKL,size,method,deepstart=TRUE) }
4839ff0a5d7e94425013564c77e29b31b874960c
8002812bb637d2c37044fc4b9c81c472a71c18ff
/man/hello.Rd
2ed11027028ceb8c3c2f2b6dad932dc9796d0524
[]
no_license
321k/GoogleTRends
d6e9da26df255195958f94db0eb9707585839e7a
c1603e0aa16152ac6e6c199154431ee3a0a38d6b
refs/heads/master
2021-01-10T22:37:49.100205
2016-09-29T14:04:17
2016-09-29T14:04:17
69,573,156
0
0
null
null
null
null
UTF-8
R
false
false
170
rd
hello.Rd
\name{GoogleTRends} \alias{GoogleTRends} \title{GoogleTRends} \usage{ hello() } \description{Download and read Google Trends data from your browser} \examples{ hello() }
c3118cf6e45669f137d5fdd7a66d8705cef4c41c
282937161c6f9e14877757c56dabc66d0621a413
/load_1000/statistics_function.R
8db3583e28a406d4aac9535b65e2b42787eb990c
[]
no_license
tarasevic-r/Vibro-acoustic
e292a6c1ebd1121d5640d9a17600466a8d9ba3f4
3eda43eed45d8eeef37c547ec479223b70589a71
refs/heads/master
2022-04-18T16:19:59.853574
2020-04-02T06:25:38
2020-04-02T06:25:38
247,627,489
0
0
null
null
null
null
UTF-8
R
false
false
748
r
statistics_function.R
# function for main statistics calculation statistics <- function(sample_index, time, value, from, to) { data.frame( Sample_Index = sample_index , Length = length(time) , Sample_start = from , Sample_end = to , Mode_freq_t = as.integer(Mode_f(diff(time))) , Period_from = min(time) , Period_to = max(time) , Time_period = round((max(as.numeric(time)) - min(as.numeric(time)))/3600, 2) , Value_min = round(min(value, na.rm=T), 2) , Value_max = round(max(value, na.rm=T), 2) , Range = round(max(value, na.rm=T) - min(value, na.rm=T), 2) , Mean = round(mean(value, na.rm=T), 2) , Sd = round(sd(value, na.rm=T), 2) , Median = round(median(value, na.rm=T), 2) ) }
cd03d1d982b4a3bb51ff6bf0236fc842f96d3558
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
/data/genthat_extracted_code/AnalyzeFMRI/examples/N2G.Rd.R
9542b87970578b98b3bc348ee36f558f30a37735
[]
no_license
surayaaramli/typeRrh
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
66e6996f31961bc8b9aafe1a6a6098327b66bf71
refs/heads/master
2023-05-05T04:05:31.617869
2019-04-25T22:10:06
2019-04-25T22:10:06
null
0
0
null
null
null
null
UTF-8
R
false
false
375
r
N2G.Rd.R
library(AnalyzeFMRI) ### Name: N2G ### Title: Fits the N2G model ### Aliases: N2G ### Keywords: utilities ### ** Examples par <- c(3, 2, 3, 2, .3, .4) data <- c(rnorm(10000), rgamma(2000, 10, 1), -rgamma(1400, 10, 1)) hist(data, n = 100, freq = FALSE) q <- N2G.Fit(data, par, maxit = 10000, method = "BFGS") p <- seq(-50, 50, .1) lines(p, N2G.Density(p, q), col = 2)
24c21d3420bd96e1dd94c2add5c8c8e9c6414c92
0e040f6bfff65e8324e0f9f30d494deaf7c596d9
/trackingMyself_huaweiConvertExportToGpx.R
6510902949eda1d652e6eb5d63666d0b3cda31dd
[ "MIT" ]
permissive
Rubbert/QuantifiedScientist
06deb2c97c20f2523bb4487f92af9974e20bca85
0d68c308286c3b18604a1f7f9206f4ebe689689a
refs/heads/master
2023-08-16T01:58:27.773415
2021-10-08T20:08:20
2021-10-08T20:08:20
274,339,241
2
0
null
null
null
null
UTF-8
R
false
false
4,835
r
trackingMyself_huaweiConvertExportToGpx.R
setwd('/Users/rterhorst/Research/src/') library(caret) library('rlang') library("foreign") library("R.utils") library('zoo') library(ggplot2) #library(chron) #library(lme4) library("openxlsx") #library(hash) library(stringr) library(data.table) library('jsonlite') library(lubridate) source('trackingMyself_mainSetOfFxns.R') Lines <- readLines('/Users/rterhorst/Research/src/ConfigFiles/Directories_macosx_16inch.cfg') Lines2 <- chartr("[]", "==", Lines) DF <- read.table(textConnection(Lines2), as.is = TRUE, sep = "=", fill = TRUE) ini_data = subset(transform(DF, V3 = na.locf(ifelse(V1 == "", V2, NA))), V1 != "") #Define datadir data_dir_track <- ini_data$V2[ini_data$V1=="trackingmyself " & ini_data$V3=="DataDirectories"] data_dir_track <- substring(data_dir_track, 2) image_dir_track <- ini_data$V2[ini_data$V1=="trackingmyself " & ini_data$V3=="ImageDirectories"] image_dir_track <- substring(image_dir_track, 2) huaweiGeneralFolder = file.path(data_dir_track,'huawei') huaweiFolderSel = file.path(huaweiGeneralFolder,'huaweiWatch3','HUAWEI_HEALTH_20211004184532') folderToSaveGpx = file.path(huaweiFolderSel, 'robGpx') mkdirs(folderToSaveGpx) ## filePathData = file.path(huaweiFolderSel,'Motion path detail data & description','motion path detail data.json') raw_data <- paste(readLines(filePathData), collapse="") json_data2 <- jsonlite::fromJSON(raw_data) locationInfo = json_data2$attribute distanceInfo = json_data2$totalDistance stepsRealInfo = json_data2$realSteps stepsTotalInfo = json_data2$totalSteps startTimeInfo = json_data2$startTime sportType = json_data2$sportType #Check just those for which a significant distance was recorded indicesWithDistance = which(distanceInfo>2000) for (sportSelIndex in indicesWithDistance){ print(sportSelIndex) print(paste0('start time = ', startTimeInfo[[sportSelIndex]])) print(paste0('sport type = ', sportType[[sportSelIndex]])) #Extract the string with the lon and lat locationInfoRaw = locationInfo[[sportSelIndex]] locationInfoRawSplit = 
strsplit(locationInfoRaw, split = '\n')[[1]] ##Keep only the ones with longitude and lattitude locationInfoRawSplit = locationInfoRawSplit[grep(pattern = '.*\\;lat\\=[0-9].*', x = locationInfoRawSplit) ] locationInfoMatrix = fread(text = locationInfoRawSplit, data.table = F) #Convert data and column namesto right format locationInfoMatrix$V1 = NULL#gsub('HW_EXT_TRACK_DETAIL@is','',locationInfoMatrix$V1,fixed=T) for (colIndex in c(1:ncol(locationInfoMatrix))){ prefix = str_split(locationInfoMatrix[1,colIndex],'=')[[1]][[1]] locationInfoMatrix[,colIndex] = as.numeric(gsub(paste0(prefix,'='),'',locationInfoMatrix[,colIndex])) colnames(locationInfoMatrix)[[colIndex]]=prefix } #Convert data-time (still need to check if timezone is correct) locationInfoMatrix$dateTime <- as.POSIXct(locationInfoMatrix$t, origin="1970-01-01") #Remove some with a wrong last row locationInfoMatrix = locationInfoMatrix[which(locationInfoMatrix[,'lat']!=90),] #Some extra header info pre <- '<?xml version="1.0" encoding="utf-8" standalone="yes"?> <gpx version="1.1" creator="GPS Visualizer http://www.gpsvisualizer.com/" xmlns="http://www.topografix.com/GPX/1/1" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:schemaLocation="http://www.topografix.com/GPX/1/1 http://www.topografix.com/GPX/1/1/gpx.xsd"> <trk> <name>test1</name> <trkseg>' post <- '</trkseg> </trk> </gpx>' #Create variable to save textGPX = paste( c(pre, mapply(function(lat, lon, datetime) { sprintf('<trkpt lat="%f" lon="%f"> <time>%s</time> </trkpt>', lat, lon, datetime) }, locationInfoMatrix$lat, locationInfoMatrix$lon, locationInfoMatrix$dateTime), post, "\n"), collapse="\n") #save to file writeLines(text = textGPX, con = file.path(folderToSaveGpx,paste0('sport',sportType[[sportSelIndex]],'_',as.POSIXct(json_data2$startTime[[sportSelIndex]]/1000, origin="1970-01-01"),'.GPX'))) } #https://gis.stackexchange.com/questions/187475/converting-position-data-created-in-r-to-gpx-format # dat <- read.table(header=TRUE, 
stringsAsFactors=FALSE, text=' # DEVICE_ID LAT LONGITUDE DATE TIME # 150211559 12.920818 77.600197 02-01-17 0:00:00 # 150211559 12.914159 77.600037 02-01-17 0:01:39 # 150211559 12.919819 77.600189 02-01-17 0:00:10 # 150211559 12.919434 77.600174 02-01-17 0:00:20 # 150211559 12.918937 77.60009 02-01-17 0:00:29 # 150211559 12.914159 77.600037 02-01-17 0:01:49 # 150211559 12.918482 77.600136 02-01-17 0:00:39 # 150211559 12.917423 77.60009 02-01-17 0:00:49') # # dat$dt <- format(as.POSIXct(paste(dat$DATE, dat$TIME), format="%m-%d-%y %H:%M:%S"), # format="%Y-%m-%dT%H:%M:%SZ", tz="UTC")
5caed60657c7405bcb3317fac7b1c2e35104a01b
abd0439e4334255e89858b6ba0298254b24e3f00
/train資料處理.R
c6f33503e0ebcaafb11f2eecc965dbd18fc7a59a
[]
no_license
vvsy/DM_Project4
c557eb12f6a4e5413b978762e85f5d791d1dc2eb
67fceb221f8ef0e66a025478c9c1e0086e6eed58
refs/heads/master
2020-04-13T22:38:13.798498
2019-01-16T19:03:14
2019-01-16T19:03:14
163,484,193
0
0
null
null
null
null
UTF-8
R
false
false
2,522
r
train資料處理.R
filepath <- "/Users/liguanzhi/Downloads/dmclass2018train.csv" read.csv(filepath) -> df # library library(tidyverse) library(kableExtra) library(dplyr) library(ggplot2) library(reshape2) library(VIM) library(mice) # explore data str(df) dim(df) ## look at those unique value lapply(df, function(x) length(unique(x))) ## drop the col which is only one outcome -> fnf (all of them Femoral Neck Fracture), seq57 (all of them are 0) df %>% select(-fnf,-seq57) -> df ## drop age cols *****************,-age,-aid21 cormat <- round(cor(df[5:7]),2) melted_cormat <- melt(cormat) ggplot(data = melted_cormat, aes(x=Var1, y=Var2, fill=value)) + geom_tile() df %>% select(-age,-aid21) -> df ## convert variables to correct property factor_col = c("id","dmfail","renal","male","bipolar","ch4cat","ch2cat","ch3cat","cin4cat","cin2cat","cin3cat","cindex","cno","seq25","seq26","seq27","seq28","seq29","seq30","seq31","seq32","seq33","seq34","seq35","seq36","seq37","seq38","seq39","seq40","seq41","seq42","seq43","seq44","seq45","seq46","seq47","seq48","seq49","seq50","seq51","seq52","seq53","seq54","seq55","seq56","seq58","seq59","hospvol4cat","areacode","area4cat","city7","city5cat","city7cat","nihno") df[factor_col] <- lapply(df[factor_col], factor) numeric_col = c("med_cost","hospvolume","insamt","paym") df[numeric_col ]<- lapply(df[numeric_col], as.numeric) ## check the ratio of missing value df[ df == "XX" ] <- NA apply(df, 2, function(col)sum(is.na(col))/length(col)) -> ratio as.data.frame(ratio) -> ratio ratio aggr_plot <- aggr(df, col=c('navyblue','red'), numbers=TRUE, sortVars=TRUE, labels=names(data), cex.axis= .5, gap=1) #發現55:59行的na值有為非缺失所以丟掉56:59僅留下55行來補值 ggplot(df,aes(x =df$city5cat)) + geom_bar() ggplot(df,aes(x =df$city7cat)) + geom_bar()#觀察5分布比較平均所以刪掉7 df %>% select(-city7cat,-areacode) -> df #### mice.data <- mice(df, m = 1, maxit = 5, # max iteration method = "cart", seed = 188) micedf <- complete(mice.data,1) apply(micedf, 2, function(col)sum(is.na(col))/length(col)) -> 
miceratio as.data.frame(miceratio) -> miceratio miceratio save(micedf,file="~/Dropbox/mice.data.Rda") ##outcome ggplot(df,aes(y = df$med_cost)) + geom_boxplot() df %>% filter(df$med_cost > quantile(df$med_cost,0.75)) %>% summarise(x=n()) ggplot(df,aes(x = df$dmfail)) + geom_bar()
af50defc8089148a67118c2ed9a582efd5840d8b
7b8b5630a5cef2a21428f97b2c5b26b0f63e3269
/man/lifetime.Rd
48ec1a259496163b8a422dd5ef53bf4e4fe5f22f
[ "BSD-3-Clause", "BSD-2-Clause" ]
permissive
cells2numbers/migrationminer
eb257733c4999f9af57ce10f2faf051d1e0b82fa
c25c692615953c33b3d73430117129fea980bcdb
refs/heads/master
2021-01-23T07:34:53.509775
2019-04-29T17:45:18
2019-04-29T17:45:18
102,511,560
7
0
NOASSERTION
2019-04-09T16:13:56
2017-09-05T17:37:14
R
UTF-8
R
false
true
739
rd
lifetime.Rd
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/track.R \name{lifetime} \alias{lifetime} \title{Lifetime of a track object.} \usage{ lifetime(tracks, t_var = "Metadata_timePoint") } \arguments{ \item{tracks}{data frame with track objects} \item{t_var}{variable name / columne name used for time coordinates} } \value{ Calculate life time of each track object } \description{ Lifetime of a track object. } \examples{ data <- tibble::tibble( Metadata_timePoint = c(1:5), Location_Center_X = c(1, 2, 3, 4, 5), Location_Center_Y = c(1, 1, 1, 1, 1), TrackObjects_Label = c(rep(1, 5)) ) tracks <- migrationminer::displace(data,"TrackObjects_Label") lifetime <- migrationminer::lifetime(tracks) }
ee586cb711c59bbd8ca92c6b6ac0f70fbbcfcdc6
f3a260a55664bc9dedab32a5d6de5e2ca3301ac9
/man/DescToolsAddIns.Rd
522dc243e884473220c2b5553312fa443ace6ebd
[]
no_license
AndriSignorell/DescToolsAddIns
d8db57675ffe0fedd5ce393ea29bb7abccee5219
caa509d6da75ffe58867354911f49ce5a432dcc5
refs/heads/master
2022-05-14T20:15:17.684361
2022-05-10T05:53:50
2022-05-10T05:53:50
190,240,128
2
1
null
2019-11-29T02:51:46
2019-06-04T16:27:49
R
UTF-8
R
false
false
8,272
rd
DescToolsAddIns.Rd
\name{RStudio AddIns} \alias{Str} \alias{Str1} \alias{Summary} \alias{Abstract} \alias{Head} \alias{Example} \alias{Some} \alias{Desc} \alias{Select} \alias{Plot} \alias{PlotD} \alias{FileSaveAs} \alias{XLView} \alias{IntView} \alias{FileOpen} \alias{FlipBackSlash} \alias{SetArrow} \alias{Enquote} \alias{EnquoteS} \alias{EvalEnquote} \alias{Edit} \alias{NewObject} \alias{InspectPnt} \alias{Unclass} \alias{GetExcelRange} \alias{GetExcelRangeH} \alias{GetExcelTable} \alias{Class} \alias{Info} \alias{FlushToSource} \alias{FileBrowserOpen} \alias{SortAsc} \alias{SortDesc} \alias{Shuffle} \alias{RemoveDuplicates} \alias{Cat} %- Also NEED an '\alias' for EACH other topic documented here. \title{Some Functions to be Used as Shortcuts in 'RStudio' %% ~~function to do ... ~~ } \description{ A simply great new feature in RStudio is the option to define add-ins and bind shortcuts to them. This package includes add-ins for some of the most frequently used functions in a data scientist's (at least mine) daily work (like \code{str()}, \code{example()}, \code{plot()}, \code{head()}, \code{view()}, \code{Desc()}).\cr Most of these functions use the current selection in RStudios source panel and send the specific command to the console where it is executed immediately. Assigning shortcuts to these add-ins saves you tons of keystrokes and brings a breeze of liberating and brake-releasing GUI-interaction into your R-life. } \usage{ NewObject() FileOpen() FileBrowserOpen() FileImport() FileSaveAs() GetExcelRange(env = .GlobalEnv, header = FALSE, echo = TRUE) GetExcelRangeH(env = .GlobalEnv) GetExcelTable(env = .GlobalEnv) XLView() IntView() Edit() FlushToSource() Select() Str() Str1() Abstract() Head() Some() Summary() Cat() Desc() Example() Info() Unclass() Plot() PlotD() SetArrow() InspectPnt() BuildModel() Enquote() EnquoteS() RemoveDuplicates() SortAsc() SortDesc() Shuffle() EvalEnquote() FlipBackSlash() } \arguments{ \item{env}{Environment for GetExcelRange to write the data. 
GlobalEnv is default.} \item{header}{logical determining if the import from XL should use a header or not} \item{echo}{should the function return an echo} } \details{ The following add-ins are available: \itemize{ \item \code{NewObject}: \cr starts the good old internal editor and allows to enter values. In dependence of the initial selection in the code panel different objects will be returned. If the selection was \code{m} the code for a matrix will be created, if the selection is \code{d} then the code for a data.frame will be returned and a c will be interpreted as vector. Default is \code{m}. Note that column names can be defined within the editor, but not rownames (don't ask me why ...). \item \code{FileOpen}: \cr get the path of a file to open and insert in code. (See also: \code{Select()}) \item \code{FileBrowserOpen}: \cr get a selected path and opens the systems filebrowser with the according directory. \item \code{FileImport}: \cr display the import file dialog for importing SAS, SPSS, Stata, Systat and MiniTab files. (See \code{\link{FileImportDlg}()}) \item \code{FileSaveAs}: \cr display a file dialog, get the desired path and filename and save the selected object in the given place. Several file types are supported csv/xlsx for data.frames, text/binary for other objects. \item \code{GetXLRange}: \cr run DescTools command \code{\link[DescTools]{XLGetRange}()} by taking the current selection as name for the imported data and replace the current selection by the used code. \item \code{GetXLTable}: \cr run DescTools command \code{\link[DescTools]{XLGetRange}()} by taking the current 3-fold selection as data matrix, rownames and columnnames and replace the current selection by the structure code. \item \code{XLView}: \cr view selected object in MS-Excel, which makes sense for data.frames. 
\item \code{IntView}: \cr view selected object with internal viewer (here again: only data.frames) \item \code{Edit}: \cr run R-command \code{\link{fix}()} on the current selection. This is nice for editing data frames interactively. \item \code{FlushToSource}: \cr run \code{dput} on the current selection and replace the selection with the result. \item \code{Select}: \cr select or pick things interactively. This function as well evaluates the current selection. If the selection corresponds to \code{pch} a dialog for selecting point characters will be displayed. If it's \code{col} the \code{\link{ColPicker}()} dialog will be displayed. If it's any of \code{path}, \code{fn}, \code{file} the \code{FileOpen()} dialog will be displayed. In all other case the \code{\link{SelectVarDlg}()} on the current selection will be run. This can be used to select variablenames of a \code{data.frame} by mouse click or the levels of a factor (all depending on the class of the selected object). The variable names will be returned as string, enquoted and separated by a comma. 
\item \code{Str}, \code{Str1}: \cr run DescTools command \code{\link[DescTools]{Str}()} on the current selection, \code{Str1()} shows only the first level ; \item \code{Abstract}: \cr run DescTools command \code{\link[DescTools]{Abstract}()} (a less technical description than \code{str()}, meant for using with data.frames) on the current selection \item \code{Head}: \cr run R-command \code{\link{head}()} on the current selection \item \code{Some}: \cr run DescTools command \code{\link[DescTools]{Some}()} on the current selection \item \code{Summary}: \cr run R-command \code{\link{summary}()} on the current selection \item \code{Cat}: \cr run R-command \code{\link{cat}(..., sep="\n")} on the current selection \item \code{Desc}: \cr run DescTools command \code{\link[DescTools]{Desc}()} on the current selection \item \code{Example}: \cr run R-command \code{example()} on the current selection \item \code{Info}: \cr print object properties of the currently selected object \item \code{Unclass}: \cr run R-command \code{\link{unclass}()} on the current selection \item \code{Plot}: \cr run \code{\link{plot}()} on the current selection \item \code{PlotD}: \cr run \code{plot(Desc())} on the current selection \item \code{SetArrow}: \cr place an arrow in a plot and insert code into source file \item \code{InspectPnt}: \cr Lauch \code{IdentifyA(... , poly=TRUE)} with the current selection as argument and returns the selected points on the plot. \item \code{BuildModel}: \cr display dialog for building up a model formula interactively by point and click. (See \code{\link{ModelDlg}()}) \item \code{Enquote}, \code{EnquoteS}: \cr split the selected text in the source panel using line breaks as separators, enquote the sections using either double or single quotation marks, and paste them separated by comma. \item \code{RemoveDuplicates}: \cr remove all the duplicates (by row) of the selected text. 
\item \code{SortAsc}, \code{SortDesc}, \code{Shuffle}: \cr sorts the selected text by row in ascending, descending, resp. random order. \item \code{EvalEnquote}: \cr evaluates the selection before enquoting the parts and paste them separated by comma. This can be used to enquote factors. \item \code{FlipBackSlash}: \cr Handling backslashes in pathnames can be really annoying on Windows. This function replaces all backslashes in the selected text by simple slashes. In the first step all backlashes are replaced by slashes, in further applications of the function, flipflops are performed between slashes and backslashes. The option \code{ReplaceDoubleSlash} controls whether double slashes should be replaced by simple ones.\cr It's cool to set this command to the keyboard shortcut: \code{Ctrl+Shift+/} } As soon as the package is loaded once, the AddIns will be listed in RStudios AddIns listbox:\cr\cr \figure{AddIns.png}{AddIns listbox} The keyboard shortcuts can then be assigned by clicking on the menu \code{Tools | Modify Keyboard Shortcuts...} and scroll down to the Addin scope:\cr\cr \figure{Shortcuts.png}{Keyboard Shortcuts} } \author{Andri Signorell <andri@signorell.net> %% ~~who you are~~ } \examples{ if (interactive()){ Str() } } \keyword{ utilities }
228c7f03a484e42bcdcc1c1b31e0ed56ae39958a
41bbdd673bef8e9f1fdc724556591f4607b220e6
/pascal/R/finite.only.R
efe6f1add769bdbeb9d5084997705a984627eca5
[]
no_license
pascal-niklaus/pascal
bd42a5f00fd06d9b9f8b4e6227419943a817b409
88723458c71609a8971925d363671910a4fa421c
refs/heads/master
2023-06-27T03:37:37.156306
2023-06-14T13:35:16
2023-06-14T13:35:16
27,164,003
3
1
null
null
null
null
UTF-8
R
false
false
829
r
finite.only.R
#' Return finite elements of a list or vector #' #' Convenience function that removes non-finite elements (i.e. NA, NaN, Inf, -Inf) from a list or vector #' #' Character vectors are first converted to numeric values. Similarly, factors are first converted to #' their character representation and then to numeric values. #' #' @param x vector of data #' @return vector with non-finite (NaN, Inf) and missing data (NA) removed. #' @examples #' x <- 1:5 #' x[3]<-NA #' x[4]<-1/0 #' x #' ## [1] 1 2 NA Inf 5 #' finite.only(x) #' ## [1] 1 2 5 #' @author Pascal Niklaus \email{pascal.niklaus@@ieu.uzh.ch} #' @export finite.only <- function(x) { x<-safen(x); if(is.list(x)) { x[unlist(lapply(x,function(x) is.finite(x)))] } else if(is.vector(x)) { x[is.finite(x)]; } else stop("unknown data structure"); }
0ad4ad3bbf4d56b333380c3a8a74d6e9681d5dec
b12259b6d342fab1a65a54648f0830f097b2c5aa
/man/fback.Rd
fc6870fc2c530372a07f173bab17f36eb05634e7
[]
no_license
cran/mapsRinteractive
38e7156c3c180aaa0f1c603d329f9b61821de31d
9a9b7538595fa0cb24eb74dd9df2061cadbbd658
refs/heads/master
2023-04-28T09:41:51.252478
2023-04-24T06:40:02
2023-04-24T06:40:02
145,896,378
0
0
null
null
null
null
UTF-8
R
false
true
307
rd
fback.Rd
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/feedback.R \name{fback} \alias{fback} \title{fback} \usage{ fback(t) } \arguments{ \item{t}{Character string. Required.} } \value{ A Character vector. } \description{ Prints feedback and adds it to a vector. } \keyword{internal}
01bc849a126cae7d851c4f4f181a915a8e41b4b6
ddc0b2dc15120912e44a5fc25d7f9316937c7c24
/Decision Tree Regression/Decision Tree Regression.R
e1b6ea347b2d82beda25d6783c5fe81e5915ef8e
[]
no_license
dsilvadeepal/ML-model-implementations
2c4fd36bc065d26204730b177983e32fcbc44a26
35b4f1d830c3dc12c71ce060e75157de29caf045
refs/heads/master
2020-03-23T01:28:16.469680
2019-10-04T01:21:05
2019-10-04T01:21:05
140,919,086
2
0
null
2019-10-04T01:21:06
2018-07-14T05:38:57
HTML
UTF-8
R
false
false
743
r
Decision Tree Regression.R
library(rpart) cars <- read.csv("cars.csv") #Create training and test data train <- cars[1:950, ] # training data test <- cars[951:963, ] # test data #Generating the decision tree model regtree <- rpart(CarSales ~ ., method = "anova", data = train) summary(regtree) #Plotting the regression tree plot(regtree, uniform=TRUE, main="Regression Tree for Sales") text(regtree, use.n=TRUE, all=TRUE, cex=.8) #View the regression tree parameters printcp(regtree) #View the cross-validation error for each split par(mfrow=c(1,2)) rsq.rpart(regtree) #Left chart chows R2 improving as splits increase & right chart shows xerror(cross validation error) #decreases with each split #Predicting the outcome predict(regtree, test, method="anova")
4541d0817dd4fd567fa33769768dafe4a6f286bf
5b30d84beb11b493c5cdcfab73112bd3de490f5d
/Zomato.R
b84e964738502c6bcdc8ecd0966e97d1a8ae25c9
[]
no_license
mohit77bansal/zomato_data_extractor
279bee0be67ab1c3761c8cda03e0608a2225a33b
0c13318b8cc323a00b76581e32a8477e7b3f9681
refs/heads/master
2020-05-20T19:54:18.538094
2019-05-09T05:40:38
2019-05-09T05:40:38
185,733,345
0
0
null
null
null
null
UTF-8
R
false
false
1,112
r
Zomato.R
#setwd("F:/R/cogxio") #setwd("F:/R/cogxio/Zomato") #setwd("E:/cogxio") #install.packages("rvest") library(rvest) source("url_extraction.R") data <- extract_data(1) #150 is the total page number for which restro url is collected. write.csv(data,"restro_name_url.csv", row.names=T) rm(data) url_data <- read.csv("restro_name_url.csv",header=T, sep=",", stringsAsFactors=F) source("details.R") source("filter.R") #data1 <- data_wo_na[1:100,] datawithrating1 <- details(url_data) datafinal1 <- filter(datawithrating1) write.csv(datafinal1,"zomato.csv",row.names=F) # # # data2 <- data_wo_na[501:1000,] # # datawithrating2 <- details(data2) # # data3 <- data_wo_na[1001:1500,] # datawithrating3 <- details(data3) # datafinal3 <- filter(datawithrating3) # write.csv(datafinal3,"zomato3.csv",row.names=T) data4 <- data_wo_na[1501:2000,] datawithrating4 <- details(data4) datafinal4 <- filter(datawithrating4) write.csv(datafinal4,"zomato4.csv",row.names=T) data5 <- data_wo_na[2001:2500,] datawithrating5 <- details(data5) datafinal5 <- filter(datawithrating5) write.csv(datafinal5,"zomato5.csv",row.names=T)
7c601feddfd81e9bf6d12d1901c1e1cd782530d2
f183fc8e84334298eebf70f6ab458560970fcb5e
/cachematrix.R
748a317e02c137eac25ecdcc4abc98fe87fc70d9
[]
no_license
jessegonzalez/ProgrammingAssignment2
c0ec2f5149ca0c4a5d77ea33fc4e70e445eadbf6
e1963ffaefbc216bc46b37cf9164024e34270a84
refs/heads/master
2020-12-25T23:27:03.699490
2015-07-22T01:42:38
2015-07-22T01:42:38
39,476,555
0
0
null
2015-07-22T00:24:12
2015-07-22T00:24:11
null
UTF-8
R
false
false
1,821
r
cachematrix.R
## makeCacheMatrix and cacheSolve are used to cache the result of finding the inverse of a matrix ## which can be computationally intensive. ## ## Example Usage: ## ## tmp_matrix <- matrix(c(0,1,3,1,43,2,3,6,9), nrow=3, ncol=3) ## cache_matrix <- makeCacheMatrix(tmp_matrix) ## cacheSolve(cache_matrix) ## cacheSolve(cache_matrix) ## makeCacheMatrix creates a special "matrix", which is really a list of functions to ## get and set the value of the matrix ## get and set the value of the inverse of matrix makeCacheMatrix <- function(x = matrix()) { m <- NULL ## set the value of the matrix set <- function(y) { x <<- y m <<- NULL } ## get the value of the matrix get <- function() { x } ## set the value of the solved matrix setSolved <- function(solve) { m <<- solve } ## get the value of the solved matrix getSolved <- function() { m } list(set = set, get = get, setSolved = setSolved, getSolved = getSolved) } ## The following function returns the inverse of the matrix created with the above function makeCacheMatrix(). ## It first checks to see if the matrix inverse been calculated with getSolved. ## If that result is not NULL, it gets and returns the result from the cache and skips the computation. ## Otherwise, it calculates the inverse of the matrix and sets the value in the cache via the setSolved function, ## then returns the result. cacheSolve <- function(x, ...) { ## Return a matrix that is the inverse of 'x' m <- x$getSolved() if(!is.null(m)) { message("getting cached data") return(m) } ## Get the matrix and solve data <- x$get() m <- solve(data, ...) ## update the cache and return the result x$setSolved(m) m }
f0da278d602649290a050f7459514d7fc58cd505
64ccb80369da3130ce79e2cd6aab13758e82a1ab
/scripts/analysis.R
7ffdc77d728255dab701f8fb43b64e8991f26327
[]
no_license
tmonfre/qss82-covid-project
140b9a6f22ca055c1992082dad9fa1ae708c6ea2
80b2e42dcc39ff686a540f0d88766d7ebbbec203
refs/heads/master
2023-03-21T06:34:43.885323
2021-03-17T00:32:04
2021-03-17T00:32:04
335,418,531
0
0
null
null
null
null
UTF-8
R
false
false
614
r
analysis.R
load("~/Downloads/mediation RData/mediation-president.RData") # load("~/Downloads/mediation RData/mediation-senate.RData") ade_sum <- 0 signif_count <- 0 for (result in rslt) { if (result[[3]][["z0.p"]] <= 0.05) { ade_sum <- ade_sum + print(result[[3]][["z0"]]) signif_count <- signif_count + 1 } } ade_sum / length(rslt) signif_count acme_sum <- 0 signif_count <- 0 for (result in rslt) { if (result[[3]][["d0.p"]] <= 0.05 & result[[3]][["z0.p"]] <= 0.05) { acme_sum <- acme_sum + print(result[[3]][["d0"]]) signif_count <- signif_count + 1 } } acme_sum / length(rslt) signif_count
d94ba0e1259450733947381e0fb9750e6137d751
531ef52b0fe449e3ac5771b9350952019a282cfd
/code/output.R
8f2ef5f417186695027da12a0f11f5d8385ee20d
[]
no_license
arielfuentes/Places
2b5f831952a4ef0ca642d05b330dd30cd45646bb
e6a99861099dfcef0c165083a0fa3260e4a3608b
refs/heads/main
2023-01-20T15:42:44.559775
2020-11-25T13:16:31
2020-11-25T13:16:31
308,146,643
0
0
null
null
null
null
UTF-8
R
false
false
537
r
output.R
library(rmarkdown) Region <- c("Región de Antofagasta", "Región del Bío-Bío", "Región de La Araucanía") lapply(X = Region, FUN = function(x) tryCatch(render(input = "code/lugares.Rmd", output_dir = "output", output_file = paste0("mall", x), encoding = "utf-8"), error = function(e) NULL ) )
0365d183227cdfcd0e4afd5612065a59fcd7f863
129408919e4fcde9818bef047f6e9b2a74d23c8a
/man/prepare_input_time_series_art.Rd
612f0dc99a8855dc9adac36fc8b2e09a9b7c6a54
[ "MIT" ]
permissive
mrc-ide/naomi
93decfb73624de911f298aadcc0e0d02b8d7d5e5
94d34246144e4dfcb86161258faf213a7db03268
refs/heads/master
2023-06-14T06:37:36.343882
2023-05-05T11:08:33
2023-05-05T11:08:33
204,965,083
7
6
NOASSERTION
2023-09-12T12:54:48
2019-08-28T15:32:00
R
UTF-8
R
false
true
728
rd
prepare_input_time_series_art.Rd
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/input-time-series.R \name{prepare_input_time_series_art} \alias{prepare_input_time_series_art} \title{Prepare data for ART input time series plots} \usage{ prepare_input_time_series_art(art, shape) } \arguments{ \item{art}{Path to file containing ART data or ART data object} \item{shape}{Path to file containing geojson areas data or area data object} } \value{ Data formatted for plotting input time series containing columns area_id, area_name, area_level, area_level_label, time_period, year, quarter, plot and value } \description{ Take uploaded ART and shape file paths and format as data which can be used to draw input time series graphs. }
5217f29db3b791dd40d28910d6fca4b02a034efd
53baf0c2392c0293ee6ece9b73c7e65082a95400
/R_scripts/cibersort_plots.R
dad84f2db9b33065e6c9fc6b671e81ca8d3bd495
[]
no_license
hancockinformatics/misc_R_scripts
fd215ba0e9eeaa60c4b1afcda1eee29fe55e3bee
9682236f9b535e198f4dce81af10da434b5c7e29
refs/heads/master
2022-01-19T22:37:11.445065
2022-01-06T19:14:48
2022-01-06T19:14:48
169,137,794
1
1
null
null
null
null
UTF-8
R
false
false
2,328
r
cibersort_plots.R
# Load required libraries and sample info --------------------------------- pkgs <- c("matrixStats", "pheatmap", "RColorBrewer", "tidyverse", "cowplot") lapply(pkgs, library, character.only = T) samples <- read_csv("samples_20190529.csv") %>% mutate(Library_name = str_replace_all(Library_name, pattern = "-", replacement = ".")) # Read in results --------------------------------------------------------- cibersort_raw <- read_tsv("Results/Cibersort/CIBERSORT.Output_Job11_20190529.txt") %>% rename("Patient" = `Input Sample`) %>% select(-c(`P-value`, `Pearson Correlation`, `RMSE`)) # Clean up data for plotting ---------------------------------------------- # Remove cell types which are 0 in all samples, make into long format cibersort_tidy <- cibersort_raw[, colSums(cibersort_raw != 0) > 0] %>% gather(key = Cell_type, value = Proportion, 2:18) %>% left_join(samples, ., by = c("Library_name" = "Patient")) # Make a heatmap of cibersort results ------------------------------------- cibersort_hmap <- cibersort_raw %>% column_to_rownames(var = "Patient") %>% as.matrix() %>% t() pheatmap( cibersort_hmap, angle_col = 45, color = colorRampPalette(brewer.pal(9, "Blues"))(100), fontsize = 12 ) # Make stacked bar chart -------------------------------------------------- mypalette <- colorRampPalette(brewer.pal(8, "Set3")) cibersort_barplot <- cibersort_tidy cibersort_barplot$Library_name <- factor(cibersort_barplot$Library_name, levels = str_sort(unique(cibersort_barplot$Library_name), numeric = T)) ggplot(cibersort_barplot, aes(Library_name, Proportion, fill = Cell_type)) + geom_bar(position = "stack", stat = "identity", colour = "grey30") + labs(fill = "Cell Type", x = "", y = "Estimated Proportion") + theme(axis.text.x = element_text(angle = 45, hjust = 1)) + scale_y_continuous(expand = c(0.01, 0)) + scale_fill_manual(values = mypalette(17)) # Boxplot with cell types on x-axis --------------------------------------- ggplot(cibersort_tidy, aes(Cell_type, Proportion, fill = 
Cell_type)) + geom_boxplot(outlier.shape = 21, colour = "black") + labs(x = "", y = "Estimated Proportion") + theme(axis.text.x = element_text(angle = 45, hjust = 1), legend.position = "none") + scale_fill_manual(values = mypalette(17))
31a737c4325090cc6c66a57097623a5f31c92847
7e36b838cc40a513e8a26cdf08e2d535ca384b4e
/Prediction.R
f4a2ce72ac044be87007f026d619393304a27922
[]
no_license
ShrutiMainkar1998/Supervised-ML
44e123a6b9f6f77c40715cca5e76ba962cee9703
9540b18bbe7a1fe0ab45776df530dd7b19a5741c
refs/heads/main
2023-01-06T13:35:55.612936
2020-11-06T14:22:31
2020-11-06T14:22:31
310,600,352
0
0
null
null
null
null
UTF-8
R
false
false
969
r
Prediction.R
# Reading data from remote link --------------------------------------------

# mae() comes from ie2misc; load it up front so a missing dependency
# fails before any work (download, model fit) is done.
library("ie2misc")

# Hours-studied vs exam-scores data set (columns: Hours, Scores).
# Renamed from `data` to avoid shadowing base::data().
scores_data <- read.csv(url("http://bit.ly/w-data"))
View(scores_data)
print("Data imported successfully")

# Plotting the distribution of scores
plot(scores_data$Hours, scores_data$Scores,
     main = "Scores vs Hours", xlab = "Hours", ylab = "Scores", pch = 19)

# Preparing data: random ~70/30 train/test split ----------------------------
set.seed(2)  # fixed seed so the split (and all results) are reproducible
id <- sample(2, nrow(scores_data), prob = c(0.7, 0.3), replace = TRUE)
print(id)
train_set <- scores_data[id == 1, ]
test_set  <- scores_data[id == 2, ]
View(train_set)
View(test_set)

# Training: simple linear regression Scores ~ Hours -------------------------
mdl <- lm(Scores ~ Hours, data = train_set)
# Explicit print() so the summary is also shown when the script is sourced
# (top-level auto-printing only happens interactively).
print(summary(mdl))
print("Training complete.")

# Plotting the regression line
# NOTE: the original passed abline(mdl) as an *argument* to plot(), which
# only works by accident of lazy argument evaluation; draw it afterwards.
plot(scores_data$Hours, scores_data$Scores,
     col = "blue", main = "Scores vs Hours",
     cex = 1.2, pch = 16, xlab = "Hours", ylab = "Scores")
abline(mdl)

# Testing Model: Comparing Actual vs Predicted ------------------------------
y <- predict(mdl, test_set)
print(data.frame(y, test_set$Scores))

# Predicting the score for 9.25 hours of study
pred <- predict(mdl, data.frame(Hours = 9.25))
print(pred)

# Mean Absolute Error on the held-out test set
print(mae(test_set$Scores, y))
f5d67b2124e0a65369c1edc216a4dc07d733c882
86a282f2e03d0d8e64127bfe2aa4be6d968d24b4
/man/reg.gprior.post.Rd
4dff449f63f0f46f9ed02610fd944d3888d223ab
[]
no_license
u44027388/LearnBayes
fc57e5689c9619de966f4b9e0210bb3aa078ec8f
f7722076d01768bb845bfe9bed78c365fcf292df
refs/heads/master
2021-09-14T19:23:22.283849
2018-05-17T21:04:10
2018-05-17T21:04:10
null
0
0
null
null
null
null
UTF-8
R
false
false
794
rd
reg.gprior.post.Rd
\name{reg.gprior.post} \alias{reg.gprior.post} \title{Computes the log posterior of a normal regression model with a g prior.} \description{ Computes the log posterior of (beta, log sigma) for a normal regression model with a g prior with parameters b0 and c0. } \usage{ reg.gprior.post(theta, dataprior) } \arguments{ \item{theta}{vector of components of beta and log sigma} \item{dataprior}{list with components data and prior; data is a list with components y and X, prior is a list with components b0 and c0} } \value{ value of the log posterior } \author{Jim Albert} \examples{ data(puffin) data=list(y=puffin$Nest, X=cbind(1,puffin$Distance)) prior=list(b0=c(0,0), c0=10) reg.gprior.post(c(20,-.5,1),list(data=data,prior=prior)) } \keyword{models}
56edd34e9934bcf347aabef400162b87edac9343
db10b0336f082c09393a39ee5de99e7aaa05014d
/R/documentation.R
0e8c2e839094f98beba15ff390b7b9c5445d50f7
[]
no_license
leandroroser/dbR6
8709c4977f59ed8c22bbcb63e78db1119c7d19f1
f722a08ab2930bd4be0bbd50cf5a08b117980fe1
refs/heads/master
2021-09-14T21:46:30.801292
2018-05-20T08:51:06
2018-05-20T08:51:06
112,393,261
1
0
null
2018-01-18T15:58:47
2017-11-28T21:50:04
R
UTF-8
R
false
false
11,760
r
documentation.R
#' dbR6_data class
#' @name dbR6Parent
#' @docType class
#' @importFrom R6 R6Class
#' @importFrom methods setOldClass
#' @importFrom stats na.omit
#' @importFrom utils read.table
#' @return Object of \code{\link{R6Class}}
#' @format \code{\link{R6Class}} object.
#' @field where environment storing data & enclosing environment for metadata
#' @section Methods:
#' \describe{
#' \item{initialize}{initialize method}
#' \item{finalize}{finalize method}
#' \item{get_where}{get environment with the database connection}
#' \item{set_data}{set database connection}
#' }
#' @rdname dbR6Parent
NULL

#' dbR6 class
#' @name dbR6
#' @docType class
#' @importFrom R6 R6Class
#' @importFrom crayon bgCyan
#' @importFrom crayon bgMagenta
#' @importFrom crayon bold
#' @import chunkR
#' @return Object of \code{\link{R6Class}}
#' @format \code{\link{R6Class}} object.
#' @field where (inherited from R6_data class) Environment storing data & enclosing environment for metadata
#' @field metadata Environment storing object metadata
#' @section Methods:
#' \describe{
#'
#' \enumerate{
# Commented-out documentation for the internal deep_clone method:
#\item{\strong{deep_clone}}{deep clone dbR6 object (internal function).
#\subsection{Parameters}{
#\code{\strong{name}} What to clone ("metadata", "where" (database))-
#
#\code{\strong{Value}} The name of the output.
#}
#}
#' \item{\bold{initialize}}{
#'
#' Initialize dbR6 object.
#' \emph{---Parameters---}
#' \subsection{}{
#' \itemize{
#' \item{\code{\strong{dbname}} ":memory:" to create an in-memory database,
#' or a file name for an on-disk database. NOTE(review): original markup was
#' broken ("\\strong(unknown)"); parameter name inferred from the examples
#' below -- confirm against the class definition.}
#' \item{\code{\strong{overwrite}} The name of the output. NOTE(review):
#' description looks copy-pasted; presumably "overwrite an existing
#' database?" -- confirm.}
#' }
#' }
#' }
#'
#' \item{\bold{finalize}}{
#'
#' Finalize dbR6 object.}
#' # \item{get_where}{(inherited from R6_data class). Get location of the database linked to a dbR6 object (Internal).}
#' # \item{set_data}{(inherited from R6_data class). Set dbR6 database (Internal).}
#'
#' \item{\bold{get_metadata}}{
#'
#' Get object metadata.}
#' # \item{set_metadata}{Set object metadata (Internal).}
#' # \item{set_one_metadata_value}{Set a single metadata field (Internal).}
#'
#' \item{\bold{list_tables}}{
#'
#' List tables in database.}
#'
#' \item{\bold{get_tables_number}}{
#'
#' Get number of tables.}
#'
#' \item{\bold{colnames}}{
#'
#' Get colnames of a table.
#' \emph{---Parameters---}
#' \subsection{}{
#' \itemize{
#' \item{\code{\strong{what}} Name of table.}
#' }
#' }
#' }
#'
#' \item{\bold{nrow}}{
#'
#' Get the number of rows of a table.
#' \emph{---Parameters---}
#' \subsection{}{
#' \itemize{
#' \item{\code{\strong{what}} Name of table.}
#' }
#' }
#' }
#'
#' \item{\bold{ncol}}{
#'
#' Get the number of columns of a table.
#' \emph{---Parameters---}
#' \subsection{}{
#' \itemize{
#' \item{\code{\strong{what}} Name of table.}
#' }
#' }
#' }
#'
#' \item{\bold{dim}}{
#'
#' Get dimension of a table.
#' \emph{---Parameters---}
#' \subsection{}{
#' \itemize{
#' \item{\code{\strong{what}} Name of table.}
#' }
#' }
#' }
#'
#' \item{\bold{print}}{
#'
#' Print dbR6 object graphical interface.}
#'
#' \item{\bold{get_table}}{
#'
#' Get a table.
#' \emph{---Parameters---}
#' \subsection{}{
#' \itemize{
#' \item{\code{\strong{what}} Name of the table}
#' \item{\code{\strong{from}} Row at which to start reading (index >= 1)}
#' \item{\code{\strong{to}} Row at which to finish reading}
#' }
#' }
#' }
#'
#' \item{\bold{send_query}}{
#'
#' Send an SQL query.
#' \emph{---Parameters---}
#' \subsection{}{
#' \itemize{
#' \item{\code{\strong{query}} Query to send}
#' }
#' }
#' }
#'
#' \item{\bold{send_statement}}{
#'
#' Send an SQL statement.
#' \emph{---Parameters---}
#' \subsection{}{
#' \itemize{
#' \item{\code{\strong{statement}} Statement to send}
#' }
#' }
#' }
#'
#' \item{\bold{add_table}}{
#'
#' Add a table to a dbR6 object.
#' \emph{---Parameters---}
#' \subsection{}{
#' \itemize{
#' \item{\code{\strong{new_name}} Name for the new table}
#' \item{\code{\strong{new_df}} Input table}
#' \item{\code{\strong{overwrite}} Overwrite if already exists? Default FALSE}
#' \item{\code{\strong{append}} Append content if already exists? Default FALSE}
#' \item{\code{\strong{has_rownames}} Row names present? Default TRUE}
#' \item{\code{\strong{fun}} Function to apply to the table before writing it}
#' \item{\code{\strong{...}} Additional parameters passed to RSQLite::dbWriteTable.}
#' }
#' }
#' }
#'
#' \item{\bold{remove_table}}{
#'
#' Remove a table.
#' \emph{---Parameters---}
#' \subsection{}{
#' \itemize{
#' \item{\code{\strong{what}} Name of table.}
#' }
#' }
#' }
#'
#' \item{\bold{copy_table_structure}}{
#'
#' Add an empty table.
#' \emph{---Parameters---}
#' \subsection{}{
#' \itemize{
#' \item{\code{\strong{new_names}} Name of the new table.}
#' \item{\code{\strong{from}} Table to copy structure from.}
#' \item{\code{\strong{overwrite}} Overwrite table if exists?}
#' }
#' }
#' }
#'
#' \item{\bold{save}}{
#'
#' Save a dbR6 database on disk.
#' \emph{---Parameters---}
#' \subsection{}{
#' \itemize{
#' \item{\code{\strong{to}} Name of the new table. NOTE(review): probably the
#' destination file name -- confirm.}
#' }
#' }
#' }
#'
#' \item{\bold{clone_db}}{
#'
#' Clone a dbR6 object.
#' \emph{---Parameters---}
#' \subsection{}{
#' \itemize{
#' \item{\code{\strong{to}} Name of the new table. NOTE(review): probably the
#' destination database -- confirm.}
#' }
#' }
#' }
#'
#' \item{\bold{sort}}{
#'
#' Sort a table.
#' \emph{---Parameters---}
#' \subsection{}{
#' \itemize{
#' \item{\code{\strong{what}} Name of the table to sort.}
#' \item{\code{\strong{column}} Column used to sort.}
#' \item{\code{\strong{...}} Vector with other columns used to sort.}
#' }
#' }
#' }
#'
#' \item{\bold{create_index}}{
#'
#' Create an index for a table.
#' \emph{---Parameters---}
#' \subsection{}{
#' \itemize{
#' \item{\code{\strong{what}} Name of the table to index.}
#' \item{\code{\strong{column}} Column used to create the index.}
#' \item{\code{\strong{unique}} Create unique index? (Logical).}
#' \item{\code{\strong{...}} Other additional columns in a character vector to create the index.}
#' }
#' }
#' }
#'
#' \item{\bold{drop_index}}{
#'
#' Drop an index from a table.
#' \emph{---Parameters---}
#' \subsection{}{
#' \itemize{
#' \item{\code{\strong{what}} Name of the index to drop.}
#' }
#' }
#' }
#'
#' \item{\bold{get_indices}}{
#'
#' Get indices for a table.}
#'
#' \item{\bold{send_transaction}}{
#'
#' Generate transaction with the tables.
#' \emph{---Parameters---}
#' \subsection{}{
#' \itemize{
#' \item{\code{\strong{...}} Character vector with transactions to parse.}
#' }
#' }
#' }
#'
#' \item{\bold{filter}}{
#'
#' Filter a table using the given conditions.
#' \emph{---Parameters---}
#' \subsection{}{
#' \itemize{
#' \item{\code{\strong{table}} Name of the table.}
#' \item{\code{\strong{conditions}} Logical conditions.}
#' \item{\code{\strong{r_commands}} R commands to be evaluated in the condition ('where' query)
#' indicated within \%rs\% and \%re\% as in: \%rs\% my_command \%re\%}
#' }
#' }
#' }
#'
#' \item{exist_table}{Verify if a table exists (Logical).
#' \emph{---Parameters---}
#' \subsection{}{
#' \itemize{
#' \item{\code{\strong{what}} Name of the table.}
#' }
#' }
#' }
#'
#' \item{\bold{statement_chunk}}{
#'
#' Executes a statement in chunks.
#' \emph{---Parameters---}
#' \subsection{}{
#' \itemize{
#' \item{\code{\strong{what}} Statement.}
#' \item{\code{\strong{n}} Chunk size.}
#' }
#' }
#' }
#'
#' \item{\bold{streamer}}{
#'
#' Apply a function to an input data table using chunks, storing the output
#' into a new table.
#' \emph{---Parameters---}
#' \subsection{}{
#' \itemize{
#' \item{\code{\strong{input}} Input table.}
#' \item{\code{\strong{output}} Output table.}
#' \item{\code{\strong{my_fun}} R function to apply.}
#' \item{\code{\strong{n}} Chunk size.}
#' }
#' }
#' }
#'
#' \item{\bold{write_dataframe}}{
#'
#' Write an external dataframe into the database using chunks.
#' \emph{---Parameters---}
#' \subsection{}{
#' \itemize{
#' \item{\code{\strong{input}} Input table.}
#' \item{\code{\strong{output}} Output table.}
#' \item{\code{\strong{has_colnames}} Colnames present? (Logical).}
#' \item{\code{\strong{chunksize}} Chunk size.}
#' \item{\code{\strong{sep}} Character separating cell elements (default = " ").}
#' \item{\code{\strong{fun}} Function to apply to the chunk before writing it to the database.}
#' \item{\code{\strong{...}} Additional parameters passed to read.table.}
#' }
#' }
#' }
#'
#' \item{\bold{write_matrix}}{
#'
#' Write an external matrix into the database using chunks.
#' \emph{---Parameters---}
#' \subsection{}{
#' \itemize{
#' \item{\code{\strong{input}} Input table.}
#' \item{\code{\strong{output}} Output table.}
#' \item{\code{\strong{has_colnames}} Colnames present? Default TRUE.}
#' \item{\code{\strong{has_rownames}} Rownames present? Default TRUE.}
#' \item{\code{\strong{my_fun}} Function to apply to the chunk before writing it to the database.}
#' \item{\code{\strong{data_mode}} R mode of the input data ("integer", "logical", "character", "numerical").}
#' }
#' }
#' }
#'
#' }
#' }
#' @examples
#' {
#' library(dbR6)
#' # Let's create a table of 1E6 rows x 100 columns:
#' con <- file("long_table.txt", open = "w")
#' header <- paste("C", 1:100, sep = "", collapse = " ")
#' writeLines(header, con)
#' row_index <- 1
#' for(i in 1:100) {
#' long_table <- matrix(sample(letters, 1000000, replace = TRUE), 10000, 100)
#' rownames(long_table) <- row_index : (i * 10000)
#' row_index <- row_index + 10000
#' write.table(long_table, con, quote = FALSE, append = TRUE, col.names = FALSE, row.names = TRUE)
#' cat("Written ", i * 10000, " of 1E6 rows\n")
#' }
#' close(con)
#' # Create a new dbR6 object (on-disk) with the method "new". All the methods
#' # available for the dbR6 class are accessible via:
#' # some_dbR6_object$name_of_method(parameters).
#' # In this case we will create an SQLite database on disk:
#' data_on_disk <- dbR6$new("output.sqlite")
#'
#' # Write the big matrix in the on-disk database. The dbR6 package uses the
#' # chunkR package (https://github.com/leandroroser/chunkR),
#' # which allows to read a matrix in chunks efficiently:
#' require("chunkR")
#' data_on_disk$write_matrix(input = "long_table.txt", output = "long", chunksize = 10000)
#'
#' # The show method returns information about the object:
#' data_on_disk
#'
#' # Interface
#'
#' # Call some of the available methods:
#' data_on_disk$list_tables() # list tables
#' data_on_disk$get_table("long", 1, 10) # get values from the "long" table, from rows 1 to 10
#' data_on_disk$location() # location of the database
#' data_on_disk$nrow("long") # number of rows of "long" table
#' data_on_disk$ncol("long") # number of columns of "long" table
#' data_on_disk$send_query("SELECT * FROM long LIMIT 5;") # send an SQL query
#'
#' # Method to write data frames
#'
#' # Please note that the first method is for matrix (i.e., all columns of the
#' # same type) while the second is for data frames (the columns can be of
#' # different type). The first one is recommended when working with tables
#' # with a same type of data, as it is faster.
#'
#' data_on_disk$write_dataframe("long_table.txt", "long_as_df", chunksize = 10000)
#'
#' # List tables
#' data_on_disk$list_tables()
#'
#' # Remove tables "long" and "long_as_df"
#' data_on_disk$remove_table("long", "long_as_df")
#'
#' # See the object
#' data_on_disk
#'
#' # Objects in-memory and reconnections to existing databases
#'
#' # In-memory databases are created passing ":memory:" to the constructor
#' data_in_memory <- dbR6$new(":memory:")
#'
#' # For reconnection to an existing object the database is passed as argument to the constructor
#' reconnection <- dbR6$new("output.sqlite")
#' }
#' @rdname dbR6
NULL