blob_id stringlengths 40 40 | directory_id stringlengths 40 40 | path stringlengths 2 327 | content_id stringlengths 40 40 | detected_licenses listlengths 0 91 | license_type stringclasses 2 values | repo_name stringlengths 5 134 | snapshot_id stringlengths 40 40 | revision_id stringlengths 40 40 | branch_name stringclasses 46 values | visit_date timestamp[us]date 2016-08-02 22:44:29 2023-09-06 08:39:28 | revision_date timestamp[us]date 1977-08-08 00:00:00 2023-09-05 12:13:49 | committer_date timestamp[us]date 1977-08-08 00:00:00 2023-09-05 12:13:49 | github_id int64 19.4k 671M ⌀ | star_events_count int64 0 40k | fork_events_count int64 0 32.4k | gha_license_id stringclasses 14 values | gha_event_created_at timestamp[us]date 2012-06-21 16:39:19 2023-09-14 21:52:42 ⌀ | gha_created_at timestamp[us]date 2008-05-25 01:21:32 2023-06-28 13:19:12 ⌀ | gha_language stringclasses 60 values | src_encoding stringclasses 24 values | language stringclasses 1 value | is_vendor bool 2 classes | is_generated bool 2 classes | length_bytes int64 7 9.18M | extension stringclasses 20 values | filename stringlengths 1 141 | content stringlengths 7 9.18M |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
337bce70f3c0b8de1d283c2e82c569606a555f08 | ffdea92d4315e4363dd4ae673a1a6adf82a761b5 | /data/genthat_extracted_code/assertive.data.uk/examples/is_uk_telephone_number.Rd.R | 47a24cca1ba3a5710edb52286f0858efe6c67325 | [] | no_license | surayaaramli/typeRrh | d257ac8905c49123f4ccd4e377ee3dfc84d1636c | 66e6996f31961bc8b9aafe1a6a6098327b66bf71 | refs/heads/master | 2023-05-05T04:05:31.617869 | 2019-04-25T22:10:06 | 2019-04-25T22:10:06 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 398 | r | is_uk_telephone_number.Rd.R | library(assertive.data.uk)
### Name: assert_all_are_uk_telephone_numbers
### Title: Is the string a valid UK telephone number?
### Aliases: assert_all_are_uk_telephone_numbers
### assert_any_are_uk_telephone_numbers is_uk_telephone_number
### ** Examples
phone_nos <- c("+44 207 219 3475", "08457 90 90 90")
is_uk_telephone_number(phone_nos)
assert_all_are_uk_telephone_numbers(phone_nos)
|
7934f258af6826392498f2b9ffc7685059175677 | 1841d189c34273204cf7c01561194bd88c855232 | /code/07catPlot.R | 67402fc0bee1cd82c247f8eed16fdc3ccf6e3d91 | [
"CC0-1.0"
] | permissive | eddieimada/PTEN_analysis | 693f249351a139184444140fb1a6bb939a8c891b | 104865391035f1a25aabe3a1f4e81bb3c0caa963 | refs/heads/main | 2023-04-09T15:47:05.145975 | 2021-03-23T17:17:07 | 2021-03-23T17:17:07 | 350,792,200 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 2,025 | r | 07catPlot.R | ### Clean
rm(list=ls())
library(matchBox)
load("~/Dropbox (MechPred)/Projects/PtenERG/Manuscript/objs/DGE_prad.rda")
xde <- read.csv("~/Dropbox (MechPred)/Projects/PtenERG/XDE/text/PTENposVSneg.csv", stringsAsFactors = F)
xde <- xde[,c(1,6)]
names(xde) <- c("symbol", "value")
xde$value <- xde$value * -1
tcga <- cbind.data.frame(symbol=tGnr$PTEN_NEGvsPTEN_POS$geneName, value=tGnr$PTEN_NEGvsPTEN_POS$t)
tcga <- filterRedundant(tcga)
tcga <- tcga[!is.na(tcga$symbol),]
allData <- list(xde,tcga)
mergedDf <- mergeData(allData, idCol = 1, byCol = 2)
HypPI <- calcHypPI(mergedDf, expectedProp = NULL)
CAT_UP <- computeCat(mergedDf, size = 500)
CAT_DN <- computeCat(mergedDf, size=500, decreasing = FALSE)
CAT <- list(Up=CAT_UP$.value.vs..value.1,
Down=CAT_DN$.value.vs..value.1)
png("./figs/CAT.png", width=2200, height = 1100, res=330)
plotCat(CAT, preComputedPI = HypPI, main = "Correspondance-At-the-Top between Up- and Down-regulated genes", maxYlim = 0.3, spacePts = 50, col = c("steelblue", "goldenrod1"), pch = 19, lty = 1, lwd = 1.5 )
dev.off()
xde <- read.csv("~/Dropbox (MechPred)/Projects/PtenERG/XDE/text/PTENinERGPosxde.csv", stringsAsFactors = F)
xde <- xde[,c(1,6)]
names(xde) <- c("symbol", "value")
xde$value <- xde$value * -1
tcga <- cbind.data.frame(symbol=tGnr$PTEN_NEGvsPTEN_POSinERGpos$geneName, value=tGnr$PTEN_NEGvsPTEN_POS$t)
tcga <- filterRedundant(tcga)
tcga <- tcga[!is.na(tcga$symbol),]
allData <- list(xde,tcga)
mergedDf <- mergeData(allData, idCol = 1, byCol = 2)
HypPI <- calcHypPI(mergedDf, expectedProp = NULL)
CAT_UP <- computeCat(mergedDf, size = 500)
CAT_DN <- computeCat(mergedDf, size=500, decreasing = FALSE)
CAT <- list(Up=CAT_UP$.value.vs..value.1,
Down=CAT_DN$.value.vs..value.1)
png("./figs/CAT.png", width=2200, height = 1100, res=330)
plotCat(CAT, preComputedPI = HypPI, main = "Correspondance-At-the-Top between Up- and Down-regulated genes", maxYlim = 0.3, spacePts = 50, col = c("steelblue", "goldenrod1"), pch = 19, lty = 1, lwd = 1.5 )
dev.off() |
6ee38ba6e209f66291569250874d3bca5d198920 | 0f64ac5e3d3cf43124dcb4917a4154829e7bb535 | /scripts/run_me.R | adf492b583c9466b81cb5d021c220c408c767354 | [] | no_license | wactbprot/r4vl | 8e1d6b920dfd91d22a01c8e270d8810f02cea27c | a34b1fa9951926796186189202750c71e7883f8d | refs/heads/master | 2016-09-11T02:22:39.828280 | 2014-10-07T14:37:55 | 2014-10-07T14:37:55 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 17 | r | run_me.R |
Result <- say()
|
97cda85c88ed8cee4beea43573a04369606dd770 | 0a906cf8b1b7da2aea87de958e3662870df49727 | /grattan/inst/testfiles/IncomeTax/libFuzzer_IncomeTax/IncomeTax_valgrind_files/1610382999-test.R | 695a17bebd6dd998bd1447c743df70819ed11dfd | [] | no_license | akhikolla/updated-only-Issues | a85c887f0e1aae8a8dc358717d55b21678d04660 | 7d74489dfc7ddfec3955ae7891f15e920cad2e0c | refs/heads/master | 2023-04-13T08:22:15.699449 | 2021-04-21T16:25:35 | 2021-04-21T16:25:35 | 360,232,775 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 173 | r | 1610382999-test.R | testlist <- list(rates = NaN, thresholds = c(-6.03473647567306e+304, NaN, NaN, NaN, NaN, NaN, 0), x = numeric(0))
result <- do.call(grattan::IncomeTax,testlist)
str(result) |
380806b90e1a6f1af1e6e07452cbc9799645f962 | ffdea92d4315e4363dd4ae673a1a6adf82a761b5 | /data/genthat_extracted_code/tidylog/examples/summarize.Rd.R | 3f293add6343f9f206aa7171c984633cef60a09e | [] | no_license | surayaaramli/typeRrh | d257ac8905c49123f4ccd4e377ee3dfc84d1636c | 66e6996f31961bc8b9aafe1a6a6098327b66bf71 | refs/heads/master | 2023-05-05T04:05:31.617869 | 2019-04-25T22:10:06 | 2019-04-25T22:10:06 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 390 | r | summarize.Rd.R | library(tidylog)
### Name: summarize
### Title: Wrapper around dplyr::summarize and related functions that
### prints information about the operation
### Aliases: summarize summarize_all summarize_at summarize_if summarise
### summarise_all summarise_at summarise_if tally count
### ** Examples
summarize_all(mtcars, mean)
#> summarize_all: now one row and 11 columns, ungrouped
|
01394cff9f0235daa0a31ea2c9c3b846599e081e | 3e6b3d4da5b526e8cd59b9fac6d097c6b0e59e2b | /analysis/compare_hubbard_harvard.R | 7ec9ca690651a45c0495025846caacbee1972cf6 | [] | no_license | sdtaylor/phenology_dataset_study | c1c05543c407175d56731bf7f124655a45925fdb | 463b75941b6c9bc360b59b953935ec056523df3d | refs/heads/master | 2021-01-09T06:09:21.348537 | 2019-11-06T18:45:35 | 2019-11-06T18:45:35 | 80,926,989 | 1 | 1 | null | 2018-09-19T20:48:30 | 2017-02-04T15:09:35 | TeX | UTF-8 | R | false | false | 3,724 | r | compare_hubbard_harvard.R | library(tidyverse)
library(cowplot)
config = yaml::yaml.load_file('config.yaml')
###################################################
# This produced the supplementary figures S8 and S9, comparing parameters
# of hubbard, harvard, and NPN
#################################################
all_parameters = read_csv(config$model_parameter_file) %>%
filter(!parameter_name %in% c('run_time','num_iterations')) %>%
filter(dataset %in% c('harvard','hubbard','npn'))
# only keep hubbard species
hubbard_species = all_parameters %>%
filter(dataset=='hubbard') %>%
select(species) %>%
distinct()
all_parameters = all_parameters %>%
filter(species %in% hubbard_species$species)
#Pull out phenophase
all_parameters = all_parameters %>%
mutate(phenophase = stringr::word(species,2,2, ' - '),
species = stringr::word(species,1,1,' - '))
all_parameters$phenophase = as.numeric(all_parameters$phenophase)
#Make the threshold temperature name a bit more descriptive
all_parameters$parameter_name[all_parameters$parameter_name=='T'] = 'T_base'
########################################################################
datasets = c('harvard','hubbard','npn')
pretty_dataset_names = c('Harvard Forest','Hubbard Brook','NPN')
all_parameters$dataset = factor(all_parameters$dataset, levels = datasets, labels = pretty_dataset_names)
############################################################################
common_plot_theme = theme(strip.text = element_text(size=10),
strip.background = element_rect(fill='grey95'),
axis.text = element_text(size=12),
axis.title.y = element_text(size=18))
point_size=4
point_shapes = c(17,13)
color_pallete=c("grey42", "#56B4E9", "#009E73")
single_model_plot = function(model_name, plot_title){
plot_data = all_parameters %>%
filter(model==model_name)
summary_lines = plot_data %>%
group_by(dataset, parameter_name, species) %>%
summarise(mean_value = mean(value), median_value=median(value)) %>%
ungroup() %>%
gather(summary_metric, summary_value, mean_value, median_value)
p=ggplot(plot_data, aes(x=value, fill=dataset)) +
geom_density(position = position_identity(), alpha=0.7) +
geom_vline(data=summary_lines, aes(xintercept=summary_value, color=dataset, linetype=summary_metric), size=1) +
scale_fill_manual(values=color_pallete) +
scale_color_manual(values=color_pallete) +
facet_wrap(species~parameter_name, scales='free') +
theme(legend.key.size = unit(3, units='lines'),
legend.key = element_rect(size=5))+
labs(fill='', color='',linetype='', x='Parameter Distribution', y = plot_title)
return(p)
}
library(patchwork)
alternating = single_model_plot('alternating', 'Alternating')
gdd = single_model_plot('gdd', 'GDD')
gdd_fixed = single_model_plot('gdd_fixed', 'Fixed GDD')
naive = single_model_plot('naive','Naive')
uniforc = single_model_plot('uniforc','Uniforc')
linear = single_model_plot('linear_temp','Linear')
plot(alternating)
plot(gdd)
plot(gdd_fixed)
plot(naive)
plot(uniforc)
plot(linear)
no_legend = theme(legend.position = 'none')
hubbard_hubbard_1 =
(naive + no_legend)+
(gdd_fixed + no_legend) +
(linear) +
(gdd + no_legend) +
plot_layout(ncol=1, heights=c(1,1,2,3))
hubbard_hubbard_2 =
(alternating) +
(uniforc + no_legend) +
plot_layout(ncol=1, heights=c(1,1))
ggsave(paste0(config$image_save_directory,'figure_s8_hubbard_harvard_comparison1.png'), plot=hubbard_hubbard_1, height=40, width=30,dpi=1000, units = 'cm')
ggsave(paste0(config$image_save_directory,'figure_s9_hubbard_harvard_comparison2.png'), plot=hubbard_hubbard_2, height=40, width=30,dpi=1000, units = 'cm')
|
238aaef2b7e2a50cc6fb3e955ae950c3c1a95827 | 578d5e83262d9a11b271983a93604e4a3084d049 | /mobula-models.R | 3b45d5c5158d285a35f5184ef060571c34460484 | [] | no_license | patricia-alcantara-p/mobula_japanica-niche | 0f6b0159c83d7d8d9931608edf56cbbcd4760950 | 1f5350343c8fb84e5bc92b78b2bb6d4632ce7670 | refs/heads/master | 2021-01-13T12:43:57.931900 | 2016-11-03T03:19:40 | 2016-11-03T03:19:40 | 72,560,205 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 3,215 | r | mobula-models.R | # mobula niche model
# Ricardo Oliveros-Ramos (ricardo.oliveros@gmail.com)
# version 28.01.2016
require(mgcv)
require(kali)
source("auxiliar_functions.R")
# Input parameters --------------------------------------------------------
inputFile = "input/base_mobula_pa-mobula_1985_2015.csv"
outputPath = "output"
batchName = "mobula_global"
ratio = 0.2 # fraction of data used for validation
species = "mobula"
link = "logit"
factors = "masas"
# Pre-processing ----------------------------------------------------------
DateStamp("Starting...")
if(!exists("global")) {
global = read.csv(inputFile)
global[, species] = as.factor(global[,species])
for(var in factors) global[, var] = as.factor(global[, var])
}
# Training and validation -------------------------------------------------
DateStamp("Creating training and validation datasets")
global.model = splitDataSet(global, species, factor=ratio)
test = global
test$lchl = log10(test$chl)
test$loxi = log10(test$oxip)
# Train models ------------------------------------------------------------
neg = test[test$mobula==0,]
DateStamp("Training models...")
fmlas = list()
fmlas$mod0 = as.formula("mobula ~ s(sst,k=3) + s(chl,k=3)")
fmlas$mod1 = as.formula("mobula ~ s(sst) + s(chl)")
fmlas$mod2 = as.formula("mobula ~ s(sst,k=6) + s(chl,k=6)")
fmlas$mod3 = as.formula("mobula ~ s(sst, chl)")
fmlas$mod4 = as.formula("mobula ~ te(sst, chl)")
mods = fitGAMs(global.model, fmlas)
save(mods, file=file.path("output", "mobula_model.RData"))
# mod0 = gam(mobula ~ s(sst,k=k) + s(chl,k=k), data=test, family=binomial(link=link))
plot.new()
plot.window(xlim=range(test$sst), ylim=range(test$chl))
points(chl ~ sst, data=test, subset=test$mobula==1, col="blue", pch=19, cex=0.5)
points(chl ~ sst, data=test, subset=test$mobula==0, col="red", pch=4, cex=0.5)
axis(1)
axis(2)
box()
n0 = calculateNiche(mods$models$mod0, req.sens = 0.9)
n1 = calculateNiche(mods$models$mod1, req.sens = 0.9)
n2 = calculateNiche(mods$models$mod2, req.sens = 0.9)
n3 = calculateNiche(mods$models$mod3, req.sens = 0.9)
n4 = calculateNiche(mods$models$mod4, req.sens = 0.9)
plot(n0, vars = c("sst", "chl"))
plot(n1, vars = c("sst", "chl"))
plot(n2, vars = c("sst", "chl"))
plot(n3, vars = c("sst", "chl"))
plot(n4, vars = c("sst", "chl"))
x11()
layout(matrix(c(1,4,3,2), ncol=2), widths=c(4,1), heights=c(1,4))
par(mar=c(0,0,0,0), oma=4*c(1,1,0.5,0.5))
density(n4, var="sst", axes=FALSE, col=c("blue", "red"), lwd=c(2,2))
legend("topright", legend = c("Presences", "All data"),
col=c("blue", "red"), lty = c(1,1), cex = 1, bty = "n")
density(n4, var="chl", vertical=TRUE, axes=FALSE, col=c("blue", "red"), lwd=c(2,2))
plot.new() # skip one plot
plot(n4, vars = c("sst", "chl"), type="hull", )
points(n4, vars = c("sst", "chl"), col="blue", pch=19)
points(chl ~ sst, data=neg, col="red", pch=4, cex=0.75)
legend("topleft", legend = c("Presences", "Absences"),
col = c("blue", "red"), pch=c(19, 4), cex = 1, bty = "n")
mtext(text = "Sea Surface Temperature (ºC)", side = 1, cex = 1, line = 2.5)
mtext(text = "Chlorphyll-a concentration (mg/L)", side = 2, cex = 1, line = 2.5)
dev.copy(png, file="Figure1.png", width=1200, height=1200, res=144)
dev.off()
|
fbedee36d8441096ff32225ffb87a9c50ce63356 | ed157f4d1f9f309b50c228bbeb58900ca276116d | /man/rar.Rd | 76d8ef229c62877980cb2c368636d249ab7c3d31 | [] | no_license | ugenschel/rotations | fad38b96a98de9d811b51912f399d2d2faaa91e0 | d52a5c1063962f19e48389287baf21f5c97ba3b6 | refs/heads/master | 2021-01-24T01:30:11.660323 | 2012-08-17T16:35:32 | 2012-08-17T16:35:32 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 707 | rd | rar.Rd | \name{rar}
\alias{rar}
\title{Sample of size n from target density f}
\usage{
rar(n, f, g, M, ...)
}
\arguments{
\item{n}{number of samples wanted}
\item{f}{target density}
\item{g}{sampling distribution}
\item{M}{maximum number in uniform proposal density}
\item{...}{additional arguments sent to arsample}
}
\value{
a vector of size n of observations from target density
}
\description{
Sample of size n from target density f
}
\examples{
# sample from haar distribution
x <- rar(10000, haar, runif, 1/pi, min=-pi, max=pi)
kappa=0.5
M <- max(fisher(seq(-pi, pi, length=1000), kappa))
x.fisher <- rar(10000, fisher, runif, M, min=-pi, max=pi, kappa=kappa)
}
\author{
Heike Hofmann
}
|
48b9ff17ca46e2e5c0b27396b157117ac586fcc1 | 5a96afe67c78a91a43c779a12c2a9eafcdb4d4cb | /R/vcov.R | 6631c20a25573fc1e2fe10c419bef9f2215f01b3 | [] | no_license | cran/econet | 1f3e62e9c884e239f0073ae489ea1f6522b9948f | 5981aa19ba085124c65762863d4fa4a1e6056c99 | refs/heads/master | 2022-04-30T04:41:34.880731 | 2022-04-27T23:00:02 | 2022-04-27T23:00:02 | 145,896,520 | 0 | 1 | null | null | null | null | UTF-8 | R | false | false | 974 | r | vcov.R | #' vcov
#' @param print string. If \code{"main.equation"} returns estimates for the main equation. \cr
#' If \code{"first.step"} returns estimates for the first step.
#' If \code{"centrality"} returns quantile distribution for the estimated centrality.
#' @param centrality string. It is used when \code{object} is produced by \code{horse_race}.
#' @method vcov econet
#' @importFrom stats vcov
#' @noRd
#' @export
"vcov.econet" <- function(object, print = "main.equation", centrality = "parameter.dependent", ...) {
x <- object
if(print == "main.equation") {
if (!is.null(attributes(x)$attr)) {
x <- x[[1]][[centrality]]
} else {
x <- x[[1]]
}
if (inherits(x, "nls") |
inherits(x, "lm")) {
res <- vcov(x, ...)
} else {
res <- bbmle::vcov(x, ...)
}
} else if(print == "first.step") {
res <- vcov(x[[2]], ...)
}
class(res) <- "vcov.econet"
return(res)
}
|
b01deea73f018231ff07f07cd37e78e2bcb82aa9 | 876809960681af2c912be7627b6a82795a8ea5e4 | /R/LagSeq_function.R | ef09f12ff24ab9bd45243af01d25643e93c7996f | [
"LicenseRef-scancode-warranty-disclaimer"
] | no_license | chatchavan/LagSeq | b1c7d4654b546591bb544f1f6646626807e3bdaa | e796b7b3a800eca66fcd93d034694350f40ff909 | refs/heads/master | 2021-01-02T22:34:14.576206 | 2017-09-12T03:35:55 | 2017-09-12T03:35:55 | 99,342,365 | 0 | 0 | null | 2017-08-04T12:50:18 | 2017-08-04T12:50:18 | null | UTF-8 | R | false | false | 8,175 | r | LagSeq_function.R | ## TODO
## - not converting codes to numbers
## - handle the case that not all codes appearing in a sequence
#' A utility function for merging same codes appearing together
#'
#' This function reads a vector of data and collapses consecutive runs of the
#' same code into a single occurrence (i.e., repeated codes that are not
#' interrupted by a different code are kept only once).
#' @param vec A vector of data representing a sequence
#' @return The resulting vector after merging consecutive duplicates
#' @examples
#' vec = c(1, 2, 2, 3, 4)
#' MergeSameCodes(vec)
MergeSameCodes <- function(vec) {
  vec <- as.vector(vec)
  if (length(vec) <= 1) return(vec)
  # rle() collapses runs of identical adjacent values in one vectorized pass,
  # replacing the original element-by-element loop that grew the result with
  # c() (O(n^2)). It also tolerates NA values, which the old `!=` comparison
  # could not.
  rle(vec)$values
}
#' A basic function that converts data to a transitional frequency matrix
#'
#' This function reads a vector of data and then computes and returns a square
#' output matrix (the cells of which correspond to the cells of the
#' transitional frequency matrix) for each of the following: transitional
#' frequencies, expected frequencies, adjusted residuals, and Yule's Q values.
#' @param vec A vector of data representing a sequence
#' @param ncodes Integer. The number of codes (or types of events/behaviors) expected in the vector. Optional.
#' @param lag Integer. The lag to be applied to the conversion. Default: 1, denoting converting based on immediate adjacency.
#' @param merge Boolean. Whether to merge the same codes appearing without interruption. Default: FALSE.
#' @return A list of matrices: \code{freq} (transitional frequencies),
#'   \code{expfreq} (expected frequencies), \code{adjres} (adjusted
#'   residuals), and \code{yulesq} (Yule's Q values). Cells that cannot be
#'   computed are marked with the sentinel value -1.001.
#' @export
#' @examples
#' vec = c(1, 2, 3, 4, 3, 2, 1, 2)
#' LagSeq(vec, ncodes = 4)
LagSeq <- function(vec, ncodes = 0, lag = 1, merge = FALSE) {
  # Recode the raw codes to consecutive integers 1..k; factor levels are
  # sorted, so integer i corresponds to the i-th sorted unique code.
  vec <- as.integer(as.factor(vec))
  if (isTRUE(merge)) vec <- MergeSameCodes(vec)
  if (is.null(ncodes) || is.na(ncodes) || ncodes == 0) {
    ncodes <- length(unique(vec))
  }

  ## Transitional frequency matrix: freqs[i, j] counts transitions from
  ## code i to code j at the requested lag.
  freqs <- matrix(0, ncodes, ncodes)
  if (length(vec) > lag) {
    # seq_len() instead of 1:length(vec) avoids iterating when the
    # sequence is shorter than the lag (and the c(1, 0) footgun).
    for (pos in seq_len(length(vec) - lag)) {
      freqs[vec[pos], vec[pos + lag]] <- freqs[vec[pos], vec[pos + lag]] + 1
    }
  }

  ## Expected frequencies and adjusted residuals
  rowtots <- rowSums(freqs)     # transitions starting from each code
  coltots <- colSums(freqs)     # transitions ending at each code
  ntrans  <- sum(rowtots)       # total number of transitions
  prows   <- rowtots / ntrans   # row (antecedent) probabilities
  pcols   <- coltots / ntrans   # column (consequent) probabilities
  expfreq <- matrix(-1.001, ncodes, ncodes)  # -1.001 marks undefined cells
  zadjres <- matrix(-1.001, ncodes, ncodes)
  for (i in seq_len(ncodes)) {
    for (j in seq_len(ncodes)) {
      if (!merge) {
        expfreq[i, j] <- rowtots[i] * coltots[j] / ntrans
      }
      # When same-code repetitions were merged, the diagonal is structurally
      # impossible, so the expectation is adjusted accordingly.
      if (merge && (ntrans - rowtots[j]) > 0) {
        expfreq[i, j] <- (rowtots[i] * coltots[j]) / (ntrans - rowtots[j])
      }
      # Adjusted residual is only defined when the variance term is positive.
      if ((expfreq[i, j] * (1 - pcols[j]) * (1 - prows[i])) > 0) {
        zadjres[i, j] <- (freqs[i, j] - expfreq[i, j]) /
          sqrt(expfreq[i, j] * (1 - pcols[j]) * (1 - prows[i]))
      }
    }
  }

  ## Yule's Q: odds-ratio-based association from the 2x2 collapse of each cell
  yulesq <- matrix(-1.001, ncodes, ncodes)
  for (i in seq_len(ncodes)) {
    for (j in seq_len(ncodes)) {
      a <- freqs[i, j]
      b <- rowtots[i] - freqs[i, j]
      c <- coltots[j] - freqs[i, j]
      d <- ntrans - rowtots[i] - coltots[j] + freqs[i, j]
      if ((a * d + b * c) > 0) {
        yulesq[i, j] <- (a * d - b * c) / (a * d + b * c)
      }
    }
  }

  list(freq = freqs, expfreq = expfreq, adjres = zadjres, yulesq = yulesq)
}
#' Compare transitional patterns between two groups
#'
#' Given two groups, each of which contains multiple sequences of codes (or types of events/behaviors), compare these two groups whether there is any significant difference for each pair of codes for given trasational relationship measure(s).
#' @param df A data frame containing required data. Data should be strict tabular format, with at least the following columns---group membership, sequence membership, codes.
#' @param group Index of the column representing group membership.
#' @param seq Index of the column representing sequence membership.
#' @param codes Index of the column representing codes.
#' @param ncodes Integer. The number of codes (or types of events/behaviors) expected in the vector. Optional.
#' @param lag Integer. The lag to be applied to the conversion. Default: 1, denoting converting based on immediate adjacency.
#' @param merge Boolean. Whether to merge the same codes appearing without interruption. Default: FALSE.
#' @param alpha Double. cut-off level to show the result of the t-test Default: .05.
#' @param print.statistics If \code{TRUE}, print statistical results in the console
#' @export
#' @return A list with the following keys: \code{freq}, \code{adr}, \code{yule}.
#' Each element is a list with two keys: \code{descriptive}
#' (descriptive statistics returned from \code{psych::describeBy}), and
#' \code{t} (t-test result)
#'
#' @examples
#' load("lagseq_example_data.Rdata")
#' LagSeq_Groups(df, group=6, seq=1, codes=5)
LagSeq_Groups <- function(df,
                          group, seq, codes,
                          ncodes=0, lag=1, merge=FALSE,
                          alpha = .05, print.statistics = FALSE) {
  # Keep character columns as characters when building data frames below.
  options(stringsAsFactors=FALSE)
  # Default ncodes: number of distinct codes observed across the whole data set.
  if(is.null(ncodes) || is.na(ncodes) || ncodes == 0)
    ncodes = length(unique(df[, codes]))
  ## convert codes to integers
  tmp = as.factor(df[, codes])
  # Remember the sorted code labels so output columns can be named "A -> B".
  (codes_levels = levels(tmp))
  df[, codes] = as.integer(tmp)
  ## make sequences unique by pasting seq with group
  df[, seq] <- paste(df[, group], df[, seq], sep="_")
  # One row per sequence will be accumulated: a count column plus one column
  # per ordered code pair (ncodes^2 transition cells, row-major).
  lag_measures_freq <- data.frame(rep(c(), 1 + ncodes ** 2))
  lag_measures_adr <- data.frame(rep(c(), 1 + ncodes ** 2))
  lag_measures_yule <- data.frame(rep(c(), 1 + ncodes ** 2))
  seqs <- levels(factor(df[, seq]))
  groups = rep(NA, length(seqs))
  for(i in seq(1, length(seqs))) {
    s = seqs[i]
    df_sub <- df[df[, seq] == s, ]
    if(nrow(df_sub) <= 1){
      # A sequence with <= 1 event has no transitions: record NA measures.
      lag_measures_freq <- rbind(lag_measures_freq, c(1, rep(NA, ncodes ** 2)))
      lag_measures_adr <- rbind(lag_measures_adr, c(1, rep(NA, ncodes ** 2)))
      lag_measures_yule <- rbind(lag_measures_yule, c(1, rep(NA, ncodes ** 2)))
    } else {
      # Per-sequence transition statistics from LagSeq().
      # NOTE(review): the author flags this call — LagSeq() re-factors the
      # per-sequence codes, so code indices may not line up with codes_levels
      # when a sequence does not contain every code. Verify before relying on
      # the "A -> B" column labels.
      matrices = LagSeq(df_sub[, codes], ncodes, lag, merge) # ncodes problematic here
      v_m = matrices$freq
      count <- sum(rowSums(v_m))
      v_m_adr <- matrices$adjres
      v_m_yule <- matrices$yulesq
      # t() before as.vector() flattens each matrix row-major so the columns
      # follow the "from code 1 -> all codes, from code 2 -> ..." order.
      lag_measures_freq <- rbind(lag_measures_freq, c(count, as.vector(t(v_m))))
      lag_measures_adr <- rbind(lag_measures_adr, c(count, as.vector(t(v_m_adr))))
      lag_measures_yule <- rbind(lag_measures_yule, c(count, as.vector(t(v_m_yule))))
    }
    # Record which group this sequence belongs to (assumes one group per seq,
    # which holds because seq was pasted with group above).
    groups[i] = unique(df_sub[, group])
  }
  all_lag_measures <-
    list(freq = lag_measures_freq,
         adr = lag_measures_adr,
         yule = lag_measures_yule)
  all_desc_statistics <- list(freq = NULL, adr = NULL, yule = NULL)
  for (idx in names(all_lag_measures)) {
    lag_measures <- all_lag_measures[[idx]]
    # Label transition columns as "fromLabel -> toLabel".
    names(lag_measures) <- c("count", sapply(1:ncodes, function(x) paste(codes_levels[x], codes_levels[1:ncodes], sep=" -> ")))
    lag_measures$seq = seqs
    lag_measures$group = groups
    # describe first
    desc_statistics <- psych::describeBy(lag_measures[, 1:(ncol(lag_measures)-2)], lag_measures$group)
    if (print.statistics) {
      print(desc_statistics)
    }
    # t-tests
    # Compare the first two groups on every measure column.
    # NOTE(review): with more than two groups only the first two are compared.
    (groups_u = unique(groups))
    for(c in 1:(ncol(lag_measures)-2)) {
      tryCatch({
        t = t.test(lag_measures[lag_measures$group == groups_u[1], c],
                   lag_measures[lag_measures$group == groups_u[2], c])
        # NOTE(review): `t` is an htest object, so is.na(t) returns a vector
        # and `&&` on it errors in R >= 4.3; the tryCatch silently absorbs
        # that error, so significant results may never print. Confirm and fix.
        if(!is.na(t) && t$p.value < alpha) {
          if (print.statistics) {
            cat("t-test for", names(lag_measures)[c], ":\n")
            print(t)
          }
        }
      }, error=function(cond) {
        # Errors (e.g. all-NA columns, constant data) are deliberately
        # swallowed so one bad column does not abort the whole comparison.
        # message("Here's the original error message:")
        # message(cond)
      })
    }
    # NOTE(review): `t` here is only the t-test from the LAST column iterated,
    # not all tests — the documented "t (t-test result)" key is incomplete.
    all_desc_statistics[[idx]] <- list(descriptive = desc_statistics, t = t)
  }
  # return descriptive statistics
  all_desc_statistics
}
|
6f3e7c351a12a956e615db96d1d30360930b5607 | 23940af526fa235eceeea29f0fa567a4721fb39d | /man/FastSparseMatSymmatl.Rd | 53d0f9346ce5d0c2d7951ed7c7b607d468e99bc3 | [
"LicenseRef-scancode-warranty-disclaimer"
] | no_license | bioturing/signac | 843b5446c6633f1d1ba84478ee8a665ad2418193 | 521a71e28b0dfeb9c83a45acd394f8f8a0b55f14 | refs/heads/master | 2022-06-07T18:43:24.685272 | 2022-05-18T11:37:22 | 2022-05-18T11:37:22 | 176,672,113 | 26 | 11 | NOASSERTION | 2022-05-18T11:37:22 | 2019-03-20T06:47:55 | C++ | UTF-8 | R | false | true | 294 | rd | FastSparseMatSymmatl.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/RcppExports.R
\name{FastSparseMatSymmatl}
\alias{FastSparseMatSymmatl}
\title{FastSparseMatSymmatl}
\usage{
FastSparseMatSymmatl(mat)
}
\arguments{
\item{mat}{A sparse matrix}
}
\description{
Symmatl sparse matrix
}
|
15fe4c0ea9c050f096ff98e7a2566e13c552a6e1 | fcf08bed1de5fcf61d507dc984676c5bcddb46dc | /analysis_1-2.R | de2c8750c0004b72a5ace2a4bd3be41177e89089 | [] | no_license | jrosen48/generality | 033200c4b8857401ba19a8d91d11bbd962804704 | 958934f6924aed210a8b3425e2caf2d4e32fb121 | refs/heads/master | 2021-03-27T15:07:48.960625 | 2017-01-03T13:30:56 | 2017-01-03T13:30:56 | 77,837,009 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 7,526 | r | analysis_1-2.R | ######################################################
# 0. loading data, setting up ########################
######################################################
install.packages('tidyverse')
install.packages('abind') # also need to install abind
install.packages('MASS') # same
install.packages('readr') # last one - this one reads CSV files faster and has some useful defaults, though it is not necessary
library(tidyverse) # after you can use library(tidyverse) when you want to use it
setwd('~/dropbox/1_research/generality') # change this to the folder with the data in it
data <- readr::read_csv("all_observations.csv")
data # need `` symbols around the names since can't start a variable name with a #
# this is because `7C_8-3` and `7C_12-2` have no 3.1 obs - will remove later
data <- rbind(data, c(NA, '3.1', '3.1', NA))
data
data[, 2:4] <- sapply(data[, 2:4], function(x) floor(as.numeric(x)))
data
##################################################################
# 1. look at likelihood of profile (using all data & six codes) #
##################################################################
to_prep <- tidyr::gather(data, key, val, -id)
to_plot <- to_prep %>%
group_by(key, val) %>%
summarize(n = n()) %>%
filter(!is.na(val)) %>%
tidyr::spread(val, n, fill = 0) %>%
tidyr::gather(Code, the_val, -key) %>%
group_by(key) %>%
mutate(the_val = as.numeric(the_val),
the_val = the_val / sum(the_val))
to_plot$key <- factor(to_plot$key,
levels = c("7C_8-3", "7C_12-2", "8B_6-2"))
to_plot$the_key <- factor(to_plot$Code)
to_plot <- to_plot %>% arrange(key, Code)
t1_freq <- as.vector(table(data$`7C_8-3`))
t2_freq <- as.vector(table(data$`7C_12-2`))
t3_freq <- as.vector(table(data$`8B_6-2`))
the_mat <- cbind(t1_freq, t2_freq, t3_freq)
row.names(the_mat) <- sort(unique(data$`8B_6-2`))
colnames(the_mat) <- names(data)[2:4]
chi_sq_output <- chisq.test(the_mat) # this throws a warning (not an error) because some cells have few (< 5) cases; it's probably okay as long as we interpret those cells with caution
round(chi_sq_output$stdres, 3) # here are the z-scores (> 1.96 means cell is more likely than expected; < 1.96 means cell is less likely than expected)
to_plot$sig <- ifelse(as.vector(chi_sq_output$stdres) >= 1.96, "+",
ifelse(as.vector(chi_sq_output$stdres) <= -1.96, "-",
"=")) # replace "=" w/ NA
ggplot(to_plot, aes(x = key, y = the_val, color = Code, group = Code, label = sig)) +
geom_point(size = 1.5) +
geom_line() +
ggrepel::geom_label_repel(show.legend = F) +
#ggrepel::geom_text_repel() +
ylab("Proportion of Responses") +
xlab(NULL) +
theme(text = element_text(size = 14)) +
theme(legend.title = element_blank()) +
labs(
title = "Number of Responses by Code for All Observations",
caption = "Note. +, -, and = labels indicate code is more likely than expected evaluated using a chi-square test of proportions."
) +
theme(plot.caption = element_text(size = 10, family = "Times"))
ggsave("code.png", width = 6, height = 6)
######################################################
# 2. look at likelihood of shifts ####################
######################################################
tab1 <- table(data$`7C_8-3`, data$`7C_12-2`)
tab2 <- table(data$`7C_12-2`, data$`8B_6-2`)
arr <- abind::abind(tab1, tab2, along = 3)
names(dimnames(arr)) <- c("first_code", "second_code", "shift")
dimnames(arr)[[3]] = c("shift_1", "shift_2")
m.sat <- MASS::loglm( ~ first_code + second_code + shift, arr)
m.sat_out <- as.data.frame(resid(m.sat))
df1 <- data.frame(tab1)
df2 <- data.frame(tab2)
df1 <- mutate(df1, shift = "shift_1")
df2 <- mutate(df2, shift = "shift_2")
df <- bind_rows(df1, df2)
names(df) <- c('first_code', 'second_code', "n", "shift")
df_out <- df %>% arrange(shift, second_code)
out_out <- m.sat_out %>%
gather(key, val) %>%
mutate(code_1 = rep(c(0:5), 12)) %>%
unite(united, code_1, key)
df <- bind_cols(df_out, out_out)
df <- select(df, first_code, second_code, val, shift)
df <- unite(df, code, first_code, second_code, sep = "-")
out <- df %>%
filter(val >= 1.96 | val <= -1.96) %>%
mutate(sig = ifelse(val > 1.96, "+",
ifelse(val < -1.96, "-", NA))) %>%
select(-val) %>%
arrange(shift, code)
to_plot <- data %>%
gather(key, val, -id) %>%
mutate(val = factor(val)) %>%
filter(!is.na(val)) %>%
count(key, val) %>%
mutate(prop = round(n / sum(n), 3))
to_plot$key <- factor(to_plot$key,
levels = c("7C_8-3", "7C_12-2", "8B_6-2"))
to_plot$key <- ifelse(to_plot$key == "7C_8-3", "Time 1",
ifelse(to_plot$key == "7C_12-2", "Time 2", "Time 3"))
to_plot$val <- factor(to_plot$val,
levels =
c("5", "4", "3", "2", "1", "0"))
# Found likelihoods using this (very clunky):
# to_plot %>%
# arrange(key) %>%
# group_by(key) %>%
# mutate(new_prop = cumsum(prop),
# new_loc = (new_prop - (prop / 2)))
ggplot(to_plot, aes(x = key, y = prop, fill = val, width = .625)) +
geom_col(position = 'stack') +
xlab(NULL) +
ylab("Proportion of Responses") +
xlab(NULL) +
theme(text = element_text(size = 15)) +
theme(legend.title = element_blank()) +
theme(plot.caption = element_text(size = 11, family = "Times")) +
annotate("segment", x = 1, xend = 2, y = .049, yend = .015, arrow=arrow(ends = "last", length=unit(.2,"cm"))) +
annotate("segment", linetype = "dashed", x = 1, xend = 2, y = .207, yend = .505, arrow=arrow(ends = "last", length=unit(.2,"cm"))) +
annotate("segment", linetype = "dashed", x = 1, xend = 2, y = .207, yend = .626, arrow=arrow(ends = "last", length=unit(.2,"cm"))) +
annotate("segment", x = 1, xend = 2, y = .478, yend = .505, arrow=arrow(ends = "last", length=unit(.2,"cm"))) +
annotate("segment", x = 1, xend = 2, y = .701, yend = .626, arrow=arrow(ends = "last", length=unit(.2,"cm"))) +
annotate("segment", linetype = "dashed", x = 1, xend = 2, y = .705, yend = .819, arrow=arrow(ends = "last", length=unit(.2,"cm"))) +
annotate("segment", linetype = "dashed", x = 1, xend = 2, y = .873, yend = .221, arrow=arrow(ends = "last", length=unit(.2,"cm"))) +
annotate("segment", x = 2, xend = 3, y = .221, yend = .180, arrow=arrow(ends = "last", length=unit(.2,"cm"))) +
annotate("segment", x = 2, xend = 3, y = .221, yend = .553, arrow=arrow(ends = "last", length=unit(.2,"cm"))) +
annotate("segment", x = 2, xend = 3, y = .221, yend = .805, arrow=arrow(ends = "last", length=unit(.2,"cm"))) +
annotate("segment", linetype = "dashed", x = 2, xend = 3, y = .505, yend = .180, arrow=arrow(ends = "last", length=unit(.2,"cm"))) +
annotate("segment", linetype = "dashed", x = 2, xend = 3, y = .505, yend = .398, arrow=arrow(ends = "last", length=unit(.2,"cm"))) +
annotate("segment", x = 2, xend = 3, y = .626, yend = .553, arrow=arrow(ends = "last", length=unit(.2,"cm"))) +
annotate("segment", x = 2, xend = 3, y = .819, yend = .805, arrow=arrow(ends = "last", length=unit(.2,"cm"))) +
labs(
title = "Shifts Between Codes for All Observations",
caption = "Note. Solid lines indicate shift is more likely than (and dashed lines indicate shift is less likely) than expected as evaluated using log linear models"
)
ggsave("shift.png", width = 9, height = 9.25) |
085228e64dc89a27d9fd870a04108d19c74a3a23 | a3541fa9afdcbc4bd1360afda4b6f8d170244889 | /R/bg22DemographicSubgroups2016to2020.R | 5a82f174688b1eb410cef4e7f19b008986fe1bea | [] | no_license | ejanalysis/ejscreen | 4349236260c94dd9a9d0cfdcae237adebcec2d8a | 6af10b7d3b47c683cb512fd4792c2eef0e1d695a | refs/heads/master | 2023-05-27T11:59:13.144072 | 2023-05-25T23:40:52 | 2023-05-25T23:40:52 | 40,103,218 | 7 | 0 | null | null | null | null | UTF-8 | R | false | false | 3,171 | r | bg22DemographicSubgroups2016to2020.R | #' @name bg22DemographicSubgroups2016to2020
#' @docType data
#' @title Demographic subgroups of race/ethnicity by block group
#'
#' @description bg22DemographicSubgroups provides subgroups that are
#' the ones making up EJScreen's % people of color (aka % minority)
#' such as Hispanic or Latino, Non-Hispanic Black Alone, etc.
#' @details
#' \preformatted{
#' This dataset is a companion to the block group data from EJScreen.
#' EJScreen and therefore bg22 would have lacked table B03002 (race ethnicity)
#' so that table is obtained as bg22DemographicSubgroups
#'
#' This also includes race/ethnicity data for Puerto Rico, but not GU/AS/VI/MP.
#'
#' EJScreen 2.1 uses ACS2020, which is from 2016-2020
#' (released March 17 2022, delayed from Dec 2021).
#' It was to be called the 2022 version of EJScreen, and
#' here is called bg22.
#'
#' EJScreen 2.0 was released by EPA 2022-02-18 (delayed from mid/late 2021).
#' EJScreen 2.0 used ACS2019, which is from 2015-2019 (released Dec 2020).
#' It was to be a late 2021 version, and here had been called bg21.
#'
#' bg22DemographicSubgroups was created by downloading and calculating
#' RACE ETHNICITY SUBGROUP VARIABLES THAT ARE NOT IN EJSCREEN
#' (the subgroups within "minority" or "people of color").
#' This is from Census Table B03002, and includes percent Hispanic,
#' percent Non-Hispanic Black Alone (not multirace), etc.
#' Race ethnicity groups are defined by Census Bureau. They are
#' mutually exclusive (no overlaps between groups,
#' so a person is always in only one of these groups)
#' so they add up to the total population count or percent.
#' Block group resolution for USA.
#' From Census ACS 5 year summary file.
#'
#' This will give a quick look at some key stats:
#' # round(data.frame(cbind(
#' # subgroups=unlist(ustotals(bg22DemographicSubgroups2016to2020)),
#' # maingroups = unlist(ustotals(subset(bg22, bg22$ST !='PR')))
#' # ),2)
#'
#' ##########################################################################
#' How dataset was created:
#' ##########################################################################
#'
#' # see ejscreen/inst/SCRIPT_create_bgDemog_ejscreen2.1_andtracts.R
#' # the SCRIPT for how this was created
#' # and ejscreen/inst/SCRIPT_ADD_PUERTORICO_DEMOG_SUBGROUPS.R for PR part.
#'
#' # DOWNLOADED ACS TABLE WITH RACE ETHNICITY BY BLOCK GROUP
#' # AND CREATE PERCENT VARIABLES LIKE PERCENT HISPANIC, ETC.
#'
#' # These are created: (count and percent hispanic or latino,
#' # nonhispanic white alone i.e. single race,
#' # nonhispanic black or african american alone, Not Hispanic or Latino
#' # American Indian and Alaska Native alone,
#' # Not Hispanic or Latino Native Hawaiian and Other Pacific Islander alone,
#' # and nh some other race alone, and nh two or more races):
#'
#' # "hisp" "nhwa" "nhba" "nhaiana" "nhaa" "nhnhpia"
#' # "nhotheralone" "nhmulti" "nonmins" "pcthisp" "pctnhwa" "pctnhba"
#' # "pctnhaiana" "pctnhaa" "pctnhnhpia" "pctnhotheralone" "pctnhmulti"
#'
#' }
#'
NULL
|
b40ceeb4f57b3f39d24ad1dc736c0dc465e97edf | 2fbe266ec037f5af395bcf304f39d4430d61f59e | /R/U_it.R | 03296137e3179fdacbe5ff99ada792c09a954061 | [] | no_license | cran/XRSCC | 03601fc10e525e83b75e86b5d0aa3ffa14612db3 | 1168bbac5dfd5fe976c1d97e6d90322bb0572b9e | refs/heads/master | 2020-09-05T07:00:36.565156 | 2016-11-12T01:05:48 | 2016-11-12T01:05:48 | 73,521,140 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 2,854 | r | U_it.R | U_it<-function(prev.results){
if (missing(prev.results)){
stop("No elementos para iteracion, No elements for iteration")
} else {
if(prev.results$bin[1]==0){
stop("El proceso ya esta bajo control, The process is already under control")
} else {
u.0 <- prev.results$data.0
u.pos<-prev.results$in.control
u.1<-u.0[u.pos,]
m <- nrow(u.1)
ui.1 <- u.1$d/u.1$n
# Calculo de limites de control para la grafica u
LCS.u.1<-expression(mean(ui.1)+3*sqrt(mean(ui.1)/mean(u.1$n)))
LCI.u.1<-expression(mean(ui.1)-3*sqrt(mean(ui.1)/mean(u.1$n)))
LC.u.1<-expression(mean(ui.1))
if (eval(LCI.u.1)>0){
LCI.u.1<-eval(LCI.u.1)
} else {
LCI.u.1 <- 0
}
u.pos<-which(ui.1 >= eval(LCI.u.1) & ui.1 < eval(LCS.u.1))
ui.2<-ui.1[u.pos]
u.fi.1<-which(ui.1 < eval(LCI.u.1))
u.fs.1<-which(ui.1 >= eval(LCS.u.1))
bin.u<-if(length(u.pos)< m){
bin.u<-1
} else {
bin.u<-0
}
#
# Script para Grafica U inicial
plot.u<-function(U=ui.1,type="b",col="blue",pch =19){
plot(U, xlab= "Numero de muestra", ylab="Numero de inconformidades por unidad",
main="Grafica U, Control Estadistico de la Calidad",type=type, col=col,
ylim=c(eval(LCI.u.1)-mean(ui.1)*0.05, max(eval(LCS.u.1)*1.1, max(ui.1)*1.1)),
xlim=c(-0.05*m, 1.05*m), pch = pch)
abline(h= c(eval(LCS.u.1), eval(LCI.u.1), eval(LC.u.1)),col="lightgray")
text(c(rep(1,3),rep(7,3)), rep(c(eval(LCS.u.1),eval(LC.u.1),eval(LCI.u.1)),2),
c(c("LCS = ","LC = ","LCI = "), c(round(eval(LCS.u.1),3), round(eval(LC.u.1),3),
round(eval(LCI.u.1),3))),
col="red") }
plot.u()
# Crea la lista de los resultados
structure(list("in.control" = u.pos,
"out.control"= c(u.fi.1,u.fs.1),
"Iteraciones" = prev.results$Iteraciones + 1,
"data.0"= prev.results$data.0,
"data.1"= ui.2,
"bin" = bin.u,
"Limites de Control Grafica U" = c("LCI.u"=eval(LCI.u.1),"LCS.u"=eval(LCS.u.1),
"LC.p"=eval(LC.u.1)),
"Conclusion del proceso"= c(if(length(u.pos)< m){
print("Proceso fuera de Control en Grafica U")
} else {
print("El proceso esta bajo control en Grafica U")
})))
}
}
}
|
d2ba56f615bc4f3d381d5b50b4dd49bc18162ce8 | 1d4130c143c95aed44cfdde3226efb7c69e22f61 | /PracticalMachineLearning/quiz/ex4.R | ac17ccd64d2104a86380ddc81d945da64f4026cc | [] | no_license | kapliy/mooc | f665576057416b099f1802f6853c7bf799393217 | 71b4ffbce2af95ba55aedc179cf2092ee53e9ada | refs/heads/master | 2021-01-14T14:37:50.699443 | 2015-08-05T18:38:22 | 2015-08-05T18:38:22 | 37,920,178 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 919 | r | ex4.R | set.seed(62433)
mod1 <- train(diagnosis~., data=training, method='rf')
mod2 <- train(diagnosis~., data=training, method='gbm')
mod3 <- train(diagnosis~., data=training, method='lda')
# train combiner
pred1 <- predict(mod1,newdata=training);
pred2 <- predict(mod2,newdata=training)
pred3 <- predict(mod3,newdata=training)
predDF <- data.frame(pred1,pred2,pred3,diagnosis=training$diagnosis)
combModFit <- train(diagnosis~.,method="rf",data=predDF)
# use combiner
pred1 <- predict(mod1,newdata=testing);
pred2 <- predict(mod2,newdata=testing)
pred3 <- predict(mod3,newdata=testing)
predDF <- data.frame(pred1,pred2,pred3,diagnosis=testing$diagnosis)
combPred <- predict(combModFit, newdata=predDF)
sum(pred1 == testing$diagnosis) / length(pred1) # rf
sum(pred2 == testing$diagnosis) / length(pred2) # gbm
sum(pred3 == testing$diagnosis) / length(pred3) # lda
sum(combPred == testing$diagnosis) / length(combPred)
|
77faae84717fb1b44a7f2a73daf3f91190796554 | 77d8580393591c609386089c3efedbe2375d175a | /Regression and Multivariate Analysis/Ames_assignment1.R | 3989a5d7ace19ce8141ee0e0ccaf25e76669306d | [] | no_license | jmwanat/data_analytics | b9ef48b630aa7ad1e181df911b28a95695b3ce84 | 829de8866551de90d441d70bf284d9c575343edc | refs/heads/master | 2022-12-07T01:37:34.046650 | 2020-09-09T02:41:55 | 2020-09-09T02:41:55 | 283,517,962 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 11,660 | r | Ames_assignment1.R | # Jennifer Wanat
# Fall 2017
# read_ames.R
install.packages("Hmisc")
require(Hmisc)
require(gridExtra)
require(moments)
path.name <- "~/Desktop/R/"
file.name <- paste(path.name, "ames_housing_data.csv", sep = "")
# Read in the csv file into an R data frame;
ames.df <- read.csv(file.name, header = TRUE, stringsAsFactors = FALSE)
# Show the header of the data frame;
head(ames.df)
# Show the structure of the data frame;
str(ames.df)
# This plot was created to view the 5 outliers and unusual values mentioned
# in the data documentation special notes section
plot(ames.df$GrLivArea, ames.df$SalePrice,
xlab = "Above Grade Living Area (square feet)", ylab = "Sale Price")
#Creating a waterfall of drop conditions
# Single ifelse() statement
# ifelse(condition, value if condition is TRUE, value if the condition is FALSE)
# Nested ifelse() statement
# ifelse(condition1, value if condition1 is TRUE,
# ifelse(condition2, value if condition2 is TRUE,
# value if neither condition1 nor condition2 is TRUE
# )
# )
# Create a waterfall of drop conditions;
# Work the data frame as a 'table' like you would in SAS or SQL;
ames.df$dropCondition <- ifelse(ames.df$BldgType!='1Fam','01: Not SFR',
ifelse(ames.df$SaleCondition!='Normal','02: Non-Normal Sale',
ifelse(ames.df$Street!='Pave','03: Street Not Paved',
ifelse(ames.df$GrLivArea >4000,'04: LT 4000 SqFt',
ifelse(ames.df$LotArea >100000,'05: Lot 100000 SqFt',
'99: Eligible Sample')
))))
table(ames.df$dropCondition)
# Save the table
waterfall <- table(ames.df$dropCondition);
# Format the table as a column matrix for presentation;
as.matrix(waterfall,6,1)
data[,"waterfall", drop=FALSE]
# Eliminate all observations that are not part of the eligible sample population;
eligible.population <- subset(ames.df,dropCondition=='99: Eligible Sample');
# Check that all remaining observations are eligible;
table(eligible.population$dropCondition)
#Pick twenty variables and run a data quality check on them;
table(eligible.population$LotArea!="NA")
table(eligible.population$LotArea!=0)
summary(eligible.population$LotArea)
quantile(eligible.population$LotArea)
table(eligible.population$Street,useNA = c("always"))
table(eligible.population$Utilities, useNA = c("always"))
table(eligible.population$HouseStyle, useNA = c("always"))
table(eligible.population$YearBuilt!="NA")
table(eligible.population$YearBuilt, useNA = c("always"))
summary(eligible.population$YearBuilt)
quantile(eligible.population$YearBuilt)
table(eligible.population$RoofMat, useNA = c("always"))
table(eligible.population$Exterior1, useNA = c("always"))
table(eligible.population$BsmtFinType1, useNA = c("always"))
table(eligible.population$Heating, useNA = c("always"))
table(eligible.population$CentralAir, useNA = c("always"))
table(eligible.population$Electrical, useNA = c("always"))
table(eligible.population$GrLivArea!="NA")
table(eligible.population$GrLivArea!=0)
summary(eligible.population$GrLivArea)
quantile(eligible.population$GrLivArea)
describe(eligible.population$GrLivArea)
table(eligible.population$FullBath, useNA = c("always"))
table(eligible.population$HalfBath, useNA = c("always"))
table(eligible.population$BedroomAbvGr, useNA = c("always"))
table(eligible.population$Fireplaces, useNA = c("always"))
table(eligible.population$GarageType, useNA = c("always"))
table(eligible.population$PavedDrive, useNA = c("always"))
table(eligible.population$SaleCondition, useNA = c("always"))
table(eligible.population$SalePrice!="NA")
summary(eligible.population$SalePrice)
quantile(eligible.population$SalePrice)
describe(eligible.population$SalePrice)
#Pick ten variables from the twenty variables from the data
#quality check to explore in initial exploratory data analysis;
#The ten variables are:
#lot area, utilities, year built, GrLivArea, Full bath
#half bath, bedroom, garage type, heating, sale price
#Continuous: lot area, year built, GrLivArea, sale price
#Discrete: utilities, full bath, half bath, bedroom, garage type, heating
par(mfrow = c(1,2))
boxplot(eligible.population$SalePrice, ylab = "Sale Price ($$)", col = c("lightgrey"),
coef = 3.0, do.conf = TRUE, do.out = TRUE)
hist(eligible.population$SalePrice, xlab = "Sale Price ($$)", col = "lightgrey",
main = "")
par(mfrow = c(1,1))
par(mfrow = c(1,2))
plot(eligible.population$LotArea, eligible.population$SalePrice,
ylab = "Sale Price ($$)",
xlab = "Lot Size (square feet)")
abline(lm(eligible.population$SalePrice ~ eligible.population$LotArea), col = "red", lwd = 2, lty = 2)
lines(lowess(eligible.population$LotArea, eligible.population$SalePrice),
col = "blue", lwd = 2, lty = 1)
#title(main = "Residential Properties sold in Ames, IA from 2006 - 2010", outer = TRUE)
hist(eligible.population$LotArea, xlab = "Lot Size (square feet)", col = "lightgrey",
main = "")
par(mfrow = c(1,1))
par(mfrow = c(1,2))
plot(eligible.population$YearBuilt, eligible.population$SalePrice,
ylab = "Sale Price ($$)",
xlab = "Year Built")
abline(lm(eligible.population$SalePrice ~ eligible.population$YearBuilt), col = "red", lwd = 2, lty = 2)
lines(lowess(eligible.population$YearBuilt, eligible.population$SalePrice),
col = "blue", lwd = 2, lty = 1)
#title(main = "Residential Properties sold in Ames, IA from 2006 - 2010", outer = TRUE)
hist(eligible.population$YearBuilt, xlab = "Year Built", col = "lightgrey",
ylim = c(0,400),
main = "")
par(mfrow = c(1,1))
par(mfrow = c(1,2))
plot(eligible.population$GrLivArea, eligible.population$SalePrice,
ylab = "Sale Price ($$)",
xlab = "Above Grade Living Area (square feet)")
abline(lm(eligible.population$SalePrice ~ eligible.population$GrLivArea), col = "red", lwd = 2, lty = 2)
lines(lowess(eligible.population$GrLivArea, eligible.population$SalePrice),
col = "blue", lwd = 2, lty = 1)
#title(main = "Residential Properties sold in Ames, IA from 2006 - 2010")
hist(eligible.population$GrLivArea, xlab = "Above Grade Living Area (square feet)", col = "lightgrey",
ylim = c(0,800),
main = "")
par(mfrow = c(1,1))
par(mfrow = c(1,2))
table(eligible.population$BedroomAbvGr, eligible.population$FullBath)
plot(jitter(eligible.population$BedroomAbvGr), jitter(eligible.population$FullBath),
ylab = "Number of Full Baths",
xlab = "Number of Bedrooms Above Grade")
lines(lowess(eligible.population$BedroomAbvGr, eligible.population$FullBath),
col = "blue", lwd = 2, lty = 1)
#title(main = "Residential Properties sold in Ames, IA from 2006 - 2010")
table(eligible.population$BedroomAbvGr, eligible.population$HalfBath)
plot(jitter(eligible.population$BedroomAbvGr), jitter(eligible.population$HalfBath),
ylab = "Number of Half Baths",
xlab = "Number of Bedrooms Above Grade")
lines(lowess(eligible.population$BedroomAbvGr, eligible.population$HalfBath),
col = "blue", lwd = 2, lty = 1)
#title(main = "Residential Properties sold in Ames, IA from 2006 - 2010")
par(mfrow = c(1,1))
par(mfrow = c(1,3))
boxplot(eligible.population$SalePrice ~ eligible.population$BedroomAbvGr, col = "lightgrey",
ylab = "Sale Price ($$)",
xlab = "Number of Bedrooms Above Grade")
#title(main = "Residential Properties sold in Ames, IA from 2006 - 2010")
boxplot(eligible.population$SalePrice ~ eligible.population$FullBath, col = "lightgrey",
ylab = "Sale Price ($$)",
xlab = "Number of Full Baths")
#title(main = "Residential Properties sold in Ames, IA from 2006 - 2010")
boxplot(eligible.population$SalePrice ~ eligible.population$HalfBath, col = "lightgrey",
ylab = "Sale Price ($$)",
xlab = "Number of Half Baths")
#title(main = "Residential Properties sold in Ames, IA from 2006 - 2010")
par(mfrow = c(1,1))
par(mfrow = c(1,3))
boxplot(eligible.population$SalePrice ~ eligible.population$Heating, col = "lightgrey",
ylab = "Sale Price ($$)",
xlab = "Type of Heating")
#title(main = "Residential Properties sold in Ames, IA from 2006 - 2010")
boxplot(eligible.population$SalePrice ~ eligible.population$Utilities, col = "lightgrey",
ylab = "Sale Price ($$)",
xlab = "Utilities")
#title(main = "Residential Properties sold in Ames, IA from 2006 - 2010")
boxplot(eligible.population$SalePrice ~ eligible.population$GarageType, col = "lightgrey",
ylab = "Sale Price ($$)",
xlab = "Garage Type")
#title(main = "Residential Properties sold in Ames, IA from 2006 - 2010")
par(mfrow = c(1,1))
grid.table(addmargins(table(eligible.population$FullBath, eligible.population$HalfBath, dnn = c("FullBath", "HalfBath"))))
#Pick three variables from the ten variables from the initial exploratory data analysis
#Explore their relationship with SalePrice and Log(SalePrice)
par(mfrow = c(1,2))
plot(eligible.population$YearBuilt, eligible.population$SalePrice,
ylab = "Sale Price ($$)",
xlab = "Year Built")
abline(lm(eligible.population$SalePrice ~ eligible.population$YearBuilt), col = "red", lwd = 2, lty = 2)
lines(lowess(eligible.population$YearBuilt, eligible.population$SalePrice),
col = "blue", lwd = 2, lty = 1)
#log of sale price
plot(eligible.population$YearBuilt, log(eligible.population$SalePrice),
ylab = "log(Sale Price)",
xlab = "Year Built")
abline(lm(log(eligible.population$SalePrice) ~ eligible.population$YearBuilt), col = "red", lwd = 2, lty = 2)
lines(lowess(eligible.population$YearBuilt, log(eligible.population$SalePrice)),
col = "blue", lwd = 2, lty = 1)
#title(main = "Residential Properties sold in Ames, IA from 2006 - 2010", outer=TRUE)
par(mfrow = c(1,1))
par(mfrow = c(1,2))
plot(eligible.population$GrLivArea, eligible.population$SalePrice,
ylab = "Sale Price ($$)",
xlab = "Above Grade Living Area (square feet)")
abline(lm(eligible.population$SalePrice ~ eligible.population$GrLivArea), col = "red", lwd = 2, lty = 2)
lines(lowess(eligible.population$GrLivArea, eligible.population$SalePrice),
col = "blue", lwd = 2, lty = 1)
#log of GrLivArea
plot(eligible.population$GrLivArea, log(eligible.population$SalePrice),
ylab = "log(Sale Price)",
xlab = "Above Grade Living Area (square feet)")
abline(lm(log(eligible.population$SalePrice) ~ eligible.population$GrLivArea), col = "red", lwd = 2, lty = 2)
lines(lowess(eligible.population$GrLivArea, log(eligible.population$SalePrice)),
col = "blue", lwd = 2, lty = 1)
#title(main = "Residential Properties sold in Ames, IA from 2006 - 2010", outer=TRUE)
par(mfrow = c(1,1))
par(mfrow = c(1,2))
plot(jitter(eligible.population$BedroomAbvGr), eligible.population$SalePrice,
ylab = "Sale Price ($$)",
xlab = "Number of Bedrooms")
abline(lm(eligible.population$SalePrice ~ eligible.population$BedroomAbvGr), col = "red", lwd = 2, lty = 2)
lines(lowess(eligible.population$BedroomAbvGr, eligible.population$SalePrice),
col = "blue", lwd = 2, lty = 1)
#log with bedroom
plot(jitter(eligible.population$BedroomAbvGr), log(eligible.population$SalePrice),
ylab = "log(Sale Price)",
xlab = "Number of Bedrooms")
abline(lm(log(eligible.population$SalePrice) ~ eligible.population$BedroomAbvGr), col = "red", lwd = 2, lty = 2)
lines(lowess(eligible.population$BedroomAbvGr, log(eligible.population$SalePrice)),
col = "blue", lwd = 2, lty = 1)
#title(main = "Residential Properties sold in Ames, IA from 2006 - 2010", outer = TRUE)
par(mfrow = c(1,1)) |
ab154d089ca724467e489524f451e53715ad47c8 | cb93cf0799e3eedca6f9e720e09bb60e0f77ff10 | /man/readDataFrame.TabularTextFileSet.Rd | 1f945560600be613f77b876d1d0f87be8a0a5ecb | [] | no_license | HenrikBengtsson/R.filesets | 254c37b4546e8280b9972d06840b918e12e0b4e9 | 17181ae1c84dbf7bad1214d37e6f133ed2deeba4 | refs/heads/master | 2023-01-08T23:58:09.708417 | 2022-07-21T09:52:18 | 2022-07-21T09:52:18 | 20,844,863 | 3 | 1 | null | 2018-04-03T22:12:45 | 2014-06-15T00:25:31 | R | UTF-8 | R | false | false | 3,083 | rd | readDataFrame.TabularTextFileSet.Rd | %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% Do not modify this file since it was automatically generated from:
%
% TabularTextFileSet.R
%
% by the Rdoc compiler part of the R.oo package.
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\name{readDataFrame.TabularTextFileSet}
\alias{readDataFrame.TabularTextFileSet}
\alias{TabularTextFileSet.readDataFrame}
\alias{readDataFrame,TabularTextFileSet-method}
\title{Reads the tabular data from all files as data frames}
\description{
Reads the tabular data from all files as data frames and combines them into one data frame (by default).
}
\usage{
\method{readDataFrame}{TabularTextFileSet}(this, ..., combineBy=function(x) Reduce(rbind, x), verbose=FALSE)
}
\arguments{
\item{...}{Arguments passed to
\code{\link[R.filesets:readDataFrame.TabularTextFile]{readDataFrame}()}
as called for each \code{\link{TabularTextFile}} of the file set.}
\item{combineBy}{A \code{\link[base]{function}} that takes a \code{\link[base]{list}} of \code{\link[base]{data.frame}}:s
and combines them. The default is to stack them into a single
\code{\link[base]{data.frame}}. If \code{\link[base]{NULL}}, the \code{\link[base]{list}} is not combined.}
}
\value{
Returns what \code{combineBy} returns, which defaults to a \code{\link[base]{data.frame}}.
If \code{combineBy=NULL}, then a named \code{\link[base]{list}} of \code{\link[base]{data.frame}}:s is returned.
}
\examples{
# Setup a file set consisting of all *.dat tab-delimited files
# in a particular directory
path <- system.file("exData/dataSetA,original", package="R.filesets")
ds <- TabularTextFileSet$byPath(path, pattern="[.]dat$")
print(ds)
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
# Read data frames from each of the files
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
dataList <- lapply(ds, readDataFrame)
print(dataList)
rows <- c(3:5, 8, 2)
dataList <- lapply(ds, readDataFrame, rows=rows)
print(dataList)
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
# Read common columns and stack into one data frame
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
colNames <- Reduce(intersect, lapply(ds, getColumnNames))
cat("Common column names:\n")
print(colNames)
# Read the *common* columns "as is" (hence 'NA')
colClasses <- rep(NA, times=length(colNames))
names(colClasses) <- colNames
cat("Column class patterns:\n")
print(colClasses)
data <- readDataFrame(ds, colClasses=colClasses)
print(data)
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
# Translate column names on the fly
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
lapply(ds, FUN=setColumnNamesTranslator, function(names, ...) toupper(names))
data <- readDataFrame(ds, colClasses=c("(X|Y)"="integer", "CHAR"="character"))
print(data)
}
\author{Henrik Bengtsson}
\seealso{
For more information see \code{\link{TabularTextFileSet}}.
}
\keyword{internal}
\keyword{methods}
\keyword{IO}
\keyword{programming}
|
a28be046bcfdf8fd349dc899ae20f0a3ef5d82af | 78138e49cc362483e7a51c61c5276e378049d967 | /plot1.R | 69bd5d3a5a2490c189f58325b367f8e3929f6901 | [] | no_license | jdchisholm/ExData_Plotting1 | a29cce175e50d30148c999a933ab5d4a9c437c92 | 24e341d7cf6e202de473b62e38a7cf1e2b0ad440 | refs/heads/master | 2020-05-23T10:10:19.980109 | 2015-07-12T23:14:13 | 2015-07-12T23:14:13 | 38,974,709 | 0 | 0 | null | 2015-07-12T19:23:35 | 2015-07-12T19:23:34 | null | UTF-8 | R | false | false | 762 | r | plot1.R | plot1 <- function () {
#Data is read in but skips to the desired date, then reads in the rows of data wanted for the assignment
data <- read.table("household_power_consumption.txt", sep=";", skip = grep("1/2/2007", readLines("household_power_consumption.txt")), nrows = 2880)
#Changed the data in the Global_Active_Power column (column #3) to numeric
data[,3] <- as.character(data[,3])
data[,3] <- as.numeric(data[,3])
#Open graphic device, create histogram, copy it to a png file, then close graphic device
quartz()
hist(data[,3], main="Global Active Power", xlab = "Global Active Power (kilowatts)", col = "Red")
dev.copy(png, file = "plot1.png", width=480, height = 480)
dev.off()
}
|
2162df739978be395bbbbca699683eb368f00c14 | a441ac887d892f3999f1051df80989e3bea04941 | /all_primate_preProcessing_post_brainVarFiltering.R | 3cacd43a4c1a232da3022950bbc358249c015dd6 | [] | no_license | FernaldLab/_code | bdad38d072fe3a9a09ba8e5c98591c0caaf73173 | 46a08137095c4c0d8fbe046ea1d740b61714ad47 | refs/heads/master | 2016-09-06T14:14:35.280437 | 2015-07-07T19:43:16 | 2015-07-07T19:43:16 | 38,710,635 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 3,572 | r | all_primate_preProcessing_post_brainVarFiltering.R | rm(list=ls()); options(stringsAsFactors=F);
library(WGCNA); allowWGCNAThreads();
setwd('/Volumes/fishstudies/_mammalian_RNAseq/');
source('/Volumes/fishstudies/_code/preProcATH-for_web_noVSN.R');
source('/Volumes/fishstudies/_code/blockwiseModulesEnriched-Feb2013.R');
#load('all_primate_brainVar_filtering_DATA.all.higherBrainVar.RData');
load('all_primate_brainVar_filtering_speciesDATA.RData')
# Earlier runs restricted the input to brain/cerebellum samples or to male
# brain samples only; those subsetting steps are retained here as comments.
#keep = grepl('br|cb',names(DATA.all.higherBrainVar));
#DATA = DATA.all.higherBrainVar[, keep];
#keep = grepl('br', names(DATA.Common)) & grepl('M', names(DATA.Common));
#DATA = DATA.Common[, keep];
# Use the full cross-species matrix, minus one known problem sample
# (sample name format appears to be species.tissue.sex.replicate).
DATA <- DATA.Common
samplesToDrop <- c('ggo.cb.M.1')
DATA <- DATA[, !(names(DATA) %in% samplesToDrop)]
zthresh = floor(ncol(DATA)/3);
out = preProcess(datIN=DATA,
removeOutlierProbes=T, deviate=3,
removeTooManyNAs=T, probe_thresh=zthresh,
sample_thresh=NULL, removeOutlierSamples=T, IACthresh=2,
Qnorm=T);
#####################################################################################
DATA = as.data.frame(t(out$data_Qnorm));
BLOCKSIZE = 2000;
TYPE = 'signed';
sft = pickSoftThreshold(DATA, networkType=TYPE, verbose=3, blockSize=BLOCKSIZE);
POWER = 14;
k = softConnectivity(DATA, type=TYPE, power=POWER, blockSize=BLOCKSIZE);
par(mfrow=c(1,2));
scaleFreePlot(k); hist(k,col='grey',border='darkgrey');
DS = 2;
MM = 10;
MCH = 0.15;
NET = blockwiseModules(datExpr=DATA, maxBlockSize=BLOCKSIZE, networkType=TYPE, power=POWER, deepSplit=DS, minModuleSize=MM, mergeCutHeight=MCH, verbose=3);
dendro=NET$dendrograms;
block = 1;
blockGenes = NET$blockGenes;
colors = NET$colors;
MEs = NET$MEs;
source('/Volumes/fishstudies/_code/exploreNetwork.R');
source('/Volumes/fishstudies/_code/checkGeneListEnrichment.R');
library(RDAVIDWebService); library(biomaRt);
exn.plotDendroAndColors(dendro, colors, block=block, blockGenes=blockGenes);
exn.plotEigengeneNetworks2(MEs)
kME = exn.computekME(DATA, MEs)$all;
mod.genes=exn.getModuleGenes(DATA, colors);
# Collect, for every module, the genes ranked by module membership (kME).
#
# Args:
#   module_names: character vector of module (color) names.
#   colors: per-gene module assignment vector (as used elsewhere in this script).
#   kME: data frame of kME values (genes x module eigengenes).
# Returns:
#   A named list, one element per module in the order of `module_names`,
#   each holding the result of exn.getModulekME() for that module.
.getModGenesRankedBykME = function(module_names, colors, kME) {
# Fix: the original looped over 1:length(module_names), which yields c(1, 0)
# and fails when module_names is empty; lapply() handles that case correctly
# and avoids growing the list element by element.
outList <- lapply(module_names, function(mod) exn.getModulekME(mod, colors, kME))
names(outList) <- module_names
return(outList)
}
modkMEs = .getModGenesRankedBykME(names(table(colors)),colors,kME);
length(table(colors))
MFROW = c(3,7);
factors = unlist(strsplit(rownames(DATA),'\\.'))[seq(2,length(unlist(strsplit(rownames(DATA),'\\.'))),4)];
par(mfrow=MFROW)
for(i in 1:ncol(MEs)) {
verboseBoxplot(MEs[,i], as.factor(factors), xlab='', ylab='', col=gsub('ME','',names(MEs)[i]),main=names(MEs)[i],cex.axis=1);
}
factors2 = factors; factors2[grepl('br|cb', factors2)] = 'br/cb';
par(mfrow=MFROW)
for(i in 1:ncol(MEs)) {
verboseBoxplot(MEs[,i], as.factor(factors2), xlab='', ylab='', col=gsub('ME','',names(MEs)[i]),main=names(MEs)[i],cex.axis=1);
}
factors3 = factors2; factors3[grepl('ht|kd|lv', factors3)] = 'ht/kd/lv';
par(mfrow=MFROW)
for(i in 1:ncol(MEs)) {
verboseBoxplot(MEs[,i], as.factor(factors3), xlab='', ylab='', col=gsub('ME','',names(MEs)[i]),main=names(MEs)[i],cex.axis=1);
}
tmp = read.table('primates_hiv1_interactions',header=F,sep='\t',row.names=1);
primates_hiv1_interactions = tmp[,1]; names(primates_hiv1_interactions) = rownames(tmp); rm(tmp);
primates_hiv1_interactions = primates_hiv1_interactions[primates_hiv1_interactions==1]
checkGeneListEnrichmentList(names(primates_hiv1_interactions),mod.genes,names(DATA));
hiv = checkGeneListEnrichmentList(names(primates_hiv1_interactions),mod.genes,names(DATA))$pvals;hiv |
96be9c52ab95834ccdb7854fa9f3dd1b3edaf5ec | 387cef68bd1759aa02bffe9c097d45787e862106 | /tests/testthat/test-obtain.R | ba9caa658180f9e7e0e0058453340236505f60c1 | [] | no_license | emmekeaarts/mHMMbayes | 85c1f2cbe7c34e94f6b9c463f673007db5d575a2 | 0222eb41d7e143eae02a33199c93364fabd07b13 | refs/heads/master | 2023-08-31T01:12:49.606904 | 2023-08-14T12:01:50 | 2023-08-14T12:01:50 | 167,544,703 | 11 | 10 | null | 2023-07-25T22:37:58 | 2019-01-25T12:33:24 | R | UTF-8 | R | false | false | 3,313 | r | test-obtain.R | context("obtain gamma and emiss")
## general properties tested model
n_t <- 100
n <- 10
m <- 3
J = 11
burn_in = 5
n_dep <- 2
q_emiss <- c(4,2)
gamma <- matrix(c(0.8, 0.1, 0.1,
0.2, 0.6, 0.2,
0.1, 0.2, 0.7), ncol = m, byrow = TRUE)
emiss_distr <- list(matrix(c(0.45, 0.45, 0.05, 0.05,
0.1, 0.05, 0.8, 0.05,
0.1, 0.1, 0.2, 0.6), nrow = m, ncol = q_emiss[1], byrow = TRUE),
matrix(c(0.7, 0.3,
0.9, 0.1,
0.8, 0.2), nrow = m, ncol = q_emiss[2], byrow = TRUE)
)
set.seed(4231)
data_sim <- sim_mHMM(n_t = n_t, n = n, gen = list(m = m, n_dep = n_dep, q_emiss = q_emiss), gamma = gamma,
emiss_distr = emiss_distr, var_gamma = .5, var_emiss = c(.5, 0.5))
colnames(data_sim$obs) <- c("subj", "output_1", "output_2")
# Fit the mHMM on 2 dep variable data
set.seed(3523)
out_2st_simb <- mHMM(s_data = data_sim$obs,
gen = list(m = m, n_dep = n_dep, q_emiss = q_emiss),
start_val = c(list(gamma), emiss_distr),
mcmc = list(J = J, burn_in = burn_in), show_progress = FALSE)
####################
## TESTING
###############
# Invalid arguments must raise informative errors: an unknown `level`,
# an object that is not an mHMM fit, and a burn-in that leaves fewer than
# two post-burn-in MCMC iterations (the fit above used J = 11).
test_that("expected errors obtain gamma and emiss", {
expect_error(obtain_gamma(out_2st_simb, level = 'a'), " should be set to either group or subject")
ab <- c(2,3,4)  # plain numeric vector, i.e. not of class mHMM
expect_error(obtain_gamma(ab), "should be from the class mHMM")
expect_error(obtain_gamma(out_2st_simb, burn_in = 10), "burn in period should be at least 2 points smaller")
expect_error(obtain_emiss(out_2st_simb, level = 'a'), " should be set to either group or subject")
expect_error(obtain_emiss(ab), "should be from the class mHMM")
expect_error(obtain_emiss(out_2st_simb, burn_in = 10), "burn in period should be at least 2 points smaller")
})
# Shape and regression checks for obtain_gamma(): group level returns one
# m x m transition matrix, subject level a list of n such matrices.
# The hard-coded values are regression values tied to the seeds set above.
test_that("output obtain gamma", {
gamma1_g <- obtain_gamma(out_2st_simb, level = 'group')
gamma1_subj <- obtain_gamma(out_2st_simb, level = 'subject')
# test dimensions
expect_equal(dim(gamma1_g), c(m,m))
expect_equal(length(gamma1_subj), n)
expect_equal(dim(gamma1_subj[[1]]), c(m,m))
expect_equal(dim(gamma1_subj[[1]]), dim(gamma1_subj[[n]]))
# calculations (seed-dependent regression values)
expect_equal(as.vector(gamma1_g[2,]), c(0.226, 0.575, 0.199))
expect_equal(as.vector(gamma1_subj[[1]][3,]), c( 0.038, 0.352, 0.610 ))
expect_equal(as.vector(gamma1_subj[[n]][1,]), c(0.718, 0.081, 0.201))
})
# Shape and regression checks for obtain_emiss(): one m x q_emiss[d] matrix
# per dependent variable at the group level, and per subject at the subject
# level. Hard-coded values are regression values tied to the seeds above.
test_that("output obtain emiss", {
emiss1_g <- obtain_emiss(out_2st_simb, level = 'group')
emiss1_subj <- obtain_emiss(out_2st_simb, level = 'subject')
# test dimensions
expect_equal(dim(emiss1_g[[1]]), c(m,q_emiss[1]))
expect_equal(dim(emiss1_g[[2]]), c(m,q_emiss[2]))
expect_equal(length(emiss1_subj[[1]]), n)
expect_equal(length(emiss1_subj[[2]]), n)
expect_equal(dim(emiss1_subj[[1]][[1]]), c(m,q_emiss[1]))
expect_equal(dim(emiss1_subj[[2]][[1]]), dim(emiss1_subj[[2]][[n]]))
# calculations (seed-dependent regression values)
expect_equal(as.vector(emiss1_g[[1]][2,]), c(0.112, 0.094, 0.700, 0.094 ))
expect_equal(as.vector(emiss1_g[[2]][2,]), c(0.821, 0.179 ))
expect_equal(as.vector(emiss1_subj[[1]][[1]][3,]), c(0.062, 0.034, 0.312, 0.593))
expect_equal(as.vector(emiss1_subj[[2]][[n]][1,]), c(0.567, 0.433))
})
|
de7b51b24456e4804e4585edd246899efaa57965 | 382e48282d3cde1e0cabf4aa8e67be4f34fd1bba | /Project_code_course8.R | 1081a02404c81765ce209b32377d182ed07a8e0e | [] | no_license | kalyansaikia/Practical_Machine_Learning_Project | e74bf6c1ab6bd36efbe5ef932d16fb8410aa1918 | c976c4d00278c2ca0460ec305729aee74ba7e61e | refs/heads/master | 2021-01-17T17:39:19.573988 | 2016-08-14T06:52:51 | 2016-08-14T06:52:51 | 65,653,490 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 3,279 | r | Project_code_course8.R | library(caret)
library(rpart)
library(rpart.plot)
library(RColorBrewer)
library(rattle)
library(randomForest)
library(corrplot)
set.seed(30001)
# reading the original data
train_Raw <- read.csv('./pml-training.csv', header=T, na.strings = c("", "NA"))
validationRaw <- read.csv('./pml-testing.csv', header=T, na.strings = c("", "NA"))
dim(train_Raw)
dim(validationRaw)
# data cleanup
#Data cleanup is a very important step in this analysis. here we will get rid of the observations with missing values and some meaningless variables.
sum(complete.cases(train_Raw))
# first we remove columns that contain NA/missing values
train_Raw <- train_Raw[, colSums(is.na(train_Raw)) == 0]
validationRaw <- validationRaw[, colSums(is.na(validationRaw)) == 0]
# Since data doesn't have time dependence so the columns with time information are removed from tha dataset including the first column (i.e. observation number). However, in the cleaned dataset, the variable 'classe' is kept in the dataset.
train_Raw <- train_Raw[,c(8:60)]
validationRaw<- validationRaw[, c(8:60)]
# Partitioning the train data into two parts
train_sample <- createDataPartition(y=train_Raw$classe, p=0.7, list=FALSE)
train_data <- train_Raw[train_sample, ]
test_data <- train_Raw[-train_sample, ]
# Data Modeling
# In this analysis we tried to fit Random forest algorithm to fit predictive model for recognition of activity.
# The reasons for selecting Random Forest are:
# It automatically identifies the important variables and
# It produces robust correlated covariates and outliers
# While employing Random Forest algorithm we applied 5 fold cross validation of the algorithm.
Rfcontrol <- trainControl(method="cv", 5) # Specifying Random Forest method
Rfmodel <- train(classe ~ ., data=train_data, method="rf", trControl=Rfcontrol, ntree=250)
Rfmodel #display the model parameters
# After building model, the performance was tested using the partitioned test data as below:
Rfpredict <- predict(Rfmodel, test_data)
confusionMatrix(test_data$classe, Rfpredict)
#Determining model accuracy and Out-of-sample Error Estimation
accuracy <- postResample(Rfpredict, test_data$classe) # Model Accuracy
accuracy
oose <- 1 - as.numeric(confusionMatrix(test_data$classe, Rfpredict)$overall[1]) # Out-of-sample error estimation
oose
# From the above parameter, the accuracy of modeling is estimated as 99.42% and out-of-sample error is 0.58%
# After analysing the accuracy and out-of-sample error it was decided to go ahead Random Forest model to predict the parameters 'classe' in the validation dataset. Predicting the the manner in which excercise was carried using the validation dataset. In this step we apply the model to the original test dataset as downloaded from the source.
# After reviewing data it was noticed a column called "problem_id" exist which needs to be removed before prediction.
validationRaw<-validationRaw[,c(1:53)]
Finalresult <- predict(Rfmodel, validationRaw)
Finalresult
# Viewing the correlation matrix
CP <- cor(train_data[, -length(names(train_data))])
corrplot(CP, method="circle")
#Viewing Decision tree
treeModel <- rpart(classe ~ ., data=train_data)
prp(treeModel) # fast plot
|
587b5b5c7a688e2e7a7d7931677e892267720ebe | 582ced3e41fa163afa55dc913c48ed6ed87472ac | /R/transitiveClosure.cover.R | eaec1425f280966cf881e80707c9d1cb99662bf4 | [] | no_license | cran/parsec | 70c8651b851fdf0ffbda532ee8c8b6d16571f929 | a92f381f2df1aa8c058c26eb6da052910bc64756 | refs/heads/master | 2023-09-06T06:50:44.609182 | 2023-08-19T13:40:02 | 2023-08-19T14:30:52 | 37,854,653 | 1 | 0 | null | null | null | null | UTF-8 | R | false | false | 131 | r | transitiveClosure.cover.R | transitiveClosure.cover <-
function(m) {
res <- transitiveClosure.default(m)
class(res) <- "cover"
return(res)
}
|
589b70aea928c4a506f1a8cbbc2efbbabfc4b0a8 | d5d52840065492b2b73172be6bd0738e6a708dff | /package/fwPackage_1.0_source/R/as.data.frame.R | e943c47d9ee3baaa85e983d3c616f4698db92f5b | [] | no_license | grayclhn/oos-overfit | 9f3e1aca769d4bbba45b84044e609b37a843b9a0 | 779f466158fa4254a8845b47c5b003dedab69ccc | refs/heads/master | 2021-01-17T08:20:04.806990 | 2017-05-05T16:18:09 | 2017-05-05T21:27:21 | 73,234,970 | 1 | 0 | null | null | null | null | UTF-8 | R | false | false | 731 | r | as.data.frame.R | as.data.frame.oos.pair <- function(x,...) {
data.frame(n = nobs(x),
ntest = ntest(x),
kNull = nreg(model.null(x)),
kAlt = nreg(model.alt(x)),
norm = bNorm(x),
elossFuture = expected.loss.future(x),
elossFutureNull = expected.loss.future(model.null(x)),
elossFutureAlt = expected.loss.future(model.alt(x)),
elossTest = expected.loss.test(x),
elossTestNull = expected.loss.test(model.null(x)),
elossTestAlt = expected.loss.test(model.alt(x)),
...)
}
# S3 as.data.frame method for "oos.forecast" objects: one-row data frame
# with the test-sample size and regressor count; extra named columns may
# be appended via `...` (forwarded to data.frame()).
as.data.frame.oos.forecast <- function(x,...) {
data.frame(ntest = ntest(x),
nreg = nreg(x),
...)
}
|
57afeabd527f22f4a2a6faa15c42e890a94b8926 | d4065309f6031e6af1fe91147ca1dc18a559205f | /scripts/config.R | 5290f1ad89649b02e4e6199f42f8932999574d06 | [] | no_license | barbarathorslund/warfarin | 3741b85bd580599cde2ba283e23774a7a018c19e | 0ec7779313ad126605a047e44c7485b3a348568e | refs/heads/main | 2023-06-25T22:35:36.441326 | 2021-07-14T18:50:44 | 2021-07-14T18:50:44 | 345,354,658 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 4,210 | r | config.R | add_config <- function(name, value) {
#' Defines a config variable by environmental variables or default value
#'
#' Provides the name of the config variable
#' @param name The name of the variable
#' @param value The value for the config variable
assign(name, Sys.getenv(name, unset = value), envir = .GlobalEnv)
}
# Main paths
add_config("DATA_DIR", "../../data/ukb")
add_config("KING_DATA_DIR", "../../data/ukb/king")
add_config("RAW_DATA_DIR", "../../data/ukb/raw")
add_config("RINTER_DIR", "../../data/ukb/Rinter")
add_config("PRS_DATA_DIR", "../../data/ukb/prs")
add_config("RESULTS_DIR", "../../results")
add_config("PLOTS_DIR", "../../results/plots")
add_config("BOLT_OUT_DATA_DIR", "../../data/ukb/bolt_out")
add_config("EASYQC_OUT_DATA_DIR", "../../data/ukb/easyQC_out")
# Files
## UKB files
# define_phenotype.R
add_config("ISSUE_FILE_RAW", paste(RAW_DATA_DIR ,"ukb_gp_scripts.txt", sep= "/"))
add_config("DEFINED_PHENOTYPE", paste(RINTER_DIR, "defined_phenotype.txt", sep = "/"))
# QC_filter.R
add_config("SQC_FILE_RAW", paste(RAW_DATA_DIR ,"ukb_sqc_v2.txt.gz", sep= "/"))
add_config("HDR_FILE_RAW", paste(RAW_DATA_DIR ,"ukb_sqc_v2.header.txt.gz", sep= "/"))
add_config("FAM_FILE_RAW", paste(RAW_DATA_DIR ,"ukb_43247_cal_chr1_v2_s488282.fam", sep= "/"))
add_config("GQC_FILE_RAW", paste(RAW_DATA_DIR, "ukb_geneticSampleLevel_QCs_190527.tsv.gz", sep="/"))
add_config("QC_FILTERED", paste(RINTER_DIR, "qc_filtered.txt", sep = "/"))
add_config("QC_FILTER_INFO", paste(RESULTS_DIR, "qc_filter_info.txt", sep = "/"))
# phenofile.R
add_config("FINAL_PHENOFILE", paste(RINTER_DIR, "pheno.txt", sep = "/"))
add_config("FINAL_SUBSAMPLE", paste(RINTER_DIR, "subsample_id.txt", sep = "/"))
# king_relatedpairs.R
add_config("KING_KIN_FILE", paste(KING_DATA_DIR, "king.kin", sep = "/"))
add_config("KING_KIN0_FILE", paste(KING_DATA_DIR, "king.kin0", sep = "/"))
add_config("EXCLUDE_RELATED", paste(KING_DATA_DIR, "excluderelated.txt", sep = "/"))
# covarfile.R
add_config("METADATA_FILE_RAW", paste(RAW_DATA_DIR,"ukb45051.all_fields.h5", sep = '/'))
add_config("UNRELATED_PC", paste(DATA_DIR, "flashpca/pcs.txt", sep ="/"))
add_config("RELATED_PC", paste(DATA_DIR, "flashpca/projections.txt", sep = "/"))
add_config("COVARFILE", paste(RINTER_DIR, "cov.txt", sep = "/"))
# 12_merge_bolt.R
##----------------------------------------------------------------------------------------
## metaanalysis
# easyQC.R
add_config("COMBINED_BOLT", paste(BOLT_OUT_DATA_DIR, "ukb_combined_bolt_out.txt", sep = "/"))
add_config("COMBINED_BOLT_V2", paste(BOLT_OUT_DATA_DIR, "ukb_combined_bolt_v2.tsv", sep = "/"))
add_config("RSMID_FILE", paste(RAW_DATA_DIR, "rsmid_machsvs_mapb37.1000G_p3v5.merged_mach_impute.v3.corrpos.gz", sep = "/"))
add_config("SUM_STAT_PRE_RSANN", paste(EASYQC_OUT_DATA_DIR, "CLEANED.warf.txt", sep = "/"))
add_config("SUM_STAT_POST_RSANN", paste(DATA_DIR, "ukb_warf_sumstat.txt", sep = "/"))
##----------------------------------------------------------------------------------------
## PRS
# prs_stroke_event.R
add_config("PRS_STROKE_SUBSAMPLE_ID", paste(PRS_DATA_DIR,"prs_stroke_idsubset.txt", sep= "/"))
add_config("PRS_STROKE_PHENO", paste(PRS_DATA_DIR, "prs_stroke_pheno.txt", sep = "/"))
# prs_stroke_covar.R
add_config("PRS_STROKE_PC", paste(PRS_DATA_DIR, "flashpca_stroke/pcs.txt", sep = "/"))
add_config("PRS_STROKE_COVAR", paste(PRS_DATA_DIR, "prs_stroke_covar.txt", sep = "/"))
# prs_bleed_event.R
add_config("PRS_BLEED_SUBSAMPLE_ID", paste(PRS_DATA_DIR,"prs_bleed_idsubset.txt", sep= "/"))
add_config("PRS_BLEED_PHENO", paste(PRS_DATA_DIR, "prs_bleed_pheno.txt", sep = "/"))
# prs_bleed_covar.R
add_config("PRS_BLEED_PC", paste(PRS_DATA_DIR, "flashpca_bleed/pcs.txt", sep = "/"))
add_config("PRS_BLEED_COVAR", paste(PRS_DATA_DIR, "prs_bleed_covar.txt", sep = "/"))
# prs_model.R
add_config("PRS_STROKE_PROFILE", "../data/prs/plink_stroke/prs.5e-08.profile")
add_config("PRS_BLEED_PROFILE", "../data/prs/plink_bleed/prs.5e-08.profile")
# PICS2.R
add_config("PICS2_CHR10", "../results/PICS2/PICS2_chr10_results.txt")
add_config("PICS2_CHR16", "../results/PICS2/PICS2_chr16_results.txt")
add_config("PICS2_CHR19", "../results/PICS2/PICS2_chr19_results.txt")
|
793c3a18b257690ab7d4e8b770a3442a4c7bcb48 | 080dcd4f00551bb3c32c288b8bd3f58724581af0 | /final1b.R | 786b057e019ec1f1f1023031ca1b10bbb9281fd8 | [] | no_license | sjaidka2/CHEME-5440-Final-Exam | ad7bc6292232c72e23a50fafcfe15689adace165 | 5ecef5775935f89ab8b0e263a906aa0401561a61 | refs/heads/master | 2022-08-24T18:17:30.279474 | 2020-05-23T02:44:11 | 2020-05-23T02:44:11 | 266,147,980 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,101 | r | final1b.R | #define D1 and D2 as vectors with range 0-1
D1<-c(0.0,0.1,0.2,0.3,0.4,0.5,0.6,0.7,0.8,0.9,1.0)
D2<-c(0.0,0.1,0.2,0.3,0.4,0.5,0.6,0.7,0.8,0.9,1.0)
#want g(f(D2)) and g(f(D1))
newD1<-c(0,0,0,0,0,0,0,0,0,0,0)
newD2<-c(0,0,0,0,0,0,0,0,0,0,0)
for (i in 1:length(D1)) {
newD1[i]<-1/((1+10*((D2[i]^2)/(0.1+(D2[i]^2))^2)))
newD2[i]<-1/((1+10*((D1[i]^2)/(0.1+(D1[i]^2))^2)))
}
#plot on one axis
plot(D1,newD2,xlab="D1",ylab="D2",main="D1 and D2",type="l")
lines(newD1,D2,col="blue")
legend("topright",c("D1 for D2 0-1","D2 for D1 0-1"),fill=c("blue","black"))
#phase portrait
library(phaseR)
library(deSolve)
time<-seq(0,10,by=1)
i<-c(d1=1,d2=1)
# Derivative function for the toggle-switch system, in the signature
# required by deSolve::ode(): f(time, state, parms) returning a list whose
# first element is the vector of derivatives. `state` carries the two
# levels d1 and d2 (named as in `i` above); `parms` is unused.
# Each derivative depends only on the OTHER variable (mutual repression):
# d1dt is built from d2 (terms A, B, C) and d2dt from d1 (terms X, Y, Z).
# NOTE(review): the constants 0.1 and 10 match the map used in part (a)
# above; the chain-rule algebra itself has not been re-verified here.
togglefunc<-function(time,state,parms){
d1=state[1]
d2=state[2]
A<--20*d2^2/(0.1+d2^2)   # parses as A <- -20*... (the "<--" is <- then unary minus)
B<-1+10*(d2^2/(0.1+d2^2)^2)^2
C<-2*d2*(0.1+d2^2)-2*d2^3/((0.1+d2^2)^2)
X<--20*d1^2/(0.1+d1^2)
Y<-1+10*(d1^2/(0.1+d1^2)^2)^2
Z<-2*d1*(0.1+d1^2)-2*d1^3/((0.1+d1^2)^2)
d1dt=A/B*C
d2dt=X/Y*Z
return(list(c(d1dt,d2dt)))
}
out<-ode(i,time,togglefunc)
phasePortrait(out,ylim=c(-1,1))
plot(out) |
27f494ea540067c2b73f641f283e4d7db4007471 | 9423c2ae3708e7e7ac78f72b4344b2dcd16119e2 | /tbl_add_varn.R | d88c29669cf4019e32c3ef11ba13edba90fd1fd0 | [
"MIT"
] | permissive | TaiSakuma/mianRs | 57d5d3e41438a35d560e7c1f0e954ab453710ac1 | 9b133cad30f5a0cadec510e96b552b0be7ba48f8 | refs/heads/master | 2021-01-18T23:18:32.366644 | 2018-05-19T08:09:16 | 2018-05-19T08:09:16 | 28,632,406 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,245 | r | tbl_add_varn.R | #!/usr/bin/env Rscript
# Tai Sakuma <sakuma@fnal.gov>
library('plyr', warn.conflicts = FALSE, quietly = TRUE)
library('reshape', warn.conflicts = FALSE, quietly = TRUE)
##____________________________________________________________________________||
# Read one or more whitespace-delimited tables, duplicate the count column
# `n` into `var.n`, row-bind everything, aggregate with do.add(), and write
# the combined table to stdout.
#
# Args:
#   inputFiles: character vector of input file paths (from the command line).
main <- function(inputFiles)
{
ret <- data.frame()
# Accumulate all inputs; each file is expected to have a column named `n`.
# NOTE(review): rbind inside a loop is quadratic in the number of rows --
# acceptable for a handful of files, revisit if inputs grow.
for(file in inputFiles)
{
d <- read.table(file, header = TRUE)
d$var.n <- d$n  # var.n starts as a copy of n; do.add() then sums both per group
ret <- rbind(ret, d)
}
ret <- do.add(ret)
write.table(ret, stdout(), quote = TRUE, row.names = FALSE)
## write.table(format(ret, digits = 4), stdout(), quote = FALSE, row.names = FALSE)
}
##____________________________________________________________________________||
# Melt the table on the measure columns (`n`, `var.n`) and re-cast it,
# summing each measure within every combination of the remaining (factor)
# columns. Uses reshape::melt/cast; returns a plain data.frame.
#
# Args:
#   data: data frame/table holding the measure columns plus grouping columns.
#   n, var.n: names of the two measure columns; pass '' to drop either one
#     (empty names are filtered out before melting).
do.add <- function(data, n = 'n', var.n = 'var.n')
{
factor.names <- names(data)[!names(data) %in% c(n, var.n)]
variables <- c(n, var.n)[c(n, var.n) != '']
data.m <- melt(data, id = c(factor.names), measured = variables)
# Build the cast formula "a + b + ...  ~ variable" as a string.
formula <- paste(paste(factor.names, collapse = ' + '), ' ~ variable')
data.r <- cast(data.m, formula, sum)
data.frame(data.r)
}
##____________________________________________________________________________||
com.args <- commandArgs(trailingOnly = TRUE)
main(inputFiles = com.args)
|
9327d85b6b13b9628ace4812d1c8bc7ac1a3061c | 520739f8e36b54dff068cdce72106fd44660ab78 | /scripts/data processing/read_raw_data.R | e21557134c822e19282af60042a6898e97719add | [] | no_license | brunamdalmoro/sm-popularity-video-features | 16350ebf46bc225a9ec8de74ab4abc6770394fe5 | 78dabeeea80afb844f4e09f89f1fa64dc35785c3 | refs/heads/master | 2020-08-02T00:15:47.465192 | 2019-12-02T01:36:30 | 2019-12-02T01:36:30 | 211,172,561 | 0 | 1 | null | null | null | null | UTF-8 | R | false | false | 1,231 | r | read_raw_data.R | # Read .csv data and save .rds data
library(tidyverse)
facebook_dataset <- read_csv("data/raw/facebook_dataset_2015.csv") %>%
as_tibble() %>%
# ajusting type of feature
mutate(
dominant_color.value = as.factor(dominant_color.value)
) %>%
# rename features
rename(
dominant_color.histogram_0 = `dominant_color.histogram[0]`,
dominant_color.histogram_1 = `dominant_color.histogram[1]`,
dominant_color.histogram_2 = `dominant_color.histogram[2]`,
dominant_color.histogram_3 = `dominant_color.histogram[3]`,
dominant_color.histogram_4 = `dominant_color.histogram[4]`,
dominant_color.histogram_5 = `dominant_color.histogram[5]`,
dominant_color.histogram_6 = `dominant_color.histogram[6]`,
dominant_color.histogram_7 = `dominant_color.histogram[7]`,
dominant_color.histogram_8 = `dominant_color.histogram[8]`,
dominant_color.histogram_9 = `dominant_color.histogram[9]`,
shot_detection.transition_histogram_0 = `shot_detection.transition_histogram[0]`,
shot_detection.transition_histogram_1 = `shot_detection.transition_histogram[1]`
) %>%
# remove nonvisual feature
select(-title.sentiment)
write_rds(x = facebook_dataset, path = "data/raw/facebook_dataset.rds")
|
48deb49955e1bc5423561799a092284a882895c8 | 44409bb2d580a0435fa0a6d26fdb772390392d3f | /inst/doc/rplexos.R | 07bb532d92e1bc91a2ab2d0803fb44296f5eab25 | [] | no_license | cran/rplexos | 28bc53eb983da4e52ca9c470c0f8d42542161169 | ad31b70d2e812222c1473d101fb562d9b59e9379 | refs/heads/master | 2020-12-29T02:19:09.608129 | 2017-01-13T19:30:53 | 2017-01-13T19:30:53 | 22,672,532 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 6,086 | r | rplexos.R | ## ---- echo = FALSE, message = FALSE-------------------------------------------------------------------------------------------------------------------------------------------------------------------
knitr::opts_chunk$set(collapse = T, comment = "#>", fig.height = 3, fig.width = 7)
options(width = 200)
library(rplexos, quietly = TRUE)
library(ggplot2, quietly = TRUE)
theme_set(theme_bw())
## -----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
location_ST <- location_solution_rplexos('ST')
location_LT <- location_solution_rplexos('LT')
location_ST
location_LT
## -----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
process_folder(location_ST)
## -----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
process_folder(c(location_ST, location_LT))
## -----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
db <- plexos_open(location_ST, "Sc1")
## -----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
db_multi <- plexos_open(c(location_ST, location_LT), name = c("ST", "LT"))
## -----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
query_property(db)
## -----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
query_generator(db)
## -----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
query_day(db, "Generator", "Generation")
## -----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
query_day(db, "Region", "*")
## -----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
gen <- query_interval(db, "Generator", "Generation")
ggplot(gen, aes(x = time, y = value, fill = name)) +
geom_area() +
labs(x = "Time", y = "Generation (MW)", fill = "Generator")
## -----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
price <- query_interval(db, "Node", "Price")
ggplot(price, aes(x = time, y = value, color = name)) +
geom_line() +
labs(x = "Time", y = "Price ($/MW)", color = "Node")
## -----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
price <- query_interval(db, "Line", "Flow")
ggplot(price, aes(x = time, y = value, color = name)) +
geom_line() +
labs(x = "Time", y = "Flow (MW)", color = "Generator")
## -----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
gen.time.filter <- query_interval(db, "Generator", "Generation",
time.range = c("2015-03-14 00:00:00", "2015-03-14 12:00:00"))
ggplot(gen.time.filter, aes(x = time, y = value, fill = name)) +
geom_area() +
labs(x = "Time", y = "Generation (MW)", fill = "Generator")
## -----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
gen.gral.filter1 <- query_interval(db, "Generator", "Generation",
filter = list(name = "Baseload"))
ggplot(gen.gral.filter1, aes(x = time, y = value, fill = name)) +
geom_area() +
labs(x = "Time", y = "Generation (MW)", fill = "Generator")
gen.gral.filter2 <- query_interval(db, "Generator", "Generation",
filter = list(name = c("Baseload", "Wind")))
ggplot(gen.gral.filter2, aes(x = time, y = value, fill = name)) +
geom_area() +
labs(x = "Time", y = "Generation (MW)", fill = "Generator")
## -----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
gen.gral.filter <- query_interval(db, "Generator", "Generation",
filter = list(name = "-Baseload"))
ggplot(gen.gral.filter, aes(x = time, y = value, fill = name)) +
geom_area() +
labs(x = "Time", y = "Generation (MW)", fill = "Generator")
## ---- message = FALSE---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
location2 <- location_input_rplexos()
location2
## -----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
# process_folder(location2)
|
a36b43d26dde850ff6f3c42ba5da78be0ec8c509 | ab1589c3df70adf5448b9d758fb5b4dc9c0eb113 | /code/R/basic-model/factorShotOn.R | f9759ac23153e076ba99308336128f039f196e72 | [] | no_license | Dirken/forecasting-football-results | d735c4d2408d953122d28b2c6f59c41d39025813 | 0ff5ca9828604bee73343f0b4f5343eee989535a | refs/heads/master | 2023-02-27T20:00:01.780943 | 2021-02-04T01:33:58 | 2021-02-04T01:33:58 | 290,083,232 | 2 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,260 | r | factorShotOn.R | #######################
#ShotOn
#######################
shoton <- incidents %>% filter(type == 'shoton')
shoton
table(shoton$subtype1)
table(shoton$subtype2)
shoton$type <- NULL
data <- unique(shoton$subtype1)
for(i in data){
fileName <- paste0("factorsImages/shotOn/",i, ".png")
png(filename = fileName, bg="transparent")
ggPlot <- pitch + shoton %>%
filter(subtype1 == i) %>%
geom_jitter(mapping = aes(colour = subtype1), alpha = 0.5, size = 4, stroke = 0) +
#scale_color_viridis(discrete = T) +
guides(colour = guide_legend(override.aes = list(alpha = 1))) +
theme(legend.position = "bottom")
print(ggPlot)
dev.off()
}
View(shoton)
pitch + shoton %>%
geom_jitter(mapping = aes(colour = subtype1), alpha = 0.3, size = 2, stroke = 0) +
guides(colour = guide_legend(override.aes = list(alpha = 1))) +
theme(legend.position = "bottom")
# Distribution of shot-on events along the pitch's long axis (lon),
# broken down by event subtype.
shoton %>%
ggplot(mapping = aes(x = lon)) +
geom_bar(mapping = aes(fill = subtype1)) +
#scale_color_viridis(discrete = T) +
ggtitle("Shots On by longitude")

# Same breakdown along the short axis (lat).
# Fix: the source read "IfIfIfshoton", a corrupted object name that is
# defined nowhere and would error at run time; the data frame in scope
# (and used by the parallel lon plot above) is `shoton`.
shoton %>%
ggplot(mapping = aes(x = lat)) +
geom_bar(mapping = aes(fill = subtype1)) +
#scale_color_viridis(discrete = T)+
ggtitle("Shots On by latitude")
|
92bda4e61ba58da84e637124fd915b61ad7370e9 | 002ab6dce1456134a8b0235abe6250484aa8b71e | /run_analysis.R | dc2c16d37743cce3fdf78a32bb0dc481ec5bcb6c | [] | no_license | AVMurali99/Getting-and-Cleaning-Data-Project | 25998b619daa94adda50e6cd866159e882e5cf91 | 344749780eb2e5b342d06a7a34ecaf0c977200d1 | refs/heads/master | 2016-09-15T20:15:42.001004 | 2014-12-21T01:19:28 | 2014-12-21T01:19:28 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 2,347 | r | run_analysis.R |
# Reading in the data required from files
trData <- read.table("C:/Users/ASHA/Desktop/Coursera/UCI HAR Dataset/train/X_train.txt")
trLabels <- read.table("C:/Users/ASHA/Desktop/Coursera/UCI HAR Dataset/train/y_train.txt")
trSubjects <- read.table("C:/Users/ASHA/Desktop/Coursera/UCI HAR Dataset/train/subject_train.txt")
testData <- read.table("C:/Users/ASHA/Desktop/Coursera/UCI HAR Dataset/test/X_test.txt")
testLabels <- read.table("C:/Users/ASHA/Desktop/Coursera/UCI HAR Dataset/test/y_test.txt")
testSubjects <- read.table("C:/Users/ASHA/Desktop/Coursera/UCI HAR Dataset/test/subject_test.txt")
features <- read.table("C:/Users/ASHA/Desktop/Coursera/UCI HAR Dataset/features.txt")
# Binding the activities and subjects to the test data
testData <- cbind(testLabels,testData)
testData <- cbind(testSubjects,testData)
# Binding the activities and subjects to the training data
trData <- cbind(trLabels,trData)
trData <- cbind(trSubjects,trData)
# Combining the test data and training data into one data frame called fullData
fullData <- rbind(trData, testData)
# Adding the column names to fullData and cleaning up the mistakes in the column names
fullFeatures <- c("Subject", "Activity", as.character(features$V2))
fullFeatures <- sub("BodyBody","Body", fullFeatures)
names(fullData) <- fullFeatures
# Identifying the columns that have mean and std data and picking only those
# columns into another data frame called msData
wantedFeat <- grep("mean()|std()", fullFeatures)
newFeat <- c(1,2)
newFeat <- as.integer(newFeat)
wantFeatures <- c(newFeat, wantedFeat)
msData <- fullData[,wantFeatures]
# Making the Activity column a factor variable and adding appropriate labels
msData$Activity <- factor(msData$Activity, levels = c(1,2,3,4,5,6),
labels = c("Walking", "Walking_Upstairs", "Walking_Downstairs",
"Sitting", "Standing", "Laying"))
# Aggregating this new data frame by calculating the means for each Subject and Activity
# and storing it in another data frame and then ordering it by Subject
meanmsData <- aggregate(.~Subject+Activity, msData, mean)
meanmsData <- meanmsData[order(meanmsData$Subject, meanmsData$Activity),]
# Writing this Tidy data into a new file that will be submitted
write.table(meanmsData, file = "finalData.txt", row.names = FALSE) |
f4bd7691bc77c27d6be3b0b98e889462d7ee37aa | 3cc5e4163beab8c7e71e8f6463717195c7d2f103 | /GenomicsFunctions/ReadAndParse.R | 27eb1c1ea59bfe7698a6710b27f5eb84913beccc | [
"MIT"
] | permissive | JEstabrook/Omics-QC-pipeline | fc7c514b8df1e559b56b518b619e56c78214d0f8 | e165bd1b1df0b09c268fc5e562c99104344e1df8 | refs/heads/master | 2021-06-23T18:50:12.274069 | 2018-06-29T23:09:52 | 2018-06-29T23:09:52 | 118,011,848 | 0 | 1 | null | 2019-05-06T17:52:31 | 2018-01-18T16:59:15 | Jupyter Notebook | UTF-8 | R | false | false | 3,076 | r | ReadAndParse.R | #####################################################################################################
# Set of functions useful for reading and parsing file formats
#
# readENSgtf --- Read ensembl gtf format
#
# Author: Julja Burchard
# Started - 2016
#####################################################################################################
# Read an Ensembl GTF file into a data.table, expanding the attribute column
# into one extra column per attribute key.
#
# Arguments:
#   filename      - required path to the GTF file to read
#   gtf.colnames  - names of the nine standard GTF fields (check for accuracy)
#   feature.col   - index of the attribute column to parse (normally 9)
#   comma.sub     - replacement used to protect commas inside attribute values
#   curly.open.sub, curly.end.sub - replacements protecting "{" and "}"
#   comment.char  - currently unused; kept so the signature stays
#                   backwards compatible
#
# Returns: a data.table with the nine GTF columns followed by one column per
# attribute key found in the attribute column.
readENSgtf <-
  function (filename, gtf.colnames=c('seqname','source','feature','start','end','score','strand','frame','attribute'), feature.col=9, comma.sub="|", curly.open.sub='<',curly.end.sub='>',comment.char='#'){
  ## imports
  require(data.table)
  require(jsonlite)
  ## constants
  # integer index of the attribute column, used for both masks and writes
  a = as.integer(feature.col)
  ## read in data
  # default fread settings skip tracklines and identify the type of each column
  gtf = fread(filename)
  names(gtf) = gtf.colnames
  ## parse attribute column: transform to JSON and use JSON parsers
  # First, protect characters that would clash with JSON syntax.
  # FIX: these masks previously read gtf.colnames[9] (hard-coded) while the
  # replacements wrote to column `a`; with feature.col != 9 the mask and the
  # replacement targeted different columns. Both now use `a`.
  # commas
  mymk = grepl(',',gtf[,get(gtf.colnames[a])])
  if( sum(mymk)>0 ){
    set(x=gtf,i=as.integer(which(mymk)),j=a,value=gsub(',',comma.sub,gtf[mymk,get(gtf.colnames[a])]))
  }
  # opening curly braces
  mymk = grepl('\\{',gtf[,get(gtf.colnames[a])])
  if( sum(mymk)>0 ){
    set(x=gtf,i=as.integer(which(mymk)),j=a,value=gsub('\\{',curly.open.sub,gtf[mymk,get(gtf.colnames[a])]))
  }
  # closing curly braces
  mymk = grepl('\\}',gtf[,get(gtf.colnames[a])])
  if( sum(mymk)>0 ){
    set(x=gtf,i=as.integer(which(mymk)),j=a,value=gsub('\\}',curly.end.sub,gtf[mymk,get(gtf.colnames[a])]))
  }
  # next, clear non-JSON formatting
  # remove any comments embedded in the attribute values
  mymk = grepl('[^\'"]#[^"\'{},]+',gtf[,get(gtf.colnames[a])])
  if( sum(mymk)>0 ){
    set(x=gtf,i=as.integer(which(mymk)),j=a,value=gsub(',?#[^"\'{},]+','',gtf[mymk,get(gtf.colnames[a])]))
  }
  # quote unquoted strings as found in Ensembl GFF3
  mymk = grepl('=[^\'"]\\w+[^"\']',gtf[,get(gtf.colnames[a])])
  if( sum(mymk)>0 ){
    set(x=gtf,i=as.integer(which(mymk)),j=a,value=gsub('(=[^\'"]\\w+[^"\'])','"\\1"',gtf[mymk,get(gtf.colnames[a])]))
  }
  # lastly, adapt the GTF/GFF collapsed fields to JSON format:
  # wrap each row in braces ...
  set(x=gtf,j=a,value=paste("{",gtf[,get(gtf.colnames[a])],"}",sep=''))
  # ... turn "; "-separated entries into comma-separated ones ...
  set(x=gtf,j=a,value=gsub('; ?',',',gtf[,get(gtf.colnames[a])]))
  # ... quote the attribute keys and insert " : " between key and value ...
  set(x=gtf,j=a,value=gsub('([,{}])([A-Za-z0-9_.-]+) ','\\1"\\2" : ',gtf[,get(gtf.colnames[a])]))
  # ... and drop the trailing comma left before each closing brace
  set(x=gtf,j=a,value=gsub(',}','},',gtf[,get(gtf.colnames[a])]))
  # open the JSON array on the first row and close it on the last
  gtf[1,(gtf.colnames[a]) := gsub('^','[',get(gtf.colnames[a]))]
  gtf[nrow(gtf),(gtf.colnames[a]) := gsub(',$',']',get(gtf.colnames[a]))]
  ## read the JSON and bind the parsed attributes onto the original table
  gtf.attributes = as.data.table(fromJSON(gtf[,get(gtf.colnames[a])]))
  gtf = cbind(gtf,gtf.attributes)
  return( gtf )
}
|
5f543dc5d97ca21b24d3d9c2744f9152079e388b | 3d2d38edafbf2f615b9b5263a39fb63865bc2dad | /auxillary/spc_main 0.35.r | 1da598dc782c7cb37fb7092eaecec9d65d076ed9 | [] | no_license | aa989190f363e46d/solarSfReader | 4f4ed17eb993da90845abc417aeceb2fad15646e | 17a88aba9b08e6b3c9c19f29c316e7a5cd7f4406 | refs/heads/master | 2021-01-19T10:57:38.550758 | 2014-10-01T13:22:27 | 2014-10-01T13:22:27 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 27,171 | r | spc_main 0.35.r | library(RSQLite);
## library(Hmisc);
if (file.exists("E:/Work/Aspirantura/DataBase"))
{
homedirectorypath <- "E:/Work/Aspirantura/DataBase"
setwd(homedirectorypath)
} else {
homedirectorypath <- "/home/angel/Desktop/Database/"
setwd(homedirectorypath)
Sys.setlocale('LC_ALL','C') # it necassary for good working in english locale of linux PC
}
# Read one .fss spectrum file and return its title fields plus intensities.
#
# The file is read twice: once as 4-byte floating-point values to pull the
# 201 intensity readings stored at positions 1489:1689 (fixed file layout),
# and once as null-terminated strings to recover the measurement title
# embedded near the end of the file. The title appears to have the form
# "<date>_<id>_<cuvette>_<wavelength>" (see spc_calc, which consumes the
# four fields) -- TODO confirm against the instrument's file format spec.
#
# Arguments:
#   current_file_path - path to the .fss file
# Returns: character vector: 4 title fields followed by 201 intensity values.
spc_inspect <- function (current_file_path) {
  # -- numeric part: the recorded spectrum ------------------------------------
  bin.data <- file(current_file_path, "rb")
  size <- file.info(current_file_path)$size
  # FIX: the original passed `n <- size, size <- 4` (assignments as positional
  # arguments), which clobbered locals and depended on promise-forcing order;
  # named arguments make the intent explicit. `n = size` over-requests (size
  # is in bytes, each value is 4 bytes); readBin simply stops at end-of-file.
  arr.data <- readBin(bin.data, double(), n = size, size = 4)
  # positions 1489:1689 hold the 201 intensity readings
  I.data <- arr.data[1489:1689]
  close(bin.data)
  # -- text part: the embedded title ------------------------------------------
  bin.data <- file(current_file_path, "rb")
  greasy_title <- readBin(bin.data, character(), n = 533)
  close(bin.data)
  # the title normally sits in string 532, wrapped in "@" separators together
  # with garbage bytes; string 533 is used as a fallback below
  title.data <- unlist(strsplit(greasy_title[532], "@"))
  if (length(title.data) == 3) {title.data[2] = title.data[3]}
  if (length(unlist(strsplit(title.data[2], "_"))) > 3) {
    title.data <- title.data[2]
  } else {
    # string 532 did not contain a full 4-field title; try string 533 instead
    title.data <- unlist(strsplit(greasy_title[533], "@"))
    title.data <- title.data[2]
  }
  esse <- unlist(strsplit(title.data, "_"))
  return(c(esse, I.data))
}
# Process every .fss spectrum file in one directory (a single probe series),
# derive the pyrene-fluorescence membrane parameters, and append the result
# to the database via spc_write().
#
# File titles are expected as "<date>_<id>_<cuvette>_<wavelength>", where the
# cuvette code is c0 (probe only) or c1..c10 (pyrene concentration) and the
# excitation wavelength is 286 or 337 nm.
#
# Parameters computed (original Russian comments, translated):
#   I0        - probe-only peak intensity, 320-350 nm window (286-nm excitation)
#   ImpN      - probe + membrane peak, 320-340 nm window (286 nm)
#   ImonN_*   - monomer peak, 370-380 nm window
#   IeN_*     - excimer peak, 470-480 nm window
#   NN_*      - excimer/monomer ratio; per the analysis code, 286 nm probes
#               annular lipid and 337 nm general (bulk) lipid
#   I0ImpN    - quenching ratio I0/Imp
#   I0I0ImpN  - ratio I0/(I0 - Imp), the effective energy transfer
#   tetha     - slope of I0/(I0-Imp) regressed on 1/concentration
#   F1F0      - 1/intercept of the same regression
#   PolarityN_* - excimer peak relative to the 380-400 nm band
#
# Arguments:
#   current_dir - directory path (with trailing separator) holding .fss files
# Side effect: writes one row into the "rbc" table via spc_write().
spc_calc <- function (current_dir) {
  # One spectrum vector per (cuvette, excitation) pair, indexed by emission
  # wavelength in nm: 286-nm spectra fill 300:500, 337-nm spectra fill 350:550.
  spectrum.default <- vector()
  spectrum.c1_286 <- vector();  spectrum.c1_337 <- vector()
  spectrum.c2_286 <- vector();  spectrum.c2_337 <- vector()
  spectrum.c4_286 <- vector();  spectrum.c4_337 <- vector()
  spectrum.c6_286 <- vector();  spectrum.c6_337 <- vector()
  spectrum.c8_286 <- vector();  spectrum.c8_337 <- vector()
  spectrum.c10_286 <- vector(); spectrum.c10_337 <- vector()
  Intens <- list()
  # ---- load every spectrum file in the directory ---------------------------
  for (i in dir(path = current_dir, pattern = "*.fss")) {
    data.inspect <- spc_inspect(paste(current_dir, i, sep = ""))
    title.inspect <- data.inspect[1:4]
    spectrum.inspect <- as.numeric(data.inspect[5:length(data.inspect)])
    a <- title.inspect[3]   # cuvette code ("c0" .. "c10")
    b <- title.inspect[4]   # excitation wavelength ("286" or "337")
    id_from_date_and_id <- paste(title.inspect[1], title.inspect[2], sep = "_")
    Intens$id <- title.inspect[2]
    # Intens$id <- 81  # override manually when adding datasets to the db by hand
    Intens$probe_id <- id_from_date_and_id
    # Route the spectrum into its slot (flattened from the original nested
    # if/else ladder; unrecognised titles are silently skipped, as before).
    if (a == "c0" && b == "286") {
      spectrum.default[300:500] <- spectrum.inspect
    } else if (a == "c1" && b == "286") {
      spectrum.c1_286[300:500] <- spectrum.inspect
    } else if (a == "c1" && b == "337") {
      spectrum.c1_337[350:550] <- spectrum.inspect
    } else if (a == "c2" && b == "286") {
      spectrum.c2_286[300:500] <- spectrum.inspect
    } else if (a == "c2" && b == "337") {
      spectrum.c2_337[350:550] <- spectrum.inspect
    } else if (a == "c4" && b == "286") {
      spectrum.c4_286[300:500] <- spectrum.inspect
    } else if (a == "c4" && b == "337") {
      spectrum.c4_337[350:550] <- spectrum.inspect
    } else if (a == "c6" && b == "286") {
      spectrum.c6_286[300:500] <- spectrum.inspect
    } else if (a == "c6" && b == "337") {
      spectrum.c6_337[350:550] <- spectrum.inspect
    } else if (a == "c8" && b == "286") {
      spectrum.c8_286[300:500] <- spectrum.inspect
    } else if (a == "c8" && b == "337") {
      spectrum.c8_337[350:550] <- spectrum.inspect
    } else if (a == "c10" && b == "286") {
      spectrum.c10_286[300:500] <- spectrum.inspect
    } else if (a == "c10" && b == "337") {
      spectrum.c10_337[350:550] <- spectrum.inspect
    }
  }
  # ---- peak intensities ----------------------------------------------------
  # I0: probe-only (c0) peak, 320-350 nm window, 286-nm excitation
  interval_def <- seq(320, 350, 1)
  Intens$I0 <- max(spectrum.default[interval_def])
  # Imp: probe + membrane peak, 320-340 nm window, 286-nm excitation
  interval_mp <- seq(320, 340, 1)
  Intens$Imp1 <- max(spectrum.c1_286[interval_mp])
  Intens$Imp2 <- max(spectrum.c2_286[interval_mp])
  Intens$Imp4 <- max(spectrum.c4_286[interval_mp])
  Intens$Imp6 <- max(spectrum.c6_286[interval_mp])
  Intens$Imp8 <- max(spectrum.c8_286[interval_mp])
  Intens$Imp10 <- max(spectrum.c10_286[interval_mp])
  # monomer peaks, 370-380 nm window, 286-nm excitation
  interval_mon_286 <- seq(370, 380, 1)
  Intens$Imon1_286 <- max(spectrum.c1_286[interval_mon_286])
  Intens$Imon2_286 <- max(spectrum.c2_286[interval_mon_286])
  Intens$Imon4_286 <- max(spectrum.c4_286[interval_mon_286])
  Intens$Imon6_286 <- max(spectrum.c6_286[interval_mon_286])
  Intens$Imon8_286 <- max(spectrum.c8_286[interval_mon_286])
  Intens$Imon10_286 <- max(spectrum.c10_286[interval_mon_286])
  # monomer peaks, 370-380 nm window, 337-nm excitation
  interval_mon_337 <- seq(370, 380, 1)
  Intens$Imon1_337 <- max(spectrum.c1_337[interval_mon_337])
  Intens$Imon2_337 <- max(spectrum.c2_337[interval_mon_337])
  Intens$Imon4_337 <- max(spectrum.c4_337[interval_mon_337])
  Intens$Imon6_337 <- max(spectrum.c6_337[interval_mon_337])
  Intens$Imon8_337 <- max(spectrum.c8_337[interval_mon_337])
  Intens$Imon10_337 <- max(spectrum.c10_337[interval_mon_337])
  # excimer peaks, 470-480 nm window, 286-nm excitation
  interval_e_286 <- seq(470, 480, 1)
  Intens$Ie1_286 <- max(spectrum.c1_286[interval_e_286])
  Intens$Ie2_286 <- max(spectrum.c2_286[interval_e_286])
  Intens$Ie4_286 <- max(spectrum.c4_286[interval_e_286])
  Intens$Ie6_286 <- max(spectrum.c6_286[interval_e_286])
  Intens$Ie8_286 <- max(spectrum.c8_286[interval_e_286])
  Intens$Ie10_286 <- max(spectrum.c10_286[interval_e_286])
  # excimer peaks, 470-480 nm window, 337-nm excitation
  interval_e_337 <- seq(470, 480, 1)
  Intens$Ie1_337 <- max(spectrum.c1_337[interval_e_337])
  Intens$Ie2_337 <- max(spectrum.c2_337[interval_e_337])
  Intens$Ie4_337 <- max(spectrum.c4_337[interval_e_337])
  Intens$Ie6_337 <- max(spectrum.c6_337[interval_e_337])
  Intens$Ie8_337 <- max(spectrum.c8_337[interval_e_337])
  Intens$Ie10_337 <- max(spectrum.c10_337[interval_e_337])
  # ---- excimer/monomer ratios (N), per concentration -----------------------
  # 286-nm excitation (annular lipid)
  Intens$N1_286 <- Intens$Ie1_286 / Intens$Imon1_286
  Intens$N2_286 <- Intens$Ie2_286 / Intens$Imon2_286
  Intens$N4_286 <- Intens$Ie4_286 / Intens$Imon4_286
  Intens$N6_286 <- Intens$Ie6_286 / Intens$Imon6_286
  Intens$N8_286 <- Intens$Ie8_286 / Intens$Imon8_286
  Intens$N10_286 <- Intens$Ie10_286 / Intens$Imon10_286
  # 337-nm excitation (general lipid)
  Intens$N1_337 <- Intens$Ie1_337 / Intens$Imon1_337
  Intens$N2_337 <- Intens$Ie2_337 / Intens$Imon2_337
  Intens$N4_337 <- Intens$Ie4_337 / Intens$Imon4_337
  Intens$N6_337 <- Intens$Ie6_337 / Intens$Imon6_337
  Intens$N8_337 <- Intens$Ie8_337 / Intens$Imon8_337
  Intens$N10_337 <- Intens$Ie10_337 / Intens$Imon10_337
  # ---- quenching ratios I0/Imp ---------------------------------------------
  Intens$I0Imp1 <- Intens$I0 / Intens$Imp1
  Intens$I0Imp2 <- Intens$I0 / Intens$Imp2
  Intens$I0Imp4 <- Intens$I0 / Intens$Imp4
  Intens$I0Imp6 <- Intens$I0 / Intens$Imp6
  Intens$I0Imp8 <- Intens$I0 / Intens$Imp8
  Intens$I0Imp10 <- Intens$I0 / Intens$Imp10
  # ---- ratios I0/(I0 - Imp) ------------------------------------------------
  Intens$I0I0Imp1 <- Intens$I0 / (Intens$I0 - Intens$Imp1)
  Intens$I0I0Imp2 <- Intens$I0 / (Intens$I0 - Intens$Imp2)
  Intens$I0I0Imp4 <- Intens$I0 / (Intens$I0 - Intens$Imp4)
  Intens$I0I0Imp6 <- Intens$I0 / (Intens$I0 - Intens$Imp6)
  Intens$I0I0Imp8 <- Intens$I0 / (Intens$I0 - Intens$Imp8)
  Intens$I0I0Imp10 <- Intens$I0 / (Intens$I0 - Intens$Imp10)
  # ---- tetha and F1/F0 from the I0/(I0-Imp) ~ 1/c regression ---------------
  c.revers <- 1/c(1, 2, 4, 6, 8, 10)
  effective.energy.transfer <- c(Intens$I0I0Imp1, Intens$I0I0Imp2, Intens$I0I0Imp4,
                                 Intens$I0I0Imp6, Intens$I0I0Imp8, Intens$I0I0Imp10)
  # fit once instead of twice (the original fitted the same model for each
  # coefficient); slope -> tetha, 1/intercept -> F1/F0
  fit <- lm(effective.energy.transfer ~ c.revers)
  Intens$tetha <- fit$coefficients[2]
  Intens$F1F0 <- 1/fit$coefficients[1]
  # ---- polarity of the pyrene microenvironment -----------------------------
  # excimer peak relative to the 380-400 nm band, 286-nm excitation
  interval_polarity_286 <- seq(380, 400, 1)
  Intens$Polarity1_286 <- Intens$Ie1_286 / max(spectrum.c1_286[interval_polarity_286])
  Intens$Polarity2_286 <- Intens$Ie2_286 / max(spectrum.c2_286[interval_polarity_286])
  Intens$Polarity4_286 <- Intens$Ie4_286 / max(spectrum.c4_286[interval_polarity_286])
  Intens$Polarity6_286 <- Intens$Ie6_286 / max(spectrum.c6_286[interval_polarity_286])
  Intens$Polarity8_286 <- Intens$Ie8_286 / max(spectrum.c8_286[interval_polarity_286])
  Intens$Polarity10_286 <- Intens$Ie10_286 / max(spectrum.c10_286[interval_polarity_286])
  # excimer peak relative to the 380-400 nm band, 337-nm excitation
  interval_polarity_337 <- seq(380, 400, 1)
  Intens$Polarity1_337 <- Intens$Ie1_337 / max(spectrum.c1_337[interval_polarity_337])
  Intens$Polarity2_337 <- Intens$Ie2_337 / max(spectrum.c2_337[interval_polarity_337])
  Intens$Polarity4_337 <- Intens$Ie4_337 / max(spectrum.c4_337[interval_polarity_337])
  Intens$Polarity6_337 <- Intens$Ie6_337 / max(spectrum.c6_337[interval_polarity_337])
  Intens$Polarity8_337 <- Intens$Ie8_337 / max(spectrum.c8_337[interval_polarity_337])
  Intens$Polarity10_337 <- Intens$Ie10_337 / max(spectrum.c10_337[interval_polarity_337])
  # append the computed parameters to the database (default table "rbc")
  spc_write(Intens)
}
# Append one row of computed parameters to a table in the SQLite file
# "spc.db" (created via spc_create_db_structure() when missing).
#
# Arguments:
#   value_for_insert - named list/data.frame holding one probe's values
#   target_db        - destination table name (default "rbc")
#
# NOTE(review): spc_view() opens the database under homedirectorypath while
# this function uses a path relative to the working directory; the two agree
# only because the top of the file setwd()s into homedirectorypath -- confirm
# before reusing elsewhere.
spc_write <- function(value_for_insert, target_db = "rbc") {
  if (file.exists("spc.db")) {
    print("DB already exist!")
  } else {
    spc_create_db_structure()
  }
  driver <- dbDriver("SQLite")
  connect <- dbConnect(driver, dbname = "spc.db")
  # FIX: the table name used to be hard-coded as "rbc", silently ignoring the
  # target_db argument; it is now honoured (default preserves old behaviour).
  dbWriteTable(connect, target_db, as.data.frame(value_for_insert),
               overwrite = FALSE, row.names = FALSE, eol = "\r\n", append = TRUE)
  sqliteCloseConnection(connect); sqliteCloseDriver(driver)
}
# Run an SQL query against the project SQLite database (spc.db under
# homedirectorypath) and return the result set as a data.frame.
#
# Arguments:
#   sql_query - SQL text to execute (default: the whole "rbc" table)
spc_view <- function(sql_query = "select * from rbc") {
  drv <- dbDriver("SQLite")
  con <- dbConnect(drv, dbname = paste(homedirectorypath, "/spc.db", sep = ""))
  res <- dbSendQuery(con, statement = sql_query)
  out <- fetch(res)
  # release the result set and the connection before returning
  sqliteCloseResult(res)
  sqliteCloseConnection(con)
  sqliteCloseDriver(drv)
  return(out)
}
# Visual and statistical comparison of the sport and control groups.
# Draws microviscosity, Stern-Volmer, tetha, F1/F0 and polarity plots,
# prints Wilcoxon p-values for every parameter family, and runs a
# hierarchical clustering of the microviscosity columns.
#
# Column ranges used below (layout of the "rbc" table written by spc_calc):
#   34:39 = N*_286 (annular lipid), 40:45 = N*_337 (general lipid),
#   60:65 = Polarity*_286, 66:71 = Polarity*_337.
#
# Side effects: assigns sport_id, control_id and data.h into the global
# environment (<<- kept for backwards compatibility with interactive use).
#
# Arguments:
#   query_for_spc_view - SQL used to fetch the parameter table
spc_illustrate <- function(query_for_spc_view = "SELECT * FROM rbc") {
  # formation of control and experimental groups from the "private" table
  sport_id <<- as.numeric(unlist(
    spc_view("SELECT id FROM private WHERE sport <> 'control'")))
  control_id <<- as.numeric(unlist(
    spc_view("SELECT id FROM private WHERE sport = 'control'")))
  dataframe <- spc_view(query_for_spc_view)
  concentration <- c(1, 2, 4, 6, 8, 10)
  # -- microviscosity plot (1 / excimerisation ratio per concentration) ------
  # FIX: mean() on a data.frame is defunct in modern R (it formerly returned
  # per-column means); colMeans() restores the intended one-value-per-
  # concentration vectors that plot() below requires.
  sport_annular <- 1/colMeans(dataframe[sport_id, 34:39])
  sport_general <- 1/colMeans(dataframe[sport_id, 40:45])
  control_annular <- 1/colMeans(dataframe[control_id, 34:39])
  control_general <- 1/colMeans(dataframe[control_id, 40:45])
  plot(concentration, sport_annular, type = "l", col = "red", lty = "dashed",
       main = "RED: sport, GREEN: control \n DASHED: annular, NORMAL: general",
       ylab = "Microviscosity", xlab = "Concentration")
  points(concentration, sport_annular, col = "red")
  lines(concentration, sport_general, col = "red"); points(concentration, sport_general, col = "red")
  lines(concentration, control_general, col = "green"); points(concentration, control_general, col = "green")
  lines(concentration, control_annular, col = "green", lty = "dashed"); points(concentration, control_annular, col = "green")
  # -- agreement of the quenching data with the theoretical prediction -------
  F1 <- mean(dataframe$I0/dataframe$Imp1)
  F2 <- mean(dataframe$I0/dataframe$Imp2)
  F4 <- mean(dataframe$I0/dataframe$Imp4)
  F6 <- mean(dataframe$I0/dataframe$Imp6)
  F8 <- mean(dataframe$I0/dataframe$Imp8)
  F10 <- mean(dataframe$I0/dataframe$Imp10)
  print(paste("Correlation practical data with theoretical predicted values:",
              cor(c(F1, F2, F4, F6, F8, F10), concentration)))
  # -- effectivity of energy transfer: I0/(I0-I) against 1/c -----------------
  Z1 <- mean(dataframe$I0I0Imp1[sport_id])
  Z2 <- mean(dataframe$I0I0Imp2[sport_id])
  Z4 <- mean(dataframe$I0I0Imp4[sport_id])
  Z6 <- mean(dataframe$I0I0Imp6[sport_id])
  Z8 <- mean(dataframe$I0I0Imp8[sport_id])
  Z10 <- mean(dataframe$I0I0Imp10[sport_id])
  Z <- c(Z1, Z2, Z4, Z6, Z8, Z10)
  c.revers <- 1/concentration
  # linear model Z ~ 1/c for each group
  l.sport <- lm(Z ~ c.revers)
  Z1C <- mean(dataframe$I0I0Imp1[control_id])
  Z2C <- mean(dataframe$I0I0Imp2[control_id])
  Z4C <- mean(dataframe$I0I0Imp4[control_id])
  Z6C <- mean(dataframe$I0I0Imp6[control_id])
  Z8C <- mean(dataframe$I0I0Imp8[control_id])
  Z10C <- mean(dataframe$I0I0Imp10[control_id])
  ZC <- c(Z1C, Z2C, Z4C, Z6C, Z8C, Z10C)
  l.control <- lm(ZC ~ c.revers)
  plot(c.revers, Z, type = "l", col = "red", main = "RED: sport, GREEN: control",
       ylab = "I0/(I0-I)", xlab = "Concentration")
  points(c.revers, Z, col = "red")
  lines(c.revers, ZC, col = "green"); points(c.revers, ZC, col = "green")
  curve(l.control$coefficients[2]*x+l.control$coefficients[1], add = TRUE, lty = "dashed", col = "green")
  curve(l.sport$coefficients[2]*x+l.sport$coefficients[1], add = TRUE, lty = "dashed", col = "red")
  # -- tetha and F1/F0 distributions -----------------------------------------
  boxplot(dataframe$tetha[sport_id], dataframe$tetha[control_id], main = "Tetha values for sport and control group")
  boxplot(dataframe$F1F0[sport_id], dataframe$F1F0[control_id], main = "F1/F0 values for sport and control group")
  # -- polarity of the pyrene microenvironment (colMeans fix as above) -------
  sport_polarity286 <- colMeans(dataframe[sport_id, 60:65])
  sport_polarity337 <- colMeans(dataframe[sport_id, 66:71])
  control_polarity286 <- colMeans(dataframe[control_id, 60:65])
  control_polarity337 <- colMeans(dataframe[control_id, 66:71])
  plot(concentration, sport_polarity337, type = "l", col = "red",
       main = "RED: sport, GREEN: control \n DASHED: annular, NORMAL: general",
       ylab = "Polarity", xlab = "Concentration")
  points(concentration, sport_polarity337, col = "red")
  lines(concentration, sport_polarity286, col = "red", lty = "dashed"); points(concentration, sport_polarity286, col = "red")
  lines(concentration, control_polarity337, col = "green"); points(concentration, control_polarity337, col = "green")
  lines(concentration, control_polarity286, col = "green", lty = "dashed"); points(concentration, control_polarity286, col = "green")
  # -- group differences: microviscosity -------------------------------------
  print("p-values of Wilcoxon test for microviscosity annular lipid found")
  cat(wilcox.test(dataframe$N1_286[control_id], dataframe$N1_286[sport_id])$p.value,
      wilcox.test(dataframe$N2_286[control_id], dataframe$N2_286[sport_id])$p.value,
      wilcox.test(dataframe$N4_286[control_id], dataframe$N4_286[sport_id])$p.value,
      wilcox.test(dataframe$N6_286[control_id], dataframe$N6_286[sport_id])$p.value,
      wilcox.test(dataframe$N8_286[control_id], dataframe$N8_286[sport_id])$p.value,
      wilcox.test(dataframe$N10_286[control_id], dataframe$N10_286[sport_id])$p.value, "\n")
  print("p-values of Wilcoxon test for microviscosity general lipid found")
  cat(wilcox.test(dataframe$N1_337[control_id], dataframe$N1_337[sport_id])$p.value,
      wilcox.test(dataframe$N2_337[control_id], dataframe$N2_337[sport_id])$p.value,
      wilcox.test(dataframe$N4_337[control_id], dataframe$N4_337[sport_id])$p.value,
      wilcox.test(dataframe$N6_337[control_id], dataframe$N6_337[sport_id])$p.value,
      wilcox.test(dataframe$N8_337[control_id], dataframe$N8_337[sport_id])$p.value,
      wilcox.test(dataframe$N10_337[control_id], dataframe$N10_337[sport_id])$p.value, "\n")
  # -- group differences: effectivity of energy transfer ---------------------
  print("p-values of Wilcox test for I0/(I0-I)")
  cat(wilcox.test(dataframe$I0Imp1[control_id], dataframe$I0Imp1[sport_id])$p.value,
      wilcox.test(dataframe$I0Imp2[control_id], dataframe$I0Imp2[sport_id])$p.value,
      wilcox.test(dataframe$I0Imp4[control_id], dataframe$I0Imp4[sport_id])$p.value,
      wilcox.test(dataframe$I0Imp6[control_id], dataframe$I0Imp6[sport_id])$p.value,
      wilcox.test(dataframe$I0Imp8[control_id], dataframe$I0Imp8[sport_id])$p.value,
      wilcox.test(dataframe$I0Imp10[control_id], dataframe$I0Imp10[sport_id])$p.value, "\n")
  # -- group differences: tetha and F1/F0 ------------------------------------
  print("p-values of Wilcox test for tetha")
  cat(wilcox.test(dataframe$tetha[control_id], dataframe$tetha[sport_id])$p.value, "\n")
  print("p-values of Wilcox test for F1/F0")
  cat(wilcox.test(dataframe$F1F0[control_id], dataframe$F1F0[sport_id])$p.value, "\n")
  # -- group differences: polarity of the microenvironment -------------------
  print("p-values of Wilcoxon test for polarity of pyren's microenvironment in annular lipid found")
  cat(wilcox.test(dataframe$Polarity1_286[control_id], dataframe$Polarity1_286[sport_id])$p.value,
      wilcox.test(dataframe$Polarity2_286[control_id], dataframe$Polarity2_286[sport_id])$p.value,
      wilcox.test(dataframe$Polarity4_286[control_id], dataframe$Polarity4_286[sport_id])$p.value,
      wilcox.test(dataframe$Polarity6_286[control_id], dataframe$Polarity6_286[sport_id])$p.value,
      wilcox.test(dataframe$Polarity8_286[control_id], dataframe$Polarity8_286[sport_id])$p.value,
      wilcox.test(dataframe$Polarity10_286[control_id], dataframe$Polarity10_286[sport_id])$p.value, "\n")
  print("p-values of Wilcoxon test for polarity of pyren's microenvironment in general lipid found")
  cat(wilcox.test(dataframe$Polarity1_337[control_id], dataframe$Polarity1_337[sport_id])$p.value,
      wilcox.test(dataframe$Polarity2_337[control_id], dataframe$Polarity2_337[sport_id])$p.value,
      wilcox.test(dataframe$Polarity4_337[control_id], dataframe$Polarity4_337[sport_id])$p.value,
      wilcox.test(dataframe$Polarity6_337[control_id], dataframe$Polarity6_337[sport_id])$p.value,
      wilcox.test(dataframe$Polarity8_337[control_id], dataframe$Polarity8_337[sport_id])$p.value,
      wilcox.test(dataframe$Polarity10_337[control_id], dataframe$Polarity10_337[sport_id])$p.value, "\n")
  # -- cluster analysis of the microviscosity columns ------------------------
  # NOTE(review): hclust's "ward" method was renamed "ward.D" in R >= 3.1;
  # current R warns and proceeds -- confirm before relying on the dendrogram.
  data.dist <- dist(scale(dataframe[c(control_id, sport_id), 34:45]), method = "minkowski")
  data.h <<- hclust(data.dist, method = "ward")
  plot(data.h)
}
# Compare one subject's microviscosity-ratio totals against the adult
# control-group averages and print the ratios together with the subject's
# personal data from the "private" table.
#
# Column ranges: 34:39 = N*_286 (annular lipid), 40:45 = N*_337 (general).
#
# Arguments:
#   target_id - row index of the subject to compare (default 1)
spc_compare <- function(target_id = 1) {
  dataframe <- spc_view()
  control_id <- as.numeric(unlist(spc_view("SELECT id FROM private WHERE sport = 'control' AND old > 18")))
  # control-group per-subject averages of the summed ratio columns
  # (sport-group averages were computed in the original but never used)
  mean_ca <- sum(dataframe[control_id, 34:39])/length(control_id)
  mean_cg <- sum(dataframe[control_id, 40:45])/length(control_id)
  # target subject totals
  mean_ta <- sum(dataframe[target_id, 34:39])
  mean_tg <- sum(dataframe[target_id, 40:45])
  q <- paste("SELECT name, data_db, sport, height, weight, qual FROM private WHERE id = ", target_id, sep = "")
  print(spc_view(q))
  cat("Differencies in annular lipid found: ", mean_ta/mean_ca, "\n")
  # FIX: this line previously reused mean_ta (annular) instead of mean_tg,
  # so the general-lipid ratio was never actually reported.
  cat("Differencies in general lipid found: ", mean_tg/mean_cg, "\n", "\n")
}
# Multiple linear regression of sport qualification (qual) on every
# spectroscopic parameter plus the anthropometric covariates.
# Rows with id 38, 45, 46, 47 are excluded, matching the original analysis.
# Returns the summary() of the fitted model.
spc_mlr <- function(){
  dat1 <- spc_view("SELECT * FROM private WHERE (id <>38 AND id<>45 AND id<>46 AND id<>47)")
  dat0 <- spc_view("SELECT * FROM rbc WHERE (id <>38 AND id<>45 AND id<>46 AND id<>47)")
  # FIX: the original formula listed dat0$I0I0Imp4 twice and omitted
  # dat0$I0I0Imp6; the I0/(I0-Imp) term for c = 6 is now included, so every
  # parameter family covers all six concentrations (1, 2, 4, 6, 8, 10).
  llmm <- lm(dat1$qual ~ dat0$I0 +
    dat0$Imp1 + dat0$Imp2 + dat0$Imp4 + dat0$Imp6 + dat0$Imp8 + dat0$Imp10 +
    dat0$Imon1_286 + dat0$Imon2_286 + dat0$Imon4_286 + dat0$Imon6_286 + dat0$Imon8_286 + dat0$Imon10_286 +
    dat0$Imon1_337 + dat0$Imon2_337 + dat0$Imon4_337 + dat0$Imon6_337 + dat0$Imon8_337 + dat0$Imon10_337 +
    dat0$Ie1_286 + dat0$Ie2_286 + dat0$Ie4_286 + dat0$Ie6_286 + dat0$Ie8_286 + dat0$Ie10_286 +
    dat0$Ie1_337 + dat0$Ie2_337 + dat0$Ie4_337 + dat0$Ie6_337 + dat0$Ie8_337 + dat0$Ie10_337 +
    dat0$N1_286 + dat0$N2_286 + dat0$N4_286 + dat0$N6_286 + dat0$N8_286 + dat0$N10_286 +
    dat0$N1_337 + dat0$N2_337 + dat0$N4_337 + dat0$N6_337 + dat0$N8_337 + dat0$N10_337 +
    dat0$I0Imp1 + dat0$I0Imp2 + dat0$I0Imp4 + dat0$I0Imp6 + dat0$I0Imp8 + dat0$I0Imp10 +
    dat0$I0I0Imp1 + dat0$I0I0Imp2 + dat0$I0I0Imp4 + dat0$I0I0Imp6 + dat0$I0I0Imp8 + dat0$I0I0Imp10 +
    dat0$tetha + dat0$F1F0 +
    dat0$Polarity1_286 + dat0$Polarity2_286 + dat0$Polarity4_286 + dat0$Polarity6_286 + dat0$Polarity8_286 + dat0$Polarity10_286 +
    dat0$Polarity1_337 + dat0$Polarity2_337 + dat0$Polarity4_337 + dat0$Polarity6_337 + dat0$Polarity8_337 + dat0$Polarity10_337 +
    dat1$old + dat1$sex + dat1$height + dat1$weight + dat1$exp)
  summary(llmm)
}
|
841b14f4b87137238baccf1226557ab36ed45a63 | 343757949b33e95184d1ec81caeb7d16d1c04485 | /secret-example.R | f3a94bfa1fd2b6321b25b27524e17bdf0aaaf520 | [] | no_license | precision-sustainable-ag/on-farm-map | 14de036d7a601e91928db04ab018bc362d45ca8b | 02c50924f7846094b39fe49616577bd8aa24d4d3 | refs/heads/master | 2022-05-25T12:54:26.437296 | 2022-04-19T14:34:24 | 2022-04-19T14:34:24 | 219,055,309 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 148 | r | secret-example.R | # DB credentials
# Placeholder Postgres connection settings; copy this file and substitute
# real values before use (never commit actual credentials).
pg_dbname   <- "<Postgres db name>"   # database name
pg_host     <- "<Host URL>"           # server host
pg_port     <- 0000                   # server port (numeric placeholder)
pg_user     <- "<your username>"      # login role
pg_password <- "<your password>"      # login password
|
5998168bbac286036c7828af90f2af8347d8c634 | 5c2350f172e1a7b7f61e1047d515357735e5895e | /man/text_coding.Rd | d2d962d21ac0947b2d83e691a460ad68ff2de761 | [
"CC-BY-4.0",
"MIT",
"LicenseRef-scancode-public-domain",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | richarddmorey/Morey_Hoekstra_StatCognition | 4da5b3f205d1038b850fa701354bd59b62a05eed | 373b9ac75219d84d7b5a6454296e80aa4f34ea54 | refs/heads/master | 2022-12-06T14:50:55.198542 | 2022-11-30T18:23:58 | 2022-11-30T18:23:58 | 189,821,493 | 4 | 1 | null | null | null | null | UTF-8 | R | false | true | 1,013 | rd | text_coding.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/christmas_stats_public-data.R
\docType{data}
\name{text_coding}
\alias{text_coding}
\title{Coding of the text responses}
\description{
Categorization of each participants' strategy-related
free-text responses
}
\details{
\itemize{
\item id: participant id
\item text_comparison: Response mentioned comparison to the random shuffle reports
\item text_asymmetry: Response mentioned symmetry/asymmetry in the experimental samples
\item text_sampling_var: Response mentioned random shuffle reports as a means to assess sampling variability,
distribution under the null, chance distribution, etc
\item text_inc_asymmetry: Response mentioned increasing asymmetry (or lack thereof) as sample size increased
\item text_no_shuffle: Explicitly said they did not use the shuffle reports
\item text_irrelevant: Text was irrelevant
\item text_missing: Text was missing
}
}
\author{
Richard D. Morey \email{richarddmorey@gmail.com}
}
\keyword{data}
|
24bd20cb538c2bfe46f992bd0587de15e011931d | 5e76b46f4c5e00dddda70596f7387bdd53285252 | /man/normVowels.Rd | bdc3bbd974f1265dbca9bcfb435d891b655bb542 | [] | no_license | djvill/phonR | 3096ff63edfe3869b251740b1d1dd558b56c875f | ada5ad8fd2f6c51f3742eba7446a5169f7890809 | refs/heads/master | 2021-01-17T22:33:15.278764 | 2016-09-15T17:18:31 | 2016-09-15T17:18:31 | 67,957,934 | 1 | 0 | null | 2016-09-15T17:18:31 | 2016-09-11T22:33:30 | R | UTF-8 | R | false | false | 7,642 | rd | normVowels.Rd | \name{Normalize vowel formant frequencies}
\alias{normVowels}
\alias{normBark}
\alias{normErb}
\alias{normLobanov}
\alias{normLog}
\alias{normLogmean}
\alias{normMel}
\alias{normNearey1}
\alias{normNearey2}
\alias{normSharedLogmean}
\alias{normWattFabricius}
\title{Normalize formant frequency values using a variety of algorithms}
\description{Functions for transforming vowel formant frequency data measured in
Hertz, using one of several normalization schemes commonly used in
phonetic and sociolinguistic research. \code{normVowels} is a
convenience function wrapping to the individual \code{norm[Method]}
functions.}
\usage{
normBark(f)
normErb(f)
normLog(f)
normMel(f)
normLobanov(f, group=NULL)
normLogmean(f, group=NULL, exp=FALSE, ...)
normNearey1(f, group=NULL, exp=FALSE, ...)
normNearey2(f, group=NULL, exp=FALSE, ...)
normSharedLogmean(f, group=NULL, exp=FALSE, ...)
normWattFabricius(f, vowel, group=NULL)
normVowels(method, f0=NULL, f1=NULL, f2=NULL, f3=NULL,
vowel=NULL, group=NULL, ...)
}
\arguments{
  \item{f}{Vector or matrix of formant frequencies. For \code{normNearey1} and \code{normNearey2},
\code{f} must be an N-by-4 matrix of frequencies, with column order
\dQuote{f0}, \dQuote{F1}, \dQuote{F2}, \dQuote{F3}. For
\code{normWattFabricius}, \code{f} must be an N-by-2 matrix or data
frame of F1 and F2 values. If passing a matrix to
\code{normLogmean}, formants must be grouped within columns, not
rows.}
\item{vowel}{Vector or factor of vowel symbols, with
\code{length(vowel)==nrow(f)}. Used only in
\code{normVowels(method="wattfabricius", ...)} or
\code{normWattFabricius(...)}.}
\item{group}{Vector or factor indicating rows of \code{f} that should be
normalized together. This is useful for, e.g., calculating
talker-intrinsic normalizations when \code{group} encodes
talker identity.}
\item{exp}{Logical; should the result of the logmeans calculation be passed
through the \code{\link[base]{exp}} function before being returned?}
\item{f0,f1,f2,f3}{Separate vectors of formant or
fundamental frequency values used in the convenience method
\code{plotVowels}. \code{f1} and \code{f2} are required when
\code{method} is \code{"wattfabricius"}, \code{"logmean"},
\code{"shared"}, \code{"nearey1"}, or \code{"nearey2"}.}
\item{method}{Specification of the normalization method to use when calling
the convenience method \code{normVowels}. Possible values are
\dQuote{bark}, \dQuote{erb}, \dQuote{lobanov},
\dQuote{log}, \dQuote{logmean}, \dQuote{mel},
\dQuote{shared}, and \dQuote{wattfabricius}.
\dQuote{zscore} is an accepted synonym for \dQuote{lobanov};
\dQuote{nearey1} is an accepted synonym for \dQuote{logmean};
\dQuote{nearey2} is an accepted synonym for \dQuote{shared};
and \dQuote{scentroid}, is an accepted synonym for
\dQuote{wattfabricius}.}
\item{...}{Additional arguments passed to \code{\link[base]{colMeans}} by
functions \code{normLogmean} and \code{normSharedLogmean} (useful for
specifying the value of \code{na.rm}).}
}
\details{
\code{normLogmean} is a synonym for \code{normNearey1}, which is also sometimes
confusingly called \dQuote{single logmean}. \code{normSharedLogmean} is a
synonym for \code{normNearey2}. The argument \code{exp=TRUE} for these functions
will yield values that are consistent with the \code{\link[vowels]{norm.nearey}}
implementation, which takes the result of Nearey's original formulae and uses
it as the exponent of the base of the natural logarithm (presumably so that
the function always yields positive values).
Note that \code{normErb} returns the \dQuote{ERB-rate scale} value (i.e.,
the number of ERBs below the given frequency), not the ERB of the auditory
filter centered at the given frequency.
The implementation of the Watt-Fabricius method varies slightly from the
formula in Watt & Fabricius (2002), since \code{normWattFabricius} simply
calculates which vowel has the highest mean F1 value and designates it as
the low corner of the triangle, rather than asking the user to expressly
specify the \dQuote{TRAP} or \dQuote{START} vowel. Similarly,
\code{normWattFabricius} simply calculates which vowel has the highest mean
F2 value and uses that to calculate the upper left corner, rather than
expressly looking for the mean of the \dQuote{point-vowel} /i/. The upper
right corner is, as in the original method, derived from the other two. If
the vowels with the highest mean F1 and highest mean F2 are not the same
pair of vowels for all members of \code{group}, \code{normWattFabricius}
returns an error.
}
\value{Most of the functions return a vector or matrix of the same dimensions as
were passed in. The exceptions are \code{normVowels}, which returns an
n-by-m matrix of n data points by m formants with formants in ascending
order with fundamental frequency first (if present), and
\code{normWattFabricius} (or \code{normVowels} with
\code{method=wattfabricius}), which only and always returns F1 and F2,
regardless of whether f0 and F3 were supplied. }
\author{McCloy, Daniel \email{drmccloy@uw.edu}}
\references{
Glasberg, B. R., & Moore, B. C. J. 1990 \dQuote{Derivation of auditory filter shapes from notched-noise data.} \emph{Hearing Research}, 47(1-2), 103-138. \url{http://dx.doi.org/10.1016/0378-5955(90)90170-T}
Lobanov, B. M. 1971 \dQuote{Classification of Russian vowels spoken by different speakers.} \emph{The Journal of the Acoustical Society of America}, 49(2), 606-608. \url{http://dx.doi.org/10.1121/1.1912396}
McCloy, D. R. 2012 \dQuote{Normalizing and plotting vowels with the phonR package.} \emph{Technical Reports of the UW Linguistic Phonetics Laboratory}. \url{http://dan.mccloy.info/pubs/McCloy2012_phonR.pdf}
Nearey, T. M. 1978 \dQuote{Phonetic feature systems for vowels} (Doctoral dissertation, University of Alberta). Reprinted by the Indiana University Linguistics Club. \url{http://www.ualberta.ca/~tnearey/Nearey1978_compressed.pdf}
Stevens, S. S., & Volkmann, J. 1940 \dQuote{The relation of pitch to frequency: A revised scale.} \emph{The American Journal of Psychology}, 53(3), pp. 329-353.
Traunmuller, H. 1990 \dQuote{Analytical expressions for the tonotopic sensory scale.} \emph{The Journal of the Acoustical Society of America}, 88(1), 97-100. \url{http://dx.doi.org/10.1121/1.399849}
Watt, D., & Fabricius, A. H. 2002 \dQuote{Evaluation of a technique for improving the mapping of multiple speakers' vowel spaces in the F1 ~ F2 plane.} \emph{Leeds Working Papers in Linguistics and Phonetics}, 9, 159-173.
}
\examples{
data(indoVowels)
bark <- with(indo, normBark(cbind(f1, f2)))
erbn <- with(indo, normErb(cbind(f1, f2)))
mel <- with(indo, normMel(cbind(f1, f2)))
lobanov <- with(indo, normLobanov(cbind(f1, f2), group=subj))
lognormed <- with(indo, normLog(cbind(f1, f2)))
# using the convenience wrapper:
bark <- with(indo, normVowels('bark', f1=f1, f2=f2))
wattfab <- with(indo, normVowels('wattfabricius', f1=f1, f2=f2,
vowel=vowel, group=subj))
}
\keyword{methods}
|
84e82ac319404bc58c81b986e3075900cf7f20c6 | 60ed9eb5b93f1051b3382d4bf7bd1f58818c1b34 | /plot4.R | 363104429fedb888c877e5d1230751dee0409d8f | [] | no_license | rohitchaube/ExData_Plotting1 | a7bc6e31e56434f3eea6326c73ed69ab8b236cba | a372913eec569c3dea6413128e74e3d1c5f0ddea | refs/heads/master | 2021-01-20T16:24:11.027479 | 2014-10-12T17:02:50 | 2014-10-12T17:02:50 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,469 | r | plot4.R | plot4 <- function(data_file) {
## Read only the two target days (2007-02-01 / 2007-02-02); filtering inside
## read.csv.sql (sqldf) avoids loading the full household-power file into memory.
file_read <- file(data_file)
data <- read.csv.sql( sql="select * from file_read where Date = '1/2/2007' or Date = '2/2/2007'",
                        sep = ";",
                        header=TRUE)
close(file_read)
## Combine Date and Time, then parse into a date-time object for the x axis
combine_date_time <- paste(data$Date, data$Time)
combine_date_time <- strptime(combine_date_time, "%d/%m/%Y %H:%M:%S")
## Output PNG file (default 480x480 device)
png(file = "plot4.png")
## 2x2 layout: four panels on one device
par(mfrow = c(2, 2))
## Plot 1 - upper left: global active power over time
plot(combine_date_time, data$Global_active_power, type="l", xlab = " ", ylab ="Global Active Power (kilowatts) ")
## Plot 2 - upper right: voltage over time
plot(combine_date_time, data$Voltage, type="l", xlab = "datetime", ylab = "Voltage")
## Plot 3 - lower left: the three energy sub-metering series on one panel
plot(combine_date_time, data$Sub_metering_1, type="l", xlab = " ", ylab = "Energy sub metering")
lines(combine_date_time, data$Sub_metering_2, col = "red")
lines(combine_date_time, data$Sub_metering_3, col = "blue")
## BUGFIX: legend colours must follow the drawn line colours
## (Sub_metering_1 black, Sub_metering_2 red, Sub_metering_3 blue);
## the original c("black", "blue", "red") mislabelled series 2 and 3.
legend("topright", lwd = 2, col = c("black", "red", "blue"), legend = c("Sub_metering_1", "Sub_metering_2", "Sub_metering_3"), bty = "n")
## Plot 4 - lower right: global reactive power over time
plot(combine_date_time, data$Global_reactive_power, type="l", xlab = "datetime", ylab = "Global_reactive_power")
## Close the PNG device so the file is flushed to disk
dev.off()
} |
6f0ed060e7e276f490a046e8faf249b0dc4387e4 | bbaf6cc30800d6588c923f681d5db779f5eb92d7 | /wos_stm.R | 6278089b6fd9ce9bb2aba4f21c72ed6923193939 | [] | no_license | synbioks/synbioethics_ctm | 656f34124e5071a6b88ffec363c8c340bc7414cc | 110c10aa5ca6bdb8c6d35a1c62504b524177e75f | refs/heads/main | 2023-06-18T20:30:50.274685 | 2021-07-12T20:18:25 | 2021-07-12T20:18:25 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 2,687 | r | wos_stm.R | # ==============================================================================
# structural topic models with web of science data
# ==============================================================================
# preliminaries ================================================================
# first load data in wos_descriptives.R
# load packages
library(stm)
library(here)
# set seed
set.seed(1234)
# load data into stm object ====================================================
# preprocess
ethics_processed <- textProcessor(documents = ethics_data$abstract,
metadata = ethics_data,
removenumbers = FALSE,
removepunctuation = FALSE,
ucp = FALSE)
ethics_out <- prepDocuments(ethics_processed$documents,
ethics_processed$vocab,
ethics_processed$meta)
ethics_docs <- ethics_out$documents
ethics_vocab <- ethics_out$vocab
ethics_meta <- ethics_out$meta
# check term frequency
plotRemoved(ethics_processed$documents, lower.thresh = seq(1, 50, by = 1))
ethics_out <- prepDocuments(ethics_processed$documents,
ethics_processed$vocab,
ethics_processed$meta,
lower.thresh = 5)
# topic number diagnostics
# BUGFIX(review): the original called searchK(out$documents, out$vocab, ...,
# data = meta), but `out` and `meta` are never defined in this script; the
# thresholded corpus built above is `ethics_out` (whose $meta is aligned with
# its $documents after prepDocuments dropped low-frequency terms).
ethics_k_diag <- searchK(ethics_out$documents,
                         ethics_out$vocab,
                         K = seq(5, 20, 1),
                         data = ethics_out$meta,
                         cores = 5L)
# estimate stm on ethics data ==================================================
ethics_k10 <- stm(documents = ethics_out$documents,
vocab = ethics_out$vocab,
K = 9,
max.em.its = 100,
data = ethics_out$meta,
init.type = "Spectral")
# visualize results : prevalence
plot(ethics_k10, labeltype = 'frex', n = 5)
# get two quotes per topic
thoughts <- findThoughts(ethics_k10, texts = ethics_out$meta$title,
topics = c(1:9), n = 3)
# plot quotes for each topic
plotQuote(thoughts$docs$`Topic 1`, maxwidth = 300, width = 50)
plotQuote(thoughts$docs$`Topic 2`, maxwidth = 300, width = 50)
plotQuote(thoughts$docs$`Topic 3`, maxwidth = 300, width = 50)
plotQuote(thoughts$docs$`Topic 4`, maxwidth = 300, width = 50)
plotQuote(thoughts$docs$`Topic 5`, maxwidth = 300, width = 50)
plotQuote(thoughts$docs$`Topic 6`, maxwidth = 300, width = 50)
plotQuote(thoughts$docs$`Topic 7`, maxwidth = 300, width = 50)
plotQuote(thoughts$docs$`Topic 8`, maxwidth = 300, width = 50)
plotQuote(thoughts$docs$`Topic 9`, maxwidth = 300, width = 50)
|
9e59826f96d6c52a5fac5cf4067d8c6cf98b6382 | e7a67422605e1b94ed168157a584d1a682787442 | /Intro_to_ggmap_02232018.R | 795d4e1a729586e979ecf2c3ae2dcd420d6941e2 | [] | no_license | BWAM/R_Lessons | 429da04c30316be0d88c10eab942b3f3000593cc | 5071f8a4b4eb3a42cd20837d17f0486927c64599 | refs/heads/master | 2021-05-02T00:32:41.284512 | 2018-02-23T17:58:47 | 2018-02-23T17:58:47 | 120,946,080 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 5,542 | r | Intro_to_ggmap_02232018.R | # Author: Charles Stoll
# Date: 02.23.2018
#################################################################################
# GGMAP Notes:
#################################################################################
# ggmap is a package that is related to ggplot, both were written by Hadley Wickham.
# Because of this, you have the full power of ggplot and it uses the same arguments as ggplot.
# Other than the ggmap vignette, there isn't a lot of documentation for ggmap
#
# Materials from USGS found in X drive: X:\R Statistics\Mapping
#
# The basic idea behind ggmap is to download a map image, plot it as a layer,
# and then overlay data on top of that image
#
# There are 2 main steps:
# 1. Download the image(s) and format them for plotting (get_map())
# 2. Making the plots(s) and using ggmap() or qmap(), qmplot() functions
# An important note: ggmap requires that all data are in the same projection, just like ArcMap
# If you do not specify the spatial data properly, ggmap is not going to correct it and there
# could be error introduced into your analysis
#
#####
#################################################################################
# Set working directory
#################################################################################
setwd("C:/NYBackup/CStoll.CDrive/R_scripts/R_class/Introduction_to_ggmap/")
#################################################################################
# Open libraries
#################################################################################
library(ggmap)
#################################################################################
# Import data
#################################################################################
# Define the column classes of my site data file
colTypes <- c("character", "character", "numeric", "numeric", "character")
# Import the file
mapsites <- read.csv(file = "Input/Intro_to_GGMAP_mapSites.csv", colClasses = colTypes, stringsAsFactors = F)
#################################################################################
# Different ways to Enter a location for a map
#################################################################################
# To get a map we need a location
# 1. Use and Address
myLocation <- "Albany, NY"
# 2. A Long/Lat
myLocation_point <- c(long=-74.380519, lat = 42.074202)
# 3. A bounding box (must be in format: long, lat, long, lat)
myLocation_bounding_box <- c(-80.040921, 39.747047, -71.214456, 45.162553)
# Center of the US
# Lebanon, KS
myLocation_US_center <- c(lon = -98.5795, lat = 39.8283)
#################################################################################
# Create and display a map using stamen map type
#################################################################################
# To get a map use getmap() function
# Different Map types
# There are 4 different map types
# Refer to cheat sheets for information
#
# 1. Stamen
# 2. Google
# 3. OSM (Open Street Maps)
# 4. cloudmade (requires that you know the API of the map)
## Example using a stamen map type
myMap_stamen <- get_map(location = myLocation_bounding_box, source = "stamen", maptype = "toner", crop = F) #zoom = 10
# Be careful using the zoom parameter in get_map() function
# if you want to make a higher resolution map, reduce the
# size of your bounding box to a more appropriate size
#Display created map
ggmap(myMap_stamen)
#################################################################################
# Create and display a map using a google map
# use a more specific get_map() function, i.e. get_googlemap()
#################################################################################
myMap_google <- get_googlemap(center = myLocation_point, source="google", zoom = 6, maptype = "terrain", crop = F)
ggmap(myMap_google)
#################################################################################
# Adjust map properties
#################################################################################
# Close the current graphics device before building the layered map
# (the original comment said "devtools"; dev.off() shuts the active graphics
# *device* -- it errors if no device is currently open)
dev.off()
# Assign map to a variable so that you can overlay points
myMapTest <- ggmap(myMap_google)
# Overlay the site points as solid dots (shape 20, size 3, fully opaque),
# coloured by each record's coordinate datum (coord_datum_cd)
myMapTest <- myMapTest + geom_point(data=mapsites, aes(x = dec_long_va, y = dec_lat_va, color = coord_datum_cd),
                                    size = 3, shape = 20, alpha = 1)
# Display map with points
myMapTest
# Change the axis labels; y on left of map, x on bottom
myMapTest <- myMapTest + labs(x = "Longitude", y ="Latitude")
# Assign a specific colour to each datum level (NAD27 blue, NAD83 red)
myMapTest <- myMapTest + scale_color_manual(values = c("NAD27" = "blue", "NAD83"="red"))
# Add a title to the map
myMapTest <- myMapTest + ggtitle("Northeastern HBN sites")
# Display the map (ggsave() below saves the last displayed plot)
myMapTest
#################################################################################
# Save map as multiple outputs
#################################################################################
# Output file names -- NOTE(review): "myTestmapNam_JPG" vs "myTestmapName_PDF"
# are spelled inconsistently, but each is used consistently below
myTestmapNam_JPG <- paste0("Output/NEHBNsites_example_output.jpg")
myTestmapName_PDF <- paste0("Output/NEHBNsites_example_output.pdf")
# ggsave() writes the most recently displayed ggplot; the output format is
# inferred from the file extension (.jpg / .pdf)
ggsave(myTestmapNam_JPG)
ggsave(myTestmapName_PDF)
#################################################################################
# END
################################################################################# |
4561e5ea8a0ed16c6f4b78ee991cea9e88313f39 | f0756ba7f487619f46999d3c5b9e6c7105fc79ae | /R_Overview.R | a551e45e46d7fa04cbbae86bf5e192ad4c85247a | [] | no_license | vishaldn/IntroductionToR | c0ca39bc5402156c9a5c30dc3dcab297cfb5ae04 | c53986ebb27f33a4ba6b2d490f06af5f7f529f0c | refs/heads/master | 2022-06-18T13:50:22.890155 | 2020-05-10T09:00:23 | 2020-05-10T09:00:23 | 262,747,241 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 411 | r | R_Overview.R |
#### CHAPTER 1 - OVERVIEW ####
# Note: You can write a comment in the script by starting a line with the "#" sign
# A comment is not run by R when you press ctrl + Enter
### Assignments
x = 1
y <- 2
### functions
print("Hello World")
### Computations
17 + 5
x + 5
y = 15 +5
y = sqrt(16) # Square Root function
sqrt(16)
# Call y, see y value in console
y
# Call x, see x value in console
x
|
9dd12a2e3e5f738e03b2656f0c4b4a367f7f6f00 | cbe44f0dca20bba077801c4b14cf8c64d5b7d352 | /man/question_3a_001_data_weather.Rd | 259ff6c2243d93038f0c0743e0409bad5598d078 | [] | no_license | s1888637/weathr | 1ced821f4bec8bf39cb38123960d9b59d3cc80d0 | 3adfd7fd3149ee2b8a2f32577d6ed5695b7450b4 | refs/heads/master | 2020-05-29T19:49:21.974074 | 2019-06-05T19:15:26 | 2019-06-05T19:15:26 | 189,339,729 | 0 | 1 | null | null | null | null | UTF-8 | R | false | true | 324 | rd | question_3a_001_data_weather.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/question-3a-001-data-weather.R
\name{question_3a_001_data_weather}
\alias{question_3a_001_data_weather}
\title{question_3a_001_data_weather}
\usage{
question_3a_001_data_weather(num_boundaries = 10)
}
\description{
question_3a_001_data_weather
}
|
3138239b765fc05d56c5da897eac235424b46c01 | 56b5304c93d2c6158efbe70a59d1aca21ecfa379 | /intro figure.R | e12f3e35d85c59160d0d4970763a20df452949a0 | [] | no_license | kgunnar-lmu/PhD-Thesis | 1e860622003345b0bbd2636b1b360553c337b423 | 8acadd88695b6afb21cb8affd189267003be48a5 | refs/heads/main | 2023-04-18T05:31:17.203065 | 2021-04-27T13:08:03 | 2021-04-27T13:08:03 | 355,526,035 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,171 | r | intro figure.R | options(stringsAsFactors = F)
library(ggplot2)
library(tidyverse)
library(ggbeeswarm)
library(ggrepel)
library(RColorBrewer)

# Table of single-cell methods; Year/Month are combined into one numeric
# x-axis position (e.g. 2014.6). Renamed from `data` (which masks base::data)
# and switched to `<-` assignment.
sc_methods <- read.csv("/Home Office/Thesis/Figures/sc meetodite tabel.csv")
head(sc_methods)
sc_methods$order2 <- as.numeric(paste0(sc_methods$Year, ".", sc_methods$Month))

# Hand-picked palette: one shade from each of eight sequential brewer scales
# (replaces eight copy-pasted cc1..cc8 assignments).
palette_spec <- list(
  c("YlOrRd",  "6"),  # red
  c("YlOrBr",  "5"),  # orange
  c("YlOrRd",  "3"),  # yellow
  c("YlGn",    "6"),  # green
  c("YlGnBu",  "5"),  # light blue
  c("Blues",   "6"),  # blue
  c("Purples", "6"),  # violet
  c("RdPu",    "7")   # purple
)
cc <- vapply(palette_spec,
             function(spec) brewer.pal(name = spec[1], n = 9)[as.integer(spec[2])],
             character(1))

# Scatter of cell counts per method over time (log10 y axis), labelled with
# repelled method names; point colour encodes method Type using the
# green/blue/red entries of the palette above (cc[c(4, 6, 1)]).
p <- ggplot(sc_methods, aes(x = order2, y = Cells, color = Type, label = Name)) +
  ggrepel::geom_text_repel(color = "black", min.segment.length = unit(0, 'lines')) +
  geom_point(shape = 1, size = 3) +
  scale_y_log10(breaks = c(1, 10, 100, 1000, 10000, 100000, 1000000)) +
  theme_classic() +
  theme(axis.text.x = element_text(angle = 90, hjust = 1)) +
  scale_x_continuous(breaks = c(2008, 2009, 2010, 2011, 2012, 2013, 2014, 2015, 2016, 2017, 2018)) +
  scale_color_manual(values = cc[c(4, 6, 1)])
p
|
21955c4b1498a0528293e301ae71316e21e0feec | 102bdcc508763ea432455778e9e13f427b7b7e51 | /man/billboarder.Rd | b35a7bc390e28eacc3bfc51ba1cfb9e1cf61177e | [
"MIT"
] | permissive | dreamRs/billboarder | 53847ee7359118498a98895afd588b98c5ea0692 | 26c93896f8b040eeb48891f4f2e70690af71d4ff | refs/heads/master | 2023-03-07T23:52:34.505537 | 2023-02-22T14:24:22 | 2023-02-22T14:24:22 | 97,649,774 | 174 | 22 | NOASSERTION | 2018-11-13T07:52:27 | 2017-07-18T22:36:41 | R | UTF-8 | R | false | true | 687 | rd | billboarder.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/billboarder.R
\name{billboarder}
\alias{billboarder}
\title{Create a Billboard.js widget}
\usage{
billboarder(
bb_opts = list(),
data = NULL,
width = NULL,
height = NULL,
elementId = NULL
)
}
\arguments{
\item{bb_opts}{A \code{list} in JSON format with chart parameters,
see \url{https://naver.github.io/billboard.js/demo/}.}
\item{data}{A \code{data.frame}.}
\item{width}{A numeric input in pixels.}
\item{height}{A numeric input in pixels.}
\item{elementId}{Use an explicit element ID for the widget.}
}
\description{
Create an interactive visualization with Javascript library Billboard.js
}
|
5f1e4cb485cc6b51b386e0edcc5d5b87a8094e93 | 2e5060fa591b80f5b11507a4afb413001fe13604 | /GW.R | 0b77e04b7778cea835f124a3996aa25da4b087a1 | [] | no_license | oraza/GWScript | 2a94569121ccd1a4201769cada3ede44cc914a02 | 5f91663cbf1d4a722782f1c3bb08b2c61f14a229 | refs/heads/master | 2020-03-27T08:33:10.007713 | 2018-09-23T12:17:25 | 2018-09-23T12:17:25 | 146,266,972 | 1 | 0 | null | 2018-09-16T13:07:20 | 2018-08-27T08:09:18 | R | UTF-8 | R | false | false | 29,898 | r | GW.R | ## GWPCA on East Africa data set for Childhood morbidity
library(GWmodel)
library(RColorBrewer)
library(rgdal)
library(leaflet)
library(psych)
library(classInt)
## Calling dataset
# BUGFIX(review): the original path "E\\GW_Thesis\\ThesisData" is the literal
# string E\GW_Thesis\ThesisData -- a Windows drive letter without its colon,
# which setwd() cannot resolve; "E:\\..." is the evident intent.
setwd("E:\\GW_Thesis\\ThesisData")
dsn <- getwd()
# Read the East-Africa DHS + environment shapefile (layer "ea_Env_DHS")
ea.border <- readOGR(dsn = dsn, layer = "ea_Env_DHS")
# converting aridity index to original values (stored scaled by 10000)
ea.border@data$Aridity <- ea.border@data$Aridity * 0.0001
#### Global PCA #####
# Bartlett's test of sphericity
# Bartlett's test of sphericity: tests H0 that the correlation matrix of the
# columns of `dat` is an identity matrix (variables uncorrelated, so
# PCA/factor analysis would not be warranted).
#
# dat: raw data -- numeric matrix or data frame, one column per variable.
# Prints the chi-square statistic, degrees of freedom and p-value, and
# invisibly returns them as a list (the original returned NULL).
bart<-function(dat){ #dat is your raw data
R <- cor(dat)    # correlation matrix of the variables
p <- ncol(dat)   # number of variables
n <- nrow(dat)   # number of observations
# chi2 = -[(n - 1) - (2p + 5)/6] * ln|R|
chi2 <- -((n-1)-((2*p)+5)/6 ) * log(det(R))
df <- (p*(p-1)/2)
# p-value kept in its own variable; the original overwrote `p` (the variable
# count), and computed an unused 5% critical value (qchisq(.95, df)).
p.value <- pchisq(chi2, df, lower.tail = FALSE)
cat("Bartlett's test of sphericity: X2(",
df,")=",chi2,", p=",
round(p.value,3),sep="" )
invisible(list(statistic = chi2, df = df, p.value = p.value))
}
# applying Bartlett's test for all potential confounders
bart(ea.border@data[,16:41])
# scaling all explanatory variables including main
ea.df.sc = scale(as.matrix(ea.border@data[,15:41]))
# global PCA using psych package
ea.pca2 <- principal(as.matrix(ea.border@data[,16:41]),
nfactors=6, rotate="varimax")
# inserting Global PCs into main SpatialPolygonsDataFrame
ea.border$global.PC1 <- ea.pca2$scores[,1]
ea.border$global.PC2 <- ea.pca2$scores[,2]
ea.border$global.PC5 <- ea.pca2$scores[,5]
ea.border$global.PC3 <- ea.pca2$scores[,3]
ea.border$global.PC4 <- ea.pca2$scores[,4]
ea.border$global.PC6 <- ea.pca2$scores[,6]
# standardizing th PC Score (dividing by their SDs)
ea.border@data$st.global.PC1 <- ea.border@data$global.PC1/(sd(ea.border@data$global.PC1))
ea.border@data$st.global.PC2 <- ea.border@data$global.PC2/(sd(ea.border@data$global.PC2))
ea.border@data$st.global.PC3 <- ea.border@data$global.PC3/(sd(ea.border@data$global.PC3))
ea.border@data$st.global.PC4 <- ea.border@data$global.PC4/(sd(ea.border@data$global.PC4))
ea.border@data$st.global.PC5 <- ea.border@data$global.PC5/(sd(ea.border@data$global.PC5))
ea.border@data$st.global.PC6 <- ea.border@data$global.PC6/(sd(ea.border@data$global.PC6))
# Scree plot
scree(ea.pca2$r.scores,
factors = F,
pc = T,
hline = -9,
main = "Scree Plot for global PCA")
# Renaming variable
names(ea.border@data)[names(ea.border@data) == "PoorMid"] <- "Poor"
names(ea.border@data)[names(ea.border@data) == "childmorb"] <- "ChildMorbid"
## Summary of Geographical distribution of variables
# Per-variable five-number summaries, then standard deviations, each rounded
# to 2 dp. Replaces 56 copy-pasted round(summary(...))/round(sd(...)) lines;
# print() reproduces the auto-printing the original top-level calls relied on,
# in the same variable order.
geo_vars <- c("ChildMorbid", "Poor", "Rural", "Solidfuel", "HealthAcc",
              "singlemom", "FEedu_NilP", "FEedu_SecH", "MAedu_NilP",
              "MAedu_SecH", "Moth_now", "Moth_prof", "Moth_agri",
              "GilrChild", "NonImprWat", "WateratHom", "Water30mAw",
              "CompImmu", "Aridity", "MaxTemp", "PM2_5", "DroughtHz",
              paste0("agecat", 1:6))
for (v in geo_vars) print(round(summary(ea.border@data[[v]]), 2))
for (v in geo_vars) print(round(sd(ea.border@data[[v]]), 2))
#### GWPCA #####
# assigning coordinates
Coords=as.matrix(cbind(ea.border$xlong,
ea.border$ylat))
# merging coordinates with data set and converting it into shapefile
ea.scaled.spdf=SpatialPointsDataFrame(Coords,as.data.frame(ea.df.sc))
proj4string(ea.scaled.spdf) <- CRS("+init=epsg:4326")
##################### Base map ##########################
m <- leaflet(ea.border) %>%
addTiles() %>%
setView(37.701546, -6.765599, 4) %>%
addProviderTiles("MapBox", options = providerTileOptions(
id = "mapbox.light",
accessToken = Sys.getenv('MAPBOX_ACCESS_TOKEN')))
#### Descriptive Results
# prevalence of diarrhea
ea.border@data$DiaProp <- as.numeric(ea.border@data$Diarrhea*100)
pal <- colorNumeric(palette = "Reds",
domain = ea.border@data$DiaProp)
m %>% addPolygons(
stroke = F, smoothFactor = 0.2,
fillOpacity = 0.7,
color = ~pal(ea.border@data$DiaProp)) %>%
addLegend("bottomright",
title = "Prevalence of Diarrhea <br> in East Africa (%)",
pal = pal,
values = ea.border@data$DiaProp)
# prevalence of ARI
ea.border@data$ARIProp <- as.numeric(ea.border@data$ARI*100)
pal <- colorNumeric(palette = "Reds",
domain = ea.border@data$ARIProp)
m %>% addPolygons(
stroke = F, smoothFactor = 0.2,
fillOpacity = 0.7,
color = ~pal(ea.border@data$ARIProp)) %>%
addLegend("bottomright",
title = "Prevalence of ARI <br> in East Africa (%)",
pal = pal,
values = ea.border@data$ARIProp)
# prevalence of Fever
ea.border@data$FeverProp <- as.numeric(ea.border@data$fever*100)
pal <- colorNumeric(palette = "Reds",
domain = ea.border@data$FeverProp)
m %>% addPolygons(
stroke = F, smoothFactor = 0.2,
fillOpacity = 0.7,
color = ~pal(ea.border@data$FeverProp)) %>%
addLegend("bottomright",
title = "Prevalence of fever <br> in East Africa (%)",
pal = pal,
values = ea.border@data$FeverProp)
# prevalence of ChildMorb
ea.border@data$ChilMorbProp <- as.numeric(ea.border@data$ChildMorbid *100)
pal <- colorNumeric(palette = "Reds",
domain = ea.border@data$ChilMorbProp)
m %>% addPolygons(
stroke = F, smoothFactor = 0.2,
fillOpacity = 0.7,
color = ~pal(ea.border@data$ChilMorbProp)) %>%
addLegend("topright",
title = "Prevalence of childhood <br> morbidity in East Africa (%)",
pal = pal,
values = ea.border@data$ChilMorbProp)
# prevalence of Poor
ea.border@data$PoorProp <- as.numeric(ea.border@data$Poor *100)
pal <- colorNumeric(palette = "Blues",
domain = ea.border@data$PoorProp)
m %>% addPolygons(
stroke = F, smoothFactor = 0.2,
fillOpacity = 0.7,
color = ~pal(ea.border@data$PoorProp)) %>%
addLegend("topright",
title = "Prevalence of poor <br> in East Africa (%)",
pal = pal,
values = ea.border@data$PoorProp)
## calculting GW bandwidth to find an optimal adaptive bandwidth
## using a bi-square kernel
bw.gwpca.k8=bw.gwpca(ea.scaled.spdf,
vars=colnames(ea.scaled.spdf@data[,1:27]),
k = 8,
robust = FALSE,
adaptive = TRUE,
kernel = "bisquare")
# Percentage of total variance captured by the first `n.components`
# geographically weighted components, per location.
#
# gwpca.obj:    a gwpca() result whose $var matrix holds, per row (location),
#               the variance of each local component.
# n.components: how many leading components to accumulate.
# Returns a numeric vector (one value per location) in [0, 100].
#
# BUGFIX: with n.components == 1, `var[, 1:1]` dropped to a plain vector and
# rowSums() errored ("'x' must be an array of at least two dimensions") --
# which is why the script computed the one-component case by hand below.
# drop = FALSE keeps the one-column matrix so any n.components >= 1 works.
prop.var=function(gwpca.obj, n.components){
  lead <- gwpca.obj$var[, 1:n.components, drop = FALSE]
  (rowSums(lead) / rowSums(gwpca.obj$var)) * 100
}
# ---- Percentage of total variance (PTV) per location, components 1-6 ----
# For each k: compute how much of the total local variance the first k GW
# components explain at every location, attach it to ea.border, print
# summary statistics, then map it with a quintile-binned palette.
# PTV within GWPC1
# Computed directly (not via prop.var) because a single column is a
# vector and rowSums() inside prop.var() would fail on it.
ea.ptv1<-(gwpca.k8$var[,1:1])/rowSums(gwpca.k8$var)*100
ea.border$ea.ptv1.k8=ea.ptv1
summary(ea.border$ea.ptv1.k8)
sd(ea.border$ea.ptv1.k8)
# mapping PTV within GWPC1
# NOTE(review): mypal.6 actually holds 5 colours despite its name.
mypal.6 <- c('#ffffb2', '#fecc5c', '#fd8d3c', '#f03b20', '#bd0026')
bins <- c(quantile(ea.border$ea.ptv1.k8, probs = seq(0,1, 0.2),
                   type = 8))
pal <- colorBin(mypal.6, domain = ea.border$ea.ptv1.k8, bins = bins)
m %>% addPolygons(
  fillColor = ~pal(ea.border$ea.ptv1.k8),
  weight = 1,
  opacity = 0.3,
  color = 'grey75',
  fillOpacity = 0.5) %>%
  addLegend("bottomright",
            title = "Percentage of total variance <br>within first component",
            pal = pal,
            values = ea.border$ea.ptv1.k8)
# PTV within the first two components (GWPC1-2); same recipe as above,
# repeated for k = 2..6 below.
ea.ptv2=prop.var(gwpca.k8, 2)
ea.border$ea.ptv2.k8=ea.ptv2
summary(ea.border$ea.ptv2.k8)
sd(ea.border$ea.ptv2.k8)
# mapping PTV within GWPC2
mypal.6 <- c('#ffffb2', '#fecc5c', '#fd8d3c', '#f03b20', '#bd0026')
bins <- c(quantile(ea.border$ea.ptv2.k8, probs = seq(0,1, 0.2),
                   type = 8))
pal <- colorBin(mypal.6, domain = ea.border$ea.ptv2.k8, bins = bins)
m %>% addPolygons(
  fillColor = ~pal(ea.border$ea.ptv2.k8),
  weight = 1,
  opacity = 0.3,
  color = 'grey75',
  fillOpacity = 0.5) %>%
  addLegend("bottomright",
            title = "Percentage of total variance <br>within first two components",
            pal = pal,
            values = ea.border$ea.ptv2.k8)
# PTV within first 3 components
ea.ptv3=prop.var(gwpca.k8, 3)
ea.border$ea.ptv3.k8=ea.ptv3
summary(ea.border$ea.ptv3.k8)
sd(ea.border$ea.ptv3.k8)
# mapping PTV within GWPC3
mypal.6 <- c('#ffffb2', '#fecc5c', '#fd8d3c', '#f03b20', '#bd0026')
bins <- c(quantile(ea.border$ea.ptv3.k8, probs = seq(0,1, 0.2),
                   type = 8))
pal <- colorBin(mypal.6, domain = ea.border$ea.ptv3.k8, bins = bins)
m %>% addPolygons(
  fillColor = ~pal(ea.border$ea.ptv3.k8),
  weight = 1,
  opacity = 0.3,
  color = 'grey75',
  fillOpacity = 0.5) %>%
  addLegend("bottomright",
            title = "Percentage of total variance <br>within first three components",
            pal = pal,
            values = ea.border$ea.ptv3.k8)
# PTV within first 4 components
ea.ptv4=prop.var(gwpca.k8, 4)
ea.border$ea.ptv4.k8=ea.ptv4
summary(ea.border$ea.ptv4.k8)
sd(ea.border$ea.ptv4.k8)
# mapping PTV within GWPC4
mypal.6 <- c('#ffffb2', '#fecc5c', '#fd8d3c', '#f03b20', '#bd0026')
bins <- c(quantile(ea.border$ea.ptv4.k8, probs = seq(0,1, 0.2),
                   type = 8))
pal <- colorBin(mypal.6, domain = ea.border$ea.ptv4.k8, bins = bins)
m %>% addPolygons(
  fillColor = ~pal(ea.border$ea.ptv4.k8),
  weight = 1,
  opacity = 0.3,
  color = 'grey75',
  fillOpacity = 0.5) %>%
  addLegend("bottomright",
            title = "Percentage of total variance <br>within first four components",
            pal = pal,
            values = ea.border$ea.ptv4.k8)
# PTV within first 5 components
ea.ptv5=prop.var(gwpca.k8, 5)
ea.border$ea.ptv5.k8=ea.ptv5
summary(ea.border$ea.ptv5.k8)
sd(ea.border$ea.ptv5.k8)
# mapping PTV within GWPC5
mypal.6 <- c('#ffffb2', '#fecc5c', '#fd8d3c', '#f03b20', '#bd0026')
bins <- c(quantile(ea.border$ea.ptv5.k8, probs = seq(0,1, 0.2),
                   type = 8))
pal <- colorBin(mypal.6, domain = ea.border$ea.ptv5.k8, bins = bins)
m %>% addPolygons(
  fillColor = ~pal(ea.border$ea.ptv5.k8),
  weight = 1,
  opacity = 0.3,
  color = 'grey75',
  fillOpacity = 0.5) %>%
  addLegend("bottomright",
            title = "Percentage of total variance <br>within first five components",
            pal = pal,
            values = ea.border$ea.ptv5.k8)
# PTV within first 6 components
ea.ptv6=prop.var(gwpca.k8, 6)
ea.border$ea.ptv6.k8=ea.ptv6
summary(ea.border$ea.ptv6.k8)
sd(ea.border$ea.ptv6.k8)
# mapping PTV within GWPC6
mypal.6 <- c('#ffffb2', '#fecc5c', '#fd8d3c', '#f03b20', '#bd0026')
bins <- c(quantile(ea.border$ea.ptv6.k8, probs = seq(0,1, 0.2),
                   type = 8))
pal <- colorBin(mypal.6, domain = ea.border$ea.ptv6.k8, bins = bins)
m %>% addPolygons(
  fillColor = ~pal(ea.border$ea.ptv6.k8),
  weight = 1,
  opacity = 0.3,
  color = 'grey75',
  fillOpacity = 0.5) %>%
  addLegend("bottomright",
            title = "Percentage of total variance <br>within first six components",
            pal = pal,
            values = ea.border$ea.ptv6.k8)
# ---- "Winning" variable per location, components 1-6 ----
# For each component: take the local loadings matrix, find for every
# location the variable with the largest |loading| (max.col returns the
# column index of the row maximum), and map that index as a category.
# NOTE(review): `levels(f) <- c(...)` assigns labels POSITIONALLY to the
# sorted distinct indices present in the data. The hand-written label
# vectors below must match the table() output for that component exactly;
# if the winning set changes, labels silently shift. Verify each run.
# NOTE(review): mypal.11 / mypal.17 / mypal.15 are defined earlier in the
# file (not visible here).
# extracting most influential variables in GWPC1
loadings.pc1.k8=gwpca.k8$loadings[,,1] # View(loadings.pc1.k8)
PC1.WinVar.k8=max.col(abs(loadings.pc1.k8)) # per-row index of the variable with the largest |loading|
ea.border$PC1.WinVar.k8=PC1.WinVar.k8
table(PC1.WinVar.k8)
# mapping (11 observed winning variables for PC1)
ea.border$PC1.WinVar.k8.cat <- as.factor(ea.border$PC1.WinVar.k8)
levels(ea.border$PC1.WinVar.k8.cat) <-c("Poor", "Rural", "Solid Fuel",
                                        "Healthcare Access",
                                        "Mother Edu < Secondary",
                                        "Mother Edu >= Secondary",
                                        "Father Edu < Secondary",
                                        "Father Edu >= Secondary",
                                        "Mother Occ - Prof",
                                        "Unimproved Drinking Water",
                                        "Water at Home")
factpal <- colorFactor(mypal.11, ea.border$PC1.WinVar.k8.cat)
m %>%
  addPolygons(
    stroke = FALSE, smoothFactor = 0.2, fillOpacity = 0.5,
    color = ~factpal(ea.border$PC1.WinVar.k8.cat)
  ) %>%
  addLegend("bottomright",
            title = "Variables with max loadings <br> within 1st component",
            pal = factpal,
            values = ea.border$PC1.WinVar.k8.cat)
# extracting most influential variables in GWPC2
loadings.pc2.k8=gwpca.k8$loadings[,,2]
PC2.WinVar.k8=max.col(abs(loadings.pc2.k8))
ea.border$PC2.WinVar.k8=PC2.WinVar.k8
table(PC2.WinVar.k8)
# mapping (17 observed winning variables for PC2)
ea.border$PC2.WinVar.k8.cat <- as.factor(ea.border$PC2.WinVar.k8)
levels(ea.border$PC2.WinVar.k8.cat) <-c("Poor", "Rural", "Solid Fuel",
                                        "Single Mother",
                                        "Mother Occ - None",
                                        "Girl Child",
                                        "Unimproved Drinking Water",
                                        "Complete Immunization", "Aridity",
                                        "Max Temperature","PM2.5","Drought Hazard",
                                        "6-11 months", "12-23 months",
                                        "24-35 months", "36-47 months",
                                        "48-59 months")
factpal <- colorFactor(mypal.17, ea.border$PC2.WinVar.k8.cat)
m %>%
  addPolygons(
    stroke = FALSE, smoothFactor = 0.2, fillOpacity = 0.7,
    color = ~factpal(ea.border$PC2.WinVar.k8.cat)
  ) %>%
  addLegend("bottomright",
            title = "Variables with max loadings <br> within 2nd component",
            pal = factpal,
            values = ea.border$PC2.WinVar.k8.cat)
# extracting most influential variables in GWPC3
loadings.pc3.k8=gwpca.k8$loadings[,,3]
PC3.WinVar.k8=max.col(abs(loadings.pc3.k8))
ea.border$PC3.WinVar.k8=PC3.WinVar.k8
table(PC3.WinVar.k8)
# mapping (13 labels here, coloured from the 17-colour palette)
ea.border$PC3.WinVar.k8.cat <- as.factor(ea.border$PC3.WinVar.k8)
levels(ea.border$PC3.WinVar.k8.cat) <-c("Solid Fuel","Father Edu < Secondary",
                                        "Mother Occ - None", "Girl Child",
                                        "Complete Immunization",
                                        "Maximum Temperature", "PM2.5",
                                        "< 6 months",
                                        "6-11 months",
                                        "12-23 months",
                                        "24-35 months",
                                        "36-47 months",
                                        "48-59 months")
factpal <- colorFactor(mypal.17, ea.border$PC3.WinVar.k8.cat)
m %>%
  addPolygons(
    stroke = FALSE, smoothFactor = 0.2, fillOpacity = 0.7,
    color = ~factpal(ea.border$PC3.WinVar.k8.cat)
  ) %>%
  addLegend("bottomright",
            title = "Variables with max loadings <br> within 3rd component",
            pal = factpal,
            values = ea.border$PC3.WinVar.k8.cat)
# extracting most influential variables in GWPC4
loadings.pc4.k8=gwpca.k8$loadings[,,4]
PC4.WinVar.k8=max.col(abs(loadings.pc4.k8))
ea.border$PC4.WinVar.k8=PC4.WinVar.k8
table(PC4.WinVar.k8)
# mapping (15 labels, 15-colour palette)
ea.border$PC4.WinVar.k8.cat <- as.factor(ea.border$PC4.WinVar.k8)
levels(ea.border$PC4.WinVar.k8.cat) <-c("Solid Fuel","Healthcare Access",
                                        "Single Mother",
                                        "Mother Occ - None",
                                        "Mother Occ - Prof",
                                        "Girl Child",
                                        "Unimproved Drinking Water",
                                        "Complete Immunization",
                                        "Maximum Temperature",
                                        "< 6 months",
                                        "6-11 months",
                                        "12-23 months",
                                        "24-35 months",
                                        "36-47 months",
                                        "48-59 months")
factpal <- colorFactor(mypal.15, ea.border$PC4.WinVar.k8.cat)
m %>%
  addPolygons(
    stroke = FALSE, smoothFactor = 0.2, fillOpacity = 0.7,
    color = ~factpal(ea.border$PC4.WinVar.k8.cat)
  ) %>%
  addLegend("bottomright",
            title = "Variables with max loadings <br> within 4th component",
            pal = factpal,
            values = ea.border$PC4.WinVar.k8.cat)
# extracting most influential variables in GWPC5
loadings.pc5.k8=gwpca.k8$loadings[,,5]
PC5.WinVar.k8=max.col(abs(loadings.pc5.k8))
ea.border$PC5.WinVar.k8=PC5.WinVar.k8
table(PC5.WinVar.k8)
# mapping (17 labels, 17-colour palette)
ea.border$PC5.WinVar.k8.cat <- as.factor(ea.border$PC5.WinVar.k8)
levels(ea.border$PC5.WinVar.k8.cat) <-c("Poor","Solid Fuel",
                                        "Healthcare Access",
                                        "Single Mother",
                                        "Mother Edu < Secondary",
                                        "Mother Occ - None",
                                        "Mother Occ - Prof",
                                        "Girl Child",
                                        "Unimproved Drinking Water",
                                        "Complete Immunization",
                                        "Maximum Temperature",
                                        "< 6 months",
                                        "6-11 months",
                                        "12-23 months",
                                        "24-35 months",
                                        "36-47 months",
                                        "48-59 months")
factpal <- colorFactor(mypal.17, ea.border$PC5.WinVar.k8.cat)
m %>%
  addPolygons(
    stroke = FALSE, smoothFactor = 0.2, fillOpacity = 0.7,
    color = ~factpal(ea.border$PC5.WinVar.k8.cat)
  ) %>%
  addLegend("bottomright",
            title = "Variables with max loadings <br> within 5th component",
            pal = factpal,
            values = ea.border$PC5.WinVar.k8.cat)
# extracting most influential variables in GWPC6
loadings.pc6.k8=gwpca.k8$loadings[,,6]
PC6.WinVar.k8=max.col(abs(loadings.pc6.k8))
ea.border$PC6.WinVar.k8=PC6.WinVar.k8
table(PC6.WinVar.k8)
# mapping
# NOTE(review): 15 labels here but the 17-colour palette (mypal.17) is
# used, unlike PC4 which used mypal.15 for 15 labels -- confirm intended.
ea.border$PC6.WinVar.k8.cat <- as.factor(ea.border$PC6.WinVar.k8)
levels(ea.border$PC6.WinVar.k8.cat) <-c("Solid Fuel",
                                        "Healthcare Access",
                                        "Single Mother",
                                        "Mother Occ - None",
                                        "Mother Occ - Prof",
                                        "Girl Child",
                                        "Unimproved Drinking Water",
                                        "Complete Immunization",
                                        "Maximum Temperature",
                                        "< 6 months",
                                        "6-11 months",
                                        "12-23 months",
                                        "24-35 months",
                                        "36-47 months",
                                        "48-59 months")
factpal <- colorFactor(mypal.17, ea.border$PC6.WinVar.k8.cat)
m %>%
  addPolygons(
    stroke = FALSE, smoothFactor = 0.2, fillOpacity = 0.7,
    color = ~factpal(ea.border$PC6.WinVar.k8.cat)
  ) %>%
  addLegend("bottomright",
            title = "Variables with max loadings <br> within 6th component",
            pal = factpal,
            values = ea.border$PC6.WinVar.k8.cat)
## Monte Carlo test for eigenvalue nonstationarity of the first component.
## Simulation (n = 1000), auto-calibrated bandwidth. Estimated time ~15 h.
gwpca.mc27.1000 <- montecarlo.gwpca.2(ea.scaled.spdf,
                                      vars = colnames(ea.scaled.spdf@data[,1:27]),
                                      k = 27, nsims=1000,
                                      adaptive = TRUE)
# Plot the null distribution of the test statistic with the observed value
# and the null mean marked.
# NOTE(review): the simulation above is stored as gwpca.mc27.1000 but the
# plot reads gwpca.mc10.1000 (presumably from an earlier session), and the
# reference values 2.160 / 0.8164263 / "p value = 0.0160" are hard-coded
# rather than derived from the object -- confirm these still match.
with(ea.scaled.spdf@data[,1:27],{
  plot(density(gwpca.mc10.1000$sims),
       main = "Test statistics for eigenvalue nonstationarity",
       xlab = "SD of local eigenvalues from randomisations \n N = 1000")
  abline(v=2.160, col = "red", lwd = 2)
  text(2.25, 2.5, paste("Observed SD of local eigenvalues \n p value = 0.0160"), col = "red",
       srt = 90)
  abline(v = 0.8164263, col = "darkblue", lwd = 2, lty = 5)
  text(0.9, 2.5, paste("Mean of SD of local \n eigenvalues from randomisations"),
       col = "darkblue", srt = 90)
  rug(gwpca.mc10.1000$sims, col = "orange")
})
abline(h=-0.1, v=2.15900, col = "red", lwd = 2)
# ---- Bandwidth (CV score) curve for GWPCA ----
# Evaluate the cross-validation score over a grid of candidate adaptive
# bandwidths (5, 15, 25, ... nearest neighbours) and plot the curve.
# NOTE(review): `Coords` (observation coordinates) is defined earlier in
# the file; sample.n = 885 is the hard-coded number of observations.
sample.n <- 885
ea.mat<-as.matrix(ea.scaled.spdf@data[,1:27])
bwd.range.adapt <- c(seq(5,sample.n,by=10))
cv.score <- matrix(nrow=length(bwd.range.adapt),ncol=1)
for(i in 1:length(bwd.range.adapt)) cv.score[i] <-
  gwpca.cv(bwd.range.adapt[i],ea.mat, loc = Coords,
           k=8, robust = F, adaptive = T, kernel = "bisquare")
# Draw the CV curve; the tiny cex/pch make the points invisible so only
# the line (type = "l") shows.
plot(bwd.range.adapt, cv.score, ylab="", xlab="", cex=0.01, pch=0.01,
     ylim=c(0, 8500), type = "l")
title(ylab = list("CV score", cex=1.2, col="black"))
title(xlab = list("No. of nearest neighbours", cex=1.2, col="black"))
title(main = list("Adaptive bandwidth function for GWPCA", cex=1.4, col="black"))
# Hard-coded reference lines marking the chosen bandwidth / its CV score.
abline(v = 275, col="red", lty=2)
abline(h = 3556, col="black", lty = 2)
#### GWPCR #####
# ---- Bivariate GWR: childhood morbidity ~ poverty ----
# Calculating the AICc-optimal adaptive bandwidth.
ea.gwr.bi <- bw.gwr(ChildMorbid ~ Poor,
                    data = ea.border, approach = 'AICc',
                    kernel = 'bisquare', adaptive = TRUE)
# Fit the GWR.
# NOTE(review): the fit uses a fixed bw = 350 rather than the ea.gwr.bi
# bandwidth computed just above -- confirm this is intentional.
ea.gwr.res.bi <- gwr.basic(ChildMorbid ~ Poor,
                           data = ea.border, bw = 350,
                           kernel = 'bisquare', adaptive = TRUE,
                           F123.test = TRUE)
# mapping unadjusted coefficients for Poor
# NOTE(review): bi.Poor is stored here but never mapped; the map below
# plots gwss.biPoor$SDF$Cov_ChildMorbid.Poor (a GW covariance object
# defined elsewhere), not the coefficients just computed -- verify.
ea.border@data$bi.Poor <- as.numeric(ea.gwr.res.bi$SDF$Poor)
bins <- c(quantile(gwss.biPoor$SDF$Cov_ChildMorbid.Poor, probs = seq(0, 1, 0.20), type = 8))
pal <- colorBin(mypal.RedBlack, domain = gwss.biPoor$SDF$Cov_ChildMorbid.Poor, bins = bins)
m %>% addPolygons(
  fillColor = ~pal(gwss.biPoor$SDF$Cov_ChildMorbid.Poor),
  weight = 1,
  opacity = 0.3,
  color = 'grey75',
  fillOpacity = 0.6) %>%
  addLegend("bottomright",
            title = "Unadjusted coefficients<br>for Poor",
            pal = pal,
            #labels= c("Low", "","","","High"),
            values = gwss.biPoor$SDF$Cov_ChildMorbid.Poor)
# Map the multiple-testing-adjusted p values for the Poor coefficient.
ea.adjust.gwt.biPoor <- gwr.t.adjust(ea.gwr.res.bi) # produces a list
names(ea.adjust.gwt.biPoor$SDF) # check the names of items within it
ea.adjust.gwt.biPoorTtable <- ea.adjust.gwt.biPoor$SDF@data
names(ea.adjust.gwt.biPoorTtable)
View(ea.adjust.gwt.biPoorTtable)
pal <- colorNumeric(
  palette = "Blues",
  domain = ea.adjust.gwt.biPoorTtable$Poor_p
)
m %>%
  addPolygons(
    stroke = FALSE, smoothFactor = 0.2, fillOpacity = 0.5,
    color = ~pal(ea.adjust.gwt.biPoorTtable$Poor_p)
  ) %>%
  addLegend("bottomright", pal = pal, values = ~ea.adjust.gwt.biPoorTtable$Poor_p,
            title = "p values",
            opacity = 0.5)
# ---- Multivariable GWR model selection ----
# Response and candidate predictors (poverty plus the first 6 global PCs).
DeVar="ChildMorbid"
InDeVars=c("Poor", "st.global.PC1", "st.global.PC2", "st.global.PC3","st.global.PC4",
           "st.global.PC5", "st.global.PC6")
# Stepwise GWR model selection at a fixed adaptive bandwidth of 350,
# then sort the candidate models by AICc (column 2 of the ruler matrix).
model.sel=model.selection.gwr(DeVar, InDeVars, data=ea.border,
                              kernel = "bisquare", adaptive=T, bw=350)
sorted.models=model.sort.gwr(model.sel, numVars=length(InDeVars),
                             ruler.vector=model.sel[[2]][,2])
model.list=sorted.models[[1]]
# Visualizing the model-selection circle plot (a minor modification of
# GWmodel's model.view.gwr: filled points, custom title/limits).
#
# Arguments:
#   DeVar      -- name of the response variable (legend only).
#   InDeVars   -- character vector of candidate predictor names; fixes
#                 each variable's colour (rainbow) and point symbol.
#   model.list -- first element of model.sort.gwr() output: one entry per
#                 candidate model, whose [[2]] is that model's variables.
# Side effect: draws one plot; models are laid out around a circle
# (angle = model index), variables at increasing radius within a model.
model.view.gwr.OR <- function (DeVar, InDeVars, model.list)
{
  n <- length(InDeVars)
  # Shrink text/line weight when there are many candidate variables.
  if (n > 10) {
    cex <- 10/n
  }
  else {
    cex <- 1
  }
  numModels <- length(model.list)
  alpha <- 2 * pi/numModels
  cols <- rainbow(n)
  pchs <- rep(c(16, 16, 16, 16, 16, 16, 16), length.out = n)
  plot(x = 0, y = 0, xlim = c(-3 * n/4, n + 6),
       ylim = c(-n - 1, n + 1), cex = 2, axes = F,
       pch = "O", xlab = "", ylab = "",
       main = "View of GWPCR model selection")
  for (i in 1:numModels) {
    vars <- model.list[[i]][[2]]
    nvar <- length(vars)
    p1 <- c(0, 0)
    for (j in 1:nvar) {
      # Radius grows with the variable's position within the model.
      radius <- sqrt(n) * sqrt(j)
      var.idx <- which(InDeVars == vars[j])
      coord <- c(radius * cos((i - 1) * alpha), radius *
                   sin((i - 1) * alpha))
      lines(x = c(p1[1], coord[1]), y = c(p1[2], coord[2]),
            col = "black", lwd = cex, lty = 3)
      points(x = coord[1], y = coord[2], col = cols[var.idx],
             pch = pchs[var.idx], cex = (cex * i/numModels +
                                           0.3))
      p1 <- coord
    }
    # NOTE: `radius` deliberately carries over from the LAST inner-loop
    # iteration, so the model label sits just beyond its outermost point.
    text(x = (radius + 0.5) * cos((i - 1) * alpha),
         y = (radius + 0.5) * sin((i - 1) * alpha),
         as.character(i), cex = cex * 0.6)
  }
  legend(x = n + 1, y = n/1, col = c("black", cols),
         pch = c(1, pchs), c(DeVar, InDeVars), box.col = "white")
}
# Draw the circle view of the sorted candidate models, then the AICc of
# each model in selection order (with a fine horizontal reference grid).
model.view.gwr.OR(DeVar,InDeVars, model.list=model.list)
plot(sorted.models[[2]][,3], col=c("grey40", "darkred"),
     pch=16, lty=1, lwd=2,
     main="AICc based model selection for GWPCR",
     ylab="AICc", xlab="Model number", type="b")
abline(h= seq(-980, -2000, -5),
       lty=9, lwd = 0.3, col = "grey30")
# ---- Full GWPCR: AICc bandwidth, fit, and local R2 map ----
# Calculating the AICc-optimal adaptive bandwidth for the full model.
ea.gwr <- bw.gwr(ChildMorbid ~ Poor + st.global.PC1 + st.global.PC2 +
                   st.global.PC3 + st.global.PC4 + st.global.PC5 +
                   st.global.PC6,
                 data = ea.border, approach = 'AICc',
                 kernel = 'bisquare', adaptive = TRUE)
# GWPCR using poverty plus the first 6 global PCs.
# NOTE(review): as with the bivariate model, a fixed bw = 350 is used
# instead of the ea.gwr bandwidth computed above -- confirm intentional.
ea.gwr.res <- gwr.basic(ChildMorbid ~ Poor + st.global.PC1 + st.global.PC2 +
                          st.global.PC3 + st.global.PC4 + st.global.PC5 +
                          st.global.PC6,
                        data = ea.border, bw = 350,
                        kernel = 'bisquare', adaptive = TRUE,
                        F123.test = TRUE)
## Mapping local R2 over its full range.
pal <- colorNumeric(
  palette = "Blues",
  domain = ea.gwr.res$SDF$Local_R2
)
m %>%
  addPolygons(
    stroke = FALSE, smoothFactor = 0.2, fillOpacity = 0.5,
    color = ~pal(ea.gwr.res$SDF$Local_R2)
  ) %>%
  addLegend("bottomright", pal = pal, values = ~ea.gwr.res$SDF$Local_R2,
            title = "Local R2",
            opacity = 0.5)
# ---- Map of the adjusted GWPCR coefficient for Poor ----
# Diverging grey-to-red palette.
# NOTE(review): this REDEFINES mypal.6 (6 colours) which earlier held a
# 5-colour yellow-red ramp; any re-run of earlier chunks after this point
# will pick up the new palette.
mypal.6 = c("#4d4d4d", "#999999", "#e0e0e0", "#fddbc7", "#ef8a62","#b2182b")
bins <- c(quantile(ea.gwr.res$SDF$Poor, probs = seq(0, 1, 0.20), type = 8))
pal <- colorBin(mypal.6, domain = ea.gwr.res$SDF$Poor, bins = bins)
m %>% addPolygons(
  fillColor = ~pal(ea.gwr.res$SDF$Poor),
  weight = 1,
  opacity = 0.3,
  color = 'grey75',
  fillOpacity = 0.6) %>%
  addLegend("bottomright",
            title = "Coefficients for Poor",
            pal = pal,
            #labels= c("Low", "","","","High"),
            values = ea.gwr.res$SDF$Poor)
# Adjust p-values of the full model for multiple hypothesis tests and map
# the adjusted p-values for the Poor coefficient.
ea.adjust.gwt <- gwr.t.adjust(ea.gwr.res) # produces a list
names(ea.adjust.gwt$SDF) # check the names of items within it
ea.adjust.gwtTtable <- ea.adjust.gwt$SDF@data
names(ea.adjust.gwtTtable)
View(ea.adjust.gwtTtable)
pal <- colorNumeric(
  palette = "Blues",
  domain = ea.adjust.gwtTtable$Poor_p
)
m %>%
  addPolygons(
    stroke = FALSE, smoothFactor = 0.2, fillOpacity = 0.5,
    color = ~pal(ea.adjust.gwtTtable$Poor_p)
  ) %>%
  addLegend("bottomright", pal = pal, values = ~ea.adjust.gwtTtable$Poor_p,
            title = "p values",
            opacity = 0.5)
# ---- Local multicollinearity diagnostics ----
# Local condition numbers for the full GWPCR specification at bw = 350.
ea.gwr.collin<- gwr.collin.diagno(ChildMorbid ~ Poor + st.global.PC1 +
                                    st.global.PC2 + st.global.PC3 +
                                    st.global.PC4 + st.global.PC5 +
                                    st.global.PC6,
                                  data = ea.border, bw = 350,
                                  kernel="bisquare", adaptive=TRUE)
# Plotting local condition numbers with a quintile-binned palette
# (mypal.6 is reset here to the 5-colour yellow-red ramp).
ea.border@data$LocCN <- as.numeric(ea.gwr.collin$local_CN)
mypal.6 <- c('#ffffb2', '#fecc5c', '#fd8d3c', '#f03b20', '#bd0026')
bins <- c(quantile(ea.border@data$LocCN, probs = seq(0,1, 0.2),
                   type = 8))
pal <- colorBin(mypal.6, domain = ea.border@data$LocCN, bins = bins)
m %>% addPolygons(
  fillColor = ~pal(ea.border@data$LocCN),
  weight = 1,
  opacity = 0.3,
  color = 'grey75',
  fillOpacity = 0.5) %>%
  addLegend("bottomright",
            title = "Local CNs",
            pal = pal,
            values = ea.border@data$LocCN)
|
c7f0bfa48d31687f9d9436bede9eb39c4d42698b | 7620c35c064d095c31a306ad437a7e4599dff5ec | /plot3.R | 1f59e59876784bae0f7b903ceb404ffd0a5a4094 | [] | no_license | juancarlozamora/ExData_Plotting1 | 3d113b3de6229ff044932e27a7da1afe72df0d73 | 4c2123252aa85db8f529b16cb49a25a5280854de | refs/heads/master | 2021-04-26T04:37:13.301145 | 2018-03-06T06:49:54 | 2018-03-06T06:49:54 | 124,028,682 | 0 | 0 | null | 2018-03-06T05:59:20 | 2018-03-06T05:59:20 | null | UTF-8 | R | false | false | 1,431 | r | plot3.R | # Set the working directory into downloaded directory of the data set.
# plot3.R -- Exploratory Data Analysis course, plot 3.
# Reads two days of the UCI household power consumption data and writes
# plot3.png: the three sub-metering series over time, with a legend.
# NOTE(review): a hard-coded absolute setwd() is not portable; kept
# because the course scripts assume it, but consider running the script
# from the data directory instead.
setwd("C:/Users/zamojua/Documents/Graduate Studies/Data Science Course/Exploratory Data Analysis/Week 1")
# Load the semicolon-separated data from household_power_consumption.txt
consumption <- read.table("household_power_consumption.txt", header = TRUE, sep = ";", stringsAsFactors = FALSE, dec = ".")
# Keep only the two target dates, 2007-02-01 and 2007-02-02 (d/m/Y format)
subsetpower <- consumption[consumption$Date %in% c("1/2/2007", "2/2/2007"),]
# Combine Date and Time into a single POSIXlt timestamp for the x axis
# Owned by Juan Carlo Zamora
datetime <- strptime(paste(subsetpower$Date, subsetpower$Time, sep = " "), "%d/%m/%Y %H:%M:%S")
# Sub-metering series as numeric vectors
# (the unused Global_active_power extraction -- a leftover from plot2.R --
# has been removed)
submeter1 <- as.numeric(subsetpower$Sub_metering_1)
submeter2 <- as.numeric(subsetpower$Sub_metering_2)
submeter3 <- as.numeric(subsetpower$Sub_metering_3)
# Render to a 480 x 480 PNG
png("plot3.png", width = 480, height = 480)
# Draw sub-metering 1 (black), then overlay 2 (red) and 3 (blue)
plot(datetime, submeter1, type = "l", ylab = "Energy Submetering", xlab = "")
lines(datetime, submeter2, type = "l", col="red")
lines(datetime, submeter3, type = "l", col="blue")
# Then add legend
legend("topright", c("Sub_metering_1", "Sub_metering_2", "Sub_metering_3"), lty = 1, lwd = 2.5, col = c("black", "red", "blue"))
# Close the PNG device so the file is flushed to disk
dev.off()
|
194b4be6e86d87c9171eef39122a757b58db3997 | 20ea933626e5d896dc25b72d9f639650f1eb8993 | /meta_dmr.r | f9d8c3a7744efe9693efd21b483bcc4c8f5818d4 | [] | no_license | nievergeltlab/ewas | 4c5903ee435cb74fc15259c8db716ab7a99ab255 | b94be46df8881b23023060dd7e2a7c9ee3d11883 | refs/heads/master | 2020-05-26T14:56:39.538931 | 2019-05-23T17:11:54 | 2019-05-23T17:11:54 | 188,276,105 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 4,969 | r | meta_dmr.r | #Note that this is for just CpG islands. The other options are genes and promoters
#So I suggest using find and replace on "CGI" to get the appropriate file for genes or promoters, making a copy for each version of this.
#Load DMR analysis results.
# NOTE(review): load() expects files written by save(); despite the ".r"
# extension these must be R data images providing mrsCGI / prismoCGI /
# starrsCGI -- confirm before renaming or re-exporting them.
load('semper/mrs_cgi_gsea_100k.r')
load('mirror/prismo_cgi_gsea_100k.r')
load('starrs_cgi_gsea_100k.r')
#Number of subjects per study; used below as sample-size weights.
mrs = 126
starrs=78
prismo=62
#For each dataset:
#Make a dataframe with the p-value and normalized effect size (direction)
#Name the dataframe rows by the CpG island names
#Convert p-values to Z scores (chi-square(1) quantile gives |Z|; the sign
#comes from the normalized enrichment score)
#dataset 1: MRS
g1 <- data.frame(mrsCGI$CGI$pval,mrsCGI$CGI$NES)
names(g1) <- c('p_mrs','nes_mrs')
row.names(g1) <- row.names(mrsCGI$CGI)
# p -> chi-square(1) -> |Z|, signed by the direction of the NES
g1$chi_mrs <- qchisq(g1$p_mrs,1,lower.tail=F)
g1$z_mrs <- sqrt(g1$chi_mrs)* sign(g1$nes_mrs)
g1$CpG <- row.names(g1)
#dataset 2: STARRS
g2 <- data.frame(starrsCGI$CGI$pval,starrsCGI$CGI$NES)
names(g2) <- c('p_starrs','nes_starrs')
row.names(g2) <- row.names(starrsCGI$CGI)
g2$chi_starrs <- qchisq(g2$p_starrs,1,lower.tail=F)
g2$z_starrs <- sqrt(g2$chi_starrs)* sign(g2$nes_starrs)
g2$CpG <- row.names(g2)
#dataset 3: PRISMO
# NOTE(review): column suffixes are inconsistent here ("p_prismo" /
# "nes_prismo" but "chi_pris" / "z_pris"); downstream code relies on
# these exact names, so do not rename casually.
g3 <- data.frame(prismoCGI$CGI$pval,prismoCGI$CGI$NES)
names(g3) <- c('p_prismo','nes_prismo')
row.names(g3) <- row.names(prismoCGI$CGI)
g3$chi_pris <- qchisq(g3$p_prismo,1,lower.tail=F)
g3$z_pris <- sqrt(g3$chi_pris)* sign(g3$nes_prismo)
g3$CpG <- row.names(g3)
#Merge the 3 sets of results together according to DMR (CpG island) name.
# NOTE: merge() resets row names to integers; the island id survives in
# the CpG column.
Bmat0 <- merge(g1,g2,by="CpG",suffixes=c("_mrs","starrs"))
Bmat3 <- merge(Bmat0,g3,by="CpG")
# Stouffer's sample-size-weighted Z meta-analysis.
# Uses the per-study N variables defined near the top of the script
# instead of repeating the counts as magic numbers (previously 78/126/62
# were hard-coded here and had to be kept in sync with those variables
# by hand).
Bmat3$bw1 <- Bmat3$z_starrs * sqrt(starrs) # STARRS: 31 cases + 47 controls
Bmat3$bw2 <- Bmat3$z_mrs * sqrt(mrs)       # MRS:    63 cases + 63 controls
Bmat3$bw3 <- Bmat3$z_pris * sqrt(prismo)   # PRISMO: 29 cases + 33 controls
#Sum of weighted Zs (numerator of the pooled Z)
Bmat3$bwsum <- Bmat3$bw1 + Bmat3$bw2 + Bmat3$bw3
#Denominator: sqrt of the sum of squared weights, i.e. sqrt(total N)
Bmat3$wsum <- sqrt(starrs + mrs + prismo)
#Pooled Z (weighted average)
Bmat3$szpool <- Bmat3$bwsum / Bmat3$wsum
#Meta-analysis p-value from the pooled Z
Bmat3$p_meta <- pchisq(Bmat3$szpool^2,1,lower.tail=F)
#Collect DMRs significant (p < 0.00017) in any single study or in the
#meta-analysis, then save them.
# Fixes two latent bugs in the original selection:
#  * `Bmat3$p` matched no column: with p_mrs/p_starrs/p_prismo/p_meta all
#    present, `$` partial matching is ambiguous and returns NULL, so the
#    meta-analysis hits were silently never selected. Use p_meta
#    explicitly (and p_prismo instead of the partial `p_pris`).
#  * which() drops NA p-values, which previously injected all-NA rows
#    (row names "NA", "NA.1", ...) into the subset.
r1 <- row.names(Bmat3)[which(Bmat3$p_starrs < 0.00017)]
r1 <- c(r1, row.names(Bmat3)[which(Bmat3$p_mrs < 0.00017)])
r1 <- c(r1, row.names(Bmat3)[which(Bmat3$p_prismo < 0.00017)])
r1 <- c(r1, row.names(Bmat3)[which(Bmat3$p_meta < 0.00017)])
#Filter meta analysis data down to just these markers
sigmarkers <- Bmat3[row.names(Bmat3) %in% r1,]
#Save the significant DMRs to a file
write.table(sigmarkers,'gsea_CGI_dmrs_meta_100k.txt')
#Also save the entire meta analysis to a file
save(Bmat3,file="gsea_CGI_dmr_meta_100k_ruttenhc3.R")
####
#Get annotation info for DMRs, i.e. the span of the DMR .. WORK IN PROGRESS
#
# Hand-picked significant islands to annotate.
# NOTE(review): "chr7:1885033-1885402" and "chr7:27169572-27170638" each
# appear twice in this vector -- probably unintended duplicates.
islands <- c("chr1:156814881-156815792","chr6:33048416-33048814","chr21:35831697-35832365","chr7:1885033-1885402","chr7:27169572-27170638","chr6:32551851-32552331","chr6:25882327-25882560","chr7:1885033-1885402", "chr11:70672834-70673055" ,"chr12:9217328-9217715", "chr7:27169572-27170638", "chr6:33048416-33048814", "chr10:530713-531099", "chr17:76037074-76037323" ,"chr21:35831697-35832365", "chr17:8702342-8702824", "chr16:1583809-1584641" ,"chr5:191792-192544")
head(starrsCGI$CGI[row.names(starrsCGI$CGI) %in% islands,])
# NOTE(review): the bare vector below is evaluated and printed but never
# assigned or used -- presumably gene names jotted down for reference.
c("HLA-DPB1","HLA-DRB1","IFT140","PRDM16","HLA-DPB1","KIF25" )
# CpG probes belonging to each selected island (from the STARRS object).
cpgs_assoc <- starrsCGI$CGI_association[islands]
save(cpgs_assoc , file="gsea_CGI_dmr_meta_100k_ruttenhc3_CpGs.R")
load("gsea_CGI_dmr_meta_100k_ruttenhc3_CpGs.R")
# Read the Illumina 450k manifest (skip its header block).
library(data.table)
annots <- fread('misc/humanmethylation450_15017482_v1-2.csv',data.table=F,skip=7,nrow=485553)
row.names(annots) <- annots$IlmnID
# Per-island summary: gene, genomic span, and probe count.
# NOTE(review): the V1/V2 columns from the ncol = 2 matrix and the
# `length` column are created but never filled or used below.
cmat <- as.data.frame(matrix(nrow=length(cpgs_assoc),ncol=2))
cmat$length <- NA
cmat$nprobes <- NA
cmat$start <-NA
cmat$stop <- NA
cmat$Gene <- NA
# NOTE(review): 1:length(...) misbehaves when cpgs_assoc is empty
# (iterates over c(1, 0)); seq_along() would be safer.
for (num in 1:length(names(cpgs_assoc)))
{
  # All manifest rows for this island's probes; take the first gene name
  # and the min/max positions as the island span.
  ma <- subset(annots, IlmnID %in% unlist(cpgs_assoc[num][1]))
  cmat[num,]$Gene <- ma$UCSC_RefGene_Name[1]
  cmat[num,]$start <- min(ma$MAPINFO)
  cmat[num,]$stop <- max(ma$MAPINFO)
  cmat[num,]$nprobes=dim(ma)[1]
}
cmat
#For islands we only need the number of probes per DMR.
# Replaces an index loop (which also broke on empty input via
# 1:length()) with a single vectorised vapply; unlist() is kept so any
# nested element counts its flattened probes exactly as before.
dmat <- as.data.frame(names(cpgs_assoc))
dmat$length <- vapply(cpgs_assoc, function(x) length(unlist(x)), integer(1))
#For a given study, plot the dmr stuff (WORK IN PROGRESS, mCSEA plot needs to be hacked for this to work!!)
#Load EWAS statistics (provides resultsT used below)
load('semper/MRS_ruttenhc3_results.Rdata')
#Plot the DMR itself (HLA-DPB1 region island)
pdf('test_HLACGI.pdf',7,7)
mCSEAPlot(mrsCGI, regionType = "CGI",
          dmrName = "chr6:33048416-33048814",
          transcriptAnnotation = "symbol", makePDF = TRUE)
dev.off()
#Plot the GSEA-style enrichment for the same island
# NOTE(review): this writes to the SAME file name as the plot above, so
# the DMR plot is overwritten -- use a different file name if both are
# wanted.
pdf('test_HLACGI.pdf',7,7)
mCSEAPlotGSEA(resultsT[-1,3], mrsCGI, regionType = "CGI", dmrName = "chr6:33048416-33048814")
dev.off()
|
229bccbb9ea23f5af4872cace0e6de60e2fc063b | e201e192fc8ed87d7b92f141568bdf21b17d965d | /R/print.info.atomic.R | a441c8bdbc455b73f2a8df4bf5f489c02cfbc7ac | [] | no_license | zeligdev/ZeligDVN | e66c0ee16dea4cf5faae9bfe35a986eaa0489885 | 091b46787249fe1362cdbfb7184873f29b2082e9 | refs/heads/master | 2020-12-24T14:18:47.566770 | 2011-12-13T15:21:06 | 2011-12-13T15:21:06 | 2,349,825 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 491 | r | print.info.atomic.R | #' Print a Column Analysis
#'
#' Correctly format and display the analysis of an individual column.
#' @usage \method{print}{info.atomic}(x, ...)
#' @S3method print info.atomic
#' @param x a \code{column.analysis} object
#' @param ... ignored parameters
#' @return the object (invisibly)
# Print a summary of an atomic-column analysis -- class, mode, length and
# the count of distinct values -- then return the object invisibly so the
# method composes cleanly in pipelines.
print.info.atomic <- function (x, ...) {
  fields <- list(
    "Class: " = x$class,
    "Mode: " = x$mode,
    "Length:" = x$length,
    "Unique Values:" = length(x$unique)
  )
  for (label in names(fields)) {
    cat(label, fields[[label]], "\n")
  }
  invisible(x)
}
|
21127cfc3789128ed939e332d92e0ed0870d6cf3 | e91552560cb59fbc71789062e47155b9f006293e | /topofire/R/projection_data_warp.R | 0e9eeab5fd005e40df2ad45463f32df92ebf4cfa | [] | no_license | LMXB/drought_indicators | 66ed760564c4721c425d44ef9e5f2c9563678d9d | 1723c46985291f046de00340e6c1f5528530b0d8 | refs/heads/master | 2022-05-08T21:10:47.331095 | 2020-04-24T16:37:13 | 2020-04-24T16:37:13 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,299 | r | projection_data_warp.R | rm(list = ls())
# projection_data_warp.R
# Warps every gridMET SPI GeoTIFF in input.dir onto the grid of a master
# raster (the 2.5 km NWS SPI grid) and writes the results to write.dir,
# doing the per-file work in parallel.
library(raster)
library(ncdf4)
library(rgdal)
library(parallel)
library(tictoc)
library(stringr)
library(foreach)
library(doParallel)
# master grid whose projection/resolution every input will be matched to
master = raster("/mnt/ScratchDrive/data/Hoylman/SPI/NWS_SPI_Test/nws_current_spi_30.tif")
# define directories (input rasters, output folder, helper-script repos)
input.dir = '/mnt/ScratchDrive/data/Hoylman/SPI/Raw_gridMET_SPI_test/'
write.dir = "/mnt/ScratchDrive/data/Hoylman/SPI/Raw_gridMET_SPI_test/2.5km_warp/"
git.dir = '/home/zhoylman/drought_indicators/zoran/R/'
work.dir <- "/home/zhoylman/drought_indicators/zoran/R/"
# collect input GeoTIFF paths (full paths for reading, bare names for
# building output file names)
input = list.files(input.dir, pattern = ".tif$", full.names = T)
input_short = list.files(input.dir, pattern = ".tif$", full.names = F)
# import function to extract dates from the file names
source(paste0(git.dir, "fdates.R"))
time = fdates(input)
# (the unused as.character(time) intermediate was removed)
# warp in parallel, one task per input file
tic()
# leave one core free for the OS; never ask for fewer than one worker
cl = makeCluster(max(detectCores() - 1, 1))
registerDoParallel(cl)
# iterate over the inputs directly (the loop body indexes input[i] and
# input_short[i]; previously it iterated 1:length(time), which is only
# correct when fdates() returns one date per file)
projected_raster = foreach(i = seq_along(input)) %dopar% {
  # each worker loads its own copy of the package and the warp helper
  library(raster)
  source(paste0(work.dir, "ProjectRaster_function.R"))
  input.file <- raster(input[i])
  target.file <- master
  out.file <- paste0(write.dir, "warp_2.5km_", input_short[i])
  gdalProjRaster(input.file, target.file, out.file)
}
# stop parallel cluster
stopCluster(cl)
toc()
|
d0ebd3ce7e93018edefb364bd7ba7ca4d1bce79d | 58fd3b96113e90860cc683c1dd29250c2f965e9a | /R/local_path.R | 35d6ea14d8ba6661d5b3d18bf42b4ea7a8a82a14 | [] | no_license | dacarras/r4sda | d0a75ef2f6425dc263ff8638345e0191d58f56df | 702465d57f125551eb71250a1b2c4ba106a9cca9 | refs/heads/master | 2023-01-13T07:30:45.381142 | 2022-12-29T14:23:57 | 2022-12-29T14:23:57 | 167,874,962 | 1 | 0 | null | null | null | null | UTF-8 | R | false | false | 736 | r | local_path.R | #' local_path() returns the parent directory of the working directory.It aids the generation of relative directory to store all code in single folder, and then reproduce the calculations in a Mac OS machine or a Windows machine, or else.
#' Build an absolute path from the parent of the working directory
#'
#' Resolves the parent directory of the current working directory and appends
#' \code{x} to it, turning a project-relative location into an absolute one
#' for the current machine (so the same code runs on macOS, Windows, etc.).
#'
#' @param x a string with a directory location relative to the project root
#'   (e.g. '/00_data/', '/01_syntax/', '/02_tables/')
#'
#' @return a character string: the absolute path on the current machine
#'
#' @examples
#' data_dir <- local_path("/00_data/")
#' @export
local_path <- function(x) {
  parent_dir <- tools::file_path_as_absolute("..")
  paste0(parent_dir, x)
}
|
07ac46013bbcbc0cebb561df48f935c3cf3489f6 | 5e4273c9870b9dc46708dc9e88bf631db565c707 | /man/plot.yapafit.Rd | d841ea83b08fe45d78fdce4869262c7198c53385 | [
"MIT"
] | permissive | alexpavlakis/pollagg | 28f03b4f2ad775a5c4f30466fe8b312801b2bc6e | f29b64d4fa87277b19b8a6d22695b51451407b03 | refs/heads/master | 2022-12-16T13:01:37.688375 | 2020-09-24T19:35:36 | 2020-09-24T19:35:36 | 295,246,051 | 0 | 0 | null | null | null | null | UTF-8 | R | false | true | 303 | rd | plot.yapafit.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/yapa.R
\name{plot.yapafit}
\alias{plot.yapafit}
\title{plot a yapa model}
\usage{
\method{plot}{yapafit}(x, ...)
}
\arguments{
\item{x}{a yapafit model.}
\item{...}{additional arguments.}
}
\description{
plot a yapa model
}
|
69999589c495b2da791d27d0b5884a4a93c1298a | 24f0e5f5b4113736d630ff97e1d9e092ac203eb4 | /SourceTracker_v1.0.1.R | a6e8839d9ebdf3f76143ff006235e95feea84cbd | [] | no_license | CamEJ/UsingSourceTracker | 6eb7217c3139cd4314c8f19858f60561c9a50d78 | ff5a2c77302467cb7fee43e3b8646bad73af1305 | refs/heads/master | 2022-04-17T15:57:11.819103 | 2020-04-16T13:08:34 | 2020-04-16T13:08:34 | 256,209,016 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 4,888 | r | SourceTracker_v1.0.1.R |
## == get work space raad ===
setwd("~/Google Drive/Personal/WP3 paper workings/sourcetracker-1.0.1")
source('/Users/cam/Google Drive/Data & reports/R/setFactorOrder.R')
require(dplyr)
library(stringr)
library(ggplot2)
library(reshape2)
# Running SourceTracker version 1.0.1
# https://github.com/danknights/sourcetracker
# Download tar.gz and then extract this and set folder as wkdir
# Open example.r and run script the first time to check everything works
# correctly using their example data.
# ================== getting data ready ====================================#
# you will need 1) a metadatafile and 2) an OTU table
# 1. metadata file formate
# this must have:
# a) a column called 'SourceSink' saying if your sample is a source or a sink.
# b) a column called 'Env' saying which environment your sample is from
# c) a column called Description which you can use to describe your samples
# d) any samples you do now want to be analysed as a source or sink should
# have NA in these 3 columns.
# head(metadata)
# SlurryAdd Phase Treatment SourceSink Env Description
# Slurry1 <NA> <NA> <NA> source slurry slurry
# Slurry2 <NA> <NA> <NA> source slurry slurry
# T0_Tmt1_a Minus NoFlood Control source soil soil
# T0_Tmt1_b Minus NoFlood Control source soil soil
# T0_Tmt1_c Minus NoFlood Control source soil soil
# T0_Tmt2_a Plus NoFlood Slurry sink tmt2 0_slurry
# 2. OTU table.
# This needs to have same sample names as in your metadata file
# They suggest using trimmed OTU to remove low abundance OTUs
# I ran this with OTUs trimmed to > 0.005% as per Bokulich et al 2013.
# https://www.ncbi.nlm.nih.gov/pmc/articles/PMC3531572/
# The script below expects that initially your samples will be columns and
# OTUs will be rows (& they will therefore be transformed).
# your OTU table should not include any taxo information.
# use biom table from Mothur's make.biom() command
# ====================== loading data & running scrips ================================
# load sample metadata
metadata <- read.table('cDNA_metadata_forSTracker2019.txt',sep='\t',h=T,row.names=1)
# load OTU table
# This 'read.table' command is designed for a
# QIIME-formatted OTU table.
# namely, the first line begins with a '#' sign
# and actually _is_ a comment; the second line
# begins with a '#' sign but is actually the header
otus <- read.table('cDNA_forSTracker2019.txt',sep='\t', header=T,row.names=1,check=F,comment='')
# quick interactive sanity checks of the imported table
head(otus)
dim(otus)
head(otus) # check
# transpose so rows become samples and columns become OTUs, as SourceTracker expects
otus <- t(as.matrix(otus)) # transform.
## === next section as it appears in original sourcetracker.R file
# extract only those samples in common between the two tables
common.sample.ids <- intersect(rownames(metadata), rownames(otus))
otus <- otus[common.sample.ids,]
metadata <- metadata[common.sample.ids,]
# double-check that the mapping file and otu table
# had overlapping samples; abort early with a readable message otherwise
if(length(common.sample.ids) <= 1) {
# FIX: the original sprintf() used '%d' without supplying a value, which is
# itself an error; pass the count so the message renders correctly
message <- sprintf('Error: there are %d sample ids in common between the metadata file and data table',
length(common.sample.ids))
stop(message)
}
# extract the source environments and source/sink indices
train.ix <- which(metadata$SourceSink=='source')
test.ix <- which(metadata$SourceSink=='sink')
envs <- metadata$Env
# keep per-sample descriptions when the metadata provides them
if(is.element('Description',colnames(metadata))) desc <- metadata$Description
# load SourceTracker package
source('src/SourceTracker.r')
# tune the alpha values using cross-validation (this is slow!)
# tune.results <- tune.st(otus[train.ix,], envs[train.ix])
# alpha1 <- tune.results$best.alpha1
# alpha2 <- tune.results$best.alpha2
# note: to skip tuning, run this instead:
alpha1 <- alpha2 <- 0.001
# train SourceTracker object on training data
st <- sourcetracker(otus[train.ix,], envs[train.ix])
# Error in sum(x[i, ]) : invalid 'type' (character) of argument
# I initially got this error when I had only one source sample.
# added a second and it worked a charm
# Estimate source proportions in test data
results <- predict(st,otus[test.ix,], alpha1=alpha1, alpha2=alpha2)
# Estimate leave-one-out source proportions in training data (no test data passed)
results.train <- predict(st, alpha1=alpha1, alpha2=alpha2)
### plot results =====
# build per-sample labels: "<env> <description>"
labels <- sprintf('%s %s', envs,desc)
# alternative labelling by treatment group (overrides the line above)
# FIX: 'Treatment' is a column of `metadata`, not a free-standing object --
# the original bare reference would fail with "object 'Treatment' not found"
labels <- sprintf('%s %s', envs, metadata$Treatment)
plot(results, labels[test.ix], type='pie')
# other plotting functions
# plot(results, labels[test.ix], type='bar')
plot(results, labels[test.ix], type='dist')
# plot(results.train, labels[train.ix], type='pie')
# plot(results.train, labels[train.ix], type='bar')
# plot(results.train, labels[train.ix], type='dist')
# =================== reading data out to plot more clearly ======================
ProportionsOut = as.data.frame(results$proportions)
# now write out / plot 'ProportionsOut'
|
1eb9d98feb9ae6986d7d5511dba3b8cbd7e7f3c1 | e6896d88d76eccc687616e6f5929a4143a555a82 | /Plot6.R | b4434abaed1f97fefaee076028ab8f369d07be1e | [] | no_license | hh0749/Exploratory-Data-Analysis---project2 | db48b69f0857e7130c84fa362eeffad9ab527af1 | 55fcf547787a7ed00e6b46053f67954eec4b0373 | refs/heads/master | 2021-01-19T05:53:55.500690 | 2016-06-17T11:39:39 | 2016-06-17T11:39:39 | 61,369,521 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,023 | r | Plot6.R | ##Assignment2
# Compare emissions from motor vehicle sources in Baltimore City with emissions
# from motor vehicle sources in Los Angeles County, California (fips == 06037).
# Which city has seen greater changes over time in motor vehicle emissions?
## Read dataset into R
NEI <- readRDS("summarySCC_PM25.rds")
SCC <- readRDS("Source_Classification_Code.rds")
library(plyr)
# FIX: filter() below uses dplyr syntax, but dplyr was never attached, so the
# call would dispatch to stats::filter() and fail; attach dplyr after plyr so
# dplyr::filter wins the search path while ddply still comes from plyr
library(dplyr)
library(ggplot2)
# keep on-road (motor vehicle) records for Baltimore City and LA County
final<-filter(NEI,type=="ON-ROAD", fips %in% c("24510","06037"))
# total emissions per year / type / county code
final2<-ddply(final,.(year,type,fips),function(x) sum(x$Emissions))
colnames(final2)[4] <- "Emissions"
colnames(final2)[3] <- "County"
final2$County[final2$County == '24510'] <- 'Baltimore'
final2$County[final2$County == '06037'] <- 'California'
png("plot6.png", width=480, height=480)
# FIX: wrap in print() so the plot is actually drawn to the png device even
# when this script is run via source() (ggplot objects are not auto-printed);
# also fixed the "mortor" typo in the user-facing title
print(
qplot(year, Emissions, data=final2,color = County, geom = "line")+
ggtitle(expression("Total US" ~ PM[2.5] ~ "Emissions by Year from motor vehicle sources in Baltimore vs California"))+
xlab("Year") +
ylab(expression("Total" ~ PM[2.5] ~ "Emissions (tons)"))
)
dev.off()
|
22000d99187e9adf5a843367fdb24bc2f7476f2f | 6a811d1fbe579e346b9813857f9059be010a67a7 | /scripts/figures/upset.R | b9348f20f9eebc469935cb09893e0e27c4cbf617 | [] | no_license | HongyuanWu/rkipcreg | 317634c4d3b500ad23cf39c6f50843e889237692 | c50b5e3d0d6576c268fe160ac37f0db6e3cc3f11 | refs/heads/master | 2021-10-10T23:28:30.353773 | 2019-01-19T04:04:05 | 2019-01-19T04:04:05 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 362 | r | upset.R | # loading required libraries
library(tidyverse)
library(cRegulome)
library(grid)
library(gridExtra)
# load data -- provides the 'tf_targets' object used below
load('data/tf.rda')
# generate graph: build a cTF object and draw its correlation upset plot
# (cTF() and cor_upset() come from the cRegulome package)
tf_targets %>%
cTF() %>%
cor_upset()
# grab the rendered grid graphics as a grob so it can be saved with ggsave
grid.edit('arrange', name = 'p1')
p1 <- grid.grab()
ggsave(plot = p1,
filename = 'output/figures/upset.png',
width = 10, height = 10, units = 'cm')
|
e4d6b9b0fc4e43ab7c9cd71be62b41a1cd95554a | 20a3b30be39c5f9e11837c477a2332223074258a | /man/barah-package.Rd | 73c5fe6f17082132cb56d0ec20ba63a0387cb2fe | [
"MIT"
] | permissive | daranzolin/barah | 9622eef127ffeead2a88ae15f0f054af61d63e79 | f81a02688d16de6972482ea42039c010e6235435 | refs/heads/master | 2022-07-01T00:53:18.219057 | 2020-05-11T16:55:42 | 2020-05-11T16:55:42 | 262,227,608 | 4 | 0 | null | null | null | null | UTF-8 | R | false | false | 480 | rd | barah-package.Rd | \name{barah-package}
\alias{barah-package}
\alias{barah}
\docType{package}
\title{
Format elements into a specified pattern
}
\description{
Data comes in messy, unpredictable formats. Use the fashion function to bring
order to the chaos.
}
\details{
The package exposes a single main function, \code{fashion()}, which formats
messy input elements into a user-specified pattern.
}
\author{
David Ranzolin
Maintainer: David Ranzolin <daranzolin@gmail.com>
}
\keyword{ package }
|
97113c81289b4dd4563a30c43431bd1ed8678916 | 5b153389e67e59a30aebf6a0d84b69fd89f805d4 | /datatools/man/bloomberg.market.Rd | cfb89ec43d21f93d085bc13f13bc53babda67481 | [] | no_license | dengyishuo/dengyishuo.github.com | 480d9b5911851e56eca89c347b7dc5d83ea7e07d | 86e88bbe0dc11b3acc2470206613bf6d579f5442 | refs/heads/master | 2021-03-12T23:17:10.808381 | 2019-08-01T08:13:15 | 2019-08-01T08:13:15 | 8,969,857 | 41 | 35 | null | null | null | null | UTF-8 | R | false | false | 644 | rd | bloomberg.market.Rd | % Generated by roxygen2 (4.0.1): do not edit by hand
\name{bloomberg.market}
\alias{bloomberg.market}
\title{get market data}
\usage{
bloomberg.market(name, startDate = NULL, endDate = NULL, addSMA = FALSE,
ifPlot = TRUE, ...)
}
\arguments{
\item{name}{symbol of the bloomberg ticker}
\item{startDate}{startDate of data}
\item{endDate}{endDate of data}
\item{addSMA}{if add simple moving average line to the plot}
\item{ifPlot}{if plot to the window}
\item{...}{additional arguments}
}
\value{
data frame and plot
}
\description{
get market information data
}
\details{
get market information data
}
\author{
Weilin Lin
}
|
23438a3a89280fa0e09e918e1bd71ed6fea57c51 | b0b02bd60d27b9dfcbe20c43396d008a6f949d19 | /checkMobLocation.R | 192b11891905f5011500e8e75a1a7cb55f0d4065 | [] | no_license | jwhill07/practice | 5edb3a3d2fa57d219bb59f99529ad12a29f3b234 | 59d2e3508ab4b7de8b1400dba6b39153b8852ec3 | refs/heads/master | 2021-01-13T04:48:53.603042 | 2017-02-14T01:29:38 | 2017-02-14T01:29:38 | 78,690,654 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 477 | r | checkMobLocation.R | # checkMobLocation ensures all patients entered onto Redcap were marked as "scanned at UWMC"
# notes inconsistencies
source("fix_dataset.R")
# if "data." precedes colnames in dataset1, strip that prefix
# FIX: 'data.' is a regular expression in which '.' matches ANY character,
# so the old call could also mangle names like 'database'; fixed = TRUE makes
# gsub() treat the pattern as the literal string "data."
colnames(dataset1) <- gsub("data.", "", colnames(dataset1), fixed = TRUE)
# rows whose mob assessment location is not UWMC (code 4)
indices <- which(dataset1["mob_location"] != 4)
if (length(indices) > 0) {
  # FIX: a bare sprintf() result is discarded (not auto-printed) when this
  # script is run via source(); emit the lines explicitly so inconsistencies
  # are actually reported
  msg <- sprintf("Subject %s did not have mob assessment at UWMC", dataset1[indices, "idnum"])
  message(paste(msg, collapse = "\n"))
} else {
  cat("All Subjects had mob assessments at UWMC")
}
|
b0fa6aadab9239897172d47076b9c26584270479 | 59c51287120be281e0e5a155e122e8931508bc4a | /man/spikeTimeCrosscorrelationEvents-methods.Rd | 3685eb9b5ec2cccdb24045ca302f9ddd9bfb013b | [
"LicenseRef-scancode-warranty-disclaimer"
] | no_license | kevin-allen/relectro | 2a43159f3f2f08cf7c09168c69b90e1e3b51dc55 | b2fb35c49f8c46d1519f502e9cb01c6c9f2cb0a8 | refs/heads/master | 2020-04-12T08:05:01.663420 | 2019-03-18T21:22:11 | 2019-03-18T21:22:11 | 57,366,781 | 8 | 8 | null | 2018-08-02T14:16:57 | 2016-04-29T08:00:25 | R | UTF-8 | R | false | true | 1,213 | rd | spikeTimeCrosscorrelationEvents-methods.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/SpikeTrain.R
\docType{methods}
\name{spikeTimeCrosscorrelationEvents}
\alias{spikeTimeCrosscorrelationEvents}
\alias{spikeTimeCrosscorrelationEvents,SpikeTrain-method}
\alias{spikeTimeCrosscorrelationEvents,ANY,ANY-method}
\title{Calculate the spike-time crosscorrelation between the spike trains and a list of events}
\usage{
spikeTimeCrosscorrelationEvents(st, binSizeMs = 1, windowSizeMs = 200,
probability = FALSE)
\S4method{spikeTimeCrosscorrelationEvents}{SpikeTrain}(st, binSizeMs = 1,
windowSizeMs = 200, probability = FALSE)
}
\arguments{
\item{st}{SpikeTrain object}
\item{binSizeMs}{Default is 1}
\item{windowSizeMs}{Default is 200, meaning that it ranges from + and - 200}
\item{probability}{If TRUE, will calculate the probability of a spike in a given bin instead of the spike count}
}
\value{
st SpikeTrain object with the slot crossEvents filled
}
\description{
Each event is treated in turn as a reference event.
The number of spikes or probability to observe a spike around the reference event is calculated.
You can set the bin size in ms and the time window over which you want to do the analysis.
}
|
b93477f09399c070c4e2007b2c6c33cd8d8ecd92 | ac958b19be657d0a842f3837ead2a7f34728e579 | /OriginalExoticPlantsProject/individual_obs_for_each_species.r | fec7fb2860dc2fe6a6e1d74b3310d51859244c88 | [] | no_license | daisyduursma/SDMexoticPlants | 5b1fcb157df8b6a82b2c01c2ea7be45e3f40752d | 17c99bf1dcd64f6828fee3d88fcf9cab773cb245 | refs/heads/master | 2021-08-28T10:00:23.180922 | 2017-12-11T22:56:53 | 2017-12-11T22:56:53 | 113,917,196 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 735 | r | individual_obs_for_each_species.r | #make individual observation .csv files for each species, this is needed to calculate AIC and BIC in ENMtools.
# make sure workspace is clean
rm(list = ls())
# library(dismo)
# get directories where data are located
work.dir<-"C:\\Daisy\\Current Projects\\exotic plants\\data\\"
# read in SWD files
sp_dat<- read.table(paste(work.dir,"observation_SWD_8_8_12.csv", sep=""),header=TRUE, sep=",")
species <-as.vector(unique(sp_dat$species))
# for each species, write its occurrence points to a separate csv
# FIX: seq_along() is safe when 'species' is empty; 1:length(species)
# would iterate over c(1, 0) and fail on an empty species list
for (i in seq_along(species)){
# get occurrence points for one species
remove_dat<-sp_dat$species==species[i]
occurence<-sp_dat[remove_dat,]
write.csv(occurence,paste(work.dir,"Observations_by_species\\",species[i],".csv",sep=""))
} |
c5acdb6cd39a8ef87599563a108c3beb9bc58294 | 5e7aa59a610986857aefdb58b7adf5e867f6dca7 | /code/t-test.R | a34c8d627e1d832d74f3b65a3a8b94f7d821c374 | [] | no_license | yong-again/R_prac | 3526ab1a3f05b6b217c89bd59b85c4879d5295eb | 0fac8af1569a7cfea45d71827eced51f2f31c1e7 | refs/heads/main | 2023-05-07T12:23:38.942629 | 2021-06-04T14:26:50 | 2021-06-04T14:26:50 | 371,392,185 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 2,910 | r | t-test.R | ?t.test
install.packages("pwr")
library(pwr)
?pwr.t.test
install.packages("moonBook")
library(moonBook)
library(ggplot2)
library(dplyr)
library(plyr)
?acs
head(acs)
str(acs)
## Hypothesis setup
### We want to know whether the two groups (men and women) differ in age.
#### Null hypothesis: there is no difference in mean age between men and women.
#### Alternative hypothesis: there is a difference in mean age between men and women.
mean.male <- mean(acs$age[acs$sex == "Male"])
mean.female <- mean(acs$age[acs$sex == "Female"])
cat(mean.male, mean.female)
# Normality test
## Using ggplot would require reshaping the data first
## the moonBook package ships a helper that draws the density plot directly
moonBook::densityplot(age ~ sex, data=acs)
# Null hypothesis: the data are normally distributed (p-value > 0.05)
# Alternative hypothesis: the data are not normally distributed
# Normality of the male group
shapiro.test(acs$age[acs$sex == "Male"])
# Normality of the female group
shapiro.test(acs$age[acs$sex == "Female"])
# Equal-variance (F) test
var.test(age ~ sex, data=acs)
# Not normally distributed -> Mann-Whitney-Wilcoxon (MWW) test
wilcox.test(age~ sex, data=acs)
## The alternative hypothesis is accepted
# Student's t-test (pooled variance)
# FIX: t.test() has no 'var.test' argument -- the original 'var.test=T' was
# silently swallowed by '...', so Welch's test was run in BOTH calls below.
# The argument that selects the pooled-variance test is 'var.equal'.
t.test(age ~ sex, data=acs, var.equal=TRUE, alt= "two.sided")
# Welch's test (unequal variances, the default)
t.test(age ~ sex, data=acs, var.equal=FALSE, alt="two.sided")
# dummy : 0 indicates a county (gun), 1 indicates a city (si)
# We want to know whether total fertility rate differs between cities and counties.
# Null hypothesis: there is no difference.
# Alternative hypothesis: there is a difference.
mydata <- read.csv("C:/r_wrok/data/independent.csv")
View(mydata)
mean.dummy0 <- mean(mydata$birth_rate[mydata$dummy==0])
mean.dummy0
mean.dummy1 <- mean(mydata$birth_rate[mydata$dummy==1])
mean.dummy1
cat(mean.dummy0, mean.dummy1)
# Normality check
shapiro.test(mydata$birth_rate[mydata$dummy==0])
shapiro.test(mydata$birth_rate[mydata$dummy==1])
# dummy0: p-value = 0.009702
# dummy1: p-value = 0.001476
# Not normal -> MWW test
wilcox.test(birth_rate ~ dummy, data=mydata)
# p-value = 0.04152
# t-test
t.test(birth_rate ~ dummy, data=mydata)
# Conclusion: the difference in fertility rate between cities and counties is significant.
str(mtcars)
head(mtcars)
# am : 0 = auto, 1 = manual
# mpg : fuel efficiency (miles per gallon)
# Is there a difference in fuel efficiency between automatic and manual cars?
# Null hypothesis: fuel efficiency is the same for automatic and manual.
# Alternative hypothesis: fuel efficiency differs between automatic and manual.
mean.auto <- mean(mtcars$mpg[mtcars$am == 0])
mean.m <- mean(mtcars$mpg[mtcars$am == 1])
cat(mean.auto, mean.m)
# Normality check
shapiro.test(mtcars$mpg[mtcars$am == 0]) # p-value = 0.8987
shapiro.test(mtcars$mpg[mtcars$am == 1]) # p-value = 0.5363
# Equal-variance check
var.test(mpg ~ am, data=mtcars) # p-value = 0.06691
var.test(mtcars[mtcars$am==1, 1], mtcars[mtcars$am==0, 1])
# Student's t-test (pooled variance; justified since var.test p = 0.067 > 0.05)
# FIX: t.test() has no 'var.test' argument; the original 'var.test=T' fell
# into '...' and was ignored, so Welch's test ran instead (that is where the
# old p-value of 0.0006868 came from). 'var.equal=TRUE' selects the pooled test.
t.test(mpg ~ am, data=mtcars, var.equal=TRUE, alt='less')
# pooled one-sided p-value ~= 0.00014
# Conclusion: there is a significant mpg difference between auto and manual
|
7eae8205d318673fd572f355329df3f3e0d75b41 | 8e0a77b55ee4f0f16ae52d53ec9193d02989e87b | /modules/plots/plot_corr_matrix_wq.R | 28b43f78b521edf289c5de929bee65e3cdd62efa | [
"MIT"
] | permissive | dancrocker/DCR-WAVE | 63da2d3ef59f08334ce4559f29e27f9e8fc18c9a | 576d34223247bffca9a69b7a9d9b82a928117894 | refs/heads/master | 2022-10-04T18:22:48.569711 | 2022-10-04T12:33:27 | 2022-10-04T12:33:27 | 135,455,004 | 2 | 4 | MIT | 2022-10-04T12:33:28 | 2018-05-30T14:29:59 | R | UTF-8 | R | false | false | 2,820 | r | plot_corr_matrix_wq.R | ##############################################################################################################################
# Title: plot_corr_matrix_wq.R
# Type: Secondary Module for DCR Shiny App
# Description:
# Written by: Nick Zinck, Spring 2017
##############################################################################################################################
##############################################################################################################################
# User Interface
##############################################################################################################################
# UI half of the correlation-matrix module: a single full-height plot
# output wrapped in a fluidRow, namespaced by the module id.
PLOT_CORR_MATRIX_WQ_UI <- function(id) {
  namespace <- NS(id) # see General Note 1
  tagList(
    fluidRow(
      plotOutput(namespace("plot"), height = "100%")
    )
  )
}
##############################################################################################################################
# Server Function
##############################################################################################################################
# Server half of the correlation-matrix module.
#
# Args:
#   input, output, session: standard Shiny module server arguments
#   Df: reactive returning the data frame whose numeric columns are correlated
#
# Builds a Pearson correlation matrix from the numeric columns of Df(), keeps
# its (reordered) upper triangle -- reorder_cormat()/get_upper_tri() are
# helpers defined elsewhere in the app -- melts it for ggplot, and renders a
# heat-map whose device size grows with the number of selected parameters.
PLOT_CORR_MATRIX_WQ <- function(input, output, session, Df) {
  ns <- session$ns # see General Note 1
  # logical mask of numeric columns (the columns holding result data)
  num_rows <- reactive({
    sapply(Df(), is.numeric)
  })
  # count of numeric columns; corresponds to the parameters selected
  num_rows_count <- reactive({
    sum(num_rows(), na.rm = TRUE)
  })
  # melted upper-triangular correlation matrix (NULL when < 2 numeric columns)
  Df1 <- reactive({
    if (num_rows_count() > 1) {
      numeric_data <- Df()[, num_rows()]
      cormat <- round(cor(numeric_data, use = "pairwise.complete.obs"), 2)
      upper <- get_upper_tri(reorder_cormat(cormat))
      melt(upper, na.rm = TRUE)
    } else {
      NULL
    }
  })
  # correlation heat-map with the coefficient printed in each tile
  P <- reactive({
    base_plot <- ggplot(data = Df1(), aes(Var2, Var1, fill = value)) +
      geom_tile(color = "white") +
      scale_fill_gradient2(low = "blue", high = "red", mid = "white",
                           midpoint = 0, limit = c(-1,1), space = "Lab",
                           name="Pearson\nCorrelation")
    base_plot +
      theme_minimal() +
      theme(axis.text.y = element_text(size = 12)) +
      theme(axis.text.x = element_text(angle = 45, vjust = 1,
                                       size = 12, hjust = 1)) +
      theme(legend.text = element_text(size = 12)) +
      coord_fixed() +
      geom_text(aes(Var2, Var1, label = value), color = "black", size = 4) +
      theme(
        axis.title.x = element_blank(),
        axis.title.y = element_blank(),
        panel.grid.major = element_blank(),
        panel.border = element_blank(),
        panel.background = element_blank(),
        axis.ticks = element_blank())
  })
  # render the plot, sizing the device by the number of parameters shown
  observe({
    output$plot <- renderPlot({
      P()
    }, width = 250 + 35*num_rows_count(), height = 150 + 35*num_rows_count())
  })
}
|
53d7b13a0136a975057e54a6b052222bd3ccd8cd | c1a5c4dc784361d1e32430fe011422cc8145b2ce | /A2.0.3_functions_covariate_preprocessing_regional.R | d1ac7615eaeb0c1e14482dad0187dea16dde7d5e | [] | no_license | magali17/TRAP | a45259683acfd0144459a7f4aaa09cc172e5ad0a | aeeab48f3bb8afe87a15195876f34bbd591e9fe1 | refs/heads/master | 2021-06-20T18:35:43.155492 | 2020-12-22T23:30:45 | 2020-12-22T23:30:45 | 151,753,446 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 16,322 | r | A2.0.3_functions_covariate_preprocessing_regional.R |
#==============================================================================================================================================================================
# 'covariate.preprocess'
#
# Input arguments:
# * covars.mon: region-specific covariate data at monitoring sites
# * covars.sub: region-specific covariate data at cohort addresses
# covariate data for moniotirng and cohort locations should
# # be region-specific
# # be selected for the defined areas
# # be separate by monitoring and cohort locations
# # include the same covariates between montioring and cohort data
# # have no missing values for any variables (residual oil for NY, street canyon for NY and IL, and CALINE alternative for CA)
# # have the column, 'site_type', for type of site including "AQS", "FIXED", "HOME", "COMCO" or "P"
# * region: study region for monitoring and cohort data ('CA', 'NY', and 'IL' should be used when analysts want to include CALINE alternative, residual oil, or street canyon)
# * exclude.cat.canyon: preprocessing 2-0. whether or not to exclude non-numeric street canyon variable (canyon_road_type) (default is TRUE)
# * exclude.cityhall: preprocessing 2-0. whether or not to exclude distances to main and local city halls (default is TRUE)
# * exclude.census: preprocessing 2-0. whether or not to exclude census variables (default is TRUE)
# * exclude.old.LUR: preprocessing 2-0. whether or not to exclude USGS land use variables constructed in 1970-80 (default is TRUE)
# * exclude.relative.elevation: preprocessing 2-0. whether or not to exclude relative elevation (default is FALSE)
#
# Output objects:
# * covars.mon: preprocessed region-specific covariate data at monitoring sites
# * covars.sub: preprocessed region-specific covariate data at cohort addresses
# * exclude.include.vars: initial, excluded, and included variables
# * common.value: preprocessing 2-1. list of excluded variables which have less than 20% of being different from the most common value in monitoring data
# * low.landuse: preprocessing 2-2. list of excluded varlabies with maximum land use variables less than 10 in monitoring data
# * sd.ratio: preprocessing 2-3. list of excluded variables with SD of cohort data greater than 5 times of SD of monitoring data
# * outlier: preprocessing 2-4. list of excluded variables with outliers more than 2% in monitoring and cohort data combined
# * initial: list of initial variables after preprocessing 2-0
# * include: list of final variables after preprocessing 2
# * vars.count: numbers of excluded variables for each preprocessing step
#==============================================================================================================================================================================
# covars.mon = cov_mm0
# covars.sub = cov_act0 #cov_act_fake0
# region = "NW"
# common=T
# exclude.cat.canyon = TRUE
# exclude.cityhall = TRUE
# exclude.census = TRUE
# exclude.old.LUR = TRUE
# exclude.relative.elevation = FALSE
# remove_nonlog_dist = TRUE
covariate.preprocess <- function( covars.mon, covars.sub, region,common=T,
exclude.cat.canyon = TRUE, exclude.cityhall = TRUE, exclude.census = TRUE,
exclude.old.LUR = TRUE, exclude.relative.elevation = FALSE,
#added this for sensitivity analyses
remove_nonlog_dist = TRUE
)
{
# 0. Data prepration
# [drop columns that don't apply for our region]
if(region!="NY" & region!="IL"){
oil <- grepl("oil", names(covars.mon))
canyon <- grepl("canyon", names(covars.mon)) #canyon: street canyon variables for NY and Chicago
bus <- grepl("bus", names(covars.mon))
covars.mon <- covars.mon[,!oil & !canyon & !bus]
covars.sub <- covars.sub[,!oil & !canyon & !bus]
}
# if(region=="IL"){
# oil <- grepl("oil", names(covars.mon)) #oil: residual oil variables for NY
# bus <- grepl("bus", names(covars.mon))
# covars.mon <- covars.mon[,!oil & !bus]
# covars.sub <- covars.sub[,!oil & !bus]
# }
# if(region!="CA"){
# caline.alt <- grepl('^calinemod_alt_lt',names(covars.mon)) #caline.alt: caline alternative variables for LA
# covars.mon <- covars.mon[,!caline.alt]
# covars.sub <- covars.sub[,!caline.alt]
# }
# [make sure cohort covariates are same as the monitoring sites covariates being used; combine datasets]
covars.sub <- covars.sub[,names(covars.mon)]
covars.all <- rbind(covars.mon, covars.sub)
# 1. Preprocessing 1: recoding
# 1.1. compute min distance to any roads (a1, a2 and m3)
covars.a123D <- combine_a123_m(covars.all, removeOrig=FALSE)
# 1.2. comute and small roads (a2 and a3) and remove original variables
covars.a23D <- combine_a23_m(covars.a123D, removeOrig=TRUE)
# 1.3. comute and small roads (a2 and a3) and remove original variables
covars.a23L <- combine_a23_ll(covars.a23D, removeOrig=TRUE)
# 1.4. natural log transform CALINE, emission, and distance to sources (roads, any & large airports, coast, SML ports, commercial area, railrod/yard, oil, and cityhall)
# truncate distance variables at 10 m
# remove original variables
covars.cal <- log_transform_caline(covars.a23L, removeOrig=TRUE) #don't include
covars.em <- log_transform_emission(covars.a23L, removeOrig=TRUE)
covars.process1 <- log_transform_distance(covars.em, lowerBound=10, removeOrig= remove_nonlog_dist) #removeOrig=TRUE
# 2. Preprocessing 2: variable exclusion
## [KEEP - merges all data in output otherwise; OLD: Amanda: "can ignore this." don't need "site_type" column if don't use this. had originally set ACT locations to "P" and mobile monitoring locations to "FIXEX"]
covars.process1.mon <- covars.process1[covars.process1$site_type!="P",]
covars.process1.sub <- covars.process1[covars.process1$site_type=="P",]
# 2.0. exclude distances to road types for street canyons, city halls, old LUR, census, or relative elevation variables
## [ -----> don't include this? WHAT IS THIS DOING?? ..something w/ NAs in columns?...]
# covars.process2<-covars.process1
# # [---> (?) table of counts for non-missing & missing values in each column ]
# a<-apply(as.data.frame(!is.na(covars.process1)),2,table)
# # [subtracting columns w/ NAs - get ERROR msgs. doing this on my own]
# # browser()
# if (common==F){
# for (i in 1:dim(covars.process1)[2]){
# if (a[[i]]==0 ){
# # [---> ?? use only 1 set of bracket??]
# #if (a[i]==0 ){
# covars.process2<-covars.process2[,setdiff(colnames(covars.process2),names(a[i]))]
# }}}
# if (common==T){
# for (i in 1:dim(covars.process1)[2]){
# if (names(a[[i]])[1]==F){
# # [?? use only 1 set of bracket??]
# #if (names(a[i])[1]==F){
# covars.process2<-covars.process2[,setdiff(colnames(covars.process2),names(a[i]))]
# }}}
#covars.process1<-covars.process2
gis.vars <- c('m_to|pop|ll|ndvi|imp|em|calinemod|rlu|lu|elev|tl|intersect|oil|canyon|no2')
initial.vars <- names(covars.process1)[grepl(gis.vars, names(covars.process1))]
cityhall <- names(covars.process1)[grep('cityhall',names(covars.process1))]
old.LUR <- names(covars.process1)[grep('^lu',names(covars.process1))]
census <- names(covars.process1)[grep('^tr|^bg|^bk',names(covars.process1))]
relative.elevation <- names(covars.process1)[grep('elev_1k|elev_5k',names(covars.process1))]
if(exclude.cat.canyon) {
initial.vars <- initial.vars[!(initial.vars %in% 'canyon_road_type')]
}
if(exclude.cityhall) {
initial.vars <- initial.vars[!(initial.vars %in% cityhall)]
}
if(exclude.old.LUR) {
initial.vars <- initial.vars[!(initial.vars %in% old.LUR)]
}
if(exclude.census) {
initial.vars <- initial.vars[!(initial.vars %in% census)]
}
if(exclude.relative.elevation) {
initial.vars <- initial.vars[!(initial.vars %in% relative.elevation)]
}
# 2.1 exclude variables with values less than 20% of being different from most common values
exclude.common.value <- fail_most_common_value(covars.process1.mon, initial.vars, thres=0.2)
# 2.2 exclude variables max landuse variable < 10%
exclude.low.landuse <- fail_low_landuse(covars.process1.mon, initial.vars, lowValue=10)
# 2.3 exclude variables with sd of cohort > 5 times sd of monitoring data
exclude.sd.ratio <- fail_sd_ratio(covars.process1.mon, covars.process1.sub, initial.vars, thres=5)
# 2.4 exclude variables more than 2% outliers in monitor sites and ppt sites combined
exclude.outlier <- fail_outlier_check(covars.process1.mon, covars.process1.sub, initial.vars, thres=0.02)
exclude.vars <- list(exclude.common.value, exclude.low.landuse, exclude.sd.ratio, exclude.outlier)
names(exclude.vars) <- c('common.value','low.landuse','sd.ratio','outlier')
include.vars <- initial.vars[!(initial.vars %in% unique(unlist(exclude.vars)))]
covars.process2.mon <- covars.process1.mon[,include.vars]
covars.process2.sub <- covars.process1.sub[,include.vars]
# 3. Output extraction
exclude.include.vars <- list(exclude.common.value, exclude.low.landuse,
exclude.sd.ratio, exclude.outlier, initial.vars, include.vars)
names(exclude.include.vars) <- c(names(exclude.vars), 'initial', 'include')
vars.count <- c(length(initial.vars), sapply(exclude.vars,length), length(include.vars))
names(vars.count) <- c('Initial',names(exclude.vars),'Include')
covars <- list(covars.mon=covars.process2.mon, covars.sub=covars.process2.sub,
exclude.include.vars=exclude.include.vars, vars.count=vars.count)
return(covars)
}
#======================================================================================================================
# Sub-functions
# 1-1-1) combine_a123_m: min distance to any roads (a1,a2,a3)
# 1-1-2) combine_a23_m: min of a2,a3 distance
# 1-1-3) combine_a23_ll: sum of a2 and a3 roads in buffers
# 1-2-1) log_transform_caline: natural log transform CALINE variables
# 1-2-2) log_transform_emission: natural log transform emission variables
# 1-2-3) log_transform_distance: natural log transform distance variables and truncate at 10 meters
# 2-1) fail_most_common.value: variables with less than <20> % being differing from the most common value
# 2-2) fail_low_landuse: land use variables (old and new) whose max value is less than <10> % in monitoring data
# 2-3) fail_sd_ratio: variables whose SD for cohort data is <5> times greater than SD for monitoirng data
# 2-4) fail_outlier_check: variables with more than <2> % outliers (Z score >5)
#=======================================================================================================================
combine_a123_m <- function(all.data, removeOrig=FALSE)
{
ma1a2a3.index <- grepl("^m_to_a[123]$", colnames(all.data))
newcol.index <- 1 + ncol(all.data)
all.data[, newcol.index] <- apply(all.data[, ma1a2a3.index], 1, min)
colnames(all.data)[newcol.index] <- 'm_to_a123'
if (removeOrig) all.data <- all.data[, !ma1a2a3.index]
return(all.data)
}
combine_a23_m <- function(all.data, removeOrig=FALSE)
{
ma2a3.index <- grepl("^m_to_a[23]$", colnames(all.data))
newcol.index <- 1 + ncol(all.data)
all.data[, newcol.index] <- apply(all.data[, ma2a3.index], 1, min)
colnames(all.data)[newcol.index] <- 'm_to_a23'
if (removeOrig) all.data <- all.data[, !ma2a3.index]
return(all.data)
}
combine_a23_ll <- function(all.data, removeOrig=FALSE)
{
a2.vars <- grep("ll_a2", colnames(all.data))
for (i in a2.vars)
{
newcol.index <- 1 + ncol(all.data)
a3.var <- grep(gsub('a2','a3',colnames(all.data)[i]), colnames(all.data))
all.data[, newcol.index] <- all.data[, i] + all.data[, a3.var]
colnames(all.data)[newcol.index] <- paste("ll_a23_", strsplit(colnames(all.data)[a3.var], '_')[[1]][3], sep="")
}
ll.vars <- grep("ll_a[^1]_s", colnames(all.data))
if (removeOrig) all.data <- all.data[, -ll.vars]
return(all.data)
}
log_transform_caline <- function(all.data, removeOrig=FALSE)
{
caline.vars <- grep("caline", colnames(all.data))
new.varnames <- c()
for (i in caline.vars)
{
newcol.index <- 1 + ncol(all.data)
all.data[, newcol.index] <- log(all.data[, i] + 0.1)
colnames(all.data)[newcol.index] <- paste('log_', colnames(all.data)[i], sep='')
}
if (removeOrig) all.data <- all.data[, -caline.vars]
return (all.data)
}
log_transform_emission <- function(all.data, removeOrig=FALSE)
{
em.vars <- grep("^em_", colnames(all.data))
new.varnames <- c()
for (i in em.vars)
{
newcol.index <- 1 + length(colnames(all.data))
all.data[, newcol.index] <- log(all.data[, i] + 0.1)
colnames(all.data)[newcol.index] <- paste('log_', colnames(all.data)[i], sep='')
}
if (removeOrig) all.data <- all.data[, -em.vars]
return (all.data)
}
log_transform_distance <- function(all.data, lowerBound=10, removeOrig=FALSE)
{
distance.vars <- grep("^m_to_", colnames(all.data))
new.varnames <- c()
for (i in distance.vars)
{
newcol.index <- 1 + ncol(all.data)
all.data[, newcol.index] <- log( sapply( all.data[, i], function(x) { max(lowerBound, x) } ) )
colnames(all.data)[newcol.index] <- paste('log_', colnames(all.data)[i], sep='')
}
if (removeOrig) all.data <- all.data[, -distance.vars]
return (all.data)
}
fail_most_common_value <- function(mon.data, vars.all, thres=0.2)
{
thres <- dim(mon.data[,vars.all])[1]*thres
fail <- apply( mon.data[,vars.all], 2, function(x) {
tmp <- split(x,x)
most.common.value <- tmp[[which.max(sapply(tmp, length))]][1]
sum(x != most.common.value, na.rm=T) } ) < thres
fail <- names(mon.data[,vars.all])[fail]
return(fail)
}
fail_low_landuse <- function(mon.data, vars.all, lowValue=10)
{
lu.vars <- grep("^rlu_|^lu", vars.all, value=T)
fail <- sapply(lu.vars, function(x) return (max(mon.data[, x]) < lowValue))
fail <- names(fail)[grep("TRUE", fail)]
return (fail)
}
# mon.data = covars.process1.mon
# cohort.data= covars.process1.sub
# vars.all = initial.vars
# thres=5
# [ probably dont need this since monitoring stops are similar to the cohort ]
fail_sd_ratio <- function(mon.data, cohort.data, vars.all, thres=5)
{
fail <- c()
for (i in vars.all) {
#i=vars.all[1]
mon.sd <- sd(mon.data[, i], na.rm=TRUE)
cohort.sd <- sd(cohort.data[, i], na.rm=TRUE)
if (cohort.sd > thres * mon.sd | all(is.na(mon.sd)))
# [ vector of variables where cohort sites are more variable than monitoring sites ]
fail <- c(fail, i)
}
return (fail)
}
fail_outlier_check <- function(mon.data, cohort.data, vars.all, thres=0.02)
{
all.data = rbind(mon.data[,vars.all], cohort.data[,vars.all])
fail <- sapply( vars.all, function(x)
return ( sum(abs(scale(all.data[, x]))> 5) > nrow(all.data)*thres ) )
fail <- names(fail)[grep("TRUE", fail)]
return (fail)
}
covariate.preprocess.for.prediction <- function(covars.all, region)
{
# 1. Preprocessing 1: recoding
# 1.1. compute min distance to any roads (a1, a2 and m3)
covars.a123D <- combine_a123_m(covars.all, removeOrig=FALSE)
# 1.2. comute and small roads (a2 and a3) and remove original variables
covars.a23D <- combine_a23_m(covars.a123D, removeOrig=TRUE)
# 1.3. comute and small roads (a2 and a3) and remove original variables
covars.a23L <- combine_a23_ll(covars.a23D, removeOrig=TRUE)
# 1.4. natural log transform CALINE, emission, and distance to sources (roads, any & large airports, coast, SML ports, commercial area, railrod/yard, oil, and cityhall)
# truncate distance variables at 10 m
# remove original variables
covars.cal <- log_transform_caline(covars.a23L, removeOrig=TRUE)
covars.em <- log_transform_emission(covars.cal, removeOrig=TRUE)
covars.process1 <- log_transform_distance(covars.em, lowerBound=10, removeOrig=TRUE)
return(covars.process1)
}
|
9eeeba2c1906a7187135fd86c73fd3cc21f934a7 | 182a2933e90a972355e3ef785ea1e45b4327f009 | /man/pred_reel_2c_bis.Rd | b0029b98c8a99e9966e2cf1553c8e0ed44adefd5 | [] | no_license | CCCelestine/siseanalytics | eac90b600766c4792c21f670be198a07c18174c8 | d9a2536371ea2497f1db13d96567d800e02a6b7f | refs/heads/main | 2023-01-31T05:21:57.970625 | 2020-12-08T21:01:26 | 2020-12-08T21:01:26 | 311,376,289 | 0 | 0 | null | null | null | null | UTF-8 | R | false | true | 520 | rd | pred_reel_2c_bis.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/pred_reel_2c_bis.R
\docType{data}
\name{pred_reel_2c_bis}
\alias{pred_reel_2c_bis}
\title{Dataset pred_reel_2c_bis}
\format{
A data frame with 357 rows and 2 variables:
\describe{
\item{val_pred}{predicted membership class variable}
\item{val_reel}{real membership class variable}
}
}
\usage{
pred_reel_2c_bis
}
\description{
A dataset containing the value of prediction and the real values, 2 different values possible
}
\keyword{datasets}
|
4e95c1f0de85ae40d1d299184bc36288fd0287e9 | bccaf9ca75d67fef6bec733e784c582149a32ed1 | /plagiat/R/jstorm2jclu.f.R | 80efa42f7affb9063a2958f5fa828fd959b6b222 | [] | no_license | brooksambrose/pack-dev | 9cd89c134bcc80711d67db33c789d916ebcafac2 | af1308111a36753bff9dc00aa3739ac88094f967 | refs/heads/master | 2023-05-10T17:22:37.820713 | 2023-05-01T18:42:08 | 2023-05-01T18:42:08 | 43,087,209 | 0 | 1 | null | null | null | null | UTF-8 | R | false | false | 3,135 | r | jstorm2jclu.f.R | #' JSTOR Master 2 Journal Cluster
#'
#' @param jstorm
#'
#' @return
#' @export
#' @import igraph data.table ggnetwork
#' @importFrom magrittr %>%
#'
#' @examples
jstorm2jclu.f<-function(jstorm,shallow=F,eng=F){
if(eng) jstorm<-jstorm[sapply(discipline,function(x) 'ENGLISH'%in%x)]
nT<-jstorm[,.N]
nP<-jstorm[,publisher_name %>% unique %>% length]
jstorm<-jstorm[title_id==title_history][sapply(discipline,length)>0]
el<-jstorm[,.(discipline=unlist(discipline) %>% unique),by=title_history] %>% as.matrix
g<-graph_from_edgelist(el)
V(g)$type<-V(g)$name %in% el[,2]
b<-g
g<-bipartite_projection(g)
if(shallow){
cw1<-g$proj1 %>% cluster_louvain()
m1<-cw1%>% {.$memberships[1,]}
names(m1)<-cw1$names
c1<-igraph:::groups.default(list(membership=m1))
cw2<-g$proj2 %>% cluster_louvain()
m2<-cw2%>% {.$memberships[1,]}
names(m2)<-cw2$names
c2<-igraph:::groups.default(list(membership=m2))
} else {
cw1<-cluster_walktrap(g$proj1)
m1<-membership(cw1)
c1<-igraph:::groups.default(list(membership=m1))
cw2<-cluster_walktrap(g$proj2)
m2<-membership(cw2)
c2<-igraph:::groups.default(list(membership=m2))
}
jclu<-list(gt=g$proj1,title_history=cw1,gd=g$proj2,discipline=cw2)
# label membership for disciplines with super
k<-sapply(c2,function(x) {
if(length(x)==1) return(x)
igraph::degree(induced_subgraph(g$proj2,V(g$proj2)$name%in%x)) %>% sort(decreasing=T) %>% head(1) %>% names
})
f2<-factor(k[m2],levels = k[order(sapply(c2,length),decreasing=T)])
V(jclu$gd)$memb<-as.character(f2)
jstormr<-jstorm[,.(discipline=discipline %>% unlist %>% unique),keyby=title_history]
jstormr<-jstormr[data.table(th=jclu$title_history$names,m=m1) %>% setkey(th)]
jstormr<-jstormr[,.N,by=.(m,discipline)] %>% setorder(m,-N)
j<-jstormr[,.(super=discipline[1],N=sum(N)),by=m][,!'m']
f1<-factor(j$super[m1])
f1<-factor(f1 %>% as.character,levels = table(f1) %>% sort(decreasing = T) %>% names)
V(jclu$gt)$memb<-as.character(f1)
w<-j[,which(duplicated(super)|duplicated(super,fromLast = T))]
if(length(w)) {
s<-jstormr[,.(super=discipline[1:2] %>% paste(collapse=' (') %>% paste0(')')),by=m][w,super]
j[w,super:=s]
}
jstormr<-j
j<-data.table(title_history=jclu$title_history$names,super=jstormr[m1,super]) %>% setkey(title_history)
tab <-table(f1) %>% data.frame %>% data.table %>% setnames(ec('super,N'))
tab[,Pct:=round(N/jstorm[,.N]*100,1)]
og<-jstorm[,sapply(tab$super,function(x) sapply(discipline,function(y) x%in%y) %>% sum)]
tab<-rbindlist(
list(tab,data.table('Total',jstorm[,.N],tab[,sum(Pct)]))
)
tab[,Labeled:=c(og,sum(og))][,LPct:=round(Labeled/jstorm[,.N]*100,1)]
# crossings
E(jclu$gd)$cross<-rev(c("within","between"))[(!crossing(jclu$discipline,jclu$gd)) + 1]
E(jclu$gt)$cross<-rev(c("within","between"))[(!crossing(jclu$title_history,jclu$gt)) + 1]
nS<-f1 %>% levels %>% length
nL<-jstorm$discipline %>% unlist %>% unique %>% length
jclu<-c(b=list(b),jclu,ft=list(f1),fd=list(f2),tab=list(tab),super=list(j))
jclu$n<-list(t=nT,j=jstorm[,sum(j)],p=nP,l=nL,s=nS,d=nL-nS)
jclu
}
|
fe434082ca94fb558489d40b105ee28078f0fdad | 223faae9458ecb676c5590e14caebca22ef34277 | /cachematrix.R | 89546004d04524d9fb7575abf6e584a8e68de2e9 | [] | no_license | hamsternik/ProgrammingAssignment2 | 7c59da78c25f057c68c3f55b80a3106289f8d082 | d10d5d2b084a21f3b0605bdd7cb5616c2db16ebb | refs/heads/master | 2020-12-25T02:25:11.828585 | 2014-07-25T19:50:49 | 2014-07-25T19:50:49 | 22,208,395 | 1 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,454 | r | cachematrix.R | ## Put comments here that give an overall description of what your
## functions do
## Write a short comment describing this function
### In this function we gain a put (or don't) a matrix
### And have a four "public" methods - special functions for next working with data
### func. "setMatrix" set a matrix if it doesn't gain in func.arguments
### func. "getMatrix" - print our matrix
### func. "setSolveMatrix" - take a "inversible" matrix
### func. "getSolveMatrix" - print input "inversible" matrix
makeCacheMatrix <- function(x = matrix()) {
m <- NULL
setMatrix <- function(NewMatrix) {
x <<- NewMatrix
m <<- NULL
}
getMatrix <- function() x
setSolveMatrix <- function(SolveMatrix) m <<- SolveMatrix
getSolveMatrix <- function() m
list(setMatrix = setMatrix, getMatrix = getMatrix, setSolveMatrix = setSolveMatrix, getSolveMatrix = getSolveMatrix)
}
## Write a short comment describing this function
### In this func. we take our object x (inctance, if talk with OOP concepts)
### We check our solve matrix - does it exist or not
### and give a message if we caching our solve matrix
### or not and find a solve matrix, after that caching it
cacheSolve <- function(x) {
m <- x$getSolveMatrix()
if (!is.null(x$getSolveMatrix())) {
message("getting cached data")
return(m)
}
Matrix <- x$getMatrix()
m <- solve(x$setSolveMatrix(Matrix))
print(m)
}
|
a186130ef57b129b1343d90211d2bc0d3fa80b56 | 9ab9a9d6888dccdbe54ade2df6089bafb674fb7b | /K-nn Algorithm/1.k -NN Algorithm (Prostate_Cancer data).R | 7c57fd53894c1312811d53d086881d649cbd3e8b | [] | no_license | anuradhaschougale18/RProgramming | 3c064947f8750d7ea19385eaf7ac43490179f949 | 07f5ad689a2873b602f9a00ef86a887213e01d78 | refs/heads/master | 2022-11-21T04:02:08.549229 | 2020-07-17T05:22:10 | 2020-07-17T05:22:10 | 280,327,114 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 2,837 | r | 1.k -NN Algorithm (Prostate_Cancer data).R | ############## Data Import #######################
prc<-read.csv("F:/R and Data Science/Knn/Prostate_Cancer.csv")
############## Data type and column names #######################
str(prc)
names(prc)
table(prc$diagnosis_result) # it helps us to get the numbers of patients
############## Removing irreleavent column and #######################
prc <- prc[-1] #removes the first variable(id) from the data set
#prc$diagnosis <- factor(prc$diagnosis_result, levels = c("B", "M"), labels = c("Benign", "Malignant"))
#prop.table(table(prc$diagnosis))*100 # it gives the result in the percentage form rounded of to 1 decimal place( and so it's digits = 1)
############## Data Normalization #######################
# it bring every call under the range of to 1
normalize <- function(x) {
return ((x - min(x)) / (max(x) - min(x))) }
summary(prc$radius)
prc_n <- as.data.frame(lapply(prc[2:9], normalize))
summary(prc_n$radius)
summary(prc_n$texture)
##############333#Data Partition#789#######################
prc_train <- prc_n[1:65,]
prc_test <- prc_n[66:100,]
prc_train_labels <- prc[1:65, 1]
prc_test_labels <- prc[66:100, 1]
############## Model Building(method 1 when you take a sqrt of n for k) #######################
library(class)
# Manual method k =10 is knn value
prc_test$prc_test_pred <- knn(train = prc_train, test = prc_test,cl = prc_train_labels, k=10)
############## Model Performance on testing data #######################
library(e1071)
library(caret)
confusionMatrix(prc_test$prc_test_pred,prc_test_labels)
############## method 2 using k fold cross validation for value of k #######################
##############333#Data Partition#789#######################
set.seed(123)
library(caret)
Train <- createDataPartition(prc$diagnosis_result, p=0.7, list=FALSE)
training <- prc[ Train, ]
testing <- prc[ -Train, ]
############## Model Building ##############
# cv= k fold cross validation and number (k)=10
# tunelength means different value of k and try
# if tunelength is 15 then different 15 k values
#method = "center"(x-mean(x))
#method = "scale" (x-mean(x))/standard deviation.
library(e1071)
trctrl <- trainControl(method = "repeatedcv", number = 10, repeats = 3)
knn_fit <- train(diagnosis_result ~., data = training, method = "knn",
trControl=trctrl,
preProcess = c("center", "scale"),
tuneLength = 15)
############ To get the Best Model ############
knn_fit
plot(knn_fit)
############## Model Prediction testing data #######################
testing$predicted <- predict(knn_fit, newdata = testing)
############## Model Performance on testing data #######################
confusionMatrix(testing$predicted, testing$diagnosis_result )
|
2469cf0f47ddc37023b9049cba9c2111c86df887 | 72e5b0ecf11c05a04880c86743ca06efb4234339 | /man/make_filename.Rd | 379d16cb2df57ee34a4380fdaa52f57ed0bddc2e | [] | no_license | 00mathieu/FarsExample | fd8c18db83ef420d6cc042a19b8e48ef75c8cfa4 | 524017b8b832b482041b7b15175d20cab004f7ae | refs/heads/master | 2021-05-16T00:00:46.785573 | 2017-10-15T19:22:18 | 2017-10-15T19:22:18 | 106,901,541 | 2 | 1 | null | null | null | null | UTF-8 | R | false | true | 463 | rd | make_filename.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/fars_functions.r
\name{make_filename}
\alias{make_filename}
\title{print filename}
\usage{
make_filename(year)
}
\arguments{
\item{year}{An integer year (2013)}
}
\value{
This function returns a string that
corresponds to the name of a csv
}
\description{
This is a function takes a year integer \code{year} (eg 2013)
and returns the name of the csv
}
\examples{
make_filename(2013)
}
|
74a073b0e7e75d0906c868476f31aeada2d13d1b | 9d9e2cd081ebc4f6bb6da5511a69e8dc40496576 | /TextPredict_tau.R | 4a6537212132c3ae331ffa3b702d8ca8025b50c1 | [] | no_license | wenlarry/freakCode | 3f746cce37bfb875063ac02e1bf38bd4a001a52f | 33b5db996966865d052ba0d1004472e7cc2018d2 | refs/heads/master | 2021-01-02T22:57:11.126810 | 2020-08-26T12:09:36 | 2020-08-26T12:09:36 | 99,430,858 | 1 | 0 | null | null | null | null | UTF-8 | R | false | false | 695 | r | TextPredict_tau.R |
# Use tau to test prediction from the Quiz 2 of Capstone
# Initial Results 5/10
# Codes from StackOverflow
library(tau)
f <- function(queryHistoryTab, query, n = 2) {
Dfm<-sort(textcnt(rep(tolower(names(queryHistoryTab)), queryHistoryTab), method = "string", n = length(scan(text = query, what = "character", quiet = TRUE)) + 1))
query <- tolower(query)
idx <- which(substr(names(Dfm), 0, nchar(query)) == query)
res <- head(names(sort(Dfm[idx], decreasing = TRUE)), n)
res <- substr(res, nchar(query) + 2, nchar(res))
return(res)
}
f(c("but the crowd"=4,"but the players"=3, "but the defense"=2,
"but the referee" =1),"but the")
|
8afe8a326ffa712b3d1089903d13912d853e6339 | 4971a12bfeb53c5efbc5d4bf5c2b9f8ac14108c1 | /man/new_ezmmek_act_group.Rd | 59b096d422cf8998de46454681814300fc41cf08 | [] | no_license | ccook/ezmmek | 17c2f2da665186256f79e77ec72b727dbdedde23 | d4e64da3197bcf54891ec16ce7ca1ae0de61d5a4 | refs/heads/master | 2020-09-13T11:03:44.422026 | 2020-08-14T15:47:18 | 2020-08-14T15:47:18 | 222,752,358 | 3 | 2 | null | 2020-09-10T22:37:53 | 2019-11-19T17:31:02 | R | UTF-8 | R | false | true | 926 | rd | new_ezmmek_act_group.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/new_ezmmek_act_group.R
\name{new_ezmmek_act_group}
\alias{new_ezmmek_act_group}
\title{new_ezmmek_act_group}
\usage{
new_ezmmek_act_group(act.data.fn, ..., method = NA, columns = NULL)
}
\arguments{
\item{act.data.fn}{Activity data file as character string}
\item{...}{User defined column names to join std.data.fn and act.data.fn}
\item{method}{Enzyme assay protocol. Must define method as 'isc' or 'ibc'}
\item{columns}{Column names carried over from parent functions if parent functions used}
}
\description{
Groups raw activity data by user-defined columns
}
\examples{
\dontrun{new_obj <- new_ezmmek_act_group("data/tyson_sat_steen_04172020.csv,
site_name,
std_type,
method = "isc",
columns = NULL)
new_obj <- new_ezmmek_act_group("data/tyson_sat_german_04172020.csv,
site_name,
std_type,
method = "ibc",
columns = NULL)}
}
|
216316c80c84b3eb711466e6f97c341f7f43a964 | 7ca0669150b015a1e2251f2aded86a1cea67eb9b | /plot2.R | 2436360756f2a31664826323667678564713bbd3 | [] | no_license | kechengxu/ExData_Plotting1 | ac077dd904d970526791eacba267aaac8aab76e5 | 95e3d160ef35ca58b52e93eb3b54ab5b42e6136b | refs/heads/master | 2021-01-15T18:31:57.501671 | 2014-07-12T22:01:49 | 2014-07-12T22:01:49 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 621 | r | plot2.R | ## load the dataset
setwd("~/Desktop/Coursera/4-graphics/project 1")
colclasses=c("character","character",rep("numeric",7))
data<-read.csv("household_power_consumption.txt",colClasses=colclasses,na.strings=c("?"),sep=";")
data<-subset(data,data$Date %in% c("1/2/2007","2/2/2007"))
## merge Date and Time
x<-NULL
for (i in 1:length(data$Time)) {
x<-c(x,paste(data$Date[i],data$Time[i],sep=" "))
}
## plot and save the figure
x<-strptime(x,"%d/%m/%Y %H:%M:%S")
y<-data$Global_active_power
plot(x,y,xlab="",ylab="Global Active Power (kilowatts)",type="l")
dev.copy(png,file="plot2.png",width=480,height=480)
dev.off()
|
3fcea922866b7830f4efeacb3716e649d415629f | c5c6e96ffdc9a52cdf7eba542f64fc149a9d7f4b | /R/Old/run_everything.R | 55786f8a27b096ff2692d531d3c03441c3f36b51 | [
"MIT"
] | permissive | kravitz-eli/prssMixture | e09ac2dc6d3174a445319f5ca8930b9f56039fad | 5b50cc7fb74d9d78a0fbe443a603fa96273e53d7 | refs/heads/master | 2020-12-05T16:16:14.879353 | 2020-02-07T15:05:46 | 2020-02-07T15:05:46 | 232,169,920 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 2,437 | r | run_everything.R | library("here")
source(here("R", "posterior_mixture.R"))
source(here("R", "get_mixing_prop.R"))
source(here("R", "make_cov_matrix.R"))
source(here("R", "normal_normal_conjugate.R"))
# Set parameters in bivariate normal likelihood -------------------------------
likelihood_params = list(
y_obs = log(c(0.76, 0.84)),
sigma_L_11 = 1 / 48 + 1/ 37,
sigma_L_22 = 1/31 + 1/32,
rho_L = 0.55
)
#
# # Make covariance matrix from variance and covariance terms
# likelihood_params[["sigma_L"]] = with(likelihood_params,
# make_cov_matrix(sigma_L_11, sigma_L_22, rho_L))
# Set prior parameters ---------------------------------
# Mixing proportion
p_mix = 0.50
# First component
comp1_prior_params = list(
mu_P = log(c(0.68, 0.74)),
sigma_P_11 = 0.2,
sigma_P_22 = 0.2,
rho_P = 0.30
)
# # Make covariance matrix from variance and covariance terms
# comp1_prior_params[["sigma_P"]] = with(comp1_prior_params,
# make_cov_matrix(sigma_P_11, sigma_P_22, rho_P))
# Second component
comp2_prior_params = list(
mu_P = log(c(1, 1)),
sigma_P_11 = 1,
sigma_P_22 = 1,
rho_P = 0.5
)
# # Make covariance matrix from variance and covariance terms
# comp2_prior_params[["sigma_P"]] = with(comp2_prior_params,
# make_cov_matrix(sigma_P_11, sigma_P_22, rho_P))
# Get posterior parameter
post_closed = posterior_mixture(likelihood_params,
p_mix,
comp1_prior_params,
comp2_prior_params)
# Run JAGS model to test against
library("rjags")
source(here("R", "normal_mixture_jags_no_cor_prior.R"))
jags_results = normal_mixture_jags(
likelihood_params,
p_mix = p_mix,
comp1_prior_params,
comp2_prior_params
)
# See if posterior parameters match JAGS --------------
# Get posterior parameters
p_mix_post = post_closed$p_mix_post # Mixing param
mu_1_post = post_closed$mu_post_1 # Mean of first component
mu_2_post = post_closed$mu_post_2 # Mean of second component
mu_comb_post = mu_1_post * p_mix_post + (1 - p_mix_post) * mu_2_post # Combined mean
sigma_mix_post = p_mix_post^2 * post_closed$sigma_post_1 + #Combined variance
(1 - p_mix_post)^2 * post_closed$sigma_post_2
# Compare closed form to JAGS
error_p_mix = p_mix_post - jags_results$p_mix_post
error_mu = mu_comb_post - jags_results$mu_post
error_sigma = sigma_mix_post - jags_results$sigma_post
|
7087fc7bb3fe7682b41d4714d1439d6f4125ca23 | 700d8121a4e3a9fc4c31e015db643758cb843569 | /inst/registered/UCSC_genomes/mpxvRivers.R | 59a04773ed47f40f2849e73a016dbbf0eba091f8 | [] | no_license | Bioconductor/GenomeInfoDb | 727c90f03c289f692999860a12077775f4d65317 | 9dba03f8d2a4f76732e2b12beac7c0ee3230a693 | refs/heads/devel | 2023-08-09T21:33:11.074781 | 2023-06-20T21:40:39 | 2023-06-20T21:40:39 | 102,149,975 | 14 | 15 | null | 2023-03-13T17:45:24 | 2017-09-01T20:19:20 | R | UTF-8 | R | false | false | 569 | r | mpxvRivers.R | GENOME <- "mpxvRivers"
ORGANISM <- "Monkeypox virus"
ASSEMBLED_MOLECULES <- "NC_063383.1"
CIRC_SEQS <- character(0)
library(GenomeInfoDb) # for fetch_chrom_sizes_from_UCSC()
FETCH_ORDERED_CHROM_SIZES <-
function(goldenPath.url=getOption("UCSC.goldenPath.url"))
{
chrom_sizes <- GenomeInfoDb:::fetch_chrom_sizes_from_UCSC(GENOME,
from="bigZips",
goldenPath.url=goldenPath.url)
stopifnot(identical(chrom_sizes[ , "chrom"], ASSEMBLED_MOLECULES))
chrom_sizes
}
|
97b2e2ed5dc2026c0a81b3e56a070b3afd192951 | de8a96754e8aae0a8c05dc471ef9dbecf5d4b938 | /Code/ReadDB.R | 38883c23077aedd9e987d3b53088e50fb2746c60 | [] | no_license | sumesh1/Time_series_analysis_R | f892247ccef7ec957207e729aa8f18fae1a7dbea | f4dd94c9c411159c88f974a0d8cbd5c169137118 | refs/heads/master | 2022-11-16T14:43:05.250776 | 2020-07-03T08:21:11 | 2020-07-03T08:21:11 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,203 | r | ReadDB.R | library(BETS)
#Possible values for the parameter src
#IBGE
#Brazilian Institute of Geography and Statistics
#BCB
#Central Bank of Brazil
#FGV
#Getulio Vargas Foundation
#FGv-IBRE
#Getulio Vargas Foundation - Brazilian Institute of Economics
#BCB e FGV
#Central Bank of Brazil and Getulio Vargas Foundation
#BCB-Deban
#Cetral Bank of Brazil - Department of Banking and Payments
#BCB-Depin
#Central Bank of Brazil - Department of International Reserves
#BCB-Derin
#Central Bank of Brazil - Department of International Affairs
#BCB-Desig
#Central Bank of Brazil - Department of Financial Monitoring
#BCB-Secre
#Central Bank of Brazil - Executive Secretariat
#BCB-Demab
#Central Bank of Brazil - Department of Open Market Operations
#BCB-Denor
#Central Bank of Brazil - Department of Economics
#Sisbacen
#Central Bank of Brazil Information System
#Possible values for the parameter periodicity:
#A - anual data
#M - monthly data
#Q - quaterly data
#W - weekly data
SearchTimeSeries<-function(orignPlace,per){
aux = BETS.search(src = orignPlace,periodicity = per)
print(aux)
}
GetTimeSeries<-function(myCode){
ts = BETS.get(code = myCode)
ts
} |
c9e6a81d0ace623bf9d9b31304b73636f04a44bb | 082a3d044fa581dcaeca960b3c8e9a183b4de359 | /Plot_Fisheries_Summaries.r | 57243c3aa133d4167cd9c24c4e3259ca1484760a | [] | no_license | tetemouton/Input-plots | 881437a0f30b28cd7d891adb04f56822a52b71d9 | 1d92b66a378d237c3d7c23ef550a0ff6e1d7c48c | refs/heads/master | 2020-12-29T02:37:20.978036 | 2017-03-01T22:32:05 | 2017-03-01T22:32:05 | 53,087,204 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 8,640 | r | Plot_Fisheries_Summaries.r | library(splines)
library(scales)
library(ggplot2)
library(MASS)
library(reshape2)
library(grid)
library(ggmap)
library(plyr)
library(maps)
library(mapproj)
library(magrittr)
library(dplyr)
# Set the drive that is being worked off: if on my computer work off the C:/, else work off penguin
drv <- ifelse(Sys.info()["nodename"] == "SPC122114", "C:/skj/2016", "//penguin/assessments/skj/2016")
setwd(paste0(drv, "/assessment/Data_preperation/Fisheries_Structure"))
# Plotting helpers used below (plot.fishery, Catch.Plot, Sz.N.Plot, ...) come from support.functions.r
#source("support.functions.r")
source("C:/Users/SamM/Desktop/GitHub_Desk/Input-plots/Input-plots/support.functions.r")
theme_set(theme_bw())
setInternet2(TRUE) # Sometimes permissions prevent generating the map unless this is set
# Get a satellite base map (centred on 160E/15N) used for plotting the extent of the individual fisheries
reg.map <- get_map(location = c(160,15), zoom = 3, maptype = 'satellite')# maptype = 'roadmap')
# Need to define the subregions for each fishery unfortunately - will try to automate via extraction of regions from muffie at some stage
reg_defs <- read.csv("Assessment_Region_Coordinates_SKJ.csv", header=TRUE, stringsAsFactors=FALSE)
# reg_defs <- read.csv("Assessment_Region_Coordinates_SKJ_alt.csv", header=TRUE, stringsAsFactors=FALSE)
regKeep <- reg_defs$MufArea
# List below explicitly links the fisheries to the regions they are in. Note some in R7 don't cover the whole region so have to state their subregions
#fsh.dat <- data.frame(reg = c(1,1,1,2,2,2,2,5,5,5,5,3,3,3,3,4,4,4,4,4,4,4,4))
fsh.reg <- c(1,1,1,2,2,2,2,5,5,5,5,3,3,3,3,4,4,4,4,4,4,4,4)
# fsh.reg = c(1,1,1,2,2,2,2,3,3,3,3,4,4,4,4,4,4,4,4,5,5,5,5,6,6,6,7,7,7)
# Muffie subregions highlighted for each fishery, looked up from its assessment region
# NOTE(review): seq_along(fsh.reg) would be safer than 1:length(fsh.reg) if the vector were ever empty
regHigh <- lapply(1:length(fsh.reg), function(x) reg_defs$MufArea[fsh.reg[x] == reg_defs$reg])
# Use labels.tmp to provide plot titles ("F<number> <label>" per fishery)
tmp.titles <- read.table("labels.tmp", header = FALSE, sep = ".")
# tmp.titles <- read.table("labels_alt.tmp", header = FALSE, sep = ".")
Ftit <- paste0("F", tmp.titles[,1], " ", gsub("\t", "", tmp.titles[,2]))
# Exclusion lists: fleets to drop from the size data per fishery (currently none excluded)
# There are some cases where size data exists for a fleet that does not have catch in that fishery - might be a scale problem, data entry etc. etc. - all very minor so removed
# alb.exc = list(F1 = 'NONE', F2 = 'NONE', F3 = 'NONE', F4 = 'NONE', F5 = 'NONE', F6 = 'NONE', F7 = 'NONE', F8 = 'NONE', F9 = 'NONE', F10 = 'NONE',
#                F11 = 'NONE', F12 = 'NONE', F13 = 'NONE', F14 = 'NONE', F15 = 'NONE', F16 = 'NONE', F17 = 'NONE', F18 = 'NONE', F19 = 'NONE', F20 = 'NONE',
#                F21 = 'NONE', F22 = 'NONE', F23 = 'NONE', F24 = 'NONE', F25 = 'NONE', F26 = 'NONE', F27 = 'NONE', F28 = 'NONE', F29 = 'NONE', F30 = 'NONE',
#                F31 = 'NONE', F32 = 'NONE', F33 = 'NONE')
fsh.exc <- as.list(rep("NONE", length(fsh.reg)))
# Define a couple of species specific parameters (model stem, fishery indices, length range in cm, year span)
mod="SKJ_1_detail"
# mod="SKJ_3_detail"
# mod="SKJ_5_detail"
fisheries=1:length(fsh.reg)
Lrange=c(2,110)
fyr=1975
lyr=2015
# Structure all the data
cfile = paste(mod, "_CE.csv", sep='') # Defines the name of the file containing the catch data for the different fisheries
lfile = paste(mod, "_LF.csv", sep='') # Defines the name of the file containing the length data for the different fisheries
Cdat = read.csv(cfile, header=TRUE) # load catch data
# NOTE(review): both plyr and dplyr are attached above; dplyr is loaded last so dplyr::rename (new = old) is the one used here
Cdat = rename(Cdat, yr=yy) # rename variables to make things easier for functions later on
Cdat = ExpandDat(dat=Cdat, year.lab="yr", fisheries=fisheries, first.yr=fyr, last.yr=lyr) # calls function that cleans and reshapes the data
Ldat = read.csv(lfile, header=TRUE) # Loads length data
Ldat = ExpandDat(dat=Ldat, year.lab="yr", fisheries=fisheries, first.yr=fyr, last.yr=lyr) # calls function that cleans and reshapes the data
Ldat$len = as.numeric(as.character(Ldat$len))
Ldat = Ldat[Ldat$len >= Lrange[1] & Ldat$len < Lrange[2],] # culls data outside size range used in assessment
Ldat = rename(Ldat, freq = lf_samples)
L.avail = ifelse(is.na(match(fisheries, Ldat$fsh))=='TRUE',0,1) # creates variable which indicates the availability of data, prevents crashing if there's no data
# Defines some colours for the different fleets
# The list below started as just a few fleets for LL, but increased and increased - should have just chosen a palette in retrospect...
# This list sets the colours for the shitload of fleets in the dataset, if two major fleets are too similar delete one between the two fleets and it will change one of them
fullist = c('#FFCC00', '#330099', '#CCFF99', '#000000', '#FF00CC', '#FFFF00', '#FFFFCC', '#99CC99', '#33FF00', '#FF6600', '#3366FF', '#663333', '#6699FF', '#9966CC',
            '#990000', '#CCFFCC', '#003200', '#CCFFFF', '#3333FF', '#666666', '#FF6600', '#99FF33', '#CC00FF', '#660000', '#FF6699', '#3300CC', '#00CCFF', '#00FF99', '#CCCC33', '#FFFF66',
            '#9999FF', '#330000', '#6633FF', '#FF99FF', '#0066FF', '#6600FF', '#333366', '#99FF00', '#33FF66', '#FF9933', '#CCCC00', '#FF00FF', '#FF0066', '#993300', '#999999', '#333333',
            '#CC0000', '#FF3366', '#CCCC66', '#CCCC99', '#669933', '#3399FF', '#99CCFF', '#00CC66', '#009966', '#33FF99', '#0099FF', '#0033FF', '#CC9966', '#CC6600', '#CC9999', '#CCFF33', '#FF9999', '#FFCCCC',
            '#9933FF', '#FFFF33', '#FFFF99')
collist = fullist[1:length(unique(c(Cdat$FlgFlt,Ldat$FlgFlt)))] # Pulls out the actual number of colours needed based on the fleets in all the data
names(collist) = sort(unique(c(Cdat$FlgFlt,Ldat$FlgFlt))) # Gives the colours names so that the fleets can be matched to a colour and the colour can be kept consistent between plots
# The actual function that does it all and produces the plots
# Build and save the composite summary figure for a single fishery.
#
# Args:
#   fishery       index of the fishery (1..length(fsh.reg))
#   prelim.filenm path prefix for the saved png; the fishery number is appended.
#                 (The previous default repeated the "/assessments/skj/2016"
#                 segments already contained in drv; it now matches the call
#                 sites below.)
#
# Side effects: opens a windows() device, draws up to a 3x2 grid of plots and
# saves it as <prelim.filenm><fishery>.png before closing the device.
generate.fsh.plot = function(fishery=1, prelim.filenm=paste0(drv, "/assessment/Data_preperation/Fisheries_Structure/5Reg/FshPlot_F"))
{
  Fsh.plot <- plot.fishery(reg.keep=regKeep, reg.highlight=regHigh, fishy=fishery, reg_defs=reg_defs) # makes the map of the fishery
  C.plot <- Catch.Plot(dat=Cdat, fishery=fishery, collist=collist, all.yrs = 1970:2015, brwidth = 1, Fsh.title = Ftit[fishery]) # makes the plot of the number or weight of the fish caught
  if(L.avail[fishery] == 1) # if there are length samples then plot the number of samples and the median size of the samples
  {
    L.N.tab <- create.N.tab(dat=Ldat, fishery=fishery, exc.list=fsh.exc[[fishery]], all.yrs=seq(1970,2015,1), by_yr=TRUE)
    L.N.plot <- Sz.N.Plot(tab.dat=L.N.tab, y.lab="No. fish measured", collist=collist, xbrks=seq(1970, 2010, 10), brwidth = 1)
    L.Med.tab <- create.Med.tab(dat=Ldat, sz.lab="len", fishery=fishery, exc.list=fsh.exc[[fishery]], all.yrs=seq(1970,2015,1), by_yr=TRUE)
    L.Med.plot <- Sz.Med.Plot(tmp.med=L.Med.tab, y.lab="Median length (cm)", collist=collist)
    size.plt <- size.dist.plot(dat=Ldat, sz.lab="len", fishery=fishery, exc.list=fsh.exc[[fishery]], all.yrs = seq(1970, 2010, 10), by_yr=TRUE, rnding=2)
  }
  # Set up full plot: 3 rows by 2 columns
  windows(2000,1400)
  pushViewport(viewport(layout = grid.layout(3,2)))
  print(C.plot, vp = viewport(layout.pos.row=1, layout.pos.col=1)) # put the catch plot in row 1 column 1 etc. etc.
  print(Fsh.plot, vp = viewport(layout.pos.row=1, layout.pos.col=2))
  if(L.avail[fishery] == 1)
  {
    print(L.N.plot, vp = viewport(layout.pos.row=2, layout.pos.col=1))
    print(L.Med.plot, vp = viewport(layout.pos.row=2, layout.pos.col=2))
    # size.plt only exists when there are length samples; printing it
    # unconditionally (as before) errored - or reused a stale plot from a
    # previous call - for fisheries with no length data
    print(size.plt, vp=viewport(layout.pos.row=3, layout.pos.col=1:2))
  }
  savePlot(paste0(prelim.filenm, fishery),type='png')
  dev.off()
}
# Main model: one composite figure per fishery, saved under 5Reg/
for(i in 1:length(fsh.reg)) generate.fsh.plot(i, prelim.filenm=paste0(drv, "/assessment/Data_preperation/Fisheries_Structure/5Reg/FshPlot_F"))
# Alternative plots to check alternative fisheries assumptions - LL fisheries, R2 pole and line and R4 fisheries
# NOTE(review): these alternative runs only change the output folder; presumably the
# fishery definitions above are edited between runs - confirm before rerunning blindly.
for(i in 1:length(fsh.reg)) generate.fsh.plot(i, prelim.filenm=paste0(drv, "/assessment/Data_preperation/Fisheries_Structure/5Reg_Alt/FshPlot_F"))
# Alternative spatial structure plots
for(i in 1:length(fsh.reg)) generate.fsh.plot(i, prelim.filenm=paste0(drv, "/assessment/Data_preperation/Fisheries_Structure/7Reg_Alt/FshPlot_F"))
|
9fcee54a97262de9a68595dc7c6b8e53dbc5429a | c62ba61de7cc64d6d1b66b14105a67d9ad0be70e | /old_scripts/other_things.R | 98ee32b5d1ad0e3f8624b38fedc975887085f89e | [] | no_license | CescMateu/msf-datathon | d6ae5496720d89cc348672b293c890bd331a2ede | cbc695348e3d0c74f0e472a6c2fd233f6423f330 | refs/heads/master | 2020-08-08T06:14:10.244726 | 2019-10-10T09:39:45 | 2019-10-10T09:39:45 | 213,750,368 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,281 | r | other_things.R | rm(list = ls())
gc()
# raw interactions table (data.table via fread)
a <- fread('processed_data/MSF_SometablesW.csv')
# rename columns whose accented characters arrived as raw \x escapes
setnames(a, 'Descripci\xf3n', 'Descripcion')
setnames(a, 'GRUPO SEGMENTACI\xd3N', 'GrupoSegmentacion')
# drop columns not needed downstream (:= NULL deletes by reference)
a[, Descripcion := NULL]
a[, FECHA_INTERACCION := NULL]
a[, GrupoSegmentacion := NULL]
a[, V1 := NULL]
a[, MONTH_str := NULL]
a[, FECHA_PROC := NULL]
cols_factor <- c('IDMEDIO', 'IDCAMPANYA', 'CODIGO', 'IDCANAL', 'NUEVA_APORTACION', 'CAMBIOCUOTA',
                 'IDTIPOCOMUNICACION', 'GRUPO_MEDIO', 'GRUPO_CAMPANYA', 'ID_REGISTRO_AUMENTO',
                 'ID_REGISTRO_DECREMENTO')
# categorical codes -> factor -> the factor's integer codes as numeric
a[, (cols_factor):= lapply(.SD, as.factor), .SDcols=cols_factor]
a[, (cols_factor):= lapply(.SD, as.numeric), .SDcols=cols_factor]
# print the proportion of missing values in each column
for (c in colnames(a)) {
  na_prop <- a[, sum(is.na(.SD))/nrow(a), .SDcols = c]
  print(paste0(c, ':', na_prop))
}
# Mailings table: load, standardise the column names, write back, then report NA rates.
a <- fread('processed_data/mailings.csv')
setnames(a, colnames(a), c('IDMIEMBRO', 'V1_VARIABLE_MAILING_SERGI', 'IDVERSION', 'IND_IMPACTE_POSTAL',
                           'IND_IMPACTE_EMAIL', 'OFERTA1', 'OFERTA2', 'OFERTA3'))
# fwrite() returns NULL invisibly; the original assigned its result back to `a`,
# wiping the table and breaking the NA-proportion loop below.
fwrite(x = a, file = 'processed_data/mailings.csv', sep = ';')
# print the proportion of missing values in each column
for (c in colnames(a)) {
  na_prop <- a[, sum(is.na(.SD))/nrow(a), .SDcols = c]
  print(paste0(c, ':', na_prop))
}
a <- fread('processed_data/final_dataset.csv')
|
60b84f1c9421e07f596f462d993ea15758d4159b | 1b56f1658eb6019aa2e31de31aeff2080be7158b | /plot2.R | 31f5c3061867008b58ebf333d048ee015ddbdaad | [] | no_license | gautamconnect/ExData_Plotting1 | 3e2be54f68c12bdd8093f63c86393e08c5c91e1e | ed46f47ef4cdb61fc8bfd702dc62c36cf4d4a643 | refs/heads/master | 2020-12-25T22:32:02.350205 | 2015-01-11T09:12:37 | 2015-01-11T09:12:37 | 29,085,350 | 0 | 0 | null | 2015-01-11T08:03:34 | 2015-01-11T08:03:34 | null | UTF-8 | R | false | false | 262 | r | plot2.R | #Get data
# Plot 2: Global Active Power over time, rendered straight to plot2.png.
# get_data.R prepares power_data_df (with Date_time and Global_active_power).
source('get_data.R')
# Open the PNG device, draw the line chart, then close the device.
png(filename='plot2.png')
with(power_data_df,
     plot(Date_time, Global_active_power,
          type='l',
          xlab='',
          ylab='Global Active Power (kilowatts)'))
dev.off()
841cec877e87a9eaae4f564ce5aa9c056a7d9d2a | c3cf36095797ad2538e98091cec1610ed39a180f | /Optimization in Finance/Homework 5/hw5_eaj628.R | c93ea5b3973dcd76ebb39d195a6db0c112e64952 | [] | no_license | evanaj12/R_and_other_examples | 4f1440dfb4989e7503e8631650619beba1eae34c | baa2a39a2b5b9a4cec5034d8e54a6a277a5ca134 | refs/heads/master | 2021-01-09T21:46:58.137191 | 2017-02-14T18:13:08 | 2017-02-14T18:13:08 | 48,967,571 | 1 | 0 | null | null | null | null | UTF-8 | R | false | false | 4,175 | r | hw5_eaj628.R | # Homework 5
# Evan Johnston, eaj628
#################################
#Problem 1
# create probability values for p: the per-game win probability, 0..1 in 0.01 steps
p<-seq(0,1,0.01)
# create probability function (see report)
# P(a best-of-seven series is decided within 6 games), as a polynomial in the
# per-game win probability p (derivation in the report). Vectorised over p.
prob <- function(p) {
  series_prob <- 2*p**6 - 6*p**5 + 6*p**4 - 2*p**3 + 3*p**2 - 3*p + 1
  series_prob
}
# plot the curve of prob(p) and mark the p = 0.5 point
plot(p, prob(p), type="l", xlab="Probability Values for P", ylab="Prob. of Series Ending Within 6 Games", main="Problem 1")
points(0.5, prob(0.5), pch=19)
# NOTE(review): par(lty=2) here is evaluated for its side effect (dashed lines
# become the global default), not as an argument abline consumes - confirm intended.
abline(v=0.5, untf=F, par(lty=2))
abline(h=prob(0.5), untf=F, par(lty=2))
# note that if the average probability of winning is 0.5, then the probability of the series ending within 6 games is 0.21875, the minimum of the generated curve.
prob(0.5)
#########Class code###############
# Monte Carlo check of the series probability (class code).
N <- 10000
# preallocate the results vector; the original `vec<-c(0,N)` built the
# length-2 vector c(0, 10000) and relied on R silently growing it in the loop
vec <- numeric(N)
for (i in seq_len(N)){
  #gen rv: generates 7 random values [0,1], uses logical operator, and converts to number
  wins<-(runif(7)>0.5)+0
  #sample(10,10) #generates 10 random numbers between 0 and 10
  # sim model: TRUE when more than 4 of the 7 games are won.
  # (In the original `sum(wins)>4+0` the `+0` bound to the literal 4 and did
  # nothing; mean() treats the logical as 1/0 either way.)
  count <- sum(wins) > 4
  #keep track
  vec[i] <- count
}
print(mean(vec))
#alternatively
mean(rowSums((matrix(runif(70000), 10000, 7)>0.5)+0)>4)
# gives row sum of 70,000 rvs, which is 7 games of 10,000 each. The entire simulation in a single line.
# notice this is equivalent to:
mean(rbinom(10000,7,0.5)>4)
#Thus in all methods we see a probability of about 0.22 for the series ending before the 7th game
#################################
#Problem 2
# number of trials
N<-1000
# number of tickets sold, 30 to 60.
sold<-seq(30,60,1)
# initialize revenue matrix: one row per ticket level; column 1 holds the
# tickets-sold value, columns 2..N+1 the simulated revenues
all_revenues<-matrix(c(rep(0,(length(sold)*N+length(sold)))), ncol=N+1)
all_revenues[,1]<-sold
# initialize revenue means matrix (column 1 = tickets sold, column 2 = mean revenue)
all_revenues_means<-matrix(c(rep(0,length(sold)*2)), ncol=2)
all_revenues_means[,1]<-sold
# function to calculate revenue from number of actual passengers
# Ticket revenue given the number of passengers who actually show up.
# Each passenger pays $10; the bus seats 40, and every passenger bumped
# beyond capacity costs $25.
# Note: the original used `showed %% 40`, which only equals the overbooked
# count (showed - 40) while showed < 80; the subtraction is correct for any
# overbooking level and is identical on the 30..60 range used below.
revenue <- function(showed) {
  if (showed <= 40) {
    return(10 * showed)
  }
  40 * 10 - (showed - 40) * 25
}
# loops for each number of tickets sold
for (i in 1:length(sold)){
  # random distribution of N trials with 0.9 probability of sold[i] number of customers actually riding the bus.
  showed<-rbinom(N,sold[i],0.9)
  # initialize revenues vector for this sold[i]
  revenues<-c(rep(0,N))
  # keeps revenue values
  for (j in 1:length(showed)){
    rev<-revenue(showed[j])
    revenues[j]<-rev
  }
  # stores this iteration of sold[i] in revenue matricies
  all_revenues[i,2:(N+1)]<-revenues
  all_revenues_means[i,2]<-mean(revenues)
}
# shows the mean of all tested revenues
plot(all_revenues_means, type="l", xlab="Tickets Sold", ylab="Revenue", main="Problem 2", ylim=c(0,450), col="red")
# shows the actual revenues from each test of different sold values
matpoints(all_revenues[,1], all_revenues[,2:N], pch=1, col="black")
# shows the maximum revenue, on average
# NOTE(review): max() scans the whole means matrix (tickets-sold column included);
# it works because mean revenues exceed the largest sold value (60) - confirm if
# revenues could ever fall below that.
abline(v=43, untf=F, par(lty=2))
abline(h=max(all_revenues_means), untf=F, par(lty=2))
points(x=43, y=max(all_revenues_means), pch=15, col="blue")
# on average, the maximum revenue occurs at 43 tickets sold
max(all_revenues_means)
#################################
#Problem 3: Monty Hall with 33 doors, the host opening 5
# number of trials
N=10000
# initialize keep and switch vectors
# run 2 sims. Prob of win with staying (keep) and switching (swit)
keep<-rep(NA,N)
swit<-rep(NA,N)
for (i in 1:N){
  #gen rv's: the prize door and the first pick
  prize<-sample(33,1)
  pick1<-sample(33,1)
  #sim model:
  # this gives the difference between the entire set of choices and the set of choices the host cannot open. The host opens 5 doors.
  host<-sample(setdiff(c(1:33),union(prize,pick1)), 5)
  # this gives the difference between the first selection and those revealed by the host, selecting 1 of those not pick 1 or in host.
  pick2<-sample(setdiff(c(1:33),union(pick1,host)), 1)
  #keep track: did each strategy land on the prize this trial?
  keep[i]<-(pick1==prize)
  swit[i]<-(pick2==prize)
}
# empirical win rates for staying vs switching
print(mean(keep))
print(mean(swit))
# Thus it is still beneficial to switch doors, but the probability of success is much less with 33 doors compared to 3.
#alternatively - not used
#N=10000
#keep<-rep(NA,N)
#swit<-rep(NA,N)
#prize<-sample(33,N, replace=T)
#pick1<-sample(33,N, replace=T)
#print(mean(prize==pick1))
#print(mean(prize!=pick1))
|
aad0b166fb23dc37960b81c96c88b65f48e23bda | 79b5273841a030e62de06a5b614c9a428ddca33a | /Helper.R | 8d2b8ed1181840103af7e38ba2942e8de8d4f168 | [] | no_license | vykhand/devdataprod-010 | 36ec81395608e122816dd89ea78730e910f5d4ea | efc5845fe116fe3130262b420e5a430a061640d6 | refs/heads/master | 2020-06-04T12:45:00.120847 | 2015-01-28T08:38:24 | 2015-01-28T08:38:24 | 29,799,151 | 1 | 0 | null | null | null | null | UTF-8 | R | false | false | 772 | r | Helper.R | #https://londondatastore-upload.s3.amazonaws.com/average-house-prices-borough.xls
# Download (once) and tidy the London average-house-prices workbook.
# library() errors immediately if a dependency is missing, whereas the
# original require() calls only warned and let the script fail later.
library(xlsx)
library(tidyr)
if (!file.exists("avg_prices.xls")) download.file("https://londondatastore-upload.s3.amazonaws.com/average-house-prices-borough.xls", destfile="avg_prices.xls", method = "curl")
# sheet 3 holds the annual mean prices by borough
dt.mean.annual <- read.xlsx("avg_prices.xls", sheetIndex = 3, header = TRUE)
#dt.mean.quarter <- read.xlsx("avg_prices.xls", sheetIndex = 5, header = F, startRow = 4)
dt.mean.annual <- na.omit(dt.mean.annual)
# drop the borough code column (the original also passed row.names=F, which
# subset() has no such argument for and silently ignored)
dt.mean.annual <- subset(dt.mean.annual, select = -Code)
# wide -> long: one row per Area/year with its price
dt.mean.annual <- gather(dt.mean.annual, year, price, -Area)
#dt.mean.annual$yearnum = as.numeric(sub("X","",dt.mean.annual$year))
#dt.mean.annual$year = as.factor(as.numeric(sub("X","",dt.mean.annual$year)))
|
e5fdfbd94b0f1a6a78c3e9128ee4c87a22613505 | a469986260ce95a00caae925dbfc431c04bace94 | /plot1.R | ccf7029ac32deeb41ed9084d67578f49b343bf55 | [] | no_license | bojansovilj/ExData_Plotting1 | 55dd507186a1d7d31a6ba3af276694e0b78c7d74 | d90788697601cdc865c19fd924d02c42b5981886 | refs/heads/master | 2021-01-17T23:16:48.484640 | 2015-09-13T22:49:21 | 2015-09-13T22:49:21 | 42,391,770 | 0 | 0 | null | 2015-09-13T10:25:12 | 2015-09-13T10:25:12 | null | UTF-8 | R | false | false | 947 | r | plot1.R | #This script is solution to Exploratory Data Analysis course project
# Exploratory Data Analysis course project, plot 1:
# histogram of household Global Active Power for 2007-02-01 and 2007-02-02.
# Read the raw file; "?" marks missing values, first two columns are text.
householdPC <- read.table(file="./household_power_consumption.txt",
                          header = TRUE,
                          dec = ".",
                          na.strings = "?",
                          stringsAsFactors = FALSE,
                          colClasses = c("factor", "factor", "numeric", "numeric", "numeric", "numeric", "numeric", "numeric", "numeric"),
                          sep=";")
# Keep only the two target dates (Date is stored as d/m/Y text).
twoDays <- householdPC[householdPC$Date == "1/2/2007" | householdPC$Date == "2/2/2007", ]
twoDays$Global_active_power <- as.numeric(twoDays$Global_active_power)
# Draw the histogram straight to a 480x480 PNG.
png(filename = "plot1.png", width = 480, height = 480)
hist(twoDays$Global_active_power,
     main = "Global Active Power",
     xlab = "Global Active Power (kilowats)",
     col = "red")
dev.off()
# Examples of tidying data
# first, the tidy dataset
table1
# every variable has its own column
# every observation has its own row
# every cell has a value
table2
# what do you notice?
# how can we make this data tidy?
# pivot_wider()
# column to take the variable names from: "type" which has cases and populations
# column that has the values: "count"
table2 %>% pivot_wider(names_from = type, values_from = count)
# now we are back with our tidy form of data in table 1
# pivot_longer()
# sometimes names of columns are actually the values of variables like table4a:
table4a
# it has the years (1999, 2000) as column names
# let's pivot this data to make it tidy
# need 3 parameters:
# 1. columns whose names are values not variables
# 2. name of the variable = "year"
# 3. name of the variable to move the values to = "cases"
tidya <- table4a %>% pivot_longer(c(`1999`,`2000`), names_to = "year",
values_to = "cases")
# take a look at table4b
table4b
# we can perform the same process to make this dataset tidy
tidyb <- table4b %>% pivot_longer(c(`1999`,`2000`), names_to = "year",
values_to = "population")
# now lets join them to get our tidy dataset
left_join(tidya, tidyb)
|
340a773cbdc007a7b78cb351e45ef593eeef07a6 | 38bde0c54f5d0feb811dfe2912cbc9779a3ba9c2 | /r/preprocessing_validation.R | 904ee4160fd723e3a1e9b8daf184a1eb304073aa | [
"MIT"
] | permissive | andeek/kaggle-digitrecognizer | f6b64e137d1a6411351cedfcfdcb65a9c11bdf5c | 10cc94418944f1b2012818ffd36afa6196f591dd | refs/heads/master | 2016-09-08T05:05:30.140874 | 2015-03-06T21:45:19 | 2015-03-06T21:45:19 | 31,507,717 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 745 | r | preprocessing_validation.R | #preprocessing file
#load data and create separate validation set
#make sure to run this file first
#depends on: setwd() to folder above data/
#data ------------------------------------------
# train carries a `label` column plus pixel columns; a fake label is added to
# test further down, so it presumably lacks one - confirm against the CSVs
train <- read.csv("../data/train.csv", header=TRUE)
test <- read.csv("../data/test.csv", header=TRUE)
#library --------------------------------------
library(dplyr)
library(tidyr)
library(ggplot2)
#split into train/validate -------------------
set.seed(503503)  # fixed seed so the split is reproducible
# hold out a third of each label group as the validation set
validation <- train %>%
  group_by(label) %>%
  sample_frac(1/3)
# drop the held-out rows from the training set
train <- train %>%
  anti_join(validation)
# pixel columns as numeric for downstream use
train <- train %>%
  mutate_each(funs(as.numeric), starts_with("pixel"))
# fake label for grouping
test <- test %>%
  mutate(label = paste0("fake", 1:n()))
|
ed1c470e5db6186ee0f270ed62e9f1bb4c1cf3f0 | 1d2128e15f37b04e0d5ac7ec25458b852ad08b16 | /plot3.R | 33c145b251d8d447da3218947a80f0d591a74360 | [] | no_license | jjaimon/ExData_Plotting1 | 0ec86cef5af68453df2bbdf8ad9fa701d8ea13d9 | bc507f4ac1a7c0cceba24c026896ade05970b069 | refs/heads/master | 2021-01-23T23:38:57.611425 | 2014-06-05T19:10:32 | 2014-06-05T19:10:32 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 676 | r | plot3.R | #
#
# Course Project 1: Exploratory Data Analysis
# Plot 3. Energy sub metering line chart
# (the original header said "Plot 1. Histogram of Global Active Power",
#  which described a different plot than the code below draws)
#
# data.prep.R is expected to create data.df with DTime and Sub_metering_1..3
source("data.prep.R")
#
# Draw directly to the file
#
png(filename="plot3.png", width=480, height=480, units = "px")
#
# Draw a line chart: empty frame first, then one line per sub-meter
#
plot(data.df$DTime, data.df$Sub_metering_1, type="n", ylab="Energy sub metering", xlab="")
lines(data.df$DTime, data.df$Sub_metering_1)
lines(data.df$DTime, data.df$Sub_metering_2, col="red")
lines(data.df$DTime, data.df$Sub_metering_3, col="blue")
legend("topright", c("Sub_metering_1", "Sub_metering_2", "Sub_metering_3"),
       lty=c(1, 1, 1),
       col=c("black", "red", "blue"),
       pt.cex=1,
       cex=0.7)
dev.off()
128808c9dda6b9a828e59e88a7525c4777efb1f5 | 8eee897915fe1c832399fab4263f7875ed3402be | /man/ApproxBayesMeta-package.Rd | 88ba3c1128827344f4576f3a6d640b47d69eead8 | [] | no_license | pjnewcombe/ApproxBayesMeta | 46d4f5f6091d32f5c30d09437a720875ebec71ff | 0fe4ba4c58a80838ab7aac708f1632a27b3cd1db | refs/heads/master | 2021-03-12T23:45:34.766439 | 2015-03-12T15:28:55 | 2015-03-12T15:28:55 | 22,107,859 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 643 | rd | ApproxBayesMeta-package.Rd | \name{ApproxBayesMeta-package}
\alias{ApproxBayesMeta-package}
\alias{ApproxBayesMeta}
\docType{package}
\title{Please fill a text title description into the txt file TextTitle in the root of the package directory.}
\description{Please fill a text description into the txt file TextDescription in the root of the package directory.}
\details{
\tabular{ll}{
Package: \tab ApproxBayesMeta\cr
Type: \tab Package\cr
Version: \tab 0.1-03-09-2014\cr
License: \tab GPL (>=2)\cr }
Please fill a text overview into the txt file TextOverview in the root of the package directory.
}
\author{Paul Newcombe \email{paul.newcombe@mrc-bsu.cam.ac.uk}}
|
58fd698581720c262e87199623222d7d4454767d | 7236c172aae84f262675b093b9938bd2e04d0e5d | /Three_to_six_year_check_ins/shell_vol_analysis.R | 2be33d776243ed0c9b27043901594fd1879ef829 | [] | no_license | Oyster-Recovery-Partnership/Monitoring | c102397a3d51cd4e88ee7ded8d298fff69b3bc09 | aa9acb6f5ef75cc4e67a9fabe06ebc765e3f0fa9 | refs/heads/master | 2021-08-03T00:34:08.627052 | 2021-07-26T17:58:00 | 2021-07-26T17:58:00 | 219,568,619 | 0 | 2 | null | null | null | null | UTF-8 | R | false | false | 7,809 | r | shell_vol_analysis.R | # ----------------- #
# test if there was a change in shell volume between the 2015 and 2018 check in
# ----------------- #
# ----------------- #
# load packages
library(readxl)
library(dplyr)
library(ggplot2)
library(lubridate)
library(agricolae) #HSD.test
# ----------------- #
# ----------------- #
# set dir: output directory for saved figures
# ----------------- #
dir.out = "G:/1.0 Restoration and Monitoring/1.0 3_6_yr_monitoring/shell_volume_analysis/"
# ----------------- #
# ----------------- #
# load data
# ----------------- #
dat_2015 <- read_excel("C:/Users/kcoleman/Downloads/HistoricMonitoringData_2012_2018_final.xlsx", sheet=3)
# split the "-"-delimited SampleEvent code into trib/year/survey/type/reef and
# keep the HARR tributary 3YR/6YR surveys from 2015 and 2018.
# surfShellVol = TotVolume minus its pctBlackShell percentage (presumably the
# non-black/surface shell fraction - confirm field definitions).
# Note: year comes out of strsplit as character, so `year %in% c(2015, 2018)`
# matches via coercion to "2015"/"2018".
dat = filter(dat_2015, !is.na(TotVolume)) %>%
  dplyr::select(SampleEvent, Rep, TotVolume, pctBlackShell) %>%
  mutate(year = sapply(strsplit(SampleEvent,"-"), "[[", 2),
         trib = sapply(strsplit(SampleEvent, "-"), head, 1),
         survey = sapply(strsplit(SampleEvent,"-"), "[[", 3),
         reef = sapply(strsplit(SampleEvent, "-"), tail, 1),
         type = sapply(strsplit(SampleEvent, "-"), "[[", 4),
         surfShellVol = TotVolume - (TotVolume * (pctBlackShell/100))) %>%
  filter(trib %in% "HARR", year %in% c(2015, 2018), survey %in% c("3YR", "6YR"))
rm(dat_2015)
# ----------------- #
# dat_2018 <- read_excel("C:/Users/kcoleman/Downloads/Copy of BluePrint_2018.xlsx")
# names(dat_2018) = gsub(" ", "", names(dat_2018), fixed = TRUE)
# dat_2018 = dat_2018[34:42,]
#dat_2015_2 <- read_excel("C:/Users/kcoleman/Downloads/HistoricMonitoringData_2012_2018_final.xlsx", sheet=2)
#dat_2015_2 = dplyr::select(dat_2015_2, SampleEvent, SampleDate) %>%
# mutate(yr = year(SampleDate))
# left_join(., dat_2015_2, by = "SampleEvent")
# tests = as.data.frame(matrix(ncol=3,nrow=9,data = NA))
# names(tests) = c("reefID","shellvol_2015","shellvol_2018")
# tests[,3] = dat_2018$'Aveshellvolumeacrossentirereef(litresperm2)'
# tests[,2] = as.numeric(dat_2018$'Aveshellvolumeacrossentirereef(litresperm2)') - as.numeric(dat_2018$TotalVolumeChange)
# tests[,1] = dat_2018$ReportReefIDs
# ----------------- #
# check to see that they are normal (but since reefID isnt really a continuous variable this is probably silly)
# ----------------- #
# stacked totals per reef, one plot per year, then both years overlaid (2018 in magenta)
ggplot() + geom_bar(data = filter(dat, year %in% 2015), aes(x = reef, y = TotVolume), stat="identity")
ggplot() + geom_bar(data = filter(dat, year %in% 2018), aes(x = reef, y = TotVolume), stat="identity")
ggplot() +
  geom_point(data = filter(dat, year %in% 2015), aes(x = reef, y = TotVolume)) +
  geom_point(data = filter(dat, year %in% 2018), aes(x = reef, y = TotVolume), col = "magenta")
# ----------------- #
# ----------------- #
# aov: one-way ANOVA with each reef-by-year combination as a group level,
# restricted to the listed reefs; run for total and surface shell volume
# ----------------- #
dat2 = mutate(dat, reef_year = paste(reef, year)) %>%
  filter(reef %in% c("002","008","046","056","072","106","TR1","TR2","TR5"))
dat_aov_totVol = aov(TotVolume ~ reef_year, data = dat2)
summary(dat_aov_totVol)
dat_aov_surfVol = aov(surfShellVol ~ reef_year, data = dat2)
summary(dat_aov_surfVol)
# ----------------- #
# ----------------- #
# tukeys HSD
# ----------------- #
#TukeyHSD(dat_aov)
#total
# Tukey HSD post-hoc comparisons among the reef/year groups.
# HSD.test() returns tables ($groups, $means) whose rownames are the
# "reef year" group labels; the helper below splits those labels back into
# reef and year columns. The original repeated this pipeline four times.
append_reef_year <- function(tab) {
  tab_df <- as.data.frame(tab)
  parts <- strsplit(rownames(tab_df), "\\s+")
  labels <- data.frame(reef = sapply(parts, head, 1),
                       year = sapply(parts, "[[", 2),
                       stringsAsFactors = FALSE)
  cbind(tab_df, labels)
}
# total shell volume
out = HSD.test(dat_aov_totVol, "reef_year", console=TRUE)
HSD_groups <- append_reef_year(out$groups)
HSD_means <- append_reef_year(out$means)
# surface shell volume
out2 = HSD.test(dat_aov_surfVol, "reef_year", console=TRUE)
HSD_groups2 <- append_reef_year(out2$groups)
HSD_means2 <- append_reef_year(out2$means)
# ----------------- #
# ----------------- #
# descriptive plots
# ----------------- #
#boxplot of replicate volumes per reef/year, with Tukey letter groups overlaid
p = ggplot() + geom_boxplot(data = dat2, aes(x = reef, y = TotVolume, fill = year)) + theme_bw() +
  geom_text(data = HSD_groups, aes(x = reef, y = TotVolume + 25, label = groups, col = year), fontface = "bold", size = 5, position = "dodge")+
  theme(text = element_text(size = 20))+
  ggtitle("Total Shell Volume")
p
ggsave(paste(dir.out, "tot_shell_vol_boxplot.png",sep=""),p)
p = ggplot() + geom_boxplot(data = dat2, aes(x = reef, y = surfShellVol, fill = year)) + theme_bw() +
  geom_text(data = HSD_groups2, aes(x = reef, y = surfShellVol + 25, label = groups, col = year), fontface = "bold", size = 5, position = "dodge")+
  theme(text = element_text(size = 20))+
  ggtitle("Surface Shell Volume")
p
ggsave(paste(dir.out, "surf_shell_vol_boxplot.png",sep=""),p)
# anomoly: bar chart of the 2015->2018 change where the Tukey letters differ
# t1: reefs with more than one Tukey row (i.e. present in both years)
t1 = HSD_groups %>% group_by(reef) %>% summarize(n=n()) %>% filter(n>1)
# t2: reefs whose Tukey letter is the same in both years (no significant change)
t2 = HSD_groups %>% filter(reef %in% t1$reef) %>% arrange(reef, year) %>%
  group_by(reef) %>% mutate(t = groups == lag(groups)) %>%
  filter(any(t %in% TRUE))
# year-over-year volume change for reefs that DID change significantly
sig.diff.years = HSD_groups %>% filter(reef %in% t1$reef, !reef %in% t2$reef) %>% arrange(reef,year) %>%
  group_by(reef) %>% mutate(diffs = TotVolume - lag(TotVolume)) %>% filter(!is.na(diffs))
# 0.1 is a placeholder bar height for stable reefs
# NOTE(review): replace() pairs sig.diff.years$diffs with reefs positionally;
# both tables are arranged by reef so the order lines up - verify if inputs change.
anom_tab = dplyr::select(HSD_groups, reef) %>% distinct(reef) %>% arrange(reef) %>%
  mutate(diffs = 0.1,
         diffs = replace(diffs, reef %in% sig.diff.years$reef, sig.diff.years$diffs),
         diffs = replace(diffs, reef %in% t2$reef, NA),
         Change = "Stable",
         Change = replace(Change, diffs > 0.1, "Increase"),
         Change = replace(Change, diffs < -0.1, "Decrease"),
         diffs = replace(diffs, is.na(diffs), 0.1))
p = ggplot() + geom_bar(data = anom_tab, aes(x = reef, y = diffs, fill = Change), stat="identity", position = "dodge") +
  labs(x = "Reef", y = "Difference in Volume when Sig. Diff. btwn years") +
  scale_fill_manual(values=c("blue","red","black")) +
  scale_x_discrete(drop = FALSE) + theme_bw() +
  theme(text = element_text(size = 20)) +
  ggtitle("Total Shell Volume")
p
ggsave(paste(dir.out, "tot_shell_vol_anom.png", sep=""), p)
# surf: same anomaly construction for surface shell volume
# anomoly
t1 = HSD_groups2 %>% group_by(reef) %>% summarize(n=n()) %>% filter(n>1)
t2 = HSD_groups2 %>% filter(reef %in% t1$reef) %>% arrange(reef, year) %>%
  group_by(reef) %>% mutate(t = groups == lag(groups)) %>%
  filter(any(t %in% TRUE))
sig.diff.years = HSD_groups2 %>% filter(reef %in% t1$reef, !reef %in% t2$reef) %>% arrange(reef,year) %>%
  group_by(reef) %>% mutate(diffs = surfShellVol - lag(surfShellVol)) %>% filter(!is.na(diffs))
anom_tab = dplyr::select(HSD_groups2, reef) %>% distinct(reef) %>% arrange(reef) %>%
  mutate(diffs = 0.1,
         diffs = replace(diffs, reef %in% sig.diff.years$reef, sig.diff.years$diffs),
         diffs = replace(diffs, reef %in% t2$reef, NA),
         Change = "Stable",
         Change = replace(Change, diffs > 0.1, "Increase"),
         Change = replace(Change, diffs < -0.1, "Decrease"),
         diffs = replace(diffs, is.na(diffs), 0.1))
p = ggplot() + geom_bar(data = anom_tab, aes(x = reef, y = diffs, fill = Change), stat="identity", position = "dodge") +
  labs(x = "Reef", y = "Difference in Volume when Sig. Diff. btwn years") +
  scale_fill_manual(values=c("blue","red","black")) +
  scale_x_discrete(drop = FALSE) + theme_bw() +
  theme(text = element_text(size = 20)) +
  ggtitle("Surface Shell Volume")
p
ggsave(paste(dir.out, "surf_shell_vol_anom.png", sep=""), p)
# ----------------- #
|
dde2f001c5eb107882d5bc3185c54cbab47ebe89 | 3aaa7b94e7646d205fb88380bebba6e099d958e3 | /plot3.R | 0d02ab3a2972a47606629f025fe5e9777328425c | [] | no_license | shakvik/ExData_Plotting1 | f7d16a9c95916a10226950bc30b9e2de789e0081 | 6008dd37a0fb615478da2009e0a33ba00f2f369a | refs/heads/master | 2021-01-21T00:21:43.148431 | 2016-06-27T10:04:22 | 2016-06-27T10:06:28 | 62,035,487 | 0 | 0 | null | 2016-06-27T07:41:52 | 2016-06-27T07:41:52 | null | UTF-8 | R | false | false | 764 | r | plot3.R |
##Read raw file
house<-read.table(file="./household_power_consumption.txt",header=TRUE,sep=";",na.strings="?",colClasses = c("factor", "factor", "numeric", "numeric", "numeric", "numeric", "numeric", "numeric", "numeric"))
##create tidy data
library(dplyr)
house<-mutate(house,date=as.Date(Date,"%d/%m/%Y"))
house<-filter(house,date=="2007-02-01"|date=="2007-02-02")
house<-house[,c(10,2:9)]
#plot
plot(house$Sub_metering_1,type='l',col="black",ylab="Energy sub metering",xaxt='n',xlab="")
lines(house$Sub_metering_2,type='l',col="red")
lines(house$Sub_metering_3,type='l',col="blue")
legend("topright",pch="_____",col=c("black","red","blue"),legend=c("Sub_metering_1","Sub_metering_2","Sub_metering_3"))
axis(1,at=c(0,1500,2900),labels=c("Thu","Fri","Sat"))
|
4085f87c1b18c77d10c7acf4b3351172e2406808 | 9b7e1ae0f2f8b448d551988659522c47e66abb5d | /old_map.R | 38fbc6f6bbf3b2a076d368524d7d573fe1386f74 | [] | no_license | BigBallerBenzie/OldMap | 7dcff28091117954c7e9a412bdaf1eb43539f5d5 | 5569e1802ceb6006e59220b3cd3717f4baa7562e | refs/heads/main | 2023-03-25T06:47:50.727902 | 2021-03-22T21:56:02 | 2021-03-22T21:56:02 | 350,451,288 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 2,719 | r | old_map.R | library(raster)
library(rayshader)
library(elevatr)
# Working directory holding the scanned historic map GeoTIFF.
setwd("D:/Maps/Old Map")
#topo_map <- raster::brick(" https://prd-tnm.s3.amazonaws.com/StagedProducts/Maps/HistoricalTopo/GeoTIFF/WA/WA_Mt%20Rainier_242672_1928_125000_geo.tif")
#topo_map <- raster::stack(topo_map)
# read the scanned map as a multi-band (RGB) raster
# (the original line carried an accidental duplicated assignment:
#  `topo_map <- topo_map <- raster::brick(...)`)
topo_map <- raster::brick("myakejima1.tif")
topo_map <- raster::stack(topo_map)
#
#locations <- data.frame(matrix(extent(topo_map), ncol = 2))
#projstring <- raster::projection(topo_map)
#elevation <- get_elev_raster(locations, z = 15, prj = projstring)
# download elevation tiles covering the map extent (zoom-10 resolution)
elevation <- get_elev_raster(topo_map, z = 10)
plot(elevation)
#elevation1 = raster::raster("N35E138.hgt")
elevation <- raster::crop(elevation, extent(topo_map))
##this raster will help knockdown the elevation outside the
## neatline in the physical map
base_raster <- elevation * 0 + 450
## I want to crop the elevation raster to the neatlines
x <- c(139.450, 139.450, 139.575, 139.575)
y <- c(34.042, 34.125, 34.042, 34.125)
xy <- cbind(x,y)
S <- SpatialPoints(xy, proj4string = CRS("+proj=longlat +ellps=clrk66 +datum=NAD27 +no_defs "))
S <- spTransform(S, crs(topo_map))
interior_elevation <- raster::crop(elevation, extent(S))
elevation <- merge(interior_elevation, base_raster)
names(topo_map) <- c("r", "g", "b")
topo_r <- rayshader::raster_to_matrix(topo_map$r)
topo_g <- rayshader::raster_to_matrix(topo_map$g)
topo_b <- rayshader::raster_to_matrix(topo_map$b)
topo_rgb_array <- array(0, dim = c(nrow(topo_r), ncol(topo_r), 3))
topo_rgb_array[,,1] <- topo_r/255
topo_rgb_array[,,2] <- topo_g/255
topo_rgb_array[,,3] <- topo_b/255
## the array needs to be transposed, just because.
topo_rgb_array <- aperm(topo_rgb_array, c(2,1,3))
elev_mat <- raster_to_matrix(elevation)
ray_shadow <- ray_shade(elev_mat, sunaltitude = 40, zscale = 30, multicore = TRUE)
ambient_shadow <- ambient_shade(elev_mat, zscale = 30)
#elev_mat1 = resize_matrix(elev_mat, scale = 2)
elev_mat %>%
sphere_shade(texture = "bw") %>%
add_overlay(topo_rgb_array) %>%
add_shadow(ray_shadow, max_darken = 0.7) %>%
add_shadow(ambient_shadow, 0.25) %>%
plot_map()
plot_3d(topo_rgb_array,elev_mat, zscale = 20, windowsize = c(1800,2400),
phi = 40, theta = 135, zoom = 0.9,
background = "grey30", shadowcolor = "grey5",
soliddepth = -50, shadowdepth = -100)
#render_camera(theta = 0, phi = 89, zoom = 0.7, fov = 0)
#render_camera(theta = -0, phi = 60, zoom = 0.12, fov = 150)
render_highquality('myakijima.png', lightintensity = 500, samples = 400,
width = 7200, height = 4800, lightdirection = 290)
|
44bea88bd97a381037f3fe251c37a5911a3df361 | bf916b91ae7772ad5b08238e4fa9c99d073e3bb6 | /assignment9.R | 40bd8adb4cc2869b2f758dd0b0794a5a6fbf3ff1 | [] | no_license | lhagopian/usfspring2018 | aa465103eeb778cd4f857324a1b35d404b6dc8bf | 10e8f174e2cc4c7bb36871b4cd6c1dc7f37a57d0 | refs/heads/master | 2021-05-13T18:40:42.399040 | 2018-04-26T17:37:02 | 2018-04-26T17:37:02 | 116,873,715 | 0 | 1 | null | 2018-02-11T00:35:26 | 2018-01-09T21:39:40 | R | UTF-8 | R | false | false | 488 | r | assignment9.R | install.packages("lattice")
library(lattice)
## Install ggplot2 only when missing: an unconditional install.packages()
## re-downloads the package (and needs network access) on every run.
if (!requireNamespace("ggplot2", quietly = TRUE)) install.packages("ggplot2")
library(ggplot2)
sunspot.month<-datasets::sunspot.month
## Base graphics understand "ts" objects directly.
plot(sunspot.month, main="Plot without Packages")
## BUG FIX: lattice's formula interface and ggplot2 need a data frame. In
## the original calls, `time` resolved to the stats::time() *function*
## rather than a time variable, and ggplot() was handed a bare "ts" object.
## Unclass the series into an explicit data frame first.
sun_df <- data.frame(time = as.numeric(time(sunspot.month)),
                     sunspots = as.numeric(sunspot.month))
xyplot(sunspots~time, sun_df, grid = TRUE, main="Plot using Lattice")
ggplot(sun_df, aes(time, sunspots)) +
  geom_point(colour = 'red', size = .5) +
  ggtitle("Plot using Ggplot2") +
  theme(plot.title = element_text(hjust = 0.5))
|
55df1817fb758d2d31e02f91752e22ed112421fe | 94cf7468e784693fc688b01e7c32f451060d6123 | /producing cod data.R | a92b6b5b6337aed00c79e5d98f1079902009aa78 | [] | no_license | szuwalski/cod_vs_crab | a0b9fcd4607a942d835ee8460c2ce62b2f8ef7f2 | 6bcdba76f70e030372eebad8acbc176a0a7f80fd | refs/heads/master | 2021-06-05T06:06:20.935464 | 2021-05-10T20:31:27 | 2021-05-10T20:31:27 | 162,767,328 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 3,913 | r | producing cod data.R | # Compile cod data
library(tidyverse)
library(here)
# NOTE(review): this rebinds `here` to the project-root string returned by
# here::here(); the calls here("data", ...) below still find the here::here
# *function* because R skips non-function bindings when resolving a call --
# confirm this shadowing is intentional.
here <- here::here()
# Raw cod survey records; col_types pins each column's type on read.
cod_dat_raw <- read_csv(here("data","cod.csv"),col_types='ciiiiiiiddddiiddccciddccddiccddiic')
# figure theme
plot_theme <- theme_minimal()+
  theme(text=element_text(family="sans",size=12,color="black"),
        legend.text = element_text(size=14),
        axis.title=element_text(family="sans",size=14,color="black"),
        axis.text=element_text(family="sans",size=8,color="black"),
        panel.grid.major = element_line(color="gray50",linetype=3))
theme_set(plot_theme)
# remove unnecessary columns and rename
cod_dat_rn <- cod_dat_raw %>%
  select(Year,`Haul Join ID`,`Starting Latitude (dd)`,`Starting Longitude (dd)`,`Ending Latitude (dd)`,`Ending Longitude (dd)`, Stratum,
         `Satisfactory Gear Performance`,`Bottom Depth`,`Weight (kg)`,`Number of Fish`) %>%
  rename(hauljoin=`Haul Join ID`,year=Year,startlat=`Starting Latitude (dd)`,startlon=`Starting Longitude (dd)`,
         endlat=`Ending Latitude (dd)`,endlon=`Ending Longitude (dd)`,gear_satisfactory=`Satisfactory Gear Performance`,depth=`Bottom Depth`,
         weight=`Weight (kg)`,number=`Number of Fish`
         )
# Join appropriate GIS stations and empty hauls (cross-referenced from snow crab data)
# (haul_join_key is loaded into the workspace by load() below.)
load('data/haul_join_key.Rdata')
cod_dat <- cod_dat_rn %>%
  full_join(haul_join_key,by=c("hauljoin","year")) %>%
  # fill in zeroes for unrepresented hauls, and use 0.01 for area swept
  mutate(weight=coalesce(weight,0),number=coalesce(number,0)) %>%
  rename(area_km2=AreaSwept_km2) %>% mutate(area_km2=coalesce(area_km2,0.01)) %>%
  # for those hauls missing midlat/midlon, fill with startlat/startlon
  mutate(midlon=coalesce(midlon,startlon),midlat=coalesce(midlat,startlat)) %>%
  filter(year>1981,year<2018)
# Sanity checks: report remaining missing values after the fill-ins above.
print(paste("The number of missing latitudes is",sum(is.na(cod_dat$midlat))))
print(paste("The number of missing longitudes is",sum(is.na(cod_dat$midlon))))
print(paste("The number of missing stations IDs is",sum(is.na(cod_dat$station))))
print(paste("The number of missing years is",sum(is.na(cod_dat$year))))
# Aggregate by station and year
cod_dat_clean <- cod_dat %>%
  select(towid,station,year,area_km2,midlon,midlat,weight,number) %>%
  #density in numbers and weight
  mutate(dens_weight=weight/area_km2,dens_num=number/area_km2)
save(cod_dat_clean,file="data/cod_dat_clean.Rdata")
# plot total biomass over time
cod_by_yr <-cod_dat_clean %>%
  group_by(year) %>%
  summarise(n_obs=n(),
            num=mean(dens_num,na.rm = T),
            num_sd=sd(dens_num,na.rm = T),
            weight=mean(dens_weight,na.rm=T),
            weight_sd=sd(dens_weight,na.rm=T)) %>%
  ungroup()
weight_by_yr_plot <-cod_by_yr %>%
  ggplot(aes(year,weight))+
  geom_line()+
  geom_point()+
  labs(x='year',y='weight (kg/km2)')
num_by_yr_plot <-cod_by_yr %>%
  ggplot(aes(year,num))+
  geom_line(col='blue')+
  geom_point(col='blue')+
  labs(x='year',y='density (number/km2)')
library(gridExtra)
# compare visually to assessment data
assessment_abundance <- read_csv(here("data","assessment_tot_abun.csv"),col_types = 'dddddd') %>%
  ggplot(aes(Year,Estimate/100))+
  geom_point(col='darkgreen')+geom_line(col='darkgreen')+
  labs(x='year',y='Abundance (x 1e5 fish)')+
  theme(panel.border = element_rect(fill=NA,color="darkgreen",size=2))
grid.arrange(weight_by_yr_plot,num_by_yr_plot,assessment_abundance)
#size composition (from assessment)
# Reshape the wide per-size-class columns to long form; prop becomes the
# within-year proportion; the "120+" open-ended class is dropped so cm can
# be numeric; decade labels each survey year.
sizecomp <- read_csv('data/assessment_size_comp.csv') %>%
  gather(cm,prop,-Year,-N) %>%
  mutate(prop=prop/N) %>%
  filter(cm!="120+") %>%
  mutate(cm=as.numeric(cm),decade=case_when(
    Year<1990 ~ "1980s",
    Year<2000&Year>1989 ~ "1990s",
    Year<2010&Year>1999 ~ "2000s",
    Year>2009 ~ "2010s"
  ))
#match to bins
# binsize=5
# sizes <-seq(27.5,132.5,binsize)
# # size bin matching key
# sizesdf <- tibble(dn=seq(25,130,5),up=seq(30,135,5),size=sizes)
|
d5f4c51160d2780064eafefcd7d5d5cfcced694b | 768411a39703ce54c6d2e161baf63e22d6f5641c | /datediff.R | 7b776f0fdc1ce37468287f72c3bc4541a091b58b | [
"MIT"
] | permissive | joelonsql/coronalyzer | ef092df181bff32a408ffcb8fedf08661e29194e | 4253f2fd9fbda212a8dfe2f29b2fde972052d557 | refs/heads/master | 2021-05-20T02:40:47.969881 | 2020-04-24T13:04:54 | 2020-04-24T13:04:54 | 252,151,329 | 15 | 3 | null | null | null | null | UTF-8 | R | false | false | 6,116 | r | datediff.R | library(tidyverse)
## ---------------------------------------------------------------------------
## Cumulative COVID-19 deaths per death date, as published by the Swedish
## Public Health Agency on ten consecutive report days (2020-04-02..-11).
## Each vector holds, for one report day, the cumulative number of deaths
## attributed to each death date from first_date through last_date.
## ---------------------------------------------------------------------------
first_date <- as.Date("2020-03-11")
last_date <- as.Date("2020-04-11")
deaths_20200402 <- c(0,0,1,1,2,2,1,6,7, 9,8,11, 8,16,22,27,31,26,25,26,26,13, 5, 0,0, 0, 0, 0, 0, 0, 0,0)
deaths_20200403 <- c(1,0,1,1,2,2,1,6,7, 9,8,11, 9,16,22,27,31,29,27,30,33,23,23, 2,0, 0, 0, 0, 0, 0, 0,0)
deaths_20200404 <- c(1,0,1,1,2,2,1,6,7, 9,8,11, 9,16,23,27,31,29,28,30,36,25,36,18,1, 0, 0, 0, 0, 0, 0,0)
deaths_20200405 <- c(1,0,1,1,2,2,1,6,7, 9,8,11, 9,16,24,27,32,29,29,30,36,31,43,22,6, 1, 0, 0, 0, 0, 0,0)
deaths_20200406 <- c(1,0,1,1,2,2,1,6,7, 9,8,11,10,16,24,28,33,29,31,32,36,35,47,34,17,23,13, 0, 0, 0, 0,0)
deaths_20200407 <- c(1,0,1,1,2,2,1,6,7, 9,8,11,11,17,24,30,33,31,32,38,37,40,55,49,40,49,37, 2, 0, 0, 0,0)
deaths_20200408 <- c(1,0,1,1,2,2,2,6,7,10,7,11,11,18,25,29,33,31,34,38,36,42,59,54,48,58,55,36, 6, 0, 0,0)
deaths_20200409 <- c(1,0,1,1,2,2,2,6,7,10,7,12,11,20,25,30,32,34,37,41,42,45,65,58,54,67,66,53,47, 3, 0,0)
deaths_20200410 <- c(1,0,1,1,2,2,2,6,7,10,7,12,11,20,25,30,32,34,37,41,42,47,67,64,57,75,74,60,67,20, 3,0)
deaths_20200411 <- c(1,0,1,1,2,2,2,6,7,10,7,12,11,20,25,30,32,34,37,41,42,47,67,65,57,75,74,60,70,23,13,0)
stable_date <- as.Date("2020-04-07") # first_date + max(which(!is.na(match(deaths_20200411 - deaths_20200410,0)))) - 1
## Long table of NEW deaths per (death_date, report_date): each report
## contributes its cumulative counts minus the previous report's counts
## (the first report is taken as-is, i.e. diffed against zero). Building
## the rows in a loop over a named list replaces the original ten
## copy-pasted add_row() calls and guarantees all reports are treated
## identically.
reports <- list(
  "2020-04-02" = deaths_20200402,
  "2020-04-03" = deaths_20200403,
  "2020-04-04" = deaths_20200404,
  "2020-04-05" = deaths_20200405,
  "2020-04-06" = deaths_20200406,
  "2020-04-07" = deaths_20200407,
  "2020-04-08" = deaths_20200408,
  "2020-04-09" = deaths_20200409,
  "2020-04-10" = deaths_20200410,
  "2020-04-11" = deaths_20200411
)
death_dates <- seq(first_date, last_date, by = "day")
prev <- 0                       # cumulative counts of the previous report
rows <- vector("list", length(reports))   # preallocate; no rbind-in-loop growth
for (i in seq_along(reports)) {
  cur <- reports[[i]]
  rows[[i]] <- data.frame(
    deaths = cur - prev,
    death_date = death_dates,
    report_date = rep(as.Date(names(reports)[i]), length(cur))
  )
  prev <- cur
}
data <- do.call(rbind, rows)
## Subtitle texts for the "before" charts (data as reported up to that day)
## and the "after" charts (same death dates with later back-filled reports).
## The quoted Swedish statements are kept verbatim; they are rendered in the
## plot subtitles.
before_texts <- c(
  "Tegnell presentation 2020-04-06 i SVT: \"Fallen ligger på knappt 30 om dan\"\n\n",
  "Tegnell presentation 2020-04-07 i SVT: \"Vi ligger på ett snitt på 40 fall per dygn.\"\n\n",
  "Tegnell presentation 2020-04-08 i SVT: \"Nu ligger vi på 45 eller högre.\"\n\n"
)
after_texts <- c(
  "Tegnell presentation 2020-04-06 i SVT: \"Fallen ligger på knappt 30 om dan\"\n\nEftersläpningseffekten per 2020-04-10",
  "Tegnell presentation 2020-04-07 i SVT: \"Vi ligger på ett snitt på 40 fall per dygn.\"\n\nEftersläpningseffekten per 2020-04-10",
  "Tegnell presentation 2020-04-08 i SVT: \"Nu ligger vi på 45 eller högre.\"\n\nEftersläpningseffekten per 2020-04-10"
)
## One before/after chart pair per presentation day (2020-04-06 .. 2020-04-08).
for (i in 1:3) {
  last_report_date <- as.Date("2020-04-05") + i
  ## "before" keeps only what had been reported by last_report_date;
  ## "after" keeps everything later reported about those same death dates.
  before <- data %>% filter(death_date <= last_report_date & report_date <= last_report_date)
  after <- data %>% filter(death_date <= last_report_date)
  before$report_date <- as.factor(before$report_date)
  after$report_date <- as.factor(after$report_date)
  ## First report date in one colour, all later (back-filled) reports in another.
  pal <- c("#ad64a0",rep("#55aaa0",9))
  after_plot <- ggplot(after, aes(x=death_date)) +
    geom_col(aes(y=deaths, fill=report_date), position = position_stack(reverse = TRUE)) +
    theme_minimal() +
    scale_fill_manual(values=pal) +
    labs(x = "Avlidendatum", y = "Antal nya dödsfall") +
    scale_x_date(date_breaks = "5 days", date_labels = "%d-%b") +
    ggtitle("Antal avlidna per dag", subtitle = after_texts[i]) +
    guides(fill = FALSE) +
    theme(
      plot.title = element_text(size = 30, face = "bold"),
      plot.subtitle = element_text(size = 15)
    )
  ## Build the "after" plot first so its y-range can be reused: the "before"
  ## plot is clamped to the same scale for a fair visual comparison.
  gpb <- ggplot_build(after_plot)
  before_plot <- ggplot(before, aes(x=death_date)) +
    geom_col(aes(y=deaths, fill=report_date), position = position_stack(reverse = TRUE)) +
    theme_minimal() +
    scale_fill_manual(values=pal) +
    labs(x = "Avlidendatum", y = "Antal nya dödsfall") +
    scale_x_date(date_breaks = "5 days", date_labels = "%d-%b") +
    ggtitle("Antal avlidna per dag", subtitle = before_texts[i]) +
    guides(fill = FALSE) +
    coord_cartesian(ylim = c(
      gpb$layout$panel_scales_y[[1]]$range$range[1],
      gpb$layout$panel_scales_y[[1]]$range$range[2]
    )) +
    theme(
      plot.title = element_text(size = 30, face = "bold"),
      plot.subtitle = element_text(size = 15)
    )
  print(before_plot)
  print(after_plot)
  ## PDF pair per day: "_0" is the before view, "_1" the after view.
  ggsave(paste("fhm_",i,"_0.pdf",sep=""), before_plot)
  ggsave(paste("fhm_",i,"_1.pdf",sep=""), after_plot)
}
## Overview chart: stacked new-death counts per death date, one fill colour
## per report date; the vertical line marks stable_date.
data$report_date <- as.factor(data$report_date)
ggplot(data, aes(x=death_date)) +
  geom_col(aes(y=deaths, fill=report_date), position = position_stack(reverse = TRUE)) +
  theme_minimal() +
  labs(x = "Datum avliden", fill = "Rapportdatum", y = "Antal avlidna") +
  ggtitle("Folkhälsomyndigheten - Covid19 Historik Excel - Avlidna per dag") +
  geom_vline(aes(xintercept = stable_date))
|
a496bbb48b992688bbd4aac52e36512406bc0478 | d129aca2797ec95826d5dd639a0254f29647bba4 | /cachematrix.R | 98c7f0d8aba5c928fac01e215f311eba31c973f6 | [] | no_license | 200002466/ProgrammingAssignment2 | cc3772256df83c35619a9066837712b0f417c193 | 37fdb7f117bbe97b22f30d15a2e0cd5980088b35 | refs/heads/master | 2020-12-25T12:39:34.260487 | 2015-08-23T22:16:27 | 2015-08-23T22:16:27 | 41,263,574 | 0 | 0 | null | 2015-08-23T19:21:18 | 2015-08-23T19:21:17 | null | UTF-8 | R | false | false | 3,003 | r | cachematrix.R | ## Put comments here that give an overall description of what your
## functions do
## Write a short comment describing this function
makeCacheMatrix <- function(x = matrix()) {
  ## Wrap a matrix together with a cache slot for its inverse.
  ##
  ## Returns a list of four accessor functions that close over `x` and the
  ## cached inverse:
  ##   set(m)        -- replace the stored matrix and clear the cache
  ##   get()         -- return the stored matrix
  ##   setinverse(i) -- store a computed inverse in the cache
  ##   getinverse()  -- return the cached inverse (NULL until it is set)
  cached_inverse <- NULL
  set <- function(new_matrix) {
    ## <<- rebinds the names in this function's enclosing environment, so
    ## the stored matrix and cache persist across accessor calls.
    x <<- new_matrix
    cached_inverse <<- NULL   # matrix changed: any cached inverse is stale
  }
  get <- function() x
  setinverse <- function(inverse) cached_inverse <<- inverse
  getinverse <- function() cached_inverse
  ## Expose the four accessors as a named list.
  list(set = set,
       get = get,
       setinverse = setinverse,
       getinverse = getinverse)
}
## Write a short comment describing this function
cacheSolve <- function(x, ...) {
  ## Return the inverse of the matrix wrapped by `x` (an object created by
  ## makeCacheMatrix), computing it at most once.
  ##
  ## If a cached inverse exists it is returned immediately (with a message);
  ## otherwise the inverse is computed with solve(), stored in the cache via
  ## x$setinverse(), and returned. The wrapped matrix must be square and
  ## invertible -- a matrix whose determinant is zero cannot be inverted and
  ## solve() will raise an error. Extra arguments in `...` are passed
  ## through to solve().
  inv <- x$getinverse()
  if (!is.null(inv)) {
    message ("getting cached data")
    return(inv)
  }
  mat <- x$get()
  ## BUG FIX: the original computed mean(mat, ...) here, which yields a
  ## single scalar average, not the matrix inverse the function promises.
  inv <- solve(mat, ...)
  x$setinverse(inv)
  inv
}
|
42948e8060a7cadc75226cf3995513f5fd1de9a7 | 7f1111ad4a899e69bba1cda4f1d88507826e9e31 | /Answer4.R | d10d5b4e5675352e7429038027c20f6b6314a341 | [] | no_license | CodyStumpo/WhiffModel | 239addea2028b86f9d58276e8f965d20ea3bfcc6 | 4b315b1bd1020377812ba865b086728b590f21d1 | refs/heads/master | 2021-01-10T18:20:59.297690 | 2016-08-21T17:36:43 | 2016-08-21T17:36:43 | 52,711,094 | 1 | 1 | null | null | null | null | UTF-8 | R | false | false | 1,668 | r | Answer4.R | library("readr")
library("dplyr")
# Pipe-delimited pitch-level training data; one row per pitch.
train=read_delim(file = "TrainData.txt", delim = "|")
# 4.In addition to the 3 things above, we’d like you to create some summary data on the entirety of the training data set.
# For pitchers with a minimum of 50 FB in the training data set,
# find the top five fastest FB by init_vel and list
# the pitch, pitcher, number of pitches thrown, avg init_vel, avg X break and avg Z break for each of the pitchers in the top five.
# Repeat this for pitchers who have thrown 50+ CB, SL and CH in the training set
# (so 4 separate result sets w/5 pitchers in each result set).
# Please save this result set in a csv file.
#find top 5 fastballs
# fastest5(pt): for one pitch type `pt`, return the five pitchers with the
# highest average initial velocity among those with more than 50 such
# pitches, with per-pitcher pitch count and average velocity/break.
# NOTE(review): reads the global `train` built above, and filter(count > 50)
# keeps pitchers with 51+ pitches while the prompt says "minimum of 50" --
# confirm whether >= 50 was intended.
fastest5 = function(pt) {
  train %>%
    select(pitcherid, pitcher_name, Init_Vel,pitchtype, Break_X, Break_Z) %>%
    filter(pitchtype == pt & pitcherid != 175) %>% #the Harry Doyle exception
    group_by(pitcherid) %>%
    summarise(pitchtype=max(pitchtype), name=max(pitcher_name), count=n(), avgVel=mean(Init_Vel),avgBreakX = mean(Break_X), avgBreakZ=mean(Break_Z)) %>%
    filter(count > 50) %>%
    select(pitchtype, name, count, avgVel, avgBreakX, avgBreakZ) %>%
    top_n(5, avgVel) %>%
    arrange(avgVel)
}
# Four result sets (fastball, curveball, slider, changeup), stacked and saved.
fb5=fastest5("FB")
cb5=fastest5("CB")
sl5=fastest5("SL")
ch5=fastest5("CH")
answer4=rbind(fb5, cb5, sl5, ch5)
write.csv(answer4,file = "CodyStumpoAnswer4.csv", row.names = FALSE)
# Looked into suspicious outlier...
# Trying to trick me! Harry Doyle's Changeup shows up at 95.
# hdoyle = train %>% filter(pitcherid==175) shows several 100 mph CH. Faster than FB.
# Harry Doyle is the announcer from Major League (cute)
# Also there is no Harry Doyle in historical records
757a8c5ccc54b521c6e4e1b44f0cf52ba5b975bb | acd09fd92aabb80a4695f197ad3a2f817d6cdac2 | /20171021_practice2.R | ec178be0c7ff0058ecfe7047da6685c80e862759 | [] | no_license | hyun-park/__R__SogangRClass | 2dc06dd0b98a7b4da1cb16201168f3e09bbe95cb | 44a2fae92b88c727a197d1820ec8f3abb3b44f6a | refs/heads/master | 2021-07-23T11:51:19.382192 | 2017-11-02T05:46:22 | 2017-11-02T05:46:22 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 562 | r | 20171021_practice2.R | TEXT <- scan(file="howtostartastartup.txt", what="char", quote=NULL)
## Build a word-frequency table for the transcript loaded into TEXT above.
head(TEXT)
## NOTE(review): these two rep() calls only print their result when run
## interactively and the values are discarded -- they have no effect on the
## final table.
rep(TEXT)
rep(head(TEXT),2)
## Sort the tokens alphabetically, then strip punctuation characters.
SortedTEXT <- sort(TEXT)
CleanedTEXT <- gsub("[[:punct:]]","",SortedTEXT)
head(CleanedTEXT)
table(CleanedTEXT)
## Frequency table with the most common word first.
sortedTable <- sort(table(CleanedTEXT), decreasing=T)
DataFrame <- data.frame(sortedTable)
head(DataFrame)
## Rebuild with the word as the row name plus absolute and relative
## (rounded to 3 decimals) frequencies.
DataFrame <- data.frame(row.names = DataFrame$CleanedTEXT, Freq=DataFrame$Freq, Rel.Freq=round(DataFrame$Freq/sum(DataFrame$Freq),3))
head(DataFrame)
## Tab-separated output; col.names = NA writes a blank header cell above
## the row names.
write.table(DataFrame, file="20171021_practice", quote = F, col.names = NA, sep="\t")
|
34a0941ad362c3c2b458e73ec445e72f5e3603e1 | 7d0e6b61f92fe48afa6e32902fd3edfb1b7b52a9 | /R/from_here.R | acba69f5f088dedd21c5a58742846cb2f6b0c683 | [
"MIT"
] | permissive | ratmaster/collateral | cd6682ee0a83a308eeabc0bf65577956b9b89356 | d9cc212d597fce83bdfee8896ee1554ab94551d7 | refs/heads/master | 2021-08-20T07:43:22.393533 | 2017-11-28T15:07:06 | 2017-11-28T15:07:06 | 105,816,679 | 1 | 0 | null | null | null | null | UTF-8 | R | false | false | 758 | r | from_here.R | #' @export
#' @title Set up parallel environment from here
#' @description Record parallel-execution settings in the global
#'   \code{collateralopts} option list, returning the previous settings.
#'
#' @details Only the arguments explicitly named in the call (captured via
#'   \code{match.call()}) are written into the option list; the defaults
#'   shown in the signature are not re-applied for omitted arguments. The
#'   prior option list is returned invisibly so callers can restore it.
#'
#'   NOTE(review): because \code{match.call()} captures arguments
#'   unevaluated, passing a variable (e.g. \code{threads = n}) stores the
#'   symbol \code{n} rather than its value -- confirm that downstream
#'   readers of \code{collateralopts} evaluate these entries.
#'
#' @param progress,title,memo,resume,eta,time,threads,sameSeed,stopOnError
#'   Settings stored into the \code{collateralopts} option list when
#'   explicitly supplied; their interpretation is defined by the package's
#'   parallel apply functions.
#' @return Invisibly, the \code{collateralopts} option list as it was
#'   before this call.
#'
#' @seealso \code{\link{plapply}} and \code{\link{psapply}}.
from_here <- function(progress = FALSE,
                      title = "Progress",
                      memo = FALSE,
                      resume = FALSE,
                      eta = FALSE,
                      time = FALSE,
                      threads = 1,
                      sameSeed = FALSE,
                      stopOnError = TRUE) {
  # Snapshot the current option list so it can be returned to the caller.
  opts <- getOption("collateralopts")
  pre <- opts
  # Arguments actually supplied in the call (unevaluated), function name dropped.
  x <- as.list(match.call())[-1]
  if (length(x) > 0) {
    opts[names(x)] <- x
    options("collateralopts" = opts)
  }
  return(invisible(pre))
}
a341a81a858df005ac9c413355a446edecfdac95 | 6e95686db673461f336c2198de395a2d5a3bca37 | /Run2bcd.Rscr | 8e8fcc2473ee3da0d55c863f0002b742d9bfbc1d | [] | no_license | sbaker2423/S2S_PostProcess | 5b6886fe5b2d7a34272f7c0a9e50718d54af7d35 | 39cf3df11a0f42b944a28fb41a35af35e95c5e21 | refs/heads/master | 2020-04-06T15:44:18.590406 | 2018-11-27T22:28:14 | 2018-11-27T22:28:14 | 157,590,912 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 400 | rscr | Run2bcd.Rscr | #!/opt/R-3.2.3/bin//Rscript
## Run 2b,c,d to see which process produces the best results
## Driver script: sequentially executes the three candidate post-processing
## models (2b: PLSR base model, 2c: PLSR step-wise model, 2d: PCR/MLR),
## each with its own cross-validation, so their outputs can be compared.
source("/home/sabaker/s2s/analysis/scripts/cfsv2_analysis/post_process/plsr/2b_PLSR_baseModel_CV.Rscr")
source("/home/sabaker/s2s/analysis/scripts/cfsv2_analysis/post_process/plsr/2c_PLSR_stepWiseModel_CV.Rscr")
source("/home/sabaker/s2s/analysis/scripts/cfsv2_analysis/post_process/plsr/2d_PCR_MLR_CV.Rscr")
|
91a4a81f50986d7580675771a92eed39139f1d66 | 1e5aaad5bb774e0084745cb06bb4b33ff96c944a | /R/print.contents.R | 7d4e1e66c6e61bd921bdc45fa44212eb6e3e2060 | [
"MIT"
] | permissive | Rkabacoff/qacr | cd66c7e1c6b188bdbd2e6cab618c1d77a5ed2a0c | d30d4cd9cbadbfba30f8b7c19b00eff6e9053ab2 | refs/heads/main | 2021-06-25T14:49:23.176096 | 2021-03-11T14:45:39 | 2021-03-11T14:45:39 | 218,363,729 | 0 | 6 | null | 2019-12-02T15:43:19 | 2019-10-29T19:09:40 | R | UTF-8 | R | false | false | 1,319 | r | print.contents.R | #' @title Print a contents object
#' @description
#' \code{print.contents} prints the results of the \code{contents} function.
#' @param x an object of class \code{contents}.
#' @param ... not used.
#' @return NULL
#'
#' @examples
#' testdata <- data.frame(height=c(4, 5, 3, 2, 100),
#'                        weight=c(39, 88, NA, 15, -2),
#'                        names=c("Bill","Dean", "Sam", NA, "Jane"),
#'                        race=c('b', 'w', 'w', 'o', 'b'))
#'
#' x <- contents(testdata)
#' print(x)
#'
#' @rdname print.contents
#' @importFrom crayon blue
#' @export
print.contents <- function(x, ...){
  ## BUG FIX: the class-check error message said "class 'tab'", but the
  ## class actually required (and tested for) is 'contents'.
  if(!inherits(x, "contents")) stop("Must be class 'contents'")
  ## Header: data frame name and overall dimensions.
  cat("\nThe data frame", x$dfname, "has",
      format(x$nrow, big.mark=","), "observations and",
      format(x$ncol, big.mark=","), "variables.\n")
  ## Nothing further to report for an empty data frame.
  if(x$nrow == 0 | x$ncol == 0)return(NULL)
  cat("\n", crayon::blue$underline$bold('Overall'), "\n", sep="")
  print(x$overall, row.names=FALSE, right=FALSE)
  ## Numeric-variable summary (only present when there are numeric columns).
  if(!is.null(x$qvars)){
    cat("\n", crayon::blue$underline$bold('Numeric Variables'),
        "\n", sep="")
    print(x$qvars)
  }
  ## Categorical-variable summary (only present when there are such columns).
  if(!is.null(x$cvars)){
    cat("\n",
        crayon::blue$underline$bold('Categorical Variables'),
        "\n", sep="")
    print.data.frame(x$cvars, right=FALSE, row.names=FALSE)
  }
  return(NULL)
}
|
59f5d5993bb805bebca95f1e9d9acc28e6059a95 | 4f65e100fe9a619b2b87debb15201018dd4250f3 | /getVAdata.R | 04051b2281c9fd45a1e666f00c3ea3e7628a905c | [] | no_license | jmmaxwell/IDA | beb6cc504afa93418329238ca32b3bbce5836bcb | de1a1153df6fdf2e8402e091d3adcdff6165d9e7 | refs/heads/master | 2020-05-17T22:37:39.672492 | 2014-04-21T17:12:53 | 2014-04-21T17:12:53 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,842 | r | getVAdata.R | ## function for getting VA data for each year
## careful! when all 5 are loaded it's about 290 MBs
## Fetch the four value-added components (employee compensation, gross
## operating surplus, mixed/proprietary income, and taxes on production and
## imports) for every county/industry pair from one year's Access database,
## joined into a single table.
##
## yr: the data year; selects both the .accdb file and the
##     dbo_ValueAddedYYYY table inside it.
## Returns a data.table with columns YR, FIPS, IC, VAEMP, VAGOS, VAMPI, VATPI.
get.VAdata = function(yr){
  ## library() rather than require(): a missing package should fail loudly
  ## here instead of causing confusing "could not find function" errors later.
  library(RODBC)
  library(data.table)
  # set db file path
  db = file.path(paste("C:/Users/jmmaxwell/Desktop/IMdata/", yr, ".accdb", sep = ""))
  # connect to db; on.exit() guarantees the channel is closed even if the
  # query below errors (the original leaked the connection on failure)
  channel = odbcConnectAccess2007(db)
  on.exit(odbcClose(channel), add = TRUE)
  table = paste("dbo_ValueAdded", yr, sep = "")
  # One row per (year, FIPS, industry): self-join the value-added table on
  # its four VA_Type slices so each component becomes a column.
  SQL.q2 = paste("SELECT 
                 VAEMP.DataYear as YR, 
                 VAEMP.ComboFIPS as FIPS, 
                 VAEMP.IndustryCode as IC, 
                 VAEMP.VA_Amount as VAEMP, 
                 VAGOS.VA_Amount as VAGOS, 
                 VAMPI.VA_Amount as VAMPI, 
                 VATPI.VA_Amount as VATPI 
                 FROM ((((SELECT DataYear, ComboFIPS, IndustryCode, VA_Amount FROM ", table," VA WHERE VA_Type = 'PoW Employee Compensation') as VAEMP) 
                 INNER JOIN (SELECT ComboFIPS, IndustryCode, VA_Amount FROM ", table," VA WHERE VA_Type = 'PoW Gross Operating Surplus (Other Property Income)') as VAGOS 
                 ON VAEMP.ComboFIPS = VAGOS.ComboFIPS and VAEMP.IndustryCode = VAGOS.IndustryCode) 
                 INNER JOIN (SELECT ComboFIPS, IndustryCode, VA_Amount FROM ", table," VA WHERE VA_Type = 'PoW Mixed (Proprietary) Income') VAMPI 
                 ON VAEMP.ComboFIPS = VAMPI.ComboFIPS and VAEMP.IndustryCode = VAMPI.IndustryCode) 
                 INNER JOIN (SELECT ComboFIPS, IndustryCode, VA_Amount FROM ", table," VA WHERE VA_Type = 'Taxes on Production and Imports (Indirect Business Taxes)') VATPI 
                 ON VAEMP.ComboFIPS = VATPI.ComboFIPS and VAEMP.IndustryCode = VATPI.IndustryCode", sep = "")
  # use SQL command to fetch data
  dt = data.table(sqlQuery(channel, query = SQL.q2))
  return(dt)
}
d4acbf06089f2399152068075266383620fb5041 | a54d562769bee1f3118817a7c9bb94f7738fa7f5 | /code/data.R | 4e46e2ff0e21875ee584ab1c4185d61ecd50a088 | [] | no_license | zouyuxin/susierss_diagnostic | db2652b0a742c5a0ddbab01391a49812d2caed31 | 8799297a2b745a0eda8a8941f277fda14064e50f | refs/heads/main | 2023-04-19T06:31:00.137259 | 2021-05-03T01:40:00 | 2021-05-03T01:40:00 | 323,688,546 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 2,571 | r | data.R | ## data
## Extract a genomic region with plink for three 1000 Genomes populations
## and compute their LD (correlation) matrices plus distances between them.
## NOTE(review): `dataset` ("chr,start,end" string) and `plinkfile` (output
## path prefix) are read here but never defined in this script -- they must
## be supplied by the enclosing pipeline. filter_X() is likewise defined
## elsewhere.
library(data.table)
datpos = unlist(strsplit(dataset, ','))
chr=datpos[1]; start=datpos[2]; end=datpos[3]
# extract data
cmd = paste0('/project2/mstephens/software/plink-1.90b6.10/plink --bfile data/1kg_unrelated ',
             '--chr ', chr, ' --from-bp ', start, ' --to-bp ', end, ' --make-bed ',
             '--threads 2 --memory 5000 --out ', plinkfile)
system(cmd)
# Re-export the extracted region as an additive (0/1/2) dosage matrix.
cmd = paste0('/project2/mstephens/software/plink-1.90b6.10/plink --bfile ', plinkfile, ' ',
             '--export A --out ', plinkfile)
system(cmd)
# load data
geno <- fread(paste0(plinkfile, '.raw'),sep = " ",header = TRUE,stringsAsFactors = FALSE,
              showProgress = FALSE)
class(geno) <- "data.frame"
# Row ids are FID_IID; the first 6 columns of a .raw file are metadata.
ids <- with(geno,paste(FID,IID,sep = "_"))
geno <- geno[-(1:6)]
rownames(geno) <- ids
geno <- as.matrix(geno)
storage.mode(geno) <- "double"
# Remove the temporary plink output files for this region.
files = unlist(strsplit(plinkfile, '/'))
junk <- dir(path = paste(files[1:(length(files)-1)], collapse = '/'),
            pattern=files[length(files)],
            full.names = T)
file.remove(junk)
# select individual label: GBR IBS
labels <- read.table("data/omni_samples.20141118.panel",
                     sep = " ",header = TRUE,as.is = "id")
ids <- sapply(strsplit(rownames(geno),"_"),"[",2)
labels <- subset(labels,is.element(labels$id,ids))
X = geno[labels$pop == 'GBR',] # British in England and Scotland.
X.IBS = geno[labels$pop == 'IBS',] # Iberian Populations in Spain.
X.CLM = geno[labels$pop == 'CLM',] # Colombians in Medellin, Colombia.
# filter on missing rate, maf, and do mean imputation
X = filter_X(X, 0.05, 0.05)
X.IBS = filter_X(X.IBS, 0.05, 0.05)
X.CLM = filter_X(X.CLM, 0.05, 0.05)
# get common SNPs
indx <- Reduce(intersect, list(colnames(X), colnames(X.IBS),colnames(X.CLM)))
X = X[, indx]
X.IBS = X.IBS[, indx]
X.CLM = X.CLM[, indx]
# Standardize each population's genotypes using the centering/scaling
# attributes set by susieR, then take the sample correlation (LD) matrix.
X = susieR:::set_X_attributes(X)
X = t((t(X) - attributes(X)[["scaled:center"]]) / attributes(X)[["scaled:scale"]]);
r.sample = cor(X)
N.GBR = nrow(X)
X.IBS = susieR:::set_X_attributes(X.IBS)
X.IBS_scaled = t((t(X.IBS) - attributes(X.IBS)[["scaled:center"]]) / attributes(X.IBS)[["scaled:scale"]]);
r.IBS = cor(X.IBS)
N.IBS = nrow(X.IBS)
X.CLM = susieR:::set_X_attributes(X.CLM)
X.CLM_scaled = t((t(X.CLM) - attributes(X.CLM)[["scaled:center"]]) / attributes(X.CLM)[["scaled:scale"]]);
r.CLM = cor(X.CLM)
N.CLM = nrow(X.CLM)
# Distances between the GBR LD matrix and the other two populations':
# spectral (2-)norm and maximum absolute elementwise difference.
r.IBS.2dist = Matrix::norm(r.sample - r.IBS, type='2')
r.CLM.2dist = Matrix::norm(r.sample - r.CLM, type='2')
r.IBS.Mdist = max(abs(r.sample - r.IBS))
r.CLM.Mdist = max(abs(r.sample - r.CLM))
|
23d2964841c094e88ff823e2ce1593c756d4b910 | 753e3ba2b9c0cf41ed6fc6fb1c6d583af7b017ed | /service/paws.shield/man/delete_protection.Rd | 7fa4f870fd9257b940db08242107d2ef2b13fcc9 | [
"Apache-2.0"
] | permissive | CR-Mercado/paws | 9b3902370f752fe84d818c1cda9f4344d9e06a48 | cabc7c3ab02a7a75fe1ac91f6fa256ce13d14983 | refs/heads/master | 2020-04-24T06:52:44.839393 | 2019-02-17T18:18:20 | 2019-02-17T18:18:20 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | true | 516 | rd | delete_protection.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/paws.shield_operations.R
\name{delete_protection}
\alias{delete_protection}
\title{Deletes an AWS Shield Advanced Protection}
\usage{
delete_protection(ProtectionId)
}
\arguments{
\item{ProtectionId}{[required] The unique identifier (ID) for the Protection object to be deleted.}
}
\description{
Deletes an AWS Shield Advanced Protection.
}
\section{Accepted Parameters}{
\preformatted{delete_protection(
ProtectionId = "string"
)
}
}
|
4ef55390e199e025483923e2e8eacf4a19ff05ac | 1e6f64fb9f3adcf2f78b5d53a1f26fd11c7ee0ac | /heatmap_RPSS.R | b169a7c636921ad811ba69162dc6c69cffe03453 | [] | no_license | bigtiger94/droughtPrediction | 30fac36a49e6042d52263c474fbfe7673fcda7fb | e52368e65f7998158934c29df629b154ac616033 | refs/heads/master | 2022-11-27T12:17:54.984625 | 2020-08-02T11:19:20 | 2020-08-02T11:19:20 | 284,443,046 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 3,999 | r | heatmap_RPSS.R | ### draw heatmap
dindexname = "sri3"
bsnnames = c("soyang", "daecheong", "andong", "sumjin", "chungju", "hapcheon", "namgang", "imha")
bsncodes = c(1012, 3008, 2001, 4001, 1003, 2015, 2018, 2002)
period.irr = c(4:9)
period.nonirr = setdiff(c(1:12), period.irr)
bsncase = 1
imgsavepath = file.path("./predictResult", "RPSSanal")
for (bsncase in c(1:length(bsncodes))){
bsnname = bsnnames[bsncase]
RPSS_bsn = read.csv(file.path("./predictResult", paste0("RPSS_",dindexname,"_",bsnname,".csv")))
if (bsncase==1) {
RPSS_all.df = data.frame(BSN = toupper(bsnname), Case = RPSS_bsn$GROUP, RPSS = RPSS_bsn$all)
RPSS_irr.df = data.frame(BSN = toupper(bsnname), Case = RPSS_bsn$GROUP, RPSS = RPSS_bsn$irrigation)
RPSS_nonirr.df = data.frame(BSN = toupper(bsnname), Case = RPSS_bsn$GROUP, RPSS = RPSS_bsn$non.irrigation)
} else{
RPSS_all.df = rbind(RPSS_all.df, data.frame(BSN = toupper(bsnname), Case = RPSS_bsn$GROUP, RPSS = RPSS_bsn$all))
RPSS_irr.df = rbind(RPSS_irr.df, data.frame(BSN = toupper(bsnname), Case = RPSS_bsn$GROUP, RPSS = RPSS_bsn$irrigation))
RPSS_nonirr.df = rbind(RPSS_nonirr.df, data.frame(BSN = toupper(bsnname), Case = RPSS_bsn$GROUP, RPSS = RPSS_bsn$non.irrigation))
}
}
heatmapname = paste0("RPSS_all")
heatmapplot = ggplot(RPSS_all.df, aes(x=BSN, y=Case)) + geom_tile(aes(fill=RPSS)) + theme_bw() + ylab("") +ggtitle("")+xlab("")+
scale_x_discrete(position = "top")+
scale_y_discrete(limits = rev(unique(RPSS_all.df$Case))) +
scale_fill_gradientn(colours=c("red", "white", "blue"),
limits = c(-1, 1),
breaks =c(-1, 0, 1),
labels=c(-1, 0, 1)) +
guides(fill = guide_colourbar(barwidth = 1, barheight = 15)) +
theme(axis.text = element_text(color="black", face='bold', size=10),
axis.text.x = element_text(angle = 45, hjust = 0))
ggsave(heatmapplot, filename = paste0(heatmapname,"_",dindexname, ".png"), path=imgsavepath)
heatmapname = paste0("RPSS_irrigation")
heatmapplot = ggplot(RPSS_irr.df, aes(x=BSN, y=Case)) + geom_tile(aes(fill=RPSS)) + theme_bw() + ylab("") +ggtitle("")+xlab("")+
scale_x_discrete(position = "top")+
scale_y_discrete(limits = rev(unique(RPSS_irr.df$Case))) +
scale_fill_gradientn(colours=c("red", "white", "blue"),
limits = c(-1, 1),
breaks =c(-1, 0, 1),
labels=c(-1, 0, 1)) +
guides(fill = guide_colourbar(barwidth = 1, barheight = 15)) +
theme(axis.text = element_text(color="black", face='bold', size=10),
axis.text.x = element_text(angle = 45, hjust = 0))
ggsave(heatmapplot, filename = paste0(heatmapname,"_",dindexname, ".png"), path=imgsavepath)
# Heat map of RPSS for non-irrigation demand: basins on x, cases on y,
# diverging red-white-blue fill over [-1, 1], saved as a PNG in imgsavepath.
rpss_nonirr_map <- ggplot(RPSS_nonirr.df, aes(x = BSN, y = Case)) +
  geom_tile(aes(fill = RPSS)) +
  theme_bw() +
  xlab("") + ylab("") + ggtitle("") +
  scale_x_discrete(position = "top") +
  scale_y_discrete(limits = rev(unique(RPSS_nonirr.df$Case))) +
  scale_fill_gradientn(colours = c("red", "white", "blue"),
                       limits = c(-1, 1),
                       breaks = c(-1, 0, 1),
                       labels = c(-1, 0, 1)) +
  guides(fill = guide_colourbar(barwidth = 1, barheight = 15)) +
  theme(axis.text = element_text(color = "black", face = "bold", size = 10),
        axis.text.x = element_text(angle = 45, hjust = 0))
ggsave(rpss_nonirr_map,
       filename = paste0("RPSS_non-irrigation", "_", dindexname, ".png"),
       path = imgsavepath)
# Mean RPSS per case across all basins, for each demand category.
# (The summary column keeps its default name "mean(RPSS)", which the
# commented-out export code below refers to.)
RPSSmean_all <- RPSS_all.df %>%
  group_by(Case) %>%
  summarise(mean(RPSS))
RPSSmean_irr <- RPSS_irr.df %>%
  group_by(Case) %>%
  summarise(mean(RPSS))
RPSSmean_nonirr <- RPSS_nonirr.df %>%
  group_by(Case) %>%
  summarise(mean(RPSS))
# data.frame(All=RPSSmean_all$`mean(RPSS)`, Irrigation=RPSSmean_irr$`mean(RPSS)`, Nonirrigation=RPSSmean_nonirr$`mean(RPSS)`) %>%
# 'rownames<-'(as.vector(RPSSmean_all$Case)) %>%
# write.csv(., file.path(imgsavepath, paste0("Avg_RPSS_",dindexname,".csv")))
# RPSS_all.df
# graphics.off()
# |
22c07ff3cbac88d5afd9b3ce1aedf95141b2832d | 41bdfc76822961af95a4e185f289e9bd81282d12 | /Plot1.R | 5acf0767643dc524927d19635a7ba262bf800192 | [] | no_license | greeng/ExData_Plotting1 | 4141be8664f4aeb26d79b184444d721453bbb9c8 | 09e1ee1f6aad296d396b1ced38f6e1c01bdd056e | refs/heads/master | 2021-01-21T16:53:21.260605 | 2014-12-06T07:46:52 | 2014-12-06T07:46:52 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,687 | r | Plot1.R | # Exploratory Data Analysis: Course Project 1
# Plot 1: histogram of Global Active Power for 2007-02-01 and 2007-02-02.
################################################
# INSTALL data.table package to your library
# DATA FILE "household_power_consumption.txt" must be in the working directory
################################################
library(data.table)
# Beginning date of the data of interest is 1/2/2007 (d/m/Y). Passing this
# string as `skip` makes fread() jump to the first line containing it.
bd <- "1/2/2007"
# Number of rows to read: days * 24 hr/day * 60 min/hr (one reading per
# minute). In this application we are gathering 2 days of data.
nr <- 2 * 24 * 60
# Read only the two days of interest; "?" and "" mark missing values.
sub_hpc <- fread("household_power_consumption.txt",
                 sep=";",
                 colClasses = c("character", "character", "numeric", "numeric",
                                "numeric", "numeric", "numeric", "numeric", "numeric"),
                 na.strings = c("?",""),
                 skip = bd,
                 nrows = nr,
                 data.table = FALSE)
# Recover the column names from the file's header row (skipped above).
colnames(sub_hpc) <- colnames(fread("household_power_consumption.txt",
                                    nrows = 0,
                                    data.table = FALSE))
# Combine date and time into one column and convert to a date-time variable.
datetime <- strptime(paste(sub_hpc[,1], sub_hpc[,2],
                           sep = " "),
                     format = "%d/ %m/ %Y %H:%M:%S")
# Assemble the plotting data: date-time plus the measurement columns.
data <- cbind(datetime, sub_hpc[,-(1:2)])
# Plot 1: red histogram of global active power, 480x480 PNG.
png(filename = "plot1.png",
    width = 480, height = 480, units = "px")
par(mar = c(4,4,4,4))
hist(data$Global_active_power,
     main = "Global Active Power",
     breaks = 12,
     xlab = "Global Active Power (kilowatts)",
     ylab = "Frequency",
     col = "red")
# Fix: close the PNG device so the file is flushed and written to disk.
# Without dev.off() the device stays open and plot1.png is left incomplete.
dev.off()
|
a8475d04ae454ea4e74e3b86ae14c6f691752468 | ea7e11caa009a40a159633499265c5ea4a55344d | /man/evaluate_trt_spec_pmf_eif.Rd | a068379e514d2733589b24f3c007751c4d077e9b | [
"MIT"
] | permissive | benkeser/drord | 9d0f58da939346ab4d25cf5ca3e6e6678f8a32fd | 112d8041c7019759f19d0732955e7b80fbae7f52 | refs/heads/master | 2023-04-30T10:19:42.794853 | 2021-05-21T13:31:42 | 2021-05-21T13:31:42 | 255,166,065 | 3 | 2 | null | null | null | null | UTF-8 | R | false | true | 934 | rd | evaluate_trt_spec_pmf_eif.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/logodds_fn.R
\name{evaluate_trt_spec_pmf_eif}
\alias{evaluate_trt_spec_pmf_eif}
\title{Get a matrix of eif estimates for treatment-specific PMF}
\usage{
evaluate_trt_spec_pmf_eif(
trt_spec_pmf_est,
trt_spec_prob_est,
trt_level,
treat,
out,
out_levels
)
}
\arguments{
\item{trt_spec_pmf_est}{Estimated conditional PMF for \code{trt_level}.}
\item{trt_spec_prob_est}{Estimated propensity for \code{trt_level}.}
\item{trt_level}{Treatment level}
\item{treat}{A \code{numeric} vector containing treatment status. Should only assume
a value 0 or 1.}
\item{out}{A \code{numeric} vector containing the outcomes. Missing outcomes are
allowed.}
\item{out_levels}{A \code{numeric} vector containing all ordered levels of the
outcome.}
}
\value{
a matrix of EIF estimates
}
\description{
Get a matrix of eif estimates for treatment-specific PMF
}
|
5fe0ceb4388b53b7ae6db71a45d7570e715e1363 | 9d5e46f25112a9f2b8486a4210b2d66da093e2f7 | /test1.R | 44977cd72ca035f2833fcd3425c903847d36fa19 | [
"BSD-2-Clause",
"BSD-2-Clause-Views",
"BSD-3-Clause"
] | permissive | anhinga/r-to-clojure | b1c51ab5a2320c89f0432120496a35033cfc8036 | ca3a3f5f293c5c936feaaa392cfa970ec1d7ba67 | refs/heads/master | 2020-06-08T05:55:54.170690 | 2012-01-18T03:43:10 | 2012-01-18T03:43:10 | 2,699,658 | 1 | 0 | null | null | null | null | UTF-8 | R | false | false | 103 | r | test1.R | test1 <- function(x, exp) {
# Computes x raised to the integer power `exp` by repeated multiplication.
# Side effect: every step assigns the running product to `res` in the
# enclosing (global) environment via <<- — presumably deliberate here as a
# translation test case for the <<- construct; verify before "fixing".
# NOTE(review): `1:exp` counts downward when exp <= 0, so this assumes
# exp >= 1 — TODO confirm with callers.
res <<- 1.0
for (i in 1:exp) {
res <<- res * x
}
return(res)
}
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.