blob_id stringlengths 40 40 | directory_id stringlengths 40 40 | path stringlengths 2 327 | content_id stringlengths 40 40 | detected_licenses listlengths 0 91 | license_type stringclasses 2 values | repo_name stringlengths 5 134 | snapshot_id stringlengths 40 40 | revision_id stringlengths 40 40 | branch_name stringclasses 46 values | visit_date timestamp[us]date 2016-08-02 22:44:29 2023-09-06 08:39:28 | revision_date timestamp[us]date 1977-08-08 00:00:00 2023-09-05 12:13:49 | committer_date timestamp[us]date 1977-08-08 00:00:00 2023-09-05 12:13:49 | github_id int64 19.4k 671M ⌀ | star_events_count int64 0 40k | fork_events_count int64 0 32.4k | gha_license_id stringclasses 14 values | gha_event_created_at timestamp[us]date 2012-06-21 16:39:19 2023-09-14 21:52:42 ⌀ | gha_created_at timestamp[us]date 2008-05-25 01:21:32 2023-06-28 13:19:12 ⌀ | gha_language stringclasses 60 values | src_encoding stringclasses 24 values | language stringclasses 1 value | is_vendor bool 2 classes | is_generated bool 2 classes | length_bytes int64 7 9.18M | extension stringclasses 20 values | filename stringlengths 1 141 | content stringlengths 7 9.18M |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
cc3dac3fdddaa2ea46fdf2e756371f0e5e7f11be | 66265f769bd0f86dfad76eb3441cbf65f152137c | /inst/doc/control_table_keys.R | 519acdeb04e423fa2efb93d45e88ab627999c532 | [] | no_license | cran/cdata | e2a43f374d408c9fe4780a2e277617d465f3b7c0 | 31a375a6ae01fafde6f12577692f200e8e2deba6 | refs/heads/master | 2023-09-04T14:37:13.870002 | 2023-08-20T00:02:32 | 2023-08-20T01:29:33 | 86,578,768 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,909 | r | control_table_keys.R | ## ----setup, include = FALSE---------------------------------------------------
# knitr chunk options for the rendered vignette: collapse source and output
# into one block and prefix output lines with "#>".
knitr::opts_chunk$set(
  collapse = TRUE,
  comment = "#>"
)
## -----------------------------------------------------------------------------
# Wide ("row record") example data: two iris-style rows plus an explicit id
# column so every record stays identifiable after reshaping.
library("cdata")
d <- wrapr::build_frame(
   "Sepal.Length" , "Sepal.Width", "Petal.Length", "Petal.Width", "Species" |
   5.1            , 3.5          , 1.4           , 0.2          , "setosa" |
   4.9            , 3            , 1.4           , 0.2          , "setosa" )
d$id <- seq_len(nrow(d))
knitr::kable(d)
## -----------------------------------------------------------------------------
# Expected long ("block record") form: one row per id x Part x Measure pair,
# i.e. the control table uses TWO key columns instead of the usual one.
expect <- wrapr::build_frame(
   "id" , "Species", "Part" , "Measure", "Value" |
   1L   , "setosa" , "Sepal", "Length" , 5.1     |
   1L   , "setosa" , "Sepal", "Width"  , 3.5     |
   1L   , "setosa" , "Petal", "Length" , 1.4     |
   1L   , "setosa" , "Petal", "Width"  , 0.2     |
   2L   , "setosa" , "Sepal", "Length" , 4.9     |
   2L   , "setosa" , "Sepal", "Width"  , 3       |
   2L   , "setosa" , "Petal", "Length" , 1.4     |
   2L   , "setosa" , "Petal", "Width"  , 0.2     )
knitr::kable(expect)
## -----------------------------------------------------------------------------
# Control table mapping each (Part, Measure) key pair to a source column; the
# transform spec records both the control-table keys and the record keys.
control_table <- wrapr::qchar_frame(
   "Part" , "Measure", "Value"      |
   "Sepal", "Length" , Sepal.Length |
   "Sepal", "Width"  , Sepal.Width  |
   "Petal", "Length" , Petal.Length |
   "Petal", "Width"  , Petal.Width  )
layout <- rowrecs_to_blocks_spec(
  control_table,
  controlTableKeys = c("Part", "Measure"),
  recordKeys = c("id", "Species"))
print(layout)
## -----------------------------------------------------------------------------
# Apply the layout with wrapr's dot-arrow pipe: row records -> block records.
res <- d %.>% layout
knitr::kable(res)
## -----------------------------------------------------------------------------
# t() of a transform spec is its inverse; piping through it should round-trip
# the data back to the original row-record form.
inv_layout <- t(layout)
print(inv_layout)
back <- res %.>% inv_layout
knitr::kable(back)
|
3d8fbdfd22562a80ea5b5976769224b2dd912b2d | f23fd97b4ba3c63ffbf393d197ff0aa22dbf99fe | /man/AutocorPlot.Rd | dcdb2e338a22b8ba9eb5f1de0f2722035f5663fc | [
"MIT"
] | permissive | Spiritspeak/skMisc | c8c3079faed0231b3b01acb0e208169e3bd7133d | 886ce8f889e8bf140808e0a895bef4fe7d1bb5de | refs/heads/master | 2022-09-17T00:57:20.264458 | 2022-09-14T11:02:40 | 2022-09-14T11:02:40 | 209,576,540 | 0 | 0 | null | null | null | null | UTF-8 | R | false | true | 590 | rd | AutocorPlot.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/skPlotting.R
\name{AutocorPlot}
\alias{AutocorPlot}
\title{Per-subject Autocorrelation Plotting}
\usage{
AutocorPlot(ds, ppvar, rtvar, scope = 64)
}
\arguments{
\item{ds}{a dataset}
\item{ppvar}{name of the variable indicating participant ID}
\item{rtvar}{name of the variable indicating reaction time}
\item{scope}{numeric, the maximum lag at which to compute autocorrelation.}
}
\description{
Per-subject Autocorrelation Plotting
}
\examples{
AutocorPlot(ds=ToothGrowth,ppvar="supp",rtvar="len",scope=10)
}
|
0963546777327ea974bf74176cf1378f57c98720 | 1b7082f868984a82186d26548934c05c497c8684 | /beginning.r | 0e6188618e325b47422c4e3f486e06e6bb072293 | [] | no_license | hjulias/tinyhouse | 50a1533cbb55a8bcdf3c98dc16a07c9112875b04 | 13185ac56b360a89f729b7837f800fe0ae520557 | refs/heads/master | 2022-11-22T08:04:57.843643 | 2020-07-12T15:52:31 | 2020-07-12T15:52:31 | 276,779,865 | 1 | 0 | null | null | null | null | UTF-8 | R | false | false | 6,108 | r | beginning.r | #Organizando dados de "houses_to_rent_v2"
# Pull working copies of the columns of interest out of houses_to_rent_v2
# (assumed to be already loaded in the session -- it is not read here).
andares <- houses_to_rent_v2$floor
cidadesv2 <- houses_to_rent_v2$city
area_casa <- houses_to_rent_v2$area
# Check for inconsistencies and fix them.
# NOTE(review): this only fixes the local copy 'andares'; houses_to_rent_v2$floor
# itself keeps the "-" values, and 'andares' is never used again below --
# confirm this is intended.
andares[andares== "-"] <- 0 # Replace "-" in the "floor" column with 0, meaning no floor has been built yet.
summary(houses_to_rent_v2[,])
# Bar chart of the number of houses per city; the text() calls below place each
# city's count at hard-coded (x, y) positions above its bar.
barplot(table(cidadesv2), xlab = "Cidades", ylab = "N° de Casas", col = c("cyan3" , "cyan1", "cyan2", "cyan4", "blue"), main = "Quantidade de casas por cidade", ylim = c(0,6000))
text(0.7, 900, table(cidadesv2[cidadesv2 == "Belo Horizonte"]))
text(1.9, 600, table(cidadesv2[cidadesv2 == "Campinas"]))
text(3.1, 900, table(cidadesv2[cidadesv2 == "Porto Alegre"]))
text(4.3, 1300, table(cidadesv2[cidadesv2 == "Rio de Janeiro"]))
text(5.5, 5000, table(cidadesv2[cidadesv2 == "São Paulo"]))
# Assigning the São Paulo data to SP simplifies reading and writing the code.
SP <- cidadesv2[cidadesv2 == "São Paulo"]
area_casaSP <- area_casa[cidadesv2 == "São Paulo"] # areas of all houses located in São Paulo.
# now we will see how many of these houses have area = 27 m²: table(SP[area_casa == 27])
reducedSP <- data.frame(table(area_casaSP))
x <- reducedSP[,2] # counts (second column of reducedSP)
y <- as.numeric(levels(reducedSP[,1])) # areas (first column of reducedSP); converted from factor to numeric.
# Scatter plots of number of houses vs area in São Paulo (full range + zoom).
# NOTE(review): the x==9 & y==27 test highlights the 27 m² point only if its
# count is exactly 9 -- verify against the data.
par(mfrow = c(1,2), mar=c(3,3,2,2), oma=c(3,3,2,2))
plot(x,y, col = ifelse(x==9 & y == 27, "red", "black"), main = "Quantidade de casas por área em São Paulo")
plot(x,y,xlim = c(0,1000), ylim = c(0,1000), col = ifelse(x==9 & y == 27, "red", "black"), main = "Quantidade de casas por área em São Paulo (ampliado x25)")
mtext(side=1, text="N° de Casas", outer=T)
mtext(side=2, text="Área em m²", outer=T)
# Further-zoomed versions that highlight the 27 m² houses.
par(mfrow = c(1,2), mar=c(3,3,2,2), oma=c(3,3,2,2))
plot(x,y,xlim = c(0,100), ylim = c(0,100), col = ifelse(x==9 & y == 27, "red", "black"), main = "Quantidade de casas por área em São Paulo (ampliado x250)")
plot(x,y,xlim = c(0,30), ylim = c(0,30), col = ifelse(x==9 & y == 27, "red", "black"), main = "Quantidade de casas por área em São Paulo (ampliado x833)")
mtext(side=1, text="N° de Casas", outer=T)
mtext(side=2, text="Área em m²", outer=T)
legend("bottomright", legend="Casas com 27m²", bty="n", fill = "red")
# Build the "sphouses" data frame: one row per São Paulo house, starting from
# the house areas and adding one column of interest at a time.
sphouses <- data.frame(area_casaSP)
# Add the "Quartos" (rooms) column
quartos <- houses_to_rent_v2$rooms[houses_to_rent_v2$city == "São Paulo"]
sphouses["Quartos"] <-quartos
# Add the "Banheiros" (bathrooms) column
banheiros <- houses_to_rent_v2$bathroom[houses_to_rent_v2$city == "São Paulo"]
sphouses["Banheiros"] <- banheiros
# Add the "Vagas" (parking spaces) column
vagas <- houses_to_rent_v2$parking.spaces[houses_to_rent_v2$city == "São Paulo"]
sphouses["Vagas"] <- vagas
# Add the "Hoa" (homeowners association fee) column
hoa <- houses_to_rent_v2$hoa..R..[houses_to_rent_v2$city == "São Paulo"]
sphouses["Hoa"] <- hoa
# Add the "Rent Amount" column
rentamount <- houses_to_rent_v2$rent.amount..R..[houses_to_rent_v2$city == "São Paulo"]
sphouses["Rent Amount"] <- rentamount
# Add the "Property Tax" column
propertytax <- houses_to_rent_v2$property.tax..R..[houses_to_rent_v2$city == "São Paulo"]
sphouses["Property Tax"] <- propertytax
# Add the "Fire Insurance" column
fireinsur <- houses_to_rent_v2$fire.insurance..R..[houses_to_rent_v2$city == "São Paulo"]
sphouses["Fire Insurance"] <- fireinsur
# Add the "Total" column
total <- houses_to_rent_v2$total..R..[houses_to_rent_v2$city == "São Paulo"]
sphouses["Total"] <- total
# Look for 27 m² houses with 2 bedrooms (result printed, not stored)
sphouses[sphouses$area_casaSP == 27 & sphouses$Quartos == 2,]
# Narrow the number of rooms: 27 m², 1 bedroom, 1 bathroom, 1 parking spot
potentialth <- sphouses[sphouses$area_casaSP == 27 & sphouses$Quartos == 1 & sphouses$Banheiros == 1 & sphouses$Vagas== 1,]
print(potentialth)
# COSTS
# First row of "potentialth": only the columns related to total costs
# (columns 5-9: Hoa, Rent Amount, Property Tax, Fire Insurance, Total).
casa1176 <- (potentialth[1,c(5,6,7,8,9)])
# Assign each column value to its own variable
casa1cond <- casa1176[,1]
casa1alug <- casa1176[,2]
casa1predial <- casa1176[,3]
casa1seg <- casa1176[,4]
casa1total <- casa1176[,5]
# Each cost component as a share of the total
vector1 <- c(casa1cond/casa1total, casa1alug/casa1total, casa1predial/casa1total, casa1seg/casa1total)
# Pie chart of the cost shares (labels are hard-coded percentages).
dev.off()
pie(vector1, main = "Composição dos Custos Totais: Casa 1176", labels = c("24,7%", "72,1%", "2,2%", "0,9%"), col = c("mediumorchid1", "cadetblue3", "darkgreen", "gray80"))
# NOTE(review): "gray80 " below carries a trailing space -- likely not a valid
# colour name; confirm and drop the space if so.
legend("bottomright", fill = c("mediumorchid1", "cadetblue3", "darkgreen", "gray80 "),legend = c("Condomínio R$", "Aluguel R$", "Contribuição Predial R$", "Seguro Incêndio R$"),cex = 0.5, pt.cex = 1.5)
# Same for the second row of "potentialth" (house 2182).
casa2182 <- (potentialth[2,c(5,6,7,8,9)])
# Assign each column value to its own variable
casa2cond <- casa2182[,1]
casa2alug <- casa2182[,2]
casa2predial <- casa2182[,3]
casa2seg <- casa2182[,4]
casa2total <- casa2182[,5]
# Each cost component as a share of the total
vector2 <- c(casa2cond/casa2total, casa2alug/casa2total, casa2predial/casa2total, casa2seg/casa2total)
# Pie chart for house 2182
pie(vector2, main = "Composição dos Custos Totais: Casa 2182", labels = c("", "98, 7%", "", "1,2%"), col = c("mediumorchid1", "cadetblue3", "darkgreen", "gray80"))
legend("bottomright", fill = c("mediumorchid1", "cadetblue3", "darkgreen", "gray80 "), legend = c("Condomínio R$", "Aluguel R$", "Contribuição Predial R$", "Seguro Incêndio R$"),cex = 0.5, pt.cex = 1.5)
|
2a9a83f8b320cf18fab66e445402a7863c69b984 | e382ada498a814fad78b36f5298108d121edc71c | /tests/testthat/test-imputer.R | a56370c0ff0b8546a09b987feba28bc70df1265f | [
"MIT"
] | permissive | syberia/syberiaMungebits2 | 58e2797f3360ff6ea83bd5281368ad242a9ed780 | 06f013cf0698d1e8c521324161c602a0448bd154 | refs/heads/master | 2021-01-22T10:02:13.851098 | 2017-05-19T19:26:10 | 2017-05-19T19:26:10 | 55,318,078 | 3 | 1 | null | 2017-09-30T15:46:49 | 2016-04-02T21:56:16 | R | UTF-8 | R | false | false | 3,414 | r | test-imputer.R | context("imputer")
# Build a trained imputer mungebit together with the data it was trained on:
# iris with its first row blanked to NA, then run on the two sepal columns so
# the bit learns (and fills) their medians. Returns list(bit, trained data).
setup_imputation_mungebit <- function() {
  bit <- imputer()
  dat <- iris
  dat[1, ] <- NA
  dat <- bit$run(dat, c("Sepal.Length", "Sepal.Width"))
  list(bit, dat)
}
# Named medians of the first two columns over rows 2-150 (skipping the NA'd
# first row of an iris-sized frame), NAs removed, as a numeric vector.
medians <- function(dataset) {
  vapply(dataset[2:150, 1:2], median, numeric(1), na.rm = TRUE)
}
# Training: running the bit on iris with row 1 NA'd must fill row 1's sepal
# cells with the medians of the remaining rows, and the bit must record one
# stored input per imputed column.
test_that("it imputes a column in a dataframe correctly", {
  x <- setup_imputation_mungebit()
  mb <- x[[1]]; iris2 <- x[[2]]
  expect_equal(medians(iris), unlist(iris2[1, 1:2]))
  # Ignore starred attributes for now
  expect_equal(length(grep("^[^*].*[^*]?", names(mb$.input))), 2,
    info = paste0("Expecting imputer mungebit to store inputs for 2 columns.",
                  " Did you set mutating = TRUE ",
                  " when defining the column_transformation?"))
})
# Prediction: a freshly NA'd one-row frame pushed through the already-trained
# bit must be filled with the medians learned during training.
test_that("it restores an imputed column correctly", {
  . <- setup_imputation_mungebit()
  mb <- .[[1]]; iris2 <- .[[2]]
  iris2[1, ] <- NA
  iris2 <- iris2[1, , drop = FALSE]
  iris2 <- mb$run(iris2, c("Sepal.Length", "Sepal.Width"))
  # make sure same medians get restored when predicting
  expect_equal(medians(iris), unlist(iris2[1, 1:2]),
    info = paste0("The imputer mungebit must be able to restore medians using ",
                  "the trained mungebit"))
})
# Columns may be selected by a predicate function; training fixes which
# columns the bit touches (cols 1:2 here), so a later frame with extra NA
# columns (3:4) must not get those columns imputed.
test_that("it can handle imputation with a function column specifier", {
  iris2 <- iris
  mb <- imputer()
  iris2[1, 1:2] <- NA
  iris2 <- mb$run(iris2, function(x) is.numeric(x) && sum(is.na(x)) > 0)
  iris2 <- iris
  iris2[1, ] <- NA
  out <- try(iris2 <- mb$run(iris2, function(x) is.numeric(x) && sum(is.na(x)) > 0))
  expect_false(is(out, "try-error"), info = "There should not have been any warnings thrown")
  # make sure same medians get restored when predicting
  expect_equal(medians(iris), unlist(iris2[1, 1:2]),
    info = paste0("The imputer mungebit must be able to restore medians using ",
                  "the trained mungebit"))
  expect_identical(c(NA_real_, NA_real_), unname(unlist(iris2[1, 3:4])),
    info = paste0("The imputer mungebit must not restore inappropriate columns"))
})
# Factor columns are imputed with the mode learned at training time ("B").
test_that("it can impute factors (base case)", {
  # make a data.frame
  df <- data.frame(x=1:3, y=factor(c('A','B','B')))
  # train it
  mb <- imputer()
  df <- mb$run(df)
  # run it on a data.frame with a missing value
  df[1,2] <- NA
  df2 <- mb$run(df)
  # check that it works in the simplest case
  expect_identical(as.character(df2$y), c('B','B','B'), "Failed to impute")
})
# With a tie between modes ("A", "B", "C" all appear once) the first mode
# ("A") must be used deterministically.
test_that("for imputing factors it will take the first mode when there are more than one", {
  # make a data.frame
  df <- data.frame(x=1:3, y=factor(c('A','B','C')))
  mb <- imputer()
  # train it
  df <- mb$run(df)
  # run it on a data.frame with a missing value
  df[3,2] <- NA
  df <- mb$run(df)
  # check that it imputes
  expect_identical(as.character(df$y), c('A','B','A'), "Fails when there are multiple modes")
})
# Prediction data that is all-NA (its factor carries no levels from training)
# must still be imputed with the trained mode.
test_that("it can impute new levels that the validation data.frame has not seen before", {
  # make a data.frame
  df <- data.frame(x=1:3, y=factor(c('A','B','A')))
  # train it
  mb <- imputer()
  df <- mb$run(df)
  # run it on a data.frame with a missing value
  df <- data.frame(x=1:3, y=factor(c(NA, NA, NA)))
  df <- mb$run(df)
  # check that it imputes
  expect_identical(as.character(df$y), c('A','A','A'),
    "Fails when there is a new level in the factor to be imputed")
})
|
aa8f40eb39743957c8a7e29011f539ac72614f56 | 2a6d2e96b56e0d419354f203c90857f3d000ef31 | /man/mudata.Rd | bab2bde1716463418691542a853dbf264e74da67 | [] | no_license | cran/mudata | f4cbd1991834077128f7fa8baad4e6332c6c332a | 5c38cb9b3c52e9039f0c7afce9d9bb368850f22b | refs/heads/master | 2021-01-21T06:46:42.175576 | 2017-11-13T04:39:52 | 2017-11-13T04:39:52 | 83,280,356 | 0 | 0 | null | null | null | null | UTF-8 | R | false | true | 3,096 | rd | mudata.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/mudata.R
\name{mudata}
\alias{mudata}
\title{Create a MuData object}
\usage{
mudata(data, locations = NULL, params = NULL, datasets = NULL,
columns = NULL, dataset.id = "default", location.id = "default",
defactorize = TRUE, validate = TRUE, expand.tags = TRUE,
retype = FALSE)
}
\arguments{
\item{data}{The data table, which is a molten data frame containing the columns (at least)
'dataset', 'location', 'x', 'param', and 'value'. The 'dataset' column can be omitted
if there is only one dataset contained in the object (its name can be specified by
passing the parameter \code{dataset.id}). The 'location' column can be omitted if
there is only data for one dataset and one location (its name can be specified by
passing the parameter \code{location.id}).}
\item{locations}{The locations table, which is a data frame containing the columns (at least)
'dataset', and 'location'. If omitted, it will be created automatically using all unique
dataset/location combinations.}
\item{params}{The params table, which is a data frame containing the columns (at least)
'dataset', and 'param'. If omitted, it will be created automatically using all unique
dataset/param combinations.}
\item{datasets}{The datasets table, which is a data frame containing the column (at least)
'dataset'. If omitted, it will be generated automatically using all unique datasets.}
\item{columns}{The columns table, which is a data frame containing the columns (at least)
'dataset', 'table', and 'column'. If omitted, it will be created automatically using
all dataset/table/column combinations.}
\item{dataset.id}{The dataset id to use if the datasets table is omitted.}
\item{location.id}{The location id if the locations table is omitted.}
\item{defactorize}{Pass \code{FALSE} to suppress coercion of 'dataset', 'location', and 'param'
columns to type 'character'.}
\item{validate}{Pass \code{FALSE} to skip validation of input tables.}
\item{expand.tags}{Pass \code{FALSE} to collapse non-required columns to a single column
(called 'tags'), with key/value pairs in JSON format. See \link{expand.tags}.}
\item{retype}{Pass \code{TRUE} to retype columns based on the 'type' column of the 'columns'
table. This is useful when reading data from disk, where date/time columns may be stored
as text.}
}
\value{
A \code{mudata} object
}
\description{
Create an object describing multi-parameter spatiotemporal data in the (mostly) universal
data format. This format is a collection of tables as described below. For an example
of data already in this format, see the \link{kentvillegreenwood} dataset.
}
\examples{
library(reshape2)
library(dplyr)
data(pocmaj)
# melt data and summarise replicates
datatable <- pocmaj \%>\%
melt(id.vars=c("core", "depth"), variable.name="param") \%>\%
group_by(core, param, depth) \%>\%
summarise(sd=mean(value), value=mean(value)) \%>\%
rename.cols("depth"="x", "core"="location")
# create mudata object
md <- mudata(datatable)
summary(md)
plot(md, yvar="x", geom=c("path", "point"))
}
|
3fccbb91a71ae46730a22ef2334370a33f5ee76b | 72d9009d19e92b721d5cc0e8f8045e1145921130 | /SuperGauss/tests/testthat/test-Toeplitz-solve.R | b86df569d82030b476f1d5023c0c6cbbf2f6cf7d | [] | no_license | akhikolla/TestedPackages-NoIssues | be46c49c0836b3f0cf60e247087089868adf7a62 | eb8d498cc132def615c090941bc172e17fdce267 | refs/heads/master | 2023-03-01T09:10:17.227119 | 2021-01-25T19:44:44 | 2021-01-25T19:44:44 | 332,027,727 | 1 | 0 | null | null | null | null | UTF-8 | R | false | false | 3,267 | r | test-Toeplitz-solve.R | source("SuperGauss-testfunctions.R")
context("Toeplitz - Solve.")
# Number of randomized repetitions per test.
nrep <- 10
# The generalized Schur solver must agree with base R's dense solve() on
# random fBM / Matern autocovariances, for a matrix or vector RHS (b = TRUE)
# and for the full inverse when no RHS is given (b = FALSE).
test_that("GSchur solve method gives correct result.", {
  replicate(n = nrep, expr = {
    # Random problem size and RHS width; RHS is sometimes dropped to a vector.
    N <- round(abs(rnorm(n = 1, mean = 100, sd = 10)))
    d <- sample(1:3, 1)
    Toep <- Toeplitz$new(N)
    case.par <- expand.grid(type = c("fbm", "matern"), b = c(TRUE, FALSE))
    ncase <- nrow(case.par)
    X <- matrix(rnorm(N * d), N, d)
    if(runif(1) > .5) X <- drop(X)
    for(ii in 1:ncase){
      cp <- case.par[ii, ]
      type <- as.character(cp$type)
      acf <- test_acf_func(N, type)
      Tmat <- toeplitz(acf)
      # Reference answer from the dense solver.
      Z <- solve(Tmat, X)
      Toep$set_acf(acf)
      if(cp$b) {
        expect_equal(solve(Toep, X), Z, tolerance = 1e-5)
        expect_equal(Toep$solve(X), Z, tolerance = 1e-5)
      } else {
        # Without an RHS the result is the inverse, so T %*% T^{-1} = I.
        expect_equal(Tmat %*% solve(Toep), diag(N), tolerance = 1e-5)
        expect_equal(Tmat %*% Toep$solve(), diag(N), tolerance = 1e-5)
      }
    }
  })
})
nrep <- 10
# The preconditioned conjugate-gradient solver, run to a tight tolerance,
# must match the dense solve() both via the Tz$solve() method and via the
# solve() generic, with and without an explicit RHS.
test_that("PCG solve method gives correct result.", {
  replicate(n = nrep, expr = {
    N <- round(abs(rnorm(n = 1, mean = 10, sd = 2)))
    p <- sample(1:3, 1)
    ## P1 <- PCG(N)
    Tz <- Toeplitz$new(N)
    case.par <- expand.grid(type = c("fbm", "matern"), b = c(TRUE, FALSE))
    ncase <- nrow(case.par)
    X <- matrix(rnorm(N*p), N, p)
    if(runif(1) < .5) X <- drop(X)
    # Convergence tolerance for the iterative solver.
    tol <- 1e-15
    for(ii in 1:ncase){
      cp <- case.par[ii, ]
      type <- as.character(cp$type)
      acf <- test_acf_func(N, type)
      Tz$set_acf(acf)
      Tmat <- toeplitz(acf)
      # Reference answer from the dense solver.
      Z <- solve(Tmat, X)
      if(cp$b) {
        expect_equal(Tz$solve(X, method = "pcg", tol = tol), Z)
        expect_equal(solve(Tz, X, method = "pcg", tol = tol), Z)
        ## expect_equal(max(abs((Tmat %*% P1$solve(acf, X, ntol) - X) / Z)), 0, tolerance = 1e-6)
      } else {
        # Without an RHS the result is the inverse, so T %*% T^{-1} = I.
        expect_equal(Tmat %*% Tz$solve(method = "pcg", tol = tol), diag(N))
        expect_equal(Tmat %*% solve(Tz, method = "pcg", tol = tol), diag(N))
        ## expect_equal(max(abs((P1$solve(acf, X, ntol)- Z) / Z)), 0, tolerance = 1e-6)
      }
    }
  })
})
nrep <- 10
# The Durbin-Levinson solver must agree with base R's dense solve() on random
# fBM / Matern Toeplitz systems, both for an explicit right-hand side
# (b = TRUE) and for the full inverse (RHS = identity, b = FALSE).
# Cleanup vs. the original test: removed the unused locals `ldV` and `tol`
# and the stale commented-out PCG/Toeplitz code paths.
test_that("LTZ solve method gives correct result.", {
  replicate(n = nrep, expr = {
    # Random problem size and number of right-hand sides.
    N <- round(abs(rnorm(n = 1, mean = 10, sd = 2)))
    p <- sample(1:3, 1)
    # Two acf types crossed with "RHS supplied or not".
    case.par <- expand.grid(type = c("fbm", "matern"), b = c(TRUE, FALSE))
    ncase <- nrow(case.par)
    X <- matrix(rnorm(N * p), N, p)
    for(ii in seq_len(ncase)) {
      cp <- case.par[ii, ]
      type <- as.character(cp$type)
      acf <- test_acf_func(N, type)
      Tmat <- toeplitz(acf)
      # Reference answer from the dense solver.
      Z <- solve(Tmat, X)
      if(cp$b) {
        expect_equal(DurbinLevinson_solve(X, acf), Z)
      } else {
        # Solving against the identity must reproduce the inverse.
        expect_equal(Tmat %*% DurbinLevinson_solve(diag(N), acf), diag(N))
      }
    }
  })
})
|
7faed11eb1140ab30ed56cf848a77c228a4c59f3 | fb7655e2bcfc5ee8c228eed0684e7516eee432f8 | /02_build/04_combine_county.R | ce7bfefa840a7a718c51841fced8336bdee6f284 | [] | no_license | galsk223/tribalclimate | 738e7ea2e4c74b142d84f3e00f4eb7575e8f89dd | bced46be1953ae06b54a1b7a9bda48523b98fff8 | refs/heads/main | 2023-07-24T07:12:27.477008 | 2021-08-30T20:16:10 | 2021-08-30T20:16:10 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 3,876 | r | 04_combine_county.R |
rm(list = ls())
tribedf <- read_rds("01_data/cache/tribe_county_shapefiles.rds")
tribecounties <- tribedf %>%
dplyr::select(GEOID) %>%
unique()
empty <- tribecounties %>%
mutate(area = as.numeric(st_area(geometry))) %>%
st_set_geometry(NULL) %>%
filter(area == 0)
tribeuse <- tribecounties %>%
filter(!GEOID %in% empty$GEOID)
heat <- read_rds("01_data/clean/a_heat_gridmet_county.rds")
drought <- read_rds("01_data/clean/b_drought_county.rds") %>%
rename(drought_mean = `1980-2020 Mean`)
precip <- read_rds("01_data/clean/c_precip_county.rds")
whp <- read_rds("01_data/clean/d_whp_county.rds")
elrug <- read_rds("01_data/clean/e_ElevationAndRuggedness_County.rds")
wells_oil <- read_rds("01_data/clean/f_Oil_wells_county.rds") %>%
dplyr::select(GEOID,AllArea_OilPortion) %>%
mutate(AllArea_OilPortion = as.numeric(AllArea_OilPortion)) %>%
unique() %>%
left_join(tribeuse,.,by="GEOID") %>%
st_set_geometry(NULL) %>%
replace(is.na(.), 0)
wells_gas <- read_rds("01_data/clean/f_Gas_wells_county.rds") %>%
dplyr::select(GEOID,AllArea_GasPortion) %>%
mutate(AllArea_GasPortion = as.numeric(AllArea_GasPortion)) %>%
unique() %>%
left_join(tribeuse,.,by="GEOID") %>%
st_set_geometry(NULL) %>%
replace(is.na(.), 0)
OGbasins <- read_rds("01_data/clean/g_OilGas_basins_county.rds") %>%
st_set_geometry(NULL)
PAD <- read_rds("01_data/clean/h_federalland_county.rds")
soc <- map_dfr(list.files("01_data/cache/soc_county2", full.names = T),
function(fl){
t <- read_rds(fl)
}) %>%
group_by(GEOID) %>%
summarise(SOC_mean = mean(Interpolated_15)) %>%
ungroup()
all <- tribedf %>%
st_set_geometry(NULL) %>%
left_join(.,heat,by="GEOID") %>%
left_join(.,drought,by="GEOID") %>%
left_join(.,precip,by="GEOID") %>%
left_join(.,whp,by="GEOID") %>%
left_join(.,elrug,by="GEOID") %>%
left_join(.,wells_oil,by="GEOID") %>%
left_join(.,wells_gas,by="GEOID") %>%
left_join(.,OGbasins,by="GEOID") %>%
left_join(.,PAD,by="GEOID") %>%
left_join(.,soc,by="GEOID")
# Rename fields, drop units, replace NAs when appropriate
final_ds <- all %>%
select(tribe,
GEOID,
heatdays=heatdays_mean,
drought=drought_mean,
precip,
whp=whp_mean,
oil_portion=AllArea_OilPortion,
gas_portion=AllArea_GasPortion,
og_basin_portion=BasinPortion,
federal_lands_portion=PADPortion,
soc=SOC_mean,
elevation=elevation_mean,
tri=tri_mean) %>%
inner_join(tigris::fips_codes %>%
mutate(GEOID=str_c(state_code,county_code),
county=str_remove(county,"County")) %>%
select(GEOID,state,county),.,
by="GEOID")
write_csv(final_ds,"01_data/clean/tribal_dispossession_county.csv")
write_csv(final_ds,"/RSTOR/tribal_climate/data_products/tribal_dispossession_county.csv")
us_co <- USAboundaries::us_counties(resolution = "low")
# Append geography and export as geopackage
final_ds_geo <- inner_join(select(us_co,GEOID=geoid),final_ds,by="GEOID")
write_sf(final_ds_geo,"01_data/clean/tribal_dispossession_county.gpkg")
# sums <- all %>%
# dplyr::select(-contains(c("q25","q75","sd","min","median","max","GEOID","tribe"))) %>%
# summarise_all(list(
# N = ~sum(!is.na(.)),
# Min = ~min(., na.rm = T),
# Mean = ~mean(., na.rm = T),
# Max = ~max(., na.rm = T))) %>%
# pivot_longer(everything()) %>%
# mutate(Stat = str_remove(str_extract(name,"_N|_Min|_Mean|_Max"),"_"),
# Variable = str_remove(str_remove(name,"_N|_Min|_Mean|_Max"),"_"),
# value = round(value,3)) %>%
# pivot_wider(-name,
# names_from = Stat)
|
7f3b2c0713be9d288756ca72e3a01f9e34181624 | 5132f759c41b35ccc6330a9e0ece6cff7d88910f | /tests/testthat/test-targetedevent-resize.R | 881dd50dff22992e2b95fbb771084b3502af3c2b | [
"MIT"
] | permissive | mrc-ide/individual | b9592fa20adb50004cfef19ce8d4ae9412aa940c | 066130bbd35c0d666f3381e475e18d2085fe382a | refs/heads/master | 2023-09-01T15:53:08.741369 | 2023-08-31T09:33:19 | 2023-08-31T09:33:19 | 228,632,510 | 26 | 11 | NOASSERTION | 2023-08-30T14:14:35 | 2019-12-17T14:19:24 | R | UTF-8 | R | false | false | 1,886 | r | test-targetedevent-resize.R | test_that("extending a TargetedEvent returns a larger bitset", {
event <- TargetedEvent$new(10)
listener <- mockery::mock()
event$add_listener(listener)
event$schedule(c(2, 4), 1)
event$queue_extend(10)
event$.resize()
event$.tick()
event$.process()
expect_equal(
mockery::mock_args(listener)[[1]][[2]]$max_size,
20
)
})
# Extending with a schedule: queue_extend_with_schedule(c(1, 2)) adds two new
# individuals (ids 11 and 12) pre-scheduled 1 and 2 timesteps ahead, so the
# listener fires for 11 at t = 2 and for 12 at t = 3.
test_that("extending a TargetedEvent with a schedule works", {
  event <- TargetedEvent$new(10)
  listener <- mockery::mock()
  event$add_listener(listener)
  event$queue_extend_with_schedule(c(1, 2))
  event$.resize()
  event$.tick()
  event$.process()
  expect_targeted_listener(listener, 1, t = 2, target = 11)
  event$.tick()
  event$.process()
  expect_targeted_listener(listener, 2, t = 3, target = 12)
})
# Shrinking by a bitset: removing individual 2 drops it and renumbers the
# rest, so scheduled target 4 becomes 3 and the listener's bitset has
# max_size 9.
test_that("TargetedEvent shrinking variables removes values (bitset)", {
  event <- TargetedEvent$new(10)
  listener <- mockery::mock()
  event$add_listener(listener)
  event$schedule(c(2, 4), 1)
  event$queue_shrink(Bitset$new(10)$insert(2))
  event$.resize()
  event$.tick()
  event$.process()
  expect_targeted_listener(listener, 1, t = 2, target = 3)
  expect_equal(
    mockery::mock_args(listener)[[1]][[2]]$max_size,
    9
  )
})
# Shrinking by a plain index vector: removing individual 4 drops its pending
# event, leaving only target 2, and the listener's bitset has max_size 9.
test_that("TargetedEvent shrinking variables removes values (vector)", {
  event <- TargetedEvent$new(10)
  listener <- mockery::mock()
  event$add_listener(listener)
  event$schedule(c(2, 4), 1)
  event$queue_shrink(4)
  event$.resize()
  event$.tick()
  event$.process()
  expect_targeted_listener(listener, 1, t = 2, target = 2)
  expect_equal(
    mockery::mock_args(listener)[[1]][[2]]$max_size,
    9
  )
})
# Out-of-range, negative, or oversized shrink requests must error immediately
# when queued, not later at the .resize() step.
test_that("TargetedEvent invalid shrinking operations error at queue time", {
  x <- TargetedEvent$new(10)
  expect_error(x$queue_shrink(index = 1:20))
  expect_error(x$queue_shrink(index = -1:20))
  expect_error(x$queue_shrink(index = Bitset$new(20)$insert(1:20)))
})
|
8cc4ae12b0fd436e2e39fb1996915d865ab2d24b | 624b5be5c5effd718303b47525358c7088222d85 | /Lab7/lab7_probforstats.R | 682b049352315558839d84667228075c008d0944 | [] | no_license | HesterLim/Probability-for-Statistics-MAST20006- | 0eeba5efd70083b16510b54791895e449e922061 | 30c4dd92d7f28888b4576a3600f61eb491749773 | refs/heads/master | 2021-03-13T20:40:08.545304 | 2020-07-03T23:34:31 | 2020-07-03T23:34:31 | 246,709,969 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 140 | r | lab7_probforstats.R | #Q5
# P(X > 3) for X ~ Gamma(shape = 8, rate = 1/2).
1 - pgamma(3, shape = 8, rate = 1/2)
#[1] 0.9998304
# Same value via the gamma-Poisson identity:
# P(Gamma(shape = k, rate = r) > x) = P(Poisson(r * x) <= k - 1),
# here r * x = 3/2 and k - 1 = 7 (matching output confirms it).
ppois(7, lambda=3/2)
#[1] 0.9998304
# Same value again: Gamma(shape = 8, rate = 1/2) is chi-squared with 16 df.
1 - pchisq(3,df = 16)
#[1] 0.9998304
|
bb801466dccac98a0a100b7f3cdf495c1e8c4263 | 2b29801a9a64028bb056b201c9d4c6ac9650bb43 | /3. Marker identification/filtration_by_tissue.R | 542d57a2ebf1230c91bf70fde10400acd197d4de | [
"Apache-2.0"
] | permissive | zhq921/cfWGBS-bioinfo-pip | 3502147ae348e307231d3dc57fbd3c524bf6aefd | c9fb634829535818899eaf6e7b2dd6dadc1693a6 | refs/heads/master | 2021-12-26T04:21:34.265124 | 2021-10-25T04:39:18 | 2021-10-25T04:39:18 | 166,790,087 | 2 | 0 | null | null | null | null | UTF-8 | R | false | false | 3,408 | r | filtration_by_tissue.R | library("glmnet")
library("randomForest")
library("pROC")
library(pheatmap)
library(ggplot2)
library(RColorBrewer)
library("caret") #for confusion matrix
library(e1071)
library("verification") # for roc p value
library(scatterplot3d)
library("ggsci")
library("Rtsne")
# cfDNA methylation matrix: one row per region, one column per sample.
# NOTE(review): stringsAsFactors = F / header = T use the reassignable
# shortcuts -- prefer spelling out FALSE/TRUE.
dmr_mat_total <- read.table("./met_mat/cfDNA_mat",sep = "\t",stringsAsFactors = F,header = T,row.names = 1)
dmr_mat_total <- dmr_mat_total[!grepl("^M",rownames(dmr_mat_total)),] #rm the rows of chrom MT
dmr_mat_total <- dmr_mat_total[,!grepl("Z17",colnames(dmr_mat_total))] # remove the Z17 sample from the XH
#add chr to be a name
rownames(dmr_mat_total) = paste0("chr",rownames(dmr_mat_total))
{####filter by tissue
  # Filter candidate DMRs using the tissue methylation table cpg.mat, which is
  # assumed to be loaded earlier in the pipeline (not read in this file) --
  # TODO confirm. Columns 1:3 appear to be chrom/start/end; remaining columns
  # are per-sample values, with "N" in a name marking normal samples and "T"
  # marking early-tumor samples (presumed from the grepl patterns below).
  #paste chr start end
  cpg.mat[,2] = as.character(cpg.mat[,2])
  cpg.mat[,3] = as.character(cpg.mat[,3])
  cpg.mat[,1] = apply(as.matrix(cpg.mat[,1:3]),1,function(x) paste(x,collapse = "_"))
  #rm these col
  cpg.mat = cpg.mat[,-(4:6)]
  #rm the last col
  cpg.mat = cpg.mat[,-dim(cpg.mat)[2]]
  #fill the na value
  nm.idx = grepl("N",colnames(cpg.mat))
  early.idx = grepl("T",colnames(cpg.mat))
  # Replace each missing value with the row median of its own sample group.
  for(i in 1:dim(cpg.mat)[1])
  {
    na.idx = is.na(cpg.mat[i,])
    cpg.mat[i,nm.idx&na.idx] = median(unlist(cpg.mat[i,nm.idx]),na.rm = T)
    cpg.mat[i,early.idx&na.idx] = median(unlist(cpg.mat[i,early.idx]),na.rm = T)
  }
  #calculate the mean methylation for each region
  region = unique(cpg.mat[,1])
  region.mat = c()
  # NOTE(review): growing region.mat with rbind inside the loop is O(n^2).
  for(i in 1:length(region)) #collapse the positions into region
  {
    tmp = colMeans(cpg.mat[cpg.mat[,1]==region[i],4:dim(cpg.mat)[2]],na.rm = T)
    region.mat = rbind(region.mat,tmp)
    rownames(region.mat)[i] = region[i]
  }
  #Calculate the p value and mean differ
  # Group indices recomputed on region.mat BEFORE the stat columns are
  # appended below, so the positions stay valid afterwards.
  nm.idx = which(grepl("N",colnames(region.mat))==T)
  early.idx = which(grepl("T",colnames(region.mat))==T)
  region.mat = cbind(region.mat,tissue_pvalue = 0,tissue_early_meth_mean = 0, tissue_normal_meth_mean= 0, tissue_meandiff = 0)
  # Per-region t-test (normal vs early) plus group means and their difference.
  for(i in 1:dim(region.mat)[1])
  {
    region.mat[i,"tissue_pvalue"] = t.test(x=region.mat[i,nm.idx],y = region.mat[i,early.idx])$p.value
    region.mat[i,"tissue_early_meth_mean"] = mean(region.mat[i,early.idx])
    region.mat[i,"tissue_normal_meth_mean"] = mean(region.mat[i,nm.idx])
    region.mat[i,"tissue_meandiff"] = mean(region.mat[i,early.idx])-mean(region.mat[i,nm.idx])
  }
  #read the training set's p and meandiffer
  train_dmr_differ_info = dmr_mat_total[,1:2]
  # Keep regions with a large tissue effect (|diff| > 0.2) whose sign agrees
  # with the cfDNA training-set effect.
  filtered_dmr_name = rownames(region.mat)[(abs(region.mat[,"tissue_meandiff"])>0.2)&
                                             (train_dmr_differ_info$meandiff*region.mat[,"tissue_meandiff"]>0)]
  filtered_dmr_name = paste0("chr",filtered_dmr_name)
  dmr_mat_total = dmr_mat_total[filtered_dmr_name,]
  ##output the 68 regions' detail info
  # Per-region mean methylation over the cfDNA training samples.
  out.tmp <- t(apply(dmr_mat_total,1,function(x)
  {
    mean_normal_in_cfdna = mean(x[grepl("Training_Normal",colnames(dmr_mat_total))])
    mean_early_in_cfdna = mean(x[grepl("Training_Early",colnames(dmr_mat_total))])
    return(c(mean_normal_in_cfdna,mean_early_in_cfdna))
  }))
  out.tmp <-cbind(out.tmp, dmr_mat_total[,1:2])
  colnames(out.tmp)[1:2] <-c("cfdna_normal_meth_mean","cfdna_early_meth_mean")
  rownames(region.mat) <-paste0("chr",rownames(region.mat))
  # NOTE(review): the hard-coded indices c(10,11,9,12) pick the four appended
  # stat columns only if region.mat has exactly eight sample columns --
  # confirm against the tissue data.
  out.tmp <-cbind(out.tmp,region.mat[filtered_dmr_name,c(10,11,9,12)])
  write.table(out.tmp,"model/DMR68_info.txt",row.names = T,col.names = T,quote = F,sep="\t")
}
|
71461206ee5cc8d235e5a583080f7767714439d1 | 4ef336dbebc3e5b6d17424104402a10156b45fd2 | /ConfidenceQuant/man/CompExperiment_2dim_D.Rd | 23d06de7d7cad755297f8465c6e5629dc8b3ffb7 | [] | no_license | likun-stat/ConfidenceQuant | ca037eebae9f1a170735d5a8f1aea6648fc9e07d | 667ea899f0139ddd5026ac823ea0bf67d5c55b4f | refs/heads/master | 2020-03-31T06:59:07.378497 | 2018-12-21T10:33:28 | 2018-12-21T10:33:28 | 152,002,714 | 0 | 0 | null | null | null | null | UTF-8 | R | false | true | 5,525 | rd | CompExperiment_2dim_D.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/CompExperiment_2dim_D.R
\name{CompExperiment_2dim_D}
\alias{CompExperiment_2dim_D}
\alias{searchLambda_2dim_D}
\title{Comparative Experiment (Discretized).}
\usage{
CompExperiment_2dim_D(cores = NULL, treatment, control, alpha = 0.05,
tau = 0.25, lambda = 2, D = 50, s = 15,
b1 = floor(nrow(treatment)/s), b2 = floor(nrow(control)/s), r = 100,
M = 30, Range = c(0.001, 0.999), Search = FALSE)
searchLambda_2dim_D(lambda, Tmp, s, indices, tau, n, b)
}
\arguments{
\item{cores}{The number of cores to use for parallel execution. If not
specified, the number of cores is set to the value of
\code{options("cores")}, if specified, or to one-half the number of cores
detected by the \code{\link{parallel}} package.}
\item{treatment}{A 3-dim optional data frame for the treatment group (or
object coercible by \code{\link{as.data.frame}} to a data frame) containing
the variables in the model. The last column of this data frame must be the
response for the experiment.}
\item{control}{A 3-dim optional data frame for the control group (or object
coercible by \code{\link{as.data.frame}} to a data frame) containing the
variables in the model. The last column of this data frame must be the
response for the experiment.}
\item{alpha}{The confidence level required. The default is 0.05.}
\item{tau}{A specific quantile to be estimated. Must be a number between 0 and
1.}
\item{lambda}{The smoothing parameter used for \code{treatment} &
\code{control} if \eqn{Search=FALSE}, which governs the tradeoff between
fidelity and the penalty component for the triogram term.}
\item{D}{A number that determines the density of a grid of x values at which
the quantile function will be predicted. If specified, it will evaluate a
confidence surface on a \eqn{D×D} grid.}
\item{s}{The number of subsamples used in the BLB algorithm. Kleiner et al.
suggest that \eqn{s} should be 10~20.}
\item{b1}{The subsample size in the BLB algorithm for \code{treatment}.
Kleiner et al. suggest that the size should be around \eqn{n1^0.6}, where
\eqn{n1} is the data size for \code{treatment}.}
\item{b2}{The subsample size in the BLB algorithm for \code{control}. It is
also suggested that the size should be around \eqn{n2^0.6}, where \eqn{n2}
is the data size for \code{control}.}
\item{r}{The number of bootstrap iterations (samples with with replacement).
\eqn{r=100} is suggested.}
\item{M}{A numeric value that controls how fine that data set should be
discretized.}
\item{Range}{A vector with 2 values that specifies the range of the data set
over which the user wants to perform BLB. It is defined using lower and
upper quantiles. The default value is \eqn{(0.001,0.999)}.}
\item{Search}{If \code{TRUE} (which is recommended), then the function will
first search for an optimum smoothing parameter \eqn{\lambda}.}
}
\value{
A list with three parts - \code{result1}, \code{result2}, and
\code{Diff}, which respectively return confidence bands for
\code{treatment}, \code{control} and the difference between the two datasets.
Each part includes the following:
1. \code{x0} and \code{CI_average}, where \code{x0} contains the x values at
which the confidence intervals are evaluated, and \code{CI_average} is 2-dim
matrix which contains the corresponding lower and upper bounds.
2. \code{lambda}, which is the optimum smoothing parameter selected by
\code{BLB_Discretize_2dim}. If it is done automatically, the function also returns
\code{Lambda} and \code{Fid}, which respectively stand for a vector of lambda
values and their corresponding cross-validation MCV values.
}
\description{
\code{CompExperiment_2dim_D} compares the confidence regions for a given
quantile for two different datasets, one related to a treatment and the other
to a control. It applies the BLB algorithm to each dataset to get confidence
regions using quantile smoothing splines for 2-dim covariate. What's special
about this function is that it discretizes the data set to decrease the sample
size, and it utilizes ALL the observations.
\code{searchLambda_2dim_D} is a wrapper function to calculate the optimum lambda.
}
\details{
This function runs \code{BLB} twice, once for each dataset. It is based on
\code{\link{BLB_Discretize_2dim}}, which implements BLB for quantile smoothing splines
with a two-dimensional covariate dataset. It performs parallelization to speed
up the calculation.
\if{html}{\figure{comp2.png}{options: width=100 alt="Image output"}}
\if{latex}{\figure{comp2.png}{options: width=3in}}
\code{\link{CompPlot_2dim}} takes the results and use ggplot/plotly to visualize
them, in which different colors represent different scenarios. See figure
above.
}
\examples{
data(treatment_2dim)
data(control_2dim)
#alpha=0.05;tau=0.5
all<-CompExperiment_2dim_D(cores=7, treatment_2dim, control_2dim, tau=0.5, Search=TRUE)
plot<-CompPlot_2dim(control = control_2dim,treatment = treatment_2dim,all = all,xlab="x1",
ylab="x2",zlab="z")
}
\references{
Kleiner, I. J et al. JRSS B, 2012. \eqn{A Scalable Bootstrap for
Massive Data}.
Akima, H. (1978). \eqn{A Method of Bivariate Interpolation and Smooth Surface Fitting for Irregularly Distributed Data Points}. ACM Transactions on Mathematical Software 4, 148-164.
}
\seealso{
\code{\link{contour},\link{image}}
\code{\link{BLB_Discretize_2dim}} for BLB with one dataset that has 1-dim
covariate.
\code{\link{CompExperiment_1dim_D}} for comparative experiments with
1-dim covariate data sets.
}
|
72da0aa1470cc736ac9f43c942f69d92de8f2162 | 26f1cb213312ad204072dadd6b1163bcc0fa1bba | /exemples/chap8/8.11.R | 973391e633d511bfb75b4bf3b924af4061009794 | [] | no_license | fmigone/livreR | a9b6f61a0aab902fb3b07fc49ea7dd642b65bdc3 | 998df678da1559ee03438c439335db796a416f2f | refs/heads/master | 2020-04-22T05:47:14.083087 | 2018-05-01T14:39:10 | 2018-05-01T14:39:10 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 270 | r | 8.11.R | old_par <- par(no.readonly = TRUE) # sauvegarde
par(mai=c(0.1,0.1,0.1,0.1)) # shrink all four plot margins (in inches)
# pie chart showing the breakdown of
# flats by room type
pie(table(flats$room_type),
    radius=0.6,
    cex=0.7)
par(old_par) # restore the previously saved graphics settings
|
99299e3a379ddf5bb1f877f823067c0aac4fd102 | 6158902809046c3a300faa2d7767c7cb3b827b39 | /man/as_tex.Rd | 4e6156e3e36efd235634d477ee01bb35e415927f | [
"MIT"
] | permissive | ygeunkim/rmdtool | 5790f686c83cdb1950810821f682cadf97271e55 | 7d37558c0542c19f79c0150afda64e9109e64304 | refs/heads/master | 2020-04-22T19:56:57.507699 | 2019-06-30T13:56:15 | 2019-06-30T13:56:15 | 170,625,045 | 0 | 0 | null | null | null | null | UTF-8 | R | false | true | 687 | rd | as_tex.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/latex_print.R
\name{as_tex}
\alias{as_tex}
\alias{as_tex.character}
\alias{as_tex.matrix}
\title{Coerce characters and matrices to tex}
\usage{
as_tex(x, ...)
\method{as_tex}{character}(x, ...)
\method{as_tex}{matrix}(x, ...)
}
\arguments{
\item{x}{`character` or `matrix` object to be `tex`}
\item{...}{Additional arguments passed to the S3 method.}
}
\description{
`as_tex()` can be applied to a `character` or a `matrix` object, coercing the input to the new class `tex`. This helps make up for the shortcomings of the `knitr_print` method for matrices.
}
\examples{
A <- matrix(1:9, nrow = 3)
as_tex(A)
}
|
d32bacd5a13ced2b98cccb7be451df3e09138317 | 9c712485c349366e6aca30715d171309b697a877 | /study1_code_changed_variable_name.R | ef01acee51e7b6ebaf3680083abe22aca4780618 | [] | no_license | yinjixing/Neurosci_bias-1 | 6f517387991e23906f877a42fb1add483c2815a4 | dbfb35ccb2a8c91dc527e2e4b039c4a812f21fa1 | refs/heads/master | 2020-05-03T22:20:05.219203 | 2019-04-01T14:55:47 | 2019-04-01T14:55:47 | 178,841,961 | 0 | 0 | null | 2019-04-01T10:42:18 | 2019-04-01T10:42:18 | null | UTF-8 | R | false | false | 8,735 | r | study1_code_changed_variable_name.R | ### code for analysis of neuroimage bias ###
# code author:Ji-Xing Yin
# email:jixing-yin@outlook.com
# Author date
# ========== ======
# jixing-yin 18-4-5
# Yuepei XU 19-3-1
# input data
# oringinal file: "study1 combined duplicate.csv"
# output data
# output file:
## variables of this study
#Independent variable
#age(18 vs 38)
#evidence(behavior_court,brain_noimage,brain-brain) ##behavior_court means behavior evidence with a court image
##brain_no_image means brain evidence with no image
##brain-brain means brain evidence with brain image
# Measurements:
#death_penalty(death penalty):1-7
#percep_of_respon(perception of responsibility):1-7
#percep_of_danger(perception of danger):1-7
#ability_to_back(ability to come back society):1-7
#free_will(free wil):1-5
#just_sense(justice sense):0-5
#just_belief(just world belief):1-6
#know_of_law(knowledge of the law):1-7
#know_of_sci(knowledge of the science):1-7
#bielief_of_case(belief in truth of the case):1-7
### preparing ###
# set the working directory to this script's folder (requires RStudio's rstudioapi)
curDir = dirname(rstudioapi::getSourceEditorContext()$path)
setwd(curDir)
rm(list = setdiff(ls(), lsf.str())) # remove all variables except functions
curDir = dirname(rstudioapi::getSourceEditorContext()$path) # recompute: the rm() above also deleted curDir
Sys.setlocale("LC_ALL", "English") # set local encoding to English
Sys.setenv(LANG = "en") # set the feedback (message) language to English
### load Packages
# NOTE: library() attaches a single package per call. The original
# `library(tidyverse,psych,reshape)` only attached tidyverse, because the
# 2nd/3rd arguments were silently consumed as library()'s `help`/`pos`
# parameters, leaving psych and reshape unavailable to the code below.
library(tidyverse)
library(psych)
library(reshape)
## read data
# blank cells, single spaces and "NA" are all read as missing values
total.data <- read.csv("Study1_data.csv",header = TRUE,sep = ',', stringsAsFactors=FALSE, na.strings=c(""," ","NA"))
## exclude participants who pay insufficient attention to the test problem and the scenarios
valid.data <- total.data %>%
  dplyr::filter(Q34=="4" & Q2_13=="3") %>% # keep participants who passed the attention checks (Q34=4 and Q2_13=3)
  dplyr::filter(((X18b =="2"| X18bb == "2" | X18no == "2") & Q25<=18) |
                  ((X38b =="2"| X38bb == "2" | X38no == "2") & Q25>18)) %>% # keep participants whose remembered age matches their condition
  dplyr::filter(!is.na(Q30) & !is.na(Q31) & !is.na(Q32)) %>% # drop rows missing any of the three dependent measures
  # derive the crime-age condition from which scenario columns were answered
  dplyr::mutate(CrimeAge1 = ifelse(X18b == "2" |X18bb=="2" | X18no=="2", "age17", "age37"),
                CrimeAge2 = ifelse(X38b == "2" |X38bb=="2" | X38no=="2", "age37", "age17")) %>%
  # NOTE(review): the two mutate() calls below recompute CrimeAge1/CrimeAge2
  # with identical expressions -- redundant but harmless.
  dplyr::mutate(CrimeAge1 = ifelse(X18b == "2" |X18bb=="2" | X18no=="2", "age17", "age37")) %>%
  dplyr::mutate(CrimeAge2 = ifelse(X38b == "2" |X38bb=="2" | X38no=="2", "age37", "age17")) %>%
  dplyr::mutate(CrimeAge = coalesce(CrimeAge1, CrimeAge2)) %>% # coalesce the two columns into one
  # NOTE(review): these ifelse() calls omit the `no` argument; they only work
  # if every non-matching cell is NA (via na.strings above), in which case
  # ifelse() returns NA without evaluating `no`. A FALSE condition would
  # raise "argument 'no' is missing" -- confirm the data guarantees this.
  dplyr::mutate(EvidenceType1 = ifelse(X18no == "2" |X38no =="2", "be_no_b"),
                EvidenceType2 = ifelse(X18b == "2" |X38b =="2", "b_no_b"),
                EvidenceType3 = ifelse(X18bb == "2" |X38bb =="2", "b_b")) %>%
  dplyr::mutate(EvidenceType = coalesce(EvidenceType1, EvidenceType2,EvidenceType3)) %>% # coalesce three columns into one
  dplyr::select(-c(CrimeAge1, CrimeAge2,EvidenceType1, EvidenceType2,EvidenceType3)) # delete intermediate columns
# rename questionnaire items to descriptive names.
# NOTE(review): the c("old"="new") mapping matches reshape/plyr's rename(),
# not dplyr::rename() -- confirm which rename() is on the search path.
valid.data<-rename(valid.data,c("Q2_3"="free_will_1","Q2_8"="free_will_2","Q2_12"="free_will_3","Q2_16"="free_will_4",
                                "Q2_2"="scientific_1","Q2_10"="scientific_2","Q2_14"="scientific_3","Q2_15"="scientific_4",
                                "Q2_1"="fatalistic_1","Q2_4"="fatalistic_2","Q2_6"="fatalistic_3","Q2_9"="fatalistic_4",
                                "Q2_5"="unpreditic_1","Q2_7"="unpreditic_2","Q2_11"="unpreditic_3","Q2_17"="unpreditic_4",
                                "Q3_1"="just_sense_1","Q3_2"="just_sense_2","Q3_3"="just_sense_3","Q3_4"="just_sense_4","Q3_5"="just_sense_5","Q3_6"="just_sense_6","Q3_7"="just_sense_7","Q3_8"="just_sense_8",
                                "Q8_1"="just_belief_1","Q8_2"="just_belief_2","Q8_3"="just_belief_3","Q8_4"="just_belief_4","Q8_5"="just_belief_5","Q8_6"="just_belief_6","Q8_7"="just_belief_7","Q8_8"="just_belief_8","Q8_9"="just_belief_9","Q8_10"="just_belief_10","Q8_11"="just_belief_11","Q8_12"="just_belief_12","Q8_13"="just_belief_13",
                                "Q5"="now_status","Q6"="past_status","Q7"="future_status","Q9"="par_gender","Q10"="par_age",
                                "Q30"="death_penalty","Q31"="percep_of_respon","Q32"="percep_of_danger",
                                "Q33"="after_10years_danger","Q35_1"="punishment_for_killing","Q35_2"="punishment_for_protecting","Q35_3"="puishment_for_warning",
                                "Q36"="law_familiar","Q37"="sci_familiar"))
##select data of the scales
# Coerce every column to numeric (non-numeric entries become NA) and attach
# the resulting list so items can be referenced by bare name below.
valid.data2<-lapply(valid.data,as.numeric)
attach(valid.data2)
#free will subscale: mean of 4 items, plus Cronbach's alpha (psych::alpha)
free_will_total<- data.frame(free_will_1,free_will_2,free_will_3,free_will_4)
free_will<- ((free_will_1+free_will_2+free_will_3+free_will_4)/4)
ap_free_will <- psych::alpha(free_will_total)
ap_free_will <- round(ap_free_will$total[1],2) # keep only the first (rounded) summary statistic from alpha()'s $total
#scientific determinism subscale
scientific_total <- data.frame(scientific_1,scientific_2,scientific_3,scientific_4)
scientific <- ((scientific_1+scientific_2+scientific_3+scientific_4)/4)
ap_scientific <- psych::alpha(scientific_total)
ap_scientific <- round(ap_scientific$total[1],2)
#fatalistic determinism subscale
fatalistic_total <- data.frame(fatalistic_1,fatalistic_2,fatalistic_3,fatalistic_4)
fatalistic <- ((fatalistic_1+fatalistic_2+fatalistic_3+fatalistic_4)/4)
ap_fatalistic <- psych::alpha(fatalistic_total)
ap_fatalistic <- round(ap_fatalistic$total[1],2)
#unpredictability subscale
unpreditic_total<- data.frame(unpreditic_1,unpreditic_2,unpreditic_3,unpreditic_4)
unpreditic<- ((unpreditic_1+unpreditic_2+unpreditic_3+unpreditic_4)/4)
ap_unpreditic <- psych::alpha(unpreditic_total)
ap_unpreditic <- round(ap_unpreditic$total[1],2)
#just sense: mean of 8 items
just_sense_total<- data.frame(just_sense_1,just_sense_2,just_sense_3,just_sense_4,just_sense_5,just_sense_6,just_sense_7,just_sense_8)
just_sense<-(just_sense_1+just_sense_2+just_sense_3+just_sense_4+just_sense_5+just_sense_6+just_sense_7+just_sense_8)/8
ap_just_sense <- psych::alpha(just_sense_total)
ap_just_sense <- round(ap_just_sense$total[1],2)
#just world belief: mean of 13 items
just_belief_total<- data.frame(just_belief_1,just_belief_2,just_belief_3,just_belief_4,just_belief_5,just_belief_6,just_belief_7,just_belief_8,just_belief_9,just_belief_10,just_belief_11,just_belief_12,just_belief_13)
just_belief <- (just_belief_1+just_belief_2+just_belief_3+just_belief_4+just_belief_5+just_belief_6+just_belief_7+just_belief_8+just_belief_9+just_belief_10+just_belief_11+just_belief_12+just_belief_13)/13
ap_just_belief <- psych::alpha(just_belief_total)
ap_just_belief <- round(ap_just_belief$total[1],2)
detach(valid.data2)
### end preparing ###
### information of participants ###
# Age arrives as text/factor; go through as.character() so as.numeric() does
# not return factor level codes.
participant.age<-as.numeric(as.character(valid.data[,"par_age"])) #transform to numeric
# Recode gender (1/2) into a labelled factor.
# Fixed typo in the second label: "Femle" -> "Female".
participant.gender<- factor(valid.data$par_gender,
                            levels = c(1, 2),
                            labels = c("Male", "Female"))
age <- summary(participant.age)
gender <- summary(participant.gender)
### analysis ###
## calculate the dependent variables ##
# 2 (crime age) x 3 (evidence type) between-subjects ANOVAs,
# one per dependent variable #
death_penalty_anova <- summary(aov(valid.data$death_penalty~valid.data$CrimeAge*valid.data$EvidenceType))
percep_of_respon_anova <- summary(aov(valid.data$percep_of_respon~valid.data$CrimeAge*valid.data$EvidenceType))
percep_of_danger_anova <- summary(aov(valid.data$percep_of_danger~valid.data$CrimeAge*valid.data$EvidenceType))
# multivariable linear regression #
# test Multicollinearity #
attach(valid.data)
pre_regression_data<-data.frame(free_will,scientific,fatalistic,unpreditic,just_belief,just_sense,death_penalty,percep_of_respon,percep_of_danger)
regression_data<-pre_regression_data[-1:-4,] # drop the first 4 rows (they contain NA values, per the original author)
cor_regression_data<-cor(regression_data)
mcl_test<-kappa(cor_regression_data[,1:6]) # condition number ("kappa") of the 6 predictors' correlations; kappa < 100 is taken to mean little multicollinearity
# regressions of the three dependent variables #
# NOTE(review): `++` in these formulas parses as `+ (+term)`, i.e. an
# ordinary additive model -- it works, but a single `+` was probably intended.
death_penalty_lm<-summary(lm(death_penalty~free_will++scientific++fatalistic++unpreditic++just_sense++just_belief,data=regression_data))
percep_of_respon_lm<-summary(lm(percep_of_respon~free_will++scientific++fatalistic++unpreditic++just_sense++just_belief,data=regression_data))
percep_of_danger_lm<-summary(lm(percep_of_danger~free_will++scientific++fatalistic++unpreditic++just_sense++just_belief,data=regression_data))
detach(valid.data)
### end of data analysis ### |
2e0081a4c69bf5adc7d34d61651bb86c812e1bbe | 5ad281aa3680a525215bbfb95a2c3e0000b0421a | /R/ghit-package.R | 36821c592b295335089d906cea1fdcce6b49c9ee | [] | no_license | JustinMShea/ghit | 9b000099ff40c4469265d565fe1c5f0e45d0b127 | 22e48a2d03444589fba4f5f99ac4775142d23d11 | refs/heads/master | 2020-04-02T11:44:13.251118 | 2018-10-23T23:09:55 | 2018-10-23T23:09:55 | 154,404,086 | 0 | 0 | null | 2018-10-23T22:19:41 | 2018-10-23T22:19:41 | null | UTF-8 | R | false | false | 431 | r | ghit-package.R | #' @docType package
#' @name ghit
#' @title Lightweight GitHub Package Installer
#' @description ghit provides two functions, \code{\link{install_github}} and \code{\link{install_bitbucket}}, which allow users to install R packages hosted on GitHub or Bitbucket without needing to install or load the heavy dependencies required by devtools. ghit provides a drop-in replacement that provides (almost) identical functionality.
# roxygen2 package-level documentation stub: the tags above document the
# package itself; NULL simply gives roxygen2 an object to attach them to.
NULL
|
55ec0aae5f76d368148f84abd93a46687f64f859 | dca957d6bcf9b83895d738e7f2223c45d01d546c | /man/sub-HSDSDataset-numeric-numeric-ANY-method.Rd | 22d9fe054b4d74790ceb1b97c9b630ffa4b6873e | [] | no_license | vjcitn/rhdf5client | 755376d2798280c7244ab79b02d89cfc5e913373 | 42062fd17d3b12f237eda132d98cc9383fdf0bbd | refs/heads/main | 2023-09-05T09:24:35.514077 | 2023-08-18T03:59:42 | 2023-08-18T03:59:42 | 586,076,019 | 2 | 1 | null | 2023-09-08T22:36:42 | 2023-01-06T21:56:21 | HTML | UTF-8 | R | false | true | 705 | rd | sub-HSDSDataset-numeric-numeric-ANY-method.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/Dataset.R
\docType{methods}
\name{[,HSDSDataset,numeric,numeric,ANY-method}
\alias{[,HSDSDataset,numeric,numeric,ANY-method}
\title{bracket method for 2d request from HSDSDataset}
\usage{
\S4method{[}{HSDSDataset,numeric,numeric,ANY}(x, i, j, ..., drop = TRUE)
}
\arguments{
\item{x}{object of type HSDSDataset}
\item{i}{vector of indices (first dimension)}
\item{j}{vector of indices (second dimension)}
\item{\dots}{not used}
\item{drop}{logical(1) if TRUE return has no array character}
}
\value{
an array with the elements requested from the HSDSDataset
}
\description{
bracket method for 2d request from HSDSDataset
}
|
33b28bcec7078ddf5980bc8922570557bbaf8664 | 7a7b1b13b8a17fab6c747f1b330350390f9c5492 | /ui/merge_identification/merge_iden_parameter_ui.R | e3bb0e7618a9721b907b45d371170653b3b7bcee | [] | no_license | jaspershen/pregnancy_project_website | 6948bacae2aeb3cc770c794da0faa9d14f6c0437 | 2afc02d94988ecba3ad09f4eb3d1d2b55f35c8c1 | refs/heads/master | 2020-11-25T12:28:29.063718 | 2019-12-17T18:30:48 | 2019-12-17T18:30:48 | 228,660,894 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,307 | r | merge_iden_parameter_ui.R | fluidPage(
sidebarLayout(sidebarPanel = sidebarPanel(
# h4("Batch alignment parameters"),
numericInput(inputId = "merge.iden.mz.tol",
label = "m/z tolerance (ppm)",
value = 25, min = 5, max = 50, step = 1),
numericInput(inputId = "merge.iden.rt.tol",
label = "Retention time tolerance (second)",
value = 180, min = 5, max = 300, step = 1),
actionButton(inputId = 'merge.iden.submit.button',
"Submit",
styleclass = "info",
icon = icon('play-circle')),
useShinyalert(),
actionButton(inputId = "merge.iden.parameter.2.result.download",
label = "Next", styleclass = "warning"),
helpText("Click", strong("Submit"), "to merge identification.")
),
mainPanel = mainPanel(
span(textOutput(outputId = "merge.iden.params.message"), style = "color:black"),
tabsetPanel(id = "merge.result", type = "tabs",
tabPanel(title = "Merge identification",
icon = icon("table"),
shinysky::busyIndicator(text = "Processsing..."),
DT::dataTableOutput(outputId = "merge.iden.peak.identification")
)
),
br(),br(),br()
)
)
) |
fcf1a27cd0a973b84ab492ff7f6356d8f6ac84d4 | 901f2d0c83b9e04decbb193a7653131d31c48e78 | /R/QC_weights.R | 1fb447597a1235d21f96e8559a58892fa8a877d8 | [] | no_license | psavoy/StreamLightUtils | b22ff1acb320b0e239b2d3ae639339ea62a87117 | b6ce97cb08773b37d924e93942c3b6686cf41074 | refs/heads/master | 2022-04-29T12:32:26.192072 | 2022-03-23T15:03:33 | 2022-03-23T15:03:33 | 245,456,390 | 0 | 1 | null | 2022-01-12T20:30:08 | 2020-03-06T15:38:04 | R | UTF-8 | R | false | false | 1,201 | r | QC_weights.R | #' Assigns weights to LAI data based on the 5-level QC score
#' @description Converts the 5-level MODIS SCF_QC confidence score into
#'   numeric fitting weights: best-quality retrievals ("000", "001") receive
#'   \code{wmax}, intermediate retrievals ("010", "011") receive \code{wmid},
#'   the lowest usable class ("100") receives \code{wmin}, and any other
#'   code is left as \code{NA}.
#'
#' @param SCF_QC The 5-level QC score (FparLai_QC_SCF_QC)
#' @param wmin The minimum weight for a data point
#' @param wmid The middle weight for a data point
#' @param wmax The maximum weight for a data point (best quality)
#'
#' @return A vector of weights, one per element of \code{SCF_QC}
#' @export
#'
#===============================================================================
#Weight observations by their QC class (adapted from phenofit)
#Created 7/10/2020
#===============================================================================
QC_weights <- function(SCF_QC, wmin = 0.2, wmid = 0.5, wmax = 1){
  # Start with NA so unmatched QC codes are flagged rather than weighted.
  out <- rep(NA, length(SCF_QC))
  # Membership masks for each confidence tier of the 3-bit SCF_QC string.
  best_quality <- SCF_QC %in% c("000", "001")
  mid_quality  <- SCF_QC %in% c("010", "011")
  low_quality  <- SCF_QC %in% c("100")
  out[best_quality] <- wmax
  out[mid_quality]  <- wmid
  out[low_quality]  <- wmin
  out
}
1b2b70e1ea44fb29f2ed10a3ad8ce1766f18cd32 | d7d9043665455d721fccb914d04a58184419572a | /prepare/relig-income-functions.R | 909456fc00d41ce89f66c31dfa1503aeff1c8a07 | [] | no_license | frycast/umbrella | 2705aca68628d7432be55d130c82344a91b65ba7 | 1efa96cfe5d8add80548dfe161d368f6f86567a6 | refs/heads/main | 2023-09-06T06:52:27.189603 | 2021-11-25T01:25:25 | 2021-11-25T01:25:25 | 431,071,469 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 278 | r | relig-income-functions.R | # This script contains all functions for preparing the relig_income data
# Reshape the wide relig_income table into tidy (long) format
tidy_relig_income <- function(relig_income) {
  # Every column other than `religion` holds counts for one income bracket;
  # stack those columns into income/count pairs, one row per combination.
  long_table <- tidyr::pivot_longer(
    data = relig_income,
    cols = !religion,
    names_to = "income",
    values_to = "count"
  )
  long_table
}
58529860b19a3d66d2616227c7cac2af43c19860 | 0102fcf7a11dc9310cdfbd7fb5ae7a0a82d2e3be | /man/groupOTU-methods.Rd | 20a99a846d4db0f9c25dc1f0a0022634fc9023fa | [] | no_license | nemochina2008/treeio | 7c0de3b4f7c22bcb33d313da821ecd02951f8063 | b6ae142e3891e337e982fbdc2e94ecbbf5a9dd66 | refs/heads/master | 2021-07-06T14:28:56.904111 | 2017-09-29T03:28:28 | 2017-09-29T03:28:28 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | true | 1,702 | rd | groupOTU-methods.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/AllGenerics.R, R/method-groupOTU.R
\docType{methods}
\name{groupOTU}
\alias{groupOTU}
\alias{groupOTU,beast-method}
\alias{groupOTU,codeml-method}
\alias{groupOTU,codeml_mlc-method}
\alias{groupOTU,jplace-method}
\alias{groupOTU,paml_rst-method}
\alias{groupOTU,phangorn-method}
\alias{groupOTU,phylip-method}
\alias{groupOTU,phylo-method}
\alias{groupOTU,r8s-method}
\alias{groupOTU,treedata-method}
\title{groupOTU method}
\usage{
groupOTU(object, focus, group_name = "group", ...)
\S4method{groupOTU}{beast}(object, focus, group_name = "group", ...)
\S4method{groupOTU}{codeml}(object, focus, group_name = "group", ...)
\S4method{groupOTU}{codeml_mlc}(object, focus, group_name = "group", ...)
\S4method{groupOTU}{jplace}(object, focus, group_name = "group", ...)
\S4method{groupOTU}{treedata}(object, focus, group_name = "group", ...)
\S4method{groupOTU}{phangorn}(object, focus, group_name = "group", ...)
\S4method{groupOTU}{phylip}(object, focus, group_name = "group", ...)
\S4method{groupOTU}{paml_rst}(object, focus, group_name = "group", ...)
\S4method{groupOTU}{phylo}(object, focus, group_name = "group", ...)
\S4method{groupOTU}{r8s}(object, focus, group_name = "group", tree = "TREE",
...)
}
\arguments{
\item{object}{supported objects, including phylo, paml_rst,
codeml_mlc, codeml, jplace, beast, hyphy}
\item{focus}{a vector of tip (label or number) or a list of tips.}
\item{group_name}{name of the group, 'group' by default}
\item{...}{additional parameter}
\item{tree}{which tree selected}
}
\value{
group index
}
\description{
group tree based on selected OTU, will traceback to MRCA
}
|
5f4e77385bb680be90613121f112d5b4edc2fd00 | 034ec001c663c1857e80b280828dacbd1afd377c | /man/form.it.Rd | 4919ed20106fd1ff7f86008da38d2b6aafe267ed | [] | no_license | socioskop/grit | 17439caa0db2a3ce90d7196360db8ea84ef9aa1f | 1df6f73dcebda13d65518fd2988a1920b30e9303 | refs/heads/master | 2023-04-26T10:29:17.054154 | 2021-05-25T10:29:57 | 2021-05-25T10:29:57 | 354,280,250 | 0 | 0 | null | null | null | null | UTF-8 | R | false | true | 325 | rd | form.it.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/formatting.R
\name{form.it}
\alias{form.it}
\title{formats a numeric vector to a fixed string format with digits}
\usage{
form.it(x, digits = 3, perc = F, max = NULL)
}
\description{
formats a numeric vector to a fixed string format with digits
}
|
e87b34d8e0eb9662e0506dcf4e1d774e45aaed0d | 3a96f3edd9e2427d7a01a89dd9a77b781a23a8bb | /FORESTFIRES.R | f30e8c4401e44de3a2a6c4af6a644dabd8659549 | [] | no_license | SRUTHISANKAR1/R_codes | 9f2b7cf110423d4766b3b7cda93eb648928224c2 | 7e549be7057ba20b1edf2dbeba43ceba83aa064b | refs/heads/main | 2023-05-17T11:41:02.938020 | 2021-06-11T06:50:56 | 2021-06-11T06:50:56 | 375,926,180 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 3,334 | r | FORESTFIRES.R | ##################SUPPORT VECTOR MACHINE######################
#Problem statement
#######classify the Size_Categorie using SVM
#month month of the year: 'jan' to 'dec'
#day day of the week: 'mon' to 'sun'
#FFMC FFMC index from the FWI system: 18.7 to 96.20
# DMC index from the FWI system: 1.1 to 291.3
#DC DC index from the FWI system: 7.9 to 860.6
# ISI index from the FWI system: 0.0 to 56.10
#temp temperature in Celsius degrees: 2.2 to 33.30
#RH relative humidity in %: 15.0 to 100
#wind speed in km/h: 0.40 to 9.40
#rain outside rain in mm/m2 : 0.0 to 6.4
#Size_Categorie the burned area of the forest ( Small , Large)
#target variable is "size_category"
#load the dataset
forestfires<-read.csv(file.choose()) # load the dataset via an interactive file picker
View(forestfires)
dim(forestfires) # 517 31
colnames(forestfires)
attach(forestfires)
######EXPLORATORY DATA ANALYSIS#####
#dummy variables already exist for month and day, so drop the original
#month/day columns plus one dummy from each set (avoid the dummy variable trap)
Forestfires<-forestfires[-c(1,2,12,19)]
head(Forestfires) #4 columns removed
dim(Forestfires) #517 27
colnames(Forestfires)
attach(Forestfires)
str(Forestfires) #num, int and chr column types present
sum(is.na(Forestfires)) #no null values (reported 0)
summary(Forestfires)
#the outcome is character; label encode it and convert it into a factor.
#NOTE(review): factor() assigns labels in alphabetical level order, so
#"large" -> 0 and "small" -> 1 here -- confirm this coding is intended.
unique(Forestfires$size_category) #"small" "large"
Forestfires$size_category<-factor(Forestfires$size_category,labels=c(0,1))
head(Forestfires$size_category)
str(Forestfires)
#convert all listed variables to factor.
#NOTE(review): this also turns continuous predictors (FFMC, temp, ...) into
#factors, discarding their ordering and scale -- verify this is intended.
columns<-c("FFMC","DMC","DC","ISI","temp","RH","wind","rain","area","daymon","daysat","daysun","daythu","daytue","daywed","monthaug",
           "monthdec","monthfeb","monthjan","monthjul",
           "monthjun","monthmar","monthmay","monthnov",
           "monthoct","monthsep","size_category")
columns
Forestfires[,columns] <- lapply(Forestfires[,columns] ,factor)
str(letters) # NOTE(review): stray debugging call on the built-in `letters`; unrelated to this analysis
str(Forestfires)
sum(is.na(Forestfires))
summary(Forestfires)
############train -test data splitting#####################
library(caTools)
set.seed(123)# fix the RNG so the split (and results) are reproducible
FF_split=sample.split(size_category,SplitRatio=0.8) # TRUE/FALSE split vector (~80% TRUE)
FF_train=subset(Forestfires,FF_split==TRUE)
View(FF_train)
dim(FF_train) #413 27
FF_test=subset(Forestfires,FF_split==FALSE)
View(FF_test)
dim(FF_test) #104 27
#############SVM _model building############################
library(caret)
library(e1071)
library(kernlab)
# Fit a linear-kernel SVM on the TRAINING split only.
# FIX: the original fit used the full `Forestfires` data, so the held-out
# `FF_test` rows leaked into training and the reported accuracy was optimistic.
model1<-ksvm(size_category~.,data=FF_train,kernel="vanilladot")
model1 # prints kernel, number of support vectors and training error
model1_pred<-predict(model1,FF_test)
head(model1_pred)
# confusion table: predicted class vs. actual class on the test split
x=table(model1_pred,FF_test$size_category)
#check the agreement between predictions and test data
agreement<-model1_pred==FF_test$size_category
table(agreement)
prop.table(table(agreement)) # proportion of test rows predicted (in)correctly
agreement
# NOTE(review): the figures recorded in the original script (2 wrong / 102
# right, ~98% accuracy) were obtained with the model trained on the full
# data set; expect slightly different numbers after this fix.
##we can create several models by using different kernel tricks, e.g.
# "rbfdot", "polydot", "tanhdot","laplacedot",
# "besseldot", "anovadot", "splinedot", "matrix"
|
a09eba1d2fc72dabd39cec739db42c39e071bf2e | ffdea92d4315e4363dd4ae673a1a6adf82a761b5 | /data/genthat_extracted_code/InformationValue/examples/specificity.Rd.R | 1d0df53ba98887ebbfd2dbb3888030bf1cfcd1a7 | [] | no_license | surayaaramli/typeRrh | d257ac8905c49123f4ccd4e377ee3dfc84d1636c | 66e6996f31961bc8b9aafe1a6a6098327b66bf71 | refs/heads/master | 2023-05-05T04:05:31.617869 | 2019-04-25T22:10:06 | 2019-04-25T22:10:06 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 240 | r | specificity.Rd.R | library(InformationValue)
### Name: specificity
### Title: specificity
### Aliases: specificity
### ** Examples
# Load the package's bundled example data of actual outcomes and model scores.
data('ActualsAndScores')
# Compute specificity (true-negative rate) of the predicted scores vs actuals.
specificity(actuals=ActualsAndScores$Actuals, predictedScores=ActualsAndScores$PredictedScores)
|
9b1e6f5aff09764b63186516c39f2d18aeb48c6c | f91d0120a1c1c77ac9b4aa25139d3e84227b9f03 | /cachematrix.R | 4d2dc4cbb6675c765ea4a92153ef2c513ef2f130 | [] | no_license | jcruzupr/ProgrammingAssignment2 | 949ceea782189a0a134ed30f65e9e01678e160c6 | 8bacaf375ede81da4d9494c2aebf165f810e78d4 | refs/heads/master | 2021-01-18T06:00:47.305092 | 2014-07-20T13:28:06 | 2014-07-20T13:28:06 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 2,051 | r | cachematrix.R | ## The following functions create a special "matrix" object that stores
## a given matrix and will also store/cache the inverse of the matrix.
## NOTE The file "HW_2 Sample Run" provides examples of usage.
## makeCacheMatrix: This function accepts a matrix as an argument
## and creates a special "matrix" object that can cache its inverse.
## The inverse is set to NULL when a new object is created or whenever
## the matrix that is stored is updated using the set() method. It also
## provides a get() method that returns the value of the matrix, and the
## methods setinv() and getinv() that set or get the inverse of the matrix.
## Here are two ways to create a special "matrix"
##
## plainMatrix1 <- matrix(c(3,2,0,0,0,1,2,-2,1), nrow=3, ncol=3)
## specialMatrix1 <- makeCacheMatrix(plainMatrix1)
##
## specialMatrix2 <- makeCacheMatrix(matrix(rnorm(25), c(5,5)))
makeCacheMatrix <- function(x = matrix()) {
  ## Wrap a matrix together with a lazily-populated cache for its inverse.
  ## Returns a list of accessors: set()/get() for the matrix itself and
  ## setinv()/getinv() for the cached inverse. Replacing the matrix via
  ## set() invalidates any previously cached inverse.
  cached_inverse <- NULL
  list(
    set = function(value) {
      x <<- value
      cached_inverse <<- NULL  # matrix changed, so the old inverse is stale
    },
    get = function() {
      x
    },
    setinv = function(inverse) {
      cached_inverse <<- inverse
    },
    getinv = function() {
      cached_inverse
    }
  )
}
## cacheSolve: This function accepts a special "matrix" as an argument.
## It returns the inverse of the special "matrix". If the inverse has
## already been calculated (and the matrix has not changed), then this
## method will return the value of inverse that is stored in cache,
## otherwise it will calculate the inverse and store it in cache.
## For example:
## cacheSolve(specialMatrix1) returns the inverse of specialMatrix1
cacheSolve <- function(x, ...) {
  ## Return the inverse of the special "matrix" 'x' (as built by
  ## makeCacheMatrix). The inverse is computed at most once: a cached
  ## value is reused (announced via message()), otherwise it is computed
  ## with solve(), stored in the cache, and returned. Extra arguments in
  ## `...` are forwarded to solve().
  cached <- x$getinv()
  if (is.null(cached)) {
    ## Cache miss: compute the inverse and remember it for next time.
    cached <- solve(x$get(), ...)
    x$setinv(cached)
  } else {
    message("getting and using cached data")
  }
  cached
}
|
f4f4c69f3cc54c81c0668977e87d853a2e0e287d | 777577ac7fb98704460cc1ace288a8dcbea655dd | /Result_img.R | ae797303971d88a4d3b25f89cd7d68b640bd7d06 | [] | no_license | SafiulAlom/Missing-Data-Analysis | d7864a597c168424724699b742d152b8c58135d8 | e3efc475a8e8c94b27488ceac4cf28b7691e7e34 | refs/heads/main | 2023-03-26T15:06:39.302804 | 2021-03-26T21:53:28 | 2021-03-26T21:53:28 | 350,869,008 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 2,550 | r | Result_img.R | remove(list = ls())
library(VIM)
library(mice)
Result_mice = readRDS("C:\\Users\\Himel\\OneDrive\\Studium\\M.Sc. Statistics\\3_Semester\\Statistical analysis of missing data\\presentation\\object.cart.rf.rds")
setwd("C:\\Users\\Himel\\OneDrive\\Studium\\M.Sc. Statistics\\3_Semester\\Statistical analysis of missing data\\presentation\\image")
# Missingness pattern can be visualised in VIM package by
#png("pattern_miss.png", height = 350, width = 600)
miss.data_mice = Result_mice$miss.data_mice
aggr(miss.data_mice, col=mdc(1:2), numbers=TRUE, sortVars=TRUE,
labels=names(miss.data_mice), cex.axis=.7, gap=3,
ylab=c("Proportion of missingness","Missingness Pattern"))
#dev.off()
#png("marginPlot.png", height = 350, width = 500)
# The margin plot of the pairs can be plotted using VIM package as
marginplot(miss.data_mice[, c("cons.price.idx", "cons.conf.idx")],
col = mdc(1:2), cex.numbers = 1.2, pch = 19)
#dev.off()
imp.cart_mice = Result_mice$imp.cart_mice
#diagonistic plots for multiple imputation with cart
#png("stripplot_cart.png", height = 350, width = 600)
stripplot(imp.cart_mice, pch = 20, cex = 1.2)
#dev.off()
#png(filename="convergencePlot_cart1.png", height = 350, width = 600)
#convergence plot
plot(imp.cart_mice, c("cons.price.idx", "cons.conf.idx"))
#dev.off()
#png(filename="convergencePlot_cart2.png", height = 350, width = 600)
plot(imp.cart_mice, c("age", "duration", "campaign"))
#dev.off()
#density plot
#png(filename="density_cart.png", height = 350, width = 600)
densityplot(imp.cart_mice)
#dev.off()
imp.rf_mice = Result_mice$imp.rf_mice
#diagonistic plots for multiple imputation with random forest
#png("stripplot.png", height = 350, width = 600)
stripplot(imp.rf_mice, pch = 20, cex = 1.2)
#dev.off()
#convergence plot
#png(filename="convergencePlot_rf1.png", height = 350, width = 600)
plot(imp.rf_mice, c("cons.price.idx", "cons.conf.idx"))
#dev.off()
#png(filename="convergencePlot_rf2.png", height = 350, width = 600)
plot(imp.rf_mice, c("age", "duration", "campaign"))
#dev.off()
#density plot
#png(filename="densityplot_rf.png", height = 350, width = 600)
densityplot(imp.rf_mice)
#dev.off()
Result_simulation = readRDS("C:\\Users\\Himel\\OneDrive\\Studium\\M.Sc. Statistics\\3_Semester\\Statistical analysis of missing data\\presentation\\object_mi.rds")
write.csv2(Result_simulation$Bias, "Bias.csv")
write.csv2(Result_simulation$MSE, "MSE.csv")
write.csv2(Result_simulation$Coverage, "Coverage.csv")
|
c6d5044c1a82d2038ac7cc9ebb11f0730fd5fc69 | 8127b757d7e83e239b83934079a86bd1e9c15e25 | /Learning_RMarkdown/utilities/plot_pop_by_country.R | 0dbdb04d21c3ada02ad25bb0bfe1c61c58b8dc39 | [
"MIT"
] | permissive | samirgadkari/ds | e58ba311fff8f428c5af9baf0eb1c83672d7e7b8 | 79240ca5ba70e877b13b5330d12e2ca1f3d560df | refs/heads/master | 2021-06-27T07:03:16.271118 | 2020-12-29T02:09:31 | 2020-12-29T02:09:31 | 198,858,452 | 0 | 0 | MIT | 2020-12-29T02:09:32 | 2019-07-25T15:37:15 | Jupyter Notebook | UTF-8 | R | false | false | 205 | r | plot_pop_by_country.R | plotPopByCountry = function(popData, countryToPlot) {
gappyCountry = popData[popData$country == countryToPlot,]
p = ggplot(data = gappyCountry, aes(x=year, y=pop)) +
geom_point()
return(p)
} |
a8881ae9bf349d6f3974b5b47c2f4665f3f0a71d | 1df29054dba27843aeb8b46286e9347dac7dd6b1 | /Groundwater/scripts/GWState.R | 955f410156fc5eb05b2b3aae76587d52d188e016 | [] | no_license | lukefullard/LAWA2021 | 9e40fab111493361de1a95a0572e05a7d9410a7a | db3528005791fba016b40cbd8dd268dba4ed2b6e | refs/heads/main | 2023-07-19T16:00:29.563125 | 2021-09-27T19:50:27 | 2021-09-27T19:50:27 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 32,238 | r | GWState.R | #Groudnwater state analysis
rm(list=ls())
library(tidyverse)
source('H:/ericg/16666LAWA/LAWA2021/Scripts/LAWAFunctions.R')
source('H:/ericg/16666LAWA/LWPTrends_v2101/LWPTrends_v2101.R')
EndYear <- 2020#year(Sys.Date())-1
startYear5 <- EndYear - 5+1
startYear10 <- EndYear - 10+1
startYear15 <- EndYear - 15+1
plotto=F
applyDataAbundanceFilters=F
dir.create(paste0("H:/ericg/16666LAWA/LAWA2021/Groundwater/Data/", format(Sys.Date(),"%Y-%m-%d")),showWarnings = F)
# GWdata = readxl::read_xlsx(paste0("C:/Users/ericg/Otago Regional Council/Abi Loughnan - LAWA Annual Water Refresh 2021/Groundwater Quality/",
# "GWExport_20210914.xlsx"),sheet=1,guess_max = 50000)%>%
# filter(Variable_aggregated%in%c("Nitrate nitrogen","Chloride",
# "Dissolved reactive phosphorus",
# "Electrical conductivity/salinity",
# "E.coli","Ammoniacal nitrogen"))%>%as.data.frame
GWdata = readxl::read_xlsx(tail(dir(path="H:/ericg/16666LAWA/LAWA2021/Groundwater/Data/",
pattern="GWExport_.*xlsx",full.names = T,recursive = T),1),
sheet=1,guess_max = 50000)%>%
filter(Variable_aggregated%in%c("Nitrate nitrogen","Chloride",
"Dissolved reactive phosphorus",
"Electrical conductivity/salinity",
"E.coli","Ammoniacal nitrogen"))%>%as.data.frame
siteTab=GWdata%>%drop_na(Site_ID)%>%select(Source,Site_ID,LAWA_ID,RC_ID,Latitude,Longitude)%>%distinct
these=which(is.na(GWdata$Site_ID))
if(length(these)>0)GWdata$Site_ID[these]=siteTab$Site_ID[match(GWdata$LAWA_ID[these],siteTab$LAWA_ID)]
these=which(is.na(GWdata$RC_ID))
if(length(these)>0)GWdata$RC_ID[these]=siteTab$RC_ID[match(GWdata$LAWA_ID[these],siteTab$LAWA_ID)]
these=which(is.na(GWdata$Latitude))
if(length(these)>0)GWdata$Latitude[these]=siteTab$Latitude[match(GWdata$LAWA_ID[these],siteTab$LAWA_ID)]
these=which(is.na(GWdata$Longitude))
if(length(these)>0)GWdata$Longitude[these]=siteTab$Longitude[match(GWdata$LAWA_ID[these],siteTab$LAWA_ID)]
rm(these)
#206482 of 17 4-8-20
#234751 of 17 10-8-20
#254451 14-8-20
#262451 24-8-20
#261854 28-8-20
#262749 04-9-20
#216873 14-9-20
#197088 3/8/21
#194562 6/8/21
#215206 13/8/21
#208293 20/8/21
#213592 27/8/21
#212767 03/09/21
GWdata%>%split(f=.$Source)%>%purrr::map(.f = function(x)any(apply(x,2,FUN=function(y)any(grepl('<|>',y,ignore.case=T)))))
#ECAN GDC HBRC HRC MDC ORC ES TRC TDC GWRC WCRC do
#AC BOPRC NCC NRC WRC dont
#Auckland censoring info
#PK at WRC says bit posns 13 and 14 are < and >
# Diagnostic tallies of the qualifier bit flags. Per the comment above
# ("PK at WRC says bit posns 13 and 14 are < and >"); note the code below
# (CensLeft/CensRight) treats bit 14 as '<' and bit 13 as '>' -- the reverse
# of that comment's order -- TODO confirm which mapping is authoritative.
table(bitwAnd(2^13,as.numeric(GWdata$Qualifier))==2^13)
table(GWdata$Source,bitwAnd(2^14,as.numeric(GWdata$Qualifier))==2^14)
# NOTE(review): two empty `if` statements with vector-valued conditions were
# removed here. They had no effect and would error under R >= 4.2
# ("the condition has length > 1"); the actual censor tagging happens below.
# Tag censored results from the qualifier bit field: rows flagged on bit 14
# get a '<' (left-censored) prefix, rows flagged on bit 13 get a '>'
# (right-censored) prefix, in both `Result-raw` and `Result-prefix`.
CensLeft = which(bitwAnd(2^14,as.numeric(GWdata$Qualifier))==2^14)
CensRight = which(bitwAnd(2^13,as.numeric(GWdata$Qualifier))==2^13)
if(length(CensLeft)>0){
  with(GWdata[CensLeft,],table(Variable_aggregated,`Result-raw`))
  GWdata$`Result-raw`[CensLeft]=paste0('<',GWdata$`Result-raw`[CensLeft])
  GWdata$`Result-prefix`[CensLeft]='<'
}
rm(CensLeft)
if(length(CensRight)>0){
  # BUGFIX: this branch previously indexed with CensLeft -- already rm()'d
  # above, so it would have errored (and set the prefix on the wrong rows).
  # Both statements must use CensRight.
  GWdata$`Result-raw`[CensRight]=paste0('>',GWdata$`Result-raw`[CensRight])
  GWdata$`Result-prefix`[CensRight]='>'
}
rm(CensRight)
#6/3/20
#CHeck BOPRC for censored ecoli
GWdata%>%filter(Source=="Bay of Plenty")%>%grepl(pattern = '<',x = .$`Result-prefix`)%>%sum
if(0){
#BOP censoring should be indicated in the result-prefix column
bopcens = readxl::read_xlsx('h:/ericg/16666LAWA/LAWA2021/Groundwater/Data/BOPRC E coli QT datasets.xlsx',sheet = 2)%>%
dplyr::rename(RC_ID=Site,'Result-prefix'=Qualifiers)%>%
mutate(Date=Time-hours(12))%>%
select(RC_ID,Date,`Result-prefix`)
bopcens$`Result-prefix` = as.character(factor(bopcens$`Result-prefix`,levels=c("<DL",">DL"),labels=c('<','>')))
bopdat=GWdata%>%filter(Source=='Bay of Plenty'&Variable_aggregated=="E.coli")%>%select(-`Result-prefix`)
bopdat = left_join(bopdat,bopcens,by=c("RC_ID","Date"))
rm(bopcens)
GWdata=full_join(GWdata%>%filter(!(Source=='Bay of Plenty'&Variable_aggregated=="E.coli")),bopdat)%>%arrange(Source)
rm(bopdat)
# 3/9/20
GWdata%>%filter(Source=="Waikato")%>%grepl(pattern = '<',x = .$`Result-prefix`)%>%sum
waikcens = readxl::read_xlsx('h:/ericg/16666LAWA/LAWA2021/Groundwater/Data/E coli BOP and Waikato.xlsx',sheet=1)%>%
dplyr::filter(Region=="Waikato")%>%
dplyr::rename(LawaSiteID=`Lawa ID`,Date=`Result-date`)%>%
select(LawaSiteID,Date,`Result-prefix`)%>%
arrange(Date)
waikdat=GWdata%>%filter(Source=='Waikato'&Variable_aggregated=="E.coli")%>%select(-`Result-prefix`)%>%arrange(Date)
waikdat$`Result-prefix` <- waikcens$`Result-prefix`
rm(waikcens)
GWdata <- full_join(GWdata%>%filter(!(Source=="Waikato"&Variable_aggregated=="E.coli")),waikdat)%>%arrange(Source)
rm(waikdat)
}
c("Nitrate nitrogen",
"Chloride",
"Dissolved reactive phosphorus",
"Electrical conductivity/salinity",
"E.coli",
"Ammoniacal nitrogen")
# This copied from SWQ, needs fixing up to GW context
#Censor the data there that came in.
# Datasource or Parameter Type Measurement or Timeseries Name Units AQUARIUS Parameter Detection Limit
# Total Oxidised Nitrogen Total Oxidised Nitrogen g/m3 Nitrite Nitrate _as N__LabResult 0.001
# Total Nitrogen Total Nitrogen g/m3 N _Tot__LabResult 0.01
# Ammoniacal Nitrogen Ammoniacal Nitrogen g/m3 Ammoniacal N_LabResult 0.002
# Dissolved Reactive Phosphorus DRP g/m3 DRP_LabResult 0.001
# Total Phosphorus TP g/m3 P _Tot__LabResult 0.001
# Turbidity Turbidity NTU Turbidity, Nephelom_LabResult 0.1
# pH pH pH units pH_LabResult 0.2
# Visual Clarity BDISC m Water Clarity_LabResult 0.01
# Escherichia coli Ecoli /100 ml E coli_LabResult 1
# These next from Lisa N 15/9/2021
# Chloride Cl _Dis__LabResult 0.5 g/m3 value from Paul Scholes email
# Nitrate nitrogen Nitrate _N__LabResult 0.001
# Nitrogen - Other Nitrite _as N__LabResult 0.001
# Dissolved reactive phosphorus DRP_LabResult 0.001
# Electrical conductivity/salinity Conductivity_LabResult 0.001
# Ammoniacal nitrogen Ammoniacal N_LabResult 0.002
# E.coli E coli QT_LabResult 1
# Nitrogen - Other N _Tot__LabResult 0.01
# Nitrogen - Other Nitrite Nitrate _as N__LabResult 0.001
# Apply Bay of Plenty detection limits: any result below its limit becomes a
# left-censored '<DL' record (raw text, prefix, and edited value all updated).
# Limits as listed in the comment block above (from Lisa N 15/9/2021;
# chloride 0.5 g/m3 per Paul Scholes email).
# BUGFIX: three of the original per-measurement copies had a misplaced
# closing parenthesis, e.g. as.numeric(GWdata$`Result-raw` < 0.5) -- a
# lexicographic character-vs-number comparison whose numeric result then made
# which() error ("argument to 'which' is not logical"). The comparison is now
# as.numeric(raw) < limit for every measurement, and the six copy-pasted
# stanzas are collapsed into one data-driven loop.
bopDetectionLimits <- c("Ammoniacal nitrogen"              = 0.002,
                        "Chloride"                         = 0.5,
                        "Dissolved reactive phosphorus"    = 0.001,
                        "Electrical conductivity/salinity" = 0.001,
                        "Nitrate nitrogen"                 = 0.001,
                        "E.coli"                           = 1)
for(measName in names(bopDetectionLimits)){
  dLim <- bopDetectionLimits[[measName]]
  cenThese <- which(GWdata$Source=='Bay of Plenty'&
                      GWdata$Variable_aggregated==measName&
                      as.numeric(GWdata$`Result-raw`)<dLim)
  if(length(cenThese)>0){
    GWdata$`Result-raw`[cenThese] <- paste0('<',dLim)
    GWdata$`Result-prefix`[cenThese] <- '<'
    GWdata$`Result-edited`[cenThese] <- dLim
  }
}
rm(bopDetectionLimits,measName,dLim,cenThese)
GWdata <- GWdata%>%mutate(LawaSiteID=`LAWA_ID`,
Measurement=`Variable_aggregated`,
Region=`Source`,
# Date = `Date`,
Value=`Result-edited`,
siteMeas=paste0(LAWA_ID,'.',Variable_aggregated))
#Drop QC-flagged problems
stopifnot(all(bitwAnd(as.numeric(GWdata$Qualifier),255)%in%c(10,30,42,43,151,NA))) #See email from Vanitha Pradeep 13-8-2021
table(GWdata$Source[!bitwAnd(as.numeric(GWdata$Qualifier),255)%in%c(10,30,42,43,151,NA)])
GWdata$Qualifier = bitwAnd(as.numeric(GWdata$Qualifier),255)
GWdata <- GWdata%>%dplyr::filter(!Qualifier%in%c(42,151)) #42 means poor quality, 151 means missing
#206482 of 22 4-8-20
#234751 of 22 10-8-20
#254451 14-8-20
#262432 of 23 24-8-20
#261832 of 23 28-8-20
#262727 4/9/20
#263248 of 23 14/9/2021
#197063 of 23 3/8/2021
#194537 6/8/21
#215184 13/8/21
#208268 20/8/21
#213567 27/8/21
#212742 3/9/21
noActualData = which(is.na(GWdata$Site_ID)&is.na(GWdata$`Result-raw`)&is.na(GWdata$Date))
if(length(noActualData)>0){
GWdata <- GWdata[-noActualData,]
}
rm(noActualData) #215176
GWdata <- GWdata%>%distinct
#214254 of 31 11-7
#212401 of 31 11-14
#225242 of 22 11-22
#225898 of 22 11-25
#228933 of 22 13-3-2021
#229104 of 22 20-3-20
#229050 of 22 27-3-20
#206382 of 22 04/08/20
#234621 of 22 10-8-20
#254315 14-8-20
#262225 of 23 24-8-20 #qualifier column added
#261775 of 23 28-8-20
#262670 4/9/20
#216665 14-9-20
#196964 3/8/21
#194437 6/8/21
#215097 13/8/21
#208174 20/8/21
#213480 27/8/21
#212655 03/09/21
GWdata$Value[which(GWdata$`Result-prefix`=='<')] <- GWdata$`Result-edited`[which(GWdata$`Result-prefix`=='<')]*0.5
GWdata$Value[which(GWdata$`Result-prefix`=='>')] <- GWdata$`Result-edited`[which(GWdata$`Result-prefix`=='>')]*1.1
if(plotto){
table(GWdata$Value==GWdata$`Result-edited`)
table(GWdata$`Result-prefix`==""|is.na(GWdata$`Result-prefix`))
GWdata[which(GWdata$`Result-prefix`!="" & GWdata$Value==GWdata$`Result-edited`),]
}
GWdata$Censored=FALSE
GWdata$Censored[grepl(pattern = '^[<|>]',GWdata$`Result-prefix`)]=TRUE
GWdata$CenType='not'
GWdata$CenType[grepl(pattern = '^<',GWdata$`Result-prefix`)]='lt'
GWdata$CenType[grepl(pattern = '^>',GWdata$`Result-prefix`)]='gt'
if(plotto){
table(GWdata$Source,GWdata$CenType)
table(GWdata$Region,GWdata$Measurement)
}
#Conductivity is in different units ####
table(GWdata$Variable_units[GWdata$Variable_aggregated=="Electrical conductivity/salinity"])
table(GWdata$Variable_units[GWdata$Variable_aggregated=="Electrical conductivity/salinity"],
GWdata$Source[GWdata$Variable_aggregated=="Electrical conductivity/salinity"])
par(mfrow=c(2,1),mar=c(10,4,4,2))
with(GWdata[GWdata$Variable_aggregated=="Electrical conductivity/salinity"&GWdata$Value>0,],
plot(as.factor(Variable_units),(Value),log='y',las=2))
#Set all units to S/m
these = which(GWdata$Variable_units%in%c('µS/cm','us/cm','uS/cm'))
GWdata$Variable_units[these] <- 'µS/cm'
# these = which(GWdata$Variable_units%in%c('mS/cm','ms/cm'))
# GWdata$Variable_units[these] <- 'µS/cm'
# GWdata$Value[these] = GWdata$Value[these]*1000
these = which(GWdata$Variable_units=='mS/m')
GWdata$Variable_units[these] <- 'µS/cm'
GWdata$Value[these] = GWdata$Value[these]*1000/100
these = which(GWdata$Variable_units%in%c('mS/m @25 deg C','mS/m @25°C','ms/m@25C','mS/m@20C','mS/m@25C'))
GWdata$Variable_units[these] <- 'µS/cm'
GWdata$Value[these] = GWdata$Value[these]*1000/100
with(GWdata[GWdata$Variable_aggregated=="Electrical conductivity/salinity"&GWdata$Value>0,],
plot(as.factor(Variable_units),Value,log='y',las=2))
mtext(side = 1,text='µS/cm')
#FreqCheck expects a one called "Date" ####
GWdata$myDate <- as.Date(as.character(GWdata$Date))
GWdata <- GetMoreDateInfo(GWdata)
GWdata$monYear = base::format.Date(GWdata$myDate,"%b-%Y")
GWdata$quYear = paste0(quarters(GWdata$myDate),'-',base::format.Date(GWdata$myDate,'%Y'))
write.csv(GWdata,paste0('h:/ericg/16666LAWA/LAWA2021/Groundwater/Data/',
format(Sys.Date(),'%Y-%m-%d'),'/GWdata.csv'),row.names=F)
# GWdata = read_csv(tail(dir('h:/ericg/16666LAWA/LAWA2021/Groundwater/Data/','GWdata.csv',recursive=T,full.names=T),1),guess_max = 5000)
#STATE ####
#Carl Hanson 11/9/2019: We only need state and trend for five parameters:
# nitrate nitrogen, chloride, DRP, electrical conductivity and E. coli.
periodYrs=5
GWdataRelevantVariables <- GWdata%>%
filter(Measurement%in%c("Nitrate nitrogen","Chloride",
"Dissolved reactive phosphorus",
"Electrical conductivity/salinity",
"E.coli","Ammoniacal nitrogen"))%>%
filter(`Result-raw`!='*')%>% #This excludes non-results discussed by Carl Hanson by email April 2020
dplyr::filter(lubridate::year(myDate)>=(EndYear-periodYrs+1)&lubridate::year(myDate)<=EndYear)%>%
select(-'Result-raw',-'Result-metadata',-'Variable')%>%
dplyr::group_by(siteMeas,monYear)%>% #Month and year
dplyr::summarise(.groups='keep',
LawaSiteID=unique(LawaSiteID),
Measurement=unique(Measurement),
Year=unique(Year),
Qtr=unique(Qtr),
Month=unique(Month),
Value=median(Value,na.rm=T),
Date=first(Date,1),
myDate=first(myDate,1),
LcenLim = suppressWarnings({max(`Result-edited`[`Result-prefix`=='<'],na.rm=T)}),
RcenLim = suppressWarnings({min(`Result-edited`[`Result-prefix`=='>'],na.rm=T)}),
CenType = ifelse(Value<LcenLim,'lt',ifelse(Value>RcenLim,'gt',NA)))%>%
ungroup%>%distinct
#68163
#32382 3/8/21
#65305 13/8/21
#62173 20/8/21
#64958 27/8/21
#64979
freqs <- split(x=GWdataRelevantVariables,
f=GWdataRelevantVariables$siteMeas)%>%purrr::map(~freqCheck(.))%>%unlist
table(freqs)
# freqs
# bimonthly monthly quarterly
# 19 190 3547 11-14
# 37 196 3649 11-22
# 36 192 3829 11-25
# 36 192 4000 03-06-2021
# 36 180 3992 03-13-2021
# 32 180 3917 03-20-2021
# 35 180 3994 03-27-2021
# 35 180 3993 04-23-2021
# 38 130 4151 04/08/2021
# 43 130 4880 10/8/20
# 43 134 5229 14-8-20
# 44 134 5349 24-8-20
# 43 134 5355 28-8-20
# 42 134 5356
# 42 134 5366 14-9-20
# 14 58 3041 3-8-21
# 32 65 4758 6-8-21
# 32 77 5245 13-8-21
# 32 74 5004 20-8-21
# 32 75 5214 27/8/21
# 32 75 5221 3/9/21
GWdataRelevantVariables$Frequency=freqs[GWdataRelevantVariables$siteMeas]
rm(freqs)
GWdataRelevantVariables$CenBin = 0
GWdataRelevantVariables$CenBin[GWdataRelevantVariables$CenType=='lt'] = 1
GWdataRelevantVariables$CenBin[GWdataRelevantVariables$CenType=='gt'] = 2
#Calculate state (median) ####
#Get a median value per site/Measurement combo
GWmedians <- GWdataRelevantVariables%>%
group_by(LawaSiteID,Measurement)%>%
dplyr::summarise(.groups='keep',
median=quantile(Value,probs = 0.5,type=5,na.rm=T),
MAD = quantile(abs(median-Value),probs=0.5,type=5,na.rm=T),
count = n(),
minPerYear = min(as.numeric(table(factor(as.character(lubridate::year(myDate)),
levels=as.character(startYear5:EndYear))))),
nYear = length(unique(Year)),
nQuart=length(unique(Qtr)),
nMonth=length(unique(Month)),
# censoredCount = sum(!is.na(CenType)),
censoredCount = sum(Value<=(0.5*LcenLim) | Value>=(1.1*RcenLim)),
CenType = Mode(CenBin),
Frequency=unique(Frequency))%>%
ungroup%>%
mutate(censoredPropn = censoredCount/count)
#5328 of 13
GWmedians$CenType = as.character(GWmedians$CenType)
GWmedians$CenType[GWmedians$CenType==1] <- '<'
GWmedians$CenType[GWmedians$CenType==2] <- '>'
GWmedians$censMedian = GWmedians$median
GWmedians$censMedian[GWmedians$censoredPropn>=0.5 & GWmedians$CenType=='<'] <-
paste0('<',2*GWmedians$median[GWmedians$censoredPropn>=0.5 & GWmedians$CenType=='<'])
GWmedians$censMedian[GWmedians$censoredPropn>=0.5 & GWmedians$CenType=='>'] <-
paste0('>',GWmedians$median[GWmedians$censoredPropn>=0.5 & GWmedians$CenType=='>']/1.1)
#E coli detection ####
#1 is detect, 2 is non-detect
# GWmedians$EcoliDetect=NA
# GWmedians$EcoliDetect[which(GWmedians$Measurement=="E.coli")] <- "1" #Detect
# GWmedians$EcoliDetect[which(GWmedians$Measurement=="E.coli"&
# (GWmedians$censoredPropn>0.5|
# GWmedians$median==0|
# is.na(GWmedians$median)))] <- "2" #Non-detect
# table(GWmedians$EcoliDetect)
#Changing the 'censoredCount' above.
#When it's based on censoredCount = sum(!is.na(CenType)), I get 88 detects, 688 non detects
#When it's based on censoredCount = sum(Value<=(0.5*LcenLim) | Value>=(1.1*RcenLim)) I get 89 detects, 687 non detects
GWmedians$EcoliDetectAtAll=NA
GWmedians$EcoliDetectAtAll[which(GWmedians$Measurement=="E.coli")] <- "1" #Detect
GWmedians$EcoliDetectAtAll[which(GWmedians$Measurement=="E.coli"&
(GWmedians$censoredPropn==1|
GWmedians$median==0|
is.na(GWmedians$median)))] <- "2" #Non-detect
table(GWmedians$EcoliDetectAtAll)
#Changing the 'censoredCount' above.
#When it's based on censoredCount = sum(!is.na(CenType)), I get 391 detects, 385 non detects
#When it's based on censoredCount = sum(Value<=(0.5*LcenLim) | Value>=(1.1*RcenLim)) I get 394 detects, 382 non detects
# table(GWmedians$EcoliDetect,GWmedians$EcoliDetectAtAll)
#3845 of 11 11-7
#3756 of 11 11-14
#3882 of 11 11-22
#4057 of 11 11-25
#4228 of 11 03-06-20
#4208 of 12 03-13-20
#4208 of 13 03-20-20 Added ecoliDetect Emails in LAWA 2019 groudnwater folder
#4209 of 15 03-27-20
#4208 of 16 04-23-20 added ecoliDetectAtAll
#4319 of 16 04/08/20
#5053 of 16 10/8/20
#5473 14/8/20
#5527 24/8/20
#5532 28
#5532
#5542 14-9-20
#3113 3-8-21
#4855 6-8-21
#5354 13/8/21
#5321 27/8/21
#5328 3/9/21
GWmedians$meas = factor(GWmedians$Measurement,
levels=c("Ammoniacal nitrogen","Chloride","Dissolved reactive phosphorus",
"E.coli","Electrical conductivity/salinity","Nitrate nitrogen"),
labels=c("NH4","Cl","DRP","ECOLI","NaCl","NO3"))
#Plotting
if(plotto){
with(GWmedians[GWmedians$Frequency=='quarterly',],table(minPerYear,count))
GWmedians[which(GWmedians$Frequency=='quarterly'&GWmedians$count==20),]
par(mfrow=c(1,1))
plot(GWmedians$median,GWmedians$MAD,log='xy',xlab='Median',ylab='MAD',
col=as.numeric(factor(GWmedians$Measurement)),pch=16,cex=1)
abline(0,1)
GWmedians%>%filter(MAD>median)
plot(GWmedians$median,GWmedians$MAD/GWmedians$median,log='x',xlab='Median',ylab='MAD/Median',
col=as.numeric(factor(GWmedians$Measurement)),pch=16,cex=1)
#Show data abundance with original filter cutoffs:
par(mfrow=c(3,1))
with(GWmedians%>%filter(Frequency=='monthly'),hist(count,main='monthly'));abline(v=30,lwd=2,col='red')
with(GWmedians%>%filter(Frequency=='bimonthly'),hist(count,main='bimonthly',breaks = 20));abline(v=15,lwd=2,col='red')
with(GWmedians%>%filter(Frequency=='quarterly'),hist(count,main='quarterly'));abline(v=10,lwd=2,col='red')
par(mfrow=c(3,1))
with(GWmedians%>%filter(Frequency=='monthly'),hist(minPerYear,main='monthly'));abline(v=6,lwd=2,col='red')
with(GWmedians%>%filter(Frequency=='bimonthly'),hist(minPerYear,main='bimonthly',breaks = 20));abline(v=3,lwd=2,col='red')
with(GWmedians%>%filter(Frequency=='quarterly'),hist(minPerYear,main='quarterly'));abline(v=2,lwd=2,col='red')
par(mfrow=c(3,1))
with(GWmedians%>%filter(Frequency=='monthly'),hist(nMonth,main='monthly'));abline(v=11.5,lwd=2,col='red')
with(GWmedians%>%filter(Frequency=='bimonthly'),hist(nMonth,main='bimonthly',breaks=12));abline(v=5,lwd=2,col='red')
with(GWmedians%>%filter(Frequency=='quarterly'),hist(nQuart,main='quarterly'));abline(v=3.5,lwd=2,col='red')
}
if(plotto){
par(mfrow=c(3,3))
plot(GWmedians$median,GWmedians$MAD/(GWmedians$median*GWmedians$count^0.5),
log='xy',pch=c(1,16)[as.numeric(GWmedians$Exclude)+1],col=as.numeric(factor(GWmedians$Measurement)),cex=sqrt(GWmedians$count))
GWmedians%>%split(GWmedians$Measurement)%>%
purrr::map(~plot(.$median,.$MAD/(.$median*.$count^0.5),
log='xy',pch=c(1,16)[as.numeric(.$Exclude)+1],main=unique(.$Measurement),cex=sqrt(.$count)))
par(mfrow=c(3,3))
plot(GWmedians$median,GWmedians$MAD/(GWmedians$median),
log='xy',lwd=c(2,1)[as.numeric(GWmedians$Exclude)+1],col=as.numeric(factor(GWmedians$Measurement)),cex=sqrt(GWmedians$count))
abline(h=1)
GWmedians%>%split(GWmedians$Measurement)%>%
purrr::map(~{plot(.$median,.$MAD/(.$median),
log='xy',lwd=c(2,1)[as.numeric(.$Exclude)+1],main=unique(.$Measurement),cex=sqrt(.$count),ylim=c(0.001,5))
abline(h=1)})
par(mfrow=c(3,3))
plot(GWmedians$median,GWmedians$MAD,
log='xy',pch=c(1,16)[as.numeric(GWmedians$Exclude)+1],col=as.numeric(factor(GWmedians$Measurement)),cex=sqrt(GWmedians$count))
abline(0,1)
GWmedians%>%split(GWmedians$Measurement)%>%
purrr::map(~{plot(.$median,.$MAD,
log='xy',pch=c(1,16)[as.numeric(.$Exclude)+1],main=unique(.$Measurement),cex=sqrt(.$count))
abline(0,1)})
table(GWmedians$count)
}
if(plotto){
table(GWmedians$count)
table(GWmedians$count,GWmedians$meas)
table(GWmedians$count,GWmedians$Frequency)
par(mfrow=c(1,1))
plot(density(GWmedians$count,from=min(GWmedians$count),adjust=2),xlim=range(GWmedians$count))
par(mfrow=c(3,3))
uMeasures=unique(GWmedians$Measurement)
for(measure in uMeasures){
dtp = GWmedians%>%filter(Measurement==measure,median>=0)
rtp = GWdataRelevantVariables%>%filter(Measurement==measure,Value>=0)
if(!measure%in%('Water Level')){
if(measure%in%c('Ammoniacal nitrogen','E.coli')){adjust=2}else{adjust=1}
plot(density(log(dtp$median),na.rm=T,adjust=adjust),main=measure,xaxt='n',xlab='')
lines(density(log(rtp$Value),na.rm=T,adjust=2*adjust),col='grey')
rug(log(dtp$median))
axis(side = 1,at = pretty(log(dtp$median)),labels=signif(exp(pretty(log(dtp$median))),2))
}else{
plot(density(dtp$median,na.rm=T),main=measure,xlab='')
rug(dtp$median)
lines(density(rtp$Value,na.rm=T),col='grey')
}
}
rm(dtp,rtp,measure)
#pick a site, pick a measurement
nExample=0
while(nExample<3){
site=sample(x = unique(GWmedians$LawaSiteID),size = 1)
meas=sample(x = unique(GWmedians$Measurement),size = 1)
if(length(which(GWmedians$LawaSiteID==site&GWmedians$Measurement==meas))>0){
toPlot = GWdataRelevantVariables%>%filter(LawaSiteID==site&Measurement==meas)%>%select(Value)%>%drop_na%>%unlist
plot(density(toPlot,na.rm=T,from=0),xlab='',main=paste(site,meas))
rug(toPlot)
abline(v = GWmedians%>%filter(LawaSiteID==site&Measurement==meas)%>%select(median))
nExample=nExample+1
}
}
rm(site,meas)
}
GWmedians$Source = GWdata$Source[match(GWmedians$LawaSiteID,GWdata$LawaSiteID)]
GWmedians$StateVal = GWmedians$median
GWmedians$StateVal[GWmedians$Measurement=="E.coli"] <- GWmedians$EcoliDetectAtAll[GWmedians$Measurement=="E.coli"]
#Export Median values
dir.create(paste0("h:/ericg/16666LAWA/LAWA2021/Groundwater/Analysis/",format(Sys.Date(),"%Y-%m-%d")))
write.csv(GWmedians,file = paste0('h:/ericg/16666LAWA/LAWA2021/Groundwater/Analysis/',format(Sys.Date(),"%Y-%m-%d"),
'/ITEGWState',format(Sys.time(),"%d%b%Y"),'.csv'),row.names = F)
write.csv(GWmedians,file = paste0("c:/Users/ericg/Otago Regional Council/Abi Loughnan - LAWA Annual Water Refresh 2021/Groundwater Quality/EffectDelivery/",
'/ITEGWState',format(Sys.time(),"%d%b%Y"),'.csv'),row.names = F)
rm(GWdataRelevantVariables)
if(exists('GWdataReducedTemporalResolution')){rm(GWdataReducedTemporalResolution)}
GWmedians <- read.csv(tail(dir(path='./Analysis',pattern='ITEGWState',full.names = T,recursive = T),1),stringsAsFactors = F)
######################################################################################
#Calculate trends ####
GWtrendData <- GWdata%>%filter(Measurement%in%c("Nitrate nitrogen","Chloride",
"Dissolved reactive phosphorus",
"Electrical conductivity/salinity",
"E.coli","Ammoniacal nitrogen"))
library(parallel)
library(doParallel)
workers <- makeCluster(2)
registerDoParallel(workers)
startTime=Sys.time()
clusterCall(workers,function(){
library(magrittr)
library(plyr)
library(dplyr)
})
foreach(py = c(10,15),.combine=rbind,.errorhandling="stop")%dopar%{
pn=0.75
tredspy <- split(GWtrendData,GWtrendData$siteMeas)%>%
purrr::map(~trendCore(.,periodYrs=py,proportionNeeded=pn,valToUse = 'Result-edited')) #Proportion needed is greater or equal
tredspy <- do.call(rbind.data.frame,tredspy)
row.names(tredspy)=NULL
return(tredspy)
}->GWtrends
stopCluster(workers)
rm(workers)
Sys.time()-startTime #2.9 mins 13/8/21
#8592 of 39 11-11
#8382 of 39 14-11
#8478 of 39 22-11
#8836 of 39 25-11
#8886 of 39 13-03-2021
#8888 of 39 27-03-2021
#9070 04/08/20
#10542 10/8/20
#11436 14-8-20
#11524
#11534 28-8-20
#11544
#6354 3-8-21
#9928 6*8*21
#10986 13/8/21
#10456 20/8/21
#10916 27/8/21
GWtrends$Source = GWdata$Source[match(GWtrends$LawaSiteID,GWdata$LawaSiteID)]
GWtrends$ConfCat <- cut(GWtrends$Cd, breaks= c(-0.1, 0.1,0.33,0.67,0.90, 1.1),
labels = rev(c("Very likely improving","Likely improving","Indeterminate","Likely degrading","Very likely degrading")))
GWtrends$ConfCat=factor(GWtrends$ConfCat,levels=rev(c("Very likely improving","Likely improving","Indeterminate","Likely degrading","Very likely degrading")))
GWtrends$TrendScore=as.numeric(GWtrends$ConfCat)-3
GWtrends$TrendScore[is.na(GWtrends$TrendScore)]<-(NA)
fGWt = GWtrends%>%dplyr::filter(!grepl('^unassess',GWtrends$frequency)&
!grepl('^Insufficient',GWtrends$MKAnalysisNote)&
!grepl('^Insufficient',GWtrends$SSAnalysisNote))
# 2476 of 41 11-7
# 2515 of 41 11-14
# 2405 of 41 11-22
# 2572 of 41 11-25
# 2585 of 31 03-13-2021
# 2560 of 31 03-27-2021
# 1537 of 41 04/08/2021
# 1615 of 41 10/8/20
# 1815
# 1938 of 41 24/8/2021
# 1935 28-8-20
# 1977
# 2252 14-9-20
# 757 3-8-21
# 2265 6-8-21
# 2594 13-8-21
# 2543 20/8/21
# 2580 27/8/21
# nitrate nitrogen, chloride, DRP, electrical conductivity and E. coli.
# fGWt = fGWt%>%filter(Measurement %in% c("Nitrate nitrogen","Chloride","Dissolved reactive phosphorus",
# "Electrical conductivity/salinity","E.coli"))
# table(GWtrends$proportionNeeded,GWtrends$period)
# knitr::kable(table(fGWt$proportionNeeded,fGWt$period),format='rst')
if(plotto){
with(fGWt,knitr::kable(table(MKAnalysisNote,period),format='rst'))
with(fGWt,knitr::kable(table(SSAnalysisNote,period),format='rst'))
knitr::kable(with(fGWt%>%filter(period==10 & proportionNeeded==0.75)%>%
droplevels,table(Measurement,TrendScore)),format='rst')
table(GWtrends$numQuarters,GWtrends$period)
table(GWtrends$Measurement,GWtrends$ConfCat,GWtrends$proportionNeeded,GWtrends$period)
table(fGWt$Measurement,fGWt$ConfCat,fGWt$proportionNeeded,fGWt$period)
# with(GWtrends[GWtrends$period==5,],table(Measurement,ConfCat))
with(GWtrends[GWtrends$period==10,],table(Measurement,ConfCat))
with(GWtrends[GWtrends$period==15,],table(Measurement,ConfCat))
table(GWtrends$Measurement,GWtrends$period)
table(GWtrends$ConfCat,GWtrends$period)
knitr::kable(table(GWtrends$ConfCat,GWtrends$Measurement,GWtrends$period))
}
# Export trend values ---------------------------------------------------------
# Four timestamped CSVs: the full results (GWtrends) and the successfully
# analysed subset (fGWt), written both to the dated local analysis folder
# and to the shared delivery folder.
analysis_dir <- paste0('h:/ericg/16666LAWA/LAWA2021/Groundwater/Analysis/',
                       format(Sys.Date(),"%Y-%m-%d"), '/')
delivery_dir <- "c:/Users/ericg/Otago Regional Council/Abi Loughnan - LAWA Annual Water Refresh 2021/Groundwater Quality/EffectDelivery/"
stamp <- format(Sys.time(),"%d%b%Y")  # e.g. "27Aug2021"
write.csv(GWtrends, file = paste0(analysis_dir, 'ITEGWTrend', stamp, '.csv'),
          row.names = FALSE)
write.csv(fGWt, file = paste0(analysis_dir, 'ITEGWTrendSuccess', stamp, '.csv'),
          row.names = FALSE)
write.csv(GWtrends, file = paste0(delivery_dir, 'ITEGWTrend', stamp, '.csv'),
          row.names = FALSE)
# FIX: previously '/ITEGWTrendSuccess' was pasted after a directory that
# already ends in '/', producing a double slash in the delivery path.
write.csv(fGWt, file = paste0(delivery_dir, 'ITEGWTrendSuccess', stamp, '.csv'),
          row.names = FALSE)
#
# #Put the state on the trend?
#
# GWtrends$Calcmedian = GWmedians$median[match(x = paste(GWtrends$LawaSiteID,GWtrends$Measurement),
# table = paste(GWmedians$LawaSiteID,GWmedians$Measurement))]
# GWtrends$Calcmedian[GWtrends$Calcmedian<=0] <- NA
# par(mfrow=c(2,3))
# for(meas in unique(GWtrends$Measurement)){
# dfp=GWtrends%>%dplyr::filter(Measurement==meas)
# plot(factor(dfp$ConfCat),dfp$Calcmedian,main=meas,log='y')
# }
#
# head(GWmedians%>%filter(!Exclude)%>%select(-Exclude))
|
f74a76ec8da0752bdd71e6247789a6848f23a2a9 | 29585dff702209dd446c0ab52ceea046c58e384e | /GrammR/R/MatrixkNorm.R | ca037773e2c812d4239bfbd082912b72d7c3fcf3 | [] | no_license | ingted/R-Examples | 825440ce468ce608c4d73e2af4c0a0213b81c0fe | d0917dbaf698cb8bc0789db0c3ab07453016eab9 | refs/heads/master | 2020-04-14T12:29:22.336088 | 2016-07-21T14:01:14 | 2016-07-21T14:01:14 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 552 | r | MatrixkNorm.R | MatrixkNorm <-
MatrixkNorm <-
function(X,p){
    # Pairwise Minkowski (p-norm) distances between the rows of X.
    #
    # Args:
    #   X: numeric matrix; each row is one observation.
    #   p: order of the norm. A finite p gives (sum |xi - xj|^p)^(1/p);
    #      an infinite p gives the Chebyshev (maximum) distance.
    #
    # Returns: a symmetric nrow(X) x nrow(X) matrix with a zero diagonal.
    n <- nrow(X)
    D <- matrix(0, ncol = n, nrow = n)
    # seq_len() keeps the outer loop empty when n <= 1; the original
    # 1:(n-1) iterated over c(1, 0) for a one-row matrix and indexed
    # out of bounds.
    for (i in seq_len(n - 1)) {
        for (j in (i + 1):n) {
            d <- abs(X[i, ] - X[j, ])
            if (is.finite(p)) {
                D[i, j] <- (sum(d^p))^(1 / p)
            } else {
                D[i, j] <- max(d)
            }
            D[j, i] <- D[i, j]  # distance is symmetric
        }
    }
    return(D)
}
|
c96a32b6310f260b216e76bd4656c68987cbc46e | 9cc7423f4a94698df5173188b63c313a7df99b0e | /tests/testthat/test-analyze.aov.R | c23181c59a6f201e86ac8c146fc754658a81435a | [
"MIT"
] | permissive | HugoNjb/psycho.R | 71a16406654b11007f0d2f84b8d36587c5c8caec | 601eef008ec463040c68bf72ac1ed8d4a8f7751f | refs/heads/master | 2020-03-27T01:24:23.389884 | 2018-07-19T13:08:53 | 2018-07-19T13:08:53 | 145,707,311 | 1 | 0 | null | 2018-08-22T12:39:27 | 2018-08-22T12:39:27 | null | UTF-8 | R | false | false | 729 | r | test-analyze.aov.R | context("analyze.aov")
test_that("If it works.", {
  # analyze() should accept the main ANOVA-like objects.
  library(psycho)
  library(lmerTest)
  library(lme4)
  df <- psycho::affective
  # aov with one predictor: summary of the analysis has 2 rows
  x <- aov(Tolerating ~ Salary, data = df)
  testthat::expect_equal(nrow(summary(psycho::analyze(x))), 2)
  # anova() on an lm fit: same 2-row shape expected
  x <- anova(lm(Tolerating ~ Salary, data = df))
  testthat::expect_equal(nrow(summary(psycho::analyze(x))), 2)
  # aov with an Error() stratum: analyze() is expected to emit a message
  x <- aov(Tolerating ~ Birth_Season + Error(Sex), data = df)
  testthat::expect_message(psycho::analyze(x))
  # anova from lmerTest (has p-values): one fixed-effect row expected
  x <- anova(lmerTest::lmer(Tolerating ~ Birth_Season + (1 | Sex), data = df))
  testthat::expect_equal(nrow(summary(psycho::analyze(x))), 1)
  # anova from plain lme4 (no p-values): analyze() is expected to error
  x <- anova(lme4::lmer(Tolerating ~ Birth_Season + (1 | Sex), data = df))
  testthat::expect_error(psycho::analyze(x))
})
|
7461a127b6a1ef5b6e1cad3550ba78d1dc00b974 | 09970aa232523e17e031b7a9913baa08bc08ae0f | /Week 8/foobarBrowser.R | 07a0c0bf6c2e20123aa4e72c3d4aabfab9ee0e6a | [] | no_license | Sandeep-Joshi/R-class-Assignments | 6ec17a54d9ad9c8f3aa1f3fb0f86f19ca1ef1aee | e0915a59a377b6af422727beb2e1c16d43f68c11 | refs/heads/master | 2021-01-10T15:31:41.997080 | 2016-01-07T21:19:53 | 2016-01-07T21:19:53 | 48,950,572 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 368 | r | foobarBrowser.R | foo <- function()
{
# Unconditional breakpoint: in an interactive session execution pauses
# here, before any of the locals below exist; step with `n` to watch
# x, y and z appear. (In non-interactive runs browser() typically
# returns immediately.)
browser()
x <- 1
y <- 2
z <- 3
return(c(x, y, z))
}
bar <- function()
{
# Nested-breakpoint demo: pauses here first; the call to foo() then hits
# foo's own browser() one frame deeper (use `where` at the browser prompt
# to inspect the call stack).
browser()
foo()
}
# =====================================================
fooConditional <- function()
{
  # Conditional-breakpoint demo: the browser fires only when the fresh
  # draw lies inside (-10, 10) -- which, for a standard-normal draw, is
  # essentially always.
  draw <- rnorm(1)
  if (abs(draw) < 10) {
    browser()
  }
  doubled <- 2 * draw
  print(draw)
  print(doubled)
  return(draw + doubled)
}
de7b4bfb75ce51084f9a0aea63449bb236e8ee01 | 11e61fd37b4361e1feaadb0e431ea117171255cb | /code/starter_script.R | 4d79d694f7c6063b8a3eb7ede45605ec9e1821bb | [] | no_license | vpontis/make-it-rain | 413baaf0dd429b1e10be1695ff3e824247fb2677 | d033622f69fadb0cb65418c752f5698bb2118896 | refs/heads/master | 2016-09-10T08:54:40.215205 | 2015-05-09T22:51:07 | 2015-05-09T22:51:07 | 35,348,470 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,735 | r | starter_script.R | #####################################################
## Starter Script for Kaggle Will it Rain competition
#####################################################
##
## Performance: 0.00973457
## Relative performance: 70th out of 188; improves upon
## provided benchmark (0.01177621), and using all 1's (0.01017651)
##
## Strategy: submit, for every test row, the same empirical CDF of
## training rainfall -- P(Expected <= t) for t = 0..9 mm, and 1 for
## every threshold from 10 mm up.
######################################################
# record the starting time so total runtime can be reported at the end
start <- Sys.time()
## read in a fixed number of rows (increase/decrease based on memory footprint)
## you can use read.csv in place of data.table; it's just much slower
library(data.table)
setwd('~/Dropbox/Classes/6.UAP/')
# train <- fread("./data/train_2013.csv", select="Expected")
train <- fread("./data/train_small.csv", select="Expected")
gc()
## collect the probability it will rain 0mm, 1mm, 2mm...9mm.
## vapply builds the vector in one pass; the original for-loop grew it
## with c() and needed a special case for the first iteration.
## (mean of a logical equals mean(ifelse(cond, 1, 0)).)
avgRainRate <- vapply(
  0:9,
  function(threshold) train[, mean(Expected <= threshold)],
  numeric(1)
)
## fill in 10mm+ with 100% (it will be lower than 10mm 100% of the time)
avgRainRate <- c(avgRainRate, rep(1, 60))
## now construct a prediction by using the vector as a lookup table to the
## first 10 prediction levels (every row receives the same 70 probabilities)
test <- fread("data/test_2014.csv", select="Id")
gc()
predictions <- as.data.frame(cbind(test$Id, as.data.frame(t(avgRainRate))))
colnames(predictions) <- c("Id", paste0("Predicted", 0:69))
## output predictions; outputs as 188MB, but compresses to <3MB (lot of 1's)
options("scipen"=100, "digits"=8)  # keep probabilities out of scientific notation
write.table(predictions, "histogram_benchmark.csv", quote = FALSE, sep = ",", row.names=FALSE)
## how long did it take
stop <- Sys.time()
stop-start
## all done. compress and submit
|
5d283fab13f45fd57e046f0bff303005557e533e | 5dd8370752fb0ab6786378b06c5ca0349508fe77 | /man/tissue_by_celltype_enrichment.Rd | 0cf91d0fc9d9d7777df33d7dc09fdd81a656cbc6 | [] | no_license | cran/scMappR | a63d9c5df1587c9b640d3d1dc6cd1c645b755ad4 | 43e80e29770b26cfa5a4966e3946da9b83e90359 | refs/heads/master | 2023-07-10T19:26:50.006640 | 2023-06-30T07:40:08 | 2023-06-30T07:40:08 | 245,393,295 | 4 | 1 | null | null | null | null | UTF-8 | R | false | true | 2,098 | rd | tissue_by_celltype_enrichment.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/tissue_by_celltype_enirchment.R
\name{tissue_by_celltype_enrichment}
\alias{tissue_by_celltype_enrichment}
\title{tissue_by_celltype_enrichment}
\usage{
tissue_by_celltype_enrichment(
gene_list,
species,
name = "CT_Tissue_example",
p_thresh = 0.05,
rda_path = "",
isect_size = 3,
return_gmt = FALSE
)
}
\arguments{
\item{gene_list}{A character vector of gene symbols with the same designation (e.g. mouse symbol - mouse, human symbol - human) as the gene set database.}
\item{species}{Species of cell-type marker to use ('human' or 'mouse').}
\item{name}{Name of the pdf to be printed.}
\item{p_thresh}{The Fisher's test cut-off for a cell-marker to be enriched.}
\item{rda_path}{Path to a .rda file containing an object called "gmt". Either human or mouse cell-type markers split by experiment. If the correct file isn't present they will be downloaded from https://github.com/wilsonlabgroup/scMappR_Data.}
\item{isect_size}{Number of genes in your list and the cell-type.}
\item{return_gmt}{Return .gmt file -- recommended if downloading from online as it may have updated (T/F).}
}
\value{
List with the following elements:
\item{enriched}{Data frame of enriched cell-types from tissues.}
\item{gmt}{Cell-markers in enriched cell-types from tissues.}
}
\description{
This function uses a Fisher's-exact-test to rank gene-set enrichment.
}
\details{
Complete a Fisher's-exact test of an input list of genes against one of the two curated tissue by cell-type marker datasets from scMappR.
}
\examples{
\donttest{
data(POA_example)
POA_generes <- POA_example$POA_generes
POA_OR_signature <- POA_example$POA_OR_signature
POA_Rank_signature <- POA_example$POA_Rank_signature
Signature <- POA_Rank_signature
rowname <- get_gene_symbol(Signature)
rownames(Signature) <- rowname$rowname
genes <- rownames(Signature)[1:100]
enriched <- tissue_by_celltype_enrichment(gene_list = genes,
species = "mouse",p_thresh = 0.05, isect_size = 3)
}
}
|
8469fc68b0f4708eb2fd2c0041eb39766972b9eb | e091db2af43490f39db8e6649fdb3608cc0f76fd | /man/represent.Rd | e49d33bfc36f23ae5bf9554dede05bc3292bd13e | [] | no_license | cleanzr/representr | 73763a83f5ba77f2de22980401dacf8dd808b324 | 08e1b6d765058e730476ac46a758257eb3e60a99 | refs/heads/master | 2022-10-24T12:26:47.584981 | 2022-10-18T14:45:39 | 2022-10-18T14:45:39 | 135,310,171 | 5 | 0 | null | null | null | null | UTF-8 | R | false | true | 2,777 | rd | represent.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/represent.R
\name{represent}
\alias{represent}
\title{Create a representative dataset post record-linkage.}
\usage{
represent(
data,
linkage,
rep_method,
parallel = TRUE,
cores = NULL,
...,
scale = FALSE
)
}
\arguments{
\item{data}{A data frame of records to be represented.}
\item{linkage}{A numeric vector indicating the cluster ids post-record linkage for each record in \code{data}.}
\item{rep_method}{Which method to use for representation. Valid options include "proto_minimax", "proto_random",
and "composite".}
\item{parallel}{Logical flag if to use parallel computation or not (via \code{foreach}).}
\item{cores}{If specified, the number of cores to use with \code{foreach}.}
\item{...}{Additional parameters sent to cluster representation function. See
\link[=clust_proto_minimax]{prototyping} or \link[=clust_composite]{composite} methods.}
\item{scale}{If "proto_minimax" method is specified, logical flag to indicate if the column-type
distance function should be scaled so that each distance takes value in [0, 1]. Defaults to
FALSE.}
}
\description{
Create a representative dataset post record-linkage.
}
\examples{
data("rl_reg1")
## random prototyping
rep_dat_random <- represent(rl_reg1, identity.rl_reg1, "proto_random", id = FALSE, parallel = FALSE)
head(rep_dat_random)
## minimax prototyping
col_type <- c("string", "string", "numeric", "numeric", "numeric", "categorical", "ordinal",
"numeric", "numeric")
orders <- list(education = c("Less than a high school diploma", "High school graduates, no college",
"Some college or associate degree", "Bachelor's degree only", "Advanced degree"))
weights <- c(.25, .25, .05, .05, .1, .15, .05, .05, .05)
rep_dat_minimax <- represent(rl_reg1, identity.rl_reg1, "proto_minimax", id = FALSE,
distance = dist_col_type, col_type = col_type, weights = weights, orders = orders,
scale = TRUE, parallel = FALSE)
head(rep_dat_minimax)
\dontrun{
## with alternative tie breaker
rep_dat_minimax <- represent(rl_reg1, identity.rl_reg1, "proto_minimax", id = FALSE,
distance = dist_col_type, col_type = col_type, weights = weights, orders = orders,
ties_fn = "maxmin_compare", scale = TRUE, parallel = FALSE)
head(rep_dat_minimax)
rep_dat_minimax <- represent(rl_reg1, identity.rl_reg1, "proto_minimax", id = FALSE,
distance = dist_col_type, col_type = col_type, weights = weights, orders = orders,
ties_fn = "within_category_compare_cpp", scale = TRUE, parallel = FALSE)
head(rep_dat_minimax)
## composite prototyping
rep_dat_composite <- represent(rl_reg1, identity.rl_reg1, "composite",
col_type = col_type, parallel = FALSE)
head(rep_dat_composite)
}
}
|
ba12827245d2aae0db3f57dbe5ebc4c936217e2d | 29d34e3302b71d41d77af715727e963aea119392 | /man/plotly.heat.Rd | 388763e54344e0e166ae60d37423afc2ad4424f3 | [] | no_license | bakaibaiazbekov/rtemis | 1f5721990d31ec5000b38354cb7768bd625e185f | a0c47e5f7fed297af5ad20ae821274b328696e5e | refs/heads/master | 2020-05-14T20:21:40.137680 | 2019-04-17T15:42:33 | 2019-04-17T15:42:33 | 181,943,092 | 1 | 0 | null | 2019-04-17T18:00:09 | 2019-04-17T18:00:09 | null | UTF-8 | R | false | true | 667 | rd | plotly.heat.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/plotly.heat.R
\name{plotly.heat}
\alias{plotly.heat}
\title{Heatmap with \code{plotly}}
\usage{
plotly.heat(z, x = NULL, y = NULL, title = NULL,
col = penn.heat(21), xlab = NULL, ylab = NULL, zlab = NULL,
transpose = TRUE)
}
\arguments{
\item{z}{Input matrix}
\item{x, y}{Vectors for x, y axes}
\item{title}{Plot title}
\item{col}{Set of colors to make gradient from}
\item{xlab}{x-axis label}
\item{ylab}{y-axis label}
\item{zlab}{z value label}
\item{transpose}{Logical. Transpose matrix}
}
\description{
Draw a heatmap using \code{plotly}
}
\author{
Efstathios D. Gennatas
}
|
ab629146178121815cae16b5ce52ddb8307364b4 | 335eac7d46975e4c556093aa5012f53cbe550a5a | /plot4.R | c75eae488f1dce5c0cd4c2fba56fd687a48d0ff1 | [] | no_license | nktp/ExData_Plotting1 | b71c13d7c83b677d2be03b3b20de886298245943 | bef761164f93b6f34bb8362f053b6bf3154a13a6 | refs/heads/master | 2021-08-31T15:06:31.913656 | 2017-12-21T20:27:57 | 2017-12-21T20:27:57 | 114,952,529 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,708 | r | plot4.R | unzip("exdata%2Fdata%2Fhousehold_power_consumption.zip")
# plot4.png: 2x2 panel of household power consumption for 1-2 Feb 2007.
# The source archive is extracted by the unzip() call above; the txt file
# is ';'-separated.
hh_consump <- read.delim('household_power_consumption.txt', sep=";")
# Re-format Date/Time as fixed-width strings so the date filter below can
# match exact string values.
hh_consump$Date <- format(as.Date(hh_consump$Date, format='%d/%m/%Y'), '%d/%m/%Y')
hh_consump$Time <- format(strptime(hh_consump$Time, format='%H:%M:%S'), '%H:%M:%S')
# FIX: library() (unlike require()) fails loudly if dplyr is missing.
library(dplyr)
df <- hh_consump %>%
  subset(Date %in% c('01/02/2007', '02/02/2007')) %>%
  # Build a POSIXct timestamp and coerce the measurement columns to
  # numeric via character (robust whether the columns were read as
  # factors or character; non-numeric entries become NA).
  mutate(Datetime=as.POSIXct(paste(Date, Time), format="%d/%m/%Y %H:%M:%S"),
         Sub_metering_1=as.numeric(as.character(Sub_metering_1)),
         Sub_metering_2=as.numeric(as.character(Sub_metering_2)),
         Sub_metering_3=as.numeric(as.character(Sub_metering_3)),
         Global_active_power=as.numeric(as.character(Global_active_power)),
         Voltage=as.numeric(as.character(Voltage)),
         Global_reactive_power=as.numeric(as.character(Global_reactive_power))
         )
png(file="plot4.png",
    width = 480, height = 480, units = "px")
par(mfrow=c(2,2))  # four panels, filled row-wise
# Top-left: global active power over time.
# (The dead `plotN <-` assignments were removed; plot() returns NULL.)
plot(df$Datetime, df$Global_active_power, type="l",
     ylab="Global Active Power (kilowatts)",
     xlab=NA)
# Top-right: voltage over time
plot(df$Datetime, df$Voltage, type="l",
     ylab="Voltage",
     xlab="datetime")
# Bottom-left: the three sub-metering series overlaid
plot(df$Datetime, df$Sub_metering_1, type="l",
     ylab="Energy sub metering",
     xlab=NA)
lines(df$Datetime, df$Sub_metering_2, col="red")
lines(df$Datetime, df$Sub_metering_3, col="blue")
legend("topright", bty="n", lwd=1, col=c("black", "red", "blue"), legend=c("Sub_metering_1", "Sub_metering_2", "Sub_metering_3"))
# Bottom-right: global reactive power over time
plot(df$Datetime, df$Global_reactive_power, type="l",
     ylab="Global_reactive_power",
     xlab="datetime")
dev.off()
|
f89bb7f0603455f7cac22245c2957df77623db08 | cb51f75bd828801da59d40a60c5ae767d14dc8c3 | /seminar2_tomt.R | 029a45e84e154ad401951517bb5c9f182efbf60f | [] | no_license | elisofieb/4020a21 | 3067c5d2d9210626ea4572df544b999fc4e6867b | 5f59e97f6d3027dda540119656191a719eb2eb93 | refs/heads/main | 2023-08-11T18:25:50.041876 | 2021-09-17T11:27:27 | 2021-09-17T11:27:27 | 405,924,118 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,476 | r | seminar2_tomt.R | #### SEMINAR 2 - GRUPPE 2 ####
## Plan for seminaret ##
# 1. Laste inn data - read_funksjoner()
# 2. Forberede og manipulere data
# 3. Utforsking av data og deskriptiv statistikk
# 4. Plotte-funksjonen ggplot
# 5. Lagre datasett
## 1. Laste inn data - read_funksjoner() ##
# Installere/hente tidyverse og haven
## 2. Forberede og manipulere data ##
# Hva er enhentene i datasettet?
# Hva heter variablene i datasettet?
# Hva slags klasse har variablene?
# Er det manglende informasjon på noen av variablene vi er interesserte i?
# Noen omkodingsfunksjoner
## data$ny_var <- funksjon(data$gammel_var)
# Vi anvender en funksjon som omarbeider informasjonen i en gammel variabel i
# datasettet vårt, og legger den til datasettet vårt med et nytt navn
# Omkoding med tidyverse/dplyr og matematisk omkoding
# Endre klassen til variabelen
# Omkoding med ifelse()
## data$nyvar <- ifelse(test = my_data$my.variabel == "some logical condition",
## yes = "what to return if 'some condition' is TRUE",
## no = "what to return if 'some condition' is FALSE")
# Endre datastruktur ved hjelp av aggregering
## 3. Utforsking av data og deskriptiv statistikk ##
# Univariat statistikk for kontinuerlige variabler
# Bivariat/multivariat statistikk for kontinuerlige variabler
# Kategoriske variabler
## 4. Plotte-funksjonen ggplot ##
# geom_histogram()
# geom_boxplot()
# geom_line()
# geom_point()
## Lagre datasett ##
|
709d1ba5f87ff84f20ebf1a15da6c36d274ea666 | e41391024da2621189081feaabddd20e558a856e | /src/03-descriptive-statistics.R | 712b86ec1e24ad2cdc484bb13e9fa3b92e744d68 | [] | no_license | lucienbaumgartner/legal_tc | 7f297efe3c87aedc0bfc4d808bc0ae85be547880 | 75510edbd75c83114eef7a2b16db7c8fa60f9241 | refs/heads/master | 2023-03-24T11:37:13.700028 | 2021-03-10T11:28:06 | 2021-03-10T11:28:06 | 278,079,343 | 0 | 0 | null | 2020-07-15T11:40:49 | 2020-07-08T12:10:31 | R | UTF-8 | R | false | false | 10,856 | r | 03-descriptive-statistics.R | library(quanteda)
library(ggplot2)
library(dplyr)
library(purrr)
library(xtable)
library(envalysis)
library(gridExtra)
library(pbmcapply)
library(tm)
library(textstem)
library(emmeans)
rm(list=ls())
setwd(dirname(rstudioapi::getActiveDocumentContext()$path))
getwd()
# function for linebreaks in plots
abbrv <- function(x, width = 200) lapply(strwrap(x, width, simplify = FALSE), paste, collapse="\n")
# function to compute quantiles per group in dplyr
p <- c(0.25, 0.5, 0.75)
p_names <- map_chr(p, ~paste0(.x*100, "%"))
p_funs <- map(p, ~partial(quantile, probs = .x, na.rm = TRUE)) %>%
set_names(nm = p_names)
# function to compute diversity measures
k <- c('TTR', 'CTTR', 'K')
k_funs <- map(k, ~partial(textstat_lexdiv, measure = .x)) %>%
set_names(nm = k)
# load data
load('../output/02-finalized-corpora/baseline/reddit/BC_consolidated.RDS')
bc <- df
load('../output/02-finalized-corpora/legal/LC_consolidated.RDS')
lc <- df
rm(df)
### write out adjective list with aggregates
df <- rbind(mutate(bc, TARGET_pol_new = TARGET_pol), select(lc, -id, -year, -court)) %>%
mutate(cat = dplyr::recode(cat, epistemic = 'Epistemic', legal = 'Legal', tc = 'TC', descriptive = 'Descriptive'),
context = dplyr::recode(context, court = 'LC', reddit = 'BC'))
### write out adjective list with aggregates
# quantiles
df_quants <- df %>%
group_by(cat, TARGET, TARGET_pol) %>%
summarize_at(vars(sentiWords), p_funs)
# average
df_avg <- df %>%
group_by(cat, TARGET, TARGET_pol) %>%
summarise(mean = mean(sentiWords, na.rm = T))
# diversity
df_div <- df %>%
group_by(cat, TARGET, TARGET_pol) %>%
summarize(conjuncts = paste0(ADJ, collapse = ' '),
n = n())
df_div <- cbind(df_div, textstat_lexdiv(tokens(df_div$conjuncts), measure = c('TTR', 'CTTR', 'K')))
df_div <- select(df_div, -conjuncts, -document)
# combine
df_summary <- left_join(df_quants, df_avg) %>% left_join(., df_div) %>% arrange(cat, TARGET_pol, TARGET)
# write out results
write.csv(df_summary, file = '../output/03-results/tables/COMBINED_summary_stats.csv', quote = F, row.names = F)
df_summary <- xtable(df_summary)
print(df_summary, include.rownames =F , file = '../output/03-results/tables/COMBINED_summary_stats.tex')
## LC
# quantiles
lc_quants <- lc %>%
group_by(cat, TARGET_pol) %>%
summarize_at(vars(sentiWords), p_funs)
# average
lc_avg <- lc %>%
group_by(cat, TARGET_pol) %>%
summarise(mean = mean(sentiWords, na.rm = T))
# diversity
lc_div <- lc %>%
group_by(cat, TARGET_pol) %>%
summarize(conjuncts = paste0(ADJ, collapse = ' '),
n = n())
lc_div <- cbind(lc_div, textstat_lexdiv(tokens(lc_div$conjuncts), measure = c('TTR', 'CTTR', 'K')))
lc_div <- select(lc_div, -conjuncts, -document)
# combine
lc_summary <- left_join(lc_quants, lc_avg) %>% left_join(., lc_div)
# write out results
write.csv(lc_summary, file = '../output/03-results/tables/LC_summary_stats.csv', quote = F, row.names = F)
lc_summary <- xtable(lc_summary)
print(lc_summary, include.rownames =F , file = '../output/03-results/tables/LC_summary_stats.tex')
## BC
# quantiles
bc_quants <- bc %>%
group_by(cat, TARGET_pol) %>%
summarize_at(vars(sentiWords), p_funs)
# average
bc_avg <- bc %>%
group_by(cat, TARGET_pol) %>%
summarise(mean = mean(sentiWords, na.rm = T))
# diversity
bc_div <- bc %>%
group_by(cat, TARGET_pol) %>%
summarize(conjuncts = paste0(ADJ, collapse = ' '),
n = n())
bc_div <- cbind(bc_div, textstat_lexdiv(tokens(bc_div$conjuncts), measure = c('TTR', 'CTTR', 'K')))
bc_div <- select(bc_div, -conjuncts, -document)
# combine
bc_summary <- left_join(bc_quants, bc_avg) %>% left_join(., bc_div)
# write out results
write.csv(bc_summary, file = '../output/03-results/tables/BC_summary_stats.csv', quote = F, row.names = F)
bc_summary <- xtable(bc_summary)
print(bc_summary, include.rownames =F , file = '../output/03-results/tables/BC_summary_stats.tex')
###
###
#### adj distribution
# Boxplots of conjoined sentiment per target adjective, faceted by
# category, with per-adjective group means (points) and per-corpus
# overall means (horizontal lines).
means <- df %>% group_by(context, TARGET_pol, cat, TARGET) %>% summarise(avg = mean(sentiWords, na.rm = T))
means_overall <- df %>% group_by(context, TARGET_pol, cat) %>% summarise(avg = mean(sentiWords, na.rm = T))
# p1: negative concepts
p1 <-
  df %>%
  filter(TARGET_pol == 'negative') %>%
  ggplot(., aes(x = TARGET, y = sentiWords, fill = context)) +
  geom_boxplot(outlier.color = NA) +
  geom_point(data = means %>% filter(TARGET_pol == 'negative'), aes(y = avg, color = context)) +
  geom_point(data = means %>% filter(TARGET_pol == 'negative'), aes(y = avg), shape = 1) +
  geom_hline(data = means_overall %>% filter(TARGET_pol == 'negative'), aes(yintercept = avg, colour = context)) +
  geom_hline(aes(yintercept = 0), lty = 'dashed') +
  scale_fill_grey(start = 0.5, end = 0.8) +
  scale_colour_grey(start = 0.5, end = 0.8) +
  scale_y_continuous(limits = c(-1,1), expand = c(0.01,0.01)) +
  facet_grid(~ cat, scales = 'free_x') +
  labs(
    x = 'Target Adjective',
    y = 'Conjoined\nSentiment Values',
    fill = 'Corpus',
    colour = 'Corpus',
    title = abbrv(paste0('Negative Concepts'), width = 40)
  )
# p2: positive concepts (same layout)
p2 <-
  df %>% filter(TARGET_pol == 'positive') %>%
  ggplot(., aes(x = TARGET, y = sentiWords, fill = context)) +
  geom_boxplot(outlier.color = NA) +
  geom_point(data = means %>% filter(TARGET_pol == 'positive'), aes(y = avg, color = context)) +
  geom_point(data = means %>% filter(TARGET_pol == 'positive'), aes(y = avg), shape = 1) +
  geom_hline(data = means_overall %>% filter(TARGET_pol == 'positive'), aes(yintercept = avg, colour = context)) +
  geom_hline(aes(yintercept = 0), lty = 'dashed') +
  scale_fill_grey(start = 0.5, end = 0.8) +
  scale_colour_grey(start = 0.5, end = 0.8) +
  scale_y_continuous(limits = c(-1,1), expand = c(0.01,0.01)) +
  facet_grid(~ cat, scales = 'free_x') +
  labs(
    x = 'Target Adjective',
    y = 'Conjoined\nSentiment Values',
    fill = 'Corpus',
    colour = 'Corpus',
    title = abbrv(paste0('Positive Concepts'), width = 40)
  )
# p3: descriptive (neutral) concepts.
# NOTE(review): `cat` values were recoded to 'Descriptive' (capitalised)
# when df was built above, so this lowercase filter likely matches no
# rows -- verify against the recode step.
p3 <-
  df %>% filter(cat == 'descriptive') %>%
  ggplot(., aes(x = TARGET, y = sentiWords, fill = context)) +
  geom_boxplot(outlier.color = NA) +
  geom_point(data = means %>% filter(TARGET_pol == 'neutral'), aes(y = avg, color = context)) +
  geom_point(data = means %>% filter(TARGET_pol == 'neutral'), aes(y = avg), shape = 1) +
  geom_hline(data = means_overall %>% filter(TARGET_pol == 'neutral'), aes(yintercept = avg, colour = context)) +
  geom_hline(aes(yintercept = 0), lty = 'dashed') +
  scale_fill_grey(start = 0.5, end = 0.8) +
  scale_colour_grey(start = 0.5, end = 0.8) +
  scale_y_continuous(limits = c(-1,1), expand = c(0.01,0.01)) +
  facet_grid(~ cat, scales = 'free_x') +
  labs(
    x = 'Target Adjective',
    y = 'Conjoined\nSentiment Values',
    fill = 'Corpus',
    colour = 'Corpus',
    title = abbrv(paste0('Descriptive Concepts'), width = 40)
  )
# set theme (ggplot objects are rendered lazily, so the theme set here
# still applies when the plots are drawn/saved below)
theme_set(theme_light(base_size = 15))
theme_update(
  plot.title = element_text(face= 'bold', size = 15),
  axis.text.x = element_text(angle = 45, hjust = 1),
  strip.text = element_text(colour = 'black')
)
# combined positive/negative figure plus the small descriptive figure
p <- grid.arrange(p2, p1, nrow =2)
ggsave(p, filename = '../output/03-results/plots/bc_lc_summary_stats_adj-distr.pdf', width = 8, height = 9)
ggsave(p3, filename = '../output/03-results/plots/lc_descriptive_adj-distr.pdf', width = 4, height = 3)
load('../res/sentiWords-db-full-PoS.RDS')
head(sentiWords$num)
collection <- list()
for(i in c(lc, bc)){
#i <- bc
set.seed(562647)
p_source <- sample_n(i, 2000)
p <- p_source$txt %>% setNames(., paste0('doc', 1:2000))
p <- quanteda::tokens(tolower(p), remove_punct = T, remove_symbols = T, remove_numbers = T, remove_url = T, remove_separators = T)
stopwords_regex = paste(tm::stopwords('en'), collapse = '\\b|\\b')
stopwords_regex = paste0('\\b', stopwords_regex, '\\b')
p <- pbmclapply(p, function(x){
#x <- p[[1]]
tmp <- unlist(x)
tmp <- stringr::str_replace_all(tmp, stopwords_regex, '')
tmp <- tmp[nchar(tmp) > 2]
tmp <- tmp[!grepl('[0-9]+|[[:punct:]]', tmp, perl = T)]
tmp <- textstem::lemmatize_words(tmp)
tmp <- tmp[!tmp == '']
return(tmp)
}, mc.cores = 4)
p <- pbmclapply(p, function(x){
#x <- p[[1]]
tmp <- quanteda::tokens_lookup(quanteda::tokens(x), dictionary = sentiWords$num)
tmp <- unlist(tmp)
tmp <- as.numeric(tmp[!tmp == 0])
return(tmp)
}, mc.cores = 4)
save(p, file = '~/Downloads/senti2-tmp.RDS')
length(p)
p_source$doc_sentiment <- p
head(p_source)
#save(p_source, file='../output/02-finalized-corpora/baseline/reddit/BC_consolidated_full_sentiment.RDS', compress = 'gzip')
save(p_source, file='../output/02-finalized-corpora/legal/LC_consolidated_full_sentiment.RDS', compress = 'gzip')
collection <- append(collection, list(p_source))
}
load('../output/02-finalized-corpora/baseline/reddit/BC_consolidated_full_sentiment.RDS')
collection[[1]] <- p_source
load('../output/02-finalized-corpora/legal/LC_consolidated_full_sentiment.RDS')
collection[[2]] <- p_source
str(collection)
### MEANS by POLARITY
## means of means
means_lc_per_doc <- lapply(collection[[1]]$doc_sentiment, mean)
means_bc_per_doc <- lapply(collection[[2]]$doc_sentiment, mean)
mean(unlist(means_lc_per_doc))
mean(unlist(means_bc_per_doc))
## significance test
data_m1 <- rbind(tibble(sentiment = unlist(means_lc_per_doc), corpus = 'lc'),
tibble(sentiment = unlist(means_bc_per_doc), corpus = 'bc'))
m1 <- lm(sentiment ~ corpus, data = data_m1)
emmeans(m1, specs = pairwise ~ corpus)
## means overall
mList_lc <- unlist(collection[[1]]$doc_sentiment)
mean(mList_lc)
mList_bc <- unlist(collection[[2]]$doc_sentiment)
mean(mList_bc)
## significance test
data_m2 <- rbind(tibble(sentiment = mList_lc, corpus = 'lc'),
tibble(sentiment = mList_bc, corpus = 'bc'))
m2 <- lm(sentiment ~ corpus, data = data_m2)
emmeans(m2, specs = pairwise ~ corpus)
### ABSOLUTE MEANS (INTENSITY)
## means of means
abs_means_lc_per_doc <- lapply(collection[[1]]$doc_sentiment, function(x) mean(abs(x)))
abs_means_bc_per_doc <- lapply(collection[[2]]$doc_sentiment, function(x) mean(abs(x)))
mean(unlist(abs_means_lc_per_doc))
mean(unlist(abs_means_bc_per_doc))
## significance test
data_m3 <- rbind(tibble(sentiment = unlist(abs_means_lc_per_doc), corpus = 'lc'),
tibble(sentiment = unlist(abs_means_bc_per_doc), corpus = 'bc'))
m3 <- lm(sentiment ~ corpus, data = data_m3)
emmeans(m3, specs = pairwise ~ corpus)
## means overall
mList_lc <- unlist(collection[[1]]$doc_sentiment)
mean(abs(mList_lc))
mList_bc <- unlist(collection[[2]]$doc_sentiment)
mean(abs(mList_bc))
## significance test
data_m4 <- rbind(tibble(sentiment = abs(mList_lc), corpus = 'lc'),
tibble(sentiment = abs(mList_bc), corpus = 'bc'))
m4 <- lm(sentiment ~ corpus, data = data_m4)
emmeans(m4, specs = pairwise ~ corpus)
|
6f1299bc47a4be6cad7bcc1a4a49e59add31447a | d864b440e7de2bf7b8cf2d2e81a7ba491c930a11 | /R/fx_series.R | ebb1084b7f0a4d77664c1f6c785463e03b35cc14 | [] | no_license | eamoakohene/beamafx | 3323dd505e1c7f28b2facf0e13310c3c4d039b3a | 6ae1eafb265b86db121b628c25229b5cd1c221d4 | refs/heads/master | 2021-07-15T06:08:02.688011 | 2021-03-09T10:06:10 | 2021-03-09T10:06:10 | 53,271,698 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 7,699 | r | fx_series.R | fx_series <- R6::R6Class(
  'fx_series',
  inherit = fx_utils,  # presumably supplies str_pos() and run_sql() -- TODO confirm in fx_utils
  public = list(
    # Query window: start (y1/m1/d1) and end (y2/m2/d2) year, month, day.
    y1= 2010,
    y2=2020,
    m1=1,
    m2=12,
    d1=1,
    d2=31,
    code = NULL,        # SQL-ready code list, e.g. "('USD','GBP')" (see split_str)
    frq = 'd',          # frequency: 'd'aily, 'm'onthly, 'q'uarterly, 'y'early
    fx_to = NULL,       # currency every series is converted into
    dtd1 = NULL,        # window start/end as ordinal day indices (see set_data_days)
    dtd2 = NULL,
    filter_on = FALSE,  # drop the fx_to base-currency rows from results?
    # Set up a series for the comma-separated `code` currencies converted
    # into `to`. If `to` does not occur in `code` (and codes_only is
    # FALSE) it is appended so the conversion base is always fetched, and
    # filter_on is enabled so it is removed from the output again.
    # Returns NULL (with a console note) when code and to are the same.
    initialize = function (code = 'USD,GBP,EUR',to='GBP',codes_only=FALSE){
      if(self$is_same(code,to)){
        cat('Currency FROM and TO are the same ')
        return(NULL)
      }
      my_code <- code
      if( !(self$str_pos(code,to)) > 0){
        if(!codes_only){
          my_code <- paste0(code,',',to)
          self$set_filter(TRUE)
        }
      }
      self$set_codes(my_code)
      self$convert_to(to)
    }
,is_same = function (from,to){
return(
trimws(toupper(from)) == trimws(toupper(to))
)
}
    # Chainable setters: each updates one field only when a non-NULL value
    # is supplied, and returns the object invisibly so calls can be
    # chained with `$`.
    ,set_freq = function (value){
      if(!missing(value) && !is.null(value)){
        self$frq <- value
      }
      invisible(self)
    }
    ,set_filter = function (value){
      if(!missing(value) && !is.null(value)){
        self$filter_on <- value
      }
      invisible(self)
    }
    ,convert_to = function (value){
      if(!missing(value) && !is.null(value)){
        self$fx_to <- value
      }
      invisible(self)
    }
    # Set the window start from anything as.Date() accepts, splitting it
    # into the year/month/day fields and refreshing the ordinal day index.
    ,set_date1 = function (value){
      if(!missing(value) && !is.null(value)){
        my_date <- as.Date(value)
        self$set_y1( lubridate::year( my_date))
        self$set_m1( lubridate::month( my_date))
        self$set_d1( lubridate::day( my_date))
        self$set_data_days(1)
      }
      invisible(self)
    }
    # Same for the window end.
    ,set_date2 = function (value){
      if(!missing(value) && !is.null(value)){
        my_date <- as.Date(value)
        self$set_y2( lubridate::year( my_date))
        self$set_m2( lubridate::month( my_date))
        self$set_d2( lubridate::day( my_date))
        self$set_data_days(2)
      }
      invisible(self)
    }
    # Convenience: set both window ends at once; either may be omitted.
    ,set_date_range = function (value1,value2){
      if(!missing(value1) && !is.null(value1)){
        self$set_date1(value1)
      }
      if(!missing(value2) && !is.null(value2)){
        self$set_date2(value2)
      }
      invisible(self)
    }
    # Recompute the ordinal day index (372*year + 31*month + day, where
    # 372 = 12*31) for window end 1 or 2. The index increases with
    # calendar order, which lets build_sql() filter with a simple
    # BETWEEN on the data_days column.
    ,set_data_days = function (value){
      if(value==1){
        self$dtd1 <- 372 * self$y1 + 31 * self$m1 + self$d1
      }else{
        self$dtd2 <- 372 * self$y2 + 31 * self$m2 + self$d2
      }
      invisible(self)
    }
    # Individual year/month/day setters for both window ends; all follow
    # the same guard-and-chain pattern as the setters above.
    ,set_y1 = function (value){
      if(!missing(value) && !is.null(value)){
        self$y1 <- value
      }
      invisible(self)
    }
    ,set_y2 = function (value){
      if(!missing(value) && !is.null(value)){
        self$y2 <- value
      }
      invisible(self)
    }
    ,set_m1 = function (value){
      if(!missing(value) && !is.null(value)){
        self$m1 <- value
      }
      invisible(self)
    }
    ,set_m2 = function (value){
      if(!missing(value) && !is.null(value)){
        self$m2 <- value
      }
      invisible(self)
    }
    ,set_d1 = function (value){
      if(!missing(value) && !is.null(value)){
        self$d1 <- value
      }
      invisible(self)
    }
    ,set_d2 = function (value){
      if(!missing(value) && !is.null(value)){
        self$d2 <- value
      }
      invisible(self)
    }
    # Store the currency codes as an SQL-ready IN-list literal.
    ,set_codes = function (value){
      if(!missing(value) && !is.null(value)){
        self$code <- private$split_str( value )
      }
      invisible(self)
    }
    # Assemble the SELECT against the fx_data table. Quarterly, monthly
    # and yearly frequencies average data_value per period (emitting
    # placeholder day/month values so every row still carries
    # yr/mth/qtr/dy columns); any other frq value falls through to raw
    # daily rows.
    # NOTE(review): the SQL is built by string concatenation; self$code
    # is quoted by split_str() but not escaped -- acceptable only while
    # codes come from trusted internal callers.
    ,build_sql = function(){
      my_sql <- NULL
      group_by <- ""
      order_by <- ""
      my_prd <- self$frq
      if(my_prd=='q'){
        my_sql <- "select yr,qtr*3 as mth,qtr,28 as dy,data_code,avg(data_value) as data_value from fx_data "
        group_by <- "group by yr,qtr,data_code "
        order_by <- " order by data_code,yr,qtr "
      }else if(my_prd=='m'){
        my_sql <- "select yr,mth,qtr,28 as dy,data_code,avg(data_value) as data_value from fx_data "
        group_by <- "group by yr,mth,data_code "
        order_by <- " order by data_code,yr,mth "
      }else if(my_prd=='y'){
        my_sql <- "select yr, 12 as mth, 4 as qtr, 28 as dy, data_code,avg(data_value) as data_value from fx_data "
        group_by <- "group by yr,data_code "
        order_by <- " order by data_code,yr "
      }else{
        my_sql <- "select yr,mth,qtr, dy,data_code,data_value from fx_data "
        group_by <- "group by yr,mth,dy,data_code "
        order_by <- " order by data_code,yr,mth,dy "
      }
      # refresh both ordinal day indices before building the WHERE clause
      self$set_data_days(1)
      self$set_data_days(2)
      qry_where <-" where "
      qw_code <- paste0(" data_code in ",self$code)
      qw_dtd <- paste0(" and (data_days between ",self$dtd1," and ",self$dtd2,")")
      sql_where <- paste0( qry_where, qw_code, qw_dtd)
      my_sql <- paste0( my_sql, sql_where, group_by, order_by )
      return(my_sql)
    }
    # Run the query and, when the base currency (fx_to) is among the
    # fetched codes, rebase every series by dividing it by the fx_to
    # series: spread to wide (the first four columns are yr/mth/qtr/dy
    # per build_sql), divide each currency column element-wise, gather
    # back to long and attach as `value`. With filter_on, the raw
    # data_value column and the base-currency rows are dropped.
    ,get_data = function(){
      my_data <- private$run_sql( self$build_sql() )
      if(!is.null(my_data)){
        if( self$str_pos( self$code, self$fx_to)>0 ){
          my_spread <- tidyr::spread( my_data, data_code, data_value)
          ncols <- ncol(my_spread)
          my_spread_val <- my_spread[,5:ncols]
          my_scaler <- my_spread[[ self$fx_to ]]
          COL_OP <- 2  # apply over columns
          my_spread_scaled <- apply(my_spread_val,COL_OP,FUN=function(x){x/my_scaler})
          my_spread_scaled <- cbind(my_spread[,1:4],my_spread_scaled)
          my_gather <- tidyr::gather(
            data = my_spread_scaled,
            key = data_code,
            value = calc_value,
            5:ncol(my_spread_scaled)
          )
          # sort both frames identically so calc_value lines up row-for-row
          my_data <- dplyr::arrange(my_data,data_code,yr,mth,dy)
          my_gather <- dplyr::arrange(my_gather,data_code,yr,mth,dy)
          my_data$value <- my_gather$calc_value
          if(self$filter_on){
            my_data$data_value <- NULL
            my_data <- dplyr::filter(my_data,!(data_code == self$fx_to))
          }
        }
      }
      return(my_data)
    }
),
private = list(
split_str = function(q="EUR,GBP,USD"){
  # Turn a comma-separated code list into a quoted SQL IN-list,
  # e.g. "EUR,GBP" -> "('EUR','GBP')".
  quoted <- gsub(",", "','", q, fixed = TRUE)
  paste0("('", quoted, "')")
}
)
)
get_d <- function(
  fx='EUR',
  from= as.Date(
    paste(
      ifelse( lubridate::month(Sys.Date()) == 1, lubridate::year(Sys.Date()) - 1, lubridate::year(Sys.Date())),
      ifelse( lubridate::month(Sys.Date()) == 1, 12, lubridate::month(Sys.Date())-1),
      1,sep='-')
  ),
  to = Sys.Date()
){
  # Daily FX series for `fx` quoted against GBP.
  # `from` defaults to the first day of the previous month (rolling back the
  # year when the current month is January).
  series <- fx_series$new( paste0( trimws(fx),',GBP') )
  series$
    set_date_range( from ,to )$
    set_freq( 'd' )$
    set_filter( TRUE )$
    get_data()
}
get_m <- function(
  fx='EUR',
  from= as.Date(
    paste(
      lubridate::year(Sys.Date())-1,
      lubridate::month(Sys.Date()),
      1,sep='-')
  ),
  to = Sys.Date()
){
  # Monthly FX series for `fx` quoted against GBP.
  # `from` defaults to the first of the current month, one year ago.
  series <- fx_series$new( paste0( trimws(fx),',GBP') )
  series$
    set_date_range( from ,to )$
    set_freq( 'm' )$
    set_filter( TRUE )$
    get_data()
}
get_y <- function(
  fx='EUR',
  from= as.Date(
    paste(
      lubridate::year(Sys.Date())-10,
      1,
      1,sep='-')
  ),
  to = Sys.Date()
){
  # Yearly FX series for `fx` quoted against GBP.
  # `from` defaults to 1 January, ten years ago.
  series <- fx_series$new( paste0( trimws(fx),',GBP') )
  series$
    set_date_range(from,to)$
    set_freq( 'y' )$
    set_filter( TRUE )$
    get_data()
}
get_last <- function(x=10,fx='EUR',prd = c('d','m','y')){
  # Fetch the last `x` periods (days, months, or years) of FX data for `fx`
  # quoted against GBP.
  #
  # x:   number of periods to look back.
  # fx:  currency code (whitespace is trimmed).
  # prd: period granularity, one of 'd', 'm', 'y'.
  today <- Sys.Date()
  period <- match.arg(prd)
  # Bug fix: the original computed `from` for 'm'/'y' as a bare month/year
  # NUMBER (e.g. lubridate::month(today) - x - 1), not a Date, so the date
  # range was nonsense for those granularities. Step back x + 1 whole
  # calendar periods from today instead (seq.Date handles month/year math).
  from <- switch(period,
    'd' = today - x - 1,
    'm' = seq(today, length.out = 2, by = paste0("-", x + 1, " months"))[2],
    'y' = seq(today, length.out = 2, by = paste0("-", x + 1, " years"))[2]
  )
  to <- today
  fx_series$new( paste0( trimws(fx),',GBP') )$
    set_date_range(from,to)$
    set_freq( period )$
    set_filter(TRUE)$
    get_data()
}
download <- function(days = 31) {
  # Refresh all euro-based rates, fetching `days` data points per series.
  downloader <- beamafx::fx_download$new()
  downloader$
    set_data_points(days)$
    update_euro_all()
}
|
b102b667ee468e60fba3a5f0546f3c09e542d162 | 5bfe6b53c227d756ab6267a56e977d085abfbd02 | /R/dCorClass.R | 585eab0d7df36aa2e4be4e915e0f3d0f4bc4559a | [] | no_license | andymckenzie/DGCA | a8de8701cd3aa1703c05f349157831ed7d35afc6 | 16151cbc0e69961c1e07e2560fb70e9c4c76ee7f | refs/heads/master | 2023-04-03T23:21:20.360575 | 2023-03-15T17:48:54 | 2023-03-15T17:48:54 | 72,645,582 | 41 | 12 | null | null | null | null | UTF-8 | R | false | false | 3,074 | r | dCorClass.R | #' @title Classify differential correlations.
#' @description Classifies identifiers (e.g., genes) into one of the different categories pairwise-differential correlation classes. These categories are one of the Cartesian products of "Up Correlation", "No Correlation", and "Down Correlation" in each of the conditions, as well as a category for "no significant differential correlation".
#' @param corsA Numeric vector of correlations between gene pairs in condition A.
#' @param corsB Numeric vector of correlations between gene pairs in condition B.
#' @param pvalsA Numeric vector of the significance of correlation calls between gene pairs in condition A.
#' @param pvalsB Numeric vector of the significance of correlation calls between gene pairs in condition B.
#' @param dCorPVals Numeric vector of the differential correlation p-value calls.
#' @param sigThresh If classify = TRUE, this numeric value specifies the p-value threshold at which a differential correlation p-value is deemed significant for differential correlation class calculation. Default = 1, as investigators may use different cutoff thresholds; however, this can be lowered to establish significant classes as desired.
#' @param corSigThresh Threshold at which the correlation p-values must be below in order to be called "significant". Default = 0.05.
#' @param convertClasses Logical indicating whether the returned classes should be in numeric (factor) format or character format indicating the "actual" class.
#' @return A numeric vector of classes derived from each of the input vectors.
#' @examples
#' rho1 = runif(100, -1, 1); rho2 = runif(100, -1, 1)
#' pvalsA = runif(100, 0, 1); pvalsB = runif(100, 0, 1); dcor_pvals = runif(100, 0, 1)
#' cor_classes = dCorClass(rho1, pvalsA, rho2, pvalsB, dcor_pvals)
#' cor_classes = dCorClass(rho1, pvalsA, rho2, pvalsB, dcor_pvals, convertClasses = TRUE)
#' @export
dCorClass <- function(corsA, pvalsA, corsB, pvalsB, dCorPVals, sigThresh = 1,
  corSigThresh = 0.05, convertClasses = FALSE){
  # Classify each gene pair into one of nine differential-correlation classes
  # (or 0 = "NonSig"). A pair is classified only when its differential
  # correlation p-value is below sigThresh; within each condition the
  # correlation is called "+", "0", or "-" depending on its sign and whether
  # its correlation p-value is below corSigThresh.
  #
  # Bug fix: the original length check referenced an undefined variable
  # `dCorrs` and misused all.equal() (which compares only its first two
  # arguments; the rest were swallowed as tolerance/countEQ). Compare all
  # five input lengths explicitly instead.
  lens = c(length(corsA), length(pvalsA), length(corsB), length(pvalsB),
    length(dCorPVals))
  if(length(unique(lens)) != 1) stop("All of the input vectors should be the same length.")
  classes = rep(0, length(corsA))
  sigs = (dCorPVals < sigThresh)   # significant differential correlation
  pvA = (pvalsA < corSigThresh)    # correlation significant in condition A
  pvB = (pvalsB < corSigThresh)    # correlation significant in condition B
  cAup = (corsA > 0)               # correlation positive in condition A
  cBup = (corsB > 0)               # correlation positive in condition B
  #UpUp
  classes[which(sigs & pvA & pvB & cAup & cBup)] = 1
  #UpNon
  classes[which(sigs & pvA & !pvB & cAup)] = 2
  #UpDown
  classes[which(sigs & pvA & pvB & cAup & !cBup)] = 3
  #NonUp
  classes[which(sigs & !pvA & pvB & cBup)] = 4
  #NonNon
  classes[which(sigs & !pvA & !pvB)] = 5
  #NonDown
  classes[which(sigs & !pvA & pvB & !cBup)] = 6
  #DownUp
  classes[which(sigs & pvA & pvB & !cAup & cBup)] = 7
  #DownNon
  classes[which(sigs & pvA & !pvB & !cAup)] = 8
  #DownDown
  classes[which(sigs & pvA & pvB & !cAup & !cBup)] = 9
  if(convertClasses){
    # Map numeric codes to human-readable "A-condition/B-condition" labels.
    classes = factor(classes, levels = c(0, 1, 2, 3, 4, 5, 6, 7, 8, 9),
      labels = c("NonSig", "+/+", "+/0", "+/-",
      "0/+", "0/0", "0/-", "-/+", "-/0", "-/-"))
  }
  return(classes)
}
|
90e61365e92444123313c1f6827749451758eace | 5cd1539847bc53456795abaa975839ef2e219a89 | /test1.R | ad5b4ecb4cc69ceb05209268e924ef8be8446ae7 | [] | no_license | darimelo/testR | 45351a30e06150a2f90f2de3bc162408ba8eaa75 | d81927743b6893c8a48471fbb4772dc7ceb3bc8e | refs/heads/master | 2020-04-28T17:12:04.392772 | 2019-03-13T14:37:15 | 2019-03-13T14:37:15 | 175,437,504 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 15 | r | test1.R | a <- seq(1:10)
|
861a2ac1c1843d4b7b4bda2e312b3c769065e352 | 87bc2495102f8555b1c4ec66f561c868e2d5495b | /man/listToDataFrame.Rd | 6c2a44e76696e2cf9bb8123aacd87148bec9327d | [] | no_license | cran/Fgmutils | a4b716bfb5eccff5cb89133314ab18eb01047875 | 52b9c01a4ee269acc66a2efa29df8411033f0306 | refs/heads/master | 2020-05-21T04:22:25.850012 | 2018-11-17T23:50:22 | 2018-11-17T23:50:22 | 48,080,087 | 0 | 0 | null | null | null | null | UTF-8 | R | false | true | 367 | rd | listToDataFrame.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/listToDataFrame.R
\name{listToDataFrame}
\alias{listToDataFrame}
\title{List to DataFrame}
\usage{
listToDataFrame(dlist)
}
\arguments{
\item{dlist}{a list}
}
\description{
Converts a list into a data frame.
}
\examples{
a <- 1:5
listToDataFrame(a)
b = listToDataFrame(a)
}
|
182ecc735213ef123dbe6325380940a5b6066d21 | 5b04c512b208b0688f994933d549c09159ae3c37 | /intensive.site.selection.R | da7679539a7625901747b5074ef4487f4499dd45 | [] | no_license | AleneOnion/LCIReports | 9234b8c492dd66e54b4d044f8493044ebe1f1135 | 4da21717ad5616b55eeaccec9f349a9940ee21b4 | refs/heads/master | 2021-08-07T15:58:28.344838 | 2020-04-23T09:54:39 | 2020-04-23T09:54:39 | 160,554,404 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 16,431 | r | intensive.site.selection.R | #script to rule it all
#alene Onion
# November 2018
############################################################################################################
#pull historic data, insert 2018 data, and clean up data file
source('L:/DOW/StreamDatabase/Lakes/data/2018.cleanup.R')
############################################################################################################
#set working directory
setwd("C:/Rscripts/LCIReports")
############################################################################################################
#simplifying and merging the tables
source('L:/DOW/StreamDatabase/Lakes/data/2018/Lakes.R')
#Fixing the data set
data$Result.Sample.Fraction[data$Result.Sample.Fraction==""]<-NA
#remove old data
data<-data[data$SAMPLE_DATE>'2000-01-01',]
#remove na
data<-data[!is.na(data$Characteristic.Name),]
#write backup
write.csv(data,file="sections/data/data.backup.all.csv",row.names=FALSE)
#restricting to 2018 only
data<-data[data$SAMPLE_DATE>'2018-01-01',]
backup<-data
#fix info types
#write function to capture last characters in a string
substrRight <- function(x, n) {
  # Return the last n characters of each element of x (vectorized).
  len <- nchar(x)
  substring(x, len - n + 1, len)
}
data$end<-substrRight(data$SAMPLE_NAME,2)
data$INFO_TYPE<-NA
data$INFO_TYPE<-ifelse(data$end=="DP","DP",data$INFO_TYPE)
data$INFO_TYPE<-ifelse(data$end=="SD","SD",data$INFO_TYPE)
#remove cslap and habs samples
data$end<-substr(data$SAMPLE_NAME,1,3)
data<-data[data$end!="18-",]
#remove samples that don't start with 18
data$end<-substr(data$SAMPLE_NAME,1,2)
data<-data[data$end=="18",]
#identify samples that end in an even number as BS and odd as OW
data$end<-substrRight(data$SAMPLE_NAME,1)
data$INFO_TYPE<-ifelse(data$end=="1","OW",data$INFO_TYPE)
data$INFO_TYPE<-ifelse(data$end=="3","OW",data$INFO_TYPE)
data$INFO_TYPE<-ifelse(data$end=="5","OW",data$INFO_TYPE)
data$INFO_TYPE<-ifelse(data$end=="7","OW",data$INFO_TYPE)
data$INFO_TYPE<-ifelse(data$end=="9","OW",data$INFO_TYPE)
data$INFO_TYPE<-ifelse(data$end=="0","BS",data$INFO_TYPE)
data$INFO_TYPE<-ifelse(data$end=="2","BS",data$INFO_TYPE)
data$INFO_TYPE<-ifelse(data$end=="4","BS",data$INFO_TYPE)
data$INFO_TYPE<-ifelse(data$end=="6","BS",data$INFO_TYPE)
data$INFO_TYPE<-ifelse(data$end=="8","BS",data$INFO_TYPE)
data$end<-NULL
#write backup
write.csv(data,file="sections/data/data.backup.csv",row.names=FALSE)
###########################################################################################################
#reviewing and ranking 2018 data
###########################################################################################################
#loading data
bresults<-read.csv("L:/DOW/StreamDatabase/Lakes/data/Test.Results.csv", stringsAsFactors=FALSE)
bsample<-read.csv("L:/DOW/StreamDatabase/Lakes/data/Sample.csv", stringsAsFactors=FALSE)
blake<-read.csv("L:/DOW/StreamDatabase/Lakes/data/Lake.Master.csv", stringsAsFactors=FALSE)
blocation<-read.csv("L:/DOW/StreamDatabase/Lakes/data/Location.csv", stringsAsFactors=FALSE)
bhabs<-read.csv("L:/DOW/StreamDatabase/Lakes/data/HABstatus.csv", stringsAsFactors=FALSE)
data<-read.csv("sections/data/data.backup.csv", stringsAsFactors=FALSE)
data<-data[!is.na(data$Characteristic.Name),]
data$SAMPLE_DATE<-as.Date(data$SAMPLE_DATE,format="%Y-%m-%d")
#now restrict to only the intensive basins
intensive<-data
intensive$basin<-substring(intensive$LAKE_ID,1,2)
intensive<-intensive[intensive$basin=="17"|
intensive$basin=="06"|
intensive$basin=="10"|
intensive$LAKE_ID=="0902BAR0262"|
intensive$LAKE_ID=="1301MORXXX1"|
intensive$LAKE_ID=="1301THE1027"|
intensive$LAKE_ID=="1301THE1034"|
intensive$LAKE_ID=="1301UWB1031",]
#now had historic data for needs verification and minor impacts waterbody inventory sites
#add needs verification list from waterbody inventory
waterinv<-read.csv("sections/data/Waterbody.Inventory.Input.csv", stringsAsFactors=FALSE)
waterinv<-waterinv[!is.na(waterinv$BASIN_CODE),]
pwl<-unique(blake[c('LakeID','PWLID')])
pwl$basin<-substring(pwl$LakeID,1,2)
pwl$basin<-paste("_",pwl$basin,sep="")
waterinv<-merge(waterinv,pwl,by=c('PWLID'),all.x = TRUE)
waterinv<-unique(waterinv[c('PWLID','LakeID')])
names(waterinv)[names(waterinv)=="LakeID"]<-"LAKE_ID"
rm(pwl)
waterinv$WIPWL<-"needs verification"
#read complete data set
data1<-read.csv("sections/data/data.backup.all.csv", stringsAsFactors=FALSE)
data1<-data1[!is.na(data1$Characteristic.Name),]
data1$SAMPLE_DATE<-as.Date(data1$SAMPLE_DATE,format="%Y-%m-%d")
library(dplyr)
data1<-merge(waterinv,data1,by=c('LAKE_ID','PWLID'),all.x = TRUE)
data1 <- data1 %>%
filter(Characteristic.Name %in% c("AMMONIA", "NITROGEN, NITRATE (AS N)","SODIUM","IRON","MANGANESE","MAGNESIUM","NITROGEN, NITRATE-NITRITE","PH","DEPTH, SECCHI DISK DEPTH","NITROGEN, KJELDAHL, TOTAL","SILICA","CHLORIDE (AS CL)","ALKALINITY, TOTAL (AS CACO3)","TOTAL ORGANIC CARBON","CHLOROPHYLL A","PHOSPHORUS","SULFATE (AS SO4)","TRUE COLOR","CALCIUM","SULFATE","CHLORIDE","SPECIFIC CONDUCTANCE","DEPTH, BOTTOM","SULFATE","DISSOLVED ORGANIC CARBON","NITROGEN","ARSENIC","NITRITE","TEMPERATURE, WATER","DEPTH","TEMPERATURE, AIR","DISSOLVED OXYGEN","CONDUCTIVITY"))
data1<-data1[!is.na(data1$Characteristic.Name),]
intensive<-merge(intensive,data1,all = TRUE)
rm(data1)
############################################################################################################
#ADDING THRESHOLDS
#these thresholds will define thresholds for future plots
thresholds<-read.csv("sections/data/thresholds.csv", stringsAsFactors=FALSE)
thresholds<-thresholds[thresholds$Characteristic.Name!=0,]
thresholds$notes<-NULL
#simplify waterbody classification
intensive$simpleWC<-NA
intensive$simpleWC<-ifelse(grepl("C",intensive$Waterbody_Classification),"C",intensive$simpleWC)
intensive$simpleWC<-ifelse(grepl("B",intensive$Waterbody_Classification),"B",intensive$simpleWC)
intensive$simpleWC<-ifelse(grepl("A",intensive$Waterbody_Classification),"A",intensive$simpleWC)
intensive$simpleWC<-ifelse(grepl("AA",intensive$Waterbody_Classification),"AA",intensive$simpleWC)
intensive<-intensive[!is.na(intensive$simpleWC),]
intensive$simpleT<-NA
intensive$simpleT<-ifelse(grepl("T",intensive$Waterbody_Classification),"T",intensive$simpleT)
intensive$simpleT<-ifelse(grepl("TS",intensive$Waterbody_Classification),"TS",intensive$simpleT)
#class A waters################################################################################################################
source('sections/PWS.R')
#CLASS B WATERS################################################################################################################
source('sections/Recreation.R')
#aquatic life################################################################################################################
source('sections/AquaticLife.R')
#trophic state################################################################################################################
source('sections/trophic.R')
######################################################################################################################
#merge back to intensive
intensive<-merge(intensive,trend,all = TRUE)
rm(trend)
intensive<-intensive[!is.na(intensive$Characteristic.Name),]
#create simplified count table
#remove rejected data
#create simplified table
PWsimple<- intensive %>%
dplyr::select(LAKE_ID,WATER,basin,PIdrinking) %>%
dplyr::group_by(LAKE_ID,WATER,basin) %>%
dplyr::summarize(PIdrinking = sum(PIdrinking, na.rm = TRUE)) %>%
dplyr::ungroup()
head(PWsimple)
unique(PWsimple$PIdrinking)
#create simplified recreation table
RECsimple<- intensive %>%
dplyr::select(LAKE_ID,WATER,basin,PIrecreation) %>%
dplyr::group_by(LAKE_ID,WATER,basin) %>%
dplyr::summarize(PIrecreation = sum(PIrecreation, na.rm = TRUE)) %>%
dplyr::ungroup()
head(RECsimple)
unique(RECsimple$PIrecreation)
#create simplified aquatic life table
AQUsimple<- intensive %>%
dplyr::select(LAKE_ID,WATER,basin,PIaquatic) %>%
dplyr::group_by(LAKE_ID,WATER,basin) %>%
dplyr::summarize(PIaquatic = sum(PIaquatic, na.rm = TRUE)) %>%
dplyr::ungroup()
head(AQUsimple)
unique(AQUsimple$PIaquatic)
#create simplified trophic tables
#eutrophic
EUTsimple<- intensive %>%
dplyr::select(LAKE_ID,WATER,basin,eutrophic) %>%
dplyr::group_by(LAKE_ID,WATER,basin) %>%
dplyr::summarize(eutrophic = sum(eutrophic, na.rm = TRUE)) %>%
dplyr::ungroup()
head(EUTsimple)
unique(EUTsimple$eutrophic)
#mesotrophic
MESsimple<- intensive %>%
dplyr::select(LAKE_ID,WATER,basin,mesotrophic) %>%
dplyr::group_by(LAKE_ID,WATER,basin) %>%
dplyr::summarize(mesotrophic = sum(mesotrophic, na.rm = TRUE)) %>%
dplyr::ungroup()
head(MESsimple)
unique(MESsimple$mesotrophic)
#oligotrophic
OLIsimple<- intensive %>%
dplyr::select(LAKE_ID,WATER,basin,oligotrophic) %>%
dplyr::group_by(LAKE_ID,WATER,basin) %>%
dplyr::summarize(oligotrophic = sum(oligotrophic, na.rm = TRUE)) %>%
dplyr::ungroup()
head(OLIsimple)
unique(OLIsimple$oligotrophic)
trendsimple<-merge(PWsimple,RECsimple,by=c('basin','LAKE_ID','WATER'),all=TRUE)
trendsimple<-merge(trendsimple,AQUsimple,by=c('basin','LAKE_ID','WATER'),all=TRUE)
trendsimple<-merge(trendsimple,EUTsimple,by=c('basin','LAKE_ID','WATER'),all=TRUE)
trendsimple<-merge(trendsimple,MESsimple,by=c('basin','LAKE_ID','WATER'),all=TRUE)
trendsimple<-merge(trendsimple,OLIsimple,by=c('basin','LAKE_ID','WATER'),all=TRUE)
trendsimple<-trendsimple[order(trendsimple$PIdrinking,trendsimple$PIrecreation,trendsimple$PIaquatic,trendsimple$eutrophic,trendsimple$mesotrophic,trendsimple$oligotrophic,decreasing = TRUE),]
rm(list=c('EUTsimple','MESsimple','OLIsimple','PWsimple','RECsimple','AQUsimple'))
#add silver lake which we didn't get to but may be considered for sampling
silver<-data.frame(LAKE_ID="1702SIL1076",basin="17",WATER="Silver Lake Reservoir")
trendsimple<-merge(trendsimple,silver,by=c('LAKE_ID','basin','WATER'),all=TRUE)
rm(silver)
#identify PWS waters and Beaches
class<-unique(blake[c('LakeID','PWS','Beaches','Waterbody_Classification','ACRES','PWLID')])
names(class)[names(class)=="LakeID"]<-"LAKE_ID"
class$PWS<-ifelse(class$PWS=="No",NA,class$PWS)
PWS<-unique(class[c('LAKE_ID','PWS')])
PWS<-PWS[!is.na(PWS$PWS),]
PWS<-unique(PWS)
beach<-unique(class[c('LAKE_ID','Beaches')])
beach<-beach[!is.na(beach$Beaches),]
beach<-unique(beach)
acres<-unique(class[c('LAKE_ID','ACRES')])
acres<-acres[!is.na(acres$ACRES),]
acres<-unique(acres)
wqc<-unique(class[c('LAKE_ID','Waterbody_Classification')])
wqc<-wqc[!is.na(wqc$Waterbody_Classification),]
names(wqc)[names(wqc)=="Waterbody_Classification"]<-"Classification"
wqc<-unique(wqc)
pwl<-unique(class[c('LAKE_ID','PWLID')])
pwl<-pwl[!is.na(pwl$PWLID),]
pwl<-unique(pwl)
hypo<-unique(intensive[c('LAKE_ID','INFO_TYPE')])
hypo<-hypo[hypo$INFO_TYPE=="BS",]
hypo$hypo<-"yes"
hypo<-unique(hypo[c('LAKE_ID','hypo')])
trendsimple<-merge(trendsimple,PWS,all.x=TRUE)
trendsimple<-merge(trendsimple,beach,all.x = TRUE)
trendsimple<-merge(trendsimple,wqc,all.x = TRUE)
trendsimple<-merge(trendsimple,acres,all.x = TRUE)
trendsimple<-merge(trendsimple,pwl,all.x = TRUE)
trendsimple<-merge(trendsimple,hypo,all.x = TRUE)
rm(list=c('class','beach','PWS','acres','pwl','wqc','hypo'))
#pull the coordinates for each lake
local<-blocation[blocation$Type=="Centroid"|blocation$Type=="Deep Hole"|blocation$Type=="centroid",]
names(local)[names(local)=="LakeID"]<-"LAKE_ID"
local<-unique(local[c('LAKE_ID','Y_Coordinate','X_Coordinate')])
local<-local[!duplicated(local$LAKE_ID),]
trendsimple<-merge(trendsimple,local,by=c('LAKE_ID'),all.x = TRUE)
rm(local)
#pull habs samples
lakeids<-unique(trendsimple[c('LAKE_ID','basin')])
lakeids$lakes<-"yes"
lakeids<-unique(lakeids[c('LAKE_ID','lakes')])
intensivehabs<-merge(bhabs,bsample,by=c('SAMPLE_ID'),all.x=TRUE)
intensivehabs<-unique(intensivehabs[c('LAKE_ID','STATUS')])
intensivehabs<-merge(lakeids,intensivehabs,by=c('LAKE_ID'),all.x = TRUE)
rm(lakeids)
#remove No Blooms
intensivehabs<-intensivehabs[intensivehabs$STATUS!="No Bloom",]
intensivehabs<-intensivehabs[!is.na(intensivehabs$LAKE_ID),]
intensivehabs$bloom<-"yes"
intensivehabs<-unique(intensivehabs[c('LAKE_ID','bloom')])
#add to trendsimple
trendsimple<-merge(trendsimple,intensivehabs,by=c('LAKE_ID'),all.x = TRUE)
rm(intensivehabs)
#figuring out when last sampled
samples<-unique(bsample[c('LAKE_ID','SAMPLE_DATE','SAMPLE_ID')])
sampless<-unique(bresults[c('SAMPLE_ID','Characteristic.Name')])
samples<-merge(samples,sampless,by=c('SAMPLE_ID'))
rm(sampless)
samples$SAMPLE_DATE<- as.Date(samples$SAMPLE_DATE,format="%m/%d/%Y")
samples<-samples %>%
filter(Characteristic.Name %in% c("Depth, Secchi disk depth","Depth, Secchi Disk Depth","Disk, Secchi Disk Depth")) %>%
mutate(year = format(SAMPLE_DATE, "%Y")) %>%
select(LAKE_ID,year)%>%
group_by(LAKE_ID) %>%
summarize(year = max(year)) %>%
ungroup()
trendsimple<-merge(trendsimple,samples,by=c('LAKE_ID'),all.x = TRUE)
rm(samples)
trendsimple<-unique(trendsimple[c('basin','LAKE_ID','WATER','PWS','PIdrinking','PIrecreation','PIaquatic','bloom','eutrophic','mesotrophic','oligotrophic','year','PWLID','hypo','Classification','Beaches','ACRES','Y_Coordinate','X_Coordinate')])
#identify those in waterbody inventory needs verification
trendsimple<-merge(trendsimple,waterinv,by=c('PWLID','LAKE_ID'),all=TRUE)
rm(waterinv)
#remove those that are in cslap
trendsimplebackup<-trendsimple
cslap<-read.csv("sections/data/2018CSLAP.csv", stringsAsFactors=FALSE)
cslap$in2018<-"yes"
cslap<-unique(cslap[c('LAKE_ID','in2018')])
trendsimple<-merge(trendsimple,cslap,by=c('LAKE_ID'),all.x =TRUE)
rm(cslap)
trendsimple<-trendsimple[is.na(trendsimple$in2018),]
trendsimple$in2018<-NULL
#remove those that are less than 6.5 acres
trendsimple<-trendsimple[trendsimple$ACRES>6.5,]
#convert NA to 0 before ranking
trendsimple$PIdrinking[is.na(trendsimple$PIdrinking)] <- 0
trendsimple$PIaquatic[is.na(trendsimple$PIaquatic)] <- 0
trendsimple$PIrecreation[is.na(trendsimple$PIrecreation)] <- 0
#remove Kissena lake because it was sampled 5 years ago
trendsimple<-trendsimple[trendsimple$LAKE_ID!="1702KIS0076",]
trendsimple<-trendsimple[order(trendsimple$PWS,trendsimple$PIdrinking,trendsimple$PIrecreation,trendsimple$PIaquatic,trendsimple$bloom,trendsimple$eutrophic,trendsimple$mesotrophic,trendsimple$oligotrophic,trendsimple$Classification,decreasing = TRUE),]
trendsimple$basin<-substr(trendsimple$LAKE_ID,1,2)
#add costs
costs<-read.csv("sections/data/price.list.parameters.csv", stringsAsFactors=FALSE)
costs<-costs[costs$CHEMICAL_NAME=="cost",]
trendsimple$cost<-NA
trendsimple$cost<-ifelse(grepl("A",trendsimple$Classification) & trendsimple$hypo=="yes",4*(costs$class_a_epi[1]+costs$class_a_hyp[1]),trendsimple$cost)
trendsimple$cost<-ifelse(grepl("A",trendsimple$Classification) & is.na(trendsimple$hypo),4*(costs$class_a_unstratified),trendsimple$cost)
trendsimple$cost<-ifelse(!grepl("A",trendsimple$Classification) & trendsimple$hypo=="yes",4*(costs$class_b_epi[1]+costs$class_b_hyp[1]),trendsimple$cost)
trendsimple$cost<-ifelse(!grepl("A",trendsimple$Classification) & is.na(trendsimple$hypo),4*(costs$class_b_unstratified[1]),trendsimple$cost)
trendsimple<-trendsimple[!is.na(trendsimple$LAKE_ID),]
rm(costs)
#make basin _basin
trendsimple$basin<-paste("_",trendsimple$basin,sep="")
#write output
write.csv(trendsimple,file="2018.ranked.intensive.sites.csv",row.names=FALSE)
write.csv(intensive,file="2018.ranked.intensive.Data.csv",row.names=FALSE)
rmarkdown::render("intensive.site.selection.Rmd")
###########################################################################################################
#lakes<-unique(data$LAKE_ID)
#for(lake in lakes){
# temp<-data[data$LAKE_ID==lake,]
# temp<-temp[!is.na(temp$Characteristic.Name),]
# #for the title of the file and the report
# water<-unique(temp$WATER)
# water<-water[!is.na(water)]
# water<-tail(water,1)
# thistitle<-paste("LCI Report for ",water,sep='')
# rmarkdown::render('report.Rmd', # file 2
# output_file = paste("report_", water,"(",lake,")_", Sys.Date(), ".html", sep=''),
# output_dir = 'reports')
# rm(list = c('temp','water','thistitle'))
#}
#rm(list = c('lake','lakes'))
########################################################################################################### |
21c4df9a1907cf0e405bcd030d26d67e0e3f8612 | 79b51c8afb0a6d4d1aa34fe5a43e2a6b420db50f | /R/fit_multi.R | 05db2ca0b6db784fd9ccd7dd5e40236a9f8b5884 | [
"MIT"
] | permissive | aris-budiman/epifitter | ce75aaeac2cd1bfd3ffe7dc977cc37d7a86caa39 | 00af96a8a8c73ef15f548e3850dc42791d2d52b4 | refs/heads/master | 2023-05-30T06:26:15.038508 | 2021-06-14T14:36:46 | 2021-06-14T14:36:46 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 3,102 | r | fit_multi.R | fit_multi = function(time_col,
intensity_col,
data,
strata_cols ,
starting_par = list(y0 = 0.01, r = 0.03, K = 0.8),
maxiter=500,
nlin = FALSE,
estimate_K = FALSE){
if (missing(data)) {
stop(gettextf("Missing 'data' argument"))
}
if (missing(intensity_col)) {
stop(gettextf("Missing 'intensity_col' argument"))
}
if (missing(time_col)) {
stop(gettextf("Missing 'time_col' argument"))
}
# if (nlin==T & missing(guess_y0)) {
# stop(gettextf("Missing 'guess_y0' value"))
# }
# if (nlin==T & missing(guess_r)) {
# stop(gettextf("Missing 'guess_r' value"))
# }
# if (estimate_K == T & missing(guess_K)) {
# stop(gettextf("Missing 'guess_K' value"))
# }
box = data.frame()
pred_box = data.frame()
strata_col = strata_cols
if(is.null(strata_col)){
data_uni=data %>%
dplyr::mutate(strata = "")
strata_col= "strata"
}else{
data_uni = data %>%
tidyr::unite(strata, strata_col, sep = "---")
}
STRATA = data_uni[["strata"]]
strata = as.character(unique(STRATA))
for(i in 1:length(strata)){
rowi = data_uni[["strata"]]==strata[i]
datai = data_uni[rowi,]
if(nlin == T & estimate_K == T ){
model = fit_nlin2(time = datai[[time_col]],
y = datai[[intensity_col]],
starting_par = starting_par,
maxiter=maxiter)
}
if(nlin == T & estimate_K == F ){
model = fit_nlin(time = datai[[time_col]],
y = datai[[intensity_col]],
starting_par = starting_par[1:2],
maxiter = maxiter)}
if(nlin == F & estimate_K == F){
model = fit_lin(time = datai[[time_col]],
y = datai[[intensity_col]])
}
if(nlin == F & estimate_K == T){
model = fit_lin(time = datai[[time_col]],
y = datai[[intensity_col]])
gettextf("'K' is not estimated when nlin = F. To estimate K, use nlin = T and estimate_K = T ")
}
# Predictions
lil_pred_box= model$data %>%
dplyr::mutate(strata = strata[i])
pred_box = pred_box %>%
dplyr::bind_rows(lil_pred_box)
#Parameters
lil_box = model$stats_all %>%
dplyr::mutate(strata = strata[i])
box = box %>%
dplyr::bind_rows(lil_box)
}
colnames = colnames(lil_box)[colnames(lil_box)!="strata"]
colnames_prbox = colnames(lil_pred_box)[colnames(lil_pred_box)!="strata"]
box2 = box %>%
dplyr::select("strata",colnames) %>%
tidyr::separate(strata,into = strata_col, sep = "---")
pred_box2 = pred_box %>%
dplyr::select("strata",colnames_prbox) %>%
tidyr::separate(strata,into = strata_col, sep = "---")
if(nlin == F & estimate_K == T){
message("'K' is not estimated when nlin = F. To estimate K, use nlin = T and estimate_K = T ")
}
a = list(Parameters = box2,
Data = pred_box2)
return(a)
}
|
598e45c058edd81b802f21c39187b5cf048d1bcb | fafd26bd65bb0afcc6777ce18726dd8b0a5bb1c8 | /R/Devoted_runners.R | a4e71b1a4df815a21368c01502264d0d81576ac3 | [] | no_license | dtkaplan/SDSdata | 50a3814af475c4793e03ec0200abd5d1cec738e9 | 5f8c9ec18bff5f1a4565cc72f745c4943fd372d9 | refs/heads/master | 2022-06-27T13:33:10.639497 | 2022-06-23T20:26:56 | 2022-06-23T20:26:56 | 133,568,261 | 0 | 1 | null | null | null | null | UTF-8 | R | false | false | 799 | r | Devoted_runners.R | #' Race results for long-time runners of the Cherry Blossom 10-milers
#'
#'
#'
#' @docType data
#'
#' @usage data(Devoted_runners)
#'
#' @format A data.frame object with one row for each runner-year in the Cherry
#' Blossom 10-miler held in Washington, DC. Only those runners with 10 or more successive
#' participations in the race are included.
#'
#' - `age` the runner's age when the race was run
#' - `seconds` the time it took the runner to complete the 10-mile race in h:mm:ss
#' - `sex`
#' - `division` the sex/age division that the runner was in .
#' - `year`
#' - `Hometown`
#' - `name`: the runners' name
#' - `PiS/TiS` the runner's position among all runners of the same sex
#' - `PiD/TiD`: the runner's position in his or her division.
#'
#' @keywords datasets
#'
#'
"Devoted_runners"
|
46f175e6bc207151e97f39c5d0ebc80075181586 | 584594f4dd0026c90a22df6e5b75702de4bdbd6d | /R/CalDDTDDF.R | 6659d8630c821b552a0f1d93c25630e78fed1070 | [] | no_license | smallwave/FrostNumModel | 51357c1d4549424517fcc49c10f5fe1aadb676a4 | 49c57ee65c51722de3ee443445647aa27ceceecb | refs/heads/master | 2021-01-24T07:47:26.239873 | 2017-06-20T06:58:56 | 2017-06-20T06:58:56 | 93,357,542 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,578 | r | CalDDTDDF.R | #************************************************************************************************
# Ahthor: wavelet
# Propose : Calcalation DDT and DDF
# Date : 2017/6/20
# Update info :
# 2017/6/20/
#************************************************************************************************
gstinFilePath <- "F:/worktemp/Permafrost(FrostModel)/Data/GST(Kri)/"
ddtddfFilePath <- "F:/worktemp/Permafrost(FrostModel)/Data/DDTDDF/ddtddf.csv"
#************************************************************************************************
# process every file
#************************************************************************************************
searchStr <- paste("*.csv",sep='')
fileList <- list.files(gstinFilePath, pattern = searchStr , full.names = TRUE, recursive = TRUE)
DDTALL <- NULL
DDFALL <- NULL
for(fileName in fileList){
print(fileName)
InData <- read.csv(fileName, head=TRUE,sep=",")
# DDT
if(is.null(DDTALL)){
DDTALL <- rep(c(0), nrow(InData))
}
IndexDDT <- InData$GST > 0
DDT <- rep(c(0), nrow(InData))
DDT[IndexDDT] <- InData$GST[IndexDDT]
DDTALL <- DDTALL + DDT
# DDF
if(is.null(DDFALL)){
DDFALL <- rep(c(0), nrow(InData))
}
IndexDDF <- InData$GST < 0
DDF <- rep(c(0), nrow(InData))
DDF[IndexDDF] <- InData$GST[IndexDDF]
DDFALL <- DDFALL + DDF
}
DDFALL <- abs(DDFALL)
map.Polygon <- data.frame(InData[,c("Lon","Lat")],DDTALL,DDFALL)
write.csv(map.Polygon, file = ddtddfFilePath,row.names = FALSE)
|
9893f7056770fa608b15ca0b1e27bd4a0a1775cd | 02f2be941f38b92936146820f9766470fd648c86 | /domeny_dane_kontaktowe_1.6.R | 1670a2942bc0705fe341e742f658360b3aa0b8f3 | [] | no_license | gsg80/domeny | 5f992bc79404e855288c444d39fa993aaa6d88ca | a4cf2c2d6e41e33b56e746395e7d1f5cf5606f87 | refs/heads/master | 2021-08-10T09:10:56.629784 | 2017-11-12T12:03:35 | 2017-11-12T12:03:35 | 110,427,636 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 14,693 | r | domeny_dane_kontaktowe_1.6.R | library(XML)
library(RCurl)
setwd("R Projects/domeny")
f = "domeny.csv" #link do pliku
ver <- "1.6"
#funkcja wyszukujaca telefon w tekście
parsePhone <- function(text) {
  # Extract a 9-digit phone number that follows a known marker in `text`
  # (a single, pre-cleaned string -- see clearText()).
  #
  # Ordered candidate markers; the FIRST one found wins. This mirrors the
  # original nested if/else priority exactly: lowercase "telkom" variants
  # first (preferring the longer "telkomórkowy"), then the mixed-case
  # spellings, then "telfax", plain "tel"/"Tel", and finally "+48".
  markers <- c("telkomórkowy", "telkom", "TelKomórkowy", "TelKom",
               "Telkomórkowy", "Telkom", "telfax", "tel", "Tel", "+48")
  fraza <- ""
  for (m in markers) {
    if (grepl(m, text, fixed = TRUE)) {
      fraza <- m
      break
    }
  }
  dl <- nchar(fraza)
  # Bug fix: the original called regexpr(fraza, text) with the marker as a
  # regex, which is an invalid pattern when fraza == "+48" (a leading "+"
  # is a repetition operator with no operand). fixed = TRUE matches the
  # marker literally.
  pos <- regexpr(fraza, text, fixed = TRUE)
  # Skip a single leading "0" (old national dialling prefix), if present.
  z <- 0
  zer <- substring(text, pos + dl, pos + dl)
  if (zer == "0") {
    z <- 1
  }
  # Preserved from the original: for "+48" the offset is shortened by one
  # character before extracting the 9 digits.
  if (fraza == "+48") {
    dl <- dl - 1
  }
  # NOTE(review): when no marker is found, fraza == "" and this returns the
  # first 9 characters of the input -- preserved from the original; confirm
  # downstream code filters such values.
  substring(text, pos + dl + z, pos + dl + z + 8)
}
#funkcja do wyszukiwania w tekscie adresow e-mail
parseMail <- function(text) {
  # Extract the e-mail address that follows an "e-mail" or "email" label,
  # terminated by the first ".pl" / ".eu" / ".com" suffix occurring after
  # the label. Returns "" when no label is present or no suffix follows it.
  #
  # Refactor: the original duplicated the same ~25-line suffix-search block
  # for both labels; it is extracted into one local helper. Behavior is
  # preserved, including the quirk that the suffix patterns are regexes in
  # which "." matches ANY character (so e.g. "apl" also terminates a match).
  extract_after <- function(marker) {
    em <- regexpr(marker, text, fixed = TRUE)
    # First occurrence of each suffix; absent suffixes get a large sentinel
    # so the minimum picks the earliest suffix actually present.
    ends <- c(pl = regexpr(".pl", text),
              eu = regexpr(".eu", text),
              com = regexpr(".com", text))
    ends[ends < 0] <- 100000
    idx <- which.min(ends)
    r <- ends[[idx]]
    tail_len <- if (names(ends)[idx] == "com") 3 else 2
    if (r > em) {
      substring(text, em + nchar(marker), r + tail_len)
    } else {
      ""
    }
  }
  # "e-mail" takes priority over "email"; if "e-mail" is present but yields
  # nothing, "email" is NOT tried (preserved from the original).
  if (grepl("e-mail", text, fixed = TRUE)) {
    extract_after("e-mail")
  } else if (grepl("email", text, fixed = TRUE)) {
    extract_after("email")
  } else {
    ""
  }
}
# Strip separators and markup noise from scraped text before pattern
# matching. `typ` selects extra cleaning: "NIP" also drops the "PL"
# country prefix, and both "TEL" and "NIP" additionally drop dots and
# dashes (digit-group separators).
clearText <- function(itext, typ="TEL") {
  # Literal tokens removed for every record type, in the original order.
  wspolne <- c(" ", "(", ")", "\u00A0", "\r", "\n", ",", ":", "/",
               "<strong>", "<li>")
  ctext <- itext
  for (znak in wspolne) {
    ctext <- gsub(znak, "", ctext, fixed = TRUE)
  }
  if (typ == "NIP") {
    ctext <- gsub("PL", "", ctext, fixed = TRUE)
  }
  if (typ == "TEL" || typ == "NIP") {
    for (znak in c(".", "-")) {
      ctext <- gsub(znak, "", ctext, fixed = TRUE)
    }
  }
  return(ctext)
}
# Drop the Polish country-code prefixes ("+48", "+ 48", "0048") before
# phone-number extraction; removal order matches the original chain of
# gsub() calls.
clearKierunkowy <- function(itext) {
  Reduce(function(txt, prefiks) gsub(prefiks, "", txt, fixed = TRUE),
         c("+48", "+ 48", "0048"),
         itext)
}
# Check whether a URL responds, with a hard 60 s elapsed-time budget.
# setTimeLimit() raises an error when the budget is exceeded, and
# url.exists() can itself fail on malformed input; previously either
# error aborted the whole crawl loop (call sites use checkURL() without
# try()), so both are now caught and reported as FALSE ("not reachable").
checkURL <- function(url) {
  setTimeLimit(elapsed=60, transient=TRUE)
  tryCatch(
    url.exists(url, .opts = list(timeout = 10, maxredirs = 2, verbose = FALSE)),
    error = function(e) FALSE
  )
}
# Collect every link on the page, capped at 60 s of elapsed time.
# On timeout setTimeLimit() raises an error which propagates to the
# caller's try() wrapper.
getLinks <- function(url) {
  found <- ""
  setTimeLimit(elapsed=60, transient=TRUE)
  found <- getHTMLLinks(url)
  return(found)
}
# Parse the page into an HTML document tree, capped at 60 s of elapsed
# time; a timeout error from setTimeLimit() propagates to the caller's
# try() wrapper.
getHTML <- function(url){
  doc <- ""
  setTimeLimit(elapsed=60, transient=TRUE)
  doc <- htmlParse(url)
  return(doc)
}
# Input listing of domains to crawl; `f` (the file path) is defined earlier
# in the script.
danecsv <- read.csv2(f)
# One slot per domain for every result column. The row count is computed
# once instead of re-deriving length(danecsv[, "domena"]) for each vector.
nDomen <- length(danecsv[, "domena"])
urlWorks <- vector(mode="numeric", length=nDomen)      # 1 when the site answered
urlKontakt <- vector(mode="character", length=nDomen)  # resolved contact-page URL
urlRegulamin <- vector(mode="character", length=nDomen)# resolved terms-page URL
text <- vector(mode="character", length=nDomen)        # cleaned phone snippet
textN <- vector(mode="character", length=nDomen)       # cleaned NIP snippet
textE <- vector(mode="character", length=nDomen)       # cleaned e-mail snippet
textP <- vector(mode="character", length=nDomen)       # postal-code snippet
tel <- vector(mode="numeric", length=nDomen)           # extracted phone (or 0)
eniro <- vector(mode="numeric", length=nDomen)         # "Website by Eniro" flag
nip <- vector(mode="numeric", length=nDomen)           # extracted tax id (or 0)
mail <- vector(mode="numeric", length=nDomen)          # extracted e-mail (or "")
pna <- vector(mode="numeric", length=nDomen)           # extracted postal code
wersja <- vector(mode="numeric", length=nDomen)        # script version tag
sunrise <- vector(mode="numeric", length=nDomen)       # Sunrise System SEO flag
rzetelna <- vector(mode="numeric", length=nDomen)      # Rzetelna Firma badge flag
koszyk <- vector(mode="numeric", length=nDomen)        # shopping-cart flag
dataCrawling <- vector(mode="numeric", length=nDomen)  # crawl date
# Phrases that indicate an on-line shop (shopping-cart detection), including
# variants with broken diacritics exactly as they appear in scraped pages.
fkoszyk <- c('koszyk','basket','sklep internetowy','Sklep internetowy','internetowy sklep','księgarnia internetowa','ksiêgarnia internetowa','Ksiêgarnia internetowa')
# Main crawl loop: one iteration per domain. For every reachable site it
# resolves a contact page, scrapes its text, and extracts a phone number,
# NIP (tax id), e-mail and postal code plus several marker flags; partial
# results are flushed to domeny_out.csv every 10 domains so a crash loses
# little work.
for(i in 1 : length(danecsv[, "domena"]))
{
  #print(paste(i, "start"))
  # Reset per-domain scratch state (m, opcja and danep are initialized here
  # but not used below).
  m <- 0
  mN <- 0
  mE <- 0
  mP <- 0
  opcja <- 0
  dane <- ""
  danem <- ""
  danep <- ""
  html <- ""
  links <- ""
  dkoszyk <- c(0,0,0,0,0,0,0,0)
  dmkoszyk <- c(0,0,0,0,0,0,0,0)
  # All domains are crawled over plain http.
  url <- paste ("http://", danecsv[i,"domena"], sep="")
  print(paste(i, url))
  if(checkURL(url)){
    #print(paste(i, "url OK"))
    urlWorks[i] <- 1
    # Collect all links; on failure `links` stays "" and the contact-page
    # search below falls through to the home page.
    try(links <- getLinks(url), silent=T)
    #print(paste(i, "links"))
    kontakt <-links[grep(pattern = "kontakt", links)]
    #print(paste(i, kontakt[1]))
    # Resolve the contact ("kontakt") page: try the first matching link as an
    # absolute URL, then as a relative path (with and without a "/"), before
    # falling back to the home page itself.
    if(length(kontakt) > 0){
      if(checkURL(links[min(grep(pattern = "kontakt", links))])){
        urlKontakt[i] <- links[min(grep(pattern = "kontakt", links))]
        #print(paste(url, "1", sep=" - "))
      }
      else{
        urlKontakt[i] <- paste(url,links[min(grep(pattern = "kontakt", links))],sep="")
        if(!(checkURL(urlKontakt[i]))){
          urlKontakt[i] <- paste(url,"/",links[min(grep(pattern = "kontakt", links))],sep="")
        }
        #print(paste(url, "2", sep=" - "))
      }
    }
    else{
      urlKontakt[i] <- url
      #print(paste(url, "3", sep=" - "))
    }
    #print(paste(i, "urlKontakt ok"))
    # Pull the visible <div> text and <meta content> attributes; on parse
    # failure both stay "" and every extractor below simply finds nothing.
    try({
      html <- getHTML(urlKontakt[i])
      dane <- xpathSApply(html, "//div", xmlValue)
      danem <- xpathSApply(html, "//meta", xmlGetAttr, 'content')
      #print(html)
    }, silent=T)
    #print(paste(i, "html+dane ok"))
    # PHONE: scan every <div> that looks phone-related until a valid 9-digit
    # number is found. NOTE(review): "T|tel(\\.|:|[:space:])" alternates on a
    # bare "T", so ANY capital T matches; "[:space:]" outside [[...]] is a
    # bracket expression over the literal characters :,s,p,a,c,e. Probably
    # "(T|t)el" and "[[:space:]]" were intended - confirm before changing.
    if(length(grep(pattern = "T|tel(\\.|:|[:space:])", dane)) > 0){
      danetel <- dane[grep(pattern = "T|tel(\\.|:|[:space:])", dane)]
      j <- 1
      while(tel[i] == 0 && j <= length(danetel)) {
        text[i] <- try(clearText(danetel[j]), silent=T)
        text[i] <- try(clearKierunkowy(text[i]), silent=T)
        tel[i] <- parsePhone(text[i])
        # Accept only exactly-9-digit results; otherwise reset and continue.
        if(length(grep(pattern = "[0-9]{9}", tel[i])) == 0 || nchar(tel[i]) != 9){
          tel[i] <- 0
        }
        j <- j + 1
      }
    }
    # Second pass: numbers introduced only by a "+48" country prefix (the
    # prefix is intentionally NOT stripped here, parsePhone handles it).
    if(tel[i] == "0")
    {
      if(length(grep(pattern = "\\+48", dane)) > 0){
        print ("tak")
        danetel <- dane[grep(pattern = "\\+48", dane)]
        j <- 1
        while(tel[i] == "0" && j <= length(danetel)) {
          text[i] <- try(clearText(danetel[j]), silent=T)
          #text[i] <- try(clearKierunkowy(text[i]), silent=T)
          tel[i] <- parsePhone(text[i])
          if(length(grep(pattern = "[0-9]{9}", tel[i])) == 0 || nchar(tel[i]) != 9){
            tel[i] <- 0
          }
          j <- j + 1
        }
      }
    }
    #print(paste(i, "Phone"))
    # NIP (tax identification number): take the LAST <div> mentioning it and
    # read the 10 characters following the "NIP"/"nip" label.
    if(length(grep(pattern = "(N|n)(I|i)(P|p)", dane)) > 0){
      mN <- max(grep(pattern = "(N|n)(I|i)(P|p)", dane))
      if(mN > 0) {
        textN[i] <- try(clearText(dane[mN], typ="NIP"), silent=T)
        if(length(grep(pattern = "NIP", textN[i])) > 0){
          nip[i] <- substring(textN[i], regexpr('NIP', textN[i])+3, regexpr('NIP', textN[i])+12)
        } else {
          if(length(grep(pattern = "nip", textN[i])) > 0){
            nip[i] <- substring(textN[i], regexpr('nip', textN[i])+3, regexpr('nip', textN[i])+12)
          }
        }
      }
    }
    #print(paste(i, "NIP"))
    if(length(grep(pattern = "[0-9]{10}", nip[i])) == 0 || nchar(nip[i]) != 10){
      nip[i] <- 0
    }
    # No valid NIP on the contact page: retry the same extraction on the
    # terms-of-service ("regulamin") page, resolved the same way as kontakt.
    if(nip[i] == "0") {
      regulamin <-links[grep(pattern = "regulamin", links)]
      if(length(regulamin) > 0){
        if(checkURL(links[min(grep(pattern = "regulamin", links))])){
          urlRegulamin[i] <- links[min(grep(pattern = "regulamin", links))]
        }
        else{
          urlRegulamin[i] <- paste(url,links[min(grep(pattern = "regulamin", links))],sep="")
          if(!(checkURL(urlRegulamin[i]))){
            urlRegulamin[i] <- paste(url,"/",links[min(grep(pattern = "regulamin", links))],sep="")
          }
        }
      }
      daneR <- ""
      try({
        htmlR <- getHTML(urlRegulamin[i])
        daneR <- xpathSApply(htmlR, "//div", xmlValue)
      }, silent=T)
      if(length(grep(pattern = "(N|n)(I|i)(P|p)", daneR)) > 0){
        mN <- max(grep(pattern = "(N|n)(I|i)(P|p)", daneR))
        if(mN > 0) {
          textN[i] <- try(clearText(daneR[mN], typ="NIP"), silent=T)
          if(length(grep(pattern = "NIP", textN[i])) > 0){
            nip[i] <- substring(textN[i], regexpr('NIP', textN[i])+3, regexpr('NIP', textN[i])+12)
          } else {
            if(length(grep(pattern = "nip", textN[i])) > 0){
              nip[i] <- substring(textN[i], regexpr('nip', textN[i])+3, regexpr('nip', textN[i])+12)
            }
          }
        }
      }
    }
    #E-MAIL: last <div> mentioning (e-)mail, parsed by parseMail().
    if(length(grep(pattern = "(E|e)?-?(M|m)(A|a)(I|i)(L|l)", dane)) > 0){
      mE <- max(grep(pattern = "(E|e)?-?(M|m)(A|a)(I|i)(L|l)", dane))
      if(mE > 0) {
        textE[i] <- try(clearText(dane[mE], typ="MAIL"), silent=T)
        mail[i] <- parseMail(textE[i])
      }
    }
    #print(paste(i, "MAIL"))
    #PNA (postal code): Polish "dd-ddd" format, taken from the last <div>
    #that contains one.
    if(length(grep(pattern = "[0-9][0-9]-[0-9][0-9][0-9]", dane)) > 0){
      mP <- max(grep(pattern = "[0-9][0-9]-[0-9][0-9][0-9]", dane))
      if(mP > 0) {
        textP[i] <- try(dane[mP], silent=T)
        pna[i] <- substring(textP[i], regexpr("[0-9][0-9]-[0-9][0-9][0-9]", textP[i]),
                            regexpr("[0-9][0-9]-[0-9][0-9][0-9]", textP[i])+5)
      }
    }
    #print(paste(i, "PNA"))
    #WEB BY ENIRO: site built by Eniro Polska / Panorama Firm
    if(length(grep(pattern = "Website by Eniro Polska", dane)) > 0) {
      eniro[i] <- 1
    } else {
      if(length(grep(pattern = "Stwórz własną stronę www z Panoramą Firm", dane)) > 0){
        eniro[i] <- 1
      } else {
        eniro[i] <- 0
      }
    }
    #SUNRISE: SEO footer of Sunrise System
    if(length(grep(pattern = "Za pozycjonowanie tego serwisu odpowiada Sunrise System", dane)) > 0) {
      sunrise[i] <- 1
    } else {
      sunrise[i] <- 0
    }
    #RZETELNA FIRMA certification badge
    if(length(grep(pattern = "wizytowka.rzetelnafirma.pl", dane)) > 0) {
      rzetelna[i] <- 1
    } else {
      rzetelna[i] <- 0
    }
    #KOSZYK: shop detection - count occurrences of every cart phrase in the
    #page text (dkoszyk) and in the <meta content> attributes (dmkoszyk).
    dkoszyk <- c(0,0,0,0,0,0,0,0)
    dmkoszyk <- c(0,0,0,0,0,0,0,0)
    for(ik in 1:length(fkoszyk))
    {
      dkoszyk[ik] <- length(grep(pattern = fkoszyk[ik], dane))
      dmkoszyk[ik] <- length(grep(pattern = fkoszyk[ik], danem))
    }
    #print(max(dkoszyk))
    #print(max(dmkoszyk))
    if(max(dkoszyk) > 0) {
      koszyk[i] <- 1
    } else {
      if(max(dmkoszyk) > 0) {
        koszyk[i] <- 1
      } else {
        if(length(grep(pattern = "koszyk", dane)) > 0) {
          koszyk[i] <- 1
        } else {
          koszyk[i] <- 0
        }
      }
    }
    # }
  }
  # Final validation: a phone must be exactly 9 digits, a NIP exactly 10
  # digits, and a mail must contain "@"; otherwise reset to empty markers.
  if(length(grep(pattern = "[0-9]{9}", tel[i])) == 0 || nchar(tel[i]) != 9){
    tel[i] <- 0
  }
  if(length(grep(pattern = "[0-9]{10}", nip[i])) == 0 || nchar(nip[i]) != 10){
    nip[i] <- 0
  }
  if(length(grep(pattern = "@", mail[i])) == 0){
    mail[i] <- ""
  }
  # `ver` (script version tag) is defined earlier in the file - confirm.
  wersja[i] <- ver
  #Crawl date; assigning a character coerces the whole vector to character
  #on first write.
  dataCrawling[i] <- as.character(Sys.Date())
  # Checkpoint: flush everything gathered so far every 10 domains.
  if(i %% 10 == 0) {
    tabelaOut <- data.frame(id_domena = danecsv["id_domena"]
                            , domena = danecsv["domena"]
                            , czy_dziala = urlWorks
                            , telefon = tel
                            , nip = nip
                            , mail = mail
                            , webEniro = eniro
                            , PNA = pna
                            , version = wersja
                            , sunrise = sunrise
                            , rzetelna = rzetelna
                            , koszyk = koszyk
                            , data = dataCrawling
    )
    write.csv(tabelaOut, "domeny_out.csv")
  }
  print(paste(i, "koniec"))
}
#danecsv2 <- read.csv("domeny_out.csv")
#n <- rbind(danecsv2[ , !(names(danecsv2) %in% 'X')], tabelaOut)
# Final export: assemble one row per domain from the result vectors and
# overwrite domeny_out.csv, superseding the every-10-domains checkpoints
# written inside the loop.
tabelaOut <- data.frame(id_domena = danecsv["id_domena"]
                        , domena = danecsv["domena"]
                        , czy_dziala = urlWorks
                        , telefon = tel
                        , nip = nip
                        , mail = mail
                        , webEniro = eniro
                        , PNA = pna
                        , version = wersja
                        , sunrise = sunrise
                        , rzetelna = rzetelna
                        , koszyk = koszyk
                        , data = dataCrawling
)
write.csv(tabelaOut, "domeny_out.csv")
|
98590a36fb46da0bf36bf2499027c744a7a4b025 | 9c9328e812b8d3edcd9c97af0494110ea3c64054 | /R/NBSpliceRes-getDSGenes.R | 67c76ce6f7eff28bd4cfc042f45c830996883ad9 | [] | no_license | gamerino/NBSplice | 90fb5ad934f9a118aa8e6feb1215c2764af6bf52 | ba4d4ee1c23a467eed3d0f5ad3ecc5acb000a5ec | refs/heads/master | 2021-09-08T11:30:16.736683 | 2021-09-03T14:24:59 | 2021-09-03T14:24:59 | 124,399,153 | 3 | 1 | null | null | null | null | UTF-8 | R | false | false | 1,952 | r | NBSpliceRes-getDSGenes.R | #'@title
#'Get differentially spliced genes.
#'@description
#'\code{GetDSGenes} returns the list of genes identified as differentially
#'spliced.
#'@param myNBRes NBSpliceRes class object.
#'@param adjusted Logical indicating if adjusted p values should be used.
#'@param p.value Numeric value between 0 and 1 giving the required family-wise
#'error rate or false discovery rate.
#'@return A character with the names of differentially spliced genes.
#'@include IsoDataSet-NBTest.R
#'@exportMethod GetDSGenes
#'@docType methods
#'@name GetDSGenes
#'@rdname NBSpliceRes-GetDSGenes
#'@aliases GetDSGenes-methods
#'@seealso \code{\link{NBSpliceRes}}
#'@note see full example in \code{\link{NBSpliceRes-class}}
#'@family NBSpliceRes
#'@author Gabriela A. Merino \email{merino.gabriela33@@gmail.com} and Elmer A.
#'Fernandez \email{efernandez@bdmg.com.ar}
#'@examples
#'data(myDSResults, package="NBSplice")
#'
#'myDSGenes<-GetDSGenes(myDSResults)
# Generic for extracting differentially spliced genes from an NBSpliceRes
# object; the NBSpliceRes method below supplies the implementation.
setGeneric(
  name = "GetDSGenes",
  def = function(myNBRes, adjusted = TRUE, p.value = 0.05) {
    standardGeneric("GetDSGenes")
  }
)
#'@name GetDSGenes
#'@rdname NBSpliceRes-GetDSGenes
#'@aliases GetDSGenes,NBSpliceRes-method
#'@inheritParams GetDSGenes
setMethod(f="GetDSGenes", signature="NBSpliceRes",
    definition=function(myNBRes, adjusted=TRUE, p.value=0.05){
    # Validate the flags up-front. `||` short-circuits, so an invalid
    # p.value (e.g. NULL or zero-length) is reported with the clean message
    # below instead of crashing inside the comparison itself (the original
    # used the vectorized, non-short-circuiting `|`).
    if(!is.logical(adjusted)){
        stop("The parameter 'adjusted' should be TRUE or FALSE")
    }
    if(!is.numeric(p.value) || p.value < 0 || p.value > 1){
        stop("The parameter 'p.value' should be a number between 0 and 1")
    }
    # Significant, pre-filtered results table (one row per isoform).
    sigRes<-results(myNBRes, filter=TRUE)
    # Keep genes below the threshold on the chosen gene-level statistic:
    # FDR-adjusted p-values when adjusted = TRUE, raw p-values otherwise.
    if(adjusted){
        DSGenes<-sigRes[sigRes[, "geneFDR"] < p.value & !is.na(sigRes[,
            "geneFDR"] ), "gene"]
    }else{
        DSGenes<-sigRes[sigRes[, "genePval"] < p.value & !is.na(sigRes[,
            "genePval"]), "gene"]
    }
    # De-duplicate: several isoforms of the same gene may be significant.
    return(unique(as.character(DSGenes)))
})
64719a40aa5380d95c597745d4b88b6f22ecab15 | 2b1e98bf51e01ae9c703fee411e0ffa2fd98594e | /working with external file/process_csv.R | 8d6baceaa743432a257406e8de01fdf86ebce474 | [] | no_license | Rahulkala/R-workspace | 840e3e51161958b454f5bf460c062b78b4ab2c0c | 76bf9450cf4edaf2e8d6271f3fe641436a1156c8 | refs/heads/master | 2020-07-05T09:08:34.935527 | 2016-08-21T21:19:56 | 2016-08-21T21:19:56 | 66,047,113 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 318 | r | process_csv.R | data = read.csv("input.csv")
print(data)
summary(data)
min = min(data$salary)
print(min)
minrow = subset(data, salary > min & dept == "IT")
print(minrow)
#write.csv(minrow, "output.csv")
write.csv(minrow,"output.csv",row.names = FALSE) #which will drop the col without name
op = read.csv("output.csv")
print(op)
|
8a0cdb7efdbdb449f86a9c03e4d31093e155cf35 | 9fa290918b0cc0b319d02f421763bbefa398e60d | /R/write.xlsx.R | e148a919551e126769dab7468b7fbbcb617a94a4 | [] | no_license | cran/misty | 634e5bd6bf5e317fa1f4ee1f586d5572a4e47875 | 1a42b63704bf9daf2d920312bc1f04204bac85b4 | refs/heads/master | 2023-08-31T19:04:33.782877 | 2023-08-24T07:30:05 | 2023-08-24T09:31:21 | 239,395,613 | 0 | 1 | null | null | null | null | UTF-8 | R | false | false | 3,860 | r | write.xlsx.R | #' Write Excel File
#'
#' This function calls the \code{write_xlsx()} function in the \pkg{writexl} package
#' by Jeroen Ooms to write an Excel file (.xlsx).
#'
#' This function supports strings, numbers, booleans, and dates.
#'
#' @param x a matrix, data frame or (named) list of matrices or data frames
#' that will be written in the Excel file.
#' @param file a character string naming a file with or without file extension
#' '.xlsx', e.g., \code{"My_Excle.xlsx"} or \code{"My_Excel"}.
#' @param col.names logical: if \code{TRUE}, column names are written at the top
#' of the Excel sheet.
#' @param format logical: if \code{TRUE}, column names in the Excel file are
#' centered and bold.
#' @param use.zip64 logical: if \code{TRUE}, zip64 to enable support for 4GB+ Excel
#' files is used.
#' @param check logical: if \code{TRUE}, argument specification is checked.
#'
#' @author
#' Jeroen Ooms
#'
#' @seealso
#' \code{\link{read.xlsx}}
#'
#' @references
#' Jeroen O. (2021). \emph{writexl: Export Data Frames to Excel 'xlsx' Format}.
#' R package version 1.4.0. https://CRAN.R-project.org/package=writexl
#'
#' @note
#' The function was adapted from the \code{write_xlsx()} function in the \pkg{writexl}
#' package by Jeroen Ooms (2021).
#'
#' @export
#'
#' @examples
#' \dontrun{
#' # Write Excel file (.xlsx)
#' dat <- data.frame(id = 1:5,
#' gender = c(NA, 0, 1, 1, 0),
#' age = c(16, 19, 17, NA, 16),
#' status = c(1, 2, 3, 1, 4),
#' score = c(511, 506, 497, 502, 491))
#'
#' write.xlsx(dat, file = "Excel.xlsx")
#'
#' # Write Excel file with multiple sheets (.xlsx)
#' write.xlsx(list(cars = cars, mtcars = mtcars), file = "Excel_Sheets.xlsx")
#' }
write.xlsx <- function(x, file = "Excel_Data.xlsx", col.names = TRUE, format = FALSE,
                       use.zip64 = FALSE, check = TRUE) {
  #_____________________________________________________________________________
  #
  # Input Check ----------------------------------------------------------------
  # Check if input 'x' is missing
  if (isTRUE(missing(x))) { stop("Please specify a matrix, data frame or list of matrices or data frames for the argument 'x'.", call. = FALSE) }
  # Check if input 'x' is NULL
  if (isTRUE(is.null(x))) { stop("Input specified for the argument 'x' is NULL.", call. = FALSE) }
  # Check input 'check'
  if (isTRUE(!is.logical(check))) { stop("Please specify TRUE or FALSE for the argument 'check'.", call. = FALSE) }
  if (isTRUE(check)) {
    #......
    # Check input 'col.names'
    if (isTRUE(!is.logical(col.names))) { stop("Please specify TRUE or FALSE for the argument 'col.names'.", call. = FALSE) }
    #......
    # Check input 'use.zip64'
    if (isTRUE(!is.logical(use.zip64))) { stop("Please specify TRUE or FALSE for the argument 'use.zip64'.", call. = FALSE) }
  }
  #_____________________________________________________________________________
  #
  # Arguments ------------------------------------------------------------------
  # Append the '.xlsx' extension only when the name does not already END with
  # it. The original test, grep(".xlsx", file), treated '.' as "any character"
  # and matched anywhere in the name, so e.g. "Myxlsxdata" was wrongly taken
  # as already carrying the extension.
  if (!grepl("\\.xlsx$", file)) { file <- paste0(file, ".xlsx") }
  # Matrices are coerced to data frames (writexl only writes data frames);
  # a list input is converted element-wise when any element is a matrix.
  if (is.list(x)) {
    if (any(vapply(x, is.matrix, logical(1)))) {
      x <- lapply(x, as.data.frame)
    }
  } else {
    if (is.matrix(x)) {
      x <- as.data.frame(x)
    }
  }
  #_____________________________________________________________________________
  #
  # Main Function --------------------------------------------------------------
  writexl::write_xlsx(x = x, path = file, col_names = col.names, format_headers = format,
                      use_zip64 = use.zip64)
}
|
8257f80560a478ec19863bd78cecf5e5b5cf5d2c | e88e08d7c9779690bd5fb64c73db822c688cd886 | /025/025.R | 1b376858afdf854e11fcd60f5baa17bad0022b09 | [] | no_license | gslinger/project_euler | 071b20884e3db4df51eda21a1710b13bfe962555 | e49436eb37ca97ba6f1e57d46952becee0d812f3 | refs/heads/master | 2023-04-12T10:51:58.971176 | 2021-05-06T17:52:54 | 2021-05-06T17:52:54 | 93,344,995 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 66 | r | 025.R | n<-1000
as.integer((log(10)*(n-1)+log(5)/2)/log((1+sqrt(5))/2))+1
|
d480ab9fe5a3db64cc3f7fc12851e1ac4c1ef53b | 137971b0047936bb43062f81dedf6ac5a1fe1b00 | /examples/example4.R | 581fa8ca0f80abb11388b034acb85fde33d08eb7 | [] | no_license | predsci/DRAFT-BSEIR | d04c25783d0bc4d0ef8e39edc2b9a13a0626b619 | 1176db45464ccb5bb28350693c20b9146e0ab00f | refs/heads/master | 2022-02-06T05:32:01.086696 | 2019-04-11T18:56:26 | 2019-04-11T18:56:26 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 438 | r | example4.R | rm(list=ls())
library(DRAFT)
##
## Forecast example bSIR model -dp, dq ts and dL are all provided
## ----------------------------------------------------------------
pop = 4035777
filename = 'lr_ebola_forecast.csv'
epi_model = 3
Tg = 12
sigma = NULL
ts = "2014-07-30"
dL = 20
dp = 0.002
dq = 0.45
results <- runbSEIR(filename = filename, epi_model = epi_model, pop = pop, Tg = Tg, dp = dp, dq = dq, ts = ts, dL = dL)
|
265d2a5f626ca7a1c75cd4373c2309c828602541 | 15cdbf21d3c21ad403f6c680c80d052a9edcb4ad | /2015_SISBID_Module4_Dimension_Reduction_demo.R | cf342f342a280b00ac8653cc98929641789059af | [] | no_license | crazyhottommy/Module4 | c504043ccde7608ac03feab24a311e112ff72525 | 07aa5332e8dee23599409e217b1ec0470c74d65a | refs/heads/gh-pages | 2020-12-11T01:48:45.883164 | 2015-07-15T17:32:34 | 2015-07-15T17:32:34 | 39,236,378 | 1 | 1 | null | 2015-07-17T05:22:41 | 2015-07-17T05:22:40 | null | UTF-8 | R | false | false | 5,623 | r | 2015_SISBID_Module4_Dimension_Reduction_demo.R | #############################################################
#2015 SISBID Module 4 - Unsupervised Learning
#Genevera I. Allen & Yufeng Liu
#Dimension Reduction Demos for use in lecture
############################################################
########################################################
#Data set 1 - College Data
#Small data set to understand R's built in PCA functions
#princomp & prcomp
##########################################################
#read in data
library(ISLR)
data(College)
cdat = College[,2:18]
dim(cdat)
names(cdat)
#PCA
pc = princomp(cdat) #default - centers and scales
#default R plots with princomp
biplot(pc,cex=.7)
screeplot(pc)
#scatter plots - patterns among observations
i = 1; j = 2;
plot(pc$scores[,i],pc$scores[,j],pch=16,cex=.2)
text(pc$scores[,i],pc$scores[,j],rownames(cdat),cex=.6)
#look at a particular college
ind = match("Harvard University",rownames(cdat))
text(pc$scores[ind,i],pc$scores[ind,j],rownames(cdat)[ind],cex=.7,col=2)
#loadings - variables that contribute to these patterns
par(mfrow=c(2,1))
barplot(pc$loadings[,1],cex.names=.6,main="PC 1 Loadings")
barplot(pc$loadings[,2],cex.names=.6,main="PC 2 Loadings")
#variance explained
screeplot(pc)
varex = 100*pc$sdev^2/sum(pc$sdev^2)
plot(varex,type="l",ylab="% Variance Explained",xlab="Component")
#cumulative variance explained
cvarex = NULL
for(i in 1:ncol(cdat)){
cvarex[i] = sum(varex[1:i])
}
plot(cvarex,type="l",ylab="Cumulative Variance Explained",xlab="Component")
######
#sparse PCA on the standardized College data; sumabsv is the L1 bound on
#each loading vector (smaller -> sparser), K = number of components.
library(PMA)
spc = SPC(scale(cdat),sumabsv=2,K=3)
spcL = spc$v
rownames(spcL) = names(cdat)
#scatterplots of Sparse PCs (spc$u holds the sample scores)
i = 1; j = 2;
plot(spc$u[,i],spc$u[,j],pch=16,cex=.2)
text(spc$u[,i],spc$u[,j],rownames(cdat),cex=.6)
#loadings - most entries are exactly zero by construction
par(mfrow=c(2,1))
barplot(spc$v[,1],names=names(cdat),cex.names=.6,main="SPC 1 Loadings")
barplot(spc$v[,2],names=names(cdat),cex.names=.6,main="SPC 2 Loadings")
#variance explained
spc$prop.var.explained
##########################################################
#Dataset 2 - NCI Microarray Data (genes x 64 cell lines)
#Understand PCA and Sparse PCA
#PCA solution via the SVD
###########################################################
require("ISLR")
ncidat = t(NCI60$data)
colnames(ncidat) = NCI60$labs
dim(ncidat)
unique(colnames(ncidat))
#PCA - take SVD to get solution
#center genes (rows), but don't scale
X = t(scale(t(ncidat),center=TRUE,scale=FALSE))
sv = svd(t(X));
U = sv$u
V = sv$v
D = sv$d
Z = t(X)%*%V;
#PC scatterplots: consecutive PC pairs, samples colored by cancer type
cols = as.numeric(as.factor(colnames(ncidat)))
K = 3
pclabs = c("PC1","PC2","PC3","PC4")
par(mfrow=c(1,K))
for(i in 1:K){
  j = i+1
  plot(U[,i],U[,j],type="n",xlab=pclabs[i],ylab=pclabs[j])
  text(U[,i],U[,j],colnames(X),col=cols)
}
#PC loadings - visualize data by limiting to top genes in magnitude in the PC loadings
#(grey/green/red palette built from colors(); index positions may shift
#across R versions - confirm if the heatmap palette looks off)
aa = grep("grey",colors())
bb = grep("green",colors())
cc = grep("red",colors())
gcol2 = colors()[c(aa[1:30],bb[1:20],rep(cc,2))]
j = 2
ord = order(abs(V[,j]),decreasing=TRUE)
x = as.matrix(X[ord[1:250],])
heatmap(x,col=gcol2)
#Variance Explained: squared singular values over their total
varex = 0
cumvar = 0
denom = sum(D^2)
for(i in 1:64){
  varex[i] = D[i]^2/denom
  cumvar[i] = sum(D[1:i]^2)/denom
}
#screeplot
par(mfrow=c(1,2))
plot(1:64,varex,type="l",lwd=2,xlab="PC",ylab="% Variance Explained")
plot(1:64,cumvar,type="l",lwd=2,xlab="PC",ylab="Cummulative Variance Explained")
#Sparse PCA
require("PMA")
spc = SPC(t(X),sumabsv=10,K=4)
#how many genes selected?
apply(spc$v!=0,2,sum)
#PC scatterplots
cols = as.numeric(as.factor(colnames(ncidat)))
K = 3
pclabs = c("SPC1","SPC2","SPC3","SPC4")
par(mfrow=c(1,K))
for(i in 1:K){
j = i+1
plot(spc$u[,i],spc$u[,j],type="n",xlab=pclabs[i],ylab=pclabs[j])
text(spc$u[,i],spc$u[,j],colnames(X),col=cols)
}
#SPC loadings - visualize data by limiting to gene selected by the sparse PC loadings
aa = grep("grey",colors())
bb = grep("green",colors())
cc = grep("red",colors())
gcol2 = colors()[c(aa[1:30],bb[1:20],rep(cc,2))]
j = 1
ind = which(spc$v[,j]!=0)
x = as.matrix(X[ind,])
heatmap(x,col=gcol2)
#variance explained
spc$prop.var.explained
##########################################################
#Dataset 3 - Digits Data
#Here only use 3's to compare and contrast PCA, NMF and ICA
###########################################################
#UnsupL.Rdata is assumed to provide `digits` (pixel matrix, digits as row
#names) and the `imagedigit()` plotting helper - confirm before running.
load("UnsupL.Rdata")
#pull out 3's
dat3 = digits[which(rownames(digits)==3),]
#visualize a few examples
par(mfrow=c(3,4))
for(i in 1:12){
  imagedigit(dat3[i,])
}
#PCA - take SVD to get solution
#don't center and scale to retain interpretation as images
svd3 = svd(dat3)
U = svd3$u
V = svd3$v #PC loadings
D = svd3$d
Z = dat3%*%V #PCs
#PC scatterplot
par(mfrow=c(1,1))
plot(Z[,2],Z[,3],pch=16)
#PC loadings rendered as images ("eigendigits")
par(mfrow=c(1,4))
for(i in 1:4){
  imagedigit(V[,i])
}
#Variance Explained
varex = 0
cumvar = 0
denom = sum(D^2)
for(i in 1:256){
  varex[i] = D[i]^2/denom
  cumvar[i] = sum(D[1:i]^2)/denom
}
#screeplot
par(mfrow=c(1,2))
plot(1:256,varex,type="l",lwd=2,xlab="PC",ylab="% Variance Explained")
plot(1:256,cumvar,type="l",lwd=2,xlab="PC",ylab="Cummulative Variance Explained")
cumvar[25] #first 25 PCs explain over 90% of variance
pdat3 = dat3%*%V[,1:25] #projected data - a tenth of the original size
#######
#now NMF (non-negative matrix factorization; +1 shifts pixels to be strictly
#positive as required by the multiplicative updates)
require("NMF")
K = 10
nmffit = nmf(dat3+1,rank=K)
W = basis(nmffit)
H = coef(nmffit)
#plot archetypes - try changing K
par(mfrow=c(3,5))
for(i in 1:K){
  imagedigit(H[i,])
}
###########
#now ICA
require("fastICA")
K = 10
icafit = fastICA(t(dat3),n.comp=K)
#plot independent source signals - try changing K
par(mfrow=c(3,5))
for(i in 1:K){
  imagedigit(icafit$S[,i])
}
#################################################
fa07adaac7fbcdc3fe516eb7410eb699c2f367da | ffdea92d4315e4363dd4ae673a1a6adf82a761b5 | /data/genthat_extracted_code/solaR/examples/writeSolar.Rd.R | 4eb1966e2a1046c152bb7709ab78098ecd0fa585 | [] | no_license | surayaaramli/typeRrh | d257ac8905c49123f4ccd4e377ee3dfc84d1636c | 66e6996f31961bc8b9aafe1a6a6098327b66bf71 | refs/heads/master | 2023-05-05T04:05:31.617869 | 2019-04-25T22:10:06 | 2019-04-25T22:10:06 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 854 | r | writeSolar.Rd.R | library(solaR)
### Name: D_writeSolar-methods
### Title: Exporter of solaR results
### Aliases: writeSolar writeSolar-methods writeSolar,Sol-method
### Keywords: methods
### ** Examples
lat <- 37.2;
G0dm <- c(2766, 3491, 4494, 5912, 6989, 7742, 7919, 7027, 5369, 3562, 2814, 2179)
Ta <- c(10, 14.1, 15.6, 17.2, 19.3, 21.2, 28.4, 29.9, 24.3, 18.2, 17.2, 15.2)
prom <- list(G0dm=G0dm, Ta=Ta)
prodFixed <- prodGCPV(lat=lat, dataRad=prom, modeRad='aguiar', keep.night=FALSE)
old <- setwd(tempdir())
writeSolar(prodFixed, 'prodFixed.csv')
dir()
zI <- read.zoo("prodFixed.csv",
header = TRUE, sep = ",",
FUN = as.POSIXct)
zD<- read.zoo("prodFixed.D.csv",
header = TRUE, sep = ",")
zD<- read.zoo("prodFixed.D.csv",
header = TRUE, sep = ",",
FUN = as.yearmon)
setwd(old)
|
7e0564c0a052496f292cf4ac73d0ce84911c7a51 | ca599f2d2b25bfc5a06da1a6fffac177ced5f887 | /man/data.disabilityWeights.Rd | 3160b16f7eda8d8add6ba847061501a1e543dc60 | [] | no_license | lshtm-vimc/vimr | 4c6e4c1ad33c97927a2a492bd541d7344e6c2f8a | 1ec0049a669b37d454dacccc14e91f37918fcca6 | refs/heads/master | 2022-01-15T04:33:12.645538 | 2019-05-03T09:09:15 | 2019-05-03T09:09:15 | 159,156,196 | 2 | 0 | null | null | null | null | UTF-8 | R | false | true | 968 | rd | data.disabilityWeights.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/data.R
\docType{data}
\name{data.disabilityWeights}
\alias{data.disabilityWeights}
\title{Disability weights for RV, HPV, Hib and Sp}
\format{An object of class \code{data.table} (inherits from \code{data.frame}) with 19 rows and 7 columns.}
\source{
{Global, regional, and national incidence, prevalence, and years lived
with disability for 354 diseases and injuries for 195 countries, 1990–2017:
a systematic analysis for the Global Burden of Disease Study 2017}
\url{http://ghdx.healthdata.org/record/global-burden-disease-study-2017-gbd-2017-disability-weights}
}
\usage{
data.disabilityWeights
}
\description{
Data: Disability weights for Rotavirus (RV), Human papillomavirus (HPV),
Haemophilus influenzae type b (Hib) and Streptococcus pneumoniae (Sp).
The weights are measured on a scale from 0 to 1, where 0 equals a state of
full health and 1 equals death.
}
\keyword{datasets}
|
3bf61081e507a0ef4b0b27d78d7262c28a535523 | 0a906cf8b1b7da2aea87de958e3662870df49727 | /distr6/inst/testfiles/C_EmpiricalMVPdf/libFuzzer_C_EmpiricalMVPdf/C_EmpiricalMVPdf_valgrind_files/1610036130-test.R | ba5adc1a67c5400daa43dba718097513365e742f | [] | no_license | akhikolla/updated-only-Issues | a85c887f0e1aae8a8dc358717d55b21678d04660 | 7d74489dfc7ddfec3955ae7891f15e920cad2e0c | refs/heads/master | 2023-04-13T08:22:15.699449 | 2021-04-21T16:25:35 | 2021-04-21T16:25:35 | 360,232,775 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 308 | r | 1610036130-test.R | testlist <- list(data = structure(c(Inf, NaN, 4.94065645841247e-324, 4.94065645841247e-324 ), .Dim = c(2L, 2L)), x = structure(c(1.06559816877004e-255, 6.47981537056463e-198, 4.94065645841247e-324, 4.94065645841247e-324 ), .Dim = c(4L, 1L)))
result <- do.call(distr6:::C_EmpiricalMVPdf,testlist)
str(result) |
64609fd37fcd80c47db8071fb35eec862699900c | cc937b546ce7a37ebbdd2bbe00073715a819c673 | /Final_ARIMA.R | 017523df13c3b8de40bf752852792cabc4012198 | [] | no_license | kevintchou/TAMIDS-Competition | 0f06a31d4ce3b5426daa9cdcf8ade7b2679110f4 | 280d336c335b482e7ae2b4c47be885d969fa0153 | refs/heads/master | 2022-07-16T04:23:04.983588 | 2018-05-01T19:47:32 | 2018-05-01T19:47:32 | 128,587,812 | 2 | 0 | null | null | null | null | UTF-8 | R | false | false | 5,897 | r | Final_ARIMA.R | setwd("C:/Users/Jose/Documents/Comp_Data/Taxi_Data/ARIMA Data")
library(data.table)
library(dplyr)
library(magrittr)
library(MASS)
library(ggplot2)
library(gridExtra)
library(randomForest)
library(mlr)
library(forecast)
library(tseries)
#taxi_2013 <- fread('Chicago_taxi_trips2013.csv')[,c(2,3,13)]
#taxi_2014 <- fread('Chicago_taxi_trips2014.csv')[,c(2,3,12)]
#taxi_2015 <- fread('Chicago_taxi_trips2015.csv')[,c(2,3,12)]
#taxi_2016 <- fread('Chicago_taxi_trips2016.csv')[,c(2,3,12)]
taxi_2017 <- fread('Chicago_taxi_trips2017.csv')[,c(2,3,12)]
taxi_2017COPY <- taxi_2017
#names(taxi_2014) <- names(taxi_2013)
#names(taxi_2015) <- names(taxi_2013)
#names(taxi_2016) <- names(taxi_2013)
#names(taxi_2017) <- names(taxi_df[,1:3])
#taxi_df <- data.table(rbindlist(list(taxi_2013, taxi_2014, taxi_2015, taxi_2016)))
#taxi_df <- na.omit(taxi_df)
# taxi_2017COPY$Fare <- ifelse(taxi_2017COPY$Fare, NA, taxi_2017COPY$Fare)
taxi_2017COPY$Fare[taxi_2017COPY$Fare == ""] <- NA
taxi_2017COPY <- na.omit(taxi_2017COPY)
#date_time <- strptime(taxi_df$`Trip Start Timestamp`, '%m/%d/%Y %I:%M:%S %p')
date_time_2017 <- strptime(taxi_2017COPY$`Trip Start Timestamp`, '%m/%d/%Y %I:%M:%S %p')
# taxi_2017[, `:=` (Month = as.numeric(strftime(date_time_2017, '%m')),
# Year = as.numeric(strftime(date_time_2017, '%Y')),
# Day = as.numeric(strftime(date_time_2017, "%d")),
# Date = as.Date(strftime(date_time_2017, "%Y-%m-%d")),
# ]
taxi_2017COPY$Month <- as.numeric(strftime(date_time_2017, '%m'))
taxi_2017COPY$Year <- as.numeric(strftime(date_time_2017, '%Y'))
taxi_2017COPY$Day <- as.numeric(strftime(date_time_2017, "%d"))
taxi_2017COPY$Date <- strftime(date_time_2017, "%Y-%m-%d")
#taxi_2017$X <- 1:nrow(taxi_2017)
# Daily median of per-taxi total fares ("$" stripped before summing).
taxi_med17 <- taxi_2017COPY[, .(TotalFare = sum(as.numeric(gsub('[$]', '', Fare))), Date), by = list(`Taxi ID`, Month, Day, Year)][, .(Median = median(TotalFare)), by = Date][order(Date), ]
# taxi_med_2017 <- taxi_2017[, .(TotalFare = sum(as.numeric(gsub('[$]', '', Fare))),
#   Date), by = list(`Taxi ID`, Month, Day, Year)][, .(Median = median(TotalFare)), by = Date][order(Date), ]
# $Date <- as.Date(with(taxi_med, paste(Year, Month, Day,sep="-")), "%Y-%m-%d")
# Fare_Data <- taxi_med
taxi_med_2017 <- taxi_2017[, .(TotalFare = sum(as.numeric(gsub('[$]', '', Fare)))), by = list(`Taxi ID`, Month, Day, Year)][, .(Median = median(TotalFare)), by = list(Month, Day, Year)][order(Year, Month, Day), ]
# NOTE(review): `taxi_med` is never created in the active code (only in the
# commented-out multi-year pipeline above), so the next line errors unless a
# previous session defined it. The same applies to `Fare_Data`, `med_ts`,
# `med_ma` and `decomp` used below - this script assumes prior session state.
taxi_med$Date <- as.Date(with(taxi_med, paste(Year, Month, Day,sep="-")), "%Y-%m-%d")
#med_ts <- ts(Fare_Data[,c('Median')])
#Fare_Data$clean_med <- tsclean(med_ts)
ggplot() +
  geom_line(data = Fare_Data, aes(x = Date, y = clean_med)) + ylab('Cleaned Taxi Median Fare') +
  ggtitle("Median Daily Fare") + theme(plot.title = element_text(hjust = .5))
## Calculates Moving averages for Month and Week
#Fare_Data$med_ma = ma(Fare_Data$clean_med, order = 7) # using the clean count with no outliers
#Fare_Data$med_ma30 = ma(Fare_Data$clean_med, order = 30)
## Plots Counts with Moving AVerage for Week and Month
ggplot() +
  geom_line(data = Fare_Data, aes(x = Date, y = clean_med, colour = "Median Taxi Fare")) +
  geom_line(data = Fare_Data, aes(x = Date, y = med_ma, colour = "Weekly Moving Average")) +
  geom_line(data = Fare_Data, aes(x = Date, y = med_ma30, colour = "Monthly Moving Average")) +
  ylab('Taxi Fare') +
  ggtitle("Median Daily Fare") + theme(plot.title = element_text(hjust = .5))
## Calculates Seasonal components using stl
#med_ma <- ts(na.omit(Fare_Data$med_ma), frequency=52)
#decomp <- stl(med_ma, s.window="periodic")
## seasadj() removes the seasonality by subtracting the seasonal component from the original series
deseasonal_med <- seasadj(decomp)
plot(decomp, main = 'Median Fare Decomposition')
plot(deseasonal_med, main = 'Deseasonal Moving Average')
adf.test(med_ma, alternative = "stationary")
adf.test(deseasonal_med, alternative = "stationary")
Acf(med_ma, main='Median Moving Average') ## EXAMPLE PURPOSES ONLY NOT ANALYSIS
Pacf(med_ma, main='Median Moving Average') ## SEE ABOVE
med_d1 <- diff(deseasonal_med,difference = 1)
plot(med_ma)
plot(med_d1, main = 'Differentiated Moving Average', ylab = '')
adf.test(med_d1, alternative = 'stationary')
Acf(med_d1, main='Differenced Median Moving Average')
Pacf(med_d1, main='Differenced Median Moving Average')
#Arima_med_naive <- Arima(ts(deseasonal_med, frequency = 365), order = c(3,1,8))
#Med_Forecast_naive <- forecast(Arima_med_naive, h = 365)
plot(Med_Forecast_naive, main = 'Naive Model: ARIMA(3,1,8)', ylab = 'Median Daily Fare in $', xaxt = 'n')
axis(1, at=1:6, labels=2013:2018)
#Arima_med_naive_seas <- Arima(ts(deseasonal_med, frequency = 365), order = c(3,1,8), seasonal = c(0,1,0))
#Med_Forecast_naive_seas <- forecast(Arima_med_naive_seas, h = 365)
plot(Med_Forecast_naive_seas, main = 'Naive Seasonal Model: ARIMA(3,1,8)', ylab = 'Median Daily Fare in $', xaxt = 'n')
#autoarima <- auto.arima(deseasonal_med, D = 1, approximation = TRUE)
#equivalent to autoarima <- Arima(ts(deseasonal_med,frequency = 365), order = c(4,1,4), seasonal = c(0,1,0))
plot(forecast(autoarima, h = 365), main = 'Stepwise AIC-Minimized Seasonal Model: ARIMA(4,1,4)', ylab = 'Median Daily Fare in $', xaxt = 'n')
taxi_med17$Fitted <- tail(Med_Forecast_naive_seas$fitted, n = 212)
taxi_med17$AAFitted <- tail(autoarima$fitted, n = 212)
ggplot(taxi_med17) + aes(x = Date, y = Median) + geom_point() + geom_smooth(method = "lm")
plot_internal
ggplot(taxi_med17) + aes(x = Date) + geom_point(aes(y = Median)) + geom_point(aes(y = Fitted), color = "purple") + geom_point(aes(y = AAFitted), color = "red") + geom_segment(aes(xend = Date, y = Fitted, yend = Median), arrow = arrow(length = unit(0.4, "line")), color = "blue")
#tsfit <- tslm(Median~trend + season, data = ts(Fare_Data, freq = 365))
tsfit
|
50afbf72fe39ba93c0817c2d4a2537bd9e5744e7 | bff31d4717246131800a76585b5a516e890e9276 | /averageCalculator.R | eb0f95f3c7ae0db8522c9cc63f1ecae20c6e35b7 | [] | no_license | deriggi/AR5-World-Bank | fbf15007fcb4084c4eb0d5e1d0a9c5d9a9cfac21 | ff8c5666c8f4d70a47b3f9a914034230eb63fbe9 | refs/heads/master | 2021-01-15T08:13:30.041047 | 2015-01-17T23:31:01 | 2015-01-17T23:31:01 | 18,795,854 | 1 | 3 | null | null | null | null | UTF-8 | R | false | false | 2,536 | r | averageCalculator.R | library(raster)
doIt <- function (thevar, arealUnit){
  # Append the per-month mean cell value of every "monthtrendstacked"
  # raster for one climate variable / areal-unit combination to a
  # model_means.csv file living next to the rasters.
  #
  # thevar    : climate variable folder name, e.g. "pr" or "tas"
  # arealUnit : areal-unit folder name, e.g. "countries" or "regions"
  #
  # Side effects only: prints progress to stdout and appends one
  # "<raster>,<boundary code>,<month>,<mean>" line per raster band.
  rootDir <- paste0("F:/climate/monthly/", thevar, "/", arealUnit, "/")
  outcsv <- paste0(rootDir, "model_means.csv")
  years <- c("20", "40", "60", "80")
  template <- "monthtrendstacked_"
  # Process the boundary codes in sorted order.  The original called
  # sort(boundarycodes) without assigning the result, so the sort was a
  # no-op; assign it so the intended ordering actually takes effect.
  boundarycodes <- sort(list.files(rootDir, full.names = FALSE))
  months <- 1:12
  for (acode in boundarycodes){
    for (y in years){
      fullPath <- paste0(rootDir, acode, "/", template, y, "_/")
      write(fullPath, stdout())
      rasterfiles <- list.files(fullPath, full.names = FALSE, pattern = ".*\\.tif$")
      for (razzy in rasterfiles){
        write(razzy, stdout())
        for (month in months){
          # One band per month in each stacked GeoTIFF.
          themean <- cellStats(raster(paste0(fullPath, razzy), band = month), stat = 'mean')
          write(paste(razzy, acode, month, themean, sep = ","),
                file = outcsv, append = TRUE, sep = "\n")
        }
      }
      write('', stdout())  # blank separator line on stdout between stacks
    }
  }
}
doItEnsembleStyle <- function (thevar, arealUnit){
  # Append the per-month mean cell value of every "ensemblestacked"
  # raster for one climate variable / areal-unit combination to an
  # ensemble_means.csv file next to the rasters.  Side effects only.
  rootDir <- paste0("F:/climate/monthly/", thevar, "/", arealUnit, "/")
  outcsv <- paste0("F:/climate/monthly/", thevar, "/", arealUnit, "/ensemble_means.csv")
  decades <- c("10", "50", "90")
  stem <- "ensemblestacked_"
  codes <- list.files(rootDir, full.names = FALSE)
  for (code in codes){
    for (dec in decades){
      stackDir <- paste0(rootDir, code, "/", stem, dec, "/")
      write(stackDir, stdout())
      tifs <- list.files(stackDir, full.names = FALSE, pattern = ".*\\.tif$")
      for (tif in tifs){
        write(tif, stdout())
        # One band per month in each stacked GeoTIFF.
        for (m in 1:12){
          bandMean <- cellStats(raster(paste0(stackDir, tif), band = m), stat = 'mean')
          write(paste(tif, code, m, bandMean, sep = ","),
                file = outcsv, append = TRUE, sep = "\n")
        }
      }
      write('', stdout())
    }
  }
}
# firstCellAverage <- function (){
# rootDir <- "F:/climate/monthly/pr/countries/BGD/ensemblestacked_50_/converted/"
# allFiles <- list.files(rootDir, full.names=FALSE, pattern=".*\\.tif$")
# months <- c(1:12)
# for ( aFile in allFiles ){
# fullPath <- paste(rootDir,aFile,sep="")
# write(aFile,stdout())
# for (month in months){
# theval <- getValues ( raster(fullPath, band=month) )[1]
# write( theval, stdout())
# }
# write('', stdout())
# }
# }
# Batch run: append ensemble means for each climate variable at region level.
doItEnsembleStyle('pr', 'regions')
doItEnsembleStyle('tas', 'regions')
doItEnsembleStyle('tasmin', 'regions')
doItEnsembleStyle('tasmax', 'regions')
7be2da7ecab96ba05348a057a9ffd042ff195452 | 8d28b939007e0887f3a1af5b54a24c68dd3d4204 | /R/cross.gammas.R | 2069a6a22bcba40a6ea7134a5d54903f0df712d0 | [] | no_license | cran/VGAMextra | 897c59ab2b532b0aa1d4011130db79f5c95eb443 | ac7e3df54136fd4c9e49b754f6747a11d7c3b122 | refs/heads/master | 2021-06-06T03:52:23.167971 | 2021-05-24T03:10:07 | 2021-05-24T03:10:07 | 138,900,855 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 966 | r | cross.gammas.R | ##########################################################################
# These functions are
# Copyright (C) 2014-2020 V. Miranda & T. Yee
# Auckland University of Technology & University of Auckland
# All rights reserved.
#
### In this function vector'y' is lagged --> yy[1:(n - ii) , 1] ###
cross.gammas <- function(x, y = NULL, lags = 1) {
  ## Sample covariances of 'x' with 'y' (or of 'x' with itself when 'y'
  ## is NULL) at lags 0, 1, ..., 'lags'.  The second series is the one
  ## that gets lagged: element k + 1 is cov(x[-(1:k)], y[1:(n - k)]).
  xmat <- matrix(x, ncol = 1)
  nx <- nrow(xmat)
  if (lags < 0 || !Is.Numeric(lags, isInteger = TRUE))
    stop("'lags' must be a non-negative integer.")
  if (length(y)) {
    ymat <- matrix(y, ncol = 1)
    if (nx != nrow(ymat))
      stop("Number of rows differs.")
    n <- nx
  } else {
    ymat <- xmat
    n <- nx
  }
  out <- numeric(lags + 1)
  ## Lag 0 first: plain covariance (i.e. the variance when 'y' is NULL).
  out[1] <- if (length(y)) cov(xmat, ymat) else cov(xmat, xmat)
  if (lags > 0)
    for (k in 1:lags)
      out[k + 1] <- cov(xmat[-(1:k), 1], ymat[1:(n - k), 1])
  out
}
|
01f7b842bfa85e0584d5c83619b51f655fd61b82 | a1a68bf3675c0d01bef17bc2ab2291529b718644 | /.tmp/hrapgc.find.pos.R | b97c6745cbbe26f6ba41678cc8bcd5582f2564db | [] | no_license | Tuxkid/Gems | 3d3c1aac6df3f11cf209cf33da23c214b26ff5c6 | c0c5dfe02c826030e6e1c97ab8cd8191caacd33f | refs/heads/master | 2021-01-10T15:17:41.468718 | 2020-08-13T03:15:14 | 2020-08-13T03:15:14 | 50,632,804 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 167 | r | hrapgc.find.pos.R | find.pos <-
structure(function(y, inn)
{
  ## Return the positions in the vector "inn" of the elements that also
  ## occur anywhere in "y".
  hits <- match(inn, y)
  which(!is.na(hits))
}
, comment = "30/04/2001")
|
ce47f2bf13ad453cee3d3b12028098e63f7e0041 | bcbd8d904135997b10593d6b8e9a6b07c6b7a9a7 | /slice2D.R | 70f71bd2070fc79bc60d8c490ac53abd246bf7f0 | [] | no_license | dushoff/scratch | ccbde215751069d775977c119b807461e7f9e3c3 | 9d56393cabb198f618f3b5a1d71c0614a49aa5be | refs/heads/master | 2022-11-09T11:06:35.467688 | 2022-10-21T19:48:12 | 2022-10-21T19:48:12 | 58,003,373 | 0 | 2 | null | 2017-08-01T15:30:44 | 2016-05-03T21:41:25 | HTML | UTF-8 | R | false | false | 3,583 | r | slice2D.R | ## hacked version of
## Local/hacked copy of a 2-D slice routine (uses bbmle's unexported
## helpers).  For every pair (i, j) of elements of 'params' it evaluates
## 'fun' on an nt x nt grid over each parameter's range and collects the
## results into a "slice" object for later plotting (see splom.slice).
##   params  : named numeric vector of parameter values (slice centres)
##   fun     : objective function taking a full parameter vector
##   nt      : number of grid points per axis
##   lower, upper, cutoff : forwarded to bbmle:::get_all_trange when
##             'tranges' is not supplied
##   verbose : print progress for each parameter pair
##   tranges : optional precomputed range matrix (one row per parameter)
slice2D <- function (params, fun, nt = 31,
                     lower = -Inf, upper = Inf, cutoff = 10,
                     verbose = TRUE, tranges = NULL, ...)
{
    npv <- length(params)
    # Fall back to positional labels when the parameter vector is unnamed.
    if (is.null(pn <- names(params)))
        pn <- seq(npv)
    if (is.null(tranges)) {
        # Let bbmle pick a sensible evaluation range for each parameter.
        tranges <- bbmle:::get_all_trange(
            params, fun, rep(lower, length.out = npv),
            rep(upper, length.out = npv),
            cutoff = cutoff,...)
    }
    slices <- list()
    # Upper-triangle loop: one slice per unordered parameter pair (i < j).
    for (i in 1:(npv - 1)) {
        slices[[i]] <- vector("list", npv)
        for (j in (i + 1):npv) {
            if (verbose)
                cat("param", i, j, "\n")
            t1vec <- seq(tranges[i, 1], tranges[i, 2], length = nt)
            t2vec <- seq(tranges[j, 1], tranges[j, 2], length = nt)
            mtmp <- matrix(nrow = nt, ncol = nt)
            # Evaluate 'fun' with parameters i and j replaced by each
            # grid point, all other parameters held at 'params'.
            for (t1 in seq_along(t1vec)) {
                for (t2 in seq_along(t2vec)) {
                  mtmp[t1, t2] <- fun(bbmle:::mkpar(params, c(t1vec[t1],
                    t2vec[t2]), c(i, j)), ...)
                }
            }
            # Long-format data frame: one row per grid cell.
            slices[[i]][[j]] <- data.frame(var1 = pn[i], var2 = pn[j],
                expand.grid(x = t1vec, y = t2vec), z = c(mtmp))
        }
    }
    r <- list(slices = slices, ranges = tranges, params = params,
        dim = 2)
    class(r) <- "slice"
    r
}
## splom (scatter-plot matrix) method for "slice" objects: draws a
## lattice levelplot of each pairwise slice in the upper triangle.
##   scale.min : shift all z values so the overall minimum is zero
##   at        : level/contour breaks (derived from 'dstep' when NULL)
##   dstep     : spacing of the default breaks (NA triggers a fallback)
##   contour   : draw contour lines on each panel?
##   log       : include "z" in this string to log-transform z values
## 'which.x' and 'which.y' are accepted but never used in the body.
splom.slice <- function (x, data, scale.min = TRUE, at = NULL, which.x = NULL,
    which.y = NULL, dstep = 4, contour = FALSE, log="", ...)
{
    logz <- grepl("z",log)
    ## dst
    if (x$dim == 1)
        stop("can't do splom on 1D slice object")
    # One column per parameter: its plotting range.
    smat <- t(x$ranges[, 1:2])
    if (scale.min) {
        # Gather every z value across all non-NULL panels.
        all.z <- unlist(sapply(x$slices, function(x) {
            sapply(x, function(x) if (is.null(x))
                NULL
            else x[["z"]])
        }))
        min.z <- min(all.z[is.finite(all.z)])
        if (is.na(dstep)) {
            ## failsafe
            dstep <- diff(range(all.z[is.finite(all.z)]))/10
        }
        # Round the break ceiling up to the next multiple of dstep.
        max.z <- dstep * ((max(all.z[is.finite(all.z)]) - min.z)%/%dstep +
            1)
        if (missing(at)) {
            at <- seq(0, max.z, by = dstep)
        }
        # Shift every panel so the global minimum maps to zero.
        scale.z <- function(X) {
            X$z <- X$z - min.z
            X
        }
        x$slices <- bbmle:::slices_apply(x$slices, scale.z)
    }
    if (logz) {
        x$slices <- bbmle:::slices_apply(x$slices,
                                         function(X) transform(X,z=log(z)))
    }
    # Upper-triangle panel function: levelplot of slice (j, i), with the
    # stored parameter point x$params (filled dot) and the grid point of
    # minimum z (open dot) overlaid.
    up0 <- function(x1, y, groups, subscripts, i, j, ...) {
        ## browser()
        sl <- x$slices[[j]][[i]]
        with(sl, panel.levelplot(x = x, y = y, z = z, contour = contour,
            at = if (!is.null(at))
                at
            else pretty(z), subscripts = seq(nrow(sl))))
        panel.points(x$params[j], x$params[i], pch = 16)
        mm <- matrix(sl$z, nrow = length(unique(sl$x)))
        wmin <- which(mm == min(mm), arr.ind = TRUE)
        xmin <- unique(sl$x)[wmin[1]]
        ymin <- unique(sl$y)[wmin[2]]
        panel.points(xmin, ymin, pch = 1)
    }
    # Lower-triangle panels are intentionally left blank.
    lp0 <- function(...) {
    }
    splom(smat, lower.panel = lp0, diag.panel = diag.panel.splom,
        upper.panel = up0, ...)
}
range.slice <- function(object, na.rm = FALSE, finite = FALSE) {
  # range() method for "slice" objects: the range of the objective ("z")
  # values over every computed slice panel.
  #
  # object        : a "slice" object (a 'slices' list of lists of data
  #                 frames with a 'z' column; uncomputed cells are NULL)
  # na.rm, finite : passed through to range() on the z values.
  #
  # Fixes: use the full element name 'slices' (the original wrote
  # object$slice and silently relied on partial matching), and use
  # vapply() instead of sapply() so an empty inner list yields logical(0)
  # rather than an un-negatable empty list.
  kept <- lapply(object$slices, function(x) x[!vapply(x, is.null, logical(1))])
  flat <- do.call(rbind, lapply(kept, function(x) do.call(rbind, x)))
  range(flat$z, na.rm = na.rm, finite = finite)
}
|
4d3b1433eb57083ff6f1c58c5a8be502cbde06da | de7d07337f7255dcc63ad23a66a03b9439aa52dc | /app.R | ac7047dd070508d3396e520c08587b14eb17214f | [] | no_license | Juliepvr/shiny_csv_to_sql | 56881d90e022c184e666bac92ca28595ec53e346 | b1838a9ad474caadd451e73f0726315754e5f64f | refs/heads/master | 2021-04-04T08:52:38.988379 | 2020-03-19T08:02:05 | 2020-03-19T08:02:05 | 248,442,815 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 5,488 | r | app.R | # App for submitting data from an excel file to a MySQL database.
# create insert statements for MySQL
library(shiny)
################################################################################
#### Functions
################################################################################
# check_entry(): sanitize one field value for embedding in a SQL string
# literal.  Escapes embedded single quotes (MySQL doubling) and trims
# surrounding whitespace; NA or a (whitespace-only) empty string yields
# NULL so the caller can emit SQL NULL instead of ''.
check_entry <- function(entry){
  # double single quotes for mysql syntax
  entry <- gsub("'", "''", entry, useBytes = TRUE)
  # remove leading and trailing whitespace
  entry <- trimws(entry)
  # Don't send an empty string, and map NA to NULL as well.  Use scalar
  # short-circuit `||` with the NA test first so `==` is never evaluated
  # on NA (the original vectorized `entry=="" | is.na(entry)` relied on
  # NA | TRUE happening to be TRUE).
  if (is.na(entry) || entry == "") {
    entry <- NULL
  }
  entry
}
# Build a single MySQL INSERT statement string from a table name, a
# comma-separated column list, and a pre-quoted value list.
generate_insert <- function(table, column, value){
  # Wrap the value list in quotes, then strip the quotes around any
  # literal NULL so it is emitted as SQL NULL rather than the string
  # 'NULL'.
  stmt <- paste0("INSERT INTO ", table, " (", column, ") VALUES ('", value, "')")
  gsub("'NULL'", "NULL", stmt, useBytes = TRUE)
}
# Convert one data-frame row into a complete, semicolon-terminated MySQL
# INSERT statement (fields sanitized via check_entry()).
row_to_insert <- function(row, table, column) {
  cleaned <- lapply(row, check_entry)            # sanitize every field
  value_list <- paste(cleaned, collapse = "','") # 'a','b',... interior
  paste0(generate_insert(table, column, value_list), ";")
}
################################################################################
# Define UI for data upload app ----
# Inputs: target database/table names, a CSV upload and parsing options
# (separator, quoting, preview length).  Outputs: previews of the parsed
# CSV and the generated SQL, plus a download button for the .sql file.
ui <- fluidPage(
  # App title ----
  titlePanel("CSV to SQL statements"),
  # Sidebar layout with input and output definitions ----
  sidebarLayout(
    # Sidebar panel for inputs ----
    sidebarPanel(
      # Free-text names used verbatim in the generated SQL.
      textInput(inputId="db_name",
                label = p("Enter the database name:"),
                value = ""),
      textInput(inputId="table",
                label = p("Enter the table to create inserts for:"),
                value = ""),
      # Input: Select a file ----
      fileInput("file1", "Choose CSV File",
                multiple = FALSE,
                accept = c("text/csv",
                           "text/comma-separated-values,text/plain",
                           ".csv")),
      # Horizontal line ----
      tags$hr(),
      # Input: Select separator ----
      radioButtons("sep", "Separator",
                   choices = c(Comma = ",",
                               Semicolon = ";",
                               Tab = "\t"),
                   selected = ","),
      # Input: Select quotes ----
      radioButtons("quote", "Quote",
                   choices = c(None = "",
                               "Double Quote" = '"',
                               "Single Quote" = "'"),
                   selected = '"'),
      # Horizontal line ----
      tags$hr(),
      # Input: Select number of rows to display ----
      radioButtons("disp", "Display",
                   choices = c(Head = "head",
                               All = "all"),
                   selected = "head")
    ),
    # Main panel for displaying outputs ----
    mainPanel(
      h3("Preview of the CSV file:"),
      p("check your data, the column names have to be identical to the MySQL columns."),
      # Output: Data file ----
      tableOutput("contents"),
      h3("Preview of the SQL statements:"),
      tableOutput("statements"),
      h3("Download the SQL statements:"),
      downloadButton("download_button", label = "Download")
    )
  )
)
# Define server logic to read selected file ----
# Reactive pipeline: uploaded CSV -> parsed data frame -> backtick-quoted
# column/table identifiers -> one INSERT statement per row -> preview
# tables and a downloadable .sql transaction.
server <- function(input, output, session) {
  # Parsed upload; re-evaluates whenever the file or a parse option changes.
  datafile <- reactive({
    # input$file1 will be NULL initially. After the user selects
    # and uploads a file, head of that data file by default,
    # or all rows if selected, will be shown.
    req(input$file1)
    # when reading semicolon separated files,
    # having a comma separator causes `read.csv` to error
    tryCatch(
      {
        df <- read.csv(input$file1$datapath,
                       header = TRUE,
                       sep = input$sep,
                       quote = input$quote)
      },
      error = function(e) {
        # return a safeError if a parsing error occurs
        stop(safeError(e))
      }
    )
    return(df)
  })
  # Preview of the parsed CSV (head or full, per the "disp" radio button).
  output$contents <- renderTable({
    if(input$disp == "head") {
      return(head(datafile()))
    }
    else {
      return(datafile())
    }
  })
  # Backtick-quoted column list, e.g. `id_Author`, `first_name`, ...
  col_names <- reactive({
    col <- paste(colnames(datafile()),collapse="`, `")
    return(paste0("`",col, "`"))
  })
  # Fully qualified target, e.g. `db`.`table`, for INSERT INTO ... VALUES
  table_name <- reactive({
    paste0("`", input$db_name, "`.`", input$table,"`")
  })
  # One INSERT statement per data row.
  # NOTE(review): apply() coerces the data frame to a character matrix,
  # so numeric columns are formatted (and possibly padded) before they
  # reach row_to_insert -- confirm that is acceptable for the output SQL.
  sql_inserts <- reactive({
    apply(datafile(),1,row_to_insert, table=table_name(), column= col_names())
  })
  # Preview of the generated statements (head or full).
  output$statements <- renderTable({
    if(input$disp == "head") {
      return(head(sql_inserts()))
    }
    else {
      return(sql_inserts())
    }
  })
  # download SQL lines, wrapped in START TRANSACTION ... COMMIT
  output$download_button <- downloadHandler(
    filename = function(){
      paste0("insert_",input$db_name,"_",input$table,".sql")
    },
    content = function(file) {
      writeLines(c("START TRANSACTION;", paste0("USE `",input$db_name,"` ;"),
                   sql_inserts(), "COMMIT;"), file)
    }
  )
  # stop when app is closed
  session$onSessionEnded(function(session){
    stopApp()
  })
}
# Create Shiny app ----
# Launch the application with the UI and server logic defined above.
shinyApp(ui, server)
|
692f03359b73bd7ac856d3972c618af74a2933ef | fed5ad61ce048fe009e2d51214279907123ee225 | /06/notes06.R | b374e4e417a8b40d86259449bfb669355ae41df9 | [
"MIT"
] | permissive | ionides/401w18 | 438d09fe68ebbb41e7405c532391e43f59effe2f | 1a30e8afeaab9cb1c71dab5321c83ef8efd5f06b | refs/heads/master | 2021-09-26T10:38:39.426467 | 2018-10-29T14:17:35 | 2018-10-29T14:17:35 | 108,592,889 | 2 | 11 | MIT | 2018-04-21T02:05:49 | 2017-10-27T20:30:58 | HTML | UTF-8 | R | false | false | 3,925 | r | notes06.R | ## ----setup,echo=F,results=F,cache=F--------------------------------------
# NOTE(review): this looks like knitr-extracted ("purled") code -- the
# "## ----<chunkname>----" lines below are knitr chunk headers, and the
# "## "-prefixed code is from chunks with eval=F.  Prefer regenerating
# from the source document over editing this file by hand.
# library(broman) # used for myround
## ----reconstruct_variables,echo=F----------------------------------------
# Read the life-expectancy and unemployment series, detrend each with a
# linear fit in Year, and align them on their common years.
L <- read.table(file="life_expectancy.txt",header=TRUE)
L_fit <- lm(Total~Year,data=L)
L_detrended <- L_fit$residuals
U <- read.table(file="unemployment.csv",sep=",",header=TRUE)
U_annual <- apply(U[,2:13],1,mean)
U_detrended <- lm(U_annual~U$Year)$residuals
L_detrended <- subset(L_detrended,L$Year %in% U$Year)
lm1 <- lm(L_detrended~U_detrended)
## ----lm------------------------------------------------------------------
# Coefficient table for the detrended regression, then a 95% CI for the
# unemployment coefficient using a normal approximation.
c1 <- summary(lm(L_detrended~U_detrended))$coefficients ; c1
beta_U <- c1["U_detrended","Estimate"]
SE_U <- c1["U_detrended","Std. Error"]
z <- qnorm(1-0.05/2) # for a 95% CI using a normal approximation
cat("CI = [", beta_U - z * SE_U, ",", beta_U + z * SE_U, "]")
## ----sim-----------------------------------------------------------------
# Simulate N draws of (d+1) iid normals; the last column plays the
# numerator and the first d columns the denominator of a t-like statistic.
N <- 50000 ; sigma <- 1 ; d <- 10 ; set.seed(23)
X <- matrix(rnorm(N*(d+1),mean=0,sd=sigma),nrow=N)
## ----T_eval--------------------------------------------------------------
T_evaluator <- function(x) x[d+1] / sqrt(sum(x[1:d]^2)/d)
## ----T_sim---------------------------------------------------------------
Tsim <- apply(X,1,T_evaluator)
## ----T_plot_code,echo=T,eval=F-------------------------------------------
## hist(Tsim,freq=F,main="",
##      breaks=30,ylim=c(0,0.4))
## x <- seq(length=200,
##          min(Tsim),max(Tsim))
## lines(x,dnorm(x),
##       col="blue",
##       lty="dashed")
## lines(x,dt(x,df=d),
##       col="red")
## ----T_plot,echo=F,eval=T,fig.width=4,fig.height=3,out.width="2.5in"-----
# Histogram of the simulated statistic, with normal (dashed blue) and
# t with d df (red) densities overlaid.
par(mai=c(0.8,0.8,0.1,0.1))
hist(Tsim,freq=F,main="",
     breaks=30,ylim=c(0,0.4))
x <- seq(length=200,
         min(Tsim),max(Tsim))
lines(x,dnorm(x),
      col="blue",
      lty="dashed")
lines(x,dt(x,df=d),
      col="red")
## ----range---------------------------------------------------------------
range(Tsim)
## ----tail_z--------------------------------------------------------------
# Two-sided tail probabilities under the normal vs the t distribution.
2*(1-pnorm(5))
2*(1-pnorm(6))
## ----tail_t--------------------------------------------------------------
2*(1-pt(5,df=d))
2*(1-pt(6,df=d))
## ----data----------------------------------------------------------------
goals <- read.table("FieldGoals2003to2006.csv",header=T,sep=",")
goals[1:5,c("Name","Teamt","FGt","FGtM1")]
lm0 <- lm(FGt~FGtM1+Name,data=goals)
## ----factor_class--------------------------------------------------------
class(goals$Name)
## ----design--------------------------------------------------------------
# Inspect the design matrix built from the factor covariate.
X <- model.matrix(lm0)
dim(X)
unname(X[c(1,5,9,13,17),1:8])
## ----anova---------------------------------------------------------------
anova(lm0)
## ----read_data-----------------------------------------------------------
gpa <- read.table("gpa.txt",header=T); gpa[1,]
## ----gpa_lm--------------------------------------------------------------
# Confidence interval for the mean GPA at ACT = 20, High_School = 40.
lm1 <- lm(GPA~ACT+High_School,data=gpa)
x <- c(1,20,40)
pred <- x%*%coef(lm1)
V <- summary(lm1)$cov.unscaled
s <- summary(lm1)$sigma
SE_pred <-sqrt(x%*%V%*%x)*s
# NOTE(review): 'c' here shadows base::c for the rest of this script.
c <- qnorm(0.975)
cat("CI = [", round(pred-c*SE_pred,3),
    ",", round(pred+c*SE_pred,3), "]")
## ----plot_args,echo=F----------------------------------------------------
par(mai=c(0.8,0.8,0.1,0.1))
## ----plot_gpa_code,eval=F------------------------------------------------
## plot(x=fitted.values(lm1),y=gpa$GPA,ylab="GPA")
## abline(a=0,b=1)
## ----plot_gpa,echo=F,fig.width=4,fig.height=4,out.width="2.5in"----------
plot(x=fitted.values(lm1),y=gpa$GPA,ylab="GPA")
abline(a=0,b=1)
## ----gpa_lm_pred---------------------------------------------------------
# Same point, but a prediction interval: note the "+ 1" in the SE.
lm1 <- lm(GPA~ACT+High_School,data=gpa)
x <- c(1,20,40)
pred <- x%*%coef(lm1)
V <- summary(lm1)$cov.unscaled
s <- summary(lm1)$sigma
SE_pred <-sqrt(x%*%V%*%x + 1)*s
c <- qnorm(0.975)
cat("prediction interval = [", round(pred-c*SE_pred,3),
    ",", round(pred+c*SE_pred,3), "]")
|
3f70ae24aca6fbcd34982d0efead4abaf8429868 | 87fa807b44c9a63cdfa1f065f2278bd3a1f3693e | /tests/testthat/test1.R | 4abfe3ee39a102a6c8d2f9f79913f434f7764a2f | [] | no_license | ricardomartins2/bldgRpkg | 1b71ca75f9b1891e3573ba9ace91faad125be5fe | fd323d1463b8250bf8fbc0169f1a25ce32127b56 | refs/heads/master | 2021-06-18T05:11:17.494307 | 2017-06-23T19:50:13 | 2017-06-23T19:50:13 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 139 | r | test1.R | library(bldgRpkg)
# Smoke tests (testthat expectations) for the fars file helpers:
# reading a missing file must error, and make_filename() must build the
# bzipped accident-data file name for a given year.
expect_error(fars_read("non_existent_file.csv.bz2"))
expect_match(make_filename("2013"), "accident_2013.csv.bz2")
|
5496128cfb27b9f9dc0f2ffff8bdeb7fbd1c2f84 | 956b0c7b6db307f87522fc46fcbbdd1476916f93 | /Functions/pollutantmean.R | e81259f82c31fbff7f3247330de6d9c2cf2949ed | [] | no_license | dataGriff/DataScienceCoursera | ca78ceb8d4f5cd36b79cb1cf1db968c88e075c0a | bf54a379912021b8257a61b5d5882709598688fe | refs/heads/master | 2023-05-01T08:04:25.180098 | 2016-07-19T18:09:38 | 2016-07-19T18:09:38 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,730 | r | pollutantmean.R | pollutantmean <- function(directory, pollutant, id = 1:332){
## 'directory is a character vector of length 1 indicating
## the locaiton of the CSV files
##'pollutant is a character vector of length 1 indicating
## the name of the pollutant for which we will calculate the
## mean; either "sulfate" or "nitrate"
##'id' is an integer vector indicating the monitor ID numbers
## to be used
## Return the mean of the pollutant across all monitors list
## in the 'id' vector (ignoring NA values)
## NOTE; Do not round the result!
#setwd("~/Projects/DataScienceCoursera")
files <- list.files(directory, full.names = TRUE)[id] #relies on order of files though..changed order and worked!
x <- data.frame()
for (i in files){
x <- rbind(x,read.csv(i))##[[pollutant]]
}
y <- mean(x[[pollutant]],na.rm=TRUE)
y
}
###################################################
##Load of notes below (trial and error!!)
####################################################
## x <- data.frame(row.names = "pollutant")
# read.csv("specdata/001.csv")[[pollutant]]
## print(files[1])
## print(files)
##filename <- files[id]
## for (i in id){
## id <- id[i]
# filename <- files[id]
## print(filename);
##if(is.na(filename) == FALSE){
##x <- read.csv(filename)}[[pollutant]]
## }
#x
## y <- mean(x,na.rm=TRUE)
## y
## y <- read.csv(files[i])[[pollutant]]
## x <- rbind(x,y)
## y
## }
## x <- mean(x,na.rm=TRUE)
## x
##x <- read.table(x,na.rm)
## read.csv(path)
## if(pollutant == "sulfate"){
## read.csv(path)
## }
## path = "specdata/001.csv"
## read.csv(path)
|
ce7c126e2f2fe9183acb24e49ef4ec0b40fba8b7 | 52824070453254349e1e82785856cb3d07e3fad0 | /developer/create_data/create_saar.R | 22ff978eeba2040d173a76eb5e109d3807fe878f | [] | no_license | dyarger/GSW-R | fd3f90fa5712a492f69440645b548ae3769421c4 | 93d4cf94389db39ed412d450c593c8a95266463b | refs/heads/master | 2021-10-25T04:12:21.747187 | 2019-03-31T14:54:13 | 2019-03-31T14:54:13 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 970 | r | create_saar.R | library(ncdf4)
# Extract the SAAR / delta-SA reference grids from the GSW-Fortran test
# NetCDF file and repack them as a plain R list saved to saar.rda.
nc <- nc_open("~/git/GSW-Fortran/test/gsw_data_v3_0.nc")
## Use as.vector() since these will all get handed into C, which does not understand matrices.
p_ref <- as.vector(ncvar_get(nc, "p_ref"))
lats_ref <- as.vector(ncvar_get(nc, "lats_ref"))
longs_ref <- as.vector(ncvar_get(nc, "longs_ref"))
ndepth_ref <- as.vector(ncvar_get(nc, "ndepth_ref"))
# Non-finite values become a large negative sentinel (presumably the
# missing-value flag expected on the C side -- confirm).
ndepth_ref[!is.finite(ndepth_ref)] <- -9e99
saar_ref <- as.vector(ncvar_get(nc, "SAAR_ref"))
saar_ref[!is.finite(saar_ref)] <- -9e99
delta_sa_ref <- as.vector(ncvar_get(nc, "deltaSA_ref"))
delta_sa_ref[!is.finite(delta_sa_ref)] <- -9e99
# Grid dimensions plus the flattened reference fields, in one list.
saar <- list(gsw_nx=length(longs_ref), gsw_ny=length(lats_ref), gsw_nz=length(p_ref),
             longs_ref=longs_ref, lats_ref=lats_ref, p_ref=p_ref, ndepth_ref=ndepth_ref,
             saar_ref=saar_ref, delta_sa_ref=delta_sa_ref)
save(saar, file="saar.rda")
tools::resaveRdaFiles("saar.rda")  # recompress for a smaller .rda
nc_close(nc)
message("Next, do cp saar.rda ../../data/saar.rda")
|
85b2c780936401ed5e174b04d9ffc234057497cc | eb74dde34a3b6b9f337e33a033ca27a119034245 | /man/a.Rd | a76b8304d3de3b4c262e4b1695ee94b3e4820fc0 | [] | no_license | cran/DetLifeInsurance | 3c9a1632fb0ddd78ae9c7dcc4a633ef429e281d8 | d0b5d3a696c5bc72ce0692d6cf7d4e9921336cfc | refs/heads/master | 2022-12-17T07:44:43.324629 | 2020-09-12T08:20:07 | 2020-09-12T08:20:07 | 278,226,350 | 0 | 1 | null | null | null | null | UTF-8 | R | false | true | 1,495 | rd | a.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/a.R
\name{a}
\alias{a}
\title{Life Annuities}
\usage{
a(x, h, n, k = 1, i = 0.04, data, prop = 1, assumption = "none", cap = 1)
}
\arguments{
\item{x}{An integer. The age of the insuree.}
\item{h}{An integer. The deferral period.}
\item{n}{An integer. Number of years of coverage.}
\item{k}{An integer. Number of payments per year.}
\item{i}{The interest rate. A numeric type value.}
\item{data}{A data.frame of the mortality table, with the first column being the age, and the second one the probability of death.}
\item{prop}{A numeric value. It represents the proportion of the mortality table being used (between 0 and 1).}
\item{assumption}{A character string. The assumption used for fractional ages ("UDD" for uniform distribution of deaths, "constant" for constant force of mortality and "none" if there is no fractional coverage).}
\item{cap}{A numeric type value. The annualized value of the payment.}
}
\value{
Returns a numeric value (actuarial present value).
}
\description{
Calculates the present value of a life annuity.
}
\examples{
a(20,0,15,1,0.04,CSO58FALB,1,"none",1200)
a(23,7,9,1,0.04,GAM71F,1,"none",5000)
a(33,3,10,4,0.04,CSO80MANB,1,"constant",3000)
a(20,5,10,4,0.04,CSO58MANB,1,"UDD",5000)
}
\references{
Chapter 2 of Life Contingencies (1952) by Jordan, chapter 5 of Actuarial Mathematics (1997) by Bowers, Gerber, Hickman, Jones & Nesbitt.
}
\keyword{Annuities}
\keyword{Life}
|
cb651dc671b29b7caec24cf9e6a781481d6fffc9 | 9adce52d55568f1e3fb6b24e721525f25384ee3c | /binomial/tests/testthat/test-aux_funcs.R | f38ada03cb3ff0e58e2d2cf8e0b6a9aab80dcb21 | [] | no_license | stat133-sp19/hw-stat133-lilybhattacharjee5 | 97435b275f11b5ce0cab4d9c58e47e8d5f583b86 | 3bbee1c254962e7cca47b75a165c46205f4e13dd | refs/heads/master | 2020-04-28T19:27:43.208405 | 2019-05-02T06:49:56 | 2019-05-02T06:49:56 | 175,511,609 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,794 | r | test-aux_funcs.R | context("test aux mean")
# testthat unit tests for the auxiliary binomial summary helpers.  The
# expected values pin the standard formulas: mean n*p, variance
# n*p*(1-p), mode floor((n+1)*p), and skewness/kurtosis diverging to
# Inf as p -> 0.
test_that("0 trials, expect 0 mean regardless of prob", {
  trials <- 0
  prob <- 0.3
  expect_equal(aux_mean(trials, prob), 0)
})
test_that("prob = 1, expect trials to be returned", {
  trials <- 100
  prob <- 1
  expect_equal(aux_mean(trials, prob), 100)
})
test_that("prob, trials both in normal range", {
  trials <- 100
  prob <- 0.4
  expect_equal(aux_mean(trials, prob), 40)
})
context("test aux variance")
test_that("prob, trials both in normal range", {
  trials <- 15
  prob <- 0.3
  expect_equal(aux_variance(trials, prob), 3.15)
})
test_that("prob = 0.5, 1 - prob = 0.5", {
  trials <- 100
  prob <- 0.5
  expect_equal(aux_variance(trials, prob), 25)
})
test_that("prob = 0, variance should be 0", {
  trials <- 100
  prob <- 0
  expect_equal(aux_variance(trials, prob), 0)
})
context("test aux mode")
test_that("prob, trials both in normal range", {
  expect_equal(aux_mode(15, 0.3), 4)
})
test_that("trials * prob gives an integer", {
  expect_equal(aux_mode(100, 0.3), 30)
})
test_that("prob is 0, mode should also be 0", {
  expect_equal(aux_mode(100, 0), 0)
})
context("test aux skewness")
test_that("prob, trials both in normal range", {
  expect_equal(aux_skewness(100, 0.4), 0.04082482904)
})
test_that("prob = 0.5, 1 - prob = 0.5", {
  expect_equal(aux_skewness(100, 0.5), 0)
})
test_that("prob = 0, 1 - prob = 1 leads to infinite skewness", {
  expect_equal(aux_skewness(100, 0), Inf)
})
context("test aux kurtosis")
test_that("trials = 100, prob = 0.3, both in normal range", {
  expect_equal(aux_kurtosis(100, 0.3), -0.01238095238)
})
test_that("prob = 0, 1 - prob = 1 leads to infinite kurtosis", {
  expect_equal(aux_kurtosis(100, 0), Inf)
})
test_that("prob = 1 - prob = 0.5", {
  expect_equal(aux_kurtosis(100, 0.5), -0.02)
})
|
fd407e1fddc88ef3f7fa3051bf35579ac4b805a2 | f812d9bc776592b4a4308cc5925da7fcca8e4c48 | /R/srn.readgcam.R | a77ccf367aa26150f1b9dd576d5a8697ac60bec2 | [
"MIT"
] | permissive | ChaoEcohydroRS/srn | e953a7df6e01fff31d4edaea3ae4aaa126420d91 | 7ad4ab82093141eeab8b73203160e8079af695f6 | refs/heads/master | 2022-12-01T07:13:45.781255 | 2018-11-13T22:35:53 | 2018-11-13T22:35:53 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 11,097 | r | srn.readgcam.R | #' srn.readgcam
#'
#' This function connects to a gcamdatabase and uses a query file to
#' output results into a table ready for plotting.
#' @param dirOutputs Full path to directory for outputs
#' @param gcamdatabasePath Path to gcam database folder
#' @param gcamdatabaseName Name of gcam database
#' @param queryxml Full path to query.xml file
#' @param scenOrigNames Original Scenarios names in GCAM database in a string vector.
#' For example c('scenario1','scenario2').
#' @param scenNewNames New Names which may be shorter and more useful for figures etc.
#' Default will use Original Names. For example c('scenario1','scenario2').
#' @param reReadData If TRUE will read the GCAM data base and create a queryData.proj file
#' in the same folder as the GCAM database. If FALSE will load a '.proj' file if a file
#' with full path is provided otherwise it will search for a dataProj.proj file in the existing
#' folder which may have been created from an old run.
#' @param dataProj Optional. A default 'dataProj.proj' is produced if no .Proj file is specified.
#' @param regionsSelect The regions to analyze in a vector. Example c('Colombia','Argentina')
#' @param queriesSelect Default = "All". Vector of queries to read from the queryxml for example
#' c("Total final energy by aggregate end-use sector", "Population by region"). The queries must be
#' available in the queryxml file. The current list of available parameters is:
#' \itemize{
#' \item "Total final energy by aggregate end-use sector"
#' \item "GDP per capita MER by region" : Where MER is "Market Exchange Rate"
#' \item "GDP MER by region" : Where MER is "Market Exchange Rate"
#' \item "GDP Growth Rate (Percent)" : Calculated based on the GDP MER by region.
#' \item "Population by region"
#' \item "ag production by tech" : Where technologies signify irrigated or rainfed
#' }
#' @return A list with the scenarios in the gcam database, queries in the queryxml file and a
#' tibble with gcam data formatted for srn charts.
#' @keywords gcam, gcam database, query
#' @import rgcam tibble dplyr
#' @export
srn.readgcam <- function(gcamdatabasePath, gcamdatabaseName, queryxml = "srnQueries.xml",
scenOrigNames, scenNewNames = NULL,
reReadData = T, dataProj = "dataProj.proj", dirOutputs = paste(getwd(), "/outputs", sep = ""),
regionsSelect = NULL, queriesSelect="All") {
#------------------
# Load required Libraries
# -----------------
requireNamespace("tibble",quietly = T)
requireNamespace("dplyr",quietly = T)
#----------------
# Initialize variables by setting to NULL
#----------------
NULL -> vintage -> year -> xLabel -> x -> value -> sector -> scenario -> region -> param -> origX -> origValue ->
origUnits -> origScen -> origQuery -> classPalette2 -> classPalette1 -> classLabel2 -> classLabel1 -> class2 ->
class1 -> connx -> aggregate -> Units -> sources
# Create necessary directories if they dont exist.
if (!dir.exists(dirOutputs)){
dir.create(dirOutputs)} # Output Directory
if (!dir.exists(paste(dirOutputs, "/Tables_gcam", sep = ""))){
dir.create(paste(dirOutputs, "/Tables_gcam", sep = ""))} # GCAM output directory
if (!dir.exists(paste(dirOutputs, "/Tables_Templates", sep = ""))){
dir.create(paste(dirOutputs, "/Tables_Templates", sep = ""))} # GCAM output directory
if (!dir.exists(paste(dirOutputs, "/Tables_Local", sep = ""))){
dir.create(paste(dirOutputs, "/Tables_Local", sep = ""))} # GCAM output directory
# Check for new scenario names
if (is.null(scenNewNames)) {
scenNewNames <- scenOrigNames}
# Read gcam database or existing dataProj.proj
if (!reReadData) {
if (file.exists(paste(gcamdatabasePath, "/", dataProj, sep = ""))) {
dataProjLoaded <- loadProject(paste(gcamdatabasePath, "/", dataProj, sep = ""))
} else {
stop(paste("No ", dataProj, " file exists. Please set reReadData=T to create dataProj.proj"))
}
} else {
if (file.exists(dataProj)){
file.remove(dataProj)} # Delete old project file
for (scenario_i in scenOrigNames) {
dataProj.proj <- addScenario(conn = localDBConn(gcamdatabasePath, gcamdatabaseName), proj = dataProj,
scenario = scenario_i, queryFile = paste(gcamdatabasePath, "/", queryxml, sep = "")) # Check your queries file
}
file.copy(from = paste(getwd(), "/", dataProj, sep = ""), to = gcamdatabasePath, overwrite = T,
copy.mode = TRUE)
file.remove(dataProj)
dataProjLoaded <- loadProject(paste(gcamdatabasePath, "/", dataProj, sep = ""))
}
# Save list of scenarios and queries
scenarios <- listScenarios(dataProjLoaded) # List of Scenarios in GCAM database
queries <- listQueries(dataProjLoaded) # List of Queries in queryxml
# Read in paramaters from query file to create formatted table
datax <- tibble()
if(queriesSelect=="All"){queriesx <- queries} else{
if(!all(queriesSelect %in% queries)){stop("No parameters are available in queryxml.
Please check your queriesSelect entries or your queryxml")} else {
if(length(queriesSelect[!(queriesSelect %in% queries)])>0){
print(paste("Parameters not available in queryxml: ", paste(queriesSelect[!(queriesSelect %in% queries)],collapse=", "), sep=""))
print(paste("Running remaining queriesSelect: ", paste(queriesSelect[(queriesSelect %in% queries)],collapse=", "), sep=""))}
queriesx <- queriesSelect}
}
# Total final energy by aggregate end-use sector
paramx <- "Total final energy by aggregate end-use sector"
if (paramx %in% queriesx) {
tbl <- getQuery(dataProjLoaded, paramx) # Tibble
if (!is.null(regionsSelect)) {
tbl <- tbl %>% dplyr::filter(region %in% regionsSelect)
}
tbl <- tbl %>%
left_join(data_frame(scenOrigNames, scenNewNames), by = c(scenario = "scenOrigNames")) %>%
mutate(param = "finalNrgbySec",
sources = "Sources",
origScen = scenario,
origQuery = paramx,
origValue = value,
origUnits = Units,
origX = year,
scenario = scenNewNames,
value = value * srn.assumptions()$convEJ2TWh,
units = "Final Energy (TWh)",
vintage = paste("Vint_", year, sep = ""),
x = year,
xLabel = "Year",
aggregate = "sum",
class1 = sector,
classLabel1 = "Sector",
classPalette1 = "pal_finalNrg_sec",
class2 = "class2",
classLabel2 = "classLabel2",
classPalette2 = "classPalette2")%>%
dplyr::select(origScen,origQuery, origValue, origUnits, origX, region, param, scenario,
value, units, vintage, x, xLabel, aggregate, class1, classLabel1, classPalette1,
class2, classLabel2, classPalette2)
datax <- bind_rows(datax, tbl)
} else {
print(paste("Paramater '", paramx, "' not found in database", sep = ""))
}
# GDP MER by region
paramx <- "GDP MER by region"
if (paramx %in% queriesx) {
tbl <- getQuery(dataProjLoaded, paramx) # Tibble
if (!is.null(regionsSelect)) {
tbl <- tbl %>% dplyr::filter(region %in% regionsSelect)
}
tbl <- tbl %>%
left_join(data_frame(scenOrigNames, scenNewNames), by = c(scenario = "scenOrigNames")) %>%
mutate(param = "gdp",
sources = "Sources",
origScen = scenario,
origQuery = paramx,
origValue = value,
origUnits = Units,
origX = year,
scenario = scenNewNames,
value = value/1000,
units = "GDP (Billion 1990 USD)",
vintage = paste("Vint_", year, sep = ""),
x = year,
xLabel = "Year",
aggregate = "sum",
class1 = "class1",
classLabel1 = "GDP",
classPalette1 = "pal_16",
class2 = "class2",
classLabel2 = "classLabel2",
classPalette2 = "classPalette2") %>%
dplyr::select(scenario, region, param, sources, class1, class2, x, xLabel, vintage, units, value,
aggregate, classLabel1, classPalette1,classLabel2, classPalette2,
origScen, origQuery, origValue, origUnits, origX)
datax <- bind_rows(datax, tbl)
} else {
print(paste("Paramater '", paramx, "' not found in database", sep = ""))
}
#---------------------
# Create Data Template
#---------------------
dataTemplate <- datax %>%
mutate(scenario = "Local Data", value = 0, sources="Sources", x=2010, vintage="vintage if available") %>%
dplyr::select(scenario, region, sources, param, class1, class2, units, x, value, vintage, xLabel, aggregate,
classLabel1, classPalette1, classLabel2, classPalette2) %>%
unique()
#---------------------
# Save Data in CSV
#---------------------
if (is.null(regionsSelect)) {
utils::write.csv(datax, file = paste(dirOutputs, "/Tables_gcam/gcamDataTable_AllRegions_", min(range(datax$x)),
"to", max(range(datax$x)), ".csv", sep = ""), row.names = F)
utils::write.csv(dataTemplate, file = paste(dirOutputs, "/Tables_Template/template_Regional_AllRegions.csv", sep = ""),
row.names = F)
} else {
if(!all(regionsSelect %in% unique(datax$region))){
print(paste("Regions not available in data: ", paste(regionsSelect[!(regionsSelect %in% unique(datax$region))],collapse=", "), sep=""))
print(paste("Running remaining regions: ", paste(regionsSelect[(regionsSelect %in% unique(datax$region))],collapse=", "), sep=""))
}
for (region_i in regionsSelect[(regionsSelect %in% unique(datax$region))]) {
utils::write.csv(datax %>% dplyr::filter(region == region_i),
file = paste(dirOutputs, "/Tables_gcam/gcamDataTable_",region_i,"_", min(range(datax$x)),
"to", max(range(datax$x)), ".csv", sep = ""),row.names = F)
utils::write.csv(dataTemplate %>% dplyr::filter(region == region_i),
file = paste(dirOutputs, "/Tables_Templates/template_Regional_",region_i,".csv", sep = ""),row.names = F)
utils::write.csv(dataTemplate %>% dplyr::filter(region == region_i),
file = paste(dirOutputs, "/Tables_Local/local_Regional_",region_i,".csv", sep = ""),row.names = F)
}
}
return(list(data = datax, dataTemplate = dataTemplate, scenarios = scenarios, queries = queries))
}
|
01ae369c09c78ab5f440174ff17f25918b54c09f | cd0d3ab074bcf1242b0477e7c2cc434405c7b956 | /plot_heatmap_complexheatmap.r | ca73e73b19e75ae0e95b6ccdc463499d370092fd | [] | no_license | ccoo22/personal_tools | b17d682c401cdfabb3890dfc227415e26408fe0d | 536eea20acc7a95d70a632a0ec0cceeda607548e | refs/heads/master | 2022-11-09T13:23:16.133963 | 2022-10-21T09:41:00 | 2022-10-21T09:41:00 | 234,226,426 | 3 | 4 | null | null | null | null | UTF-8 | R | false | false | 14,378 | r | plot_heatmap_complexheatmap.r | #!/home/genesky/software/r/4.1.2/bin/Rscript
library(docopt)
"Usage: plot_heatmap_complexheatmap.r -i <file> -o <pdf file> --sample_group <file> [--ann_colors <string> --rlib <dir> --pdf_width <int> --pdf_height <int> --display_number --display_number_size <numeric> --row_rename_file <file> --col_rename_file <file> --row_font_size <numeric> --col_font_size <numeric> --cell_height <numeric> --cell_width <numeric> --cell_border_width <numeric> --cell_border_color <string> --title <string> --rm_legend --gene_list <string> --col_temp <string> --col_temp_defined <string> --legend_breaks <string> --legend_name <string> --scale <string> --cluster_row --cluster_col --show_row --show_col]
Options:
-i, --input <file> 输入文件,第一列为基因名,第二列及之后为每一个样本的表达量
--sample_group <file> 样本分组文件,第一列样本名,第二列及之后的列都是样本分组,包含表头。仅对该文件中包含的样本绘制热图。可以只有第一列样本信息,不给样本分组。如果样本有分组,且分组是空值,空值也被认为是一个分组符号。
注:如果有很多注释列,complexheatmap 会自动把legend换行显示
注:legend的颜色都是随机搭配生成的,每一次运行,配色结果都不相同,除非你主动声明颜色搭配
--ann_colors <string> 设定sample_group第二列分组对应的颜色,数量务必等于该列分组数量,示例: red,blue,#648c11
注:该方法可以阻止软件自动配色
注:颜色添加的顺序与sample_group第二列中的分类名称 unique 顺序对应
-o, --output <pdf file> 输出pdf文件路径,示例:./a.pdf
--gene_list <string> 绘制的基因列表,用“逗号”分隔,默认全部绘制 [default: NA]
--col_temp <string> 热图颜色模版, 目前只提供了 2个配色模版,支持: navy_white_red / navy_white_firbrick3 [default: navy_white_firbrick3]
--col_temp_defined <string> 自定义热图颜色模版, 只接收16进制颜色类型,多个颜色之间用逗号分隔, 例如 #0047ab,#e9967a,#648c11 [default: NA]
当定义了该参数,--col_type 参数会被忽略
--legend_breaks <string> 控制热图数值与颜色的对应关系(限制数值显示范围),至少填写三个数值,且从小到大,逗号分隔。 例如: '-1,0,1' [default: NA]
--legend_name <string> 热图legend的名称 [default: none]
--scale <string> 归一化方式,none/row/column [default: none]
--cluster_row 进行行聚类
--cluster_col 进行列聚类
--show_row 显示行名
--show_col 显示列名
--display_number 热图方框里显示数字
--display_number_size <numeric> 修改热图方框里字体大小, 例如 10
--rm_legend 去掉图例, scale图例
--pdf_width <int> pdf宽度 [default: 7]
--pdf_height <int> pdf高度 [default: 7]
--title <string> 标题 [default: heatmap plot]
--row_rename_file <file> 在显示热图时,对热图的行名进行重命名。两列数据,第一列基因名,要与input保持一致,且数量一致;第二列是新的名称,允许重复。包含表头。
--col_rename_file <file> 在显示热图时,对热图的列名进行重命名。两列数据,第一列样本名,要与input保持一致,且数量一致;第二列是新的名称,允许重复。包含表头。
--row_font_size <numeric> 行名字体大小, 例如 10
--col_font_size <numeric> 列名字体大小, 例如 10
--cell_border_color <string> 热图边框的颜色, 16进制字符, 例如 #0047ab [default: NA]
--cell_border_width <numeric> 热图边框的宽度,例如 2 [default: NA]
--cell_width <numeric> cell宽度, 例如 0.5 ,控制每个cell的宽度
--cell_height <numeric> cell高度, 例如 0.5
--rlib <dir> R包路径 [default: /home/genesky/software/r/4.1.2/lib64/R/library]" -> doc
opts <- docopt(doc, version='甘斌,complexheatmap 热图\n')
input <- opts$input
output <- opts$output
sample_group <- opts$sample_group
ann_colors <- opts$ann_colors
gene_list <- opts$gene_list
scale <- opts$scale
col_temp <- opts$col_temp
col_temp_defined <- opts$col_temp_defined
legend_breaks <- opts$legend_breaks
legend_name <- opts$legend_name
title <- opts$title
pdf_width <- as.numeric(opts$pdf_width)
pdf_height <- as.numeric(opts$pdf_height)
rm_legend <- opts$rm_legend
cluster_row <- opts$cluster_row
cluster_col <- opts$cluster_col
show_row <- opts$show_row
show_col <- opts$show_col
display_number <- opts$display_number
display_number_size <- opts$display_number_size
row_font_size <- opts$row_font_size
col_font_size <- opts$col_font_size
row_rename_file <- opts$row_rename_file
col_rename_file <- opts$col_rename_file
cell_width <- opts$cell_width
cell_height <- opts$cell_height
cell_border_width <- opts$cell_border_width
cell_border_color <- opts$cell_border_color
rlib <- opts$rlib
# 加载R包
message('加载ComplexHeatmap')
.libPaths(rlib)
library(ComplexHeatmap, quietly = TRUE)
library(circlize)
library(RColorBrewer)
# input = "/home/zhangshuang/work/other_project/21B0420A/cluster/tcga_brca.fpkm.diff.txt"
# sample_group = "/home/zhangshuang/work/other_project/21B0420A/heatmap/group.txt"
# 读入数据
message('读入数据')
data_raw <- read.table(input, header = T, sep = "\t" , row.names = 1, check.name = F, stringsAsFactors = F, quote = "", comment.char = "") # 第一列为基因名,第二列及之后为每一个样本的表达量
## 读入分组
data_group <- read.table(sample_group, header = T, sep = "\t" , row.names = 1, check.name = F, stringsAsFactors = F, quote = "", comment.char = "", colClasses = 'character') # 读入分组
choose_samples = rownames(data_group)
choose_genes = rownames(data_raw)
## 分组文件是否丢失数据
lost_samples = setdiff(choose_samples, colnames(data_raw))
if (length(lost_samples) > 0 )
{
message("[Error] sample_group 中的样本 在 input 中有缺失 : ", paste(lost_samples, collapse=','))
q()
}
## 选定要分析的基因
if(gene_list != 'NA')
{
choose_genes = unlist(strsplit(gene_list, ','))
lost_genes = setdiff(choose_genes, rownames(data_raw))
if(length(lost_genes) > 0)
{
message("[Error] gene_list 中的基因名在input中没有找到 : ", paste(lost_genes, collapse=','))
q()
}
}
## 原始热图数据准备
data_choose = data_raw[choose_genes, choose_samples, drop=F]
data_choose = as.matrix(data_choose)
## scale 归一化 处理
message('确认是否归一化数据')
data_plot = data_choose
if(scale == 'row')
{
message(' row归一化数据')
data_plot = t(scale(t(data_choose)))
}
if(scale == 'col')
{
message(' col归一化数据')
data_plot = scale(data_choose)
}
## 参考 pheatmap 代码,确定当前绘图的breaks
message('确定热图颜色、数值 breaks')
if (!identical(legend_breaks, 'NA'))
{
# 自定义breaks
message(' 使用自定义 breaks')
legend_breaks = as.numeric(unlist(strsplit(legend_breaks, ',')))
}
## 自动 breaks
if (identical(scale, "row") || identical(scale, "column")) {
if (identical(legend_breaks, 'NA')) {
message(' 使用自动化 breaks')
lim = quantile(abs(data_plot), 0.975)
le = pretty(c(-lim, lim), n = 3)
if (length(le) == 7 && le[1] == -3) {
le = c(-3, -1.5, 0, 1.5, 3)
}
else if (!0 %in% le) {
le = c(le[1], le[1]/2, 0, le[length(le)]/2, le[length(le)])
}
legend_breaks = le
}
}
## 热图颜色模版, 注意 -2,0,2 同时起到了限制热图数值显示范围的作用
message('颜色模版确认')
col_fun = NA
if(identical(legend_breaks, 'NA'))
{
if(col_temp == 'navy_white_red')
{
col_fun = colorRampPalette(c("navy", "white", "red"))(200)
}
if(col_temp == 'navy_white_firbrick3')
{
col_fun = colorRampPalette(c("navy", "white", "firebrick3"))(200)
}
if(col_temp_defined != 'NA')
{
# 自定义配色方案
col_temp_defined = unlist(strsplit(col_temp_defined,','))
col_fun = colorRampPalette(col_temp_defined)(200)
}
}else{
if(col_temp == 'navy_white_red')
{
col_fun = colorRamp2(legend_breaks, colorRampPalette(c("navy", "white", "red"))(length(legend_breaks)))
}
if(col_temp == 'navy_white_firbrick3')
{
col_fun = colorRamp2(legend_breaks, colorRampPalette(c("navy", "white", "firebrick3"))(length(legend_breaks)))
}
if(col_temp_defined != 'NA')
{
# 自定义配色方案
col_temp_defined = unlist(strsplit(col_temp_defined,','))
col_fun = colorRamp2(legend_breaks, colorRampPalette(col_temp_defined)(length(legend_breaks)))
}
}
## cell 边框颜色
if(identical(cell_border_color, 'NA'))
{
cell_border_color = ifelse(nrow(data_plot) <100 & ncol(data_plot) < 100, "grey60", NA)
}
## cell 边框大小
cell_border_width = ifelse(identical(cell_border_width, 'NA'), NA, as.numeric(cell_border_width))
## 列注释(根据 sample_group 文件制作), 绘图矩阵已经按照sample_group 矩阵排过序了,这里可以直接用
top_annotation = NULL
if(ncol(data_group) > 0)
{
if(!is.null(ann_colors))
{
# 自定义配色
ann_colors = unlist(strsplit(ann_colors, ','))
# 数量是否与第二列注释相同
class_second_column = unique(data_group[,1])
if(length(ann_colors) != length(class_second_column))
{
message("[Error] ann_colors 参数中声明的颜色数量与 sample_group 文件的第二列分类数量不符,请重新调整参数: ann_colors = ", ann_colors)
q()
}
color = list(structure(ann_colors, names=class_second_column))
names(color) = colnames(data_group)[1]
top_annotation = HeatmapAnnotation(df = data_group, col=color)
}else{
# 自动随机配色
top_annotation = HeatmapAnnotation(df = data_group)
}
}
## 设置热图区域的宽度、高度
width = NULL
if(!is.null(cell_width))
{
width = unit(as.numeric(cell_width) * ncol(data_plot), "cm")
}
height = NULL
if(!is.null(cell_height))
{
height = unit(as.numeric(cell_height) * nrow(data_plot), "cm")
}
## 行、列名称字体大小
if(!is.null(row_font_size))
{
row_font_size = as.numeric(row_font_size)
}
if(!is.null(col_font_size))
{
col_font_size = as.numeric(col_font_size)
}
## 重设行、列名称
row_labels = rownames(data_plot)
if(!is.null(row_rename_file))
{
row_rename_data = read.table(row_rename_file, header = T, sep = "\t" , row.names = 1, check.name = F, stringsAsFactors = F, quote = "", comment.char = "", colClasses = 'character') # 两列信息
# 检查
lost_rows = setdiff(choose_genes, rownames(row_rename_data))
if(length(lost_rows) > 0)
{
message("[Error] row_rename_file 中必须包含input文件中的所有要分析的基因名称 : ", paste(lost_rows, collapse=','))
q()
}
row_labels = structure(row_rename_data[choose_genes, 1], names = choose_genes)
}
column_labels = colnames(data_plot)
if(!is.null(col_rename_file))
{
col_rename_data = read.table(col_rename_file, header = T, sep = "\t" , row.names = 1, check.name = F, stringsAsFactors = F, quote = "", comment.char = "", colClasses = 'character') # 两列信息
# 检查
lost_cols = setdiff(choose_samples, rownames(col_rename_data))
if(length(lost_cols) > 0)
{
message("[Error] col_rename_file 中必须包含input文件中的所有要分析的样本名称 : ", paste(lost_cols, collapse=','))
q()
}
column_labels = structure(col_rename_data[choose_samples, 1], names = choose_samples)
}
## 热图里显示数字、以及数字大小
if(!is.null(display_number_size))
{
display_number_size = as.numeric(display_number_size)
}else{
display_number_size = NA
}
cell_fun = NULL
if(display_number)
{
cell_fun <- function(j, i, x, y, width, height, fill)
{
grid.text(sprintf("%.1f", data_plot[i, j]), x, y, gp=gpar(fontsize = display_number_size))
}
}
message('开始绘图')
pdf(file=output, width=pdf_width, height=pdf_height)
Heatmap(matrix = data_plot,
col = col_fun, # 热图颜色模版
name = ifelse(legend_name == 'none', ' ', legend_name), # key legend名称
cluster_rows = cluster_row, # 行聚类
cluster_columns = cluster_col, # 列聚类
show_row_names = show_row, # 显示行名称
show_column_names = show_col, # 显示列名称
clustering_distance_rows = "euclidean", # 行距离计算方法
clustering_method_rows = "complete", # 行聚类方法
clustering_distance_columns = "euclidean", # 列距离计算方法
clustering_method_columns = "complete", # 列聚类方法
rect_gp = gpar(col = cell_border_color, lwd = cell_border_width), # cell 边框颜色、边框大小
top_annotation = top_annotation, # 添加列注释
show_heatmap_legend = !rm_legend, # 去掉热图legend
column_title = title, # 列标题
width = width, # 设置热图区域的宽度: 不包括进化树、legend
height = height, # 设置热图区域的高度: 不包括进化树、legend
row_names_gp = gpar(fontsize = row_font_size), # 行名颜色、大小调整
column_names_gp = gpar(fontsize = col_font_size), # 列名颜色、大小调整
row_labels = row_labels, # 重设 行名
column_labels = column_labels, # 重设列名
cell_fun = cell_fun, # 热图里添加数字
)
dev.off()
|
705e43493efbefd21f43d65489ddbac0742505ce | a64716eb81090ebef90752a39d280bba0d870bf4 | /CS 513 - Knowledge Discovery & Data Mining/Final/Q6 - IRIS Dataset.r | 768f222de2a8fe836e14a00458568e82893fc5af | [
"MIT"
] | permissive | ssgarnaik/Stevens-Computer-Science-Courses-Materials | 649dd7150e0761f2b6ca5a31fa7d30a92f47a581 | 75daf93d91a43e0b21523fb32de0da851197e795 | refs/heads/master | 2021-07-23T20:01:25.737599 | 2020-06-10T22:43:48 | 2020-06-10T22:43:48 | 184,960,918 | 0 | 0 | null | 2019-05-05T01:16:42 | 2019-05-05T01:16:42 | null | UTF-8 | R | false | false | 859 | r | Q6 - IRIS Dataset.r | ###### Knowledge Discovery and Data Mining (CS 513) ######
# (Final Exam)
# Course : CS 513 - A
# First Name : PARAS
# Last Name : GARG
# Id : 10414982
# Purpose : Final Exam - IRIS Dataset
###### ******************************************** ######
### Develop the following program in R
# a. Load the IRIS dataset into memory
iris_dataset <- data.frame(iris);
View(iris_dataset);
# b. Create a test dataset by extracting every third (3rd) row of the data, starting with the second row.
extract_range <- seq(from = 2, to = nrow(iris_dataset), by = 3);
test_dataset <- iris_dataset[extract_range, ];
View(test_dataset);
# c. Create a training dataset by excluding the test data from the IRIS dataset
train_dataset <- iris_dataset[-extract_range, ];
View(train_dataset);
### clearing environment
rm(list = ls()) |
e04efd1db1dfb1dc920cd6a0212481d02d587427 | d23f22579a04095c51a4b9cb7e0e779907baab5d | /cachematrix.R | c9ac69b5179fbbb3509ea966412d75133cfc1fa6 | [] | no_license | Charlotte-Mitchell/ProgrammingAssignment2 | 4efe01e4833ff7489db16a0c8726de211a0d154e | 182f56a8d026a7570ffa08ff8feac36f9fb923ea | refs/heads/master | 2020-04-19T18:49:35.397171 | 2019-02-01T16:17:28 | 2019-02-01T16:17:28 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,142 | r | cachematrix.R | ## These two functions together help save time when computing the inverse
## of multiple matrices. The first stores the inverse of a matrix in the cache.
## The second checks whether the inverse of a matrix exists, draws it from the
## cache if it does or computes it if it does not
## This function creates a special "matrix" object that can cache its inverse
makeCacheMatrix <- function(x = matrix()) {
inv <- NULL
set <- function(y) {
x <<- y
inv <<- NULL
}
get <- function() x
setinv <- function() inv <<- solve(x)
getinv <- function() inv
list(set = set, get = get,
setinv = setinv,
getinv = getinv)
}
## This function computes the inverse of the special "matrix" returned by
## `makeCacheMatrix` above. If the inverse has already been calculated
## (and the matrix has not changed), then`cacheSolve` retrieves the inverse
## from the cache
cacheSolve <- function(x, ...) {
## Return a matrix that is the inverse of 'x'
inv <- x$getinv()
if(!is.null(inv)) {
message("getting cached data")
return(inv)
}
mat <- x$get()
inv <- solve(mat, ...)
x$setinv(inv)
inv
}
|
dabf2bd9431d265e7bc0860c0059e5638c2ca399 | cc3d8c0985ff81ffc940133819b7049b8bf5d59a | /R/getRa.R | 99bc340c62420be465439b6e83cc965003d70cd7 | [] | no_license | roinaveiro/acAra | 3ee93f101bd48cf69876820fe743cc53aa16f668 | 0f32d5a467255b76d6b816e81b58d52b0ede9a1b | refs/heads/master | 2021-01-19T11:08:52.258363 | 2017-06-30T10:54:24 | 2017-06-30T10:54:24 | 87,931,321 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 462 | r | getRa.R | #' Get ra from vector
#'
#' @param emailMatrix dataframe that contains all the emails types
#' @param mail email to get ra.
#' @return ra
#' @keywords ra,
#' @export
#' @examples
#' getRa(x)
getRa <- function(emailMatrix,mail = c(1,0,1,0,1)){
# Check all the emails to find in matrix to find the imput email
for (i in 1:nrow(emailMatrix)){
if (all(emailMatrix[i,1:length(mail)]==mail)){
ra = emailMatrix[i,length(mail)+1]
}
}
return(ra)
} |
100249ec73219d53a7912ad80aa3a6e6a72e03c4 | 6b77fe4093bb4045c58db70975143cb259b2c8a6 | /annotations/bootloader/bootloader.r | 2831717020459ae647a64e9ff33780d2fd0ab6f2 | [
"BSD-3-Clause"
] | permissive | roelandjansen/md380tools | b106c1d0da7449c156948afd936761553909aaef | afea33d0d2b09f1097dd5cb4f3d42d9724393436 | refs/heads/master | 2021-05-23T02:32:42.350275 | 2017-02-05T17:24:33 | 2021-01-03T14:11:00 | 66,765,943 | 26 | 6 | null | 2017-03-05T12:27:40 | 2016-08-28T12:02:10 | C | UTF-8 | R | false | false | 2,827 | r | bootloader.r | # bootloader.r by Travis Goodspeed
# This is a Radare2 script for annotating the Tytera MD380 bootloader,
# or my jailbreak derived from that bootloader. I've tried to make
# this as human-readable as possible, but also to only include those
# symbols which are absolutely necessary to understand the patch.
# Begin by opening the bootloader or jailbreak in r2 with this script.
# r2 -a arm -m 0x08000000 -b 16 -i bootloader.r bootloader.bin
# MD5 (bootloader.bin) = 721df1f98425b66954da8be58c7e5d55
# MD5 (jailbreak.bin) = 32931e5cf5e62400b31a80b1efcd2686
# Define these three functions which relate to the Readout Device
# Protection feature.
CCa 0x08001fb0 rdp_lock(0x55) locks the device, rdp_lock(0xAA) unlocks it.
af+ 0x08001fb0 24 rdp_lock
CCa 0x08001fc8 After calling rdp_lock(), rdp_applylock() sets the state.
af+ 0x08001fc8 28 rdp_applylock
CCa 0x08001fe4 Returns 1 if RDP is not locked. 0 if it is locked.
af+ 0x08001fe4 22 rdp_isnotlocked
# These are child functions, which make things a bit easier to read.
CCa 0x08002060 Waits for a Flash operation to complete.
af+ 0x08002060 40 flash_wait
CCa 0x080049e8 Tests the pins to stay, or not stay, in bootloader mode.
af+ 0x080049e8 98 bootloader_pin_test
# Inside of main(), rdp_lock(0x55) is conditionally called if
# rdp_isnotlocked(). My first jailbreak worked by simply patching
# this to call rdp_lock(0xAA), which leaves the device unlocked.
CCa 0x080043bc This is the main() function of the bootloader.
af+ 0x080043bc 388 main
CCa 0x080044a8 Change this immediate from 0x55 to 0xAA to jailbreak the bootloader.
# This prints the relevant piece of code in main() that is patched to
# jailbreak the bootloader, leaving Readout Device Production (RDP)
# disabled.
# [0x08000000]> pd 8 @ 0x080044a0
# 0x080044a0 fdf7a0fd bl rdp_isnotlocked
# 0x080044a4 0028 cmp r0, 0
# ,=< 0x080044a6 04d1 bne 0x80044b2
# | ; Change this immediate from 0x55 to 0xAA to jailbreak the bootloader.
# | 0x080044a8 5520 movs r0, 0x55
# | 0x080044aa fdf781fd bl rdp_lock
# | 0x080044ae fdf78bfd bl rdp_applylock
# `-> 0x080044b2 fdf776fd bl 0x8001fa2
# 0x080044b6 00f097fa bl bootloader_pin_test
# [0x08000000]>
# Inside of bootloader_pin_test, the I/O pins for the push-to-talk
# button and the button above are tested.
CCa 0x8003af2 Tests pin r1 of port r0.
af+ 0x8003af2 22 gpio_input_test
CCa 0x8002384 Starts DFU recovery mode.
af+ 0x8002384 68 bootloader_setup
# Comments inside bootloader_pin_test
CCa 0x080049f2 Test the first button.
CCa 0x080049fe Test the second button.
CCa 0x08004a36 Calls the address stored at 0x800C004, the reset vector of the application.
CCa 0x08004a2e Set the stack pointer to the value at 0x0800C000.
|
3a88d108b0aea8b1fbe9f955d1991f0175ff48a1 | 506a2b1bee7c97655ca5bf94a2d6edfd11e698e2 | /man/covfx.Rd | 94e3358fbef4e8b9dea9dd81b05b9c1be61cdf9c | [] | no_license | emanuelhuber/GauProMod | 3662d5c2cf1136865f6d45210c8ce938634c03ed | f2d970e3de95da56c6fb029a5f44c5ceac06d98e | refs/heads/master | 2021-07-18T19:55:08.050846 | 2021-07-13T08:54:32 | 2021-07-13T08:54:32 | 56,522,805 | 10 | 1 | null | null | null | null | UTF-8 | R | false | true | 705 | rd | covfx.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/main.R
\name{covfx}
\alias{covfx}
\title{Return covariance as a function of distance}
\usage{
covfx(r, covModel)
}
\arguments{
\item{r}{vector of distance}
\item{covModel}{Covariance mdoel}
}
\description{
Return covariance as a function of distance
}
\examples{
covModel <- list(kernel="matern",
l = 5, # correlation length
v = 1, # smoothness
h = 2.45 # std. deviation
)
r <- seq(0, 20, by = 0.1)
myCov <- covfx(r = r, covModel = covModel)
plot(r, myCov, type = "l", ylim = c(0, max(myCov)),
ylab = "covariance", xlab = "distance", xaxs = "i", yaxs = "i")
}
|
d4cab169990c0813ca5f00c62e6dacd0951bcb73 | 597845777259112d3a912256466242500a6f7533 | /R/zzz.R | 75a649f3e64b88ed9ff0256fe4a9859e547eb889 | [
"MIT"
] | permissive | ptitle/envirem | e27187556cd8fb3afac2f8fb0572c660c42a44b9 | 40cccd0485324605724284c6936206327a4f1002 | refs/heads/master | 2023-04-27T14:16:23.463364 | 2023-04-14T16:04:35 | 2023-04-14T16:04:35 | 61,913,099 | 12 | 1 | null | null | null | null | UTF-8 | R | false | false | 197 | r | zzz.R | .onAttach <- function(libname, pkgname) {
packageStartupMessage("\nPlease see the vignette at https://envirem.github.io/ENVIREM_tutorial.html for a detailed walk-through of this package.\n")
}
|
7e9400f81a0e89456b1d4e65d02949dcca201aee | 66a05f70f24795891665f33d04b354c48c56cec3 | /scripts/draw_pi.R | 8827c02b855ae875b10d78306223b6a9a7cabcfe | [] | no_license | rz520/LittorinaPipeline | 355158579d4af6db4fe4a17e54e43e8eed491bca | d3ea12012ada23b33782915a8e92a2008e9459c5 | refs/heads/master | 2023-06-24T02:48:45.693962 | 2021-07-28T14:35:54 | 2021-07-28T14:35:54 | 390,198,890 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,249 | r | draw_pi.R | #Use pi value in angsd thetaStat output in every ecotype and linkage map data
#to draw scatter plot in every chromosome and ecotype category
library(ggplot2)
library(reshape2)
#read linkage map data
lg <- read.table('data/Linkage_map.txt', header = T)
#read crab window theta data
crab_theta <- read.table('data/crab_theta.thetasWindow.pestPG', header = T)
names(crab_theta)[2] <- "contig"
#data wrangling
crab_theta_lg <- merge(crab_theta,lg)
#read hybrid window theta data
hybrid_theta <- read.table('data/hybrid_theta.thetasWindow.pestPG', header = T)
names(hybrid_theta)[2] <- "contig"
#data wrangling
hybrid_theta_lg <- merge(hybrid_theta,lg)
#read wave window theta data
wave_theta <- read.table('data/wave_theta.thetasWindow.pestPG', header = T)
names(wave_theta)[2] <- "contig"
#data wrangling
wave_theta_lg <- merge(wave_theta,lg)
#unite theta data of crab, hybrid and wave
crab_theta_lg$population <- 'crab'
hybrid_theta_lg$population <- 'hybird'
wave_theta_lg$population <- 'wave'
theta_lg <- rbind(crab_theta_lg, hybrid_theta_lg, wave_theta_lg)
#draw and save plot
pdf("pictures/pi_fold_1.pdf",12,12)
theme_set(theme_bw())
ggplot(data = theta_lg, aes(x=position,y=tP)) + geom_point() + facet_grid(population~LG)
graphics.off()
|
2c43bce9a6995bbbf863b1f48999b03d46d837c8 | c7904f189856042d29b280cde615ddbc5b79a168 | /Graph Types Code/Alluvial.R | b4e33ab1d3be02b8900d8adb96a5eec74d92d398 | [] | no_license | marioamz/CentAmMigrationDataViz | cee1b544083863a8860aaad3160b3dd201f1dc9d | 39a4a46ca40212995d5d7f99cc9442c80adeef76 | refs/heads/master | 2020-04-17T11:55:18.355764 | 2019-03-04T04:04:23 | 2019-03-04T04:04:23 | 166,559,646 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 11,249 | r | Alluvial.R | # Read in all 2017 data
install.packages('ggalluvial')
library(dplyr)
library(ggplot2)
library(ggalluvial)
hmex <- read.csv('Data/honddev2017mex.csv')
hmex$pais <- 'Honduras'
hmex$deported <- 'Mexico'
husa <- read.csv('Data/honddev2017usa.csv')
husa$pais <- 'Honduras'
husa$deported <- 'USA'
gmex <- read.csv('Data/guatedev2017mex.csv')
gmex$pais <- 'Guatemala'
gmex$deported <- 'Mexico'
gusa <- read.csv('Data/guatedev2017usa.csv')
gusa$pais <- 'Guatemala'
gusa$deported <- 'USA'
emex <- read.csv('Data/elsdev2017mex.csv')
emex$pais <- 'El Salvador'
emex$deported <- 'Mexico'
eusa <- read.csv('Data/elsdev2017usa.csv')
eusa$pais <- 'El Salvador'
eusa$deported <- 'USA'
usa1 <- smartbind(eusa, gusa)
usa <- smartbind(usa1, husa)
rm(usa1)
mex1 <- smartbind(emex, gmex)
mex <- smartbind(mex1, hmex)
rm(mex1)
# 1) Do Central American migrants take different paths to arrive at the United States?
## Alluvial graph charting travel from the Guatemala border to the
## US-Mexico border for all countries in 2017.
### Keep origin/deporting country plus the five route questions.
alluvial <- usa[c('pais', 'deported', 'p14_1', 'p19_1e', 'p20e', 'p26', 'p35cn')]
### Keep positive codes below each question's upper cutoff (the large
### cutoffs presumably exclude nonresponse codes -- TODO confirm against
### the questionnaire codebook).
clean <- alluvial %>%
  filter(p14_1 > 0, p19_1e > 0, p20e > 0, p26 > 0, p35cn > 0) %>%
  filter(p14_1 < 2000000, p19_1e < 40, p20e < 40, p26 < 285000000, p35cn < 58000)
### Tally each unique route; keep routes taken more than 5 times.
alluvialnodes <- clean %>%
  count(pais, p14_1, p19_1e, p20e, p26, p35cn) %>%
  rename(Freq = n)
touse <- filter(alluvialnodes, Freq > 5)
# NOTE(review): order() returns a permutation of row indices, not ranks --
# if the intent was to rank the flows by frequency, rank() is likely meant.
touse$Freq <- order(touse$Freq)
#### Get string values: replace numeric survey place codes with readable
#### names so the alluvial strata are labeled with place names.
# NOTE(review): the 'touse' table built just above only contains columns
# pais, p14_1, p19_1e, p20e, p26, p35cn and Freq -- none of p15, p22_1e,
# p22_2e or p34e exist here, so these assignments cannot relabel the
# plotted axes (and subset-assigning into a nonexistent column is
# expected to error on a non-empty data frame). Confirm whether these
# recodes were meant to target p14_1 / p19_1e / p20e / p26 instead.
# Guatemala-Mexico border crossing points (p15 codes).
touse$p15[touse$p15 == 1217001] <- 'Tecun Uman'
touse$p15[touse$p15 == 1705099] <- 'Naranjo'
touse$p15[touse$p15 == 1705016] <- 'El Ceibo'
touse$p15[touse$p15 == 1705015] <- 'Bethel'
touse$p15[touse$p15 == 1312047] <- 'La Mesilla'
touse$p15[touse$p15 == 1305024] <- 'Gracias a Dios'
touse$p15[touse$p15 == 1215013] <- 'El Carmen'
# Mexican state codes, first state entered (p22_1e) -- presumably INEGI
# state numbering; verify against the survey codebook.
touse$p22_1e[touse$p22_1e == 7] <- 'Chiapas'
touse$p22_1e[touse$p22_1e == 27] <- 'Tabasco'
# Mexican state codes, second state recorded (p22_2e).
touse$p22_2e[touse$p22_2e == 7] <- 'Chiapas'
touse$p22_2e[touse$p22_2e == 27] <- 'Tabasco'
touse$p22_2e[touse$p22_2e == 30] <- 'Veracruz'
touse$p22_2e[touse$p22_2e == 28] <- 'Tamaulipas'
touse$p22_2e[touse$p22_2e == 24] <- 'San Luis Potosi'
touse$p22_2e[touse$p22_2e == 21] <- 'Puebla'
touse$p22_2e[touse$p22_2e == 20] <- 'Oaxaca'
touse$p22_2e[touse$p22_2e == 19] <- 'Nuevo Leon'
touse$p22_2e[touse$p22_2e == 11] <- 'Guanajuato'
touse$p22_2e[touse$p22_2e == 9] <- 'Mexico City'
# Mexican state codes for question p34e (same numbering as above).
touse$p34e[touse$p34e == 7] <- 'Chiapas'
touse$p34e[touse$p34e == 27] <- 'Tabasco'
touse$p34e[touse$p34e == 30] <- 'Veracruz'
touse$p34e[touse$p34e == 28] <- 'Tamaulipas'
touse$p34e[touse$p34e == 24] <- 'San Luis Potosi'
touse$p34e[touse$p34e == 21] <- 'Puebla'
touse$p34e[touse$p34e == 20] <- 'Oaxaca'
touse$p34e[touse$p34e == 19] <- 'Nuevo Leon'
touse$p34e[touse$p34e == 11] <- 'Guanajuato'
touse$p34e[touse$p34e == 9] <- 'Mexico City'
# Alluvial plot for question 1: one axis per route stage, one flow per
# unique route, flow height = route frequency, colored by origin country.
ggplot(touse,
       aes(y = Freq,
           axis1 = p14_1, axis2 = p19_1e, axis3 = p20e, axis4 = p26)) +
  geom_alluvium(aes(fill = pais),
                width = 0, knot.pos = 0, reverse = FALSE) +
  guides(fill = FALSE) +
  geom_stratum(width = 1/8, reverse = FALSE) +
  geom_text(stat = "stratum", label.strata = TRUE, reverse = FALSE) +
  scale_x_continuous(breaks = 1:4,
                     labels = c("Mexico Port of Entry", "Mexican City Visited",
                                "Mexican City Where Spent Most Time",
                                "US Port of Entry")) +
  #coord_flip() +
  # Fixed title: "Titanic survival by class and sex" was a leftover from
  # the ggalluvial example this plot was adapted from.
  ggtitle("Routes of Central American migrants to the US, 2017")
# 2) Has the path changed over years for Honduran migrants?
## This is an alluvial graph of Honduran migrants from 2014 to 2017,
## where each flow is a year of migrants.
### Read in 2014-2017 Honduras (US-deportee) data.
husa14 <- read.csv('Data/honddev2014usa.csv')
husa15 <- read.csv('Data/honddev2015usa.csv')
husa16 <- read.csv('Data/honddev2016usa.csv')
husa17 <- read.csv('Data/honddev2017usa.csv')
### Keep year + the four route questions. Question numbering differs
### across waves (2014 uses p25 where later waves use p26; 2017 uses
### p19_1e where earlier waves use p19e1), so rename to the common
### 2015/2016 names before stacking.
h17 <- husa17[c('year', 'p14_1', 'p19_1e', 'p20e', 'p26')]
h16 <- husa16[c('year', 'p14_1', 'p19e1', 'p20e', 'p26')]
h15 <- husa15[c('year', 'p14_1', 'p19e1', 'p20e', 'p26')]
h14 <- husa14[c('year', 'p14_1', 'p19e1', 'p20e', 'p25')]
colnames(h14)[colnames(h14) == "p25"] <- "p26"
colnames(h17)[colnames(h17) == "p19_1e"] <- "p19e1"
### Stack all four years in one call. bind_rows() comes from dplyr,
### which this script loads; the original gtools::smartbind() calls
### referenced a package that was never attached.
hond <- bind_rows(h14, h15, h16, h17)
rm(h14, h15, h16, h17)
### Keep rows with valid codes on all four questions: positive, and
### below each question's upper cutoff in a single pipeline.
clean <- hond %>%
  filter(p14_1 > 0, p19e1 > 0, p20e > 0, p26 > 0) %>%
  filter(p14_1 < 2000000, p19e1 < 40, p20e < 40, p26 < 285000000)
### Tally unique year/route combinations; keep those seen more than 5 times.
alluvialnodes <- clean %>%
  count(year, p14_1, p19e1, p20e, p26) %>%
  rename(Freq = n)
touse <- filter(alluvialnodes, Freq > 5)
# NOTE(review): order() yields row indices, not ranks; rank() may be intended.
touse$Freq <- order(touse$Freq)
rm(clean, alluvialnodes)
#### Get string values: replace numeric place codes with readable names
#### for the alluvial strata labels.
# NOTE(review): the 'touse' built above only has columns year, p14_1,
# p19e1, p20e, p26 and Freq -- p15, p22_1e, p22_2e and p34e do not exist,
# so these assignments cannot relabel the plotted axes (and assigning
# into a nonexistent column via an empty index is expected to error on a
# non-empty data frame). Confirm the intended target columns.
# Guatemala-Mexico border crossing points (p15 codes).
touse$p15[touse$p15 == 1217001] <- 'Tecun Uman'
touse$p15[touse$p15 == 1705099] <- 'Naranjo'
touse$p15[touse$p15 == 1705016] <- 'El Ceibo'
touse$p15[touse$p15 == 1705015] <- 'Bethel'
touse$p15[touse$p15 == 1312047] <- 'La Mesilla'
touse$p15[touse$p15 == 1305024] <- 'Gracias a Dios'
touse$p15[touse$p15 == 1215013] <- 'El Carmen'
# Mexican state codes, first state entered (p22_1e) -- presumably INEGI
# state numbering; verify against the survey codebook.
touse$p22_1e[touse$p22_1e == 7] <- 'Chiapas'
touse$p22_1e[touse$p22_1e == 27] <- 'Tabasco'
# Mexican state codes, second state recorded (p22_2e).
touse$p22_2e[touse$p22_2e == 7] <- 'Chiapas'
touse$p22_2e[touse$p22_2e == 27] <- 'Tabasco'
touse$p22_2e[touse$p22_2e == 30] <- 'Veracruz'
touse$p22_2e[touse$p22_2e == 28] <- 'Tamaulipas'
touse$p22_2e[touse$p22_2e == 24] <- 'San Luis Potosi'
touse$p22_2e[touse$p22_2e == 21] <- 'Puebla'
touse$p22_2e[touse$p22_2e == 20] <- 'Oaxaca'
touse$p22_2e[touse$p22_2e == 19] <- 'Nuevo Leon'
touse$p22_2e[touse$p22_2e == 11] <- 'Guanajuato'
touse$p22_2e[touse$p22_2e == 9] <- 'Mexico City'
# Mexican state codes for question p34e (same numbering as above).
touse$p34e[touse$p34e == 7] <- 'Chiapas'
touse$p34e[touse$p34e == 27] <- 'Tabasco'
touse$p34e[touse$p34e == 30] <- 'Veracruz'
touse$p34e[touse$p34e == 28] <- 'Tamaulipas'
touse$p34e[touse$p34e == 24] <- 'San Luis Potosi'
touse$p34e[touse$p34e == 21] <- 'Puebla'
touse$p34e[touse$p34e == 20] <- 'Oaxaca'
touse$p34e[touse$p34e == 19] <- 'Nuevo Leon'
touse$p34e[touse$p34e == 11] <- 'Guanajuato'
touse$p34e[touse$p34e == 9] <- 'Mexico City'
# Alluvial plot for question 2: same four route axes as question 1, but
# flows are colored by survey year instead of origin country.
ggplot(touse,
       aes(y = Freq,
           axis1 = p14_1, axis2 = p19e1, axis3 = p20e, axis4 = p26)) +
  geom_alluvium(aes(fill = year),
                width = 0, knot.pos = 0, reverse = FALSE) +
  guides(fill = FALSE) +
  geom_stratum(width = 1/8, reverse = FALSE) +
  geom_text(stat = "stratum", label.strata = TRUE, reverse = FALSE) +
  scale_x_continuous(breaks = 1:4,
                     labels = c("Mexico Port of Entry", "Mexican City Visited",
                                "Mexican City Where Spent Most Time",
                                "US Port of Entry")) +
  #coord_flip() +
  # Fixed title: "Titanic survival by class and sex" was a leftover from
  # the ggalluvial example this plot was adapted from.
  ggtitle("Routes of Honduran migrants to the US by year, 2014-2017")
# 3) Are the paths different for Honduran immigrants that make it to the US versus
# those who don't?
hmex <- read.csv('Data/honddev2017mex.csv')
hmex$pais <- 'Honduras'
hmex$deported <- 'Mexico'
husa <- read.csv('Data/honddev2017usa.csv')
husa$pais <- 'Honduras'
husa$deported <- 'USA'
cols_to_keep_mex <- c('pais', 'deported', 'p15', 'p22_1e', 'p22_1l', 'p22_2e', 'p22_2l', 'p24l', 'p34l')
cols_to_keep_usa <- c('pais', 'deported', 'p14_1', 'p19_1e', 'p19_1l', 'p19_2e', 'p19_2l', 'p20l', 'p26')
husa1 <- husa[cols_to_keep_usa]
hmex1 <- hmex[cols_to_keep_mex]
colnames(husa1)[colnames(husa1)=="p14_1"] <- "p15"
colnames(husa1)[colnames(husa1)=="p19_1e"] <- "p22_1e"
colnames(husa1)[colnames(husa1)=="p19_1l"] <- "p22_1l"
colnames(husa1)[colnames(husa1)=="p19_2e"] <- "p22_2e"
colnames(husa1)[colnames(husa1)=="p19_2l"] <- "p22_2l"
colnames(husa1)[colnames(husa1)=="p20l"] <- "p24l"
finalhond <- dplyr::bind_rows(husa1, hmex1)
finalhond[is.na(finalhond)] <- 0
## Alluvial graph charting travel from Guatemala to Mexico or US for Honduran migrants
drop1 <- finalhond %>% filter(p15 >= 0, p22_1l >= 0, p22_2l >= 0, p26 >= 0, p24l >= 0)
clean <- drop1 %>% filter(p15 < 2000000, p22_1l < 900000000, p22_2l < 900000000, p26 < 300000000, p24l < 9000000000)
rm(drop1)
alluvialnodeshond <- rename(count(clean, deported, p15, p22_1l, p22_2l, p24l, p26, p34l), Freq = n)
touse <- alluvialnodeshond %>% filter(Freq>2)
## Make names for it
touse$p15[touse$p15 == 1217001] <- 'Tecun Uman'
touse$p15[touse$p15 == 1705099] <- 'Naranjo'
touse$p15[touse$p15 == 1705016] <- 'El Ceibo'
touse$p15[touse$p15 == 1705015] <- 'Bethel'
touse$p15[touse$p15 == 1312047] <- 'La Mesilla'
touse$p22_1l[touse$p22_1l == 301930001] <- 'Veracruz, VER'
touse$p22_1l[touse$p22_1l == 300390001] <- 'Coatzacoalcos, VER'
touse$p22_1l[touse$p22_1l == 270170001] <- 'Tenosique de Pino Suarez, TAB'
touse$p22_1l[touse$p22_1l == 270040001] <- 'Villahermosa, TAB'
touse$p22_1l[touse$p22_1l == 240280001] <- 'San Luis Potosi, SLP'
touse$p22_1l[touse$p22_1l == 190390001] <- 'Monterrey, NL'
touse$p22_1l[touse$p22_1l == 70650001] <- 'Palenque, CHIS'
touse$p22_1l[touse$p22_1l == 70270001] <- 'Chiapa de Corzo, CHIS'
touse$p24l[touse$p24l == 300030001] <- 'Acayucan, VER'
touse$p24l[touse$p24l == 280320001] <- 'Reynosa, TAMPS'
touse$p24l[touse$p24l == 280270001] <- 'N. Laredo, TAMPS'
touse$p24l[touse$p24l == 270020001] <- 'Cardenas, TAB'
touse$p24l[touse$p24l == 301930001] <- 'Veracruz, VER'
touse$p24l[touse$p24l == 300390001] <- 'Coatzacoalcos, VER'
touse$p24l[touse$p24l == 270170001] <- 'Tenosique de Pino Suarez, TAB'
touse$p24l[touse$p24l == 270040001] <- 'Villahermosa, TAB'
touse$p24l[touse$p24l == 70650001] <- 'Palenque, CHIS'
touse$p34l[touse$p34l == 0] <- 'Avoided Deportation'
touse$p34l[touse$p34l == 300030001] <- 'Acayucan, VER'
touse$p34l[touse$p34l == 270020001] <- 'Cardenas, TAB'
touse$p34l[touse$p34l == 301930001] <- 'Veracruz, VER'
touse$p34l[touse$p34l == 300390001] <- 'Coatzacoalcos, VER'
touse$p34l[touse$p34l == 270170001] <- 'Tenosique de Pino Suarez, TAB'
touse$p34l[touse$p34l == 70650001] <- 'Palenque, CHIS'
touse$p34l[touse$p34l == 270040001] <- 'Villahermosa, TAB'
touse$p26[touse$p26 == 0] <- 'Deported Prior'
touse$p26[touse$p26 == 280320001] <- 'Reynosa, TAMPS'
touse$p26[touse$p26 == 280270001] <- 'N. Laredo, TAMPS'
ggplot(touse,
aes(y = Freq,
axis1 = p15, axis2 = p22_1l, axis3 = p24l, axis4 = p26)) +
geom_alluvium(aes(fill = deported),
width = 0.1, knot.pos = 0, reverse = FALSE) +
guides(fill = FALSE) +
geom_stratum(width = 1/10, reverse = FALSE) +
geom_text(stat = "stratum", label.strata = TRUE, reverse = FALSE) +
scale_x_continuous(breaks = 1:4, labels = c("Mexican Port of Entry", "Mexican City Visited", "Mexican City Where Spent Most Time" ,"US Port of Entry")) +
labs(
title = "As they journey northward, Honduran migrants who get \n deported in Mexico (red) get stuck in cities that border the Gulf of Mexico",
subtitle = "Most migrants who make it to the US (blue) enter through Reynosa",
caption = "Source: EMIF Sur (2014-2017)") + theme(
plot.title = element_text(color="black", size=12, face="bold", hjust=0.5),
plot.subtitle = element_text(color="black", size=10, face="italic", hjust=0.5),
axis.title.x = element_text(color="black", size=8),
axis.title.y = element_text(color="black", size=8),
plot.caption = element_text(color="black", size=6, face="italic"))
ggsave('Alluvial.pdf', path="Visualizations/", width=11, height=8, units='in')
|
7b75257ffd09bea904391a0b227aa73ccb0686ff | ab9734a8d40e8d4934c54ee5d8a238a9ac5f1f50 | /R/cleaningscript.R | d6f780d0cbcf0413cc195a701dd9fd09dcc68e8b | [] | no_license | sball33/stt863 | 14b9dc579e8b58bc6199ce2ecd9f293a28466dc0 | db032a90383df858dabf27ee29d4579c6542ccd9 | refs/heads/main | 2023-01-07T16:39:07.384282 | 2020-11-05T21:32:50 | 2020-11-05T21:32:50 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 12,767 | r | cleaningscript.R | library(tidyverse)
# Read and Clean ----------------------------------------------------------
files <- list.files(path = here::here("raw_data"), pattern = ".csv")
files
cleaning <- function(df){
df <- pivot_longer(df, cols = -country, names_to = "year")
}
data <- files %>%
map(function(x) read_csv(paste0("raw_data/", x))) %>%
setNames(gsub("\\.csv$", "", files)) %>%
map(cleaning) %>%
bind_rows(.id = "id") %>%
pivot_wider(names_from = id)
# Filtering ---------------------------------------------------------------
# data <- read_csv(here::here("data", "df.csv"))
OECD <- tibble::tribble(
~Country, ~Application, ~Negotiations, ~Invitation, ~`Membership[1]`, ~Geographic.location, ~Notes,
"Australia", NA, NA, NA, "7 June 1971", "Oceania", NA,
"Austria", NA, NA, NA, "29 September 1961", "Europe", "OEEC member.[6]",
"Belgium", NA, NA, NA, "13 September 1961", "Europe", "OEEC member.[6]",
"Canada", NA, NA, NA, "10 April 1961", "North America", NA,
"Chile", "November 2003[57][58]", "16 May 2007[26]", "15 December 2009[59]", "7 May 2010", "South America", NA,
"Colombia", "24 January 2011[60]", "30 May 2013[30]", "25 May 2018[61]", "28 April 2020", "South America", NA,
"Czech Republic", "January 1994[62]", "8 June 1994[63]", "24 November 1995[62]", "21 December 1995", "Europe", "Was a member of the rival Comecon from 1949 to 1991 as part of Czechoslovakia.",
"Denmark", NA, NA, NA, "30 May 1961", "Europe", "OEEC member.[6]",
"Estonia", NA, "16 May 2007[26]", "10 May 2010[64]", "9 December 2010", "Europe", NA,
"Finland", NA, NA, NA, "28 January 1969", "Europe", NA,
"France", NA, NA, NA, "7 August 1961", "Europe", "OEEC member.[6]",
"Germany", NA, NA, NA, "27 September 1961", "Europe", "Joined OEEC in 1949 (West Germany).[65] Previously represented by the Trizone.[6] East Germany was a member of the rival Comecon from 1950 until German reunification in 1990.",
"Greece", NA, NA, NA, "27 September 1961", "Europe", "OEEC member.[6]",
"Hungary", "December 1993[66]", "8 June 1994[63]", NA, "7 May 1996", "Europe", "Was a member of the rival Comecon from 1949 to 1991.",
"Iceland", NA, NA, NA, "5 June 1961", "Europe", "OEEC member.[6]",
"Ireland", NA, NA, NA, "17 August 1961", "Europe", "OEEC member.[6]",
"Israel", "15 March 2004[67]", "16 May 2007[26]", "10 May 2010[64]", "7 September 2010", "West Asia", NA,
"Italy", NA, NA, NA, "29 March 1962", "Europe", "OEEC member.[6]",
"Japan", "November 1962[68]", NA, "July 1963[68]", "28 April 1964", "East Asia", NA,
"South Korea", "29 March 1995[69]", NA, "25 October 1996[70]", "12 December 1996", "East Asia", "Officially the Republic of Korea",
"Latvia", NA, "29 May 2013[71]", "11 May 2016[72]", "1 July 2016[73]", "Europe", NA,
"Lithuania", NA, "9 April 2015[74]", "31 May 2018", "5 July 2018[75]", "Europe", NA,
"Luxembourg", NA, NA, NA, "7 December 1961", "Europe", "OEEC member.[6]",
"Mexico", NA, NA, "14 April 1994[76]", "18 May 1994", "North America", NA,
"Netherlands", NA, NA, NA, "13 November 1961", "Europe", "OEEC member.[6]",
"New Zealand", NA, NA, NA, "29 May 1973", "Oceania", NA,
"Norway", NA, NA, NA, "4 July 1961", "Europe", "OEEC member.[6]",
"Poland", "1 February 1994[77]", "8 June 1994[63]", "11 July 1996[78]", "22 November 1996", "Europe", "Was a member of the rival Comecon from 1949 to 1991.",
"Portugal", NA, NA, NA, "4 August 1961", "Europe", "OEEC member.[6]",
"Slovakia", "February 1994[79]", "8 June 1994[63]", "July 2000[79]", "14 December 2000", "Europe", "Was a member of the rival Comecon from 1949 to 1991 as part of Czechoslovakia.",
"Slovenia", "March 1996[80]", "16 May 2007[26]", "10 May 2010[64]", "21 July 2010", "Europe", NA,
"Spain", NA, NA, NA, "3 August 1961", "Europe", "Joined OEEC in 1958.[81]",
"Sweden", NA, NA, NA, "28 September 1961", "Europe", "OEEC member.[6]",
"Switzerland", NA, NA, NA, "28 September 1961", "Europe", "OEEC member.[6]",
"Turkey", NA, NA, NA, "2 August 1961", "West Asia", "OEEC member.[6]",
"United Kingdom", NA, NA, NA, "2 May 1961", "Europe", "OEEC member.[6]",
"United States", NA, NA, NA, "12 April 1961", "North America", NA
)
latin_am <- psData::countrycode_data %>% filter(
region %in% c("Caribbean", "Central America", "South America"))
saveRDS(c(latin_am,OECD$Country), here::here("data", "countries.RDS"))
countries <- readRDS(here::here("data", "countries.RDS"))
data %>%
filter(year > 2000, year < 2019, country %in% countries) %>%
write_csv(here::here("data", "df.csv"))
|
4741e9fbcb65bae9dc871536a7f196708ae4ceb3 | 338205cf3d15c26c8aef420fe2c47cde6f10f73a | /SharedCode/Scripts/cleanCMAP.R | 5abe0cd02e123249be586096940d14ee348544ca | [] | no_license | nlenssen/ENSOImpacts | 517a67cf3c55d7d9db189b6a7334d3e2cbb54d58 | 3cd359b266f4936603d94e06a97ecf5f7c2de46c | refs/heads/master | 2023-01-23T08:49:18.529165 | 2020-11-30T16:49:35 | 2020-11-30T16:49:35 | 283,616,399 | 1 | 0 | null | null | null | null | UTF-8 | R | false | false | 440 | r | cleanCMAP.R | # Clear workspace and garbage collect
rm(list = ls())
gc()
# Load namelist
source('SharedCode/Namelists/cleanData.Rnl')
# clean the 0.5 degree data
cmapFile <- 'cmap07.2018v1_2.5_1979_2017.nc'
cmapOutFileName <- 'cmapSeasonalv1_2.5.Rda'
source('SharedCode/Code/cleanDataCMAP.R')
# clean the 2.5 degree data
cmapFile <- 'cmap07.2018v2_2.5_1979_2017.nc'
cmapOutFileName <- 'cmapSeasonalv2_2.5.Rda'
source('SharedCode/Code/cleanDataCMAP.R') |
7eb29e740e628120ef807d8886f6f4d366bcf094 | 3c158405be85fb631944a1b8019db6e9b531583d | /iris.R | ec5537fe54f3382d514c8e032be97b7af58528e5 | [] | no_license | ganeshbalajiai/RCodeSupportVectorMachines | f6e85d7b71a602106182493f66d631b877bb0a5f | b46d7b12488a8146def8b4528b6daa05727d8922 | refs/heads/master | 2022-04-04T04:51:46.780754 | 2020-02-22T12:54:15 | 2020-02-22T12:54:15 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 544 | r | iris.R | data(iris)
View(iris)
library(caret)
split <- createDataPartition(iris$Species, p = 0.75, list= FALSE)
Train_data <- iris[split,]
Test_data <- iris[-split,]
Train_data[,-5] <- scale(Train_data[,-5])
Test_data[,-5] <- scale(Test_data[,-5])
View(Train_data)
library(e1071)
attach(Train_data)
model1 <- svm(Train_data$Species~., data = Train_data, kernel = "linear")
summary(model1)
pred <- predict(model1, Test_data)
pred
library(gmodels)
a <- CrossTable(pred, Test_data$Species)
mean(pred==Test_data$Species)
|
de855fa24c550511d2dfb7f03722065f1af0a60d | 63683d8f8a5ff62681fb51feb4512a1918a2de0c | /data-vis/ridgeplot.R | 5a742fc40a55556feb9567f63c5982c946c5c298 | [] | no_license | hendersontrent/critical-role | 794cff324b31d259a2eb350e38234eca8e7170a9 | 15709a43ef42cc9b5557e7e290d8ee5debb889a9 | refs/heads/master | 2022-12-02T13:09:58.292766 | 2020-08-14T23:59:39 | 2020-08-14T23:59:39 | 282,812,673 | 1 | 0 | null | 2020-08-14T23:59:41 | 2020-07-27T06:28:45 | R | UTF-8 | R | false | false | 1,322 | r | ridgeplot.R | #---------------------------------------
# This script sets out to produce some
# interesting data visualisations for
# Critical Role: Wildemouth rolls
#---------------------------------------
#----------------------------------------
# Author: Trent Henderson, 28 July 2020
#----------------------------------------
# Load data
load("data/clean.Rda")
if (!exists(keepers)) {
keepers <- c("keepers", "clean")
} else {
keepers <- union(keepers, "clean")
}
#---------------------DATA VISUALISATION----------------------------
the_dens <- clean %>%
filter(character %in% the_nein) %>%
mutate(character = case_when(
character == "Nott" ~ "Veth/Nott",
character == "Veth" ~ "Veth/Nott",
TRUE ~ character)) %>%
filter(total_value < 100) %>%
ggplot(aes(x = total_value, y = character, fill = ..x..)) +
geom_density_ridges_gradient(scale = 3, rel_min_height = 0.01) +
labs(title = "Distribution of The Mighty Nein's roll values",
subtitle = "Excludes Nat1s and Nat20s",
x = "Total Roll Value",
y = NULL,
fill = "Roll value") +
theme_bw() +
scale_x_continuous(limits = c(0,50),
breaks = c(0,10,20,30,40,50)) +
scale_fill_gradient(low = "#A0E7E5", high = "#FD62AD") +
theme(panel.grid.minor = element_blank())
print(the_dens)
|
1add846783ab62313593138331628f59d1caa757 | 341b66d831198c801945f552608ed5e6d8c554d2 | /code_R/FADN_sample_script.R | 165bb3630efca01e8df30ca0cd366db366133a60 | [] | no_license | progillespie/data_FADN_PUBLIC | df257d0152c32951cea781cc2c08afc10f8b2432 | 99ebb4e17d0a97ca3aa1d2acc14f96fd132fc338 | refs/heads/master | 2016-09-05T19:11:30.441859 | 2014-07-23T21:59:10 | 2014-07-23T21:59:10 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 416 | r | FADN_sample_script.R | # Gets (public) FADN data from the web, reads csv's and converts
# them to RData files (as well as giving vars descriptive names),
# and loads all of them into memory as separate objects (merging
# can proceed from there as needed)
library(data.table)
source('D:/Data/data_FADN_PUBLIC/code_R/getFADN.R')
source('D:/Data/data_FADN_PUBLIC/code_R/readFADN.R')
source('D:/Data/data_FADN_PUBLIC/code_R/loadFADN.R') |
b90a63aee3efb6748e9155ed01ef8bdfcfbab555 | f7200e99e92c04b4237a2f4055f73f98b196bbad | /R/nlin_CI.R | f1352adbe402017ba545ed77047a96c85261d0b7 | [] | no_license | ChrisKust/rexpar | 812d10a7eb8c3be26de46a44b4bf36d1bfc64abb | f2c3c7be835233729a0d12b723888ffaf0ef2a91 | refs/heads/master | 2020-04-06T07:01:50.286427 | 2016-07-28T12:02:59 | 2016-07-28T12:02:59 | 25,927,083 | 1 | 4 | null | 2016-06-22T17:22:10 | 2014-10-29T15:22:43 | R | UTF-8 | R | false | false | 2,563 | r | nlin_CI.R | nlin_CI <- function(y, level, plots = FALSE, notion = "dS1", ncoresC = 1, addPar = FALSE, spar = 0.8, eps = 1e-19)
{
cands <- nlin1_theta_f(y)
cands0 <- cbind(cands$t1, cands$t2) + eps
#cands1<-cbind(cands$t1,cands$t2)+eps
#cands2<-cbind(cands$t1,cands$t2)+eps
#cands3<-cbind(cands$t1,cands$t2)-eps
#cands4<-cbind(cands$t1,cands$t2)-eps
#cands5<-cbind(cands$t1+eps,cands$t2-eps)
#cands6<-cbind(cands$t1-eps,cands$t2+eps)
#cands<-rbind(cands0,cands1,cands2,cands3,cands4,cands5,cands6)
cands <- cands0
TS <- switch(notion,
"dS1" =
{
unlist(apply(cands, 1, dS1_nlin_test, y = y, alpha = (1 - level)))
},
"dS2" =
{
unlist(apply(cands, 1, dS2_nlin_test, y = y, alpha = (1 - level)))
},
"dS3" =
{
unlist(apply(cands, 1, dS3_nlin_test, y = y, alpha = (1 - level)))
},
"dS_pre" =
{
if(is.numeric(ncoresC) && ncoresC > 1 && ncoresC <= parallel::detectCores() * 2)
{
cl <- makeCluster(ncoresC)
TS_temp <- unlist(parApply(cl, cands, 1, dS1_nlin_test, y = y, alpha = (1 - level)))
inCIs_temp <- as.vector(TS_temp[seq(2, length(TS_temp), 2)])
cands <- cands[inCIs_temp == 0, ]
return(unlist(parApply(cl, cands, 1, dS_nlin_test, y = y, alpha = (1 - level), ncores = 1)))
stopCluster(cl)
}
if(ncoresC == 1)
{
TS_temp <- unlist(apply(cands, 1, dS1_nlin_test, y = y, alpha = (1 - level)))
inCIs_temp <- as.vector(TS_temp[seq(2, length(TS_temp), 2)])
cands <- cands[inCIs_temp == 0, ]
unlist(apply(cands, 1, dS_nlin_test, y = y, alpha = (1 - level), ncores = 1))
}
},
"dS" =
{
if(is.numeric(ncoresC) && ncoresC > 1 && ncoresC <= detectCores() * 2)
{
cl <- makeCluster(ncoresC)
return(unlist(parApply(cl, cands, 1, dS_nlin_test, y = y, alpha = (1 - level), ncores = 1)))
stopCluster(cl)
}
if(ncoresC == 1)
{
unlist(apply(cands, 1, dS_nlin_test, y = y, alpha = (1 - level), ncores = 1))
}
},
stop("Insert a valid notion!")
)
inCIs <- as.vector(TS[seq(2, length(TS), 2)])
if(plots && requireNamespace("alphahull", character.only = TRUE))
{
requireNamespace("alphahull", character.only = TRUE)
a <- which(inCIs == 0)
points(cands[a, ], col = 2)
ah <- alphahull::ashape(cands[a, 1], cands[a, 2], alpha = spar)
plot(ah, add = addPar, col = 3)
}
list(par = cands, inCI = inCIs)
} |
033d01dced488688960fa4da5c9c8f2a17d1c8fb | de8d204d522eccdd8e388bff122ef4e765ec7d17 | /R/simulation_more_final.R | 4f9e9bf3a016ae3226cee59aafbbeaa71a8ca765 | [] | no_license | kevinmcgregor/CausalChallenge2016 | ecd9e5e2cd46c9d60b64b1d8d236f111cda65924 | 18966ecd91c36c5638c3dcf558a0d9e3fc029601 | refs/heads/master | 2021-01-20T20:32:44.601031 | 2016-07-28T15:14:03 | 2016-07-28T15:14:03 | 61,646,823 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 4,332 | r | simulation_more_final.R | #Another 'final' simulation with some of the predictions coming from directly applying formulas
library(mice)
library(Metrics)
# read data
mouse.data <- readRDS("mouse_data.rds")
var.names = readRDS("variable_names.RDS")
# consider only complete observations: delete genotypes with missing values
full_mouse <- mouse.data[-which(mouse.data$geno %in% c("1796_1", "3621_1", "4045_1", "3803_1", "3887_1")),]
Nsimul = 100
res_direct <- matrix(0, Nsimul, 5)
colnames(res_direct) = c("IMPC_HEM_027_001", "IMPC_HEM_029_001", "IMPC_HEM_031_001", "IMPC_HEM_034_001", "IMPC_HEM_038_001")
sim_geno = matrix("", Nsimul, 5)
wh_var_27 = which(colnames(full_mouse)=="IMPC_HEM_027_001")
wh_var_29 = which(colnames(full_mouse)=="IMPC_HEM_029_001")
wh_var_31 = which(colnames(full_mouse)=="IMPC_HEM_031_001")
# consider only complete observations: delete genotypes with missing values
full_mouse <- mouse.data[-which(mouse.data$geno %in% c("1796_1", "3621_1", "4045_1", "3803_1", "3887_1")),]
for(i in 1:Nsimul)
{
# temporary dataframe
temp <- full_mouse
# empty method vector
method <- c("", "", "", "", "", "", "", "", "", "", "", "", "", "", "", "", "", "", "", "", "", "", "", "")
# randomly select phenotypes to be deleted
NAgeno <- sample(unique(temp$geno)[2:9], 5)
NAvar <- c("IMPC_HEM_027_001", "IMPC_HEM_029_001", "IMPC_HEM_031_001", "IMPC_HEM_034_001", "IMPC_HEM_038_001")
# save true values + delete in temp
true = list(IMPC_HEM_027_001=temp[temp$geno==NAgeno[1],NAvar[1]],
IMPC_HEM_029_001=temp[temp$geno==NAgeno[2],NAvar[2]],
IMPC_HEM_031_001=temp[temp$geno==NAgeno[3],NAvar[3]],
IMPC_HEM_034_001=temp[temp$geno==NAgeno[4],NAvar[4]],
IMPC_HEM_038_001=temp[temp$geno==NAgeno[5],NAvar[5]])
for (j in 1:5) {
temp[temp$geno == NAgeno[j], colnames(temp) == NAvar[j]] <- NA
}
sim_geno[i,] = NAgeno
#Predict lympho diff before MICE
predic = predic_no29 = list(IMPC_HEM_027_001=NULL,IMPC_HEM_029_001=NULL,IMPC_HEM_031_001=NULL,
IMPC_HEM_034_001=NULL,IMPC_HEM_038_001=NULL)
temp$IMPC_HEM_031_001[temp$geno==NAgeno[3]] =
temp$IMPC_HEM_032_001[temp$geno==NAgeno[3]]/temp$IMPC_HEM_001_001[temp$geno==NAgeno[3]]*100
predic[[3]] = temp$IMPC_HEM_031_001[temp$geno==NAgeno[3]]
#Predict RBCDW before MICE
sdcv = NA
sdcv = temp$IMPC_HEM_005_001*temp$IMPC_HEM_027_001/100
lm_sdcv = lm(log(sdcv)~.-IMPC_HEM_005_001-IMPC_HEM_027_001, data=temp[,-c(1,3,4)])
pred_sdcv = exp(predict(lm_sdcv, newdata = temp[temp$geno==NAgeno[1],]))
temp$IMPC_HEM_027_001[temp$geno==NAgeno[1]] = pred_sdcv/temp$IMPC_HEM_005_001[temp$geno==NAgeno[1]] * 100
predic[[1]] = temp$IMPC_HEM_027_001[temp$geno==NAgeno[1]]
# create data frame with all variables to impute + predictors
impMICE <- as.data.frame(cbind(temp[,2:3], temp[,5:26]))
# column ids of variables with missing values
id <- which(colnames(impMICE) %in% NAvar[-c(1,3)])
# method norm + pmm
method[id] <- "norm"
# matrix to specify which preditors to use
pred <- matrix(0, nrow = ncol(impMICE), ncol = ncol(impMICE))
pred[id,] <- rep(1, ncol(impMICE))
diag(pred) <- rep(0, ncol(impMICE))
# apply MICE
MICE <- mice(impMICE, m = 30, method = method, predictorMatrix = pred, printFlag = FALSE)
# get predictions and compute mse
mse = rep(0,5)
mse_no29 = rep(0,5)
imputed_mat = impMICE
k=1
for (j in 1:5) {
if (j %in% c(2,4,5)) {
predic[[j]] = apply(MICE$imp[id[k]][[1]], 1, mean)
#Put imputed values back into data frame
imputed_mat[imputed_mat$geno==NAgeno[j],id[k]] = predic[[j]]
k=k+1
}
mse[j] <- mse(actual = true[[j]], predicted = predic[[j]])
}
#Now, given the predictions for the other variables, predict var 29
lm_mod = lm(IMPC_HEM_029_001~., data=imputed_mat[!imputed_mat$geno %in% NAgeno,-2])
predic[[2]] = predict(lm_mod, newdata=imputed_mat[imputed_mat$geno==NAgeno[2],])
mse[2] = mse(actual = true[[2]], predicted = predic[[2]])
# save results
res_direct[i, ] <- mse
cat(i, "\n")
}
save(res_direct, file="simulation_more_final.RData")
#Comparing to "final" simulation
load("simulation_final.RData")
#By variable
colMeans(res_replace29)
colMeans(res_direct)
#Overall
mean(res_replace29)
mean(res_direct)
|
12d9509d2058cadbaad6641c36070b1aed583f7e | 5ccb3d615c18eaf15743ad2fa4fdf4d3ca58b108 | /cachemean.R | 10960da4d7c98f970240f9fe139ac71d666a9c83 | [] | no_license | cnovacyu/datasciencecoursera | ea4e341d9413287020c4e85e10a25b2666fd5d1c | b33fe95a04b1a0eb08e7c0761e26510c4d443e37 | refs/heads/master | 2022-10-22T23:28:12.022023 | 2020-06-08T23:10:09 | 2020-06-08T23:10:09 | 257,652,402 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 936 | r | cachemean.R | ##create a special object that stores a numeric vector and cache's its mean
##create list containing a function to set the value of the vector, get the
##value of the vector, set the value of the mean, and get the value of the mean
makeVector <- function(x = numeric()) {
m <- NULL
set <- function(y) {
x <<- y
m <<- NULL
}
get <- function() x
setmean <- function(mean) m <<- mean
getmean <- function() m
list(set = set, get = get,
setmean = setmean,
getmean = getmean)
}
## checks to see if mean has already been computed with previous function. If
## yes, gets mean from cache and does not recompute inverse. If not, it will
## calculate the mean of the cache
cachemean <- function(x, ...) {
m <- x$getmean()
if(!is.null(m)) {
message("getting cached data")
return(m)
}
data <- x$get()
m <- mean(data, ...)
x$setmean(m)
m
} |
acdc5893e1db38cc4384b343d7540985f1e0db57 | 9107afe4e896649270406a53ff1ea93f745e788c | /timeSinceFire.R | ee6f16486470d3711a45c16f4e246a463e191ba7 | [] | no_license | PredictiveEcology/timeSinceFire | 688bd924d637182df7d9a5572add8f82fa73f3d0 | 3cb09e475b039232dfc1c8bca5e63bf787bb76d7 | refs/heads/master | 2022-10-28T08:58:28.889380 | 2022-10-13T03:34:27 | 2022-10-13T03:34:27 | 221,830,171 | 0 | 0 | null | 2019-11-15T02:45:58 | 2019-11-15T02:45:56 | null | UTF-8 | R | false | false | 5,411 | r | timeSinceFire.R | defineModule(sim, list(
name = "timeSinceFire",
description = "This tracks time since fire for the LandWeb application.",
keywords = c("fire", "LandWeb"),
authors = c(person(c("Steve", "G"), "Cumming", email = "stevec@sbf.ulaval.ca", role = c("aut", "cre"))),
childModules = character(),
version = list(SpaDES.core = "0.2.3.9009", numeric_version("1.2.1")),
spatialExtent = raster::extent(rep(NA_real_, 4)),
timeframe = as.POSIXlt(c(NA, NA)),
timeunit = "year",
citation = list(),
documentation = list("README.txt", "timeSinceFire.Rmd"),
reqdPkgs = list("raster"),
parameters = rbind(
#defineParameter("paramName", "paramClass", value, min, max, "parameter description")),
defineParameter("returnInterval", "numeric", 1.0, NA, NA, desc = "interval between main events"),
defineParameter("startTime","numeric", 0, NA, NA, desc = "time of first burn event"),
defineParameter(".plotInitialTime", "numeric", NA, NA, NA,
desc = "simulation time at which the first plot event should occur"),
defineParameter(".plotInterval", "numeric", NA, NA, NA,
desc = "simulation time at which the first plot event should occur"),
defineParameter(".saveInitialTime", "numeric", NA, NA, NA,
desc = "simulation time at which the first save event should occur"),
defineParameter(".saveInterval", "numeric", NA, NA, NA,
desc = "simulation time at which the first save event should occur"),
defineParameter(".useCache", "logical", FALSE, NA, NA,
desc = "simulation time at which the first save event should occur")
),
inputObjects = data.frame(
objectName = c("rstFlammable", "fireReturnInterval", "rstCurrentBurn", "fireTimestep"),
objectClass = c("RasterLayer","RasterLayer", "RasterLayer", "numeric"),
sourceURL = "",
desc = c("A binary Raster, where 1 means 'can burn' ",
"A Raster where the pixels represent the fire return interval, in years",
"A binary Raster, where 1 means that there was a fire in the current year in that pixel",
"The time between burn events, in years. Only tested with this equal to 1"),
stringsAsFactors = FALSE
),
outputObjects = data.frame(
objectName = c("rstTimeSinceFire", "burnLoci"),
objectClass = c("RasterLayer", "numeric"),
desc = c("A Raster where the pixels represent the number of years since last burn.",
"A integer vector of cell indices where burns occurred in the latest year. It is derived from rstCurrentBurn"),
stringsAsFactors = FALSE
)
))
doEvent.timeSinceFire <- function(sim, eventTime, eventType, debug = FALSE) {
if (eventType == "init") {
### check for more detailed object dependencies:
### (use `checkObject` or similar)
# do stuff for this event
sim <- Init(sim)
# schedule future event(s)
sim <- scheduleEvent(sim, P(sim)$.plotInitialTime, "timeSinceFire", "plot")
sim <- scheduleEvent(sim, P(sim)$.saveInitialTime, "timeSinceFire", "save")
sim <- scheduleEvent(sim, P(sim)$startTime, "timeSinceFire", "age")
} else if (eventType == "age") {
sim$burnLoci <- which(sim$rstCurrentBurn[] == 1)
fireTimestep <- if (is.null(sim$fireTimestep)) P(sim)$returnInterval else sim$fireTimestep
sim$rstTimeSinceFire[] <- as.integer(sim$rstTimeSinceFire[]) + as.integer(fireTimestep) # preserves NAs
sim$rstTimeSinceFire[sim$burnLoci] <- 0L
# schedule next age event
sim <- scheduleEvent(sim, time(sim) + fireTimestep, "timeSinceFire", "age")
} else if (eventType == "plot") {
rtsf <- sim$rstTimeSinceFire
plotFn(rtsf, title = "Time since fire (age)", new = TRUE)
# schedule next plot event
sim <- scheduleEvent(sim, time(sim) + P(sim)$.plotInterval, "timeSinceFire", "plot")
} else if (eventType == "save") {
# ! ----- EDIT BELOW ----- ! #
# do stuff for this event
# e.g., call your custom functions/methods here
# you can define your own methods below this `doEvent` function
# schedule future event(s)
# e.g.,
# sim <- scheduleEvent(sim, time(sim) + increment, "timeSinceFire", "save")
# ! ----- STOP EDITING ----- ! #
} else {
warning(paste("Undefined event type: '", current(sim)[1, "eventType", with = FALSE],
"' in module '", current(sim)[1, "moduleName", with = FALSE], "'", sep = ""))
}
return(invisible(sim))
}
Init <- function(sim) {
if (is.null(sim$burnLoci)) {
sim$burnLoci <- which(sim$rstCurrentBurn[] == 1)
}
if (is.null(sim$rstTimeSinceFire)) {
if (is.null(sim$fireReturnInterval)) {
stop(currentModule(sim), " needs a rstTimeSinceFire map. If this does not exist, then passing ",
"a fireReturnInterval map will assign the fireReturnInterval as rstTimeSinceFire")
}
# Much faster than calling rasterize() again
sim$rstTimeSinceFire <- sim$fireReturnInterval
#sim$rstTimeSinceFire[] <- factorValues(sim$rasterToMatch, sim$rasterToMatch[],
# att = "fireReturnInterval")[[1]]
sim$rstTimeSinceFire[sim$rstFlammable[] == 0L] <- NA #non-flammable areas are permanent.
sim$rstTimeSinceFire[] <- as.integer(sim$rstTimeSinceFire[])
}
return(invisible(sim))
}
plotFn <- function(rtsf, title = "Time since fire (age)", new = TRUE) {
Plot(rtsf, title = title, new = new)
}
|
b1db4d63956855cc11b797477b110febcacf4c0e | caa170d4e3874240d1cb26d972d9f4659ef05fd1 | /M20200518.R | 33896f346aeb813578bdedb5e741f705af1b46e6 | [] | no_license | peolgok/WorkspaceR | abf35bd663faf348064d6d91c668cc1fde18f7fc | e2f02be3a691229a03103edabc690d8ddab1963f | refs/heads/master | 2022-07-22T15:36:45.357200 | 2020-05-18T08:16:31 | 2020-05-18T08:16:31 | 263,275,717 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 13,489 | r | M20200518.R | # 5일차 # 엑셀파일 읽는 방법, 별도로 설치하는 환경 있어야.#텍스트, 엑셀파일 읽기함.
# 범주형, 연속형. 두가지의 유형 데이터. 단일 변수형태냐, 단일 변수형태냐. 0#다변 일변량이냐 단수량이냐? 문제정의,데이터수집, 데이터전처리, 탐색적, 데이터분석, 보고서, 발표 #이 절차는 순차적이긴 하지만 계속 피드백하는 과정을 거쳐.
#분석절차 1. 문제정의 2. 자료수집 3. 자료 전처리 4. 자료 탐색(EDA) "오늘 18,내일 19 자료 탐색에 대해서 배울 것. 단일 변수 범주형 자료, 단일 변수 범주형, 연속형. 전처리라는 것은 수집한 데이터가 완벽한 자료가 아니므로 완벽한 형태로 수정하는 작업을 대이터 전처리라고 한다. 데이터 전처리 하는 과정에 더 많은 시간 걸릴 수 있어. 3,4, 핵심. 우리의 실습도 이것이 될 것. 정해진 데이터를 주겠다. 직접 수집하는 것은 다음주에 하겠다.
#GIT을 사용하는 것에 대하여서. #관리자권한으로 실행하면 오류가 줄 것이다. #패스명령에 익숙해지도록 합시다. 안되면 단계단계 끊어서 할 수 있도록 합니다.
#명령하고 한칸 뛰고 쓰는 습관을 가지도록 합니다. []있으면 디렉토리, 없으면 파일.
#git status 추적하지 않은 목록의 파일이 나옵니다. #git hub desktop
# 명령을 내리고, 화면에 나타나는 메시지를 잘 읽어야. 결정을 해야.
#git으로 관리할 필요가 있는 파일들은 어떤 파일인가? git창엔 관리안하고 있는 것들이 주욱 나 와. 관리할 것만 선택하면 된다. git add, git commit 했던 동작들을 git hub desktop으로 간 편 히 할 수 있어. # history에 내용들. # file - new repository - " 이것이 깃 인 잇과 같은 것입니다.
#여기서 사용한 것들을 집에서 가져다 쓸 수 있는 방법인 것. git hub 파일 사이즈에 제한은 있다. 25mb넘으면 안됩니다. 그럴 땐 파일을 분할해서 올려야. # 집에서 깃허브, 피일- 클론 리파지토리, 클론이란 가지고 오는 것. 사이트에서. #clone 복사본 만들기 #클론 리파지토리 이용해서, push 다운로드, # 푸시에러 풀에라 날수, 이때는 업로드 파일. 동기화를 잘할 수 있어야.
#2.1 txt/excel 파일 읽기
#2.2 자료의 종류
#2.3 단일 변수 범주형 자료 탐색
#2.4 단일 변수 연속형 자료 탐색
setwd("C:\\tomwaitz\\WorkspaceR")
setwd("C:/tomwaitz/WorkspaceR")
# "directory 경로 : directory 변경 함수. 스크립트파일 위치 디렉토리 데이타위치를 분리해서 저장하는 경우가 왕왕있어. 데이터랑 스크립트가 다를 떄. 이 때 setwd로 변경해주는 것이다. 더블 "" 포테이션? 이 안에다가 넣어야. 그리고 '\' 두개씩 써, 그러니까 하나씩 추가.
#text 파일 읽기
df <- read.table(file = "airquality.txt", header = T)
d f
class(df)
#Excel 파일 읽기
#Excel 파일 읽기
install.packages("xlsx") # excel 파일 읽을 때 패키지
install.packages("rJava") # Java 실행 패키지
# 기본 패키지 외에 설치된 패키지 사용 - library load 작업.
library(rJava) # 라이브러리 순서 이거대로 지켜야한다.
library(xlsx) # 껐다 키면 로드작업만 다시
df.xlsx <- read.xlsx(file = "airquality.xlsx",
sheetIndex = 1,
encoding = "UTF-8")
df.xlsx # r의 장점 내가 기능이 필요하면 공개용으로 제공이 된다. # change
class(df.xlsx); str(df.xlsx);head(df.xlsx);tail(df.xlsx)
# 파일을 왜 못 읽을까 위치가 안 맞기 때문. # 스크립트에 남겨놓은 것을 중요하게 생각해야.
setwd("C:\\tomwaitz\\WorkspaceR")
score <- c(76, 84, 69, 5, 95, 6, 85, 71, 88, 84)
which(score ==69)
which(score>=85)
max(score)
which.max(score) #최고값의 인덱스
min(score) #최저값의 인덱스
which.min(score)
idx <- which(score>=60)
score[idx] <- 61
score
idx <- which(df.xlsx[,1:2]=="NA", arr.ind = T)
# arr.ind=T : 해당조건의 행/열값을 확인 할때
idx
# Reading text files
# Bug fix: the encoding was misspelled "UTP-8"; the correct value is "UTF-8".
df.txt <- read.table(file="airquality.txt",
                     header=T, # first row holds the column names; check the raw file first
                     encoding = "UTF-8")
df.txt
class(df.txt)
str(df.txt)
head(df.txt)
tail(df.txt)
# Tab-separated text (e.g. exported from Excel)? -> read.table
# Reading csv / text / excel files: read.csv, read.table, read.xlsx
# Other formats: XML (eXtended Markup Language), JSON (JavaScript Object Notation);
# JSON is very common, especially in web programming, and is familiar to Python users.
# Kinds of data: the analysis methods you can apply depend on the kind of data,
# so the first step with any data set is to identify which category it belongs to.
# - By characteristics of the values:
#   1. Categorical data: values that fall into categories/groups; they cannot be
#      ordered or used in arithmetic. Usually character-valued; writing them as
#      numbers does not make them continuous. --------> stored as factors.
#   2. Continuous (incl. discrete) data: numbers with magnitude; comparison and
#      arithmetic apply, so many more analysis methods exist.
# Data = a collection of values on some topic; observations = the values it
# contains. In statistics a "variable" is a feature of what is being studied.
# - By number of variables: univariate data (one variable, analysed in a vector)
#   vs. multivariate data (two or more variables; exactly two = "bivariate"),
#   analysed in a matrix or data frame.
# Combining both axes: 1. univariate categorical  2. univariate continuous
#                      3. multivariate categorical  4. multivariate continuous
# 2.3 Exploring univariate categorical data: count observations per category and
# compute per-category proportions (a frequency table); visualise with bar or
# pie charts. Visualisation usually means charts (occasionally maps).
# ML / deep learning on big data is hard to do without this kind of analysis.
# A favourite-season survey: character values that group into four seasons.
favorite <- c('winter','summer','spring', 'summer', 'summer','fall',
              'fall','summer', 'spring', 'spring'); favorite
class(favorite) ; str(favorite) ; dim(favorite)
# spring/summer/fall/winter can be grouped -> categorical data. R can report it
# as a factor, but so far it was only entered as a character vector.
as.factor(favorite)
favorite
#################### Univariate categorical data -> frequency table
table(favorite)# build the frequency table
table(favorite)/length(favorite)# proportions per category
ds <- table(favorite);
ds
class(ds)
str(ds)
dim(ds)
barplot(ds, main = 'favorite season') # basic bar chart
# ds is the frequency table; main is the plot title
ds.new <- ds[c(2,3,1,4)] # reorder the categories any way you like
ds.new
barplot(ds.new, main = 'favorite season')
pie(ds.new, main = 'favorite season')
# Favourite colour, coded numerically (1/2/3) -- still categorical data.
# Fixes vs. the original:
#  - "favoirte.color" typo renamed to favorite.color (only used in this section);
#  - names(ds)/col referenced the undefined object `colors`, which resolves to
#    the grDevices::colors FUNCTION and errors; the vector defined here is `color`.
favorite.color <- c(2,3,2,1,1,2,2,1,3,2,1,3,2,1,2)
str(favorite.color)
ds <- table(favorite.color)
ds
barplot(ds, main = 'favorite season')
color <- c('green','red','blue')
names(ds) <- color;
ds
barplot(ds, main = 'favorite season',
        col=color)
pie(ds, main = 'favorite season', col = color)
#############################################################
# The same favourite-colour example, this time with a consistently named
# colour vector (`colors`) so the category names and fill colours work.
favorite.color <- c(2, 3, 2, 1, 1, 2, 2,
                    1, 3, 2, 1, 3, 2, 1, 2)
ds <- table( favorite.color )
ds
barplot( ds, main = 'favorite season' )
colors <- c('black', 'red', 'blue') ############### which colour maps to which code (1..3)
names(ds) <- colors;
ds
barplot(ds, main = 'favorite season',
        col = colors)
pie(ds, main = 'favorite season',
    col = colors)
#
# Exploring univariate continuous data
# Continuous values have magnitude, so many summaries apply:
#  1. mean - one value representing the whole, but sensitive to outliers
#  2. median - the middle value when sorted; robust to outliers
#  3. trimmed mean - drop the lowest/highest n% of the values, then average the rest
#  4. quartiles - the values splitting the sorted data into four 25% chunks:
#     Q1, Q2 (= the median), Q3
#  5. spread (distribution): variance = average squared distance from the mean;
#     standard deviation = square root of the variance
# Values outside the expected range are "outliers"; they can distort an
# analysis, so preprocessing often removes them - part of exploratory analysis.
############### R is just the tool - visualising continuous data
# Visualisation uses histograms and box plots
weight <- c(60, 62,64,65,68,69)
weight
weight.heavy <- c(weight, 120)
weight.heavy
# 120 is an outlier here. (By what rule should something be called an outlier?)
mean(weight)
mean(weight.heavy)
median(weight)
median(weight.heavy)
mean(weight, trim=0.2) # drop the top and bottom 20% before averaging
mean(weight.heavy, trim = 0.2) #
#################### i.e. do not apply summaries mechanically
# Quartiles
quantile(weight.heavy)
quantile(weight.heavy, (0:10)/10)
# (0:10)/10 sets the cut points explicitly:
# deciles from 0% to 100% in 10% steps
summary(weight.heavy)# summary() is the one you will use most often
# Spread
var(weight) # variance
sd(weight)# standard deviation
range(weight)# the range of values (min and max)
diff(range(weight))# difference between max and min
# Visualising continuous data
# Histogram: visualises the distribution of continuous data by splitting the
# range into bins and counting how many values fall into each bin.
class(cars)
str(cars)
dist <- cars[,2]   # column 2 of the built-in cars data set...
dist <- cars$dist  # ...same column, selected by name
dist
# Bug fix: the original line read "boxplot.st(ats(dist)" -- a typo with an
# unbalanced parenthesis that made the rest of the script unparseable. The
# intended call is boxplot.stats(), used to read off the concrete values:
#   $stats = five-number summary (quartiles), $n = number of observations,
#   $conf = confidence interval of the median, $out = outliers.
boxplot.stats(dist)
cars$dist
a <- cars$dist
a
sort(a, decreasing = T)
############################### Drawing a histogram
hist(dist, main = "Histogram for 제동거리",
     xlab = "제동거리", ylab = "빈도수",
     border="blue", col = 'green',
     las=1, breaks =5 ) # las: axis-label orientation (1 = horizontal)
# Bug fixes: a stray bare `hist` here printed the function's source code, and
# rm(list=ls()) wiped the workspace -- including `dist` -- so the boxplot
# calls below failed with "object 'dist' not found". Both lines are removed;
# clear the workspace only after the section is done, if at all.
cars
# Box plot (box-and-whisker plot): visualises the quartiles. One plot conveys
# the overall shape of the distribution, though the exact min/max/median
# values are hard to read off it.
boxplot (dist, main = '자동차 제동거리')
boxplot.stats(dist)
########################## Everything here is about understanding data that
# has already been generated and inferring what lies behind it -- we finally
# have the computing resources to analyse it at scale.
|
6e4254d7871d29cd331b03b2178f051a44f8f7fe | ffdea92d4315e4363dd4ae673a1a6adf82a761b5 | /data/genthat_extracted_code/Spectrum/examples/kernel_pca.Rd.R | bc12e0fb04c07f54716a7f4f6411a1beb0ce7590 | [] | no_license | surayaaramli/typeRrh | d257ac8905c49123f4ccd4e377ee3dfc84d1636c | 66e6996f31961bc8b9aafe1a6a6098327b66bf71 | refs/heads/master | 2023-05-05T04:05:31.617869 | 2019-04-25T22:10:06 | 2019-04-25T22:10:06 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 187 | r | kernel_pca.Rd.R | library(Spectrum)
# Auto-extracted example code from the Spectrum package's kernel_pca.Rd help
# page; requires the Spectrum package and its bundled `blobs` data set.
### Name: kernel_pca
### Title: kernel_pca: A kernel pca function
### Aliases: kernel_pca

### ** Examples

# Run kernel PCA on the first 50 columns of blobs with kernel=FALSE.
ex_kernel_pca <- kernel_pca(blobs[,1:50], kernel=FALSE)
|
8649a6a12f171553f9574c7ded0f44ad525a0496 | 3aac39bd37372b8bc2371e5e31c6da65e11cfdb8 | /dataAnalysis.R | 2480a38b2dd204535b19d525c282748ea95c388e | [] | no_license | bcaffo/jhu_dash | b2fa10f4f048e23ce9304a97778fb2ea35e5e991 | 5f886da76ea40af4996de2607bb1ee2880723c83 | refs/heads/master | 2021-01-24T02:39:29.393034 | 2015-09-21T17:42:15 | 2015-09-21T17:42:15 | 42,886,844 | 1 | 0 | null | 2015-09-21T18:48:16 | 2015-09-21T18:48:16 | null | UTF-8 | R | false | false | 206 | r | dataAnalysis.R | require(XML)
library(plyr)
# Read data into the R session from a CSV file in the Downloads folder
data <- read.csv("Downloads/DataFromXML1.csv")
# Summary statistics for every column
summary(data)
# Plot for displaying calories burned: line plot of the Burned column
plot(data$Burned, type = "l", col = "red")
0ffca67c69f142cde057d99a6ffbdd9a1d3c5af7 | 75b1f6fa114f5df24bd120da511b8ef72d678a9a | /Scripts/Programming_Concepts/R2018_Programming_Concepts_Practical2.R | eefcfb1e8e1b5a7bb3d342325fbe178e633dead1 | [] | no_license | si-medbif/SIRE506-R-Intro | f26108e5117dc2b2bf603ff7d1844ea89bd1b27e | 1c7ff90c1a2c3dd5b83c7fefebb8ef29ccc16d8a | refs/heads/master | 2020-04-14T19:41:52.205208 | 2019-01-29T07:43:07 | 2019-01-29T07:43:07 | 164,067,986 | 1 | 1 | null | null | null | null | UTF-8 | R | false | false | 700 | r | R2018_Programming_Concepts_Practical2.R | paste("I","love","you.")
paste0("I","love","you.")   # paste0() concatenates with no separator
# Loop skeleton for the tasks below: k takes each year in turn.
for(k in 2005:2018){
  #Do something with k
  #print(something)
}
##### Your tasks
#1) Our input is 2005:2018 (i.e. 2005, 2006, ..., 2017, 2018)
#2) We will use the for loop and the 'paste' function to generate the following sentences:
#   This year is 2005.
#   This year is 2006.
#   This year is 2007.
#   ...
#   ...
#   This year is 2018.
### BONUS
#1) Leap years are divisible by 4 (the Gregorian century rule is ignored in this exercise)
#2) For leap years, add 'This is a leap year.' at the end of the sentence
#3) Generate the following sentences:
#   This year is 2005.
#   ...
#   This year is 2008. This is a leap year.
#   ...
#   This year is 2018.
# HINT 1: If-else
# HINT 2: Modulo (%%)
5fb37532331cedba9ecf569720f239e449f3a205 | 9e8936a8cc7beae524251c8660fa755609de9ce5 | /man/discrim_regularized.Rd | 4c5e1c6d3485e4f26002183764be54ec75e5e0d0 | [
"MIT"
] | permissive | tidymodels/parsnip | bfca10e2b58485e5b21db64517dadd4d3c924648 | 907d2164a093f10cbbc1921e4b73264ca4053f6b | refs/heads/main | 2023-09-05T18:33:59.301116 | 2023-08-17T23:45:42 | 2023-08-17T23:45:42 | 113,789,613 | 451 | 93 | NOASSERTION | 2023-08-17T23:43:21 | 2017-12-10T22:48:42 | R | UTF-8 | R | false | true | 3,284 | rd | discrim_regularized.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/discrim_regularized.R
\name{discrim_regularized}
\alias{discrim_regularized}
\title{Regularized discriminant analysis}
\usage{
discrim_regularized(
mode = "classification",
frac_common_cov = NULL,
frac_identity = NULL,
engine = "klaR"
)
}
\arguments{
\item{mode}{A single character string for the prediction outcome mode.
Possible values for this model are "unknown", "regression", or
"classification".}
\item{frac_common_cov, frac_identity}{Numeric values between zero and one.}
\item{engine}{A single character string specifying what computational engine
to use for fitting.}
}
\description{
\code{discrim_regularized()} defines a model that estimates a multivariate
distribution for the predictors separately for the data in each class. The
structure of the model can be LDA, QDA, or some amalgam of the two. Bayes'
theorem is used to compute the probability of each class, given the
predictor values. This function can fit classification models.
\Sexpr[stage=render,results=rd]{parsnip:::make_engine_list("discrim_regularized")}
More information on how \pkg{parsnip} is used for modeling is at
\url{https://www.tidymodels.org/}.
}
\details{
There are many ways of regularizing models. For example, one form of
regularization is to penalize model parameters. Similarly, the classic
James–Stein regularization approach shrinks the model structure to a less
complex form.
The model fits a very specific type of regularized model by Friedman (1989)
that uses two types of regularization. One modulates how class-specific the
covariance matrix should be. This allows the model to balance between LDA
and QDA. The second regularization component shrinks the covariance matrix
towards the identity matrix.
For the penalization approach, \code{\link[=discrim_linear]{discrim_linear()}} with a \code{mda} engine can be
used. Other regularization methods can be used with \code{\link[=discrim_linear]{discrim_linear()}} and
\code{\link[=discrim_quad]{discrim_quad()}} can used via the \code{sparsediscrim} engine for those functions.
This function only defines what \emph{type} of model is being fit. Once an engine
is specified, the \emph{method} to fit the model is also defined. See
\code{\link[=set_engine]{set_engine()}} for more on setting the engine, including how to set engine
arguments.
The model is not trained or fit until the \code{\link[=fit.model_spec]{fit()}} function is used
with the data.
Each of the arguments in this function other than \code{mode} and \code{engine} are
captured as \link[rlang:topic-quosure]{quosures}. To pass values
programmatically, use the \link[rlang:injection-operator]{injection operator} like so:
\if{html}{\out{<div class="sourceCode r">}}\preformatted{value <- 1
discrim_regularized(argument = !!value)
}\if{html}{\out{</div>}}
}
\references{
\url{https://www.tidymodels.org}, \href{https://www.tmwr.org/}{\emph{Tidy Modeling with R}}, \href{https://www.tidymodels.org/find/parsnip/}{searchable table of parsnip models}
Friedman, J (1989). Regularized Discriminant Analysis. \emph{Journal of the
American Statistical Association}, 84, 165-175.
}
\seealso{
\Sexpr[stage=render,results=rd]{parsnip:::make_seealso_list("discrim_regularized")}
}
|
c800003ffd021b6318315802324ae3a42f2d0d57 | 3a8c913f2ab6bd9f5062a956d9f1b828217bf123 | /Code/dataFromGit.R | 96566f924e586bdee7bcb43ce3b4c406c21d83c2 | [] | no_license | ericdfoster/draftparty | d0ae6229fc7e3cf8cf2835e92a2e874c09c74145 | 6c9b531e6d549dbcc2ef04dcfc78c1372ad28f53 | refs/heads/master | 2020-04-29T09:25:37.337304 | 2020-03-08T22:20:17 | 2020-03-08T22:20:17 | 176,024,512 | 0 | 1 | null | null | null | null | UTF-8 | R | false | false | 2,317 | r | dataFromGit.R | ######################################################################################
### ###
### dataFromGit ###
### ###
### DESCRIPTION: A function that can be used for reading data frames from the git ###
### repository. ###
### ###
### INPUTS: ###
### ###
### + fp.git - The relative filepath/filename to use when storing the data on ###
### git. For example, "/Data/GUESSES_XXXX". ###
### ###
### + fp.repo - The filepath of the local git repository. Defaults to the ###
### "C:/R Projects/draftparty" repository. ###
### ###
### OUTPUTS: ###
### ###
### + The requested dataframe. ###
### ###
######################################################################################
dataFromGit <- function(fp.git, fp.repo = ""){
  # Read a versioned data frame from the draftparty git repository.
  #
  # fp.git  - relative path of the stored data inside the repo,
  #           e.g. "/Data/GUESSES_XXXX".
  # fp.repo - local path of the git repository; "" (the default) falls back
  #           to "C:/R Projects/draftparty".
  # Returns the data frame read via git2rdata::read_vc().

  ### LIBRARIES ###
  require(git2rdata)

  # Resolve the repository location, falling back to the default checkout
  # when none was supplied.
  # (Hint: the bash command `git rev-parse --show-toplevel` prints a
  # repository's top-level directory if you need to find it.)
  repo.path <- if (fp.repo == "") "C:/R Projects/draftparty" else fp.repo
  repo <- repository(path = repo.path)

  ### BE SURE TO PULL FROM THE REPOSITORY FIRST ###
  pull(repo = repo)

  ### READ THE FILE AND RETURN THE DATAFRAME ###
  read_vc(file = fp.git, root = repo)
}
144880aca3f2c57f1882d182d8af3dfa862327ff | 932207d42917d0f4b95cea5d310e0a2db11d4d55 | /plot3.R | f5941721830b5db0f5dbad9915b99d1341b5a77d | [] | no_license | erinboyle05/ExData_Plotting1 | 8a3b7e42170c87664d003b4d0046db2c8def3be1 | 1b503ce7b8f9d5f54f514afffc7d1b5f3a01eccd | refs/heads/master | 2021-01-12T21:03:37.341335 | 2015-10-07T02:24:21 | 2015-10-07T02:24:21 | 43,707,495 | 0 | 0 | null | 2015-10-05T19:12:10 | 2015-10-05T19:12:10 | null | UTF-8 | R | false | false | 465 | r | plot3.R |
# Load the February power-consumption data only if it is not already in the
# workspace (getdata.R is expected to create `febdata`).
if (!exists("febdata")) {
    source("getdata.R")
}
# Write the plot to a PNG file instead of the screen device
png(filename = "plot3.png")
# Sub-metering 1 as the base line plot...
plot(febdata$Time, febdata$Sub_metering_1, type = "l", ylab = "Energy sub metering",
     xlab = "")
# ...with sub-metering 2 and 3 overlaid in red and blue
lines(febdata$Time, febdata$Sub_metering_2, col = "red")
lines(febdata$Time, febdata$Sub_metering_3, col = "blue")
legend("topright", legend = c("Sub_metering_1", "Sub_metering_2", "Sub_metering_3"),
       col = c("black", "red", "blue"), lwd = 1)
# Close the device so the file is flushed to disk
dev.off()
|
0978ccd5b003b53c72ea7d31cb89ff8e038bbb6a | 4a9ba6f4101715dc2419c9c40c16ab392a7161f2 | /PseudoCode.R | 30be8b94308d7444d1697185aff2909005fe93b0 | [] | no_license | bpalakur/Intro_Biocomp_ND_317_Tutorial10 | 6673053605646dfcd2555f8deca8b80d2b76b213 | 9b134b871c42c0a44a6c7965f50502dc554022fa | refs/heads/master | 2021-08-08T06:10:22.977260 | 2017-11-09T18:22:01 | 2017-11-09T18:22:01 | 109,405,953 | 0 | 0 | null | 2017-11-03T14:35:05 | 2017-11-03T14:35:05 | null | UTF-8 | R | false | false | 365 | r | PseudoCode.R | #Load Packages
#Define custom model function
#Set up a 'pool' of values for the parameters of interest
#create a dataframe to hold results
#use for loop to simulate the different models and store parameters of interest
#create different plots for various r, k, N0
#Number 2
#
#Build model
#Run simulation
#Data exploration - making plots of parameters of interest |
5e0ba106a6d7856ee36a735af3fcf6f4bd9da277 | f4d0bf674d06b00a0867ca9b02d98d17ad7c0dfb | /R/teamBattingScorecardMatch.R | f19ea1ae0209802c57d5e9e2ffe360e489a8f666 | [] | no_license | tvganesh/yorkr | bd86283ae7abb4551cd3c29d8242af1dca49f3a0 | c0b907e41be891b9c5cfabd5c0d2f618753e17ed | refs/heads/master | 2023-05-10T16:24:56.548214 | 2023-05-08T13:46:58 | 2023-05-08T13:46:58 | 54,775,374 | 17 | 8 | null | 2017-04-10T00:22:14 | 2016-03-26T11:07:53 | R | UTF-8 | R | false | false | 3,273 | r | teamBattingScorecardMatch.R | ##########################################################################################
# Designed and developed by Tinniam V Ganesh
# Date : 20 Mar 2016
# Function: teamBattingScorecardMatch
# This function gets the batting scorecard of team in a match. The result is
# returned as a data frame
#
###########################################################################################
#' @title
#' Team batting scorecard of a team in a match
#'
#' @description
#' This function computes returns the batting scorecard (runs, fours, sixes, balls played) for the
#' team
#' @usage
#' teamBattingScorecardMatch(match,theTeam)
#'
#' @param match
#' The match for which the score card is required e.g.
#'
#' @param theTeam
#' Team for which scorecard required
#'
#' @return scorecard
#' A data frame with the batting scorecard
#' @references
#' \url{https://cricsheet.org/}\cr
#' \url{https://gigadom.in/}\cr
#' \url{https://github.com/tvganesh/yorkrData/}
#'
#' @author
#' Tinniam V Ganesh
#' @note
#' Maintainer: Tinniam V Ganesh \email{tvganesh.85@gmail.com}
#'
#' @examples
#' \dontrun{
#' a <- getMatchDetails("England","Pakistan","2006-09-05",dir="../temp")
#' teamBowlingScorecardMatch(a,'England')
#' }
#'
#' @seealso
#' \code{\link{teamBatsmenPartnershipAllOppnAllMatches}}\cr
#' \code{\link{teamBatsmenPartnershipAllOppnAllMatchesPlot}}\cr
#' \code{\link{teamBatsmenPartnershipOppnAllMatchesChart}}\cr
#'
#' @export
#'
teamBattingScorecardMatch <- function(match,theTeam){
    # Compute the batting scorecard (balls played, fours, sixes, runs) for
    # `theTeam` in a single match.
    #
    # match   - data frame of ball-by-ball deliveries for one match
    # theTeam - name of the batting team to summarise
    # Returns a data frame (batsman, ballsPlayed, fours, sixes, runs), or NULL
    # (after a console message) when the team has no batting records.
    #
    # NULL-bind the column names used inside dplyr expressions so that
    # R CMD check does not flag them as undefined globals.
    team=batsman=runs=fours=sixes=NULL
    byes=legbyes=noballs=wides=NULL
    # Keep only the deliveries where theTeam was batting
    a <-filter(match,team==theTeam)
    sz <- dim(a)
    if(sz[1] == 0){
        cat("No batting records.\n")
        return(NULL)
    }
    b <- select(a,batsman,runs)
    names(b) <-c("batsman","runs")
    #Compute the number of 4s (runs of 4 or 5 in a delivery are counted here)
    c <-
        b %>%
        mutate(fours=(runs>=4 & runs <6)) %>%
        filter(fours==TRUE)
    # Group by batsman. Count 4s
    d <- summarise(group_by(c, batsman),fours=n())
    # Get the total runs for each batsman
    e <-summarise(group_by(a,batsman),sum(runs))
    # Bug fix: this rename was applied to `b` (a no-op; b is already named)
    # instead of `e` -- the intent is to rename e's "sum(runs)" column to
    # "runs" before the join. The names(details) line below keeps the final
    # column names stable either way.
    names(e) <-c("batsman","runs")
    details <- full_join(e,d,by="batsman")
    names(details) <-c("batsman","runs","fours")
    f <-
        b %>%
        mutate(sixes=(runs ==6)) %>%
        filter(sixes == TRUE)
    # Group by batsman. Count 6s
    g <- summarise(group_by(f, batsman),sixes=n())
    names(g) <-c("batsman","sixes")
    #Full join with 4s and 6s
    details <- full_join(details,g,by="batsman")
    # Count the balls played by the batsman, excluding deliveries recorded as
    # wides/no-balls/byes/leg-byes
    ballsPlayed <-
        a %>%
        select(batsman,byes,legbyes,wides,noballs,runs) %>%
        filter(wides ==0,noballs ==0,byes ==0,legbyes == 0) %>%
        select(batsman,runs)
    ballsPlayed<- summarise(group_by(ballsPlayed,batsman),count=n())
    names(ballsPlayed) <- c("batsman","ballsPlayed")
    details <- full_join(details,ballsPlayed,by="batsman")
    cat("Total=",sum(details$runs),"\n")
    # Batsmen with no boundaries come out of the full joins as NA; report 0
    if(sum(is.na(details$fours)) != 0){
        details[is.na(details$fours),]$fours <- 0
    }
    if(sum(is.na(details$sixes)) != 0){
        details[is.na(details$sixes),]$sixes <- 0
    }
    # Order the output columns
    details <- select(details,batsman,ballsPlayed,fours,sixes,runs)
    details
}
40c7bc12c90113e6a33855ec4baf9acdd2bb845d | 6fae586199220e2cd9a1b73a0225eca617561c53 | /Bib.R | 02ed9f72b5c6af4e5d94e19a6e85c5bf5117337c | [] | no_license | KatherineDuchesneau/Endophyte_project | 4f1cdbc0fd4b774f82eb3c2bb0576df0ba16cae7 | c27bb481afd63584d2f8f5696e1c2f52530adcc0 | refs/heads/master | 2021-04-27T09:47:28.670655 | 2018-04-19T02:36:37 | 2018-04-19T02:36:37 | 122,522,078 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 2,324 | r | Bib.R | #############################
# #
# Assemble literature cited #
# #
#############################
setwd("C:/Users/Brody/Desktop/Murre spp. Population Genomics and Conservation/BIOL812 - Bioinformatics/Endophytes")
install.packages("dplyr")
library(dplyr)
install.packages("rcrossref")
library(rcrossref)
# base program
# cr_cn(dois = "10.1111/j.1469-8137.2006.01750.x",format = "text", style = "journal-of-evolutionary-biology")
# 4 articles from poster
# 10.1080/0735-260291044377
# 10.1038/srep40914
# 10.1111/j.1469-8137.2009.02773.x
# 10.1111/j.1365-2745.2012.01997.x
# first paper from manuscript - Rillig 2006 mycorrhizas and soil structure
# 10.1111/j.1469-8137.2006.01750.x
# QIIME
# 10.1038/nmeth.f.303
# picrust
# 10.1038/nbt.2676
# trimmomatic
# 10.1093/bioinformatics/btu170
# data papers
# Pinus flexilis 10.3389/fmicb.2014.00333
# Solanum 10.1111/1574-6968.12377
# Espeletia 10.1128/AEM.02781-15
# Pinus pinaster 10.1038/s41598-017-04141-6
# Santiria 10.1007/s00248-017-1002-2
# Paeonia spp. 10.1016/j.bjm.2017.02.009
# Oryza 10.1016/j.gdata.2017.02.010
# paper on ubiquitous nature of endophytes
# 10.1128/MMBR.67.4.491-502.2003
# DOIs of every work cited above; use `<-` (not `=`) for top-level assignment.
dois <- c('10.1128/MMBR.67.4.491-502.2003','10.3389/fmicb.2014.00333','10.1111/1574-6968.12377','10.1128/AEM.02781-15','10.1038/s41598-017-04141-6','10.1007/s00248-017-1002-2','10.1016/j.bjm.2017.02.009','10.1016/j.gdata.2017.02.010','10.1080/0735-260291044377','10.1038/srep40914','10.1111/j.1469-8137.2009.02773.x','10.1111/j.1365-2745.2012.01997.x','10.1111/j.1469-8137.2006.01750.x','10.1038/nmeth.f.303','10.1038/nbt.2676','10.1093/bioinformatics/btu170')
poster_litcited = function(dois=""){
  # Build two literature-cited files from a vector of DOIs:
  #   literature_cited.txt        - full references fetched from CrossRef
  #   poster_literature_cited.txt - shortened references for the poster
  #
  # dois - character vector of DOIs; "" (the default) produces empty output.
  # Side effects only: both files are (re)created in the working directory.

  # Start with a fresh full-reference file (single blank line).
  write("",file = "literature_cited.txt")
  for (i in dois){
    # Fetch the formatted citation for one DOI (rcrossref; network call).
    ref = cr_cn(dois = i,format = "text", style = "journal-of-evolutionary-biology")
    write(ref,file="literature_cited.txt",append = TRUE)
  }
  write("",file="poster_literature_cited.txt")
  # Re-read the full references; as.is=TRUE keeps them as character strings.
  lit = read.delim("literature_cited.txt",sep="\t",fill=TRUE,header=FALSE,as.is=TRUE)
  # NOTE(review): this loop iterates over the COLUMNS of the data frame, not
  # its rows; it works because the file yields a single character column, and
  # gsub()/write() are vectorised over that column -- confirm if the input
  # ever gains tab characters (extra columns).
  for (line in lit){
    # Shorten each reference: keep group 1 (year), group 3 (journal/volume)
    # and group 4 (page range), dropping group 2 -- presumably the article
    # title -- and any trailing text.
    short_ref = gsub("(\\d\\d\\d\\d)(\\. .*)(\\. \\w*.*\\d+)(.*\\.).*\\.$","\\1\\3\\4", line)
    write(short_ref,file="poster_literature_cited.txt",append = TRUE)
  }
}
poster_litcited(dois)
|
0d82c749565a27a818e49bfb4bac145ebf211e0d | 6b0f6cfd899add326a4fe835feabb2c7d5c92278 | /R/ols-residual-plus-component-plot.R | bee2b7ccf07891101f5f19a9ea46d4ed015b1845 | [] | no_license | SvetiStefan/olsrr | 9c7b4b82fdae631cab778107381d3d259408996d | db9f0427771f07ca93e8600a1573aef7779e76f5 | refs/heads/master | 2021-06-17T11:56:39.893077 | 2017-06-06T10:31:45 | 2017-06-06T10:31:45 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,305 | r | ols-residual-plus-component-plot.R | #' @title Residual Plus Component Plot
#' @description The residual plus component plot indicates whether any non-linearity is present
#' in the relationship between response and predictor variables and can suggest possible transformations
#' for linearizing the data.
#' @param model an object of class \code{lm}
#' @references Chatterjee, Samprit and Hadi, Ali. Regression Analysis by Example. 5th ed. N.p.: John Wiley & Sons, 2012. Print.
#' Kutner, MH, Nachtscheim CJ, Neter J and Li W., 2004, Applied Linear Statistical Models (5th edition).
#' Chicago, IL., McGraw Hill/Irwin.
#' @examples
#' model <- lm(mpg ~ disp + hp + wt + qsec, data = mtcars)
#' ols_rpc_plot(model)
#' @export
#'
ols_rpc_plot <- function(model) {
  # Draw one residual-plus-component panel per predictor, arranged in a
  # two-column grid, for a fitted OLS model.
  #
  # model - an object whose class is exactly "lm" (objects with additional
  #         classes, e.g. glm, are rejected by the check below).
  if (!all(class(model) == 'lm')) {
    stop('Please specify a OLS linear regression model.', call. = FALSE)
  }
  # cpout() is a package-internal helper; from its usage below it supplies
  # $lmc (panel count), $data, $mc, $e, $nam (predictor names) and $indvar
  # (response name) -- NOTE(review): confirm against cpout()'s definition.
  pl <- cpout(model)
  # Dummy bindings so R CMD check does not flag the aes() variables as
  # undefined globals.
  x <- NULL
  y <- NULL
  myplots <- list()
  for (i in seq_len(pl$lmc)) {
    # Panel data for the i-th predictor (package-internal helper)
    k <- cpdata(pl$data, pl$mc, pl$e, i)
    # substitute(..., list(i = i)) injects the current value of i into the
    # plot expression before evaluation, so each panel keeps its own index
    # rather than all panels seeing the loop's final i.
    p <- eval(substitute(ggplot(k, aes(x = x, y = y)) +
      geom_point(colour = 'blue', size = 2) + xlab(pl$nam[i]) +
      ylab(paste0("Residual + Component (", pl$indvar, ")")) +
      stat_smooth(method="lm", se=FALSE), list(i = i)))
    # print(p)
    myplots[[i]] <- p
  }
  # Render all panels on one page, two columns wide (gridExtra)
  do.call(grid.arrange, c(myplots, list(ncol = 2)))
}
|
659037bab59d9b51f071e96fbc015ba234f4c646 | e281a598b57e998bad32224445a63cb95a5860a7 | /04_Primary_TripletNonTripletComplex_RNApolIIMetagenePlot.R | 5fc6803cb5806af321b772eb8798135fab0400fb | [] | no_license | itsvenu/MYC-Manuscript | 7e6a31fbce638c96c8939efabecf67e755681c3a | d3949a945133cae4557ceacb96b9292eb39867b3 | refs/heads/main | 2023-01-01T21:55:31.695982 | 2020-10-16T17:05:31 | 2020-10-16T17:05:31 | 197,205,887 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 4,424 | r | 04_Primary_TripletNonTripletComplex_RNApolIIMetagenePlot.R | ## source: /b06x-lsdf/infra5-lsdf/mbHDAC2/scripts/affy-ge/all_timepoints_deg/closestEnhancers/metaplot_smoothing.R
## Triplet_complex vs non_complex RNApolII metagene plot
setwd("~/Desktop/ServerView/MYC/scripts/MycHdaci_code/scripts")
library(tidyverse)
# theme_vt2 <- function(){
# theme_classic(base_size=18) %+replace%
# theme(axis.ticks.length = unit(0.3, "cm"),
# panel.border = element_rect(size=1, color="black", fill=NA),
# axis.text = element_text(color="black", face = "bold"),
# axis.title = element_text(color="black", face = "bold"),
# legend.text = element_text(color="black", face = "bold"),
# legend.title = element_text(color="black", face = "bold")
# )
# }
theme_vt2 <- function(){
theme_classic(base_size=18) %+replace%
theme(axis.ticks.length = unit(0.3, "cm"),
panel.border = element_rect(size=1.3, color="black", fill=NA),
axis.text = element_text(color="black"),
axis.title = element_text(color="black"),
legend.text = element_text(color="black"),
legend.title = element_text(color="black")
)
}
## function to format `deeptools plotProfile` output
return_plotProfile_formated_RNApolII_primaryTriplet <- function(file_path){
# file_path = tripletComplex_nonComplex
dat <- read.delim(file_path, header = TRUE, skip = 1)
dat_mlt <- dat %>%
dplyr::select(-c(bins)) %>%
tibble::column_to_rownames(var = "X") %>% t() %>%
as.data.frame() %>%
tibble::rownames_to_column(var = "bin") %>%
dplyr::filter(bin != "X") %>%
dplyr::mutate(bin = gsub("X", "", bin)) %>%
dplyr::rename(Triplet = Triplet_complex, Non_triplet = non_complex) %>%
reshape2::melt() %>%
dplyr::mutate(bin = as.numeric(bin))
return(dat_mlt)
}
## function to plot metagene plot the data returned by above function
plotProfile_plot_RNApolII_themeClassic <- function(melted_dat){
# melted_dat <- up_rnapol_mlt
## custom labels
lab_pos <- c(100, 400)
lab_nm <- c("TSS", "TES")
spline_int <- as.data.frame(spline(melted_dat$bin, melted_dat$value))
my_plt <- ggplot(melted_dat, aes(bin, value, color = variable))+
geom_line(size = 2)+
scale_x_continuous(breaks = lab_pos, labels = lab_nm, expand = expand_scale(mult = c(0, 0)))+
theme_vt2()+
xlab("")+
ylab("input normalized RNApolII signal (Log2)")+
#scale_color_manual(values = c("RNApolII_untreated" = "darkgray", "RNApolII_treated" = "black"))+
theme(legend.title = element_blank(),
legend.position = c(0.85, 0.9))
return(my_plt)
}
## analysis
tripletComplex_nonComplex <- "../data/TRIPLETCOMPLEX_NONCOMPLEX_EXPRESSED_RNApolII_grays.data"
tripletComplex_nonComplex_mlt <- return_plotProfile_formated_RNApolII_primaryTriplet(file_path = tripletComplex_nonComplex)
tripletComplex_nonComplex_mlt2 <- tripletComplex_nonComplex_mlt %>%
dplyr::mutate(variable = gsub("_", " ", variable))
tripletComplex_nonComplex_mlt2$variable <- factor(tripletComplex_nonComplex_mlt2$variable, levels = c('Triplet', 'Non triplet'))
##
pdf("../figures/TRIPLET_vs_NONTRIPLET_RNApolII_metagene.pdf", height = 5.5, width = 8)
plotProfile_plot_RNApolII_themeClassic(melted_dat = tripletComplex_nonComplex_mlt2)+
scale_color_manual(values = c("Triplet" = "black", "Non triplet" = "darkgray"))
dev.off()
triplet_nontriplet_metagene_primary <- plotProfile_plot_RNApolII_themeClassic(melted_dat = tripletComplex_nonComplex_mlt2)+
scale_color_manual(values = c("Triplet" = "black", "Non triplet" = "darkgray"))
triplet_nontriplet_metagene_primary_classic <- plotProfile_plot_RNApolII_themeClassic(melted_dat = tripletComplex_nonComplex_mlt2)+
scale_color_manual(values = c("Triplet" = "black", "Non triplet" = "darkgray"))+
theme_classic(base_size = 18)+
scale_y_continuous(breaks = c(0, 1, 2, 3), labels = c("0", "1", "2", "3"))+
theme(axis.text = element_text(color = "black"),
axis.ticks.length = unit(0.3, "cm"),
legend.title = element_blank(),
legend.position = c(0.7, 0.9))
save_plot("../figures/TRIPLET_vs_NONTRIPLET_RNApolII_metagene.pdf",triplet_nontriplet_metagene_primary, base_aspect_ratio = 1.3, base_height = 5.5, base_width = 8)
save_plot("../figures/TRIPLET_vs_NONTRIPLET_RNApolII_metagene_classic.pdf", triplet_nontriplet_metagene_primary_classic, base_aspect_ratio = 1.3, base_height = 5, base_width = 7.5)
|
bf6829e9b5f205fc3570fb1c4fd15311ef41854f | a88281b2569d088f7c100f04c7a5f1b6c33b5d62 | /R/function.relief.R | 097ef3eb24bbbbdad2b54f9decbbdba26e449ebd | [] | no_license | faridehbagherzadeh/VariableSelection | 380401d68d5a9873d19fe68523023bf2cf46e124 | 49410553ee20c3d8f6c07029fff8db209d6615f8 | refs/heads/master | 2021-01-18T12:19:24.470552 | 2015-11-11T02:35:03 | 2015-11-11T02:35:55 | 45,986,476 | 2 | 2 | null | null | null | null | UTF-8 | R | false | false | 7,565 | r | function.relief.R | function.relief <- function(input, target, data, control = control.selection(), trace = FALSE, ...){
field_distance <- function(input_idx, instance1, instance2) {
value1 <- instance1[1, input_idx]
value2 <- instance2[1, input_idx]
if (is.factor(value1) && is.factor(value2)) {
if (is.na(value1) && is.na(value2)) {
return(1 - sum(p_val_in_class[[input_idx]]
[, instance1[1, target]] *
p_val_in_class[[
input_idx]][, instance2[1, target]]))
} else if (is.na(value1) || is.na(value2)) {
if (is.na(value1)) {
known_value <- value2
unknown_class <- instance1[1, target]
} else {
known_value <- value1
unknown_class <- instance2[1, target]
}
return(1 - p_val_in_class[[input_idx]][
known_value, unknown_class])
} else if (value1 == value2) {
return(0)
} else {
# if(value1 != value2)
return(1)
}
} else if (is.numeric(value1) && is.numeric(value2)) {
if (is.na(value1) && is.na(value2)) {
return(1)
} else if (is.na(value1)) {
return(max(value2, 1 - value2))
} else if (is.na(value2)) {
return(max(value1, 1 - value1))
} else {
return(abs(value1 - value2))
}
} else {
stop("Unsupported value type")
}
}
instance_distance <- function(instance1, instance2) {
result <- sapply(input, function(i) {
return(field_distance(i, instance1, instance2))
})
res <- sum(result ^ 2)
if (is.na(res)) {
stop("Internal error. Distance NA.")
}
return(res)
}
# uses parent.env
find_neighbours <- function(instance_idx) {
instance <- new_data[instance_idx, , drop = FALSE]
# for every other instance
for (current_idx in 1:instances_count) {
if (instance_idx == current_idx)
(next)()
current_instance <- new_data[current_idx, , drop = FALSE]
if (is.na(current_instance[, target]))
(next)()
dist <- instance_distance(instance, current_instance)
class_no <- 1
if (nn_stored_count[class_no] < control$Relief.nneighbour) {
nn_stored_count[class_no] <<- nn_stored_count[class_no] + 1
n_array[class_no, nn_stored_count[class_no], ] <<-
c(dist, current_idx)
} else {
max_idx <- which.max(n_array[class_no, , 1])
max_value <- n_array[class_no, max_idx, 1]
if (dist < max_value) {
n_array[class_no, max_idx, ] <<- c(dist, current_idx)
}
}
}
}
# uses parent.env
update_weights <- function(instance_idx) {
instance <- new_data[instance_idx, , drop = FALSE]
instance_class <- instance[1, target]
instance_class_no <- which(classes == instance_class)
# for each attribute
for (input_idx in 1:attributes_count) {
# nearest hits
hits_sum <- 0
if (nn_stored_count[instance_class_no] > 0) {
hits_sum <- sum(sapply(1:nn_stored_count[
instance_class_no], function(n_idx) {
n_instance_idx <- n_array[instance_class_no, n_idx, 2]
n_instance <- new_data[n_instance_idx, , drop = FALSE]
return(field_distance(input_idx, instance, n_instance))
}))
hits_sum <- hits_sum / nn_stored_count[instance_class_no]
}
# nearest misses
misses_sum <- 0
if (class_count > 1) {
misses_sum <- sum(sapply((1:class_count)[-instance_class_no], function(class_no) {
class_misses_sum <- 0
if (nn_stored_count[class_no] > 0) {
class_misses_sum <- sum(sapply(1:nn_stored_count[class_no], function(n_idx) {
n_instance_idx <- n_array[class_no, n_idx, 2]
n_instance <- new_data[, input][n_instance_idx, , drop = FALSE]
return(field_distance(input_idx, instance, n_instance))
}))
class_misses_sum <-
class_misses_sum *
class_prob[class_no] / nn_stored_count[class_no]
}
return(class_misses_sum)
}))
misses_sum <- misses_sum/(1 - class_prob[instance_class_no])
}
weights[input_idx] <<- weights[input_idx] - hits_sum + misses_sum
}
}
# uses parent.env
new_data <- data[, c(input, target)]
new_data <- normalize.min.max(new_data)
class_count <- NULL
class_prob <- NULL
classes <- NULL
p_val_in_class <- NULL
weights <- NULL
n_array <- NULL
nn_stored_count <- NULL
sample_instances_idx <- NULL
instances_count <- dim(new_data)[1]
attributes_count <- length(input)
if (control$Relief.nneighbour < 1) {
control$Relief.nneighbour <- 1
warning(paste("Assumed: control$Relief.nneighbour = ",
control$Relief.nneighbour))
}
if (control$Relief.nsample < 1) {
control$Relief.nsample <- 1
warning(paste("Assumed: control$Relief.nsample = ",
control$Relief.nsample))
sample_instances_idx <- sample(1:instances_count, 1)
} else if (control$Relief.nsample > instances_count) {
warning(paste("Assumed: control$Relief.nsample = ",
control$Relief.nsample))
control$Relief.nsample <- instances_count
sample_instances_idx <- 1:instances_count
} else {
sample_instances_idx <- sort(sample(
1:instances_count, control$Relief.nsample, replace = TRUE))
}
tab <- table(new_data[[target]])
class_prob <- tab / sum(tab)
classes <- names(class_prob)
class_count <- length(classes)
p_val_in_class <- lapply(new_data[input], function(vec) {
tab <- table(vec, new_data[[target]])
return(apply(tab, 2, function(x) {
s <- sum(x)
if (s == 0) return(x) else return(x / s)
}))
})
n_array <- array(0, c(class_count, control$Relief.nneighbour, 2))
nn_stored_count <- array(0, class_count)
weights <- rep(0, attributes_count)
sapply(sample_instances_idx, function(current_instance_idx) {
current_instance <- new_data[current_instance_idx, , drop = FALSE]
if (is.na(current_instance[[target]]))
return(NULL)
nn_stored_count[] <<- 0
n_array[] <<- Inf
find_neighbours(current_instance_idx)
update_weights(current_instance_idx)
})
res <- list()
weights <- weights / control$Relief.nsample
names(weights) <- input
weights <- sort(weights, decreasing = TRUE)
for (s in control$ranker.search) {
x <- ranker.search(weights, target, data,
control = within(control, ranker.search <- s), trace = trace)
res[["relief"]][[s]] <- list(weights = weights, subset = x$subset)
}
return(unlist(res, recursive = FALSE))
}
|
15a190e444045603baa4ce39db81f8d3e7f7f0bb | 25f1c949f6c7ec5b5aece3baa90c8076f59f886f | /mala_roznica_glosow/150927_Hackaton_Mala_roznica_glosow.R | cd9362672c7dfcd4ca4927dea07c5722d9e83806 | [] | no_license | mi2-warsaw/JakOniGlosowali | d759273f0f91510276e58236b15da2375db9837d | e1ecb7f0c8aaf9cb8b7334ec0ea40e9a7e55a94d | refs/heads/master | 2021-01-10T08:08:41.723169 | 2015-12-09T20:56:01 | 2015-12-09T20:56:01 | 43,201,017 | 5 | 3 | null | 2018-08-29T20:18:52 | 2015-09-26T10:12:13 | HTML | UTF-8 | R | false | false | 2,555 | r | 150927_Hackaton_Mala_roznica_glosow.R | library(dplyr)
library(tidyr)
library(rvest)
#tylko plik all_votes
setwd("/Users/Alicja/Desktop")
load("all_votes.rda")
#scrapowanie nazw legislacji ze strony Mam Prawo Wiedziec
title_scrap <- function(page){
page <- html(page)
title <- html_nodes(page, ".naglowek-1")
title <- html_text(title)
return(title)
}
# Scrape a title for every unique (meeting, voting) id pair found in
# columns 5 and 6 of all_votes.
ids <- unique(all_votes[, c(5, 6)])
n <- nrow(ids)
title <- character(n)
for (i in seq_len(n)) {
  vote_url <- paste0("http://mamprawowiedziec.pl/strona/glosowanie/7,",
                     ids[i, 1], ",", ids[i, 2])
  scraped <- title_scrap(vote_url)
  # On Windows re-encode the scraped text instead:
  # title[i] <- iconv(scraped, from = "UTF-8", to = "Windows-1250")
  title[i] <- scraped  # mac / UTF-8 locale
}
# Attach the scraped titles to the (meeting, voting) id pairs and drop
# rows where no title was found.
idiki <- cbind(ids, title = title)
idiki[, 3] <- as.character(idiki[, 3])  # cbind may have coerced to factor
idiki <- filter(idiki, title > 0)
# Restrict the raw votes to the scraped votings, then join the titles in.
# (The merge is an inner join on the id pair, so it also removes any rows
# the coarse %in% pre-filter let through.)
votes <- all_votes %>%
  filter(nr_meeting %in% idiki[, 1], nr_voting %in% idiki[, 2])
votes <- merge(votes, idiki,
               by.x = c("nr_meeting", "nr_voting"),
               by.y = c("nr_meeting", "nr_voting"))
# Per-legislation vote counts broken down by parliamentary club: one row
# per (title, voting, club), with each vote type ("Za"/"Przeciw"/...)
# spread into its own column and the absolute for/against margin.
glosowania_partie <- votes %>%
  group_by(title, id_voting, club, vote) %>%
  summarise(liczba = n()) %>%
  spread(vote, liczba) %>%
  replace(is.na(.), 0) %>%               # vote types absent in a club -> 0
  mutate(roznica = abs(Za - Przeciw)) %>%
  arrange(title, id_voting, club)
# Overall per-legislation vote counts (all clubs combined), sorted by the
# absolute margin between "for" ("Za") and "against" ("Przeciw").
# NOTE: a count produced by n() is never NA, so no pre-spread NA fix-up
# is needed; combinations missing after spread() are zero-filled below.
glosowania_ogolem <- votes %>%
  group_by(title, id_voting, vote) %>%
  summarise(liczba = n()) %>%
  spread(vote, liczba) %>%
  replace(is.na(.), 0) %>%
  mutate(roznica = abs(Za - Przeciw)) %>%
  arrange(roznica)
# Select the votings where the for/against margin was below 21 votes.
close_votes <- subset(glosowania_ogolem, glosowania_ogolem$roznica < 21)
glos <- as.numeric(unlist(unique(close_votes[, 2])))
glosowania_ogolem_20 <- subset(glosowania_ogolem,
                               glosowania_ogolem$id_voting %in% glos)
glosowania_partie_20 <- subset(glosowania_partie,
                               glosowania_partie$id_voting %in% glos)
# Identify MPs who changed parliamentary club.
# Per-MP vote counts within each club, one column per club
# (NA -> 0 for clubs the MP never voted in).
migranci <- all_votes %>%
  group_by(surname_name, club) %>%
  summarise(ile_glosowal = n()) %>%
  spread(club, ile_glosowal) %>%
  replace(is.na(.), 0)
# Number of distinct clubs each MP has voted in (how often they "moved").
migranci2 <- all_votes %>%
  group_by(surname_name, club) %>%
  summarise(ile_glosowal = n()) %>%
  group_by(surname_name) %>%
  summarise(ile_klubow = n())
# Combined migrant info: per-club vote counts joined with the number of
# clubs per MP; keep only MPs who belonged to more than one club,
# most "mobile" first.
mig <- merge(migranci, migranci2)
mig <- mig %>%
  arrange(desc(ile_klubow)) %>%
  filter(ile_klubow > 1)
# All votes cast by club-changing MPs on the scraped votings.
glosowania_mig <- subset(votes, votes$surname_name %in% mig$surname_name)
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.