blob_id
stringlengths 40
40
| directory_id
stringlengths 40
40
| path
stringlengths 2
327
| content_id
stringlengths 40
40
| detected_licenses
listlengths 0
91
| license_type
stringclasses 2
values | repo_name
stringlengths 5
134
| snapshot_id
stringlengths 40
40
| revision_id
stringlengths 40
40
| branch_name
stringclasses 46
values | visit_date
timestamp[us]date 2016-08-02 22:44:29
2023-09-06 08:39:28
| revision_date
timestamp[us]date 1977-08-08 00:00:00
2023-09-05 12:13:49
| committer_date
timestamp[us]date 1977-08-08 00:00:00
2023-09-05 12:13:49
| github_id
int64 19.4k
671M
⌀ | star_events_count
int64 0
40k
| fork_events_count
int64 0
32.4k
| gha_license_id
stringclasses 14
values | gha_event_created_at
timestamp[us]date 2012-06-21 16:39:19
2023-09-14 21:52:42
⌀ | gha_created_at
timestamp[us]date 2008-05-25 01:21:32
2023-06-28 13:19:12
⌀ | gha_language
stringclasses 60
values | src_encoding
stringclasses 24
values | language
stringclasses 1
value | is_vendor
bool 2
classes | is_generated
bool 2
classes | length_bytes
int64 7
9.18M
| extension
stringclasses 20
values | filename
stringlengths 1
141
| content
stringlengths 7
9.18M
|
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
a502f071cb8e8defc8f98548bfeef593be83e30f
|
00046ef7671d5a02ea27deb30bf5657c45d45acb
|
/clients/r-client/run_tests.R
|
86eee7bbdc3dcb658ad7f6f7777436a84f4bf2b1
|
[
"BSD-3-Clause"
] |
permissive
|
uptake/groundhog
|
b9a7915b5bf269dafc69ee337a3493cd1c1aa895
|
41b143f528ddcf7be581fcb2bde90e5d360cd827
|
refs/heads/master
| 2021-07-17T17:05:04.768249
| 2019-10-27T21:31:49
| 2019-10-27T21:31:49
| 200,898,621
| 7
| 11
|
BSD-3-Clause
| 2020-08-27T19:05:31
| 2019-08-06T17:48:17
|
Python
|
UTF-8
|
R
| false
| false
| 1,192
|
r
|
run_tests.R
|
# Summarise a testthat results object, display the outcomes, and report
# overall success.
#
# Args:
#   testthat_results: the object returned by devtools::test() /
#     testthat::test_dir(); each element's $results holds expectation
#     objects whose first S3 class encodes the outcome.
#
# Returns:
#   TRUE when no expectation failed or errored, FALSE otherwise.
CheckTestThatResults <- function(testthat_results){
  # Flatten every expectation's primary S3 class into one character vector.
  outcomes <- unlist(lapply(testthat_results
  , function(x){lapply(x$results, function(y){class(y)[1]})}))
  numErrors <- sum(outcomes %in% c('expectation_failure','expectation_error'))
  numWarnings <- sum(outcomes == 'expectation_warning')
  numPassed <- sum(outcomes == 'expectation_success')
  # generate and trim outcomes table for display
  outcomesTable <- data.table::as.data.table(testthat_results)
  # Trim long file/test names so the table fits on a terminal line.
  # (Columns can be referenced directly inside data.table's j argument.)
  outcomesTable[, file := strtrim(file, 17)]
  outcomesTable[, test := strtrim(test, 48)]
  data.table::setnames(outcomesTable, old = 'real', new = 'testTime')
  print("The Test Results are:")
  # BUG FIX: the table above was built "for display" but was never shown.
  print(outcomesTable)
  print(paste('Num Errors:', numErrors))
  print(paste('Num Warnings:', numWarnings))
  print(paste('Num Passed:', numPassed))
  print("")
  return(numErrors == 0)
}
# Execute the package's unit tests and translate the outcome into a process
# exit status: a non-zero status lets CI detect the failure.
testStatus <- CheckTestThatResults(devtools::test())
if (!isTRUE(testStatus)) {
  print('Tests have failed!')
  q(save = 'no', status = 1)
}
|
2a41b7c5a9b78a6fd60bcedd81ca550ca84f5d46
|
11ca614b32749f369af93febbb04571b7f95748a
|
/statistical_inference/quizzes/quiz4.R
|
4f48dac8ccbf254120c7b9ad188da91a3fb5701e
|
[] |
no_license
|
richelm/course_notes
|
61c525ca433d4faf90216509575432665fd7f461
|
0677708e8a05e67063a94a6dbee7e303b7b5d55a
|
refs/heads/master
| 2021-01-18T21:51:10.181408
| 2019-12-03T22:36:29
| 2019-12-03T22:36:29
| 42,669,012
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 577
|
r
|
quiz4.R
|
# Statistical Inference — quiz 4 worked answers.

# Question 1: p-value of a paired t-test on the two measurement vectors.
b  <- c(140, 138, 150, 148, 135)
w2 <- c(132, 135, 151, 146, 130)
t.test(b, w2, paired = TRUE)$p.value

# Question 2: t-based interval around the mean.
mu <- 1100
s  <- 30
n  <- 9
df <- n - 1
mu + c(-1, 1) * qt(0.975, df, lower.tail = FALSE) * s / sqrt(n)

# Question 3: parameters only (no computation required for this answer).
n   <- 4
mu0 <- 2
mua <- 3

# Question 4: upper-tail Poisson probability.
ppois(1787 - 1, 0.01 * 1787, lower.tail = FALSE)

# Question 5: one-sided interval for the difference in means.
mt <- -3
mp <- 1
st <- 1.5
sp <- 1.8
s  <- st - sp
(mt - mp) + c(-1, 0) * qt(0.95, 8) * s / sqrt(9)

# Question 6: answered without computation.

# Question 7: upper bound of a 95% normal interval for the mean.
n  <- 100
mu <- 0.01
sd <- 0.04
mu + qnorm(0.975) * sd / sqrt(n)

# Question 8: sample size from the one-sided 5% normal quantile.
z <- qnorm(1 - 0.05)
s <- 0.04
p <- 0.9
n <- (z * s / p)^2
n
|
7666fd3c329adced2c70335c0e266efac36054c9
|
93730e6fee0ae4e3bb028714d6e8a5e335632121
|
/man/clear_results.Rd
|
2f80b15d4707073b49ee42d982772e3d2bf8e0b4
|
[] |
no_license
|
jeevanyue/PIVOT
|
56ef7c2d2eb30a197b1c0387ae79e73605ab7eae
|
46fa6af11f19c320ee338452ccff745aa93a1f6d
|
refs/heads/master
| 2021-01-01T06:30:45.988532
| 2017-07-17T02:51:20
| 2017-07-17T02:51:20
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 231
|
rd
|
clear_results.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/data.R
\name{clear_results}
\alias{clear_results}
\title{PIVOT Data Control}
\usage{
clear_results(r_data)
}
\description{
Clean all analysis results
}
|
21beee98d8fee1f262b65d29175cb12310955502
|
11fe2a2d8bf3327517e9c76e667ee68c06cf5599
|
/man/Dist_mat_edge.Rd
|
66ba3f6b8ef1ea749b72f33c9cdf3e0db132e26e
|
[] |
no_license
|
BenjamenSimon/EpidemicR
|
b07e115b42eb50d45e2ea47c87b5ee6440bb0693
|
7d0a6876affa2adf1f34c6b77dbf21bd8512c076
|
refs/heads/master
| 2020-09-11T05:53:27.546250
| 2020-03-16T12:47:44
| 2020-03-16T12:47:44
| 186,651,203
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 1,404
|
rd
|
Dist_mat_edge.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/Dist_mat_edge.R
\name{Dist_mat_edge}
\alias{Dist_mat_edge}
\title{Clustered edge population distribution.}
\usage{
Dist_mat_edge(N, xlim = 20, ylim = 20, clusters = 8)
}
\arguments{
\item{N}{The total number of individuals in the population.}
\item{xlim}{The width of the plane on which individuals will be generated (defaults to 20 units wide).}
\item{ylim}{The height of the plane on which individuals will be generated (defaults to 20 units high).}
\item{clusters}{The total number of clusters. Half the clusters will be on the right edge, the other half on the left.
(Must be even, greater than 2, and divide N). Clusters are generated on blocks of size (xlim/10, ylim/10).}
}
\value{
The function returns a list. The first object in the list is a Nx2 matrix of the coordinates of the individuals.
The second object is an NxN distance matrix.
}
\description{
This function generates a distance matrix for a population that is clustered on the left and right edges of the space only.
}
\examples{
xy.coords <- Dist_mat_edge(N=100, xlim = 20, ylim = 20, clusters = 10)[[1]]
distance_mat <- Dist_mat_edge(N=100, xlim = 20, ylim = 20, clusters = 10)[[2]]
plot(xy.coords[,1], xy.coords[,2], type = "p")
}
\keyword{Clustered}
\keyword{distance}
\keyword{distribution}
\keyword{edge}
\keyword{matrix}
\keyword{population}
|
0ac3370da96915221d0465db17f189ac9f447ce7
|
a528173483407425c55cbbdf278a2b724830a01e
|
/man/zapm_direct.Rd
|
1aec68bfb405306a73a29a65538e5ff92d205ac4
|
[
"MIT"
] |
permissive
|
gmke/zernike
|
7ea52f89dc353f7d72a8385078e03bc2853a22c1
|
397a5d2f316e2f95cc1a1209007780855da16b13
|
refs/heads/master
| 2023-05-28T21:58:50.075555
| 2023-05-10T15:07:23
| 2023-05-10T15:07:23
| 166,230,701
| 0
| 0
|
MIT
| 2021-06-18T12:00:04
| 2019-01-17T13:30:49
|
R
|
UTF-8
|
R
| false
| true
| 973
|
rd
|
zapm_direct.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/zapm_direct.r
\name{zapm_direct}
\alias{zapm_direct}
\title{Zernike Annular polynomials from formulas}
\usage{
zapm_direct(rho, theta, eps)
}
\arguments{
\item{rho}{a vector of radial coordinates with eps <= rho <= 1.}
\item{theta}{a vector of angular coordinates, in radians.}
\item{eps}{the obstruction fraction 0 <= eps < 1.}
}
\value{
a \verb{length(rho) x 16} matrix of Zernike annular polynomial values.
}
\description{
Create a matrix of Zernike Annular polynomial values
for the counterparts of primary and secondary
optical aberrations.
}
\details{
The values are from published formulas. This function is
included for testing and reference only.
}
\seealso{
Calls \code{\link[=rzernike_ann_direct]{rzernike_ann_direct()}} for radial Zernike annular values.
Use \code{\link[=zapm]{zapm()}} or \code{\link[=zapm_128]{zapm_128()}} for complete sequences
of values to arbitrary order.
}
|
925d3015295392f7073745c267e2a177b22bf423
|
7676c2d5b77b588adde0e787a501dac27ad8efcd
|
/work/r데이터분석_예제파일/예제/2_09.R
|
b501e6cc44c458aa77535171dd49eb74a006384e
|
[] |
no_license
|
bass4th/R
|
a834ce1e455ca2c018364a48ea8ba0fbe8bf95e9
|
fb774770140a5792d9916fc3782708e09c12273d
|
refs/heads/master
| 2020-05-05T09:02:52.210737
| 2019-04-06T19:48:37
| 2019-04-06T19:48:37
| 179,888,937
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 78
|
r
|
2_09.R
|
# Read the example spreadsheet export (CSV) into a data frame.
excel <- read.csv("exceldata.csv")
# Print the whole table.
excel
# First row, all columns.
excel[1, ]
# Second column, all rows.
excel[,2]
# Show the structure: column names, types, and a preview of values.
str(excel)
|
c793009ed3eead786ca3996933521ec5f9f3f082
|
30bdba988524885dc0fb3487476194299d21a05c
|
/tests/testthat/test-print.R
|
42f7250ee27818ccae00c0f17497228fd628feb4
|
[
"MIT"
] |
permissive
|
applied-statistic-using-r/valaddin
|
d66b01144f519c12b856b6f2ad3addbac27d3cb2
|
44a446a1f7433161a80154e1ca0d2cb22eeaacd2
|
refs/heads/master
| 2021-01-19T00:50:33.078839
| 2017-04-04T06:13:41
| 2017-04-04T06:13:41
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,611
|
r
|
test-print.R
|
# Unit tests for valaddin's print methods: firm closures and check makers.
context("Printing")
# Like expect_output(), but accepts expectation strings with less escaping.
# (perlize() is a helper presumably defined in the test setup files — confirm.)
expect_output_p <- perlize(expect_output)
# firm_closure ----------------------------------------------------------
# Keep only the fixture argument lists that contain both `x` and `y`.
# (args_list, pass_args, map_lgl come from the package's test helpers.)
has_xy <- map_lgl(args_list, ~ all(c("x", "y") %in% names(.)))
fs <- lapply(args_list[has_xy], pass_args)
# Two input checks, each pairing a failure message with a predicate:
# x must be numeric, y must be character.
chks <- list(
list("Not numeric" ~ x) ~ is.numeric,
list("Not character" ~ y) ~ is.character
)
fs_firm <- lapply(fs, firmly, .checklist = chks)
# Printing a firm closure should reproduce the wrapped function's own output.
test_that("firm closure original function body is displayed", {
for (i in seq_along(fs)) {
original_fn <- capture_output(print(fs[[i]]))
expect_output_p(print(fs_firm[[i]]), original_fn)
}
})
# The attached checks (predicate plus message) should appear in the printout.
test_that("firm closure checks are displayed", {
for (f in fs_firm) {
expect_output_p(print(f), "is.numeric(x):\n\"Not numeric\"")
expect_output_p(print(f), "is.character(y):\n\"Not character\"")
}
})
# .warn_missing arguments should be listed (backtick-quoted, comma-separated);
# without any, the printout says missing arguments are "Not checked".
test_that("firm closure arguments whose absence is checked are displayed", {
nms <- list("x", "y", c("x", "y"), c("y", "x"))
for (f in fs) {
f_firm <- firmly(f, .checklist = chks)
expect_output_p(print(f_firm), "missing arguments:\nNot checked")
for (nm in nms) {
f_warn <- firmly(f, .warn_missing = nm)
f_firm_warn <- firmly(f_firm, .warn_missing = nm)
msg <- paste0("missing arguments:\n",
paste(encodeString(nm, quote = "`"), collapse = ", "))
expect_output_p(print(f_warn), msg)
expect_output_p(print(f_firm_warn), msg)
}
}
})
# check_maker -------------------------------------------------------------
# localize() turns a message ~ predicate formula into a check maker whose
# printout should include the predicate's source.
test_that("local checker predicate is displayed", {
header <- "* Predicate function:"
fmls <- list(
"Not data frame" ~ is.data.frame,
"Not unsorted" ~ is.unsorted,
"Not NaN" ~ is.nan,
"Not positive" ~ function(x) x > 0
)
for (f in fmls) {
pred <- lazyeval::f_rhs(f)
out <- paste(header, capture_output(pred), sep = "\n")
expect_output_p(print(localize(f)), out)
}
})
# Every exported vld_* checker (minus the known exceptions) should print an
# error message of the form "Not <name with underscores as spaces>".
test_that("local checker error message is displayed", {
header <- "* Error message:"
# vld_numeric, vld_scalar_numeric have exceptional error messages
nms_chkrs <- setdiff(
grep("^vld_", getNamespaceExports("valaddin"), value = TRUE),
c("vld_numeric", "vld_scalar_numeric", "vld_true", "vld_false")
)
chkrs <- lapply(nms_chkrs, getExportedValue, ns = "valaddin")
names(chkrs) <- sub("^vld_", "", nms_chkrs)
for (nm in names(chkrs)) {
msg <- sprintf("Not %s", gsub("_", " ", nm))
out <- paste(header, encodeString(msg, quote = "\""), sep = "\n")
expect_output_p(print(chkrs[[nm]]), out)
}
})
|
a824d13653cc1ad1a84c8ffd075ebc466533fd23
|
ede1c2211638eb1b9dbf1c5e57297dd71529708c
|
/Data import and cleaning/Coal_Consumption.R
|
30d385b40777b291ea4c0ee2c2b1aaa1874be412
|
[] |
no_license
|
sbrylka/Statistical_Review_of_World_Energy
|
ea7cb4d125f1c0e719f31b9fe429f875e1101b41
|
9306cdf6c415278b9b1373f321c8b09b2ebb7dcf
|
refs/heads/main
| 2023-01-31T20:27:07.697369
| 2020-12-09T19:08:10
| 2020-12-09T19:08:10
| 312,024,263
| 0
| 0
| null | 2020-11-11T18:31:54
| 2020-11-11T16:18:17
| null |
UTF-8
|
R
| false
| false
| 1,238
|
r
|
Coal_Consumption.R
|
# Import and tidy the BP Statistical Review coal-consumption sheet (units: EJ).
library(readxl)
library(dplyr)
library(tidyr)
# Reading BP data: the "Coal Consumption - EJ" sheet, with "n/a" as missing.
CC <- read_excel("bp-stats-review-2020-all-data.xlsx", sheet = "Coal Consumption - EJ", na = "n/a")
# BUG FIX: the original passed `stringsAsFactors = False`; R's logical
# constant is FALSE, so `False` raised "object 'False' not found" at run time.
CC <- as.data.frame(CC, stringsAsFactors = FALSE)
# fixing few names
CC[111, 1] <- 'OECD'
CC[113, 1] <- 'European Union'
CC[2, 1] <- 'Country' # changing JEDNOSTKA to Country will help us later
# First column without NAs: the observation (country/region) labels.
List_of_observations <- na.exclude(CC[, 1])
CC <- CC[CC[, 1] %in% List_of_observations, ]
# Negation of %in%, used below to filter rows out.
'%!in%' <- Negate('%in%')
# Names of regional totals and organisations to exclude from the country data.
Totals <- c("Total North America","Total S. & Cent. America", "Total Asia Pacific", "Total Europe", "Total CIS", "Total Africa", "Total World")
Organizations <- c("of which: OECD" = "OECD", "Non-OECD", "European Union")
# deleting totals' rows
CC <- CC[CC[, 1] %!in% Totals, ]
# deleting organisations' rows
CC <- CC[CC[, 1] %!in% Organizations, ]
# Promote the first remaining row (the year headers) to column names.
colnames(CC) <- CC[1, ]
# deleting comments in the last seven rows and the last one
CC <- head(CC, -7)
# Keep the data rows and first 56 columns (also drops the header row).
CC <- CC[2:94, 1:56]
# Un-pivot the year columns into long format.
# BUG FIX: the value-column name contained a stray trailing double quote
# ('Coal Consumption - EJ"'), clearly a typo in the column label.
CC2 <- gather(CC, key = 'Year', value = 'Coal Consumption - EJ', -'Country')
|
6e06648552aecd0f3cd09303a0b05160ba361f85
|
37184e75eddc045944b7cfe9c0834523c4ebb666
|
/src/generate_deseq_object.salmon.R
|
5f25ad0ce836000ad3e331fdf04684a624c2d22e
|
[] |
no_license
|
TomHarrop/wasprnaseq-test
|
ecfd1b7d72108a0b958c58beea362125a4298adc
|
d583391650049a982cafbeaf5a10ad8d937dfb53
|
refs/heads/master
| 2023-01-05T04:12:28.424647
| 2020-11-03T23:56:22
| 2020-11-03T23:56:22
| 308,509,710
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,005
|
r
|
generate_deseq_object.salmon.R
|
#!/usr/bin/env Rscript
# Build a gene-level DESeq2 dataset from salmon quantifications via tximeta.
# Runs inside a Snakemake rule: all paths come from the `snakemake` S4 object.
# Redirect both messages and regular output to the Snakemake log file.
log <- file(snakemake@log[[1]],
open = "wt")
sink(log,
type = "message")
sink(log,
append = TRUE,
type = "output")
library(data.table)
library(DESeq2)
library(tximeta)
# Rule inputs, params and outputs (paths supplied by Snakemake).
transcript_file <- snakemake@input[["mrna"]]
gff_file <- snakemake@input[["gff"]]
file_list <- snakemake@input[["quant_files"]]
index_dir <- snakemake@params[["index"]]
dds_file <- snakemake@output[["dds"]]
# dev
# (hard-coded paths kept for interactive debugging outside Snakemake)
# transcript_file <- "output/000_ref/vvulg.mrna.fa" # mrna
# gff_file <- "data/ref/GCA_014466185.1_ASM1446618v1_genomic.gff" # gff
# index_dir <- "output/005_index"
# file_list <- list.files("output/020_salmon",
# full.names = TRUE,
# recursive = TRUE,
# pattern = "quant.sf")
# generate the txome
# Genome label from the GFF file name, e.g. "GCA_..._genomic.gff" -> "GCA_...".
genome_name <- sub("_genomic.gff", "", basename(gff_file))
tmp <- tempdir()
json_file <- file.path(tmp, paste0(genome_name, ".json"))
# Point tximeta's BiocFileCache at a temporary directory so this run does not
# write to the user's permanent cache.
setTximetaBFC(tmp)
# Register the salmon index + annotation as a linked transcriptome so tximeta
# can recognise the quantifications.
makeLinkedTxome(indexDir = index_dir,
source = "ncbi", # should be RefSeq but NCBI hasn't posted genome info
organism = "Vespula vulgaris",
release = "1",
genome = genome_name,
fasta = transcript_file,
gtf = gff_file,
jsonFile = json_file)
# generate col_data
# Sample names are the quant.sf parent-directory names; "caste_indiv" is the
# naming convention split into two columns below.
names(file_list) <- gsub(".*/", "", dirname(file_list))
col_data <- data.table(
files = file_list,
names = names(file_list))
col_data[, c("caste", "indiv") := tstrsplit(names, "_")]
# read the salmon info
se <- tximeta(col_data)
# mapping info (don't run)
# metadata(se)[["quantInfo"]]$percent_mapped
# metadata(se)[["quantInfo"]]
# metadata(se)[["quantInfo"]]$num_decoy_fragments / metadata(se)[["quantInfo"]]$num_processed * 100
# summarize counts to gene-level
gse <- summarizeToGene(se)
# generate deseq object (design: expression ~ caste)
dds <- DESeqDataSet(gse,
design = ~ caste )
# write output
saveRDS(dds, dds_file)
# log the package versions used for this run
sessionInfo()
|
b9445788ae7f5067e3d2de79560a8c998288b70a
|
5e01f8ae5fa31589c85c15d482d93a1d2ae1f3dd
|
/R/expr.R
|
68e186dc29c3d49ddf4dff84674cf859298bd7d9
|
[] |
no_license
|
Yafei611/CFGL
|
719b070afa508df65da7658d26c6f96a74b40b70
|
f71894cc57c273ba04fed0aeef3c9a9855dc66c8
|
refs/heads/master
| 2021-06-14T22:20:26.657969
| 2018-07-24T19:56:02
| 2018-07-24T19:56:02
| 141,628,393
| 2
| 3
| null | 2021-05-26T15:46:35
| 2018-07-19T20:37:08
|
R
|
UTF-8
|
R
| false
| false
| 617
|
r
|
expr.R
|
#' Gene expression data for 2 rat tissues
#'
#' A dataset containing gene expression levels for rat brain and heart
#' tissues. The expression intensities were measured using the Affymetrix
#' Rat Exon 1.0 ST Array. 100 differential genes were selected to construct
#' an example co-expression network for the 2 tissues.
#'
#' @format A data list containing 2 data matrices
#' \describe{
#'   \item{brain}{expression levels of 100 genes for brain tissue}
#'   \item{heart}{expression levels of 100 genes for heart tissue}
#' }
#' @source More detailed information can be found on the PhenoGen website (http://phenogen.ucdenver.edu).
"expr"
|
c6060d16889ea608f3bcf0a98f15612aec10bbde
|
21f123043fc90b4abfd504a6bdf189bb51276e38
|
/ROC curves/roc-curve.R
|
561b0312b01a47a8e60ec38f9d50cec602d6f4fc
|
[] |
no_license
|
EudaldTejero/MasterThesis
|
722d22c1487844255ef9fdf2242f0a7e072fa6f7
|
dfb638874d134f424bc509a42ccb0065263989cd
|
refs/heads/master
| 2020-03-22T14:35:46.803426
| 2018-07-09T06:46:14
| 2018-07-09T06:46:14
| 140,191,219
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,395
|
r
|
roc-curve.R
|
# Cross-validated ROC/AUC analysis over five pre-ranked result lists.
library("cvAUC")
# Read the 0/1 class labels for each fold. lapply() over a single path gives
# a one-element list; unlist + as.numeric flattens it to a numeric vector.
category1 <- lapply("category1.txt",readLines)
category1 <- as.numeric(unlist(category1))
category2 <- lapply("category2.txt",readLines)
category2 <- as.numeric(unlist(category2))
category3 <- lapply("category3.txt",readLines)
category3 <- as.numeric(unlist(category3))
category4 <- lapply("category4.txt",readLines)
category4 <- as.numeric(unlist(category4))
category5 <- lapply("category5.txt",readLines)
category5 <- as.numeric(unlist(category5))
category <- list(category1,category2,category3,category4,category5)
# Pseudo-scores: a decreasing sequence from 1 to 0, one value per label, i.e.
# the score encodes only each item's rank within its file.
prediction1 <- seq(1, 0, length.out = length(category1))
prediction2 <- seq(1, 0, length.out = length(category2))
prediction3 <- seq(1, 0, length.out = length(category3))
prediction4 <- seq(1, 0, length.out = length(category4))
prediction5 <- seq(1, 0, length.out = length(category5))
prediction <- list(prediction1,prediction2,prediction3,prediction4,prediction5)
# Identifiers for each fold's items.
# NOTE(review): `ids` is built but never used below — confirm whether it was
# meant to be passed somewhere (e.g. for labelling) or can be removed.
ids1 <- lapply("result1.txt",readLines)
ids2 <- lapply("result2.txt",readLines)
ids3 <- lapply("result3.txt",readLines)
ids4 <- lapply("result4.txt",readLines)
ids5 <- lapply("result5.txt",readLines)
ids <- list(ids1, ids2, ids3, ids4, ids5)
# Cross-validated AUC over the five folds, printed, then with a 95% CI.
out <- cvAUC(prediction, category)
out
ci.cvAUC(prediction, category, label.ordering = NULL, confidence = 0.95)
# Vertically averaged ROC curve plus the chance diagonal.
plot(out$perf, col="red", avg="vertical", lwd=2, main="ROC curve")
abline(a=0, b=1, lty=2, lwd=2, col="black")
|
071f049ed041c86b1fbc57808ddc3fe090e44741
|
7a7777dae0c4d694f1e600bb3f8bdbb1284d0e6a
|
/man/gpdd_timeperiod.Rd
|
9698142525d88cb16abefce8fa433b56ca07297a
|
[
"CC0-1.0"
] |
permissive
|
ropensci/rgpdd
|
b8bf4340d11914a7b559bab06c0949cd2b9349e5
|
7406de253d397e7fa7485ecd4b4816ede9a25a2e
|
refs/heads/master
| 2021-07-18T19:23:14.049930
| 2020-05-18T16:02:50
| 2020-05-18T16:02:50
| 12,953,752
| 9
| 5
|
NOASSERTION
| 2020-05-13T14:44:43
| 2013-09-19T16:44:52
|
R
|
UTF-8
|
R
| false
| false
| 507
|
rd
|
gpdd_timeperiod.Rd
|
% Generated by roxygen2 (4.1.0): do not edit by hand
% Please edit documentation in R/data.R
\docType{data}
\name{gpdd_timeperiod}
\alias{gpdd_timeperiod}
\title{The time period table}
\description{
TimePeriod is a look-up table that provides text descriptions of
the temporal period the sample relates to such as 'January',
'Spring', 'Week 1' and 'Day 1'.
}
\author{
GPDD Administrator \email{cpb-gpdd-dl@imperial.ac.uk}
}
\references{
\url{http://www3.imperial.ac.uk/cpb/databases/gpdd}
}
\keyword{data}
|
30f2abe140116581e4579f2bdb759f6a1ce1443f
|
b616673f7b104256e8a7bff99cb6e11c5fff199b
|
/amto/array/htmlpdfr/fs_ary_basics.R
|
7429b4325539a12729bfc343eed9816d0e4647f0
|
[
"MIT"
] |
permissive
|
yanliangs/R4Econ
|
3fac5b2dec797297e21ee32d2a0078af78422c39
|
06f9633c523c21aed5ac744caee9d8ccd3c906cc
|
refs/heads/master
| 2023-02-23T00:36:44.142180
| 2021-01-24T17:57:49
| 2021-01-24T17:57:49
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,989
|
r
|
fs_ary_basics.R
|
## ----global_options, include = FALSE----------------------------------------------------------------------------------
# Purled from an R Markdown tutorial; the "## ----" lines are knitr chunk labels.
try(source("../../.Rprofile"))
## ---------------------------------------------------------------------------------------------------------------------
# Basic numeric vectors and a zero vector of matching length.
ar_a <- c(1,2,3)
# NOTE(review): this evaluates to c(1, 2, 3, 2, 3) — the division binds only
# to 3/1. If elementwise division of two vectors was intended it would be
# c(1,2,3)/c(1,2,3); confirm against the source tutorial.
ar_b <- c(1,2,3/1,2,3)
rep(0, length(ar_a))
## ----amto.array.fs_array_basics.multi.2d------------------------------------------------------------------------------
# Multidimensional Array
# 1 is r1c1t1, 1.5 in r2c1t1, 0 in r1c2t1, etc.
# Three dimensions, row first, column second, and tensor third
x <- array(c(1, 1.5, 0, 2, 0, 4, 0, 3), dim=c(2, 2, 2))
dim(x)
print(x)
## ----amto.array.fs_array_basics.slice.lastelement---------------------------------------------------------------------
# Remove last element of array via negative indexing
vars.group.bydf <- c('23','dfa', 'wer')
vars.group.bydf[-length(vars.group.bydf)]
# Use the head function to remove last element
head(vars.group.bydf, -1)
head(vars.group.bydf, 2)
## ---------------------------------------------------------------------------------------------------------------------
# Remove first element of array
vars.group.bydf <- c('23','dfa', 'wer')
vars.group.bydf[2:length(vars.group.bydf)]
# Use Tail function (negative n keeps all but the first element)
tail(vars.group.bydf, -1)
tail(vars.group.bydf, 2)
## ---------------------------------------------------------------------------------------------------------------------
# define array
ar_amin <- c(0, 0.25, 0.50, 0.75, 1)
# select without head and tail (interior elements only)
tail(head(ar_amin, -1), -1)
## ---------------------------------------------------------------------------------------------------------------------
# define array
ar_amin <- c(0, 0.25, 0.50, 0.75, 1)
# select head and tail (first and last element)
c(head(ar_amin, 1), tail(ar_amin, 1))
## ----amto.array.fs_array_basics.NA.check------------------------------------------------------------------------------
# Convert Inf and -Inf to NA
# (na_if is dplyr's — presumably attached via the sourced .Rprofile; confirm.)
x <- c(1, -1, Inf, 10, -Inf)
na_if(na_if(x, -Inf), Inf)
|
e308a5b65ba92346d45e1e7ce49855f6bb991859
|
fee47bdbb5917055c5c0e02d183e1767bd8db512
|
/MontesReylaEmigration.R
|
98acd4999c571f6414e7f56d040b0ff28330a3fa
|
[] |
no_license
|
acmontes393/Assignment3MontesReyla
|
b8480b9e12fe038d317228613b15cad9d7ccc16d
|
bd357a3f38f4469f6776c13e41158b8b7b5f0161
|
refs/heads/master
| 2021-01-10T14:52:25.093855
| 2015-11-20T11:09:59
| 2015-11-20T11:09:59
| 45,249,459
| 0
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 12,017
|
r
|
MontesReylaEmigration.R
|
########################################################################################
########################## Collaborative Data Analysis Assignment 3 ####################
########################################################################################
# 0. Clearing the workspace
# NOTE(review): rm(list = ls()) wipes whatever is in the caller's global
# environment; preferable to run the script in a fresh R session instead.
rm(list = ls())
# 1. Installing and loading packages
# (one-off installs kept commented out so they do not re-run every time)
#install.packages('WDI')
#install.packages('tidyr')
#install.packages('rio')
#install.packages('countrycode')
#install.packages("RJSONIO")
#install.packages ("ggplot2")
#install.packages("rworldmap")
#install.packages("sp")
#install.packages("joinCountryData2Map")
# NOTE(review): unlike the lines above, these three run an install on every
# execution; consider commenting them out once installed.
install.packages("plm")
install.packages("Formula")
install.packages("pglm")
library("ggmap")
library("maptools")
library("countrycode")
library("RJSONIO")
library("WDI")
library("tidyr")
library("rio")
library("ggplot2")
library("rworldmap")
library("sp")
# (rworldmap is attached a second time below; harmless but redundant)
library('rworldmap')
library('Formula')
library('plm')
library('pglm')
#2. Setting directory
# NOTE(review): hard-coded, machine-specific paths; collaborators toggle
# between the two setwd() lines.
setwd('/Users/AnaCe/Desktop/Assignment3MontesReyla')
#setwd('/Users/ayrarowenareyla/Desktop/The Hertie School of Governance/Collaborative Social Sciences/Assignment3MontesReyla/Assignment3MontesReyla')
########################################################################################
################################# LOADING AND CLEANING DATA ############################
########################################################################################
# 4. Loading Migration UN Data
### Loop over sheets 2, 5, 8 and 11 of the UN workbook; for each sheet take
### rows 15-16 (labels + total emigrant stock), transpose so countries become
### rows, keep entries 10-241, and save the result as emigration<sheet>.
### (The recoding below shows sheet 11 = 2013, 8 = 2010, 5 = 2000, 2 = 1990.)
tables <-c(2, 5, 8, 11)
for (i in tables) {
Migration<- import("UN_MigrantStockByOriginAndDestination_2013T10.xls",
format = "xls", sheet =i)
emigration<- Migration[c(15,16),]
emigration<- t(emigration)
emigration<-as.data.frame(emigration)
emigration<- emigration[c(10:241),]
colnames(emigration) <- c("Country","Emigration")
assign(paste0("emigration", i), emigration)
}
# Combine the four per-year tables side by side, keep Country plus the four
# Emigration columns, then reshape to long (one row per country-year).
emigrationtotal <- cbind(emigration11, emigration8, emigration5, emigration2)
emigrationtotal <-emigrationtotal[,c(1,2, 4, 6, 8)]
emigrationtotal <- gather(emigrationtotal, year, emigration, 2:5)
# Map the auto-generated column names (Emigration, Emigration.1, ...) onto
# the survey years they came from.
emigrationtotal$year <- as.character(emigrationtotal$year)
emigrationtotal$year[emigrationtotal$year=="Emigration"] <- "2013"
emigrationtotal$year[emigrationtotal$year=="Emigration.1"] <- "2010"
emigrationtotal$year[emigrationtotal$year=="Emigration.2"] <- "2000"
emigrationtotal$year[emigrationtotal$year=="Emigration.3"] <- "1990"
ls()
# Drop the loop temporaries now that emigrationtotal is built.
rm(list = c("emigration","emigration11", "emigration2", "emigration5", "emigration8",
"i", "tables"))
# 5. Loading data from the World Bank database (WDI indicator codes).
wbdata <- c ("IT.CEL.SETS.P2", "IT.NET.USER.P2", "NY.GDP.PCAP.PP.CD","SP.POP.TOTL","SI.POV.DDAY","SL.UEM.TOTL.ZS","VC.IHR.PSRC.P5"
,"CC.EST","GE.EST","PV.EST","RQ.EST","RL.EST","VA.EST","SP.DYN.TFRT.IN")
WDI_indi<- WDI(country = "all", indicator = wbdata,
start = 1990, end = 2013, extra = FALSE, cache = NULL)
# 6. Creating a unique identifier (ISO2 country code) for both data frames.
emigrationtotal$iso2c <- countrycode (emigrationtotal$Country, origin = 'country.name',
destination = 'iso2c', warn = TRUE)
WDI_indi$iso2c <- countrycode (WDI_indi$country, origin = 'country.name',
destination = 'iso2c', warn = TRUE)
# Deleting aggregates in the WDI indicators: rows whose name could not be
# mapped to an ISO2 code (regional aggregates, income groups, etc.).
WDI_indi <- WDI_indi[!is.na(WDI_indi$iso2c),]
# 7. Merging "WDI Indicators" and "UN Migration stocks" on country-year.
Merged <- merge(emigrationtotal, WDI_indi, by = c('iso2c','year'))
summary(Merged)
# 8. Cleaning the data: give the WDI indicator codes readable names.
# (One plyr::rename call instead of fourteen; identical result.)
Merged <- plyr::rename(Merged, c(
  "IT.CEL.SETS.P2" = "CellphoneUsers",
  "IT.NET.USER.P2" = "InternetUsers",
  "NY.GDP.PCAP.PP.CD" = "GDPPerCapita",
  "SP.POP.TOTL" = "TotalPopulation",
  "SI.POV.DDAY" = "Poverty",
  "SL.UEM.TOTL.ZS" = "UnemploymentRate",
  "VC.IHR.PSRC.P5" = "IntentionalHomocides",
  "CC.EST" = "Corruption",
  "GE.EST" = "GovernmentEffectiveness",
  "PV.EST" = "PoliticalStability",
  "RQ.EST" = "RegulatoryQuality",
  "RL.EST" = "RuleOfLaw",
  # BUG FIX: the original new name was " VoiceAndAccountability" (leading
  # space), so later references to Merged$VoiceAndAccountability were NULL.
  "VA.EST" = "VoiceAndAccountability",
  "SP.DYN.TFRT.IN" = "FertilityRate"
))
# Share of missing observations per independent variable.
# BUG FIX: the original assigned NAs <- sum(...) (an atomic numeric) and then
# wrote NAs$InternetUsers <- ..., which errors ("$<-" is invalid for atomic
# vectors); it also referenced misspelled columns (GovernmentEffectivness,
# RegulatoryStability) that silently produced NULL. Compute every share at
# once from the actual column names instead.
variables <- c("CellphoneUsers", "InternetUsers", "GDPPerCapita",
               "TotalPopulation", "Poverty", "UnemploymentRate",
               "IntentionalHomocides", "Corruption", "FertilityRate",
               "GovernmentEffectiveness", "PoliticalStability",
               "RegulatoryQuality", "RuleOfLaw", "VoiceAndAccountability")
NAs <- vapply(Merged[, variables],
              function(column) sum(is.na(column)) / nrow(Merged),
              numeric(1))
# After looking at the number of missing values per variable, drop the
# independent variables with more than 15% of the observations missing.
Merged <- Merged[, !(colnames(Merged)) %in% c("Poverty", "IntentionalHomocides","PoliticalStability","Corruption", "UnemploymentRate")]
# Dropping rows with missing values in the remaining regressors.
Merged <- Merged[!is.na(Merged$InternetUsers),]
Merged <- Merged[!is.na(Merged$CellphoneUsers),]
Merged <- Merged[!is.na(Merged$GDPPerCapita),]
Merged <- Merged[!is.na(Merged$FertilityRate),]
# Check variable structure.
str(Merged)
summary(Merged)
table(Merged$year)
# Code year and the emigrant stock as numeric.
Merged$year <- as.numeric(Merged$year)
Merged$emigration <- as.numeric(Merged$emigration)
# Removing the redundant country-name column (iso2c is the identifier).
Merged <- subset.data.frame(Merged, select = -Country)
# 9. Derived variables: emigrant stock in thousands, and per-capita emigration.
Merged$emigration2 <- Merged$emigration / 1000
Merged$emigrationpercap <- Merged$emigration / Merged$TotalPopulation
# Cross-sections of the panel: one data frame per survey year.
merged90 <- subset(Merged, year == 1990)
merged00 <- subset(Merged, year == 2000)
merged10 <- subset(Merged, year == 2010)
merged13 <- subset(Merged, year == 2013)
# Persist the cleaned data set as a CSV file.
write.csv(Merged, file = "MontesandReyla")
###############################################################################################
############################### DESCRIPTIVE STATISTICS #######################################
###############################################################################################
## Declare the panel structure: country (iso2c) x year.
Merged <- plm.data(Merged, index = c("iso2c", "year"))
# Helper: join one year's cross-section onto the world map and draw a
# choropleth of emigrants per capita; returns the joined map object.
PlotEmigrationMap <- function(year_data, map_title) {
  map_df <- joinCountryData2Map(year_data,
                                joinCode = "ISO2",
                                nameJoinColumn = "iso2c")
  # BUG FIX: the original called mapDevice(Map1) ... mapDevice(Map4) with
  # undefined objects Map1..Map4, which raises "object not found";
  # mapDevice() with no arguments opens a new default map device.
  mapDevice()
  mapCountryData(map_df, nameColumnToPlot = 'emigrationpercap', mapTitle = map_title,
                 colourPalette = c("darkorange", "coral2","gold","aquamarine1", "cyan3", "blue","magenta"),
                 borderCol = 'black')
  invisible(map_df)
}
# Mapping global emigration per capita, one map per survey year.
sPDF <- PlotEmigrationMap(merged90, 'Number of emigrants per capita 1990')
sPDFII <- PlotEmigrationMap(merged00, 'Number of emigrants per capita 2000')
sPDFIII <- PlotEmigrationMap(merged10, 'Number of emigrants per capita 2010')
sPDFIV <- PlotEmigrationMap(merged13, 'Number of emigrants per capita 2013')
## Historgram
hist(Merged$emigration2, xlab = "Tousands of emigrants", main = "Histogram", xlim=range(0:14170))
hist(Merged$CellphoneUsers, xlab = "CellUsers", main = "Histogram")
## Summary
summary(Merged$emigration2, na.rm = TRUE)
summary(Merged$CellphoneUsers, na.rm = TRUE)
summary(Merged$InternetUsers, na.rm = TRUE)
summary(Merged$GDPPerCapita, na.rm = TRUE)
summary(Merged$TotalPopulation, na.rm = TRUE)
summary(Merged$FertilityRate, na.rm = TRUE)
summary(Merged$GovernmentEffectivness, na.rm = TRUE)
summary(Merged$RegulatoryStability, na.rm = TRUE)
summary(Merged$RegulatoryQuality, na.rm = TRUE)
summary(Merged$RuleOfLaw, na.rm = TRUE)
summary(Merged$VoiceAndAccountability, na.rm = TRUE)
#Range
range(Merged$emigration)
#Interquantile Range
IQR(Merged$emigration)
# Boxplots
boxplot(Merged$emigration2, main = 'Emigration')
boxplot(Merged$CellphoneUsers, main = 'Cellphone Users')
#Variance
var(Merged$emigration2)
var(Merged$CellphoneUsers)
var(Merged$InternetUsers)
#Standar Deviation
sd(Merged$emigration2)
sd(Merged$CellphoneUsers)
sd(Merged$InternetUsers)
# Standard error of the mean: sd(x) / sqrt(n).
# na.rm = FALSE preserves the original behaviour exactly (any NA in x
# makes the result NA, and length(x) counts NAs). With na.rm = TRUE,
# missing values are dropped first so both sd and n refer to complete
# cases only.
sd_error <- function(x, na.rm = FALSE) {
  if (na.rm) {
    x <- x[!is.na(x)]
  }
  sd(x) / sqrt(length(x))
}
# NOTE(review): the next line is a syntax error — the column name after
# `Merged$` is missing. The intended column must be filled in before this
# script can be parsed/sourced.
sd_error(Merged$)
# Joint distribution scatter plots with loess-style fitted curves
plot(emigration ~ InternetUsers, data = merged13,
xlab = "E", las = 1,
ylab = "C",
main = "Emigration data and fitted curve")
plot(emigrationpercap ~ GDPPerCapita, data = merged13,
xlab = "E", las = 1,
ylab = "C",
main = "Emigration data and fitted curve")
# NOTE(review): this plot duplicates the previous one exactly — probably one
# of the two was meant to use a different variable or year.
plot(emigrationpercap ~ GDPPerCapita, data = merged13,
xlab = "E", las = 1,
ylab = "C",
main = "Emigration data and fitted curve")
plot(emigration ~ year, data = Merged,
xlab = "E", las = 1,
ylab = "C",
main = "Emigration data and fitted curve")
# Pairwise Pearson correlation tests (pooled panel and single years)
cor.test(Merged$emigrationpercap, Merged$InternetUsers)
cor.test(Merged$emigration, Merged$InternetUsers)
cor.test(Merged$emigrationpercap, Merged$CellphoneUsers)
cor.test(Merged$InternetUsers, Merged$CellphoneUsers, na.rm = TRUE)
cor.test(merged90$emigrationpercap, merged90$InternetUsers)
cor.test(merged10$emigrationpercap, merged10$InternetUsers)
cor.test(merged13$emigrationpercap, merged13$InternetUsers)
####################################################################################
#################################### PANEL MODEL ###################################
####################################################################################
# Poisson regressions of the emigration count on connectivity and controls.
M1 <- glm(emigration ~ CellphoneUsers + TotalPopulation + GDPPerCapita + year + RegulatoryQuality + RuleOfLaw + FertilityRate, data = Merged, family = 'poisson')
summary(M1)
M2 <- glm(emigration ~ InternetUsers + TotalPopulation, data = Merged, family = 'poisson')
summary(M2)
####################################################################################
|
828056d2f88cc8684bf4f110b240b89289328819
|
3adadd11d662f5f18d2e568053b1e6704aba610d
|
/Project 2/Problem3Sim.R
|
da85d2f71692ad75c9638c7f7c2ae623e81b6ea7
|
[] |
no_license
|
Helenerb/TMA4250
|
76e3baf06b696084b256bb73d095f3d625d12c6e
|
ca9dc67c41f8df83a2c5c8af898d66234afe069a
|
refs/heads/master
| 2020-12-29T17:43:22.732370
| 2020-04-27T09:00:51
| 2020-04-27T09:00:51
| 238,686,601
| 0
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 4,865
|
r
|
Problem3Sim.R
|
# NS simulation
library(matrixStats)
library(spatial)
library(MASS)
library(ggplot2)
library(gridExtra)
library(reshape2)
library(RColorBrewer)
library(datasets)
library(MASS)
# Wrap a 2D point onto the unit torus [0, 1) x [0, 1).
# Replaces the original repeated +/-1 shifting loop with modular
# arithmetic: `%%` already maps any finite coordinate (however far from
# [0, 1], positive or negative) in a single vectorized step. A coordinate
# exactly equal to 1 maps to 0, which is the same point on the torus.
torus.2d <- function(x) {
  x %% 1
}
# Simulate one realization of a Neyman-Scott cluster process on the unit
# torus.
#   lamb.M  - expected number of mother (cluster-centre) points
#   sigma.c - variance of the isotropic Gaussian child displacement
#   lamb.p  - expected number of children per mother
# Returns list(x.C.all = matrix of child events, x.mother = matrix of
# mother events), each with columns "x" and "y".
NS <- function(lamb.M, sigma.c, lamb.p){
  k.M <- rpois(1, lamb.M)  # number of mother points
  # Pre-named empty matrices that rows are appended to below.
  x.C.all <- matrix(0L, nrow = 0, ncol = 2)
  colnames(x.C.all) <- c("x", "y")
  x.mother <- matrix(0L, nrow = 0, ncol = 2)
  colnames(x.mother) <- c("x", "y")
  # seq_len() (not 1:k) so that a Poisson draw of 0 yields zero iterations;
  # the original `for (j in 1:k.M)` iterated over c(1, 0) when k.M == 0.
  for (j in seq_len(k.M)) {
    # Mother location, uniform on the unit square.
    x.M <- runif(1)
    y.M <- runif(1)
    x.mother <- rbind(x.mother, c(x.M, y.M))
    k.C <- rpois(1, lamb.p)  # number of children for this mother
    for (i in seq_len(k.C)) {
      # Child displaced from the mother by an isotropic Gaussian.
      x.C <- mvrnorm(1, c(x.M, y.M), sigma.c * diag(2))
      x.C <- torus.2d(x.C)  # wrap onto the unit torus
      x.C.all <- rbind(x.C.all, x.C)
    }
  }
  list(x.C.all = x.C.all, x.mother = x.mother)
}
# Run S independent Neyman-Scott simulations and summarise the empirical
# L-function (spatial::Kfn) across realizations.
# Returns the per-realization L values (L.x, L.y) plus the column-wise
# mean, min, max and variance over the S realizations.
# NOTE(review): ncol = 50 assumes Kfn evaluates the L-function at 50
# points — confirm against the k argument/default of the installed
# spatial::Kfn before changing.
NS.sim <- function(lamb.M, sigma.c, lamb.p, S){
  L.x <- matrix(0L, nrow = 0, ncol = 50)
  L.y <- matrix(0L, nrow = 0, ncol = 50)
  for (s in seq_len(S)) {
    # Use a name distinct from the function: the original assigned
    # `NS <- NS(...)`, shadowing NS() inside the loop (it only kept
    # working because R skips non-function bindings in call position).
    real <- NS(lamb.M, sigma.c, lamb.p)
    NS.df <- data.frame(x = real$x.C.all[, "x"], y = real$x.C.all[, "y"])
    L.NS <- Kfn(NS.df, fs = sqrt(2))
    L.x <- rbind(L.x, L.NS$x)
    L.y <- rbind(L.y, L.NS$y)
  }
  list(L.x = L.x, L.y = L.y,
       mean = colMeans(L.y),
       min = colMins(L.y),
       max = colMaxs(L.y),
       var = colVars(L.y))
}
# Fit the Neyman-Scott parameters to the redwood data by eye: compare the
# empirical L-function of the redwood point pattern against a simulation
# envelope for candidate (lamb.M, sigma.c, lamb.p) values.
redwood <- read.table("redwood.dat.txt", col.names=c('x', 'y'))
ppregion()
L.redwood <- Kfn(redwood, fs = sqrt(2))
# potentially a first attempt, as considered earlier: generally too high density
#NS.sim.result <- NS.sim(lamb.M = 7, sigma.c = 0.125^2, lamb.p = 8, 100)
# potentially a second attempt, reduce sigma: within bounds, but still on the high side
#NS.sim.result <- NS.sim(lamb.M = 10, sigma.c = 0.05^2, lamb.p = 6, 100)
# increase a bit more:
#NS.sim.result <- NS.sim(lamb.M = 11, sigma.c = 0.05^2, lamb.p = 5, 100)
# trying to reduce a bit more: still too high!
#NS.sim.result <- NS.sim(lamb.M = 12, sigma.c = 0.05^2, lamb.p = 4, 100)
# trying to lower the variance a little
#NS.sim.result <- NS.sim(lamb.M = 12, sigma.c = 0.03^2, lamb.p = 4, 100)
# trying to increase the variance instead:
#NS.sim.result <- NS.sim(lamb.M = 12, sigma.c = 0.06^2, lamb.p = 4, 100)
# now trying with lamb.p = 5 instead:
#NS.sim.result <- NS.sim(lamb.M = 12, sigma.c = 0.05^2, lamb.p = 5, 100)
# lower the variance slightly: best so far? Chosen as the final fit
NS.sim.result <- NS.sim(lamb.M = 12, sigma.c = 0.045^2, lamb.p = 5, 100)
# trying with 4 and 12:
#NS.sim.result <- NS.sim(lamb.M = 12, sigma.c = 0.045^2, lamb.p = 4, 100)
# NOTE(review): labelled "95% confidence interval" but this is a 0.9-scaled
# min/max simulation envelope, not a standard CI; the 1.645/sqrt(100) pair
# below is the usual normal-approximation 90% interval.
NS.upper <- NS.sim.result$mean + 0.9*(NS.sim.result$max - NS.sim.result$mean)
NS.lower <- NS.sim.result$mean - 0.9*(NS.sim.result$mean - NS.sim.result$min)
NS.lower.regular <- NS.sim.result$mean - 1.645*sqrt(NS.sim.result$var)/sqrt(100)
NS.upper.regular <- NS.sim.result$mean + 1.645*sqrt(NS.sim.result$var)/sqrt(100)
# plot the envelope together with the redwood L-function:
# collect in a data frame for ggplot:
gg.df <- data.frame("redwood" = L.redwood$y, "x" = L.redwood$x, "mean" = NS.sim.result$mean,
"lower" = NS.lower, "upper" = NS.upper,
"upper.reg" = NS.upper.regular, "lower.reg" = NS.lower.regular)
NS.plot <- ggplot(data=gg.df) + geom_point(aes(x=x, y=redwood), color="sienna") +
geom_line(aes(x=x, y=upper), color="red", size=0.75) +
geom_line(aes(x=x, y=lower), color="red", size=0.75) +
#geom_line(aes(x=x, y=upper.reg), color="green", size=0.75) +
#geom_line(aes(x=x, y=lower.reg), color="green", size=0.75) +
xlab(label="t") + ylab(label="L")
NS.plot
# Plot a single NS realization: child events (sienna) and the mother
# points they cluster around (green). Returns the ggplot object.
plot.real.NS <- function(real){
  children <- data.frame(x = real$x.C.all[, "x"],
                         y = real$x.C.all[, "y"])
  mothers <- data.frame(x.M = real$x.mother[, 1],
                        y.M = real$x.mother[, 2])
  ggplot(data = children) +
    geom_point(aes(x = x, y = y), color = "sienna3") +
    geom_point(data = mothers, aes(x = x.M, y = y.M), color = "limegreen")
}
# Plot one realization of the chosen final parameter set
# (lamb.M = 12, sigma.c = 0.045^2, lamb.p = 5).
plot.real.NS(real = NS(12, 0.045^2, lamb.p = 5))
|
fca28f03a7d9572cd3645fba00c1f549d4b6ebbb
|
1c34da0873c513bd40028072bb525a999e4fff32
|
/R/zzz.R
|
10a8f0dcb1db560f5fe80d13fef5192ae5e22afc
|
[] |
no_license
|
paris-appartemnt-project/apartment_project
|
b2378b1a1c93861a808b8ca43decf9c0bd59550c
|
52b609f78f15ab0d0c571a69e9ca78b04c43e648
|
refs/heads/master
| 2020-04-07T10:49:04.377690
| 2018-12-14T21:37:57
| 2018-12-14T21:37:57
| 158,301,473
| 0
| 0
| null | 2018-12-10T13:35:21
| 2018-11-19T23:05:29
| null |
UTF-8
|
R
| false
| false
| 452
|
r
|
zzz.R
|
# Declare the non-standard-evaluation column/variable names used in
# data_scraping.R so that R CMD check does not flag them as undefined
# global variables.
globalVariables(c(
  # data_scraping.R
  "ville", "Type", "Description", "piec.", "m2", "arrondissement",
  "latitude", "longitude", "station_a_proximite", "coord", "prix",
  "lat", "lng", "my.env", "lextrait.n", "lextrait.stationextrait",
  "priority", "restaurant", "decile", "anoncesmini", "apartment_index",
  "anime_resto_cine_qt", "commerce_qt", "schools_qt", "stations_qt",
  "score", "titre", "vue.le", "iconUrl", "tri_categories",
  "X", "X1", "depuis", "Change_price"
))
|
d42337a387f044837f31aa8743f6b6be06fdb257
|
8306bfb1438a2516d3f9e7bea51f9d805793798d
|
/tests/testthat/test-miss-prop-summary.R
|
1ceebc368998799c689d693bf3520753087c6316
|
[] |
no_license
|
cran/naniar
|
849ad432eea4e343ffc4302b3ae7612759f9a552
|
30710de1ca289d1dd994e203c650bebc62a61a0f
|
refs/heads/master
| 2023-02-21T19:46:58.125455
| 2023-02-02T08:50:02
| 2023-02-02T08:50:02
| 99,764,801
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,625
|
r
|
test-miss-prop-summary.R
|
# testthat suite for miss_prop_summary(): return type, shape, naming,
# input validation, and grouped-data-frame behaviour.
test_that("miss_prop_summary returns a data.frame",{
expect_s3_class(miss_prop_summary(airquality),
"data.frame")
})
test_that("miss_prop_summary returns a tibble",{
expect_equal(class(miss_prop_summary(airquality)),
c("tbl_df", "tbl", "data.frame"))
})
test_that("miss_prop_summary errors when given non dataframe or 0 entry",{
expect_error(miss_prop_summary(0))
expect_error(miss_prop_summary("a"))
expect_error(miss_prop_summary(matrix(airquality)))
expect_error(miss_prop_summary(NULL))
})
test_that("There are 3 columns",{
expect_equal(ncol(miss_prop_summary(airquality)),3)
})
test_that("The columns are named df, var, case",{
expect_named(miss_prop_summary(airquality),c("df","var","case"))
})
# Grouped fixture shared by the tests below: airquality grouped by Month.
aq_group <- dplyr::group_by(airquality, Month)
test_that("miss_prop_summary grouped_df returns a tibble", {
expect_s3_class(miss_prop_summary(aq_group), "tbl_df")
})
test_that("grouped_df returns 1 more column than regular miss_prop_summary", {
expect_equal(ncol(miss_prop_summary(aq_group)),
ncol(miss_prop_summary(airquality))+1)
})
test_that("grouped_df returns a column named 'Month'", {
expect_identical(names(miss_prop_summary(aq_group)),
c("Month", "df", "var","case"))
})
test_that("grouped_df returns a dataframe with more rows than regular", {
expect_gt(nrow(miss_prop_summary(aq_group)),
nrow(miss_prop_summary(airquality)))
})
test_that("grouped_df returns a column named 'Month' with the right levels", {
expect_identical(unique(miss_prop_summary(aq_group)$Month),
5:9)
})
|
929726b02f1255d727b12cc917de2e2326214bf8
|
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
|
/data/genthat_extracted_code/precrec/examples/create_sim_samples.Rd.R
|
8608b5b4fbc2caf41cbb745d3759ca025910727f
|
[] |
no_license
|
surayaaramli/typeRrh
|
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
|
66e6996f31961bc8b9aafe1a6a6098327b66bf71
|
refs/heads/master
| 2023-05-05T04:05:31.617869
| 2019-04-25T22:10:06
| 2019-04-25T22:10:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 916
|
r
|
create_sim_samples.Rd.R
|
# Extracted example script for precrec::create_sim_samples (from the Rd
# examples): generates random score/label samples at various simulated
# performance levels.
library(precrec)
### Name: create_sim_samples
### Title: Create random samples for simulations
### Aliases: create_sim_samples
### ** Examples
##################################################
### Create a set of samples with 10 positives and 10 negatives
### for the random performance level
###
samps1 <- create_sim_samples(1, 10, 10, "random")
## Show the list structure
str(samps1)
##################################################
### Create two sets of samples with 10 positives and 20 negatives
### for the random and the poor early retrieval performance levels
###
samps2 <- create_sim_samples(2, 10, 20, c("random", "poor_er"))
## Show the list structure
str(samps2)
##################################################
### Create 3 sets of samples with 5 positives and 5 negatives
### for all 5 levels
###
samps3 <- create_sim_samples(3, 5, 5, "all")
## Show the list structure
str(samps3)
|
a4b9489e70ee2c1dc801a1de3a22b4895df6ed93
|
246945df242f21d7730bdcb81c303eea249bff8e
|
/OldFiles/Aging/Code/aging1.R
|
4e50b726879c4667cbcebd21e303e4ecab2fc8bb
|
[] |
no_license
|
gautam-sabnis/Visual-Frailty-Index-Analysis
|
229c746a69dcf838e1b0163491b45e5f434a30bc
|
f55f90e35d54b38d6ef43460578cb5c8fc827d4f
|
refs/heads/master
| 2023-06-30T08:46:50.226617
| 2021-08-06T12:33:16
| 2021-08-06T12:33:16
| 278,666,207
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 24,678
|
r
|
aging1.R
|
# Setup for the visual frailty index analysis: load libraries, define the
# feature-name groups, read and merge the raw CSVs, and build the modeling
# data frame `df` (score + selected features).
# NOTE(review): rm(list = ls()) and an absolute setwd() in a script are
# fragile — they clear the caller's workspace and only work on one machine.
# `require` also fails silently (returns FALSE); `library` would error loudly.
rm(list = ls())
libraries <- c('glmnet','leaps','caret','e1071','reshape')
lapply(libraries, require, character.only = TRUE)
setwd('/Users/sabnig/Documents/Projects/Aging/Temp')
# Column-name groups used to select features from the merged data.
frailty_parameters <- c('Alopecia','Loss.of.fur.colour','Dermatitis','Loss.of.whiskers','Coat.condition',
'Piloerection','Cataracts','Eye.discharge.swelling','Microphthalmia','Corneal.opacity','Nasal.discharge',
'Rectal.prolapse','Vaginal.uterine.','Diarrhea','Vestibular.disturbance','Vision.loss..Visual.Placing.',
'Menace.reflex','Tail.stiffening','Gait.disorders','Tremor','Tumours','Distended.abdomen','Kyphosis',
'Body.condition','Breathing.rate.depth','Malocclusions','Righting.Reflex')
avg_gait_measures_linear <- c('avg_angular_velocity','avg_base_tail_lateral_displacement',
'avg_limb_duty_factor','avg_nose_lateral_displacement','avg_speed_cm_per_sec',
'avg_step_length1','avg_step_length2','avg_step_width','avg_stride_length','avg_temporal_symmetry',
'avg_tip_tail_lateral_displacement')
avg_gait_measures_circular <- c('avg_base_tail_lateral_displacement_phase','avg_nose_lateral_displacement_phase',
'avg_tip_tail_lateral_displacement_phase')
median_gait_measures_linear <- c('median_angular_velocity','median_base_tail_lateral_displacement',
'median_limb_duty_factor','median_nose_lateral_displacement','median_speed_cm_per_sec',
'median_step_length1','median_step_length2','median_step_width','median_stride_length','median_temporal_symmetry',
'median_tip_tail_lateral_displacement')
var_gait_measures_linear <- c('angular_velocity_var','base_tail_lateral_displacement_var',
'limb_duty_factor_var','nose_lateral_displacement_var','speed_cm_per_sec_var',
'step_length1_var','step_length2_var','step_width_var','stride_length_var',
'tip_tail_lateral_displacement_var')
std_gait_measures_linear <- c('angular_velocity_std','base_tail_lateral_displacement_std',
'limb_duty_factor_std','nose_lateral_displacement_std','speed_cm_per_sec_std',
'step_length1_std','step_length2_std','step_width_std','stride_length_std',
'tip_tail_lateral_displacement_std')
iqr_gait_measures_linear <- c('angular_velocity_iqr','base_tail_lateral_displacement_iqr',
'limb_duty_factor_iqr','nose_lateral_displacement_iqr','speed_cm_per_sec_iqr',
'step_length1_iqr','step_length2_iqr','step_width_iqr','stride_length_iqr',
'tip_tail_lateral_displacement_iqr')
median_gait_measures_circular <- c('median_base_tail_lateral_displacement_phase','median_nose_lateral_displacement_phase',
'median_tip_tail_lateral_displacement_phase')
OFA_measures <- c('stride_count','Distance.cm.sc','center_time_secs','periphery_time_secs','corner_time_secs',
'center_distance_cm','periphery_distance_cm','corner_distance_cm','grooming_number_bouts',
'grooming_duration_secs')
engineered_features_mean <- c('dAC_mean','dB_mean','aABC_mean')
engineered_features_stdev <- c('dAC_stdev','dB_stdev','aABC_stdev')
engineered_features_min <- c('dAC_min','dB_min','aABC_min')
engineered_features_max <- c('dAC_max','dB_max','aABC_max')
animal_features <- c('Sex','Weight') #TestAge, Weight were removed
# Read and column-bind the raw data files.
# NOTE(review): ellipsefit/rearpaw are truncated to nrow(aging) rows by
# position, not joined on an ID — assumes identical row order. TODO confirm.
aging <- read.csv("~/Documents/Projects/Aging/Data/completeagedb6.csv", header=TRUE, stringsAsFactors = FALSE)
ellipsefit <- read.csv("~/Documents/Projects/Aging/Data/ellipsefit_all.csv", header=TRUE, stringsAsFactors = FALSE)
rearpaw <- read.csv("~/Documents/Projects/Aging/Data/rearpaw.csv", header=TRUE, stringsAsFactors = FALSE)
var_agingall <- read.csv("~/Documents/Projects/Aging/Data/var_agingall.csv", header=TRUE, stringsAsFactors = FALSE)
aging <- cbind(aging, ellipsefit[1:nrow(aging),],rearpaw[1:nrow(aging),])
aging <- cbind(aging, var_agingall)
# Rename to the short names used throughout the rest of the script.
names(aging)[names(aging) == 'Overall.Score'] <- 'score'
names(aging)[names(aging) == 'Age.at.Test'] <- 'TestAge'
names(aging)[names(aging) == 'Body.Weight'] <- 'Weight'
names(aging)[names(aging) == 'Collected.By'] <- 'Tester'
aging$Tester <- factor(aging$Tester, levels = c('Amanda','Gaven','Hannah','Mackenzie'))
aging$Sex <- factor(aging$Sex, levels = c('-1','1'))
#levels(aging$Sex) <- c('Male','Female')
# Modeling frame: frailty score plus the selected feature groups.
df <- aging[,names(aging) %in% c('score',animal_features,
OFA_measures,engineered_features_mean, avg_gait_measures_linear, median_gait_measures_linear,
std_gait_measures_linear, var_gait_measures_linear,iqr_gait_measures_linear,
engineered_features_stdev)]
#df <- aging[,names(aging) %in% c('score',animal_features,avg_gait_measures_linear, median_gait_measures_linear)]
#EXPLORATORY MODELING
# Density plots, feature-vs-score scatter plots, correlation structure,
# and a PCA scree plot of the numeric features.
pp_df <- preProcess(df[,-which(names(df) %in% c('score','Sex','Tester'))], method = c('center','scale'))
dfX <- predict(pp_df, newdata = df[,-which(names(df) %in% c('score','Sex','Tester'))])
dfX <- cbind(id = 1:dim(dfX)[1], dfX)
dfX.melt <- melt(dfX, id.vars = 'id')
ggplot(dfX.melt, aes(x = value)) + geom_density() + facet_wrap(~ variable)
ggsave('../Plots/densityPlot.pdf')
# Second pass without center/scale (preProcess default) for score scatter.
pp_df <- preProcess(df[,-which(names(df) %in% c('score','Sex','Tester'))])
dfX <- predict(pp_df, newdata = df[,-which(names(df) %in% c('score','Sex','Tester'))])
dfX <- cbind(id = 1:dim(dfX)[1], dfX)
dfX.melt <- melt(dfX, id.vars = 'id')
dfX.melt <- cbind(dfX.melt, score = rep(df$score, dim(dfX)[2]-1), Sex = rep(df$Sex, dim(dfX)[2]-1))
ggplot(dfX.melt, aes(x = value, y = score)) + geom_point(alpha = 0.6, aes(color = as.factor(Sex))) + geom_smooth(method = 'loess', aes(color = 'black'), se = FALSE) +
facet_wrap(~ variable, scales = 'free') + theme(legend.position = 'none') + scale_color_manual(values = c('#f16913','#2b8cbe','#000000'))
ggsave('../Plots/featurePlot2.pdf')
# Correlation heatmap and distribution of pairwise correlations.
corrplot::corrplot(cor(df[,-which(names(df) %in% c('Sex','score','Tester'))]), order = "hclust",
tl.col = 'black',type = 'upper', tl.cex = 0.7)
dev.print(pdf,'../Plots/corrplot.pdf')
corr.mat <- cor(df[,-which(names(df) %in% c('score','Sex','Tester'))])[upper.tri(cor(df[,-which(names(df) %in% c('score','Sex','Tester'))]), diag = FALSE)]
hist(corr.mat, xlab = 'Correlation', main = 'Summary of Correlations' )
dev.print(pdf,'../Plots/corr-summ.pdf')
corr.mat <- cor(df[,sapply(df,is.numeric)])
summary(corr.mat[upper.tri(corr.mat,diag = FALSE)])
corr.mat <- cor(df[,-which(names(df) %in% c('score','Sex','Tester'))])
#PCA
# Scree plot from the eigen-decomposition of the covariance of the
# centered/scaled features.
pp_df <- preProcess(df[,-which(names(df) %in% c('score','Sex','Tester'))], method = c('center','scale'))
df.X <- predict(pp_df, newdata = df[,-which(names(df) %in% c('score','Sex','Tester'))])
X.cov <- cov(df.X[,sapply(df.X, is.numeric)])
X.eigen <- eigen(X.cov)
plot(X.eigen$values/sum(X.eigen$values), xlab = 'Number of PCs', ylab = '% Variance Explained', main = 'Scree plot')
lines(X.eigen$values/sum(X.eigen$values))
dev.print(pdf,'../Plots/scree-plot.pdf')
#featurePlot(x = df[,-which(names(df) %in% c('score','Tester','Sex'))], y = df$score, grid = TRUE,
# type = c('p','smooth'), col.line = 'red', lwd = 3, alpha = 0.6, col = 'black', pch = 16)
#PREDICTIVE MODELING
# Train a panel of caret models (linear, regularized, kernel and tree
# ensembles) with repeated 10-fold CV on an 80/20 split, compare resampled
# metrics, then evaluate every model on the held-out test set.
#Repeated 10-fold CV
fitControl <- trainControl(method = 'repeatedcv', number = 10, repeats = 10, savePredictions = 'final')
#Data preprocess
set.seed(100)
trainIndex <- createDataPartition(df$score, p = 0.8, times = 1, list = FALSE)
dfTrain <- df[trainIndex,-which(names(df) %in% c('Sex'))]
names(dfTrain) <- colnames(dfTrain)
dfTrainY <- dfTrain$score
dfTrainX <- dfTrain[,-which(names(dfTrain) %in% c('score'))]
#dfTrainXcat <- model.matrix(score ~ Sex, data = dfTrain)
#dfTrainX <- cbind(dfTrainXnum,dfTrainXcat)
dfTest <- df[-trainIndex,]
names(dfTest) <- colnames(dfTest)
dfTestX <- dfTest[,-which(names(dfTest) %in% c('score','Sex'))]
dfTestY <- dfTest$score
#Simple Linear Model
#dfTrainXfilt <- dfTrainX[,-findCorrelation(corr.mat, cutoff = 0.75)]
#names(df)[names(df) %in% -which(names(df) %in% c('score','Sex','Tester'))][findCorrelation(corr.mat, cutoff = 0.75]
# The same seed before every train() call keeps the CV folds identical
# across models, so resamples() compares like with like.
set.seed(4)
linearFit <- train(dfTrainX,dfTrainY, method = 'lm', trControl = fitControl,
preProc = c('center','scale'))
set.seed(4)
linearFit.rlm <- train(dfTrainX,dfTrainY, method = 'rlm', trControl = fitControl,
preProc = c('center','scale'))
boost_Grid <- expand.grid(.mstop = seq(50,300,50), .nu = c(0.1,0.2,0.3))
set.seed(4)
linearFit.boosted <- train(dfTrainX,dfTrainY, method = 'BstLm', trControl = fitControl,
preProc = c('center','scale'), tuneGrid = boost_Grid)
set.seed(4)
linearFit.cubist <- train(dfTrainX,dfTrainY, method = 'cubist', trControl = fitControl,
preProc = c('center','scale'))
ridgeGrid <- expand.grid(lambda = seq(0.1,0.5, length = 20))
set.seed(4)
linearFit.ridge <- train(dfTrainX, dfTrainY, method = 'ridge', trControl = fitControl,
preProc = c('center','scale'), tuneGrid = ridgeGrid)
#enetGrid <- expand.grid(.lambda = c(0,0.01,0.1,0.2,0.3), .fraction = seq(.05,1,length = 20))
glmn_grid <- expand.grid(alpha = seq(0, 1, by = .25), lambda = 10^seq(-3, -.5, length = 20))
set.seed(4)
linearFit.enet <- train(dfTrainX, dfTrainY, method = 'glmnet', tuneGrid = glmn_grid,
trControl = fitControl, preProc = c('center','scale'))
set.seed(4)
linearFit.glm <- train(dfTrainX,dfTrainY, method = 'glmboost', trControl = fitControl,
preProc = c('center','scale'))
idGrid <- expand.grid(.n.comp = seq(1,10,1))
set.seed(4)
linearFit.IC <- train(dfTrainX,dfTrainY, method = 'icr', trControl = fitControl,
preProc = c('center','scale'), tuneGrid = idGrid)
idGrid <- expand.grid(.ncomp = seq(1,10,1))
set.seed(4)
linearFit.pcr <- train(dfTrainX,dfTrainY, method = 'pcr', trControl = fitControl,
preProc = c('center','scale'), tuneGrid = idGrid)
set.seed(4)
linearFit.pls <- train(dfTrainX, dfTrainY, method = 'pls', tuneLength = 20, trControl = fitControl,
preProc = c('center','scale'))
set.seed(4)
nonlinearFit.mars <- train(dfTrainX,dfTrainY, method = 'earth', trControl = fitControl,
preProc = c('center','scale'))
set.seed(4)
nonlinearFit.gam <- train(dfTrainX,dfTrainY, method = 'gamboost', trControl = fitControl,
preProc = c('center','scale'))
set.seed(4)
nonlinearFit.svm <- train(dfTrainX, dfTrainY, method = 'svmRadial', preProc = c('center','scale'), tuneLength = 14,
trControl = fitControl)
rfGrid <- expand.grid(mtry = seq(2,ncol(dfTrainX), by = 10), min.node.size = seq(3,9,by = 2),
splitrule = 'variance')
set.seed(4)
nonlinearFit.rf <- train(score ~ ., data = dfTrain, method = 'ranger', preProc = c('center','scale'),
trControl = fitControl, tuneGrid = rfGrid)
gbmGrid <- expand.grid(interaction.depth = seq(1,11,by = 2), n.trees = seq(100,1000,by = 50),
shrinkage = c(0.01,0.02),n.minobsinnode = c(5,7,10))
set.seed(4)
nonlinearFit.gbm <- train(dfTrainX, dfTrainY, method = 'gbm', tuneGrid = gbmGrid, verbose = FALSE, trControl = fitControl)
# Collect resampled CV metrics and reshape for plotting.
# NOTE(review): the rep(...,100)/rep(...,300) labels hard-code 100 resamples
# per model (10 folds x 10 repeats) and 9 models — both must change together
# with fitControl/modelList.
modelList <- list(LM = linearFit, RLM = linearFit.rlm, LMB = linearFit.boosted,
LMnet = linearFit.enet, PCR = linearFit.pcr, PLS = linearFit.pls, SVM = nonlinearFit.svm,
RF = nonlinearFit.rf, GBM = nonlinearFit.gbm)
resamps <- resamples(modelList)
finalResults <- resamps$values
finalResults <- finalResults[,-1]
finalResultsX <- cbind(id = 1:dim(finalResults)[1], finalResults)
finalResultsX.melt <- melt(finalResultsX, id.vars = 'id')
finalResultsX.melt <- cbind(finalResultsX.melt, metric = matrix(replicate(ncol(finalResults)/3,c(rep('MAE',100), rep('RMSE',100),
rep('Rsquared',100))), ncol = 1),
model = c(rep(names(modelList)[1],300), rep(names(modelList)[2],300),rep(names(modelList)[3],300),
rep(names(modelList)[4],300),rep(names(modelList)[5],300),rep(names(modelList)[6],300),
rep(names(modelList)[7],300), rep(names(modelList)[8],300), rep(names(modelList)[9],300)))
ggplot(finalResultsX.melt, aes(x = model, y = value)) + geom_boxplot(alpha = 0.1) +
facet_wrap(~ metric, scales = 'free') + coord_flip() + labs(y = 'Repeated 10-fold CV Error', x= 'Model') +
theme_bw(base_size = 22)
ggsave('Reg-train-Res.pdf', width = 18, height = 6)
# Held-out test-set predictions and metrics for every model.
testResults <- data.frame(obs = dfTestY)
testResults$LM <- predict(linearFit, dfTestX)
testResults$RLM <- predict(linearFit.rlm, dfTestX)
testResults$LMB <- predict(linearFit.boosted, dfTestX)
testResults$ElasticNet <- predict(linearFit.enet, dfTestX)
testResults$PCR <- predict(linearFit.pcr, dfTestX)
testResults$PLS <- predict(linearFit.pls, dfTestX)
testResults$SVM <- predict(nonlinearFit.svm, dfTestX)
testResults$RandomForest <- predict(nonlinearFit.rf, dfTestX)
testResults$GBM <- predict(nonlinearFit.gbm, dfTestX)
Results.Test <- data.frame(Model = c('LM','RLM','LMB','LMnet','PCR','PLS','SVM','RF','GBM'),
RMSE = c(RMSE(dfTestY, testResults$LM), RMSE(dfTestY, testResults$RLM),RMSE(dfTestY, testResults$LMB),RMSE(dfTestY, testResults$ElasticNet),
RMSE(dfTestY, testResults$PCR),RMSE(dfTestY, testResults$PLS), RMSE(dfTestY, testResults$SVM), RMSE(dfTestY, testResults$RandomForest),
RMSE(dfTestY, testResults$GBM)),
Rsquared = c(R2(dfTestY, testResults$LM), R2(dfTestY, testResults$RLM),R2(dfTestY, testResults$LMB),R2(dfTestY, testResults$ElasticNet),
R2(dfTestY, testResults$PCR),R2(dfTestY, testResults$PLS), R2(dfTestY, testResults$SVM), R2(dfTestY, testResults$RandomForest),
R2(dfTestY, testResults$GBM)),
Correlation = c(cor(dfTestY, testResults$LM), cor(dfTestY, testResults$RLM),cor(dfTestY, testResults$LMB),cor(dfTestY, testResults$ElasticNet),
cor(dfTestY, testResults$PCR),cor(dfTestY, testResults$PLS), cor(dfTestY, testResults$SVM), cor(dfTestY, testResults$RandomForest),
cor(dfTestY, testResults$GBM)))
# NOTE(review): plot_grid is from cowplot, which is not in `libraries` above.
p1 <- ggplot(Results.Test, aes(x = Model, y = RMSE, color = Model)) + geom_point(size = 11) + theme_bw(base_size = 24) + coord_flip() + theme(legend.position = 'none')
p2 <- ggplot(Results.Test, aes(x = Model, y = Rsquared, color = Model)) + geom_point(size = 11) + theme_bw(base_size = 24) + coord_flip() + theme(legend.position = 'none')
p3 <- ggplot(Results.Test, aes(x = Model, y = Correlation, color = Model)) + geom_point(size = 11) + coord_flip() + theme_bw(base_size = 24) + theme(legend.position = 'none')
plot_grid(p1,p2,p3,nrow = 1, align = 'h', labels = c('A','B','C'))
ggsave('Reg-test-Res.pdf', width = 25.1, height = 7)
# Observed-vs-predicted and residual diagnostics for the linear model,
# then standalone fits (PLS, ridge, elastic net, MARS, SVM, RF, GBM, XGB).
tmp.df <- data.frame(pred = predict(linearFit, newdata = dfTest), obs = dfTest$score, Sex = dfTest$Sex)
ggplot(tmp.df, aes(y = obs, x = pred, color = Sex)) + geom_point(alpha = 0.6) +
geom_abline(intercept = 0, slope = 1) + labs(y = 'Observed', x = 'Predicted') +
scale_color_manual(values = c('#f16913','#2b8cbe')) + scale_y_continuous(breaks = seq(2,8,2)) +
theme(legend.position = 'none')
ggsave('../Plots/linmod-perf.pdf')
ggplot(tmp.df, aes(y = obs - pred, x = pred, color = Sex)) + geom_point(alpha = 0.6) +
geom_hline(yintercept = 0) + labs(y = 'Residuals', x = 'Predicted') +
scale_color_manual(values = c('#f16913','#2b8cbe')) + theme(legend.position = 'none')
ggsave('../Plots/linmod-res.pdf')
#Partial Least Squares
set.seed(4)
plsFit <- train(dfTrainX, dfTrainY, method = 'pls', tuneLength = 20, trControl = fitControl,
preProc = c('center','scale'))
ggplot(plsFit)
ggsave('../Plots/plsmod-cv.pdf')
ggplot(varImp(plsFit))
ggsave('../Plots/pls-varImp.pdf')
tmp.df <- data.frame(pred = predict(plsFit, newdata = dfTest), obs = dfTest$score, Sex = dfTest$Sex)
ggplot(tmp.df, aes(y = obs, x = pred, color = Sex)) + geom_point(alpha = 0.6) +
geom_abline(intercept = 0, slope = 1) + labs(y = 'Observed', x = 'Predicted') +
scale_color_manual(values = c('#f16913','#2b8cbe')) + scale_y_continuous(breaks = seq(2,8,2)) +
theme(legend.position = 'none')
ggsave('../Plots/plsmod-perf.pdf')
ggplot(tmp.df, aes(y = obs - pred, x = pred, color = Sex)) + geom_point(alpha = 0.6) +
geom_hline(yintercept = 0) + labs(y = 'Residuals', x = 'Predicted') +
scale_color_manual(values = c('#f16913','#2b8cbe')) + theme(legend.position = 'none')
ggsave('../Plots/plsmod-res.pdf')
#Ridge Regression
dfTrainX <- data.frame(dfTrainX)
ridgeGrid <- expand.grid(lambda = seq(0.1,0.2, length = 20))
set.seed(4)
ridgeFit <- train(dfTrainX, dfTrainY, method = 'ridge', trControl = fitControl,
preProc = c('center','scale'), tuneGrid = ridgeGrid)
ggplot(ridgeFit)
# NOTE(review): filename reuses '../Plots/plsmod-cv.pdf' — this overwrites
# the PLS CV plot saved above; likely meant 'ridge-cv.pdf'.
ggsave('../Plots/plsmod-cv.pdf')
ggplot(varImp(ridgeFit))
ggsave('../Plots/ridge-varImp.pdf')
# NOTE(review): axis labels are swapped here — x is mapped to predictions
# but labelled 'Observed' (and vice versa).
ggplot(data.frame(predict(ridgeFit, newdata = dfTest),dfTest$score), aes(y = dfTest$score, x = predict(ridgeFit, newdata = dfTest))) +
geom_point(alpha = 0.6) + geom_abline(intercept = 0, slope = 1) + labs(x = 'Observed', y = 'Predicted')
ggplot(data.frame(predict(ridgeFit, newdata = dfTest),dfTest$score), aes(y = dfTest$score - predict(ridgeFit, newdata = dfTest),
x = predict(ridgeFit, newdata = dfTest))) + geom_point(alpha = 0.6) +
geom_hline(yintercept = 0) + labs(y = 'Residuals', x = 'Predicted')
#Elastic Net Regression
enetGrid <- expand.grid(.lambda = c(0,0.01,0.1,0.2,0.3), .fraction = seq(.05,1,length = 20))
set.seed(4)
enetFit <- train(dfTrainX, dfTrainY, method = 'enet', tuneGrid = enetGrid,
trControl = fitControl, preProc = c('center','scale'))
ggplot(enetFit)
ggsave('../Plots/enet-cv.pdf')
ggplot(varImp(enetFit))
ggsave('../Plots/enet-varImp.pdf')
# NOTE(review): these two diagnostic plots still use ridgeFit, not enetFit —
# almost certainly a copy/paste slip.
ggplot(data.frame(predict(ridgeFit, newdata = dfTest),dfTest$score), aes(y = dfTest$score, x = predict(ridgeFit, newdata = dfTest))) +
geom_point(alpha = 0.6) + geom_abline(intercept = 0, slope = 1) + labs(x = 'Observed', y = 'Predicted')
ggplot(data.frame(predict(ridgeFit, newdata = dfTest),dfTest$score), aes(y = dfTest$score - predict(ridgeFit, newdata = dfTest),
x = predict(ridgeFit, newdata = dfTest))) + geom_point(alpha = 0.6) +
geom_hline(yintercept = 0) + labs(y = 'Residuals', x = 'Predicted')
#MARS
marsGrid <- expand.grid(.degree = 1:2, .nprune = 2:38)
set.seed(4)
marsFit <- train(dfTrainX, dfTrainY, method = "earth", tuneGrid = marsGrid, trControl = fitControl,
preProc = c('center','scale'))
#SVM
set.seed(4)
svmFit <- train(dfTrainX, dfTrainY, method = 'svmRadial', preProc = c('center','scale'), tuneLength = 14,
trControl = fitControl)
#Random Forest
# NOTE(review): randomForest's argument is `ntree`; `ntrees` is swallowed
# by `...` and silently ignored — TODO confirm and fix.
rfFit <- randomForest(dfTrainX, dfTrainY, importance = TRUE, ntrees = 10000)
#Gradient Boosted Trees
gbmGrid <- expand.grid(.interaction.depth = seq(1,11,by = 2), .n.trees = seq(100,1000,by = 50),
.shrinkage = c(0.01,0.001))
set.seed(4)
gbmFit <- train(dfTrainX, dfTrainY, method = 'gbm', tuneGrid = gbmGrid, verbose = FALSE)
#XGBoost
set.seed(316)
xgbFit <- train(dfTrainX, dfTrainY, method = 'xgbTree', trControl = fitControl)
# Legacy/scratch section: an alternative random train/test split, best-subset
# selection, a validation loop, and repeated multicollinearity/PCA checks.
# NOTE(review): this overwrites dfTrain/dfTest/dfTrainX/... from the caret
# section above; run one section or the other, not both.
Test <- sample(1:nrow(df),50)
Train <- setdiff(1:nrow(df), Test)
dfTrain <- df[Train,]
dfTest <- df[Test,]
dfTrainX <- dfTrain[,-which(names(dfTrain) %in% c('score'))]
dfTrainY <- dfTrain$score
# NOTE(review): pp_Train/pp_Test are computed but the predict() calls below
# use pp_df (from the exploratory section) — presumably a bug.
pp_Train <- preProcess(dfTrain[,-which(names(dfTrain) %in% c('score'))], method = c('center','scale','BoxCox'))
dfTrainXtrans <- predict(pp_df, newdata = dfTrain[,-which(names(dfTrain) %in% c('score'))])
dfTestX <- dfTest[,-which(names(dfTest) %in% c('score'))]
dfTestY <- dfTest$score
pp_Test <- preProcess(dfTest[,-which(names(dfTest) %in% c('score'))], method = c('center','scale','BoxCox'))
dfTestXtrans <- predict(pp_df, newdata = dfTest[,-which(names(dfTrain) %in% c('score'))])
trainingData <- dfTrainX
trainingData$score <- dfTrainY
#Simple Linear Regression
lmFitAllPredictors <- lm(score ~ ., data = trainingData)
lmPred1 <- predict(lmFitAllPredictors, dfTestX)
lmValuesTest <- data.frame(obs = dfTestY, pred = lmPred1)
defaultSummary(lmValuesTest)
lmValuesTrain <- data.frame(obs = dfTrainY, pred = predict(lmFitAllPredictors, dfTrainX))
defaultSummary(lmValuesTrain)
ggplot(lmValuesTrain, aes(x = pred, y = obs)) + geom_point(alpha = 0.6) + labs(x = 'Predicted', y = 'Observed')
ctrl <- trainControl(method = 'cv', number = 10)
lmFit1 <- train(x = dfTrainX, y = dfTrainY, method = 'lm', trControl = ctrl)
# NOTE(review): the dangling `lmValuesTrain <-` chains onto the next
# assignment, so lmValuesTrain becomes the z-scored feature matrix — this
# line looks unfinished and should be completed or deleted.
lmValuesTrain <-
df[,-which(names(df) %in% c('Sex','Tester'))] <- apply(df[,-which(names(df) %in% c('Sex','Tester'))],MARGIN = 2, FUN = function(x) (x - mean(x[!is.na(x)]))/sd(x[!is.na(x)]))
X <- data.matrix(df[,-which(names(df) %in% c('score'))]) #Column 4 contains the scores.
y <- df['score']
df$Tester <- factor(df$Tester)
#Preliminary Model
mod1 <- lm(score ~ ., data = df)
mod2 <- lm(resid(lm(score ~ Tester)) ~ ., data = df)
indx <- createFolds(df[,'score'], returnTrain = TRUE)
ctrl <- trainControl(method = 'cv', index = indx)
lmTune0 <- train(x = df[,-which(names(df) %in% c('TestAge'))], y = df[,'score'], method = 'lm', trControl = ctrl)
testResults <- data.frame(obs = df[,'score'], Linear_Regression = predict(lmTune0, df[,-which(names(df) %in% c('TestAge'))]))
#Best Subset Selection
mod1 <- regsubsets(score ~ ., data = df)
best.summary <- summary(mod1)
plot(best.summary$adjr2, type = 'l', xlab = 'Number of variables', ylab = 'Adjusted Rsq', frame = FALSE)
axis(1,at = 1:28)
points(which.max(best.summary$adjr2), best.summary$adjr2[which.max(best.summary$adjr2)], col = 'red', cex = 2, pch = 20)
plot(mod1,scale = 'adjr2')
plot(mod1, scale = 'Cp')
plot(mod1, scale = 'bic')
#Validation Approach
df$cut <- cut(df$TestAge, breaks = c(0,50,101,134), labels = c('Y','M','O'), right = TRUE)
#test <- stratified(df, 'cut', size = round(100*prop.table(table(df$cut)),0))
test <- stratified(df, 'cut', size = c(17,17,16))
# NOTE(review): `stratified()` returns a data frame, so `df[!test,]` negates
# a data frame rather than selecting the complement — this line cannot work
# as written.
train <- df[!test,]
#Naive approach
test <- sample(1:nrow(df),50)
train <- setdiff(1:nrow(df), test)
train.mod <- regsubsets(score ~ . , data = df[train,],nvmax = 28)
test.mat <- model.matrix(score ~ . , data = df[test,])
# Validation MSE for each best-subset model size.
val.errors = rep(NA, 28)
for (i in 1:28){
coefi = coef(train.mod, id = i)
pred = test.mat[,names(coefi)]%*%coefi
val.errors[i] = mean((df$score[test] - pred)^2)
}
###Checking for presence of multicollinearity
corrplot::corrplot(cor(df[,-which(names(df) %in% c('Sex','score','Tester'))]), order = "hclust",
tl.col = 'black',type = 'upper', tl.cex = 0.4)
dev.print(pdf,'../Plots/corrplot.pdf')
corr.mat <- cor(df[,sapply(df,is.numeric)])[upper.tri(cor(df[,sapply(df,is.numeric)]), diag = FALSE)]
hist(corr.mat, xlab = 'Correlation', main = 'Summary of Correlations' )
dev.print(pdf,'../Plots/corr-summ.pdf')
corr.mat <- cor(df[,sapply(df,is.numeric)])
summary(corr.mat[upper.tri(corr.mat,diag = FALSE)])
#PCA
df.X <- df[,-which(names(df) %in% c('score'))]
X.cov <- cov(df.X[,sapply(df.X, is.numeric)])
X.eigen <- eigen(X.cov)
plot(X.eigen$values/sum(X.eigen$values), xlab = 'Number of PCs', ylab = '% Variance Explained', main = 'Scree plot')
lines(X.eigen$values/sum(X.eigen$values))
dev.print(pdf,'../Plots/scree-plot.pdf')
ridge.mod <- cv.glmnet(X,y, alpha = 0)
# NOTE(review): empty function body — `function(i) )` is a parse error; the
# intended body was never written.
lapply(1:nrow(aging), function(i) )
#' Sample rows from a data frame, stratified by one or more grouping columns.
#'
#' @param df A data.frame to sample from.
#' @param group Character vector of column names defining the strata.
#' @param size Either a proportion (< 1) sampled from every stratum, a fixed
#'   per-stratum count (>= 1), or a vector of per-stratum counts (optionally
#'   named by stratum level; unnamed vectors are assumed to follow the order
#'   of the strata).
#' @param select Optional named list; before sampling, keep only rows whose
#'   listed columns take one of the listed values.
#' @param replace Sample with replacement?
#' @param bothSets If TRUE, return list(SET1 = sample, SET2 = remaining rows);
#'   otherwise return just the sampled rows.
#' @return A data.frame of sampled rows, or a list of two data.frames.
stratified <- function(df, group, size, select = NULL,
                       replace = FALSE, bothSets = FALSE) {
  # Optionally restrict to rows matching every column/value pair in `select`.
  # (The original no-op `if (is.null(select)) df <- df` branch is dropped.)
  if (!is.null(select)) {
    if (is.null(names(select))) stop("'select' must be a named list")
    if (!all(names(select) %in% names(df)))
      stop("Please verify your 'select' argument")
    temp <- sapply(names(select),
                   function(x) df[[x]] %in% select[[x]])
    df <- df[rowSums(temp) == length(select), ]
  }

  # One stratum per observed combination of the grouping columns.
  df.interaction <- interaction(df[group], drop = TRUE)
  df.table <- table(df.interaction)
  df.split <- split(df, df.interaction)

  # Resolve `size` into a named per-stratum sample-size vector `n`.
  if (length(size) > 1) {
    if (length(size) != length(df.split))
      stop("Number of groups is ", length(df.split),
           " but number of sizes supplied is ", length(size))
    if (is.null(names(size))) {
      # Unnamed vector: assume it is ordered like the strata, and echo the
      # implied mapping so the caller can verify it.
      n <- setNames(size, names(df.split))
      message(sQuote("size"), " vector entered as:\n\nsize = structure(c(",
              paste(n, collapse = ", "), "),\n.Names = c(",
              paste(shQuote(names(n)), collapse = ", "), ")) \n\n")
    } else if (all(names(size) %in% names(df.split))) {
      # Named vector: reorder to match the strata. (Replaces the original
      # `ifelse()` misused for control flow.)
      n <- size[names(df.split)]
    } else {
      stop("Named vector supplied with names ",
           paste(names(size), collapse = ", "),
           "\n but the names for the group levels are ",
           paste(names(df.split), collapse = ", "))
    }
  } else if (size < 1) {
    # Proportional allocation per stratum.
    n <- round(df.table * size, digits = 0)
  } else {
    # Fixed per-stratum count (size >= 1).
    if (all(df.table >= size) || isTRUE(replace)) {
      n <- setNames(rep(size, length.out = length(df.split)),
                    names(df.split))
    } else {
      # Some strata are too small: take all of their rows instead.
      message(
        "Some groups\n---",
        paste(names(df.table[df.table < size]), collapse = ", "),
        "---\ncontain fewer observations",
        " than desired number of samples.\n",
        "All observations have been returned from those groups.")
      big <- df.table[df.table >= size]
      n <- c(setNames(rep(size, length(big)), names(big)),
             df.table[df.table < size])
    }
  }

  # Draw the per-stratum samples and stack them. `n` is always indexed by
  # stratum name, so the ordering of its construction above does not matter.
  temp <- lapply(
    names(df.split),
    function(x) df.split[[x]][sample(df.table[x],
                                     n[x], replace = replace), ])
  set1 <- do.call("rbind", temp)

  if (isTRUE(bothSets)) {
    # SET2 = rows of df whose rownames do not appear in the sample.
    set2 <- df[!rownames(df) %in% rownames(set1), ]
    list(SET1 = set1, SET2 = set2)
  } else {
    set1
  }
}
|
7daf00eb7bddd3e83df82ea60aaccc3b626acef8
|
8724ef8dede95067991ba5216536c942bea20a36
|
/man/format_number.Rd
|
1b151acfa3103bf26d80a8a5679781f2f9406837
|
[
"MIT"
] |
permissive
|
lee269/iapdashboard
|
716fb83ab0da12cbd369a8ee730a43fb9ad8dff1
|
28983023644c6af26d26817f7d0f8318679b46f8
|
refs/heads/master
| 2020-12-14T13:59:02.209407
| 2020-06-16T10:08:54
| 2020-06-16T10:08:54
| 234,764,891
| 0
| 0
|
NOASSERTION
| 2020-04-20T17:20:04
| 2020-01-18T16:42:38
|
R
|
UTF-8
|
R
| false
| true
| 290
|
rd
|
format_number.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/utils.R
\name{format_number}
\alias{format_number}
\title{Format numbers}
\usage{
format_number(x, format)
}
\arguments{
\item{x}{}
\item{format}{}
}
\value{
}
\description{
Format numbers
}
\keyword{internal}
|
5a79ceb9444f15b8f6c07223eba9f9c893bac1ca
|
16066007ac4fccb116f4a5e255aaf62233f1ec58
|
/Rcode/sesion000_creardocumento.R
|
45f3afa6ee3ae1db417b76ad00e17d3b336257a2
|
[
"MIT"
] |
permissive
|
NeoMapas/Monitoreo-acustico-JBM
|
9b1435e25d4fd4d2f1c858dab8774f11bf8acf15
|
8611ad0ad79596c7e1c1265a8f4bfa1fa068199a
|
refs/heads/master
| 2023-04-11T23:47:07.079757
| 2021-08-20T03:46:47
| 2021-08-20T03:46:47
| 279,161,252
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 978
|
r
|
sesion000_creardocumento.R
|
## Batch-build the Sweave (.Rnw) documents for the JBM acoustic-monitoring
## project and file the outputs under a date-stamped name.
##R --vanilla
# NOTE(review): hard-coded personal paths; this script only runs on the
# author's machine(s).
setwd("~/tmp/CEBA")
# Date stamp (YYYYMMDD) used to rename the generated PDF/R files.
hoy <- format(Sys.time(), "%Y%m%d")
mi.dir <- "400_InventarioJardinBotanicoMaracaibo"
# Document 1 (climate) settings -- immediately overwritten below, so only
# documents 2 and 3 are actually processed. TODO confirm this is intended.
mi.arch <- "Documento1_climaJBM"
titulo <- "EstacionalidadClimaJBM"
cdg.doc <- ""
mi.arch <- "Documento2_MonitoreoAcustico"
titulo <- "Ensayo_Monitoreo_Acustico_JBM"
cdg.doc <- ""
# Prefer the Dropbox copy of the document tree when present.
if (file.exists("~/Dropbox/CEBA/doc/")) {
  mi.path <- "~/Dropbox/CEBA/doc/"
} else {
  mi.path <- "~/CEBA/doc/"
}
##system(sprintf("rm %s.*",mi.arch))
# Weave document 2, then document 3; Stangle extracts document 3's R code.
Sweave(file=paste(mi.path,mi.dir,"/",mi.arch,".Rnw",sep=""),eps=F)
mi.arch <- "Documento3_ManuscritoMonitoreoAcustico"
Sweave(file=paste(mi.path,mi.dir,"/",mi.arch,".Rnw",sep=""),eps=F)
Stangle(file=paste(mi.path,mi.dir,"/",mi.arch,".Rnw",sep=""))
# Compile the woven TeX for document 3 to PDF.
tools::texi2dvi(paste(mi.arch,".tex",sep=""), pdf=TRUE)
##system(sprintf("evince %s.pdf &",mi.arch))
# File the outputs under a dated name.
# NOTE(review): `titulo` still holds document 2's title here, so document 3's
# PDF/R are filed under document 2's name -- confirm this is intended.
system(paste("mv ",mi.arch,".pdf ",mi.path,"/",mi.dir,"/",hoy,"_",titulo,".pdf",sep=""))
system(paste("mv ",mi.arch,".R ",mi.path,"/",mi.dir,"/",hoy,"_",titulo,".R",sep=""))
|
5a1fab496dce724b3278270fb93df092f962f114
|
7a36292bf7357844a8bde2d7a0cf92a722777489
|
/source/workflow/18_stl_zip_plots.R
|
3d15f885970938ab9494e8be9103dba9977c24c3
|
[
"CC-BY-4.0",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
slu-openGIS/covid_daily_viz
|
ed573d4195f4097f5d101a0b908f9c1d0bb5bb21
|
ec69fa0e5819f043fe9024af31e77f61ea4fdc57
|
refs/heads/master
| 2023-04-11T09:59:25.238840
| 2022-04-01T18:50:18
| 2022-04-01T18:50:18
| 249,867,864
| 9
| 9
|
CC-BY-4.0
| 2020-07-05T19:03:25
| 2020-03-25T02:26:17
|
R
|
UTF-8
|
R
| false
| false
| 15,927
|
r
|
18_stl_zip_plots.R
|
# st louis zip code level plots
# NOTE(review): this script assumes sf, dplyr, ggplot2, ggrepel and
# RColorBrewer are attached by the workflow driver, and that `date`,
# `map_breaks()`, `get_coords()`, `save_plots()` and `sequoia_theme()` are
# defined upstream -- confirm against the calling script.
# =============================================================================
# load data
# Read each ZCTA/county snapshot (WGS84) and re-project to UTM 15N (EPSG 26915)
# so all layers overlay in the same planar coordinates.
regional_zip_sf <- st_read("data/MO_HEALTH_Covid_Tracking/data/zip/daily_snapshot_regional.geojson", crs = 4326,
                           stringsAsFactors = FALSE) %>%
  st_transform(crs = 26915)
# County outlines for St. Charles (29183), Jefferson (29099), St. Louis
# County (29189) and St. Louis City (29510).
regional_counties <- st_read("data/MO_HEALTH_Covid_Tracking/data/county/daily_snapshot_mo.geojson") %>%
  st_transform(crs = 26915) %>%
  filter(GEOID %in% c("29099", "29183", "29189", "29510")) %>%
  select(GEOID, county)
# County centroids used to place the county labels on the regional maps.
regional_centroids <- regional_counties %>%
  st_centroid() %>%
  get_coords(crs = 26915)
st_geometry(regional_centroids) <- NULL
city_county_zip_sf <- st_read("data/MO_HEALTH_Covid_Tracking/data/zip/daily_snapshot_city_county.geojson", crs = 4326,
                              stringsAsFactors = FALSE) %>%
  st_transform(crs = 26915)
jeffco <- st_read("data/MO_HEALTH_Covid_Tracking/data/zip/daily_snapshot_jefferson_county.geojson", crs = 4326,
                  stringsAsFactors = FALSE) %>%
  st_transform(crs = 26915)
st_charles <- st_read("data/MO_HEALTH_Covid_Tracking/data/zip/daily_snapshot_st_charles_county.geojson", crs = 4326,
                      stringsAsFactors = FALSE) %>%
  st_transform(crs = 26915)
stl_city <- st_read("data/MO_HEALTH_Covid_Tracking/data/zip/daily_snapshot_stl_city.geojson", crs = 4326,
                    stringsAsFactors = FALSE) %>%
  st_transform(crs = 26915)
stl_county <- st_read("data/MO_HEALTH_Covid_Tracking/data/zip/daily_snapshot_stl_county.geojson", crs = 4326,
                      stringsAsFactors = FALSE) %>%
  st_transform(crs = 26915)
# =============================================================================
# store breaks
# Quantile breaks computed once on the regional layer so every case-rate map
# below shares the same legend categories.
breaks <- classInt::classIntervals(regional_zip_sf$case_rate, n = 5, style = "quantile")
# identify minimum value and apply to breaks
# Lower the first break to the global minimum across all layers so no layer's
# smallest rate falls outside the first class.
breaks$brks[1] <- min(c(min(city_county_zip_sf$case_rate, na.rm = TRUE),
                        min(regional_zip_sf$case_rate, na.rm = TRUE),
                        min(st_charles$case_rate, na.rm = TRUE),
                        min(jeffco$case_rate, na.rm = TRUE),
                        min(stl_city$case_rate, na.rm = TRUE),
                        min(stl_county$case_rate, na.rm = TRUE)))
# =============================================================================
# map reported rate
## create breaks
# ZCTAs with missing rates are plotted separately in grey.
zip_valid <- filter(regional_zip_sf, is.na(case_rate) == FALSE)
zip_na <- filter(regional_zip_sf, is.na(case_rate) == TRUE)
zip_valid <- map_breaks(zip_valid, var = "case_rate", newvar = "map_breaks",
                        breaks = breaks, dig_lab = 2)
# create palette
# Named palette ties each fill colour to a break level; reused for every
# reported-rate map below.
pal <- RColorBrewer::brewer.pal(n = 5, name = "GnBu")
names(pal) <- levels(zip_valid$map_breaks)
## create map
p <- ggplot() +
  geom_sf(data = zip_na, fill = "#9d9d9d") +
  geom_sf(data = zip_valid, mapping = aes(fill = map_breaks)) +
  geom_sf(data = regional_counties, fill = NA, color = "black", size = .75) +
  geom_text_repel(data = regional_centroids, mapping = aes(x = x, y = y, label = county),
                  nudge_x = c(-35000, -20000, -40000, 10000),
                  nudge_y = c(-10000, 20000, -20000, -35000),
                  size = 6) +
  scale_fill_manual(values = pal, name = "Rate per 1,000") +
  labs(
    title = "Reported COVID-19 Cases by \nRegional St. Louis ZCTA",
    subtitle = paste0("Current as of ", as.character(date)),
    caption = "Plot by Christopher Prener, Ph.D.\nData via the included counties and the U.S. Census Bureau"
  ) +
  sequoia_theme(base_size = 22, background = "white", map = TRUE)
## save map
save_plots(filename = "results/high_res/stl_zip/a_case_map_regional.png", plot = p, preset = "lg")
save_plots(filename = "results/low_res/stl_zip/a_case_map_regional.png", plot = p, preset = "lg", dpi = 72)
# =============================================================================
# map 14-day average rate
## create breaks
# Negative averages (reporting corrections) are treated as missing.
regional_zip_sf <- mutate(regional_zip_sf, case_avg_rate = ifelse(case_avg_rate < 0, NA, case_avg_rate))
zip_valid <- filter(regional_zip_sf, is.na(case_avg_rate) == FALSE)
zip_na <- filter(regional_zip_sf, is.na(case_avg_rate) == TRUE)
zip_valid <- map_breaks(zip_valid, var = "case_avg_rate", newvar = "map_breaks",
                        style = "quantile", classes = 5, dig_lab = 2)
## create map
p <- ggplot() +
  geom_sf(data = zip_na, fill = "#9d9d9d") +
  geom_sf(data = zip_valid, mapping = aes(fill = map_breaks)) +
  geom_sf(data = regional_counties, fill = NA, color = "black", size = .75) +
  geom_text_repel(data = regional_centroids, mapping = aes(x = x, y = y, label = county),
                  nudge_x = c(-35000, -20000, -40000, 10000),
                  nudge_y = c(-10000, 20000, -20000, -35000),
                  size = 6) +
  scale_fill_brewer(palette = "RdPu", name = "Rate per 10,000") +
  labs(
    title = "14-day Average of New COVID-19 Cases\nby Regional St. Louis ZCTA",
    subtitle = paste0("Current as of ", as.character(date)),
    caption = "Plot by Christopher Prener, Ph.D.\nData via the included counties and the U.S. Census Bureau"
  ) +
  sequoia_theme(base_size = 22, background = "white", map = TRUE)
## save map
save_plots(filename = "results/high_res/stl_zip/d_avg_map_regional.png", plot = p, preset = "lg")
save_plots(filename = "results/low_res/stl_zip/d_avg_map_regional.png", plot = p, preset = "lg", dpi = 72)
# =============================================================================
# modify regional objects
# The core maps only outline/label St. Louis City (29510) and County (29189).
regional_centroids <- filter(regional_centroids, GEOID %in% c("29510", "29189"))
regional_counties <- filter(regional_counties, GEOID %in% c("29510", "29189"))
# =============================================================================
# clean-up
rm(p, regional_zip_sf)
# =============================================================================
## highlight focal zips
# 63103 and 63025 are called out in the scatterplots below because of their
# nursing-home outbreaks (see plot captions).
focal_zips <- filter(city_county_zip_sf, GEOID_ZCTA %in% c("63103", "63025"))
non_focal_zips <- filter(city_county_zip_sf, GEOID_ZCTA %in% c("63103", "63025") == FALSE)
# =============================================================================
# map reported rate
## create breaks
zip_valid <- filter(city_county_zip_sf, is.na(case_rate) == FALSE)
zip_na <- filter(city_county_zip_sf, is.na(case_rate) == TRUE)
zip_valid <- map_breaks(zip_valid, var = "case_rate", newvar = "map_breaks",
                        breaks = breaks, dig_lab = 2)
## create map
p <- ggplot() +
  geom_sf(data = zip_na, fill = "#9d9d9d") +
  geom_sf(data = zip_valid, mapping = aes(fill = map_breaks)) +
  geom_sf(data = regional_counties, fill = NA, color = "black", size = .75) +
  scale_fill_manual(values = pal, name = "Rate per 1,000") +
  labs(
    title = "Reported COVID-19 Cases by \nCore St. Louis ZCTA",
    subtitle = paste0("Current as of ", as.character(date)),
    caption = "Plot by Christopher Prener, Ph.D.\nData via the City of St. Louis, St. Louis County, and the U.S. Census Bureau"
  ) +
  sequoia_theme(base_size = 22, background = "white", map = TRUE)
## save map
save_plots(filename = "results/high_res/stl_zip/a_case_map_core.png", plot = p, preset = "lg")
save_plots(filename = "results/low_res/stl_zip/a_case_map_core.png", plot = p, preset = "lg", dpi = 72)
# =============================================================================
# plot poverty rates by zip
## define top_val
# Round the max case rate up to a whole number for a clean x axis.
# NOTE(review): round_any() comes from plyr -- confirm it is attached upstream.
top_val <- round_any(x = max(city_county_zip_sf$case_rate, na.rm = TRUE), accuracy = 1, f = ceiling)
## plot poverty position = "jitter"
# Focal ZCTAs are drawn twice (orange halo under a green dot) so they stand
# out against the jittered non-focal points.
p <- ggplot() +
  geom_smooth(data = city_county_zip_sf, mapping = aes(x = case_rate, pvty_pct),
              method = "lm", color = "#D95F02", size = 1.5, linetype = "dashed") +
  geom_point(data = focal_zips, mapping = aes(x = case_rate, pvty_pct), color = "#D95F02", size = 6) +
  geom_point(data = focal_zips, mapping = aes(x = case_rate, pvty_pct), color = "#1B9E77", size = 4) +
  geom_point(data = non_focal_zips, mapping = aes(x = case_rate, pvty_pct), color = "#1B9E77", size = 4,
             position = "jitter") +
  geom_label_repel(data = focal_zips, mapping = aes(x = case_rate, pvty_pct, label = GEOID_ZCTA),
                   size = 6,
                   box.padding = 0.35,
                   point.padding = 0.5,
                   nudge_y = -4,
                   nudge_x = .5,
                   segment.color = 'grey50') +
  scale_x_continuous(limits = c(0, top_val), breaks = seq(0, top_val, by = 2)) +
  scale_y_continuous(limits = c(0,60), breaks = c(0,10,20,30,40,50,60)) +
  labs(
    title = "Reported COVID-19 Cases by St. Louis ZCTA",
    subtitle = paste0("Current as of ", as.character(date)),
    x = "Reported Rate per 1,000 Residents",
    y = "Residents Below Poverty Line (%)",
    caption = "Plot by Christopher Prener, Ph.D.\nData via the City of St. Louis, St. Louis County, and the U.S. Census Bureau\n63103 and 63025 have significant nursing home outbreaks"
  ) +
  sequoia_theme(base_size = 22, background = "white")
## save plots
save_plots(filename = "results/high_res/stl_zip/b_poverty_plot.png", plot = p, preset = "lg")
save_plots(filename = "results/low_res/stl_zip/b_poverty_plot.png", plot = p, preset = "lg", dpi = 72)
# =============================================================================
# plot race
## plot race
# Same construction as the poverty plot, with % African American on the y axis.
p <- ggplot() +
  geom_smooth(data = city_county_zip_sf, mapping = aes(x = case_rate, blk_pct),
              method = "lm", color = "#D95F02", size = 1.5, linetype = "dashed") +
  geom_point(data = focal_zips, mapping = aes(x = case_rate, blk_pct), color = "#D95F02", size = 6) +
  geom_point(data = focal_zips, mapping = aes(x = case_rate, blk_pct), color = "#1B9E77", size = 4) +
  geom_point(data = non_focal_zips, mapping = aes(x = case_rate, blk_pct), color = "#1B9E77", size = 4,
             position = "jitter") +
  geom_label_repel(data = focal_zips, mapping = aes(x = case_rate, blk_pct, label = GEOID_ZCTA),
                   size = 6,
                   box.padding = 0.35,
                   point.padding = 0.5,
                   nudge_y = -4,
                   nudge_x = .5,
                   segment.color = 'grey50') +
  scale_x_continuous(limits = c(0, top_val), breaks = seq(0, top_val, by = 2)) +
  scale_y_continuous(limits = c(0,100), breaks = c(0,20,40,60,80,100)) +
  labs(
    title = "Reported COVID-19 Cases by St. Louis ZCTA",
    subtitle = paste0("Current as of ", as.character(date)),
    x = "Reported Rate per 1,000 Residents",
    y = "African American Residents (%)",
    caption = "Plot by Christopher Prener, Ph.D.\nData via the City of St. Louis, St. Louis County, and the U.S. Census Bureau\n63103 and 63025 have significant nursing home outbreaks"
  ) +
  sequoia_theme(base_size = 22, background = "white")
## save plots
save_plots(filename = "results/high_res/stl_zip/c_race_plot.png", plot = p, preset = "lg")
save_plots(filename = "results/low_res/stl_zip/c_race_plot.png", plot = p, preset = "lg", dpi = 72)
# =============================================================================
# clean-up
rm(city_county_zip_sf, focal_zips, non_focal_zips, zip_na, zip_valid,
   regional_centroids, regional_counties)
rm(p, top_val)
# =============================================================================
# The four single-jurisdiction maps below all follow the same template:
# split valid/NA ZCTAs, classify with the shared `breaks`/`pal`, map, save.
# map reported rate
## create breaks
zip_valid <- filter(jeffco, is.na(case_rate) == FALSE)
zip_na <- filter(jeffco, is.na(case_rate) == TRUE)
zip_valid <- map_breaks(zip_valid, var = "case_rate", newvar = "map_breaks",
                        breaks = breaks, dig_lab = 2)
## create map
p <- ggplot() +
  geom_sf(data = zip_na, fill = "#9d9d9d") +
  geom_sf(data = zip_valid, mapping = aes(fill = map_breaks)) +
  scale_fill_manual(values = pal, name = "Rate per 1,000") +
  labs(
    title = "Reported COVID-19 Cases by \nJefferson County ZCTA",
    subtitle = paste0("Current as of ", as.character(date)),
    caption = "Plot by Christopher Prener, Ph.D.\nData via Jefferson County and the U.S. Census Bureau"
  ) +
  sequoia_theme(base_size = 22, background = "white", map = TRUE)
## save map
save_plots(filename = "results/high_res/stl_zip/a_case_map_jefferson.png", plot = p, preset = "lg")
save_plots(filename = "results/low_res/stl_zip/a_case_map_jefferson.png", plot = p, preset = "lg", dpi = 72)
## clean-up
rm(jeffco, p, zip_na, zip_valid)
# =============================================================================
# map reported rate
## create breaks
zip_valid <- filter(st_charles, is.na(case_rate) == FALSE)
zip_na <- filter(st_charles, is.na(case_rate) == TRUE)
zip_valid <- map_breaks(zip_valid, var = "case_rate", newvar = "map_breaks",
                        breaks = breaks, dig_lab = 2)
## create map
p <- ggplot() +
  geom_sf(data = zip_na, fill = "#9d9d9d") +
  geom_sf(data = zip_valid, mapping = aes(fill = map_breaks)) +
  scale_fill_manual(values = pal, name = "Rate per 1,000") +
  labs(
    title = "Reported COVID-19 Cases by \nSt. Charles County ZCTA",
    subtitle = paste0("Current as of ", as.character(date)),
    caption = "Plot by Christopher Prener, Ph.D.\nData via St. Charles County and the U.S. Census Bureau"
  ) +
  sequoia_theme(base_size = 22, background = "white", map = TRUE)
## save map
save_plots(filename = "results/high_res/stl_zip/a_case_map_st_charles.png", plot = p, preset = "lg")
save_plots(filename = "results/low_res/stl_zip/a_case_map_st_charles.png", plot = p, preset = "lg", dpi = 72)
## clean-up
rm(st_charles, p, zip_na, zip_valid)
# =============================================================================
# map reported rate
## create breaks
zip_valid <- filter(stl_county, is.na(case_rate) == FALSE)
zip_na <- filter(stl_county, is.na(case_rate) == TRUE)
zip_valid <- map_breaks(zip_valid, var = "case_rate", newvar = "map_breaks",
                        breaks = breaks, dig_lab = 2)
## create map
p <- ggplot() +
  geom_sf(data = zip_na, fill = "#9d9d9d") +
  geom_sf(data = zip_valid, mapping = aes(fill = map_breaks)) +
  scale_fill_manual(values = pal, name = "Rate per 1,000") +
  labs(
    title = "Reported COVID-19 Cases by \nSt. Louis County ZCTA",
    subtitle = paste0("Current as of ", as.character(date)),
    caption = "Plot by Christopher Prener, Ph.D.\nData via St. Louis County and the U.S. Census Bureau"
  ) +
  sequoia_theme(base_size = 22, background = "white", map = TRUE)
## save map
save_plots(filename = "results/high_res/stl_zip/a_case_map_stl_county.png", plot = p, preset = "lg")
save_plots(filename = "results/low_res/stl_zip/a_case_map_stl_county.png", plot = p, preset = "lg", dpi = 72)
## clean-up
rm(stl_county, p, zip_na, zip_valid)
# =============================================================================
# map reported rate
## create breaks
zip_valid <- filter(stl_city, is.na(case_rate) == FALSE)
zip_na <- filter(stl_city, is.na(case_rate) == TRUE)
zip_valid <- map_breaks(zip_valid, var = "case_rate", newvar = "map_breaks",
                        breaks = breaks, dig_lab = 2)
## create map
p <- ggplot() +
  geom_sf(data = zip_na, fill = "#9d9d9d") +
  geom_sf(data = zip_valid, mapping = aes(fill = map_breaks)) +
  scale_fill_manual(values = pal, name = "Rate per 1,000") +
  labs(
    title = "Reported COVID-19 Cases by \nSt. Louis City ZCTA",
    subtitle = paste0("Current as of ", as.character(date)),
    caption = "Plot by Christopher Prener, Ph.D.\nData via St. Louis City and the U.S. Census Bureau"
  ) +
  sequoia_theme(base_size = 22, background = "white", map = TRUE)
## save map
save_plots(filename = "results/high_res/stl_zip/a_case_map_stl_city.png", plot = p, preset = "lg")
save_plots(filename = "results/low_res/stl_zip/a_case_map_stl_city.png", plot = p, preset = "lg", dpi = 72)
## clean-up
rm(stl_city, p, zip_na, zip_valid)
# =============================================================================
## clean-up
rm(breaks, pal)
|
7de611a98949a6cb732d0e8c29c086bc57f2b0e0
|
69ad7c31d1bfa947acf3f35f96704a95c0c31a89
|
/code/8-create-grid-panel.R
|
f353205a51d39da2097d291874b224e31a5fefcb
|
[] |
no_license
|
Copepoda/witch-trials
|
f0958ed44ca1a9fcd143522b50f053a5e871f7fb
|
b6c77d91b9f60860ff44052bf254b83f811690e9
|
refs/heads/master
| 2021-06-18T11:24:54.549251
| 2017-05-19T02:04:26
| 2017-05-19T02:04:26
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 5,833
|
r
|
8-create-grid-panel.R
|
# Title: 8-create-grid-cells.R
# Description: Create a panel dataset by grid cell.
# Authors: Peter T. Leeson and Jacob W. Russ
# Overlays a regular grid on Europe, assigns witch-trial and battle events to
# grid cells, and writes a balanced cell-by-decade panel (1300-1850) to csv.
library(readr)
library(raster)
library(dplyr)
library(magrittr)
library(tidyr)
library(maptools)
library(sp)
library(spatialEco)
library(Grid2Polygons)
library(rgdal)
library(ggplot2)
# Import datasets -------------------------------------------------------------
trials <- read_csv("data/trials.csv")
battles <- read_csv("data/battles.csv")
# Europe NUTS regions, read as WGS84 (EPSG 4326).
euro_spdf <- readShapePoly("data/raw/Eurostat/resolution_60_mil/NUTS_RG_60M_2010.shp",
                           proj4string = CRS("+init=epsg:4326"))
# GADM 2012
europe_adm2 <- read_csv("data/raw/GADM/europe_gadm_adm2.csv")
# Calculate the centroid of the counties
# Mean of boundary vertices per ADM2 unit -- an approximation of the polygon
# centroid, used below to geolocate trials that lack point coordinates.
centroids_adm2 <- europe_adm2 %>%
  select(gadm.adm0 = NAME_0, gadm.adm1 = NAME_1, gadm.adm2 = id,
         lon = long, lat) %>%
  group_by(gadm.adm0, gadm.adm1, gadm.adm2) %>%
  summarise(c.lon = mean(lon),
            c.lat = mean(lat)) %>%
  ungroup()
# Recode United Kingdom countries to GADM 1 Regions ---------------------------
# Replace "United Kingdom" with the constituent country (England, Scotland,
# ...) in all three tables so the join keys agree.
trials <- trials %>%
  mutate(gadm.adm0 = if_else(condition = gadm.adm0 %in% "United Kingdom",
                             true = gadm.adm1,
                             false = gadm.adm0))
battles <- battles %>%
  mutate(gadm.adm0 = if_else(condition = gadm.adm0 %in% "United Kingdom",
                             true = gadm.adm1,
                             false = gadm.adm0))
centroids_adm2 <- centroids_adm2 %>%
  mutate(gadm.adm0 = if_else(condition = gadm.adm0 %in% "United Kingdom",
                             true = gadm.adm1,
                             false = gadm.adm0))
# Merge centroids onto trials
# Fill missing trial coordinates with the ADM2 centroid when available.
trials_w_centroids <- centroids_adm2 %>%
  left_join(x = trials, y = ., by = c("gadm.adm2", "gadm.adm1", "gadm.adm0")) %>%
  mutate(lon = if_else(is.na(lon) & !is.na(c.lon), c.lon, lon),
         lat = if_else(is.na(lat) & !is.na(c.lat), c.lat, lat))
# Make events into point shapefiles
# Events without coordinates are dropped here.
trials_coords <- trials_w_centroids %>% filter(!is.na(lat)) %>% select(lon, lat)
battles_coords <- battles %>% filter(!is.na(lat)) %>% select(lon, lat)
trials_spdf <- trials_w_centroids %>%
  filter(!is.na(lat)) %>%
  SpatialPointsDataFrame(coords = trials_coords,
                         data = .,
                         proj4string = CRS("+init=epsg:4326"))
battles_spdf <- battles %>%
  filter(!is.na(lat)) %>%
  SpatialPointsDataFrame(coords = battles_coords,
                         data = .,
                         proj4string = CRS("+init=epsg:4326"))
# Convert to Euro equal area projection in meters
euro_spdf %<>% spTransform(CRS("+init=epsg:3035"))
trials_spdf %<>% spTransform(CRS("+init=epsg:3035"))
battles_spdf %<>% spTransform(CRS("+init=epsg:3035"))
# Define SpatialGrid object in the same projection
bb <- bbox(euro_spdf)
cs <- c(1, 1) * 250000 # cell size 250km x 250km
cc <- bb[ , 1] + (cs / 2) # cell offset
cd <- ceiling(diff(t(bb))/cs) # number of cells per direction
grd <- GridTopology(cellcentre.offset = cc, cellsize = cs, cells.dim = cd)
grd
sp_grd <- SpatialGridDataFrame(grid = grd,
                               data = data.frame(id = 1:prod(cd)),
                               proj4string = CRS("+init=epsg:3035"))
poly_grid <- Grid2Polygons(sp_grd)
# Keep only the grids that are over land --------------------------------------
poly_grid_subset <- raster::intersect(x = poly_grid, y = euro_spdf)
euro_df <- fortify(euro_spdf)
grid_df <- fortify(poly_grid_subset)
trials_df <- data.frame(trials_spdf)
battles_df <- data.frame(battles_spdf)
# Assign each event point to the grid cell (column `z`) that contains it.
trials_pts_poly <- point.in.poly(trials_spdf, poly_grid_subset)
battles_pts_poly <- point.in.poly(battles_spdf, poly_grid_subset)
trials_by_grid <- trials_pts_poly@data %>%
  as_data_frame() %>%
  rename(grid.id = z) %>%
  group_by(grid.id, decade) %>%
  summarise(trials = sum(tried))
battles_by_grid <- battles_pts_poly@data %>%
  as_data_frame() %>%
  rename(grid.id = z) %>%
  group_by(grid.id, decade) %>%
  summarise(battles = n())
# Build a balanced grid-by-decade panel and join on the event counts.
combined <- poly_grid_subset@data %>%
  as_data_frame() %>%
  rename(grid.id = z) %>%
  expand(grid.id, decade = seq(1300, 1850, 10)) %>%
  left_join(y = trials_by_grid, by = c("grid.id", "decade")) %>%
  left_join(y = battles_by_grid, by = c("grid.id", "decade")) %>%
  # Change "missing" battles to zeroes
  replace_na(list(battles = 0, trials = 0)) %>%
  # Add three "future" battles columns for the placebo test. For leads or lags
  # to work correctly, we need to sort the data frame and use only one grouping
  # variable. In this case use grid.id.
  arrange(grid.id, decade) %>%
  group_by(grid.id) %>%
  mutate(ln.trials = if_else(trials %in% 0, NA_real_, log(trials)),
         ln1p.trials = log1p(trials),
         battles.tp1 = lead(battles, 1),
         battles.tp2 = lead(battles, 2),
         battles.tp3 = lead(battles, 3)) %>%
  ungroup() %>%
  mutate(grid.id = factor(grid.id) %>% as.numeric)
# Plot map of Europe as a check
map <- ggplot() +
  geom_polygon(mapping = aes(x = long, y = lat, group = group),
               fill = "white",
               colour = "black", data = euro_df) +
  coord_cartesian(xlim = c(2000000, 8000000), ylim = c(1000000, 5500000)) +
  geom_point(mapping = aes(x = lon.1, y = lat.1),
             size = 2,
             data = trials_df) +
  geom_point(mapping = aes(x = lon.1, y = lat.1),
             size = 2, colour = "red",
             data = battles_df) +
  geom_polygon(mapping = aes(x = long, y = lat, group = group),
               fill = NA,
               colour = "black", data = grid_df)
map
# Export grids to csv
write_csv(combined, "data/clean/panel_dataset_grids.csv")
|
c4fad3dc62b23eec50e9d8fe9f0568acb056f24f
|
27834a88caf9870538887f398b094a246d7b62c8
|
/workflow/scripts/homer_clean.R
|
0ff822521d2e1eb8838e22478a2bc49c973f3f1c
|
[] |
no_license
|
pd321/chipseq
|
6d88a9b7e508bdbf62283c1cd2e620552c889761
|
94c990f0b212d1d55144520c70414653269c388c
|
refs/heads/master
| 2023-05-10T07:14:34.507092
| 2023-05-04T01:47:40
| 2023-05-04T01:47:40
| 208,949,740
| 0
| 0
| null | 2023-05-04T01:47:41
| 2019-09-17T03:19:25
|
Python
|
UTF-8
|
R
| false
| false
| 743
|
r
|
homer_clean.R
|
# Clean a HOMER annotatePeaks.pl output table for downstream use.
# NOTE(review): `snakemake` is injected by Snakemake's script directive; this
# file is not runnable outside the workflow.
# Redirect all console and message output to the workflow log file.
log <- file(snakemake@log[[1]], open="wt")
sink(log)
sink(log, type="message")
# Libload
suppressPackageStartupMessages(library(dplyr))
suppressPackageStartupMessages(library(readr))
# Read in homer file
homer.df <- readr::read_tsv(snakemake@input[[1]])
# Remove the feature details
# Keep only the text before the first literal " (" in each annotation,
# dropping the parenthesised detail that follows it.
homer.df <- homer.df %>% mutate(Annotation = sapply(Annotation, function(x){strsplit(as.character(x), split = " (", fixed = TRUE)[[1]][1]}))
# Keep only required columns
homer.df <- homer.df %>% select(c("Chr", "Start", "End", "Annotation", "Gene Name", "Gene Type", "Distance to TSS", "Gene Alias", "Gene Description"))
# Write output
# Tab-separated, unquoted, with NA written as empty string.
write.table(homer.df, file = snakemake@output[[1]], sep = "\t", row.names = FALSE, quote = FALSE, na = "")
# Record package versions in the log for provenance.
sessionInfo()
|
8d78f517bc5a162ecc1025e5c456352c3937ea3f
|
ce5e5a07068c7054376e016722ae9a5ea88d1479
|
/21.Data_Visualization_XY.R
|
fbc79ede304b6360988544270ab0cb85648c220c
|
[] |
no_license
|
phamdinhthang/DataScience_Utils_R
|
463d44a222e118d4089b9581043b3f4abfb8fc0f
|
2bf97198fbd262bf8690ba6cd7d238415a5b6d56
|
refs/heads/master
| 2020-03-18T23:34:07.870444
| 2018-05-30T08:22:17
| 2018-05-30T08:22:17
| 135,411,024
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,769
|
r
|
21.Data_Visualization_XY.R
|
# Gallery of base-R XY visualisation techniques; each "Type" section is
# independent and draws to the active graphics device.
# NOTE(review): all library() calls below are commented out; the script
# assumes MASS, aplpack, corrplot and rpart are already attached (the Boston,
# Cars93 and UScereal datasets ship with MASS) -- confirm before running.
#load the necessary packages
#library(MASS)
#library(grid)
#library(ggplot2)
#library(insuranceData)
#library(robustbase)
#library(car) #qqPlot() available inside car package
#library(aplpack) #bagplot() available inside aplpack
#library(corrplot) #corrplot() available inside corrplot
#library(rpart)
#library(wordcloud) #draw word in to image
#library(lattice)
#library(ggplot2)
#Type 1: Scatterplot & Sunflowerplot
# Sunflower plots mark overplotted points with one petal per duplicate.
par(mfrow = c(1,2))
plot(Boston$zn,Boston$rad)
title("Standard scatterplot")
sunflowerplot(Boston$zn,Boston$rad)
title("Sunflower plot")
#Type 2: Box plots: y vs some categorical x
# varwidth scales box width with group size; log = "y" puts crime on a log axis.
boxplot(formula = crim ~ rad, data = Boston, varwidth = TRUE,las = 1, log = "y")
title("Crime rate vs. radial highway index")
#Type 3: Mosaic plots: two categorical var
mosaicplot(carb ~ cyl, data = mtcars)
#Type 4: bagplot()
# A bagplot is a two-dimensional generalisation of the boxplot.
par(mfrow = c(1,2))
boxplot(Cars93$Min.Price,Cars93$Max.Price)
bagplot(Cars93$Min.Price, Cars93$Max.Price, cex = 1.2)
abline(a=0,b=1,lty = 2, col = "red") # reference line where Min.Price == Max.Price
#Type 5: correlation matrix and correlation plot
numericalVars <- UScereal[,sapply(UScereal,is.numeric)] #Extract only numeric cols from the data frame. corrplot only works with numeric input
corrMat <- cor(numericalVars) # Compute the correlation matrix for these variables
corrplot(corrMat, method = "ellipse") # Generate the correlation ellipse plot
#Type 6: Build decision tree model
tree_model <- rpart(medv ~ ., data = Boston) # Build a model to predict medv from all other Boston variables
plot(tree_model)
text(tree_model, cex = 0.7)
#Type 7: Multiple scatter plot using matplot()
df <- UScereal[, c("calories", "protein", "fat","fibre", "carbo", "sugars")] #Keep necessary columns
matplot(df$calories,df[,c("protein","fat","fibre","carbo","sugars")], xlab = "calories", ylab = "")
|
b2104b6bf2775449f08a09135bc69daa323de359
|
750423288021c0d0bcd0d656d09351e4f86870de
|
/analysis/2016/shiny_apps/snake_draft/v2_4/ui.R
|
f3e483ade4514775c8f89a7704f1813dc176266e
|
[] |
no_license
|
johnckane/fantasy-football
|
b51ae061dc221ad9e17900d1915b95c231a454ad
|
2ccfdb62f0011738172d774f9f4e2ba72936de2b
|
refs/heads/master
| 2022-12-05T09:18:40.070217
| 2020-09-03T02:28:39
| 2020-09-03T02:28:39
| 106,360,628
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 6,451
|
r
|
ui.R
|
library(shiny)
# Player projections used to populate the drafted-player picker below.
# NOTE(review): absolute local path; the commented alternative is the
# shiny-server deployment path -- switch when deploying.
df <- read.csv("/home/john/stats_corner/2016/shiny_apps/snake_draft/v2_1/faa_projection_data.csv",
#df <- read.csv("/srv/shiny-server/stats-corner/2016/snake-assistant/faa_projection_data.csv",
               stringsAsFactors = FALSE,
               header = TRUE)
shinyUI(fixedPage(
titlePanel("Snake Draft Assistant 2.4"),
fixedRow(
column(3,wellPanel(
numericInput("picks_made",label = h4("Picks Made"), value = 0)
)),
column(3,wellPanel(
h4("Your Next Picks are:"),
textOutput("next_pick"),
textOutput("next_pick1"),
textOutput("next_pick2")
)),
column(6,wellPanel(
h4("Your Recommended Positions to Draft Are:"),
textOutput("pos_recs")
))
),
fixedRow(
column(4,wellPanel(
h4("Drafted Players"),
selectizeInput("drafted_players", label = "Enter Players as they get Drafted", multiple = TRUE, choices = df$player_team)
)),
column(8,
tabsetPanel(
tabPanel("Quick Start",
p("1. Enter your draft parameters in the next tab once and once only."),
p("2. As all players, not just yours, get drafted enter their names into the 'Drafted Players' window."),
p("3. Everytime you make a selection update the 'Picks Made' counter in the upper left")
),
tabPanel("Draft Parameters",
numericInput("first_pick", label = h6("Round 1 Pick #"), value = 1),
numericInput("league_teams", label = h6("How Many Teams in League?"), value = 10),
selectInput("scoring_format", label = h6("Scoring Format"), choices = c("Standard","PPR"), selected = "Standard"),
selectInput("extra_pos", label = h6("Additional Positions"), choices = c("FLEX","OP"), selected = "FLEX")
#h5("Staring Lineup"),
#numericInput("starting_qbs", label = h6("QBs"), value = 1),
#numericInput("starting_rbs", label = h6("RBs"), value = 2),
#numericInput("starting_wrs", label = h6("WRs"), value = 2),
#numericInput("starting_tes", label = h6("TEs"), value = 1),
#numericInput("starting_flex", label = h6("Flex: Non-QB"), value = 1),
#numericInput("starting_op", label = h6("OP: Including QB"), value = 0),
#numericInput("starting_dst", label = h6("D/ST"), value = 1),
#numericInput("starting_k", label = h6("K"), value = 1)
),
tabPanel("Team Parameters",
h4("Enter the # of Players in Each Position of your Starting Lineup"),
numericInput("num_qb", label = "# QB", value = 1),
numericInput("num_rb", label = "# RB", value = 2),
numericInput("num_wr", label = "# WR", value = 3),
numericInput("num_te", label = "# TE", value = 1),
numericInput("num_flex", label = "# FLEX", value = 1),
numericInput("num_op", label = "# OP", value = 0),
numericInput("num_k", label = "# K", value = 1),
numericInput("num_dst", label = "# DST", value = 1)
),
tabPanel("Recomendations",
h4("PPG and Dropoffs of Best Available (BA) Now and Next Time (BANT)"),
checkboxGroupInput("pos_to_rec", label = h4("Positions to Recommend"),
choices = list("QB" = "QB", "RB" = "RB", "WR" = "WR", "TE" = "TE", "K" = "K",
"DST" = "DST", "FLEX" = "FLEX", "OP" = "OP"),
selected = c("QB","RB","WR","TE","K","DST","FLEX","OP"),
inline = T),
checkboxGroupInput("byes_to_filter", label = h4("BYE weeks to exclude:"),
choices = list("3" = 3, "4" = 4, "5" = 5, "6" = 6, "7" = 7,"8" = 8, "9" = 9,
"10" = 10, "11" = 11, "12" = 12, "13" = 13),
selected = c(),
inline = T),
radioButtons("one_or_two", label = h4("Recommend Based on One Pick From Now or Two?"),
choices = list("One" = 1, "Two" = 2),
selected = 1,
inline = T),
dataTableOutput("rec_table")
),
tabPanel("Available Players",
dataTableOutput("available_players")
),
tabPanel("Lineup Optimizer",
p("How to Use:"),
p("1. Enter players you've drafted on the left"),
p("2. Observe how your weekly point totals change based on players drafted and their bye weeks"),
p("3. Example Use Case: compare how weekly totals change depending on players you're considering drafting"),
p("4. This was built with intention of using this feature later in the draft when filling out your bench"),
column(6,
h5("Your Team"),
selectizeInput("your_team", label = "Enter Players YOU Drafted", multiple = TRUE, choices = df$player_team)
),
column(6,
h5("Weekly Expected Points From Starting Lineup"),
dataTableOutput("optimized_lineup"))
),
tabPanel("About",
a("Projection and ADP data downloaded from Fantasy Football Analytics",
href="http://fantasyfootballanalytics.net/"),
p("Data last updated on 9/4"),
p("Questions? Email me: StatsCorner@gmail.com"),
p(""),
p("App Updated on 2016-12-17, version 2.3"),
p("What's new in version 2.3 (vs 2.0)?"),
p("1. Data update"),
p("2. BYE week filters"),
p("3. Position filters"),
p("4. OP and FLEX recommendations"),
p("5. Lineup Optimizer"),
p("6. A 'Value Added' variable in recommendations, based on current roster and BYE Weeks")
)
)
)
)
)
)
|
900451e58ac40ffaa11a7ac16bddb044c4a1b2be
|
d657afbb753d08c15eb57f57f2b02b58001504ea
|
/Corrr.R
|
a2eed8a512e8df2a1ca6d61206bc0b3e16e0161c
|
[] |
no_license
|
MackSkippy/datasciencecoursera
|
3f7ca2f210e238032f41212e04a3c0ae6fb7fa0a
|
158eedca031794cc419d504c370b986207de5a23
|
refs/heads/master
| 2021-01-19T15:06:19.387720
| 2017-04-30T01:15:20
| 2017-04-30T01:15:20
| 88,196,360
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,266
|
r
|
Corrr.R
|
# Pooled correlation between nitrate and sulfate monitor readings.
#
# Reads monitor files "001.csv" .. "332.csv" from `directory`, keeps only
# monitors whose number of complete observations exceeds `threshold`,
# pools their complete rows, and returns the correlation between the
# nitrate and sulfate columns.
#
# Args:
#   directory: path to the folder holding the monitor CSV files.
#              (The original interpreted this relative to ~/R-Practice and
#              mutated the working directory via setwd(); file.path() is
#              used instead so no global state is touched.)
#   threshold: minimum (exclusive) number of complete observations a
#              monitor must have to be included. Default 0.
# Returns: the pooled correlation, or NA_real_ if no monitor qualifies.
corr <- function(directory, threshold = 0) {
  pooled <- NULL
  for (id in seq_len(332)) {
    # sprintf("%03d", ...) replaces the manual zero-padding if/else chain.
    fname <- file.path(directory, sprintf("%03d.csv", id))
    if (!file.exists(fname)) next
    dat <- read.csv(fname)
    ok <- dat[complete.cases(dat), , drop = FALSE]
    # BUG FIX: in the original, the braceless if() guarded only the
    # filename-padding branch, so monitors below the threshold were still
    # pooled into the correlation.
    if (nrow(ok) > threshold) {
      # BUG FIX: the original accumulated rows with c(), which flattens a
      # data frame into a plain list and breaks the later $nitrate /
      # $sulfate column access; rbind() keeps a proper data frame.
      pooled <- rbind(pooled, ok[, c("sulfate", "nitrate")])
    }
  }
  if (is.null(pooled) || nrow(pooled) == 0) {
    return(NA_real_)
  }
  cor(pooled$nitrate, pooled$sulfate)
}
|
6f3069dfb0181c4d78f627526e714ca045ab634e
|
ca34075095171c0fc733302a27ccf32156ccda99
|
/R/computeAuxillaryParameters.R
|
b44e244ed4ec026259ffedbb06eac9a1ff1325d6
|
[] |
no_license
|
cran/EquiNorm
|
e3dbb55ae859ee1690fc7fce8c16aa44e1ee013c
|
99862de9040e0f7e2479808b9be5748a15bf675f
|
refs/heads/master
| 2020-08-09T19:48:31.706218
| 2011-03-24T00:00:00
| 2011-03-24T00:00:00
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,192
|
r
|
computeAuxillaryParameters.R
|
# Estimate over-/under-expression means and variance components via a
# mixed-effects model (requires lmer/fixef/VarCorr from lme4, loaded by
# the package this function ships in).
#
# Args:
#   data.y:    G x n matrix of values (rows indexed 1..G, columns 1..n).
#   data.x:    length-n 0/1 indicator; columns with data.x == 1 form the
#              "treatment" group used in the design matrices below.
#   alpha.hat: unused in this body — presumably kept for interface
#              consistency with sibling functions; TODO confirm.
#   e.hat:     list with 0/1 vectors $o.hat / $u.hat flagging over- and
#              under-expressed rows of data.y.
# Returns: list(muU.hat, muO.hat, tau2.hat, psi2.hat, xi2.hat, sigma2.hat).
computeAuxillaryParameters <-
function (data.y, data.x, alpha.hat, e.hat)
{
    G <- dim(data.y)[1]   # number of rows (genes)
    n <- length(data.x)   # number of columns (samples)
    y <- data.y
    # Row index repeated across the n samples -> grouping factor for lmer.
    group <- t(apply(rbind(1:G), 2, rep, each = n))
    # Design matrices: 1 where the row is flagged over- (xo) / under- (xu)
    # expressed AND the sample is in the data.x == 1 group.
    xo <- matrix(0, nrow = G, ncol = n)
    xo[e.hat$o.hat == 1, data.x == 1] <- 1
    xu <- matrix(0, nrow = G, ncol = n)
    xu[e.hat$u.hat == 1, data.x == 1] <- 1
    # Flatten everything into long format for the model fit.
    data.lme <- data.frame(y = as.vector(y), xo = as.vector(xo),
        xu = as.vector(xu), group = as.vector(group))
    # Plain lm fit supplies starting values for the mixed model.
    temp.lm <- lm(y ~ -1 + xo + xu, data = data.lme)
    temp <- lmer(y ~ -1 + xo + xu + (1 + xo + xu | group), data = data.lme,
        REML = TRUE, start = c(xo = temp.lm$coefficients[[1]],
            xu = temp.lm$coefficients[[2]]), control = list(maxIter = 3000))
    muO.hat <- fixef(temp)[1]
    muU.hat <- fixef(temp)[2]
    # Variance components: the three random-effect variances for `group`
    # followed by the residual variance (sc^2).
    var.hat <- as.double(rbind(diag(VarCorr(temp)$group)[[1]],
        diag(VarCorr(temp)$group)[[2]], diag(VarCorr(temp)$group)[[3]],
        attr(VarCorr(temp), "sc")[[1]]^2))
    # NOTE(review): `P` is not defined anywhere in this function, so this
    # line relies on a `P` existing in the calling/global environment —
    # otherwise it errors. Looks like a bug (perhaps meant to be G, or
    # simply var.hat[4]); confirm against the package's other code.
    return(list(muU.hat = muU.hat, muO.hat = muO.hat, tau2.hat = var.hat[1],
        psi2.hat = var.hat[2], xi2.hat = var.hat[3], sigma2.hat = P *
            var.hat[4]))
}
|
d47efc4911b6c19d56ca0c388752c883ff17038a
|
3f3ce02d418819460d1251a83d0f8b378325ba54
|
/ridge_regression.R
|
2d0bc3dd9875e152d66d48f27eb9c05cc6f52057
|
[] |
no_license
|
clbwvr/Teaching
|
5ac07e9a68314bc4c6bbb6f10de0d4d0aaaf6ffd
|
8118deab6e5e36a75864351f1404490a4596b0b8
|
refs/heads/master
| 2021-06-01T02:12:27.502728
| 2016-07-10T14:50:43
| 2016-07-10T14:50:43
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,217
|
r
|
ridge_regression.R
|
# Ridge Regression demo ----
# OLS minimizes RSS; ridge regression minimizes RSS + lambda * sum(coef^2).
# Shrinkage lowers the variance of the fit, which helps when p is large.
# We compare ridge and plain least squares fits of "score" on pscl::admit
# (PhD admittance data), padded with junk predictors.
set.seed(123)
# Only install if missing instead of unconditionally on every run.
if (!requireNamespace("pscl", quietly = TRUE)) {
  install.packages("pscl")
}
library(MASS)
library(pscl)
data <- admit
data[, 1] <- as.numeric(data[, 1])

# Add 30 uniform-noise "junk" predictors to mess with the linear fit.
X <- matrix(runif(nrow(data) * 30), ncol = 30)
colnames(X) <- colnames(X, do.NULL = FALSE, prefix = "junk.")
data <- cbind(data, X)

# 2/3 train / 1/3 test split.
idx <- sample(1:nrow(data), (2 / 3) * nrow(data), replace = FALSE)
train <- data[idx, ]
test <- data[-idx, ]

# Simple linear fit ----
pairs(train)
fit.slr <- lm(score ~ ., data = train)
summary(fit.slr)
# All the junk variables are insignificant, but they still inflate the
# variance of our estimates.
preds <- predict(fit.slr, newdata = test)
fit.slr.testmse <- mean((preds - test$score)^2)

# Ridge regression ----
# We hope to shrink the junk variables' effects towards zero.
lambda.grid <- seq(50, 0, length.out = 1000)
library(glmnet)
x <- model.matrix(score ~ ., data = data)[, -1]
y <- data[, 1]
fit.ridge <- glmnet(x[idx, ], y[idx], lambda = lambda.grid, alpha = 0)

# Coefficient paths: at lambda = 0 they equal the OLS coefficients (after
# standardization); as lambda grows they shrink towards 0.
plot(range(lambda.grid), range(fit.ridge$beta), type = "n")
for (i in 1:nrow(fit.ridge$beta)) {
  lines(lambda.grid, fit.ridge$beta[i, ], col = i)
}

# Test-set validation to find the best lambda, then compare that ridge fit
# against the OLS fit's test MSE.
mse <- rep(NA, length(lambda.grid))
for (i in 1:length(mse)) {
  fit.r <- glmnet(x[idx, ], y[idx], lambda = lambda.grid[i], alpha = 0)
  pred <- predict(fit.r, newx = x[-idx, ])
  mse[i] <- mean((pred - test$score)^2)
}
lambda.grid[which.min(mse)]
plot(lambda.grid, mse, type = "l")
points(lambda.grid[which.min(mse)], min(mse), col = "red", pch = 19)

# BUG FIX: fit.ridge.testmse was referenced below but never assigned in
# the original script; it is the test MSE at the best lambda.
fit.ridge.testmse <- min(mse)
fit.slr.testmse
fit.ridge.testmse
# Ridge has the lower test MSE. On the scale of the response (1 to 5),
# that difference is meaningful.
|
373c82bf30a6820cba923efb2376ace49285b10e
|
d74844d9ff34a8711d014b04fafb0cb5954e2bba
|
/exploration.r
|
59c5347da9cc6a17f67732c39e7624a158a32413
|
[] |
no_license
|
taraeicher/ProteinAbundanceDnaRna
|
3a2d3e9730f5d5d202ed3c79eacb30fd1a51c38b
|
96bb786686ecda286d3171b154870e0a6ff7a033
|
refs/heads/master
| 2021-03-21T23:50:32.557190
| 2018-02-15T04:39:07
| 2018-02-15T04:39:07
| 121,591,588
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 313
|
r
|
exploration.r
|
# Exploratory correlation plot for the breast-cancer DNA data.
if (!require(corrplot)) {
  install.packages("corrplot")
  library(corrplot)
}

# NOTE(review): the file has an .rds extension but is read with
# read.table(); if it is really a serialized R object, readRDS() is the
# correct reader — confirm the actual file format.
breast_dna <- read.table("C:\\Users\\tarae\\OneDrive\\Documents\\PhD\\BMI7830_Integrative_Methods_of_Bioinformatics_for_Human_Diseases\\dream_challenge_2\\breast_dna_corr.rds", fill = TRUE)

# BUG FIX: the original corrplot() call was missing its closing
# parenthesis (a syntax error) and passed a flattened numeric vector.
# corrplot() expects a (correlation) matrix, so coerce the data frame to
# a numeric matrix first; data.matrix() handles factor/character columns.
corrplot(data.matrix(breast_dna))
|
169f415a81022a2c9187f8273445be2b10d05edf
|
34cc3889d10a02ea1629dd2d1bcbbace8c04c5f5
|
/scratch/makeMetaDataset.r
|
619252cc7bc448ebb9ba38625b7e37f45f5edb82
|
[
"Apache-2.0"
] |
permissive
|
kaneplusplus/dc
|
66cca7bf129c7d651f6cd2b08cd7dda0c8f20340
|
20cf923c464d75b1c78716d105784c5718c44ffc
|
refs/heads/master
| 2020-05-17T07:43:42.303440
| 2014-04-03T14:38:45
| 2014-04-03T14:38:45
| 13,815,109
| 2
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,491
|
r
|
makeMetaDataset.r
|
# Build two CSVs from a PubMed XML export ("rvf_result.xml"):
#   pubmed_rvf.csv      - one metadata row per article
#   title_abstract.csv  - title/abstract text per article
# Requires the XML and foreach packages.
require(XML)
require(foreach)

# NOTE(review): parse errors are silently swallowed by the empty error
# handler — confirm that is intentional.
doc <- xmlParse("rvf_result.xml", error=function(...){})

# One data.frame row per <PubmedArticle> node, rbind-ed together.
x <- foreach(n=getNodeSet(doc, "//PubmedArticle"), .combine=rbind) %do% {
  numAuth <- length(getNodeSet(n, ".//Author"))
  if (length(numAuth) == 0)
    numAuth <- NA
  # NOTE(review): getNodeSet(...)[[1]] errors when the node set is empty,
  # so the length(...) == 0 guards after each extraction below can never
  # fire — they look like dead code. Articles missing any of these nodes
  # will abort the whole loop; verify against real inputs.
  pmid <- xmlValue(getNodeSet(n, ".//PMID")[[1]])
  if (length(pmid) == 0)
    pmid <- NA
  # Author names, joined as "First Last;First Last;..."
  lasts <- xmlSApply(getNodeSet(n, ".//AuthorList//Author//LastName"), xmlValue)
  firsts <- xmlSApply(getNodeSet(n, ".//AuthorList//Author//ForeName"),
    xmlValue)
  authSet <- paste(firsts, lasts, collapse=";")
  if (length(authSet) == 0)
    authSet <- NA
  lang <- xmlValue(getNodeSet(n, ".//Language")[[1]])
  if (length(lang) == 0)
    lang <- NA
  pubTypeSet <- paste(xmlSApply(getNodeSet(n, ".//PublicationType"), xmlValue),
    collapse=";")
  if (length(pubTypeSet) == 0)
    pubTypeSet <- NA
  # Electronic publication date, reassembled from its Y/M/D parts.
  eArticleYear <- xmlValue(
    getNodeSet(n, './/ArticleDate[@DateType="Electronic"]//Year')[[1]])
  eArticleMonth <- xmlValue(
    getNodeSet(n, './/ArticleDate[@DateType="Electronic"]//Month')[[1]])
  eArticleDay <- xmlValue(
    getNodeSet(n, './/ArticleDate[@DateType="Electronic"]//Day')[[1]])
  date <- strptime(paste(eArticleYear, eArticleMonth, eArticleDay, sep="-"),
    format = "%Y-%m-%d")
  if (length(date) == 0)
    date <- NA
  journal <- xmlValue(getNodeSet(n, './/MedlineJournalInfo')[[1]])
  if (length(journal) == 0)
    journal <- NA
  keywordSet <- paste(xmlSApply(getNodeSet(n, './/Keyword'), xmlValue),
    collapse=";")
  if (length(keywordSet) == 0)
    keywordSet <- NA
  l <- list(pmid=pmid, authSet=authSet, lang=lang, pubTypeSet=pubTypeSet,
    date=date, journal=journal, keywordSet=keywordSet)
  data.frame(l, stringsAsFactors=TRUE)
}

# Normalize types/case before writing the metadata CSV.
x$pmid <- as.integer(x$pmid)
x$authSet <- toupper(as.character(x$authSet))
x$lang <- toupper(x$lang)
x$pubTypeSet <- toupper(x$pubTypeSet)
x$journal <- toupper(x$journal)
x$keywordSet <- toupper(as.character(x$keywordSet))
write.csv(x, "pubmed_rvf.csv", row.names=FALSE)

# Second pass: collect title/abstract text per article.
x <- foreach(n=getNodeSet(doc, "//PubmedArticle")) %do% {
  title <- xmlSApply(getNodeSet(n, ".//ArticleTitle"), xmlValue)
  abstract <- xmlSApply(getNodeSet(n, ".//Abstract"), xmlValue)
  if (length(abstract) == 0)
    abstract <- as.character(NA)
  list(title=title, abstract=abstract)
}
title <- Reduce(c, Map(function(x) x$title, x))
abstract <- Reduce(c, Map(function(x) x$abstract, x))
write.csv(data.frame(list(title=title, abstract=abstract)),
  "title_abstract.csv", row.names=FALSE)
|
afc6e8db030044816541d19a87ad370bcddd575d
|
12e3f4c86e41b21e1aced2269c14a267d0bb09b8
|
/clase_13_sept.R
|
dba590bce31e48dcf1fab8c1d0009a8c63a84cd8
|
[] |
no_license
|
ricardomayerb/ico8869
|
ec8e5fc051b2ff819cf1d1635bfdf35fd7be13b3
|
39da698bb573cab23ca7a8a8a0ae0a5bb76af4e1
|
refs/heads/master
| 2020-07-05T10:09:07.678184
| 2019-09-13T17:28:38
| 2019-09-13T17:28:38
| 202,618,938
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,797
|
r
|
clase_13_sept.R
|
# Class notes: evaluating forecast accuracy (uses the fpp2/forecast stack).
library(fpp2)

# General idea: evaluate forecast accuracy:
# First example from the book
# Second, two series from BCCh
# Third, download a financial series using quantmod
# Review the measures and program by hand a hardcoded evaluation of
# Naive or season naive or trend naive against an auto.arima
# Introduce seasonal arima notation
# Then introduce the forecast package functions for accuracy
# Fourth, introduce time series cross validation

# Forecast errors: e_{T+h} = y_{T+h} - \hat{T+h|T}
# We can have as many forecast errors as we have observations in the test set
# MAE, mean(|e_t|)

# Train/test split of the quarterly Australian beer data.
beer2 <- window(ausbeer, start=1992, end=c(2007,4))
beer3 <- window(ausbeer, start=c(2008,1))
fita <- auto.arima(beer2)
fca <- forecast(object = fita, h = 10)

# Accuracy measures computed by hand on the test set.
arima_fc_errors <- beer3 - fca$mean
mae_arima <- mean(abs(arima_fc_errors))
mae_arima
rmse_arima <- sqrt(mean(arima_fc_errors^2))
rmse_arima

# Seasonal differences (lag 4 = one year of quarters) give the scaling
# denominator used for MASE.
diff(beer2, lag = 4)
cbind(beer2, lag(beer2, k = -4))
diff_beer2 <- diff(beer2, lag = 4, differences = 1)
denom <- mean(abs(diff_beer2))
q_arima <- arima_fc_errors/denom   # scaled errors
mase_arima <- mean(abs(q_arima))
mae_arima
rmse_arima
mase_arima

# Compare with the forecast package's built-in accuracy() summary.
accuracy(fca)
cbind(arima_fc_errors, q_arima)

# First h=5 forecast error: fit through 2005Q4, forecast 5 steps ahead,
# compare against the observation for 2007Q1.
beer_01 <- window(ausbeer, start=1992, end=c(2005,4))
ari_01 <- auto.arima(beer_01)
fc_ari_01 <- forecast(ari_01, h=5)
fc_ari_01$mean
pred_ari_05 <- fc_ari_01$mean[5]
pred_ari_05
obs <- window(ausbeer, start=c(2007,1), end=c(2007,1))
fe_ari_01 <- obs - pred_ari_05
fe_ari_01

# For the second h=5 error (training window advanced one quarter)
beer_02 <- window(ausbeer, start=1992, end=c(2006,1))
ari_02 <- auto.arima(beer_02)
fc_ari_02 <- forecast(ari_02, h=5)
fc_ari_02$mean
pred_ari_05 <- fc_ari_02$mean[5]
pred_ari_05
obs <- window(ausbeer, start=c(2007,2), end=c(2007,2))
fe_ari_02 <- obs - pred_ari_05
fe_ari_02
fe_ari_01
|
bb289dc10189ab2e4483b832d491c0417d0b52d0
|
601fbe6791e143a717066b4def5eb9a2666c8c46
|
/R/tycho2.R
|
313db13214feed5a3cd5486950572b997cbc5f7a
|
[
"CC-BY-4.0"
] |
permissive
|
allopole/tycho2
|
f08e2d81ecc67c7e48818cc48093b22ecc0b1f44
|
e4648147c7dfa0ed06374c888e920986d0551fcb
|
refs/heads/master
| 2021-06-04T00:33:27.013827
| 2019-12-10T19:51:22
| 2019-12-10T19:51:22
| 139,588,329
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 13,152
|
r
|
tycho2.R
|
#' Get data from Tycho 2.0 database
#'
#' Calls the Tycho 2.0 database using the Tycho 2.0 web API.
#'
#' \strong{Project Tycho, a repository for global health data}
#'
#' Project Tycho is a repository for global health data in a standardized format compliant with FAIR
#' (Findable, Accessible, Interoperable, and Reusable) guidelines.
#'
#' Version 2.0 of the database currently contains:
#'
#' \itemize{ \item Weekly case counts for 78 notifiable conditions for 50 states and 1284 cities
#' between 1888 and 2014, reported by health agencies in the United States. \item Data for
#' dengue-related conditions for 100 countries between 1955 and 2010, obtained from the World Health
#' Organization and national health agencies. }
#'
#' Project Tycho 2.0 datasets are represented in a standard format registered with FAIRsharing
#' (bsg-s000718) and include standard SNOMED-CT codes for reported conditions, ISO 3166 codes for
#' countries and first administrative level subdivisions, and NCBI TaxonID numbers for pathogens.
#'
#' Precompiled datasets with DOI's are also available for download directly from
#' \href{https://www.tycho.pitt.edu/}{Project Tycho}.
#'
#' See \url{https://www.tycho.pitt.edu/dataset/api/} for a complete documentation of the API.
#'
#' \strong{\code{tycho2()}}
#'
#' \code{tycho2} calls \code{\link{apicall}} with the base URL
#' "https://www.tycho.pitt.edu/api/". If \code{path} is the name of a data field in the Tycho 2.0
#' database, \code{tycho2} will return a dataframe of possible values for the field with additional
#' information. See \url{https://www.tycho.pitt.edu/dataset/api/} for more details. If \code{path}
#' is "query", \code{tycho2} will return a dataframe of case counts with associated variables for
#' the query terms specified. See \url{https://www.tycho.pitt.edu/dataset/api/} for more details.
#' Queries are built from a list of key-value pairs passed to the \code{param} argument, and/or a
#' character vector of query terms (conditions) passed to the \code{queryterms} argument. An account
#' with Project Tycho and an API Key is required to access the database. The API Key can be
#' retrieved from your Tycho account. The API key can be set with the \code{apikey} argument, or
#' passed to \code{param} or \code{queryterms}. Any combination of \code{queryterms}, \code{param}
#' and \code{apikey} can be used.
#'
#' \code{tycho2()} automatically replaces spaces with \code{\%20} in the final URL.
#'
#' To pull large datasets, \code{tycho2()} repeatedly calls the API to retrieve partial datasets in
#' chunks of 5000 records until all the requested data has been received, then outputs a single
#' large dataframe. Therefore, the \code{limit} and \code{offset} querry parameters described in the
#' API do not need to be specified. \code{tycho2()} handles these parameters invisibly.
#'
#' To avoid errors, date ranges should be specified in YYYY-MM-DD format using
#' \code{PeriodStartDate} and \code{PeriodEndDate} query parameters with the \code{>=} and \code{<=}
#' operators. The use of \code{>=} and \code{<=} requires passing dates using the "queryterms"
#' argument.
#'
#' Although the Tycho 2.0 database can be querried directly by passing a manually assembled API call
#' URL to \code{read.csv}, as below...
#'
#' \code{read.csv('https://www.tycho.pitt.edu/api/query?CountryISO=US&ConditionName=Gonorrhea&apikey=YOURAPIKEY')}
#'
#' ...use of \code{tycho2} allows querries to be assembled more flexibly and programmatically.
#'
#' Accessing the Project Tycho API using \code{tycho2} requires an API key, which can be retrieved
#' from your Project Tycho account. You must have a Project Tycho account to receive an API key.
#'
#' The Project Tycho 2.0 database and API are by
#' \href{https://www.tycho.pitt.edu/people/person/49/}{Wilbert van Panhuis} (Principal
#' Investogator), \href{https://www.tycho.pitt.edu/people/person/66/}{Donald Burke} (Principal
#' Investogator), \href{https://www.tycho.pitt.edu/people/person/50/}{Anne Cross} (Database
#' Programmer). Project Tycho is published under a
#' \href{http://creativecommons.org/licenses/by/4.0/}{Creative Commons Attribution 4.0 International
#' Public License}.
#'
#' @param path string (optional). Must be either "query" to perform data queries, or one of the
#' tycho 2.0 database fields to retrieve variable listings.
#' @param params list (optional). A list of query terms in the form
#' \code{list(var1=value1,var2=value2,...)}
#' @param queryterms character vector (optional). Vector of query terms passed as strings in the
#' form \code{c("var1[operator]value1", "var2[operator]value2", ...)}. Dates must be passed this
#' way using the \code{>=} and \code{<=} operators (i.e.
#' \code{queryterms=c("PeriodStartDate>=2000-01-01")})
#' @param apikey string. (required). Your Project Tycho API key. This can also be passed with
#' \code{params} or \code{queryterms}.
#' @param baseurl string. Defaults to "https://www.tycho.pitt.edu/api/".
#' @param fixdates "cdc", "iso", or NULL. If \code{fixdates="cdc"}, PeriodStartDate and PeriodEndDate
#' are rounded to nearest CDC epidemiological week ("epiweek") start and end days
#' (Sunday - Saturday), respectively. If \code{fixdates="cdc"}, PeriodStartDate and PeriodEndDate are
#' rounded to nearest ISO week start and end dates (Monday - Sunday), respectively. CDC epiweeks are
#' used for US data reporting. Elsewhere, epiweeks are synonymous with ISO weeks. Rounding is done
#' with \code{\link{round2wday}}. This param may be necessary because some entries in the Tycho 2.0
#' database have incorrect dates that may be off by one day from the actual epiweek start or end
#' dates. default=NULL.
#' @param start Date, POSIXct, POSIXlt, or character string in "YYYY-MM-DD" format. The start date.
#' If present, overrides "PeriodStartDate" passed to \code{queryterms}. Default = NULL
#' @param end Date, POSIXct, POSIXlt, or character string in "YYYY-MM-DD" format. The end date.
#' If present, overrides "PeriodEndDate" passed to \code{queryterms}. Default = NULL
#'
#' @return dataframe with the following possible columns:
#' \item{$ConditionName}{factor. Name of reported condition as listed in \href{https://doi.org/10.25504/FAIRsharing.d88s6e}{SNOMED-CT}}
#' \item{$ConditionSNOMED}{factor. \href{https://doi.org/10.25504/FAIRsharing.d88s6e}{SNOMED-CT} code for reported condition}
#' \item{$PathogenName}{factor. \href{https://doi.org/10.25504/FAIRsharing.fj07xj}{NCBI Taxonomy} organism name for pathogen causing reported condition}
#' \item{$PathogenTaxonID}{factor. \href{https://doi.org/10.25504/FAIRsharing.fj07xj}{NCBI Taxonomy} identifier for pathogen causing reported condition}
#' \item{$Fatalities}{logical. Counts of reported condition ($CountValue) represent fatalities}
#' \item{$CountryName}{factor. \href{https://www.iso.org/obp/ui/#search/code/}{ISO 3166} English Short Name of country}
#' \item{$CountryCode}{factor. \href{https://www.iso.org/obp/ui/#search/code/}{ISO 3166} 2-letter code for country}
#' \item{$Admin1Name}{factor. \href{https://www.iso.org/standard/63546.html}{ISO 3166-2} Name of first administrative subdivision (such as US state)}
#' \item{$Admin1ISO}{factor. \href{https://www.iso.org/standard/63546.html}{ISO 3166-2} code for first administrative subdivision}
#' \item{$Admin2Name}{factor. \href{http://www.geonames.org/}{Geonames} Placename of second order administrative division}
#' \item{$CityName}{factor. \href{http://www.geonames.org/}{Geonames} Name of populated place}
#' \item{$PeriodStartDate}{Date, format: YYYY-MM-DD. Start date of time interval for which a count was reported}
#' \item{$PeriodEndDate}{Date, format: YYYY-MM-DD. End date of time interval for which a count was reported}
#' \item{$PartOfCumulativeCountSeries}{logical. Count is part of a series of cumulative counts
#' (instead of being part of a series of fixed time interval counts)}
#' \item{$AgeRange}{Ordered factor. Age range in years for which a count was reported e.g. "0-18". Max age = 130}
#' \item{$Subpopulation}{factor. "Civilian", "Military", or "None specified"}
#' \item{$PlaceOfAcquisition}{factor. "Domestic", "Abroad", or NA}
#' \item{$DiagnosisCertainty}{factor. \href{https://doi.org/10.25504/FAIRsharing.d88s6e}{SNOMED-CT}
#' Qualifier for certainty of diagnosis for a count condition: "Definite", "Equivocal", "Possible diagnosis","Probable diagnosis", or NA}
#' \item{$SourceName}{factor. Name of the source (system, database, institution) from which counts
#' were obtained by the Project Tycho team}
#' \item{$CountValue}{integer. The count value.}
#' Variables described in detail here:
#' \url{https://www.tycho.pitt.edu/dataformat/ProjectTychoCustomCompiledDataFormat.pdf}
#
#'
#' @examples
#' \dontrun{
#' # Note: retrive your API key from your Project Tycho account
#'
#' # List of conditions showing "ConditionName", "ConditionSNOMED"
#'
#' TYCHOKEY <- 'some1long2alphanumeric3string'
#' conditions <- tycho2("condition", apikey = TYCHOKEY)
#'
#' # All cases of scarlet fever in California
#'
#' params <- list(ConditionName = "Scarlet fever", Admin1ISO = "US-CA")
#' Scarlet <- tycho2("query", params = params, apikey = TYCHOKEY)
#'
#' # All measles cases in California from 2000 to 2010
#'
#' queryterms <- c(
#' "ConditionName=Measles",
#' "Admin1ISO=US-CA",
#' "PeriodStartDate>=2000-01-01",
#' "PeriodEndDate<=2010-01-01"
#' )
#' Measles_CA_2000_2010 <- tycho2("query", queryterms=queryterms, apikey=TYCHOKEY)
#' }
#'
#' @export
#' @importFrom utils read.csv
#'
tycho2 <- function(path="", params=NULL, queryterms=NULL, apikey=NULL,
                   baseurl="https://www.tycho.pitt.edu/api/",
                   fixdates=NULL, start=NULL, end=NULL){
  # Paging (offset/limit) is managed internally in 5000-record chunks, so
  # any user-supplied paging terms are stripped from the query.
  p <- params
  p$offset <- 0
  p$limit <- 5000
  # BUG FIX: the second subset previously re-filtered `queryterms` instead
  # of `q`, silently discarding the "offset" filter from the line before.
  q <- queryterms[grep("offset", queryterms, invert = TRUE)]
  q <- q[grep("limit", q, invert = TRUE)]
  # `start`/`end` arguments override any PeriodStartDate/PeriodEndDate
  # terms already present in the query.
  if (!is.null(start)) {
    q <- c(
      q[grep("PeriodStartDate", q, invert = TRUE)],
      paste0("PeriodStartDate>=", as.Date(start))
    )
  }
  if (!is.null(end)) {
    q <- c(
      q[grep("PeriodEndDate", q, invert = TRUE)],
      paste0("PeriodEndDate<=", as.Date(end))
    )
  }
  # Fetch the first chunk, then keep advancing the offset until a short
  # (< 5000 row) chunk signals the end of the result set.
  out <- utils::read.csv(apicall(baseurl = baseurl, path = path, params = p,
                                 queryterms = q, apikey = apikey))
  more <- nrow(out) >= 5000
  while (more) {
    p$offset <- p$offset + 5000
    chunk <- utils::read.csv(apicall(baseurl = baseurl, path = path,
                                     params = p, queryterms = q,
                                     apikey = apikey))
    if (nrow(chunk) > 0) {
      out <- rbind(out, chunk)
    }
    more <- nrow(chunk) >= 5000
  }
  # Optional rounding of period dates to epiweek boundaries.
  if (!is.null(fixdates)) {
    if (fixdates %in% c("cdc", "iso")) {
      week.start <- weekdaynumbers[paste0(fixdates, "week.start")]
      week.end <- weekdaynumbers[paste0(fixdates, "week.end")]
    } else {
      # BUG FIX: previously only a warning was issued while `fixdates`
      # stayed non-NULL, so the date-rounding code below then referenced
      # the undefined week.start/week.end and errored. Disable date
      # fixing instead, as the warning text promises.
      warning('"fixdates" must be "cdc", "iso" or NULL. Ignoring "fixdates".')
      fixdates <- NULL
    }
  }
  # Column classes ----
  vars <- colnames(out)
  # Columns that become plain factors (no special level ordering); the
  # per-column if-blocks of the original are collapsed into one loop.
  factor_cols <- c("ConditionName", "ConditionSNOMED", "PathogenName",
                   "PathogenTaxonID", "CountryName", "CountryCode",
                   "Admin1Name", "Admin1ISO", "Admin2Name", "CityName",
                   "DiagnosisCertainty", "SourceName")
  for (v in intersect(factor_cols, vars)) {
    out[[v]] <- as.factor(out[[v]])
  }
  logical_cols <- c("Fatalities", "PartOfCumulativeCountSeries")
  for (v in intersect(logical_cols, vars)) {
    out[[v]] <- as.logical(out[[v]])
  }
  if ("PeriodStartDate" %in% vars) {
    out$PeriodStartDate <- as.Date(out$PeriodStartDate, format = "%Y-%m-%d")
    if (!is.null(fixdates)) {
      out$PeriodStartDate <- round2wday(out$PeriodStartDate, week.start)
    }
  }
  if ("PeriodEndDate" %in% vars) {
    out$PeriodEndDate <- as.Date(out$PeriodEndDate, format = "%Y-%m-%d")
    if (!is.null(fixdates)) {
      out$PeriodEndDate <- round2wday(out$PeriodEndDate, week.end)
    }
  }
  if ("AgeRange" %in% vars) {
    out$AgeRange <- as.ordered(out$AgeRange)
  }
  # Factors with explicit level sets (order matters for these).
  if ("Subpopulation" %in% vars) {
    out$Subpopulation <- factor(out$Subpopulation,
                                levels = c("Civilian", "Military",
                                           "None specified"))
  }
  if ("PlaceOfAcquisition" %in% vars) {
    out$PlaceOfAcquisition <- factor(out$PlaceOfAcquisition,
                                     levels = c("Domestic", "Abroad"))
  }
  if ("CountValue" %in% vars) {
    out$CountValue <- as.integer(out$CountValue)
  }
  return(out)
}
|
d36d3ab5f39e45ea16aca4833ada8ece66bdfcff
|
1543497d67c193543a037abbe77d4f44b52889ea
|
/R/error_plot.R
|
d27e99ea8f717ca9f83d8aa6d03246457a0a58ad
|
[
"MIT"
] |
permissive
|
Yuanproj/IRon
|
81a2c46179f32df18559b45bf235943fc63c4799
|
2ff8f3867c432f85e25f5dd3df1b1773ad8190ce
|
refs/heads/master
| 2023-02-26T04:44:40.210576
| 2021-02-04T21:00:00
| 2021-02-04T21:00:00
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 5,638
|
r
|
error_plot.R
|
# ======================================================================
#' Visualization of comparison between prediction error and item's relevance
#'
#' @description The errorPlot function takes a set of target and predicted values to calculate the deviation as a function of phi and creates a plot with points and an approximate function err(phi).
#'
#' @param trues True target values of a given test set
#' @param preds Predicted values for the test set
#' @param phi.parms The relevance function providing the data points where the pairs of values-relevance are known (use ?phi.control() for more information)
#' @param e Exponent for distance - 1 is linear (mae) and 2 is quadratic (mse)
#' @param thr Relevance threshold (default 0)
#'
#' @return Produces a plot with a visualization comparing prediction error and item's relevance
#'
#' @export
#'
#' @examples
#' \dontrun{
#' library(IRon)
#' require(earth)
#' require(mgcv)
#'
#' data(accel)
#'
#' ind <- sample(1:nrow(accel),0.75*nrow(accel))
#'
#' train <- accel[ind,]
#' test <- accel[-ind,]
#'
#' ph <- phi.control(accel$acceleration)
#'
#' m <- earth::earth(acceleration ~ ., train)
#' preds <- as.vector(predict(m,test))
#'
#' trues <- accel$acceleration
#'
#' errorPlot(trues,preds,phi.parms = ph)
#' }
#'
#'
errorPlot <- function(trues, preds, phi.parms, e=1, thr=0) {
  # A relevance function is required to map true values to relevances.
  if(is.null(phi.parms)) {
    stop("A relevance function is required: Use ?phi.control()")
  }
  phis <- phi(trues, phi.parms)   # relevance of each true value
  err <- abs(trues-preds)^e       # e = 1 -> linear (MAE), e = 2 -> quadratic (MSE)
  df <- data.frame(err=err,phi=phis)
  # BUG FIX: aes() was the only unqualified ggplot2 call in this function,
  # so the plot failed whenever ggplot2 was not attached (the package only
  # imports from it). Namespace-qualify it like the other calls.
  # Only points with relevance >= thr are plotted.
  ggplot2::ggplot(df[df$phi>=thr,], ggplot2::aes(x=phi, y=err)) +
    ggplot2::geom_point() +
    ggplot2::geom_smooth()
}
# ======================================================================
#' 3d Visualization of comparison between true value, prediction error and item's relevance
#'
#' @description This function produces a 3d plot describing the prediction error of a given set pair of true and predicted values. The objective is to allow a 3-way comparison between the true values of a given data set, the predicted value of a certain model, and the relevance of each true value.
#'
#' @param trues Target values from a test set of a given data set. Should be a vector and have the same size as the variable preds
#' @param preds Predicted values given a certain test set of a given data set. Should be a vector and have the same size as the variable preds
#' @param phi.parms The relevance function providing the data points where the pairs of values-relevance are known (use ?phi.control() for more information)
#' @param modelname The name attributed to the prediction model used for caption of the plot - empty by default
#' @param e Exponent for distance - 1 is linear (mae) and 2 is quadratic (mse)
#' @param thr Relevance threshold (default 0)
#' @param absolute Boolean for calculating absolute distance or not (default yes)
#' @param errlim Definition of the error limits for the plot (optional)
#'
#' @return Produces a 3D plot with a visualization comparing prediction error, target value and item's relevance
#' @export
#'
#' @examples
#' \dontrun{
#' library(IRon)
#' require(rpart)
#' require(plot3D)
#'
#' data(accel)
#' form <- acceleration ~ .
#'
#' ind <- sample(1:nrow(accel),0.75*nrow(accel))
#' train <- accel[ind,]
#' test <- accel[-ind,]
#'
#' # In the case of a single plot this is a simple solution
#'
#' phi.parms <- phi.control(accel$acceleration)
#' trues <- accel$acceleration
#'
#' m1 <- rpart::rpart(form,train)
#' p1 <- predict(m1,test)
#'
#' errorPlot3D(trues,p1,phi.parms)
#' errorPlot3D(trues,p1,phi.parms,modelname="Regression Trees")
#' errorPlot3D(trues,p1,phi.parms,modelname="Regression Trees",errlim=c(0,10))
#' errorPlot3D(trues,p1,phi.parms,modelname="Regression Trees",absolute=FALSE)
#'
#' #Example for multiple plots w.r.t. various underlying prediction models
#'
#' require(randomForest)
#' require(e1071)
#' require(earth)
#'
#' m2 <- randomForest::randomForest(form,train)
#' p2 <- predict(m2,test)
#'
#' m3 <- e1071::svm(form,train)
#' p3 <- predict(m3,test)
#'
#' m4 <- earth::earth(form,train)
#' p4 <- as.vector(predict(m4,test))
#'
#' par(mfrow = c(2, 2), # 2x2 layout
#' oma = c(2, 2, 1, 0), # two rows of text at the outer left and bottom margin
#' mar = c(2, 2, 0, 0), # space for one row of text at ticks and to separate plots
#' mgp = c(2, 1, 0), # axis label at 2 rows distance, tick labels at 1 row
#' xpd = NA) # allow content to protrude into outer margin (and beyond)
#'
#' errorPlot3D(trues,p1,phi.parms,modelname="Regression Trees")
#' errorPlot3D(trues,p2,phi.parms,modelname="Random Forests")
#' errorPlot3D(trues,p3,phi.parms,modelname="SVM")
#' errorPlot3D(trues,p4,phi.parms,modelname="MARS")
#' }
errorPlot3D <- function(trues, preds, phi.parms, modelname=NULL, e=1, thr=0, absolute=TRUE, errlim=NULL) {
  # 3D comparison of true value, prediction error and relevance.
  # See the roxygen header above for the full parameter documentation.
  if(is.null(phi.parms)) stop("A relevance function is required. Use ?phi.control()")
  if(!is.vector(trues)) stop("Parameter trues is required to be a vector.")
  if(!is.vector(preds)) stop("Parameter preds is required to be a vector.")
  # FIX: apply the distance exponent `e` -- it was documented ("1 is linear
  # (mae) and 2 is quadratic (mse)") but previously ignored.  With the
  # default e = 1 this is identical to the old behaviour.
  err <- if(absolute) { abs(trues - preds)^e } else { (trues - preds)^e }
  phis <- phi(trues, phi.parms = phi.parms)
  # FIX: honour the relevance threshold `thr` (documented but previously
  # unused), mirroring errorPlot().  Assuming phi() is non-negative (as in
  # errorPlot), the default thr = 0 keeps every point.
  keep <- phis >= thr
  err <- err[keep]
  phis <- phis[keep]
  trues_kept <- trues[keep]
  # Error-axis limits: caller-supplied, or derived from the observed errors.
  elim <- if(is.null(errlim)) {
    if(absolute) {
      c(0, ceiling(max(err)))
    } else {
      c(floor(min(err)), ceiling(max(err)))
    }
  } else { errlim }
  # Scalar condition: plain if/else instead of ifelse().
  mname <- if(is.null(modelname)) "" else modelname
  plot3D::scatter3D(phis, trues_kept, err,
                    cex=1.5, bty="b2", pch=16,
                    colkey = FALSE, col = plot3D::ramp.col(c("grey", "red")),
                    ticktype="detailed", phi=18, type="h", zlim=elim,
                    xlab=expression(phi), ylab="Y", zlab="Error", expand=0.5, main=mname);
}
|
965e5e08cf49ea514a1142e5c91b592804474775
|
7ce35c255fe7506795ff7abc15b5222e582451bb
|
/5-visualizations/stunting/figED10-stunting-flow-line-region.R
|
d5f92cdaf49fab0b40e0c7b906b08df870625786
|
[] |
no_license
|
child-growth/ki-longitudinal-growth
|
e464d11756c950e759dd3eea90b94b2d25fbae70
|
d8806bf14c2fa11cdaf94677175c18b86314fd21
|
refs/heads/master
| 2023-05-25T03:45:23.848005
| 2023-05-15T14:58:06
| 2023-05-15T14:58:06
| 269,440,448
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 5,919
|
r
|
figED10-stunting-flow-line-region.R
|
##########################################
# ki longitudinal manuscripts
# stunting analysis
# figure: line plot showing
# proportion of children who were newly stunted,
# relapsed, recovered
# includes category for mortality
# stratified by region and cohort
# inputs:
# stunt-flow-data-pooled.RDS
# stunt-flow-data-region.RDS
# stunt-flow-data-cohort.RDS
# outputs: UPDATE
# fig-stunt-2-flow-overall--allage-primary.png
# fig-stunt-2-flow-region--allage-primary.png
# figdata-stunt-2-flow-overall--allage-primary.RDS
# figdata-stunt-2-flow-region--allage-primary.RDS
##########################################
# NOTE(review): rm(list=ls()) wipes the caller's workspace -- avoid in
# scripts that may be sourced; kept here to preserve current behaviour.
rm(list=ls())
source(paste0(here::here(), "/0-config.R"))
# cohort data
plot_cohort = readRDS(paste0(res_dir, "stunting/stunt-flow-data-cohort.RDS"))
# plot_region = readRDS(paste0(res_dir, "stunting/stunt-flow-data-region.RDS"))
# pooled data
pooled_data = readRDS(paste0(res_dir, "stunting/stuntflow_pooled_reml.RDS"))
region_data = readRDS(paste0(res_dir, "stunting/stuntflow_pooled_region_reml.RDS"))
# define color palette -----------------------------------------------------
# Start from the PiYG brewer palette (reversed) and override individual
# slots with custom hex colours.
pink_green = rev(brewer.pal(n = 7, name = "PiYG"))
pink_green[3] = "#CDF592"
pink_green[5] = "#EA67AE"
pink_green[4] = "#FFB7DC"
pink_green[6] = "#BF0C6D"
pink_green[7] = "#000000"
# process pooled data -----------------------------------------------------
# Reshape region-pooled estimates to match the cohort-level data: rename
# columns, restrict to <= 15 months, and relabel "Recovered".
pooled <- region_data %>%
  dplyr::select(label, region, agem, nmeas, est, lb, ub) %>%
  mutate(n = NA,
         cohort_country = "Pooled") %>%
  rename(classif = label,
         tot = nmeas,
         percent = est) %>%
  mutate(agem = as.numeric(as.character(agem))) %>%
  filter(agem<=15) %>%
  mutate(classif = ifelse(classif == "Recovered", "Stunting reversed", classif))
# drop rows that pooled over age not in underlying cohort data
# NOTE(review): `x[-which(cond), ]` drops ALL rows when no row matches
# (which() returns integer(0)); verify the conditions always match here.
pooled = pooled[-which(pooled$agem<2 & pooled$classif=="Stunting relapse"),]
pooled = pooled[-which(pooled$agem<1 & pooled$classif=="Stunting reversed"),]
# pooled = pooled[-which(pooled$agem<1 & pooled$classif=="Newly stunted"),]
# Cohort-level data has no pooled bounds; assign each cohort to a region.
plot_cohort= plot_cohort %>% mutate(agem = as.numeric(as.character(agem)),
                     lb = NA,
                     ub = NA) %>%
  mutate(region = case_when(
    cohort_country=="CMC-V-BCS-2002 - INDIA" ~ "South Asia",
    cohort_country=="CMIN Bangladesh93 - BANGLADESH" ~ "South Asia",
    cohort_country=="CMIN Peru89 - PERU" ~ "Latin America",
    cohort_country=="CMIN Peru95 - PERU" ~ "Latin America",
    cohort_country=="CONTENT - PERU" ~ "Latin America",
    cohort_country=="EE - PAKISTAN" ~ "South Asia",
    cohort_country=="GMS-Nepal - NEPAL" ~ "South Asia",
    cohort_country=="Guatemala BSC - GUATEMALA" ~ "Latin America",
    cohort_country=="IRC - INDIA" ~ "South Asia",
    cohort_country=="Keneba - GAMBIA" ~ "Africa",
    cohort_country=="MAL-ED - BANGLADESH" ~ "South Asia",
    cohort_country=="MAL-ED - BRAZIL" ~ "Latin America",
    cohort_country=="MAL-ED - INDIA" ~ "South Asia",
    cohort_country=="MAL-ED - NEPAL" ~ "South Asia",
    cohort_country=="MAL-ED - PERU" ~ "Latin America",
    cohort_country=="MAL-ED - SOUTH AFRICA" ~ "Africa",
    cohort_country=="MAL-ED - TANZANIA" ~ "Africa",
    cohort_country=="PROVIDE - BANGLADESH" ~ "South Asia",
    cohort_country=="ResPak - PAKISTAN" ~ "South Asia",
    cohort_country=="TanzaniaChild2 - TANZANIA" ~ "Africa",
    cohort_country=="TDC - INDIA" ~ "South Asia"
  ))
# combine overall and cohort data -----------------------------------------------------
# Keep only the three incidence categories, suppress estimates based on
# fewer than 50 observations, and drop the "Overall" pseudo-region.
plot_combine = bind_rows(pooled, plot_cohort) %>%
  # drop if small number of obs
  mutate(percent= ifelse(tot<50, NA, percent)) %>% filter(classif!="Never stunted" &
                                   classif!="No longer stunted" &
                                   classif!="Still stunted" &
                                   classif!="Recovered" &
                                   classif!="Not stunted") %>%
  mutate(classif = factor(classif, levels = c(
    "Newly stunted",
    "Stunting relapse",
    "Stunting reversed"))) %>%
  mutate(cohort_country= ifelse(is.na(cohort_country), "Pooled", cohort_country)) %>%
  filter(region!="Overall")
# make plot -----------------------------------------------------
# Thin coloured lines: individual cohorts; thick black line + error bars:
# pooled estimate.  Facets: category (rows) x region (columns).
p_inc = ggplot(plot_combine ,
               aes(x=agem, y = percent))+
  facet_grid(classif ~region, scales = "free") +
  # cohort-specific
  geom_line(aes(group = cohort_country, col = classif), size = 0.5, alpha = 0.75,
            data = plot_combine %>% filter(cohort_country!="Pooled")) +
  # pooled
  geom_line(aes( group = cohort_country), size=1,
            data = plot_combine %>% filter(cohort_country=="Pooled")) +
  # pooled bounds
  geom_errorbar(aes(ymin = lb,
                    ymax = ub,
                    group = cohort_country), size=0.5,
                data = plot_combine %>% filter(cohort_country=="Pooled"),
                width = 0.3) +
  scale_color_manual("", values = c(pink_green[c(4,5)],"#ADDE66")) +
  # scale_x_continuous(limits = c(0,27), breaks = seq(0,27,3), labels = seq(0,27,3)) +
  xlab("Child age, months") +
  ylab("Incidence proportion (%)") +
  theme(legend.position = "bottom",
        panel.grid.minor = element_blank(),
        panel.grid.major.x = element_blank(),
        axis.title.x = element_text(size=14),
        axis.title.y = element_text(size=14),
        legend.text = element_text(size=12)) +
  guides(color = guide_legend(nrow = 1, byrow = TRUE))
p_inc
# save plot and underlying data -----------------------------------------------------
ggsave(p_inc, file=paste0(fig_dir, "stunting/fig-stunt-2-flow-line-region-allage-primary.png"),
       width=6, height=6)
saveRDS(plot_combine, file=paste0(figdata_dir_stunting, "figdata-stunt-2-flow-line-region-allage-primary.RDS"))
|
fce8e6bcf76cf30b00bd7830c5d4c780b499c648
|
62e90dea1e9581331331dbebb657269d2ad6dfd7
|
/man/LdaFuncs-package.Rd
|
dff2c4bcc35982a47a656a8a96b8189417f60d38
|
[] |
no_license
|
NathanWycoff/LdaFuncs
|
c19fec6947730e7fabe8ff7465ca9fa6a200b003
|
a18dbe6bed79e6f5cd48f723827df2562ce2a9b5
|
refs/heads/master
| 2021-01-06T04:01:06.265472
| 2018-03-13T16:47:28
| 2018-03-13T16:47:28
| 99,517,287
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 900
|
rd
|
LdaFuncs-package.Rd
|
\name{LdaFuncs-package}
\alias{LdaFuncs-package}
\alias{LdaFuncs}
\docType{package}
\title{
LdaFuncs
}
\description{
An Implementation of the CVB0 algorithm for inference on Latent Dirichlet Allocation topic models.
}
\details{
Implements weighting for each unique term, allowing words to differ in terms of how much influence they have in both topic formation and document-topic assignment.
}
\author{
Nathan Wycoff
Maintainer: Nathan Wycoff <nathanbrwycoff@gmail.com>
}
\references{
Blei D.M., Ng A.Y., Jordan M.I. (2003).
Latent Dirichlet Allocation. Journal of Machine Learning Research, 3, 993-1022.

Asuncion A., Welling M., Smyth P., Teh Y.W. (2009).
On Smoothing and Inference for Topic Models.
}
\keyword{TopicModeling}
\seealso{
tm, TopicModel
}
\examples{
\dontrun{
## Optional simple examples of the most important functions
## These can be in \dontrun{} and \donttest{} blocks.
}
}
|
8fe1cf943e7ad06b445b56c5f25ad08cf0213f9a
|
d4626615378e45ad5d14e4d1e3b631278475d165
|
/plot3.R
|
dc313848f379f5974645d46c73723c6663aff78c
|
[] |
no_license
|
bhurat/ExData_Plotting1
|
04a8a6f375934a455e694e10411e963140c30e62
|
d8644a7693decd9dc1e83d6d48b116a9c5c5e421
|
refs/heads/master
| 2021-01-18T00:14:40.999275
| 2014-11-09T20:12:28
| 2014-11-09T20:12:28
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 979
|
r
|
plot3.R
|
plot3 <- function() {
  # Plots the three energy sub-metering series over a two-day window of
  # the household power consumption data set.
  #
  # The header row is read separately because the data rows are loaded
  # with skip=, which would otherwise discard the column names.
  datsnames <- read.csv("household_power_consumption.txt", header = TRUE,
                        sep = ";", nrows = 1)
  dats <- read.csv("household_power_consumption.txt", header = TRUE,
                   sep = ";", skip = 66636, nrows = 2880)
  colnames(dats) <- colnames(datsnames)
  # Encode time-of-day as a numeric index and shift the second day by
  # 1440 minutes so the x axis increases monotonically across both days.
  # (Vectorized replacement of the original per-row loop.)
  dats[, 2] <- as.numeric(dats[, 2])
  second_day <- seq_len(nrow(dats)) > 1440
  dats[second_day, 2] <- dats[second_day, 2] + 1440
  # Line plot of all three sub-metering series against time.
  with(dats, {
    plot(Time, Sub_metering_1, type = "l", xaxt = "n", ann = FALSE)
    lines(Time, Sub_metering_2, col = "red")
    lines(Time, Sub_metering_3, col = "blue")
    title(ylab = "Energy sub metering")
    axis(1, c(0, 1440, 2880), labels = c("Thursday", "Friday", "Saturday"))
    # FIX: legend colours now match the plotted series -- sub metering 2
    # is drawn in red and sub metering 3 in blue; they were swapped.
    legend("topright", c("sub metering 1", "sub metering 2", "sub metering 3"),
           lty = c(1, 1, 1), col = c("black", "red", "blue"))
  })
}
|
8b66bad3ae56bc5df166e2a7635ad47bcbab0856
|
70b343f159f3f94043ebc9c116088e04b689a2f4
|
/analyze_predictmrna.R
|
a257df3523f0fb1e9132c9455ac3b0ebee17949b
|
[] |
no_license
|
xwang234/predictmrna
|
579dfcc3ffa7f10dc4a81f80721450f166bee234
|
3c04a536c6364b34e1c21c449673ea58a88ee51e
|
refs/heads/master
| 2021-01-10T01:23:03.143600
| 2016-03-24T03:48:59
| 2016-03-24T03:48:59
| 53,985,050
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 14,900
|
r
|
analyze_predictmrna.R
|
#!/usr/bin/env Rscript
# Builds a 3-level platinum-response label (`group`) for 489 TCGA ovarian
# samples from the clinical table, then fits lasso models of response on
# the measured mRNA matrix.
setwd("/fh/fast/dai_j/CancerGenomics/Ovarian")
load("./mrna_copynumber_methylation_mutation.RData")
library(gdata)
clinic=read.xls("/fh/fast/dai_j/CancerGenomics/Ovarian/TCGAdata/TCGA-OV-Clinical-Table_S1.2.xlsx")
clinic <- clinic[order(clinic[,1]),]
mrnacluster <- mrnacluster[order(mrnacluster[,1]),]
names(clinic)[1] <- "ID"
names(mrnacluster)[1] <- "ID"
# NOTE(review): merge() has no x.all/y.all arguments -- they are silently
# swallowed by `...`; all.x=TRUE/all.y=FALSE were presumably intended.
clinic <- merge(clinic,mrnacluster,by="ID",x.all=T,y.all=F)
load("mrna.RData")
ID <- as.character(names(mrna))
# Response strata from the clinical columns:
#   list1: progressive/stable disease; list2: resistant;
#   list3-5: sensitive, split by platinum-free interval (<12, 12-24, >24 mo).
list1 <- as.character(clinic[clinic[,7]=="PROGRESSIVE DISEASE"| clinic[,7]=="STABLE DISEASE",1])
list2 <- as.character(clinic[clinic[,13]=="Resistant" & clinic[,7]!="PROGRESSIVE DISEASE" & clinic[,7]!="STABLE DISEASE" ,1])
list3 <- as.character(clinic[clinic[,13]=="Sensitive" & !is.na(clinic[,12])& clinic[,12]<12& clinic[,7]!="PROGRESSIVE DISEASE" & clinic[,7]!="STABLE DISEASE",1])
list4 <- as.character(clinic[clinic[,13]=="Sensitive" & !is.na(clinic[,12])& clinic[,12]<=24 & clinic[,12]>=12& clinic[,7]!="PROGRESSIVE DISEASE" & clinic[,7]!="STABLE DISEASE",1])
list5 <-as.character(clinic[clinic[,13]=="Sensitive" & !is.na(clinic[,12])& clinic[,12]>24 & clinic[,7]!="PROGRESSIVE DISEASE" & clinic[,7]!="STABLE DISEASE",1,1])
# group coding: 0 = progressive/stable, 1 = resistant, 2 = sensitive;
# samples in none of the lists stay NA.  489 = number of mRNA samples.
group <- rep(NA,489)
for (i in 1:489) {
if (sum(ID[i]==list1)==1) group[i] <- 0
if (sum(ID[i]==list2)==1) group[i] <- 1
if (sum(ID[i]==list3)==1) group[i] <- 2
if (sum(ID[i]==list4)==1) group[i] <- 2
if (sum(ID[i]==list5)==1) group[i] <- 2
}
# Samples in rows for glmnet; scale each of the 11864 genes to unit
# variance (no centering).
for (i in 1:489) mrna[,i] <- as.numeric(mrna[,i])
mdat <- t(mrna)
for (i in 1:11864) mdat[,i] <- as.numeric(mdat[,i])
for (i in 1:11864) mdat[,i] <- mdat[,i]/sqrt(var(mdat[,i]))
library(glmnet)
x=mdat[!is.na(group),]
y=group[!is.na(group)]
# NOTE(review): the multinomial fit below is immediately overwritten by a
# gaussian fit two lines later -- interactive exploration kept as-is.
fit <- glmnet(x,y,family="multinomial")
cvfit <- cv.glmnet(x,y,family="multinomial")
fit <- glmnet(x,y)
cvfit <- cv.glmnet(x,y,nfolds=length(y))
# Inspect positive coefficients at lambda.min (per response class when the
# coef object is a list of matrices).
test=coef(cvfit,s=cvfit$lambda.min)
test1=as.matrix(test[[1]])
test2=test1[test1[,1]>0,1]
test1=as.matrix(test[[2]])
test2=test1[test1[,1]>0,1]
test1=as.matrix(test[[3]])
test2=test1[test1[,1]>0,1]
glmnetclassify <- function(data, group)
{
  # Fits a multinomial lasso (glmnet) of `group` on the columns of `data`
  # and reports the resubstitution (training-set) misclassification rate.
  #
  # Args:
  #   data: features x samples matrix/data.frame; columns align with `group`.
  #   group: class label per sample; NA-labelled samples are dropped.
  # Returns: list(cvfit, fit, pfit, error).

  # Drop features with any missing value (rowSums is NA for such rows).
  keep <- !is.na(rowSums(data))
  data <- data[keep, ]
  # Samples in rows for glmnet; scale each feature to unit variance
  # (sd(x) == sqrt(var(x))).  seq_len() is safe when data has no rows.
  mdat <- t(data)
  for (i in seq_len(nrow(data))) mdat[, i] <- as.numeric(mdat[, i])
  for (i in seq_len(nrow(data))) mdat[, i] <- mdat[, i] / sd(mdat[, i])
  x <- mdat[!is.na(group), ]
  y <- as.factor(group[!is.na(group)])
  fit <- glmnet(x, y, family = "multinomial")
  # Leave-one-out CV (nfolds == sample count) to pick lambda.
  cvfit <- cv.glmnet(x, y, family = "multinomial", nfolds = length(y))
  # Predictions on the training data at lambda.min; `error` is therefore
  # the apparent error rate, not a cross-validated estimate.
  pfit <- as.integer(predict(fit, x, s = cvfit$lambda.min, type = "class"))
  res <- list(cvfit = cvfit, fit = fit, pfit = pfit,
              error = sum(pfit != y) / length(y))
  return(res)
}
# Multinomial classification on each data source --------------------------
# NOTE(review): predictmrna1se / predictmrnamin and X are created further
# down this (interactive) script; this section assumes they are already in
# the workspace.
# measured mRNA
data=mrna
rescmrna=glmnetclassify(data,group)
# mRNA predicted from genomic features (lambda.1se models)
predictmrna=predictmrna1se
data=predictmrna[,1:489]
rescpredictmrna1se=glmnetclassify(data,group)
# mRNA predicted from genomic features (lambda.min models)
predictmrna=predictmrnamin
data=predictmrna[,1:489]
rescpredictmrnamin=glmnetclassify(data,group)
# raw design matrix X (samples x features), transposed to features x samples
data=t(X) #X from predictmrna.RData
resX=glmnetclassify(data,group)
# measured mRNA and raw features combined
data=rbind(mrna,t(X))
resmrnaX=glmnetclassify(data,group)
glmnetclassify2 <- function(data, group)
{
  # Binomial variant of glmnetclassify(): samples with group == 0 are
  # excluded and a two-class lasso is fit on the remaining labels.
  #
  # Args:
  #   data: features x samples matrix/data.frame; columns align with `group`.
  #   group: class label per sample; 0-labelled and NA samples are dropped.
  # Returns: list(cvfit, fit, pfit, error).

  # Drop features with any missing value.
  keep <- !is.na(rowSums(data))
  data <- data[keep, ]
  # FIX: mark group-0 samples as NA via which(), which drops NA positions.
  # The original `group[group == 0] <- NA` errors when `group` already
  # contains NA (NAs are not allowed in subscripted assignments).
  group[which(group == 0)] <- NA
  # Samples in rows; scale each feature to unit variance.
  mdat <- t(data)
  for (i in seq_len(nrow(data))) mdat[, i] <- as.numeric(mdat[, i])
  for (i in seq_len(nrow(data))) mdat[, i] <- mdat[, i] / sd(mdat[, i])
  x <- mdat[!is.na(group), ]
  y <- as.factor(group[!is.na(group)])
  fit <- glmnet(x, y, family = "binomial")
  # Leave-one-out CV to pick lambda.
  cvfit <- cv.glmnet(x, y, family = "binomial", nfolds = length(y))
  # Resubstitution predictions/error at lambda.min.
  pfit <- as.integer(predict(fit, x, s = cvfit$lambda.min, type = "class"))
  res <- list(cvfit = cvfit, fit = fit, pfit = pfit,
              error = sum(pfit != y) / length(y))
  return(res)
}
# Binomial classification (group 1 vs 2; group 0 excluded) ----------------
data=mrna
resc2mrna=glmnetclassify2(data,group)
predictmrna=predictmrna1se
data=predictmrna[,1:489]
resc2predictmrna1se=glmnetclassify2(data,group)
predictmrna=predictmrnamin
data=predictmrna[,1:489]
resc2predictmrnamin=glmnetclassify2(data,group)
# Retained exploratory ROC/AUC code; superseded by plotroc() below.
# x=mdat[!is.na(group),]
# y=group[!is.na(group)]
# #fit <- glmnet(mdat[!is.na(group),],group[!is.na(group)])
# fit <- glmnet(x,y)
# cvfit <- cv.glmnet(mdat[!is.na(group),],group[!is.na(group)],nfolds=length(group[!is.na(group)]))
#
# cvfit$lambda.1se
# cvfit$lambda.min
#
# sel_set <- which(as.matrix(coef(fit,s=cvfit$lambda.1se))[,1]!=0)
#
# pfit = predict(fit,mdat[!is.na(group),],s=cvfit$lambda.1se,type="response")
# pfit = predict(fit,mdat[!is.na(group),],s=cvfit$lambda.min,type="response")
#
# xx <- cbind(pfit,group[!is.na(group)])
# #xx[,2] <- ifelse(xx[,2]>=1,1,0)
# xx[,2] <- ifelse(xx[,2]>=2,1,0)
# xx <- xx[order(xx[,1]),]
# ss <- xx
# for (i in 1:nrow(xx)) {
#   ss[i,1] <- mean(xx[xx[,2]==0,1]>=xx[i,1])
#   ss[i,2] <- mean(xx[xx[,2]==1,1]>=xx[i,1])
# }
#
# sens=ss[,2]
# spec=ss[,1]
# idx=order(spec)
# spec=spec[idx]
# sens=sens[idx]
# height = (sens[-1]+sens[-length(sens)])/2
# width = diff(spec)
# sum(height*width)
#
# plot(ss[,1],ss[,2],xlab="False positive rate",ylab="True positive rate",type="l",col=3,lwd=4)
# plot(xx)
# Bookkeeping column positions in the predicted-mRNA tables
# (489 sample columns followed by metadata columns).
col_rsquared=490 #column of r-squared
col_cvwork=493 #if cv.glmnet selected variables
col_numvar=491 #number of variables
col_variable=492 #selected variables
formcv <- function(predictmrna)
{
  # Fits a gaussian lasso (glmnet) of the response labels on a (possibly
  # predicted) mRNA matrix and returns the fit, leave-one-out CV result,
  # selected feature sets and fitted values at lambda.min and lambda.1se.
  #
  # NOTE(review): relies on globals from the surrounding script --
  # `group`, `col_rsquared`, `col_numvar`.
  #
  # Args:
  #   predictmrna: genes x samples table.  When it carries bookkeeping
  #     columns (> 489 columns), genes with missing r-squared are dropped
  #     and diagnostic histograms are drawn; only columns 1:489 are used.
  # Returns: list(cv, fit, sel_setmin, sel_set1se, pfitmin, pfit1se).
  if (ncol(predictmrna) > 489)
  {
    print(sum(is.na(predictmrna[, col_rsquared])))
    idxNA <- is.na(predictmrna[, col_rsquared])
    predictmrna1 <- predictmrna[!idxNA, ]
    hist(predictmrna[, col_rsquared], xlab = "Rsquared", main = "")
    hist(predictmrna[, col_numvar], xlab = "Num of features", main = "")
    mean(predictmrna[, col_rsquared], na.rm = TRUE)  # shown when run interactively
  } else
  {
    predictmrna1 <- predictmrna
  }
  # Samples in rows; scale each gene to unit variance (sd == sqrt(var)).
  mdat1 <- t(predictmrna1[, 1:489])
  for (i in seq_len(ncol(mdat1))) mdat1[, i] <- as.numeric(mdat1[, i])
  for (i in seq_len(ncol(mdat1))) mdat1[, i] <- mdat1[, i] / sd(mdat1[, i])
  library(glmnet)
  x1 <- mdat1[!is.na(group), ]
  y <- group[!is.na(group)]
  fit1 <- glmnet(x1, y)
  # Leave-one-out CV (nfolds == sample count) to pick lambda.
  cvfit1 <- cv.glmnet(x1, y, nfolds = length(y))
  print(cvfit1$lambda.1se)
  print(cvfit1$lambda.min)
  # Non-zero-coefficient feature sets at both lambdas.
  sel_set1se <- which(as.matrix(coef(fit1, s = cvfit1$lambda.1se))[, 1] != 0)
  sel_setmin <- which(as.matrix(coef(fit1, s = cvfit1$lambda.min))[, 1] != 0)
  # Fitted values on the training samples.
  pfitmin <- predict(fit1, x1, s = cvfit1$lambda.min, type = "response")
  pfit1se <- predict(fit1, x1, s = cvfit1$lambda.1se, type = "response")
  result <- list(cv = cvfit1, fit = fit1, sel_setmin = sel_setmin,
                 sel_set1se = sel_set1se, pfitmin = pfitmin, pfit1se = pfit1se)
  result  # explicit, visible return value
}
plotroc <- function(pfit)
{
  # Draws an ROC curve for fitted scores `pfit` and prints the AUC
  # (trapezoidal rule).  Positives are samples with group >= 2.
  #
  # NOTE(review): depends on the global `group` from the surrounding
  # script; `pfit` must be ordered like group[!is.na(group)].
  labs <- as.integer(group[!is.na(group)] >= 2)
  xx <- cbind(pfit, labs)
  xx <- xx[order(xx[, 1]), ]
  ss <- xx
  # Hoist the negative/positive score vectors out of the loop.
  neg <- xx[xx[, 2] == 0, 1]
  pos <- xx[xx[, 2] == 1, 1]
  for (i in seq_len(nrow(xx))) {
    ss[i, 1] <- mean(neg >= xx[i, 1])  # false positive rate at this cutoff
    ss[i, 2] <- mean(pos >= xx[i, 1])  # true positive rate at this cutoff
  }
  sens <- ss[, 2]
  spec <- ss[, 1]
  ord <- order(spec)
  spec <- spec[ord]
  sens <- sens[ord]
  # AUC via the trapezoidal rule over (FPR, TPR).
  height <- (sens[-1] + sens[-length(sens)]) / 2
  width <- diff(spec)
  print(sum(height * width))
  plot(ss[, 1], ss[, 2], xlab = "False positive rate",
       ylab = "True positive rate", type = "l", col = 3, lwd = 4)
}
# Fit and evaluate formcv() on measured and predicted mRNA ----------------
resmrna=formcv(mrna)
plotroc(resmrna$pfitmin)
plotroc(resmrna$pfit1se)
# Assemble the 30 per-chunk prediction files (lambda.min and lambda.1se).
predictmrnamin=NULL
for (i in 1:30)
{
  tmp=read.table(file=paste0("./predictmrna/predictedmrnamin_",i,".txt"),header=T,sep="\t")
  predictmrnamin=rbind(predictmrnamin,tmp)
}
predictmrna1se=NULL
for (i in 1:30)
{
  tmp=read.table(file=paste0("./predictmrna/predictedmrna1se_",i,".txt"),header=T,sep="\t")
  predictmrna1se=rbind(predictmrna1se,tmp)
}
# Alternatively load the pre-assembled files (later lines overwrite earlier).
predictmrnamin=read.table(file="./predictmrna/predictedmrnamin_all2.txt",header=T,sep="\t")
predictmrna1se=read.table(file="./predictmrna/predictedmrna1se_all2.txt",header=T,sep="\t")
predictmrnamin=read.table(file="./predictmrna/predictedmrnamin_all4.txt",header=T,sep="\t")
predictmrna1se=read.table(file="./predictmrna/predictedmrna1se_all4.txt",header=T,sep="\t")
predictmrnamin[,col_variable]=as.character(predictmrnamin[,col_variable])
predictmrna1se[,col_variable]=as.character(predictmrna1se[,col_variable])
hist(predictmrnamin$numvariables)
hist(predictmrna1se$numvariables)
hist(predictmrnamin$rsquared)
hist(predictmrna1se$rsquared)
respredictmrnamin=formcv(predictmrnamin)
plotroc(respredictmrnamin$pfitmin)
plotroc(respredictmrnamin$pfit1se)
respredictmrna1se=formcv(predictmrna1se)
plotroc(respredictmrna1se$pfitmin)
plotroc(respredictmrna1se$pfit1se)
#combine 1se and min results
predictmrna=predictmrna1se
idx_cvworkNA=is.na(predictmrna[,col_cvwork])
predictmrna[idx_cvworkNA,]=predictmrnamin[idx_cvworkNA,]
hist(predictmrna$numvariables,main="",xlab="Number of features")
hist(predictmrna$rsquared,main="",xlab="R-squared")
resall1=formcv(predictmrna)
# NOTE(review): result above is stored as `resall1` but read here (and in
# the commented save() below) as `resall` -- likely a typo.
plotroc(resall$pfitmin)
# Filter genes whose model uses too many features ...
maxnumvar=100
idxNA=!is.na(predictmrna$numvariables) & predictmrna$numvariables>=maxnumvar
predictmrna[idxNA,]=rep(NA,ncol(predictmrna))
resnum1=formcv(predictmrna)
plotroc(resnum1$pfitmin)
min(predictmrna$numvariables,na.rm=T)
min(predictmrna$rsquared,na.rm=T)
# ... and genes whose prediction r-squared is too low.
minrsquared=0.1
idxNA=!is.na(predictmrna$rsquared) & predictmrna$rsquared<=minrsquared
predictmrna[idxNA,]=rep(NA,ncol(predictmrna))
resrsquared=formcv(predictmrna)
plotroc(resrsquared$pfitmin)
resrsquarednum=formcv(predictmrna) #consider both variable num and rsquared
plotroc(resrsquarednum$pfitmin)
#save(resmrna,resall,resrsquared,resrsquarednum,resnum,file="analyze_predictmrna0229.RData")
# Overlap of selected feature sets between runs.
test=names(resmrna$sel_set1se) %in% names(resmrna$sel_setmin)
sum(test)
#[1] 27
test=names(resmrna$sel_set1se) %in% names(resall$sel_setmin)
sum(test)
#[1] 9
features=colnames(X)
# Counts, per gene (row of predictmrna), how many selected model features
# fall into each data-type block: copy number (CP: indices 1..10391),
# methylation (ME: 10392..19720) and mutation (MU: 19721..25596).
# Feature indices are stored as a comma-separated string in the
# `col_variable` column.
#
# NOTE(review): the `features` argument is accepted but never used; kept
# for call-site compatibility.
# Returns: data.frame with columns CP/ME/MU, one row per gene.
countseltypes=function(features,predictmrna)
{
  col_variable=492  # column holding the comma-separated feature indices
  numtypes=c(10391,9329,5876)  # block sizes: copy number, methylation, mutation
  # apply() coerces each row to a character vector, so the index column is
  # re-parsed with strsplit()/as.numeric() below.
  res=apply(predictmrna,1,function(x1) {
    result=rep(0,3)
    if (!is.na(x1[[col_variable]]))
    {
      tmp=as.numeric(unlist(strsplit(as.character(x1[[col_variable]]),",")))
      tmp1=sum(tmp<=numtypes[1])  # indices in the copy-number block
      tmp2=sum(numtypes[1]<tmp & tmp<=numtypes[1]+numtypes[2])  # methylation block
      tmp3=sum(tmp>numtypes[1]+numtypes[2])  # mutation block
      if (tmp1>0) result[1]=tmp1
      if (tmp2>0) result[2]=tmp2
      if (tmp3>0) result[3]=tmp3
    }
    return(result)
  })
  # apply() returned 3 x ngenes; transpose back to genes x 3.
  res=data.frame(t(res))
  colnames(res)=c("CP","ME","MU")
  rownames(res)=rownames(predictmrna)
  return(res)
}
# Feature-type composition of the selected models -------------------------
# (Later assignment overwrites the first -- interactive exploration.)
predictmrna=predictmrna1se
predictmrna=predictmrnamin
predictmrna=predictmrna[predictmrna$cvwork==1,]
count1se=countseltypes(features,predictmrna)
hist(count1se[,1],main="Copy number")
hist(count1se[,2],main="Methylation")
hist(count1se[,3],main="Mutation")
hist(rowSums(count1se),main="ALL")
count1se_all=rowSums(count1se)
sum(count1se_all==1)
sum(predictmrna$rsquared<0.1,na.rm=T)
# Per-gene proportion of selected features from each data type.
cp_prop=apply(count1se,1,function(x){x[1]/sum(x)})
me_prop=apply(count1se,1,function(x){x[2]/sum(x)})
mu_prop=apply(count1se,1,function(x){x[3]/sum(x)})
hist(cp_prop,main="Copy number")
hist(me_prop,main="Methylation")
hist(mu_prop,main="Mutation")
# Stacked-by-gene barplot of the type proportions (first 100 genes).
data=data.frame(matrix(NA,nrow=3,ncol=nrow(cor_mrna_copynumber)))
colnames(data)=rownames(cor_mrna_copynumber)
data[1,]=cp_prop
data[2,]=me_prop
data[3,]=mu_prop
colors=c("red","blue","green")
barplot(as.matrix(data[,1:100]), main="", ylab = "Proportion", cex.lab = 1, cex.main = 1, beside=TRUE, col=colors)
legend("topleft", c("CP","ME","MU"), cex=1.3, bty="n", fill=colors)
# Inline repetition of the formcv() logic for one table ------------------
sum(is.na(predictmrna[,col_rsquared]))
# [1] 170
idxNA=is.na(predictmrna[,col_rsquared])
predictmrna1=predictmrna[!idxNA,]
#predictdmrna1[idxNA,1:ncol(mrna)]=mrna[idxNA,]
hist(predictmrna[,col_rsquared],xlab="Rsquared",main="")
hist(predictmrna[,col_numvar],xlab="Num of features",main="")
mean(predictmrna[,col_rsquared],na.rm=T)
#[1] 0.411332
for (i in 1:489) predictmrna1[,i] <- as.numeric(predictmrna1[,i])
mdat1 <- t(predictmrna1[,1:489])
for (i in 1:ncol(mdat1)) mdat1[,i] <- as.numeric(mdat1[,i])
for (i in 1:ncol(mdat1)) mdat1[,i] <- mdat1[,i]/sqrt(var(mdat1[,i]))
library(glmnet)
x1=mdat1[!is.na(group),]
y=group[!is.na(group)]
#fit1 <- glmnet(mdat1[!is.na(group),],group[!is.na(group)])
fit1 <- glmnet(x1,y)
cvfit1 <- cv.glmnet(x1,y)
cvfit1 <- cv.glmnet(mdat1[!is.na(group),],group[!is.na(group)],nfolds=length(group[!is.na(group)]))
#cvfit1 <- cv.glmnet(mdat[!is.na(group),],group[!is.na(group)],nfolds=length(group[!is.na(group)]))
cvfit1$lambda.1se
cvfit1$lambda.min
fit2=glmnet(x1,y,family="multinomial")
cvfit2=cv.glmnet(x1,y,family="multinomial",nfolds=length(group[!is.na(group)]))
cvfit2$lambda.1se
cvfit2$lambda.min
# Selected-feature inspection; each assignment overwrites the previous.
sel_set1 <- which(as.matrix(coef(fit2,s=cvfit2$lambda.min))[,1]!=0)
sel_set1 <- which(as.matrix(coef(fit1,s=cvfit1$lambda.min))[,1]!=0)
sel_set1 <- which(as.matrix(coef(fit1,s=cvfit1$lambda.1se))[,1]!=0)
pfit = predict(fit1,mdat1[!is.na(group),],s=cvfit1$lambda.min,type="response")
# NOTE(review): fit1keep/cvkeep are not defined anywhere in this script.
sel_set2 <- which(as.matrix(coef(fit1keep,s=cvkeep$lambda.min))[,1]!=0)
#use lambda.min
predictdmrnamin=NULL
for (i in 1:30)
{
  tmp=read.table(file=paste0("./predictmrna/predictedmrnamin_",i,".txt"),header=T,sep="\t")
  #tmp=read.table(file=paste0("./predictmrna/predictedmrna1se_",i,".txt"),header=T,sep="\t")
  predictdmrnamin=rbind(predictdmrnamin,tmp)
}
sum(is.na(predictdmrnamin[,col_rsquared]))
# [1] 15
idxNA=is.na(predictdmrnamin[,col_rsquared])
predictdmrnamin1=predictdmrnamin[!idxNA,]
#predictdmrna1[idxNA,1:ncol(mrna)]=mrna[idxNA,]
hist(predictdmrnamin[,col_rsquared],xlab="Rsquared",main="Lambda.min")
mean(predictdmrnamin[,col_rsquared],na.rm=T)
mdat2 <- t(predictdmrnamin1[,1:489])
for (i in 1:ncol(mdat2)) mdat2[,i] <- as.numeric(mdat2[,i])
for (i in 1:ncol(mdat2)) mdat2[,i] <- mdat2[,i]/sqrt(var(mdat2[,i]))
x2=mdat2[!is.na(group),]
y=group[!is.na(group)]
#fit1 <- glmnet(mdat1[!is.na(group),],group[!is.na(group)])
fit2 <- glmnet(x2,y)
cvfit2 <- cv.glmnet(x2,y)
cvfit2 <- cv.glmnet(x2,y,nfolds=length(y))
#cvfit1 <- cv.glmnet(mdat[!is.na(group),],group[!is.na(group)],nfolds=length(group[!is.na(group)]))
cvfit2$lambda.1se
cvfit2$lambda.min
sel_set2 <- which(as.matrix(coef(fit2,s=cvfit2$lambda.min))[,1]!=0)
pfit = predict(fit2,mdat2[!is.na(group),],s=cvfit2$lambda.min,type="response")
#check predicted mrna
load("predictmrna.RData") #contains the X matrix
# Single-gene sanity checks (last assignment wins).
gene="ABCA12"
gene="A4GNT" # 1 variable selected in 1se
gene="ABCB4" # 1se has no variable selected
idx=which(rownames(mrna)==gene)
Ytest=t(as.matrix(mrna[idx,]))
# Y=Y[!idx]
fittest = glmnet(X, Ytest)
cvfittest=cv.glmnet(X,Ytest,nfolds=length(Ytest))
cvfittest$lambda.min
cvfittest$lambda.1se
plot(cvfittest)
cvfittest1=cv.glmnet(X,Ytest)
cvfittest1$lambda.min
cvfittest1$lambda.1se
# NOTE(review): cv.glmnets (plural, with ntime=) is not a glmnet export --
# presumably a project helper for repeated CV; confirm it is loaded.
lambdas =cv.glmnets(X,Ytest,nfolds=10,ntime=10)
sel_settest <- which(as.matrix(coef(fittest,s=lambdas$lambda.1se))[,1]!=0)
sel_settest <- which(as.matrix(coef(fittest,s=cvfittest1$lambda.1se))[,1]!=0)
cvfittest2=cv.glmnet(X,Ytest)
cvfittest2$lambda.min
cvfittest2$lambda.1se
lambdas =cv.glmnets(X,Ytest,nfolds=10,ntime=10)
|
dbf488443723b79f37e7be8d6c812d3e1d6b02dc
|
6ee8e9566a0df86ace46e06fe2778e07bf6e3224
|
/app_latest.R
|
14bf65151311e7990c59a5137a3bab6e92369513
|
[] |
no_license
|
TianaLiang/Palmer-Penguin-Project
|
7553b3e1fe96f0dc715dfbf7918b54f13313271e
|
26ea34446770fc50f49d32ee8eb39e4fbe8f7a3d
|
refs/heads/main
| 2023-06-10T15:01:59.064287
| 2021-06-17T07:16:20
| 2021-06-17T07:16:20
| 377,052,550
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 17,668
|
r
|
app_latest.R
|
library(shiny)
library(dplyr)
library(ggplot2)
# Data shared by the UI and server: school-level statistics tables.
origindata <- readRDS("origindata.RDS")
enrollmentdata <- readRDS("enrollmentdata.RDS")
# Shiny UI: custom CSS, title/intro text, two SAT-score bounds, and one
# tab per statistics category (outputs are rendered in `server`).
ui <- fluidPage(
  # Inline CSS theming for the whole page.
  tags$head(tags$style(
    HTML('
          form.well{
          font-family: "Arial";
          background-color: rgb(105, 232, 251);
          }
          div {
          background-color: rgb(125, 185, 222); #dark blue
          }
          body {
          font-family: "Arial";
          color : black;
          }
          input {
          font-family: "Optima";
          font-weight: bold;
          }
          h1{
          font-family: "Arial";
          color : white;
          text-align: center;
          font-weight: bold;
          }
          h5{
          text-align: center;
          font-size: 15px;
          font-weight: bold;
          }
          p {
          font-size: 17px;
          }
          ')
  )),
  #Page title
  h1("Dream School Wonderland"),
  h5(em("By Team Random")),
  h5(em("Team Members: Ziyue Jin, Tiana Liang, Stella Wang, Pei-Hsun Hsu, Miaosi Tao, Hongkai Lou")),
  br(),
  br(),
  # NOTE(review): "perventile" (typo for "percentile") is inside a
  # user-facing string; left unchanged here.
  p("If you are currently looking for your dream school, you should be able to find useful statistics of geography, enrollment, and research development of those universities in the United States.
    Please first enter a benchmark of a university's 25th perventile SAT score to help narrow down the range of school. Then, you can click on the tabs below to check out different categories of information."),
  hr(),
  # Application title
  titlePanel("US University Statistics"),
  # Lower/upper bounds on the 25th-percentile SAT score used to filter schools.
  numericInput("scores", "25th Percentile SAT Score has to be above of: ", value = 1000), #above this score
  numericInput("scores2", "25th Percentile SAT Score has to be below of: ", value = 1300), #below this score
  hr(),
  br(),
  # One tab per statistics category; output IDs match the server's render*() calls.
  tabsetPanel(
    tabPanel("University List",tableOutput("list")),
    tabPanel("Region Distribution", plotOutput("schoolregion")),
    tabPanel("Urbanization", plotOutput("urbanization")),
    tabPanel("Profile classification", plotOutput("profile"), textOutput("one"), textOutput("two"),
             textOutput("three"),textOutput("four"),textOutput("five"),textOutput("six"),
             textOutput("seven"),textOutput("eight"),textOutput("nine"),textOutput("ten"),
             textOutput("eleven"),textOutput("twelve"),textOutput("thirteen"),textOutput("fourteen"),
             textOutput("fifteen"),textOutput("zero")),
    tabPanel("Enrollment Data", plotOutput("Full_time"),textOutput("Full_title"),tableOutput("Full_low"),plotOutput("transfer"),textOutput("tran_title"),tableOutput("tran_top") ),
    tabPanel("Research Expenditure", plotOutput("research"), tableOutput("top"), textOutput("note"))
  )
)
# Define server logic required to draw a histogram
server <- function(input, output) {
#Uni List
output$list <- renderTable({
if(is.numeric(input$scores) == FALSE |is.numeric(input$scores2) == FALSE) stop("You should enter a numeric input!")
if(input$scores2 < input$scores) stop("The lower bound should be less than the upper bound!")
if (input$scores <0 || input$scores > 1530 || input$scores2 < 0) stop("There is no school satisfying this condition!")
selectdata <- origindata[origindata$SAT >= input$scores & origindata$SAT <= input$scores2, ]
print(selectdata[order(selectdata$SAT, decreasing = TRUE),c(1,4)])
})
#Profile
output$profile <- renderPlot({
if(is.numeric(input$scores) == FALSE |is.numeric(input$scores2) == FALSE) stop("You should enter a numeric input!")
if(input$scores2 < input$scores) stop("The lower bound should be less than the upper bound!")
if (input$scores <0 || input$scores > 1530 ||input$scores2 < 0) stop("There is no school satisfying this condition!")
selectdata <- origindata[origindata$SAT >= input$scores & origindata$SAT <= input$scores2, ]
selectdata$profilef <- factor(selectdata$UGprofile2018, levels = c("0","1","2","3","4","5","6","7","8","9","10","11","12","13","14","15"))
selectprofile <- selectdata %>%
group_by(profilef) %>%
count(profilef)
x<- barplot(selectprofile$n,names.arg = selectprofile$profilef ,main= "Distribution of Schools based on their profile",xlab = "Profile", ylab = "Number of Schools", ylim = c(0,max(selectprofile$n)+30),col=2,family="Arial")
text(x,1.05*selectprofile$n, labels = selectprofile$n)
})
output$one <-renderText({
if(is.numeric(input$scores) == FALSE |is.numeric(input$scores2) == FALSE) stop()
if(input$scores2 < input$scores) stop()
if (input$scores <0 || input$scores > 1530 || input$scores2 < 0) stop()
"0: Not classified"})
output$two <- renderText({
if(is.numeric(input$scores) == FALSE |is.numeric(input$scores2) == FALSE) stop()
if(input$scores2 < input$scores) stop()
if (input$scores <0 || input$scores > 1530 || input$scores2 < 0) stop()
"1: Two-year, higher part-time"})
output$three <- renderText({
if(is.numeric(input$scores) == FALSE |is.numeric(input$scores2) == FALSE) stop()
if(input$scores2 < input$scores) stop()
if (input$scores <0 || input$scores > 1530 || input$scores2 < 0) stop()
"2: Two-year, mixed part/full-time"})
output$four <- renderText({
if(is.numeric(input$scores) == FALSE |is.numeric(input$scores2) == FALSE) stop()
if(input$scores2 < input$scores) stop()
if (input$scores <0 || input$scores > 1530 || input$scores2 < 0) stop()
"3: Two-year, medium full-time"})
output$five <- renderText({
if(is.numeric(input$scores) == FALSE |is.numeric(input$scores2) == FALSE) stop()
if(input$scores2 < input$scores) stop()
if (input$scores <0 || input$scores > 1530 || input$scores2 < 0) stop()
"4: Two-year, higher full-time"})
output$six <- renderText({
if(is.numeric(input$scores) == FALSE |is.numeric(input$scores2) == FALSE) stop()
if(input$scores2 < input$scores) stop()
if (input$scores <0 || input$scores > 1530 || input$scores2 < 0) stop()
"5: Four-year, higher part-time"})
output$seven <- renderText({
if(is.numeric(input$scores) == FALSE |is.numeric(input$scores2) == FALSE) stop()
if(input$scores2 < input$scores) stop()
if (input$scores <0 || input$scores > 1530 || input$scores2 < 0) stop()
"6: Four-year, medium full-time, inclusive, higher transfer-in"})
output$eight <- renderText({
if(is.numeric(input$scores) == FALSE |is.numeric(input$scores2) == FALSE) stop()
if(input$scores2 < input$scores) stop()
if (input$scores <0 || input$scores > 1530 || input$scores2 < 0) stop()
"7: Four-year, medium full-time, inclusive, lower transfer-in"})
output$nine <- renderText({
if(is.numeric(input$scores) == FALSE |is.numeric(input$scores2) == FALSE) stop()
if(input$scores2 < input$scores) stop()
if (input$scores <0 || input$scores > 1530 || input$scores2 < 0) stop()
"8: Four-year, medium full-time, selective, higher transfer-in"})
# Carnegie-classification labels (codes 9-15). Each renderText validates the
# SAT-range inputs first; stop() is called with no message so the output is
# simply blank (rather than an error banner) when the inputs are invalid.
# Fix: the scalar if() conditions used the vectorized `|` and compared against
# FALSE; rewritten with `!` and the short-circuiting `||` (same behavior for
# these scalar inputs, and the idiomatic scalar operator).
output$ten <- renderText({
  if (!is.numeric(input$scores) || !is.numeric(input$scores2)) stop()
  if (input$scores2 < input$scores) stop()
  if (input$scores < 0 || input$scores > 1530 || input$scores2 < 0) stop()
  "9: Four-year, medium full-time , selective, lower transfer-in"
})
output$eleven <- renderText({
  if (!is.numeric(input$scores) || !is.numeric(input$scores2)) stop()
  if (input$scores2 < input$scores) stop()
  if (input$scores < 0 || input$scores > 1530 || input$scores2 < 0) stop()
  "10: Four-year, full-time, inclusive, higher transfer-in"
})
output$twelve <- renderText({
  if (!is.numeric(input$scores) || !is.numeric(input$scores2)) stop()
  if (input$scores2 < input$scores) stop()
  if (input$scores < 0 || input$scores > 1530 || input$scores2 < 0) stop()
  "11: Four-year, full-time, inclusive, lower transfer-in"
})
output$thirteen <- renderText({
  if (!is.numeric(input$scores) || !is.numeric(input$scores2)) stop()
  if (input$scores2 < input$scores) stop()
  if (input$scores < 0 || input$scores > 1530 || input$scores2 < 0) stop()
  "12: Four-year, full-time, selective, higher transfer-in"
})
output$fourteen <- renderText({
  if (!is.numeric(input$scores) || !is.numeric(input$scores2)) stop()
  if (input$scores2 < input$scores) stop()
  if (input$scores < 0 || input$scores > 1530 || input$scores2 < 0) stop()
  "13: Four-year, full-time, selective, lower transfer-in"
})
output$fifteen <- renderText({
  if (!is.numeric(input$scores) || !is.numeric(input$scores2)) stop()
  if (input$scores2 < input$scores) stop()
  if (input$scores < 0 || input$scores > 1530 || input$scores2 < 0) stop()
  "14: Four-year, full-time, more selective, higher transfer-in"
})
output$zero <- renderText({
  if (!is.numeric(input$scores) || !is.numeric(input$scores2)) stop()
  if (input$scores2 < input$scores) stop()
  if (input$scores < 0 || input$scores > 1530 || input$scores2 < 0) stop()
  "15: Four-year, full-time, more selective, lower transfer-in"
})
#SchoolRegion
# Bar chart: school counts by region code within the selected SAT range.
output$schoolregion <- renderPlot({
  if (!is.numeric(input$scores) || !is.numeric(input$scores2)) stop("You should enter a numeric input!")
  if (input$scores2 < input$scores) stop("The lower bound should be less than the upper bound!")
  if (input$scores < 0 || input$scores > 1530 || input$scores2 < 0) stop("There is no school satisfying this condition!")
  selectdata <- origindata[origindata$SAT >= input$scores & origindata$SAT <= input$scores2, ]
  selectdata$regionf <- factor(selectdata$Region, levels = c("US Service schools", "New England", "Mid East", "Great Lakes", "Plains", "Southeast", "Southwest", "Rocky Mountains", "Far West", "Others"))
  selectregion <- selectdata %>%
    group_by(regionf) %>%
    count(regionf)
  x <- barplot(selectregion$n, names.arg = selectregion$regionf, main = "Distribution of Schools based on their region codes", xlab = "Region", ylab = "Number of Schools", ylim = c(0, max(selectregion$n) + 30), col = 5, family = "Arial")
  # Label each bar with its count just above the bar top.
  text(x, 1.05 * selectregion$n, labels = selectregion$n)
})
#Urbanization
# Bar chart: school counts by urbanization level; "{Not available}" rows are
# dropped before counting.
output$urbanization <- renderPlot({
  if (!is.numeric(input$scores) || !is.numeric(input$scores2)) stop("You should enter a numeric input!")
  if (input$scores2 < input$scores) stop("The lower bound should be less than the upper bound!")
  if (input$scores < 0 || input$scores > 1530 || input$scores2 < 0) stop("There is no school satisfying this condition!")
  selectdata <- origindata[origindata$SAT >= input$scores & origindata$SAT <= input$scores2, ]
  selectdata$urbanType <- factor(selectdata$Urban, levels = c("Rural Remote", "Rural Distant", "Rural Fringe", "Town Distant", "Town Remote", "Town Fringe", "Suburb Small", "Suburb Midsize", "Suburb Large", "City Small", "City Midsize", "City Large", "{Not available}"))
  selectdata <- selectdata[selectdata$urbanType != "{Not available}", ]
  selecturban <- selectdata %>%
    group_by(urbanType) %>%
    count(urbanType)
  x <- barplot(selecturban$n, names.arg = selecturban$urbanType, main = "Distribution of Schools based on their urbanization", xlab = "Urbanization", ylab = "Number of Schools", ylim = c(0, max(selecturban$n) + 30), col = 3, family = "Arial")
  text(x, 1.05 * selecturban$n, labels = selecturban$n)
})
#Enrollment
# Scatter of full-time enrollment share vs SAT, with an OLS trend line added
# whenever more than one school is in range.
output$Full_time <- renderPlot({
  if (!is.numeric(input$scores) || !is.numeric(input$scores2)) stop("You should enter a numeric input!")
  if (input$scores2 < input$scores) stop("The lower bound should be less than the upper bound!")
  if (input$scores < 0 || input$scores > 1530 || input$scores2 < 0) stop("There is no school satisfying this condition!")
  # Fix: subset enrollmentdata by its own SAT column. The original indexed it
  # with origindata$SAT, which is only equivalent if both data frames share
  # row order; the transfer handler already uses enrollmentdata$SAT.
  selectdata <- enrollmentdata[enrollmentdata$SAT >= input$scores & enrollmentdata$SAT <= input$scores2, ]
  selectdata$FTP <- selectdata$FT / selectdata$Total_Enroll
  # Drop 0/0 enrollments (NaN share); string comparison kept from original.
  selectdata <- selectdata[selectdata$FTP != "NaN", ]
  plot(selectdata$SAT, selectdata$FTP, main = "Proportion of Full-Time Enrollment", col = "blue", xlab = "SAT score", ylab = "Full-Time Enrollment %", pch = 20, family = "Arial")
  if (nrow(selectdata) != 1) {abline(lm(selectdata$FTP ~ selectdata$SAT), col = "red", lwd = 2)}
})
output$Full_title <- renderText({
  if (!is.numeric(input$scores) || !is.numeric(input$scores2)) stop()
  if (input$scores2 < input$scores) stop()
  if (input$scores < 0 || input$scores > 1530 || input$scores2 < 0) stop()
  "Bottom 5 :"
})
# Table of the five schools with the lowest full-time enrollment share.
output$Full_low <- renderTable({
  if (!is.numeric(input$scores) || !is.numeric(input$scores2)) stop()
  if (input$scores2 < input$scores) stop()
  if (input$scores < 0 || input$scores > 1530 || input$scores2 < 0) stop()
  # Same consistency fix as output$Full_time above: use enrollmentdata$SAT.
  selectdata <- enrollmentdata[enrollmentdata$SAT >= input$scores & enrollmentdata$SAT <= input$scores2, ]
  selectdata$FTP <- selectdata$FT / selectdata$Total_Enroll
  selectdata <- selectdata[selectdata$FTP != "NaN", ]
  names(selectdata)[8] <- "Full Time Enrollment %"
  print(tail(selectdata[order(selectdata$'Full Time Enrollment %', decreasing = TRUE), c(1, 8)], 5))
})
# Scatter of transfer enrollment share vs SAT, with an OLS trend line when
# more than one school is in range.
# Fix throughout this group: scalar if() conditions used the vectorized `|`
# and `== FALSE`; rewritten with `!` and the short-circuiting `||`.
output$transfer <- renderPlot({
  if (!is.numeric(input$scores) || !is.numeric(input$scores2)) stop()
  if (input$scores2 < input$scores) stop()
  if (input$scores < 0 || input$scores > 1530 || input$scores2 < 0) stop()
  selectdata <- enrollmentdata[enrollmentdata$SAT >= input$scores & enrollmentdata$SAT <= input$scores2, ]
  selectdata$TRP <- selectdata$Transfer / selectdata$Total_Enroll
  # Drop 0/0 enrollments (NaN share); string comparison kept from original.
  selectdata <- selectdata[selectdata$TRP != "NaN", ]
  plot(selectdata$SAT, selectdata$TRP, main = "Proportion of Transfer Student Enrollment", col = "blue", xlab = "SAT score", ylab = "Transfer Enrollment %", pch = 20, family = "Arial")
  if (nrow(selectdata) != 1) {abline(lm(selectdata$TRP ~ selectdata$SAT), col = "red", lwd = 2)}
})
output$tran_title <- renderText({
  if (!is.numeric(input$scores) || !is.numeric(input$scores2)) stop()
  if (input$scores2 < input$scores) stop()
  if (input$scores < 0 || input$scores > 1530 || input$scores2 < 0) stop()
  "Top 5 : "
})
# Table of the five schools with the highest transfer enrollment share.
output$tran_top <- renderTable({
  if (!is.numeric(input$scores) || !is.numeric(input$scores2)) stop()
  if (input$scores2 < input$scores) stop()
  if (input$scores < 0 || input$scores > 1530 || input$scores2 < 0) stop()
  selectdata <- enrollmentdata[enrollmentdata$SAT >= input$scores & enrollmentdata$SAT <= input$scores2, ]
  selectdata$TRP <- selectdata$Transfer / selectdata$Total_Enroll
  selectdata <- selectdata[selectdata$TRP != "NaN", ]
  names(selectdata)[8] <- "Transfer Enrollment %"
  print(head(selectdata[order(selectdata$'Transfer Enrollment %', decreasing = TRUE), c(1, 8)], 5))
})
#Research
# Top-10 schools by research expenditure within the SAT range
# (zero-expenditure rows are excluded before ranking).
output$research <- renderPlot({
  if (!is.numeric(input$scores) || !is.numeric(input$scores2)) stop("You should enter a numeric input!")
  if (input$scores2 < input$scores) stop("The lower bound should be less than the upper bound!")
  if (input$scores < 0 || input$scores > 1530 || input$scores2 < 0) stop("There is no school satisfying this condition!")
  selectdata <- origindata[(origindata$SAT >= input$scores) & (origindata$SAT <= input$scores2) & (origindata$Research != 0), ]
  selectdata$researchf <- selectdata$Research
  index <- order(selectdata$researchf, decreasing = TRUE)
  r <- head(selectdata[index, ], n = 10)
  x <- barplot(r$researchf, xaxt = "n",
               main = "Top 10 Schools Based on Research Expenditure", xlab = "Rank", ylab = "Expenditure in dollars",
               col = "salmon", space = 0.08, family = "Arial")
  # NOTE(review): cex.lab is not a text() argument (text() takes cex); this
  # call is kept as-is to preserve the original rendering. Confirm intent.
  text(x, 1, cex.lab = 0.5)
})
# Matching table of the (up to) ten top-expenditure schools.
output$top <- renderTable({
  if (!is.numeric(input$scores) || !is.numeric(input$scores2)) stop()
  if (input$scores2 < input$scores) stop()
  if (input$scores < 0 || input$scores > 1530 || input$scores2 < 0) stop()
  selectdata <- origindata[(origindata$SAT >= input$scores) & (origindata$SAT <= input$scores2) & (origindata$Research != 0), ]
  selectdata$researchf <- selectdata$Research
  indext <- order(selectdata$researchf, decreasing = TRUE)
  t <- head(selectdata[indext, ], n = 10)
  print(t[, c(1, 10)])
})
output$note <- renderText({
  if (!is.numeric(input$scores) || !is.numeric(input$scores2)) stop()
  if (input$scores2 < input$scores) stop()
  if (input$scores < 0 || input$scores > 1530 || input$scores2 < 0) stop()
  "*We would plot all of the schools if there are fewer than ten schools in the input SAT interval."
})
}
# Run the application
shinyApp(ui = ui, server = server)
|
2d0bba29c2d7b3e8127e19335af9039379422659
|
c9e0c41b6e838d5d91c81cd1800e513ec53cd5ab
|
/man/gtkTextIterForwardChars.Rd
|
fe488831eb564c029e15ae1baa62ec705f339c19
|
[] |
no_license
|
cran/RGtk2.10
|
3eb71086e637163c34e372c7c742922b079209e3
|
75aacd92d4b2db7d0942a3a6bc62105163b35c5e
|
refs/heads/master
| 2021-01-22T23:26:26.975959
| 2007-05-05T00:00:00
| 2007-05-05T00:00:00
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 859
|
rd
|
gtkTextIterForwardChars.Rd
|
\alias{gtkTextIterForwardChars}
\name{gtkTextIterForwardChars}
\title{gtkTextIterForwardChars}
\description{Moves \code{count} characters if possible (if \code{count} would move past the
start or end of the buffer, moves to the start or end of the
buffer). The return value indicates whether the new position of
\code{iter} is different from its original position, and dereferenceable
(the last iterator in the buffer is not dereferenceable). If \code{count}
is 0, the function does nothing and returns \code{FALSE}.}
\usage{gtkTextIterForwardChars(object, count)}
\arguments{
\item{\code{object}}{[\code{\link{GtkTextIter}}] an iterator}
\item{\code{count}}{[integer] number of characters to move, may be negative}
}
\value{[logical] whether \code{iter} moved and is dereferenceable}
\author{Derived by RGtkGen from GTK+ documentation}
\keyword{internal}
|
85c15f71497a00c3415d06482c6571020676d9c8
|
db0ae95504a72594012fb708bb2df4014540a319
|
/server.R
|
edae2cd71a277cf5d0896a9aa6b729b9bdf6ae10
|
[] |
no_license
|
RCollins113/Capstone_RShiny
|
07d17570e586c545efce403244d32ca0cf94d746
|
16b6fe805f6750fc681b42c17d517790024fbb54
|
refs/heads/main
| 2023-06-21T14:56:19.611444
| 2021-07-27T20:55:30
| 2021-07-27T20:55:30
| 388,106,689
| 0
| 1
| null | 2021-07-29T05:41:33
| 2021-07-21T12:17:39
|
R
|
UTF-8
|
R
| false
| false
| 3,682
|
r
|
server.R
|
# Install and import required libraries
require(shiny)
require(ggplot2)
require(leaflet)
require(tidyverse)
require(httr)
require(scales)
# Import model_prediction R which contains methods to call OpenWeather API
# and make predictions
source("model_prediction.R")
# Smoke-test generate_city_weather_bike_data(): build the data frame, assert
# it has at least one column, print a preview, and hand it back to the caller.
test_weather_data_generation <- function() {
  weather_bike_df <- generate_city_weather_bike_data()
  stopifnot(length(weather_bike_df) > 0)
  print(head(weather_bike_df))
  weather_bike_df
}
# Create a RShiny server
# Create a RShiny server
shinyServer(function(input, output){
  # Color scale for the three prediction levels (small/medium/large).
  color_levels <- colorFactor(c("green", "yellow", "red"),
                              levels = c("small", "medium", "large"))
  city_weather_bike_df <- test_weather_data_generation()
  # One row per city/forecast combination, carrying the max bike prediction
  # for that city.
  cities_max_bike <- city_weather_bike_df %>%
    group_by(CITY_ASCII, LAT, LNG, BIKE_PREDICTION, BIKE_PREDICTION_LEVEL, LABEL,
             DETAILED_LABEL, FORECASTDATETIME, TEMPERATURE) %>%
    summarize(count = n(),
              max = max(BIKE_PREDICTION, na.rm = TRUE))
  print(cities_max_bike)
  print(factor(cities_max_bike$BIKE_PREDICTION_LEVEL))
  # Vectorized marker-color lookup keyed on prediction level.
  # Fix: the original helpers (myFirstFun/myFirstFun1) used scalar `if` on a
  # whole column, which inspects only the first row with a warning and errors
  # outright on R >= 4.2; ifelse() styles every marker.
  marker_color <- function(level) {
    ifelse(level == "small", "green", ifelse(level == "medium", "yellow", "red"))
  }
  # Vectorized marker-radius lookup (6 / 10 / 12 for small / medium / other).
  marker_radius <- function(level) {
    ifelse(level == "small", 6, ifelse(level == "medium", 10, 12))
  }
  # Leaflet map with one circle marker per city, colored and sized by the
  # predicted demand level, popup showing the short weather LABEL.
  output$city_bike_map <- renderLeaflet({
    leaflet(cities_max_bike) %>%
      addTiles() %>%
      addCircleMarkers(lng = cities_max_bike$LNG, lat = cities_max_bike$LAT,
                       color = marker_color(cities_max_bike$BIKE_PREDICTION_LEVEL),
                       radius = marker_radius(cities_max_bike$BIKE_PREDICTION_LEVEL),
                       popup = cities_max_bike$LABEL)
  })
  # When a single city is picked from the dropdown, highlight it with its
  # detailed popup and draw its temperature trend; "All" leaves the full map.
  observeEvent(input$city_dropdown, {
    if (input$city_dropdown != 'All') {
      leafletProxy("city_bike_map") %>% clearShapes()
      index <- which(cities_max_bike$CITY_ASCII == input$city_dropdown)
      leafletProxy("city_bike_map") %>%
        addCircles(lng = cities_max_bike$LNG[index],
                   lat = cities_max_bike$LAT[index],
                   popup = cities_max_bike$DETAILED_LABEL[index])
      output$temp_line <- renderPlot({
        # Fix: the original line was a syntax error (empty `$` selection and an
        # unclosed ggplot() call) that prevented the file from parsing.
        # NOTE(review): FORECASTDATETIME assumed to be the intended x axis and
        # TEMPERATURE the intended label — confirm against the UI.
        ggplot(city_weather_bike_df, aes(x = FORECASTDATETIME, y = TEMPERATURE)) +
          geom_line() +
          geom_point() +
          geom_text(aes(label = TEMPERATURE))
      })
    }
    else {
      leafletProxy("city_bike_map")
    }
  })
})
|
944bc27a3fa39c6d82d51c4505cc33d0dc97d16e
|
c59a47a59fcafc82249e40c58950db9a9fbf1250
|
/R/b2DownloadFileById.R
|
e2f07688e57ef138dfdcd25d981279b1f0942636
|
[
"LicenseRef-scancode-warranty-disclaimer"
] |
no_license
|
phillc73/backblazer
|
dcd5a9804b9fb3a3a048c67c402e6bd7df0b9169
|
9a1d4fa9998174938b425ff79bda842fe7685195
|
refs/heads/master
| 2021-07-22T01:26:17.566419
| 2016-11-16T20:47:47
| 2016-11-16T20:47:47
| 49,987,062
| 8
| 2
|
NOASSERTION
| 2021-06-30T20:25:09
| 2016-01-19T22:17:01
|
R
|
UTF-8
|
R
| false
| false
| 3,321
|
r
|
b2DownloadFileById.R
|
#' Download B2 File by ID.
#'
#' \code{b2DownloadFileById} downloads a file from the user's account on the
#' Backblaze B2 cloud storage product.
#'
#' This function downloads a file from the user's account on the Backblaze B2
#' cloud storage product using the file's unique ID only. Files of the same name
#' may have multiple versions stored on B2. Therefore, every file version will
#' have a unique ID, which can be used for downloading that specific version.
#' Further details regarding this API call are available here:
#'
#' \url{https://www.backblaze.com/b2/docs/b2_download_file_by_id.html}
#'
#' \code{fileId} is mandatory and must be user defined. \code{overwrite} is
#' optionally user defined and defaults to FALSE.
#'
#' @param fileId The unique identifier of the file to be downloaded. File IDs
#' may be obtained through the \code{b2ListFiles}, \code{b2ListFileVersions}
#' and \code{b2UploadFile} functions in this package.
#' @param overwrite Binary TRUE or FALSE decision to overwrite any files in the
#' current working directory, whose names match the downloaded file name.
#' @return If successful the response headers include the Content-Type that was
#' specified when the file was uploaded. They also include the X-Bz-FileName
#' and X-Bz-Content-Sha1 headers. The X-Bz-FileName uses percent-encoding, as
#' if it were a URL parameter. If successful, the file will be downloaded to
#' the current working directory.
#'
#' @examples
#' \dontrun{
#' b2DownloadFileById(fileId = "Unique_identifier_of_the_file_to_download",
#' overwrite = TRUE)
#' }
#'
#' @export
b2DownloadFileById <- function(fileId, overwrite = FALSE) {
  # Read environment variables holding the authorisation details populated by
  # the package's earlier authorisation step.
  downloadUrl <- Sys.getenv('downloadUrl')
  authorizationToken <- Sys.getenv('authorizationToken')
  # The API expects a JSON object, so wrap the file ID in a one-row data
  # frame and unbox it on serialisation.
  fileId <- as.data.frame(fileId, stringsAsFactors = FALSE)
  # API call: the response body is streamed straight to a temporary file
  # ("tmp") in the current working directory.
  b2Return <-
    httr::POST(
      paste0(downloadUrl, "/b2api/v1/b2_download_file_by_id"),
      body = jsonlite::toJSON(jsonlite::unbox(fileId), pretty = TRUE),
      httr::add_headers(
        'Authorization' = as.character(authorizationToken)
      ), httr::write_disk("tmp", overwrite = overwrite)
    )
  # Alternative GET call
  # b2Return <- httr::GET(url = paste(accountAuthorization$downloadUrl,"/b2api/v1/b2_download_file_by_id?fileId=", fileId, sep=""), add_headers('Authorization' = as.character(accountAuthorization$authorizationToken)), write_disk("tmp", overwrite))
  # Surface any non-200 response as an error carrying B2's code and message.
  if (httr::status_code(b2Return) != "200") {
    badReturn <- jsonlite::fromJSON(httr::content(b2Return, type = "text"))
    stop(
      "\nSomething went wrong. Please check the function options to ensure valid values. \n",
      "\nStatus Code: ", badReturn$code, "\nMessage: ", badReturn$message
    )
  } else {
    # Rename "tmp" to the real file name B2 reports in the response headers.
    # Fix: use the scalar short-circuiting `&&` (the original used the
    # vectorized `&` inside a scalar if()).
    if (file.exists(b2Return$headers$'x-bz-file-name') &&
        !isTRUE(overwrite)) {
      print("Unable to write to disk. File(s) exist and overwrite is set to FALSE")
    }
    else {
      renameResult <-
        file.rename(from = "tmp", to = b2Return$headers$'x-bz-file-name')
      # Output message
      print("File(s) downloaded successfully and saved to disk.")
    }
  }
}
|
414dc4993724db301e50cef7cf1d8175f82643b8
|
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
|
/data/genthat_extracted_code/CondIndTests/examples/InvariantTargetPrediction.Rd.R
|
e5941c948e00316eb8528b04faaf25f75e44d881
|
[] |
no_license
|
surayaaramli/typeRrh
|
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
|
66e6996f31961bc8b9aafe1a6a6098327b66bf71
|
refs/heads/master
| 2023-05-05T04:05:31.617869
| 2019-04-25T22:10:06
| 2019-04-25T22:10:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 876
|
r
|
InvariantTargetPrediction.Rd.R
|
library(CondIndTests)
### Name: InvariantTargetPrediction
### Title: Invariant target prediction.
### Aliases: InvariantTargetPrediction
### ** Examples
# Example 1
# Binary environment E shifts the mean of X; Y is a function of X only.
n <- 1000
E <- rbinom(n, size = 1, prob = 0.2)
X <- 4 + 2 * E + rnorm(n)
Y <- 3 * (X)^2 + rnorm(n)
InvariantTargetPrediction(Y, as.factor(E), X)
# Same data, using the Wilcoxon-based test statistic instead of the default.
InvariantTargetPrediction(Y, as.factor(E), X, test = wilcoxTestTargetY)
# Example 2
# Here Y depends on E directly (not only through X).
E <- rbinom(n, size = 1, prob = 0.2)
X <- 4 + 2 * E + rnorm(n)
Y <- 3 * E + rnorm(n)
InvariantTargetPrediction(Y, as.factor(E), X)
InvariantTargetPrediction(Y, as.factor(E), X, test = wilcoxTestTargetY)
# Example 3
# Continuous environment variable; the calls also swap the roles of X and E.
E <- rnorm(n)
X <- 4 + 2 * E + rnorm(n)
Y <- 3 * (X)^2 + rnorm(n)
InvariantTargetPrediction(Y, E, X)
InvariantTargetPrediction(Y, X, E)
InvariantTargetPrediction(Y, E, X, test = wilcoxTestTargetY)
InvariantTargetPrediction(Y, X, E, test = wilcoxTestTargetY)
|
fbc588ac673aed077269d7b70cef639f755f3264
|
df712880b661e0148f7173d2e5e26fc69f84a07f
|
/R/AcqFunctionSmsEgo.R
|
fa0ecef7dd6db8e4e53cbf00c2211b39a031ff4f
|
[] |
no_license
|
mlr-org/mlr3mbo
|
006de583ae52232e32237e7a8986f65e6bde5cc9
|
520622de10a518b4c52a128b79398960dc7e6e09
|
refs/heads/main
| 2023-06-21T23:12:53.325192
| 2023-06-20T08:01:40
| 2023-06-20T08:01:40
| 212,462,939
| 23
| 1
| null | 2023-08-23T21:20:31
| 2019-10-02T23:44:40
|
R
|
UTF-8
|
R
| false
| false
| 6,712
|
r
|
AcqFunctionSmsEgo.R
|
#' @title Acquisition Function SMS-EGO
#'
#' @include AcqFunction.R
#' @name mlr_acqfunctions_smsego
#'
#' @description
#' S-Metric Selection Evolutionary Multi-Objective Optimization Algorithm Acquisition Function.
#'
#' @section Parameters:
#' * `"lambda"` (`numeric(1)`)\cr
#' \eqn{\lambda} value used for the confidence bound.
#' Defaults to `1`.
#' Based on \code{confidence = (1 - 2 * dnorm(lambda)) ^ m} you can calculate a
#' lambda for a given confidence level, see Ponweiser et al. (2008).
#' * `"epsilon"` (`numeric(1)`)\cr
#' \eqn{\epsilon} used for the additive epsilon dominance.
#' Can either be a single numeric value > 0 or `NULL` (default).
#' In the case of being `NULL`, an epsilon vector is maintained dynamically as
#' described in Horn et al. (2015).
#'
#' @references
#' * `r format_bib("ponweiser_2008")`
#' * `r format_bib("horn_2015")`
#'
#' @family Acquisition Function
#' @export
#' @examples
#' if (requireNamespace("mlr3learners") &
#' requireNamespace("DiceKriging") &
#' requireNamespace("rgenoud")) {
#' library(bbotk)
#' library(paradox)
#' library(mlr3learners)
#' library(data.table)
#'
#' fun = function(xs) {
#' list(y1 = xs$x^2, y2 = (xs$x - 2) ^ 2)
#' }
#' domain = ps(x = p_dbl(lower = -10, upper = 10))
#' codomain = ps(y1 = p_dbl(tags = "minimize"), y2 = p_dbl(tags = "minimize"))
#' objective = ObjectiveRFun$new(fun = fun, domain = domain, codomain = codomain)
#'
#' instance = OptimInstanceMultiCrit$new(
#' objective = objective,
#' terminator = trm("evals", n_evals = 5))
#'
#' instance$eval_batch(data.table(x = c(-6, -5, 3, 9)))
#'
#' learner = default_gp()
#'
#' surrogate = srlrn(list(learner, learner$clone(deep = TRUE)), archive = instance$archive)
#'
#' acq_function = acqf("smsego", surrogate = surrogate)
#'
#' acq_function$surrogate$update()
#' acq_function$progress = 5 - 4 # n_evals = 5 and 4 points already evaluated
#' acq_function$update()
#' acq_function$eval_dt(data.table(x = c(-1, 0, 1)))
#' }
AcqFunctionSmsEgo = R6Class("AcqFunctionSmsEgo",
  inherit = AcqFunction,
  public = list(
    #' @field ys_front (`matrix()`)\cr
    #'   Approximated Pareto front.
    #'   Signs are corrected with respect to assuming minimization of objectives.
    ys_front = NULL,
    #' @field ref_point (`numeric()`)\cr
    #'   Reference point.
    #'   Signs are corrected with respect to assuming minimization of objectives.
    ref_point = NULL,
    #' @field epsilon (`numeric()`)\cr
    #'   Epsilon used for the additive epsilon dominance.
    epsilon = NULL,
    #' @field progress (`numeric(1)`)\cr
    #'   Optimization progress (typically, the number of function evaluations left).
    #'   Note that this requires the [bbotk::OptimInstance] to be terminated via a [bbotk::TerminatorEvals].
    progress = NULL,
    #' @description
    #' Creates a new instance of this [R6][R6::R6Class] class.
    #'
    #' @param surrogate (`NULL` | [SurrogateLearnerCollection]).
    #' @param lambda (`numeric(1)`).
    #' @param epsilon (`NULL` | `numeric(1)`).
    initialize = function(surrogate = NULL, lambda = 1, epsilon = NULL) {
      assert_r6(surrogate, "SurrogateLearnerCollection", null.ok = TRUE)
      assert_number(lambda, lower = 1, finite = TRUE)
      assert_number(epsilon, lower = 0, finite = TRUE, null.ok = TRUE)
      # lambda and epsilon are exposed as ParamSet constants so they can be
      # inspected/overridden after construction.
      constants = ParamSet$new(list(
        ParamDbl$new("lambda", lower = 0, default = 1),
        ParamDbl$new("epsilon", lower = 0, default = NULL, special_vals = list(NULL)) # for NULL, it will be calculated dynamically
      ))
      constants$values$lambda = lambda
      constants$values$epsilon = epsilon
      super$initialize("acq_smsego", constants = constants, surrogate = surrogate, requires_predict_type_se = TRUE, direction = "minimize", label = "SMS-EGO", man = "mlr3mbo::mlr_acqfunctions_smsego") # indeed, we minimize, see comments below about C code
    },
    #' @description
    #' Updates acquisition function and sets `ys_front`, `ref_point`, `epsilon`.
    update = function() {
      if (is.null(self$progress)) {
        stop("$progress is not set.") # needs self$progress here! Originally self$instance$terminator$param_set$values$n_evals - archive$n_evals
      }
      n_obj = length(self$archive$cols_y)
      ys = self$archive$data[, self$archive$cols_y, with = FALSE]
      # Sign-correct all observed objective values so every column is treated
      # as a minimization target.
      for (column in self$archive$cols_y) {
        set(ys, j = column, value = ys[[column]] * self$surrogate_max_to_min[[column]]) # assume minimization
      }
      ys = as.matrix(ys)
      self$ref_point = apply(ys, MARGIN = 2L, FUN = max) + 1 # offset = 1 like in mlrMBO
      self$ys_front = self$archive$best()[, self$archive$cols_y, with = FALSE]
      for (column in self$archive$cols_y) {
        set(self$ys_front, j = column, value = self$ys_front[[column]] * self$surrogate_max_to_min[[column]]) # assume minimization
      }
      self$ys_front = as.matrix(self$ys_front)
      if (is.null(self$constants$values$epsilon)) {
        # The following formula is taken from Horn et al. (2015).
        # Note that the one in Ponweiser et al. 2008 has a typo.
        # Note that nrow(self$ys_front) is correct and mlrMBO has a bug https://github.com/mlr-org/mlrMBO/blob/2dd83601ed80030713dfe0f55d4a5b8661919ce1/R/infill_crits.R#L292
        c_val = 1 - (1 / (2 ^ n_obj))
        epsilon = map_dbl(
          seq_col(self$ys_front),
          function(i) {
            (max(self$ys_front[, i]) - min(self$ys_front[, i])) / (nrow(self$ys_front) + c_val * self$progress)
          }
        )
        self$epsilon = epsilon
      } else {
        self$epsilon = self$constants$values$epsilon
      }
    }
  ),
  private = list(
    # Evaluate the (negative) SMS indicator for candidate points xdt.
    .fun = function(xdt, ...) {
      constants = list(...)
      lambda = constants$lambda
      if (is.null(self$ys_front)) {
        stop("$ys_front is not set. Missed to call $update()?")
      }
      if (is.null(self$epsilon)) {
        stop("$epsilon is not set. Missed to call $update()?")
      }
      if (is.null(self$ref_point)) {
        stop("$ref_point is not set. Missed to call $update()?")
      }
      ps = self$surrogate$predict(xdt)
      means = map_dtc(ps, "mean")
      ses = map_dtc(ps, "se")
      # Lower confidence bound per candidate, sign-corrected to minimization.
      cbs = as.matrix(means) %*% diag(self$surrogate_max_to_min) - lambda * as.matrix(ses)
      # allocate memory for adding points to front for HV calculation in C
      front2 = t(rbind(self$ys_front, 0))
      sms = .Call("c_sms_indicator", PACKAGE = "mlr3mbo", cbs, self$ys_front, front2, self$epsilon, self$ref_point) # note that the negative indicator is returned from C
      data.table(acq_smsego = sms)
    }
  )
)
mlr_acqfunctions$add("smsego", AcqFunctionSmsEgo)
|
38412ce35d348a4c8b6636d57d0f7aad380786ed
|
bd8c41ccf973b2602f01ef2cf772c26ee058e7cb
|
/R/validData.R
|
a2f5a8e380d5e60c2036a9b0737b78ade679e545
|
[] |
no_license
|
cran/RghcnV3
|
5759f30fe96b386faec9e1667c1ede43aca0d72c
|
d2d3dfadbddf5acd9e6c8d05d797d8d8b4b085d2
|
refs/heads/master
| 2021-01-15T22:29:03.926304
| 2012-06-02T00:00:00
| 2012-06-02T00:00:00
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 573
|
r
|
validData.R
|
# Validate a station data object before further processing.
#
# Checks that `Data` is a zoo object, an Mts, or a 3D array, then verifies
# that it has no duplicated stations, a valid month count, and no months in
# which every station is NA. Prints a diagnostic and returns FALSE on the
# first failed check; returns TRUE when all checks pass.
validData <- function(Data){
  # Fix: the scalar if() used the vectorized `|`; replaced with the
  # short-circuiting `||`.
  if (isArray(Data) || isMts(Data) || is.zoo(Data)) {
    if (sum(duplicated(getStations(Data))) != 0) {
      print("duplicated stations")
      return(FALSE)
    }
    if (!validMonthCount(Data)) {
      print("Invalid Month count. USE trimHeadtail")
      return(FALSE)
    }
    if (sum(allNA(Data)) != 0) {
      print("Some Months have no data for any station")
      return(FALSE)
    }
    return(TRUE)
  } else {
    # Fix: corrected typo in the original message ("must me" -> "must be").
    print("Data must be a zoo object or Mts or 3D array")
    return(FALSE)
  }
}
|
4724f58c9cb99a99159a114ede841671da0fbe12
|
806ca2e424e11a5b7eab6c947fc47b5aee8a6c84
|
/R/df-util.R
|
7349b7fef8b17a164f96cad080ae078bd39ea52f
|
[] |
no_license
|
dfyj/wm
|
cefcb9f3c11bd37bef69e4a9e2a24b93f180bb60
|
ecf27eb4349400717a8fdee16754c253c8543ec7
|
refs/heads/master
| 2020-03-24T22:34:01.845300
| 2019-01-26T04:02:12
| 2019-01-26T04:02:12
| 143,091,352
| 0
| 2
| null | 2018-08-01T02:52:48
| 2018-08-01T02:04:38
|
R
|
UTF-8
|
R
| false
| false
| 1,365
|
r
|
df-util.R
|
# data frame related utils ------------------------------------------------
#' group + top_n + sum
#'
#' For each level of `gp`, keep the top `n` rows by `value` and sum that
#' column, returning one row per group with the sum in a column named
#' "top_<n>_<value>".
#'
#' @param df data frame
#' @param gp group_by col (unquoted column name)
#' @param n top n
#' @param value value col to sum (unquoted column name)
#'
#' @return A data frame with one row per group and a top-n sum column.
#' @export
#'
#' @examples
#' df_group_top_n_sum(df, date, 10, weight)
df_group_top_n_sum <- function(df, gp, n, value){
  gp <- rlang::enquo(gp)
  value <- rlang::enquo(value)
  output_col <- stringr::str_c("top", n, rlang::quo_name(value), sep = "_")
  # print(output_col)
  df %>%
    dplyr::group_by(!!gp) %>%
    dplyr::top_n(., n, !!value) %>%
    dplyr::summarise(!! output_col := sum(!!value))
}
#' group + sum
#'
#' Sum `value` within each level of `gp`, returning one row per group with the
#' sum kept under the original column name.
#'
#' @param df data frame
#' @param gp group_by col (unquoted column name)
#' @param value value col to sum (unquoted column name)
#'
#' @return A data frame with one row per group and the summed column.
#' @export
#'
#' @examples
#' df_group_sum(df, date, weight)
df_group_sum <- function(df, gp, value){
  gp <- rlang::enquo(gp)
  value <- rlang::enquo(value)
  output_col <- rlang::quo_name(value)
  # print(output_col)
  df %>%
    dplyr::group_by(!!gp) %>%
    dplyr::summarise(!! output_col := sum(!!value))
}
#' named vector to df
#'
#' Convert a named vector into a two-column tibble: the names in `name_col`
#' and the values in `value_col`. For a matrix input, column names are used
#' when the vector has no names.
#'
#' @param named_vec a named vector (or matrix with column names)
#' @param name_col name of the output column holding the names
#' @param value_col name of the output column holding the values
#'
#' @return A tibble with columns `name_col` and `value_col`.
#' @export
#'
#' @examples
#' named_vector_to_df(c(a = 1, b = 2))
named_vector_to_df <- function(named_vec, name_col = "name", value_col = "value"){
  # Fix: the original took no arguments yet referenced `named_vec`,
  # `name_col` and `value_col` as free variables, so it could never run.
  tibble(
    !!name_col := names(named_vec) %||% colnames(named_vec),
    !!value_col := named_vec %>% as.vector()
  )
}
# df <- tibble(x = 1:5, y = letters[1:5])
# df %>%
#   mutate( = letters[1:5])
1fbeeb91b8b3df12480491d05b57e56bf2488187
|
2df4b0fcee1964c9f37401e50d7abfff7460ccdc
|
/615-Final_Shiny.R
|
43741e2be309c579a17ef7b37e7c2840d7c43564
|
[] |
no_license
|
DonghaoXue/615-Final_Project
|
062c51bc38a867e0370bdd2e3909a3d02f464609
|
0cb55f024cdc1fbd9978821f76efa5ce27b8f39f
|
refs/heads/main
| 2023-02-03T15:14:37.789135
| 2020-12-14T17:27:56
| 2020-12-14T17:27:56
| 321,418,438
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,703
|
r
|
615-Final_Shiny.R
|
library(shiny)
library(knitr)
library(tidyverse)
library(lubridate)
library(bizdays)
library(tidyquant)
# Daily adjusted prices for the three portfolio stocks over the analysis
# window (2020-07-01 through 2020-12-02).
table_stock = tq_get(c("AAPL","GOOG","AMZN"), get = "stock.prices", from = '2020-07-01',to = "2020-12-02")
#get individual asset returns grouped by asset
stock_returns <- c("AAPL", "GOOG", "AMZN") %>%
  tq_get(get = "stock.prices",
         from = "2020-07-01",
         to = "2020-12-02") %>%
  group_by(symbol) %>%
  # Daily returns computed from the adjusted close, one series per symbol.
  tq_transmute(select = adjusted,
               mutate_fun = periodReturn,
               period = "daily",
               col_rename = "Ra")
# Define UI: two tabs — a filterable stock table and a portfolio-growth plot
# driven by three weight sliders.
ui <- fluidPage(
  title = "Investment Project",
  mainPanel(
    tabsetPanel(
      tabPanel("Stock Information",
               # Create a new Row in the UI for selectInputs
               fluidRow(
                 column(4,
                        selectInput("sym",
                                    "Symbol:",
                                    c("All",
                                      unique(as.character(table_stock$symbol))))
                 ),
                 column(4,
                        selectInput("date",
                                    "Date:",
                                    c("All",
                                      unique(as.character(table_stock$date))))
                 )
               ),
               # Create a new row for the table.
               DT::dataTableOutput("table")),
      tabPanel("Portfolio Performance",
               # Per-stock portfolio weights; the plot only renders when they
               # sum to exactly 1 (see the conditionalPanel below).
               column(4, wellPanel(
                 sliderInput("n1", "Weight of AAPL:",
                             min = 0, max = 1, value = 0.25, step = 0.05),
                 sliderInput("n2", "Weight of GOOG:",
                             min = 0, max = 1, value = 0.50, step = 0.05),
                 sliderInput("n3", "Weight of AMZN:",
                             min = 0, max = 1, value = 0.25, step = 0.05)
               )),
               column(5,
                      # NOTE(review): this user-facing string has typos
                      # ("will be not", "euqal"); left untouched here since it
                      # is runtime text — fix deliberately if desired.
                      "The plot below will be not displayed when the sum of Weights is not euqal to 1.",
                      conditionalPanel("input.n1 + input.n2 + input.n3 == 1",
                                       plotOutput("portfolioPlot", height = 700, width = 900)
                      )
               )
      )
    )
  )
)
# Server logic: a filterable data table and a portfolio-growth plot.
server <- function(input, output) {
  # Stock-information table, filterable by symbol and date via the dropdowns;
  # "All" leaves that dimension unfiltered.
  output$table <- DT::renderDataTable(DT::datatable({
    data <- table_stock
    if (input$sym != "All") {
      data <- data[data$symbol == input$sym,]
    }
    if (input$date != "All") {
      data <- data[data$date == input$date,]
    }
    data
  }))
  # Growth of a $250,000 portfolio under the current slider weights (wealth
  # index from tq_portfolio scaled by the initial investment).
  output$portfolioPlot <- renderPlot({
    wts <- c(input$n1, input$n2, input$n3)
    portfolio_growth <- stock_returns %>%
      tq_portfolio(assets_col = symbol,
                   returns_col = Ra,
                   weights = wts,
                   col_rename = "investment.growth",
                   wealth.index = TRUE) %>%
      mutate(investment.growth = investment.growth * 250000)
    portfolio_growth %>%
      ggplot(aes(x = date, y = investment.growth)) +
      geom_line(size = 2, color = palette_light()[[1]]) +
      labs(title = "Portfolio Growth",
           x = "", y = "Portfolio Value") +
      geom_smooth(method = "loess") +
      theme_tq() +
      scale_color_tq() +
      scale_y_continuous(labels = scales::dollar)
  })
}
# Run the application
shinyApp(ui = ui, server = server)
|
07412220bd79d3947412a625abb89680fcce9bed
|
2bec5a52ce1fb3266e72f8fbeb5226b025584a16
|
/ffstream/man/getExistsErrorMessage.Rd
|
9af1e22bc9f95ea92bf148b7c907a69538744dfd
|
[] |
no_license
|
akhikolla/InformationHouse
|
4e45b11df18dee47519e917fcf0a869a77661fce
|
c0daab1e3f2827fd08aa5c31127fadae3f001948
|
refs/heads/master
| 2023-02-12T19:00:20.752555
| 2020-12-31T20:59:23
| 2020-12-31T20:59:23
| 325,589,503
| 9
| 2
| null | null | null | null |
UTF-8
|
R
| false
| true
| 842
|
rd
|
getExistsErrorMessage.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/errorMessage.R
\name{getExistsErrorMessage}
\alias{getExistsErrorMessage}
\title{Create the error message for a certain parameter that does not exist}
\usage{
getExistsErrorMessage(paramName, value = NULL, algoName = NULL,
functionName = "")
}
\arguments{
\item{paramName}{Name of parameter, e.g. \code{lambda}}
\item{value}{The name of the object that does not exist.}
\item{algoName}{Name of algorithm, e.g. \code{AFF change detector}}
\item{functionName}{The name of the original function which is being
called to check the parameters. Will help user
with debugging.}
}
\description{
For a particular parameter, create an error message specifying that
the parameter does not exist (e.g. \code{Na}, which is not the same
as \code{NA}).
}
\keyword{internal}
|
9ffacbcce47207ffe8e5916220a44b3f57b21073
|
b68f039d3e9446439e322a7e5557b6f9df720673
|
/man/FindOverlap.Rd
|
aca55ae9a590ad0c1ccd6f61100176d96b962dcd
|
[] |
no_license
|
kieranrcampbell/SpatialStats
|
9ca0b231b5f0307320239b3e4feb6edff395dd00
|
49ea65afdfcab88f04b5f678765afdeee67b2901
|
refs/heads/master
| 2021-01-18T11:17:23.450562
| 2014-08-29T13:52:49
| 2014-08-29T13:52:49
| 23,077,488
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,104
|
rd
|
FindOverlap.Rd
|
% Generated by roxygen2 (4.0.1): do not edit by hand
\name{FindOverlap}
\alias{FindOverlap}
\title{Given a list of two pathway results, find the overlap between them}
\usage{
FindOverlap(results, remove = c("same", "different"))
}
\arguments{
\item{results}{A list containing two 'pathway' matrices}
\item{remove}{Which results to keep: if \code{"same"} then 1 1 would be reported but 1 2 discarded, but if
\code{"different"} then 1 2 would be reported but 1 1 discarded (useful for finding pathways that don't
go from same component to same component)}
}
\value{
A matrix showing common pathways.
}
\description{
The result of the spatial pathway identification is a two-column matrix, where each row represents the numeric
identifiers of the causal direction of the pathway, so
1 4
3 2
would indicate there is a pathway from component 1 to 4, and from 3 to 2. If we use multiple methods to find these,
it is useful to find the overlap - pathways reported from both methods. This method does that, effectively taking the
intersection of the two matrices and returning the results in a similar matrix.
}
|
e847c19c3c63fcfd350a07468b26f2eb209e3de3
|
6cfede497caf67b5a1e4745b56b029e5ccce128f
|
/2020/2020-05 SCS Regional Forecast/R/household_units.R
|
286efb6f008c416ec1acefccf3dea5d053c31dca
|
[] |
no_license
|
SANDAG/QA
|
3bce623e269c745cd7c60933be8d81bab14a0e27
|
37edb55a7e79f205d44b67eb18e6474689268477
|
refs/heads/master
| 2023-08-19T10:47:05.145384
| 2023-08-17T15:57:01
| 2023-08-17T15:57:01
| 138,326,659
| 6
| 3
| null | 2023-02-24T22:07:34
| 2018-06-22T16:48:58
|
Jupyter Notebook
|
UTF-8
|
R
| false
| false
| 6,024
|
r
|
household_units.R
|
#
# This report summarizes select QA findings of the 2020 SCS Forecast review (2020-05).
#
# Tests included in this report are:
#
# Test #3: Household Units (Performance Analysis)
#
# Thorough descriptions of these tests may be found here:
# https://sandag.sharepoint.com/qaqc/_layouts/15/Doc.aspx?sourcedoc={17b5d132-f152-49f3-8c62-0b2791a26dd4}&action=edit&wd=target%28Test%20Plan.one%7Cdb0a3336-7736-46dc-afd0-8707889af1a0%2FOverview%7C5e8c7ac7-61e5-40fa-8d9c-6ae544b57ed1%2F%29
#set up
maindir = dirname(rstudioapi::getSourceEditorContext()$path)
setwd(maindir)
#load required packages
require(data.table)
source("readSQL.R")
source("common_functions.R")
packages <- c("RODBC","tidyverse","openxlsx","hash","readtext")
pkgTest(packages)
# connect to database
channel <- odbcDriverConnect('driver={SQL Server}; server=sql2014a8; database=demographic_warehouse; trusted_connection=true')
#Test 3 Data Prep
#load mgra dimension table
d_mgra <- data.table::as.data.table(
RODBC::sqlQuery(channel,
paste0("SELECT [mgra_id]
,[mgra]
,[series]
,[zip]
,[cpa]
,[jurisdiction]
,[jurisdiction_id]
FROM [demographic_warehouse].[dim].[mgra_denormalize]
GROUP BY [mgra_id]
,[mgra]
,[series]
,[zip]
,[cpa]
,[jurisdiction]
,[jurisdiction_id]"),
stringsAsFactors = FALSE),
stringsAsFactors = FALSE)
#select only series 14
d_mgra<- subset(d_mgra,d_mgra$series==14)
#aggregate to mgra level (not mgra_id)
#d_mgra<- d_mgra %>% distinct(mgra, .keep_all = TRUE)
#load reference table used by Nick to apply changes to household units
input_hhunits<- data.table::as.data.table(
RODBC::sqlQuery(channel,
paste0("SELECT [parcel_id]
,[mgra]
,[mohub]
,[tier]
,[score]
,[subtier]
,[cap_scs]
,[cap_jurisdiction_id]
,[scs_site_id]
,[startdate]
,[compdate]
,[scenario_cap]
,[cap_priority]
FROM [urbansim].[urbansim].[scs_parcel]"),
stringsAsFactors = FALSE),
stringsAsFactors = FALSE)
#load SCS forecast household data
datasource_id<- 36
hh <- readDB("../queries/households.sql",datasource_id)
#select only years of interest from SCS forecast dataset
hh<-subset(hh, hh$yr_id==2018 | hh$yr_id==2050)
#convert to data table
hh<-as.data.table(hh)
#check subtotals
hh_50<-subset(hh, hh$yr_id==2050)
sum(hh_50$units)
#merge in [mgra] variable to allow for crosswalk to input data
hh<-merge(hh,
d_mgra[ , c("mgra_id", "mgra", "jurisdiction", "jurisdiction_id")],
by= "mgra_id")
#aggregate SCS forecast data to the [mgra] (not [mgra_id]) level for analysis
hh_agg<-hh[ , list(
mgra_id,
mgra,
datasource_id,
yr_id,
units,
jurisdiction,
jurisdiction_id),
by="mgra"]
#check subtotals
hh_agg_18<-subset(hh_agg, hh_agg$yr_id==2018)
hh_agg_50<-subset(hh_agg, hh_agg$yr_id==2050)
sum(hh_agg_50$units)
#create aggregate table by mgra for 2018 and 2050
hh_agg<-as.data.table(hh_agg)
hh_reshape<- hh_agg[, list(
units_2018=units[yr_id==2018],
units_2050=units[yr_id==2050]),
by="mgra"]
hh_reshape2<- aggregate(hh_reshape,
by=list(hh_reshape$mgra),
FUN=sum)
#check subtotals
sum(hh_reshape2$units_2050)
#merge in jurisdiction
# hh_reshape3<- merge(hh_reshape2,
# d_mgra[ , c("mgra","jurisdiction")],
# by="mgra")
#check subtotals
sum(hh_reshape$units_2050)
#wrangle hh_inputs to have one record per mgra for merging
hh_inputs_agg<-aggregate(scenario_cap ~mgra+tier, data=input_hhunits, sum)
#merge scs forecast data to inputs data
hh_merged<-merge(hh_reshape2,
hh_inputs_agg,
by.x= "Group.1",
by.y= "mgra",
all.x=TRUE)
#check subtotals
sum(hh_merged$units_2050)
#calculate difference between 2050 and 2018 units
hh_merged$N_diff<- hh_merged$units_2050-hh_merged$units_2018
#apply flag to indicate relationship between expected change and actual change in units
hh_merged$change[hh_merged$N_diff==hh_merged$scenario_cap]<- "Exact capacity change"
hh_merged$change[hh_merged$N_diff<hh_merged$scenario_cap]<- "Less than capacity"
hh_merged$change[hh_merged$N_diff>hh_merged$scenario_cap]<- "Greater than capacity"
hh_merged$change[hh_merged$N_diff==0]<- "No Change"
#save out data file
write.csv(hh_merged, "C://Users//kte//OneDrive - San Diego Association of Governments//QA temp//SCS//housing_units_mgra_2018base.csv")
########################################################################################################
#Generate output aggregated at the jurisdiction level
hh_agg_jur<- aggregate(units~jurisdiction_id+yr_id, data=hh,sum)
input_agg_jur<-aggregate(scenario_cap ~cap_jurisdiction_id, data=input_hhunits, sum)
hh_jur<- dcast(hh_agg_jur, jurisdiction_id~yr_id, value.var="units")
#check totals
sum(hh_jur$`2050`)
#merge in scs capacity
hh_jur<- merge(hh_jur,
input_agg_jur,
by.x="jurisdiction_id",
by.y="cap_jurisdiction_id")
#calculate difference between 2050 and 2018 units
hh_jur$N_diff<- hh_jur$`2050`-hh_jur$`2018`
#apply flag to indicate relationship between expected change and actual change in units
hh_jur$change[hh_jur$N_diff==hh_jur$scenario_cap]<- "Exact capacity change"
hh_jur$change[hh_jur$N_diff<hh_jur$scenario_cap]<- "Less than capacity"
hh_jur$change[hh_jur$N_diff>hh_jur$scenario_cap]<- "Greater than capacity"
hh_jur$change[hh_jur$N_diff==0]<- "No Change"
#save out data file
write.csv(hh_jur, "C://Users//kte//OneDrive - San Diego Association of Governments//QA temp//SCS//housing_units_jur_2018base.csv")
#clean up
rm(list="hh", "d_mgra", "hh_agg", "hh_inputs_agg", "hh_reshape", "input_hhunits")
|
b35b9d98349de3f214bc5bb77a8d1f45b388f9f0
|
c88b0cbeda0edf9e745e324ef942a504e27d4f87
|
/Budongo cognition/fileLoader.R
|
a34055ea49bdb603170ce132b92076d5f98cdef8
|
[] |
no_license
|
Diapadion/R
|
5535b2373bcb5dd9a8bbc0b517f0f9fcda498f27
|
1485c43c0e565a947fdc058a1019a74bdd97f265
|
refs/heads/master
| 2023-05-12T04:21:15.761115
| 2023-04-27T16:26:35
| 2023-04-27T16:26:35
| 28,046,921
| 2
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 7,360
|
r
|
fileLoader.R
|
# script for importing excel files
library(Rcpp)
library(plyr)
rbind.all.columns <- function(x, y) {
  # Row-bind two data frames whose column sets may differ.
  #
  # Columns present in only one of the inputs are added to the other and
  # filled with NA before binding, so the result carries the union of the
  # two column sets.
  #
  # Args:
  #   x, y: data frames (possibly with different columns).
  # Returns:
  #   A data frame with nrow(x) + nrow(y) rows and the union of the columns.
  x.diff <- setdiff(colnames(x), colnames(y))
  y.diff <- setdiff(colnames(y), colnames(x))
  # Guard the assignments so an empty diff is a clean no-op.
  if (length(y.diff) > 0) {
    x[, y.diff] <- NA
  }
  if (length(x.diff) > 0) {
    y[, x.diff] <- NA
  }
  # BUG FIX: the original called rbind(x, y, header = FALSE). rbind() has no
  # `header` argument, so `header = FALSE` fell into `...` and was bound as an
  # extra bogus row (FALSE recycled across every column, rowname "header").
  rbind(x, y)
}
###
# personality
###
setwd('C:/Users/s1229179/GitHub/R/Budongo cognition/')
# pers <- read.csv('EDIpersonality.csv',header=TRUE)
## in the interest of maintaining consistency between experiment analyses
## the preferred way to get the personality composites is to load them
## from the scaled copies created for the RS analyses
pers <- read.csv('RS cognition/scaledPers.csv',header=TRUE)
# this removes the need to run perscoring_chimp.R
#
# Go directly to perf_associat.R (which has made a few modifications made)
###
# cognition
###
setwd('Z:/chimp cognitive data/raw/')
# Problems with
# - Kindia
# - David
# - Lucy
temp = list.files(pattern="*.csv")
# Kilimi
KLlist = grep('kl',temp,ignore.case=TRUE)
KLdata = read.csv(temp[KLlist[1]], header=TRUE)
KLlist = KLlist[-1]
for (i in 1:length(KLlist)){
KLdata <- rbind(KLdata,read.csv(temp[KLlist[i]]), header=FALSE)
}
KLattempt <- subset(KLdata, KLdata[ , 1] < 1)
KLdata <- subset(KLdata, KLdata[ , 1] > 0)
# Emma
EMlist = grep('em',temp,ignore.case=TRUE)
EMdata = read.csv(temp[EMlist[1]], header=TRUE)
EMlist = EMlist[-1]
for (i in 1:length(EMlist)){
EMdata <- rbind(EMdata,read.csv(temp[EMlist[i]]), header=FALSE)
}
EMattempt <- subset(EMdata, EMdata[ , 1] < 1)
EMdata <- subset(EMdata, EMdata[ , 1] > 0)
# Cindy
CIlist = grep('CI',temp,ignore.case=TRUE)
CIdata = read.csv(temp[CIlist[1]], header=TRUE)
CIlist = CIlist[-1]
for (i in 1:length(CIlist)){
CIdata <- rbind(CIdata,read.csv(temp[CIlist[i]]), header=FALSE)
}
CIattempt <- subset(CIdata, CIdata[ , 1] < 1)
CIdata <- subset(CIdata, CIdata[ , 1] > 0)
# Frek
FKlist = grep('FK',temp,ignore.case=TRUE)
FKdata = read.csv(temp[FKlist[1]], header=TRUE)
FKlist = FKlist[-1]
for (i in 1:length(FKlist)){
FKdata <- rbind(FKdata,read.csv(temp[FKlist[i]]), header=FALSE)
}
FKattempt <- subset(FKdata, FKdata[ , 1] < 1)
FKdata <- subset(FKdata, FKdata[ , 1] > 0)
# Liberius
LBlist = grep('LB',temp,ignore.case=TRUE)
LBdata = read.csv(temp[LBlist[1]], header=TRUE)
LBlist = LBlist[-1]
for (i in 1:length(LBlist)){
LBdata <- rbind(LBdata,read.csv(temp[LBlist[i]]), header=FALSE)
}
LBattempt <- subset(LBdata, LBdata[ , 1] < 1)
LBdata <- subset(LBdata, LBdata[ , 1] > 0)
# Pearl
PElist = grep('PE',temp,ignore.case=TRUE)
PEdata = read.csv(temp[PElist[1]], header=TRUE)
PElist = PElist[-1]
for (i in 1:length(PElist)){
PEdata <- rbind(PEdata,read.csv(temp[PElist[i]]), header=FALSE)
}
PEattempt <- subset(PEdata, PEdata[ , 1] < 1)
PEdata <- subset(PEdata, PEdata[ , 1] > 0)
# Eva
EVlist = grep('EV',temp,ignore.case=TRUE)
EVdata = read.csv(temp[EVlist[1]], header=TRUE)
EVlist = EVlist[-1]
for (i in 1:length(EVlist)){
EVdata <- rbind(EVdata,read.csv(temp[EVlist[i]]), header=FALSE)
}
EVattempt <- subset(EVdata, EVdata[ , 1] < 1)
EVdata <- subset(EVdata, EVdata[ , 1] > 0)
# David
DAlist = grep('DA',temp,ignore.case=TRUE)
DAdata = read.csv(temp[DAlist[1]], header=TRUE)
DAlist = DAlist[-1]
for (i in 1:length(DAlist)){
DAdata <- rbind(DAdata,read.csv(temp[DAlist[i]]), header=FALSE)
}
DAattempt <- subset(DAdata, DAdata[ , 1] < 1)
DAdata <- subset(DAdata, DAdata[ , 1] > 0)
# Sophie
SOlist = grep('SO',temp,ignore.case=TRUE)
SOdata = read.csv(temp[SOlist[1]], header=TRUE)
SOlist = SOlist[-1]
for (i in 1:length(SOlist)){
SOdata <- rbind(SOdata,read.csv(temp[SOlist[i]]), header=FALSE)
}
SOattempt <- subset(SOdata, SOdata[ , 1] < 1)
SOdata <- subset(SOdata, SOdata[ , 1] > 0)
# Lucy
LUlist = grep('LU',temp,ignore.case=TRUE)
LUdata = read.csv(temp[LUlist[1]], header=TRUE)
LUlist = LUlist[-1]
for (i in 1:length(LUlist)){
LUdata <- rbind(LUdata,read.csv(temp[LUlist[i]]), header=FALSE)
}
LUattempt <- subset(LUdata, LUdata[ , 1] < 1)
LUdata <- subset(LUdata, LUdata[ , 1] > 0)
# Rene
RElist = grep('RE',temp,ignore.case=TRUE)
REdata = read.csv(temp[RElist[1]], header=TRUE)
RElist = RElist[-1]
for (i in 1:length(RElist)){
REdata <- rbind(REdata,read.csv(temp[RElist[i]]), header=FALSE)
}
REattempt <- subset(REdata, REdata[ , 1] < 1)
REdata <- subset(REdata, REdata[ , 1] > 0)
# Kindia
KDlist = grep('KD',temp,ignore.case=TRUE)
KDdata = read.csv(temp[KDlist[1]], header=TRUE)
KDlist = KDlist[-1]
for (i in 1:length(KDlist)){
KDdata <- rbind(KDdata,read.csv(temp[KDlist[i]]), header=FALSE)
}
KDattempt <- subset(KDdata, KDdata[ , 1] < 1)
KDdata <- subset(KDdata, KDdata[ , 1] > 0)
# Paul
PAlist = grep('PA',temp,ignore.case=TRUE)
PAdata = read.csv(temp[PAlist[1]], header=TRUE)
PAlist = PAlist[-1]
for (i in 1:length(PAlist)){
PAdata <- rbind(PAdata,read.csv(temp[PAlist[i]]), header=FALSE)
}
PAattempt <- subset(PAdata, PAdata[ , 1] < 1)
PAdata <- subset(PAdata, PAdata[ , 1] > 0)
# Qafzeh
Qlist = grep('Q',temp,ignore.case=TRUE)
#needs to be done semi-manually
Qdata = read.csv(temp[Qlist[3]], header=TRUE)
Qlist = Qlist[-3]
# Qdata = rbind.data.frame(Qdata,read.csv(temp[Qlist[1]], header=FALSE))
# Qlist = Qlist[-1]
# Qdata = rbind.data.frame(Qdata,read.csv(temp[Qlist[1]], header=FALSE))
# Qlist = Qlist[-1]
# Qdata = rbind.data.frame(Qdata,read.csv(temp[Qlist[1]], header=FALSE))
# dimsQ <- NULL
# for (i in 1:length(Qlist)){
# dimsQ <- cbind(dims, dim(read.csv(temp[Qlist[i]])))
# #Qdata <- rbind(Qdata,read.csv(temp[Qlist[i]]), header=FALSE)
# Qdata <- rbind.all.columns(Qdata,read.csv(temp[Qlist[i]]))
# }
# need an alternative for this
# Qlist = grep('Q',temp,ignore.case=TRUE)
dimsQ <- NULL
for (i in 1:length(Qlist)){
dimsQ <- cbind(dimsQ, dim(read.csv(temp[Qlist[i]])))
}
Qattempts <- table(dimsQ)[1]
# removing malformed rows (superseded; column names are reassigned below)
#Qdata <- subset(Qdata, Qdata$V2 != 'Accuracy')
colnames(Qdata) <- colnames(LBdata[,1:13])
# Louis
LOlist = grep('LO',temp,ignore.case=TRUE)
LOdata = read.csv(temp[LOlist[1]], header=TRUE)
LOlist = LOlist[-1]
for (i in 1:length(LOlist)){
#LOdata <- rbind(LOdata,read.csv(temp[LOlist[i]]), header=FALSE)
LOdata <- rbind.fill(LOdata,read.csv(temp[LOlist[i]]))
}
LOlist = grep('LO',temp,ignore.case=TRUE)
dimsLO <- NULL
for (i in 1:length(LOlist)){
dimsLO <- cbind(dimsLO, dim(read.csv(temp[LOlist[i]])))
}
LOattempts <- table(dimsLO)[1]
# Edith
# Edith's files have inconsistent columns, so rbind.fill is used below
EDlist = grep('ed',temp,ignore.case=TRUE)
EDdata = read.csv(temp[EDlist[1]], header=TRUE)
EDlist = EDlist[-1]
# EDdata = rbind(EDdata, read.csv(temp[EDlist[37]], header=TRUE))
# EDlist = EDlist[-37]
for (i in 1:length(EDlist)){
EDdata <- rbind.fill(EDdata,read.csv(temp[EDlist[i]]))
#EDdata <- rbind(EDdata,read.csv(temp[EDlist[i]]), header = TRUE)
}
# Attempts need to be tabulated differently now
EDlist = grep('ed',temp,ignore.case=TRUE)
dimsED <- NULL
for (i in 1:length(EDlist)){
dimsED <- cbind(dimsED, dim(read.csv(temp[EDlist[i]])))
}
EDattempts <- table(dimsED)[1]
# what is this?
# for (i in 1:length(temp)) assign(temp[i], read.csv(temp[i]))
#
# objects()
#
#
# Misc BS
#11/14/2014, 9:37:01 AM GMT+0:00
#1415957821.25373+549.6891090869904+26.198975086212158
#1415958397.14
#1415958397.141814173203
#11/14/2014, 9:46:37 AM GMT+0:00
|
0cf6053810f81b7648ff328b18b9cf6bb6b4ffdb
|
5c724066f1c548acd17007702056982b12e5f7bf
|
/data/pbdb_3TPub_make.R
|
3e31b35d6946e821d16d0259984453e81f2f7572
|
[] |
no_license
|
ajrominger/extra_paleo_sstat
|
b403f03d20ba5241de73312a59f0e1f45391ff94
|
c923d0eb97decbb682020d0e672f29e03ffbbb21
|
refs/heads/master
| 2020-10-02T02:47:52.470484
| 2019-12-12T19:52:27
| 2019-12-12T19:52:27
| 227,478,075
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 4,463
|
r
|
pbdb_3TPub_make.R
|
# **this script produces diversity estimates per family per time bin from PBDB data
# corrected by the '3 timers method' and for possible publication bias**
# source function to produce a matrix of time by taxon with cells
# of corrected diversity
source('R/make3TPub.R')
# load other needed functions
# source('code/sstat_comp.R')
# source('code/sstat_methods.R')
# source('code/Px_gam.R')
# load and prepare data
# ---------------------
pbdbDat <- read.csv('data/pbdb_data.csv', as.is = TRUE)
# make column for midpoint ma
# NOTE(review): `ma_mid` here and `mid_ma` below (after filtering) hold the
# same quantity; only `mid_ma` is used afterwards -- possibly redundant.
pbdbDat$ma_mid <- (pbdbDat$max_ma + pbdbDat$min_ma) / 2
# get rid of poor temporal resolution (records with no time-bin assignment)
pbdbDat <- pbdbDat[pbdbDat$tbin != '', ]
# get rid of bad taxonomy (records missing family or OTU identification)
pbdbDat <- pbdbDat[pbdbDat$family != '', ]
pbdbDat <- pbdbDat[pbdbDat$otu != '', ]
# get bin times: midpoint age per record, then mean midpoint per time bin,
# and order the tbin factor levels chronologically by that mean age
pbdbDat$mid_ma <- (pbdbDat$min_ma + pbdbDat$max_ma) / 2
pbdbTime <- sort(tapply(pbdbDat$mid_ma, pbdbDat$tbin, mean))
pbdbDat$tbin <- factor(pbdbDat$tbin, levels = names(pbdbTime))
# data.frame to hold publication, diversity and 3T stat:
# raw diversity = number of unique OTUs per family per time bin
famTbinBias <- aggregate(list(div = pbdbDat$otu), list(fam = pbdbDat$family,
                                                       tbin = pbdbDat$tbin),
                         function(x) length(unique(x)))
# three timer stat and publication bias
# -------------------------------------
# matrix to determine three timers and part timers (sensu alroy 2008):
# diagonal = -10 marks the focal bin, +/-1 off-diagonals mark adjacent bins,
# so the matrix product below encodes bin-occupancy patterns as single values
mt <- matrix(0, nrow = nlevels(pbdbDat$tbin),
             ncol = nlevels(pbdbDat$tbin))
diag(mt) <- -10
mt[abs(row(mt) - col(mt)) == 1] <- 1
# loop through and compute three timers and part timers per OTU:
# t3 (== 2): present in both neighboring bins AND the focal bin's neighbors,
# tp (== -8): present in both neighbors but absent from the focal bin
timers <- lapply(split(pbdbDat$tbin, pbdbDat$otu),
                 function(x) {
                   # browser()
                   tbins <- integer(nlevels(x))
                   tbins[as.integer(unique(x))] <- 1
                   t3 <- as.integer(mt %*% tbins == 2)
                   tp <- as.integer(mt %*% tbins == -8)
                   return(cbind(t3, tp))
                 })
# compute 3 timer stat from 3 timers and part timers
# (array dims: time bin x {t3, tp} x OTU; sum over OTUs per bin)
timers <- array(unlist(timers), dim = c(nrow(timers[[1]]), 2, length(timers)))
t3stat <- 1 - rowSums(timers[, 1, ]) / (rowSums(timers[, 1, ]) + rowSums(timers[, 2, ]))
# add to data.frame holding all info to be saved
famTbinBias$T3Stat <- t3stat[match(famTbinBias$tbin,
                                   levels(pbdbDat$tbin))]
famTbinBias$T3Div <- famTbinBias$div / famTbinBias$T3Stat
# record pubs per tbin (number of unique references sampling each bin)
tbinPub <- tapply(pbdbDat$reference_no, pbdbDat$tbin,
                  function(x) length(unique(x)))
famTbinBias$tbinPub <- tbinPub[famTbinBias$tbin]
# calculate corrected diversity (diagnostic plot written to the supplement)
pdf('ms/figSupp_divByPub_foo.pdf', width = 4, height = 4)
pbdbFamDiv <- with(famTbinBias,
                   make3TPub(div, T3Stat, tbinPub, fam, tbin, pbdbTime,
                             minPub = 10, plotit = TRUE))
dev.off()
# write out corrected diversity
write.csv(pbdbFamDiv, 'data/pbdb_3TPub-corrected.csv')
# for permutational d-stat tests we need diversity at the genus level;
# make that here
# a data.frame holding only one record per genus per family per time bin
pbdbOcc <- pbdbDat[!duplicated(pbdbDat[, c('tbin', 'family', 'otu')]), ]
genTbinBias <- parallel::mclapply(which(!is.nan(famTbinBias$T3Stat)), mc.cores = 3,
                                  FUN = function(i) {
                                    dat <- pbdbOcc[pbdbOcc$family == famTbinBias$fam[i] &
                                                     pbdbOcc$tbin == famTbinBias$tbin[i],
                                                   c('tbin', 'family', 'otu')]
                                    dat$T3Occ <- 1 / famTbinBias$T3Stat[i]
                                    dat$tbinPub <- famTbinBias$tbinPub[i]
                                    return(dat)
                                  }
)
genTbinBias <- do.call(rbind, genTbinBias)
# NOTE(review): `pbdbPubLM` below is never defined in this script; it must
# already exist in the workspace (a log-pub regression?) or this step errors.
# TODO confirm where pbdbPubLM is fit and source/load it here explicitly.
pbdbGenDiv <- data.frame(genTbinBias[, c('tbin', 'family', 'otu')],
                         T3PubDiv = genTbinBias$T3Occ /
                           exp(predict(pbdbPubLM,
                                       newdata = data.frame(
                                         logPub = log(genTbinBias$tbinPub)))))
# write it out as a tidy data frame (not turned into a matrix) this will be easier
# for permuting
write.csv(pbdbGenDiv, file = 'data/pbdb_3TPub_genera.csv', row.names = FALSE)
|
a64f04667659283b64e98f4cbade990e434f42b5
|
391ad5a8f32ea0d0076f63885b995fb25603e2ad
|
/R/covcor_design.R
|
114154939cd6a1b0d91f4fb3b54572ce39407fec
|
[] |
no_license
|
cran/metan
|
a0d951dff5a151bd5a8b0b7a0a699fde0dddb50c
|
b3637d40a2fc9d955b928120d61399ba1bf6e11d
|
refs/heads/master
| 2023-03-20T17:17:24.469173
| 2023-03-05T21:00:15
| 2023-03-05T21:00:15
| 236,625,199
| 0
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 8,578
|
r
|
covcor_design.R
|
#' Variance-covariance matrices for designed experiments
#' @description
#' `r badge('stable')`
#'
#' Compute variance-covariance and correlation matrices using data from a
#' designed (RCBD or CRD) experiment.
#'
#'
#'@param .data The data to be analyzed. It can be a data frame, possible with
#' grouped data passed from [dplyr::group_by()].
#' @param gen The name of the column that contains the levels of the genotypes.
#' @param rep The name of the column that contains the levels of the
#' replications/blocks.
#' @param resp The response variables. For example `resp = c(var1, var2,
#' var3)`.
#' @param design The experimental design. Must be RCBD or CRD.
#'@param by One variable (factor) to compute the function by. It is a shortcut
#' to [dplyr::group_by()]. To compute the statistics by more than
#' one grouping variable use that function.
#' @param type What the matrices should return? Set to `NULL`, i.e., a list
#' of matrices is returned. The argument type allow the following values
#' `'pcor', 'gcor', 'rcor'`, (which will return the phenotypic, genotypic
#' and residual correlation matrices, respectively) or `'pcov', 'gcov',
#' 'rcov'` (which will return the phenotypic, genotypic and residual
#' variance-covariance matrices, respectively). Alternatively, it is possible
#' to get a matrix with the means of each genotype in each trait, by using
#' `type = 'means'`.
#' @return An object of class `covcor_design` containing the following
#' items:
#' * **geno_cov** The genotypic covariance.
#' * **phen_cov** The phenotypic covariance.
#' * **resi_cov** The residual covariance.
#' * **geno_cor** The phenotypic correlation.
#' * **phen_cor** The phenotypic correlation.
#' * **resi_cor** The residual correlation.
#'
#' If `.data` is a grouped data passed from [dplyr::group_by()]
#' then the results will be returned into a list-column of data frames.
#' @md
#' @author Tiago Olivoto \email{tiagoolivoto@@gmail.com}
#' @export
#' @examples
#' \donttest{
#' library(metan)
#' # List of matrices
#' data <- subset(data_ge2, ENV == 'A1')
#' matrices <- covcor_design(data,
#' gen = GEN,
#' rep = REP,
#' resp = c(PH, EH, NKE, TKW))
#'
#' # Genetic correlations
#' gcor <- covcor_design(data,
#' gen = GEN,
#' rep = REP,
#' resp = c(PH, EH, NKE, TKW),
#' type = 'gcor')
#'
#' # Residual (co)variance matrix for each environment
#' rcov <- covcor_design(data_ge2,
#' gen = GEN,
#' rep = REP,
#' resp = c(PH, EH, CD, CL),
#' by = ENV,
#' type = "rcov")
#'}
#'
covcor_design <- function(.data,
                          gen,
                          rep,
                          resp,
                          design = "RCBD",
                          by = NULL,
                          type = NULL){
  # Compute phenotypic, genotypic, and residual (co)variance and correlation
  # matrices from a designed (RCBD or CRD) experiment, using per-trait ANOVA
  # mean squares. Interface and return structure unchanged from the original.
  if (!design %in% c("RCBD", "CRD")) {
    stop("The experimental design must be RCBD or CRD.")
  }
  # `type = NULL` means "return the full list"; otherwise a single matrix.
  if (!is.null(type)) {
    if (!type %in% c(c("pcor", "gcor", "rcor", "pcov", "gcov",
                       "rcov", "means"))) {
      stop("The type must be one of the 'pcor', 'gcor', 'rcor', 'pcov', 'gcov', 'rcov', or 'means'. ")
    }
  }
  # Shortcut to dplyr::group_by(); only one grouping variable is supported.
  if (!missing(by)){
    if(length(as.list(substitute(by))[-1L]) != 0){
      stop("Only one grouping variable can be used in the argument 'by'.\nUse 'group_by()' to pass '.data' grouped by more than one variable.", call. = FALSE)
    }
    .data <- group_by(.data, {{by}})
  }
  # Grouped data: recurse over groups and return results in a list-column.
  if(is_grouped_df(.data)){
    results <- .data %>%
      doo(covcor_design,
          gen = {{gen}},
          rep = {{rep}},
          resp = {{resp}},
          design = design,
          type = type)
    return(add_class(results, "covcor_design"))
  }
  # Coerce the genotype and replicate columns to factors.
  factors <- select(.data,
                    GEN = {{gen}},
                    REP = {{rep}}) %>%
    as_factor(1:2)
  GEN <- factors$GEN
  REP <- factors$REP
  NREP <- nlevels(REP)
  # Numeric response columns only.
  vars <- .data %>%
    select({{resp}}, -{{gen}}, -{{rep}}) %>%
    select_numeric_cols()
  listres <- list()
  nvar <- ncol(vars)
  covdata <- data.frame(matrix(nrow = nrow(.data), ncol = nvar))
  vin <- 0
  mst <- NULL
  msr <- NULL
  # Per-trait ANOVA: extract the treatment (mst) and residual (msr) mean
  # squares. Model rows differ between RCBD (GEN + REP) and CRD (GEN only).
  for (var in seq_len(nvar)) {
    vin <- vin + 1
    Y <- vars[[var]]
    covdata[, vin] <- Y
    if (design == "RCBD") {
      model <- anova(aov(Y ~ GEN + REP))
      mst[vin] <- model[1, 3]
      msr[vin] <- model[3, 3]
    } else {
      model <- anova(aov(Y ~ GEN))
      mst[vin] <- model[1, 3]
      msr[vin] <- model[2, 3]
    }
    colnames(covdata)[[vin]] <- paste(names(vars[var]))
  }
  # ms columns: 1 = mst, 2 = msr, 3 = tr (= mst - msr, used for vgen).
  ms <-
    data.frame(mst = mst, msr = msr) %>%
    dplyr::mutate(tr = mst - msr)
  # Diagonal variance matrices (covariances filled in below).
  vres <- diag(ms[, 2])
  # BUG FIX: the replicate number was hard-coded as 3 here, silently giving
  # wrong phenotypic/genotypic variances whenever the trial has != 3 reps.
  # Use NREP, consistent with every other formula in this function.
  # Phenotypic variance = MST / r; genotypic variance = (MST - MSR) / r.
  vfen <- diag(ms[, 1]/NREP)
  vgen <- (diag(ms[, 1]) - diag(ms[, 2]))/NREP
  # Genotype means per trait (rownames = genotype labels).
  means <-
    as_tibble(cbind(GEN, covdata)) %>%
    mean_by(GEN) %>%
    column_to_rownames("GEN")
  # Pairwise trait sums; an ANOVA of (Xi + Xj) yields their covariances via
  # Var(Xi + Xj) = Var(Xi) + Var(Xj) + 2 Cov(Xi, Xj).
  covdata2 <- comb_vars(data.frame(covdata), order = "first")
  index <- data.frame(t(combn(nvar, 2)))
  covres <- NULL
  covfen <- NULL
  covgen <- NULL
  cores <- NULL
  corfen <- NULL
  corgen <- NULL
  for (i in seq_len(ncol(covdata2))) {
    if (design == "RCBD") {
      model <- anova(aov(covdata2[[i]] ~ GEN + REP))
      tcovres <- (model[3, 3] - ms[index[i, 1], 2] - ms[index[i, 2], 2])/2
      tcovfen <- ((model[1, 3] - ms[index[i, 1], 1] - ms[index[i, 2], 1])/2)/NREP
      tcovgen <- (tcovfen * NREP - tcovres)/NREP
      covres[i] <- tcovres
      covfen[i] <- tcovfen
      covgen[i] <- tcovgen
      corfen[i] <- tcovfen/sqrt((ms[index[i, 1], 1]/NREP) * (ms[index[i, 2], 1]/NREP))
      corgen[i] <- tcovgen/sqrt((ms[index[i, 1], 3]/NREP) * (ms[index[i, 2], 3]/NREP))
      cores[i] <- tcovres/sqrt((ms[index[i, 1], 2]) * (ms[index[i, 2], 2]))
    } else {
      model <- anova(aov(covdata2[[i]] ~ GEN))
      tcovres <- (model[2, 3] - ms[index[i, 1], 2] - ms[index[i, 2], 2])/2
      tcovfen <- ((model[1, 3] - ms[index[i, 1], 1] - ms[index[i, 2], 1])/2)/NREP
      tcovgen <- (tcovfen * NREP - tcovres)/NREP
      covres[i] <- tcovres
      covfen[i] <- tcovfen
      covgen[i] <- tcovgen
      corfen[i] <- tcovfen/sqrt((ms[index[i, 1], 1]/NREP) * (ms[index[i, 2], 1]/NREP))
      corgen[i] <- tcovgen/sqrt((ms[index[i, 1], 3]/NREP) * (ms[index[i, 2], 3]/NREP))
      cores[i] <- tcovres/sqrt((ms[index[i, 1], 2]) * (ms[index[i, 2], 2]))
    }
  }
  # Fill lower triangles; make_sym() mirrors them into full symmetric
  # matrices on return.
  corres <- matrix(1, nvar, nvar)
  corrgen <- matrix(1, nvar, nvar)
  corrfen <- matrix(1, nvar, nvar)
  vres[lower.tri(vres, diag = FALSE)] <- covres
  vfen[lower.tri(vfen, diag = FALSE)] <- covfen
  vgen[lower.tri(vgen, diag = FALSE)] <- covgen
  corres[lower.tri(corres, diag = FALSE)] <- cores
  corrfen[lower.tri(corrfen, diag = FALSE)] <- corfen
  corrgen[lower.tri(corrgen, diag = FALSE)] <- corgen
  colnames(vres) <- rownames(vres) <- names(means)
  colnames(vfen) <- rownames(vfen) <- names(means)
  colnames(vgen) <- rownames(vgen) <- names(means)
  colnames(corres) <- rownames(corres) <- names(means)
  colnames(corrfen) <- rownames(corrfen) <- names(means)
  colnames(corrgen) <- rownames(corrgen) <- names(means)
  if (is.null(type)) {
    return(list(geno_cov = as.matrix(make_sym(vgen, diag = diag(vgen))),
                phen_cov = as.matrix(make_sym(vfen, diag = diag(vfen))),
                resi_cov = as.matrix(make_sym(vres, diag = diag(vres))),
                geno_cor = as.matrix(make_sym(corrgen, diag = 1)),
                phen_cor = as.matrix(make_sym(corrfen, diag = 1)),
                resi_cor = as.matrix(make_sym(corres, diag = 1)),
                means = means) %>%
             add_class("covcor_design"))
  }
  if (type == "pcor") {
    return(as.data.frame(make_sym(corrfen, diag = 1)))
  }
  if (type == "gcor") {
    return(as.data.frame(make_sym(corrgen, diag = 1)))
  }
  if (type == "rcor") {
    return(as.data.frame(make_sym(corres, diag = 1)))
  }
  if (type == "pcov") {
    return(as.data.frame(make_sym(vfen, diag = diag(vfen))))
  }
  if (type == "gcov") {
    return(as.data.frame(make_sym(vgen, diag = diag(vgen))))
  }
  if (type == "rcov") {
    return(as.data.frame(make_sym(vres, diag = diag(vres))))
  }
  if (type == "means") {
    return(as_tibble(means, rownames = NA))
  }
}
|
09af4c1b7c4fe7e2633fe8b9bae0f253786455ab
|
80b5dc2575bbc4ce13ae760ab90aa6447e036fd7
|
/20190924.R
|
48935e1fb32b7f70fd5151ee5a1614c7b689f341
|
[] |
no_license
|
WestlakeData/Data_Course_PORTER
|
de56feda5be2fcf0d4c6da7a64db95741984ee3b
|
86f123590a268b904a6ffef0185ec620088df152
|
refs/heads/master
| 2020-07-07T18:59:09.811909
| 2019-12-12T21:20:24
| 2019-12-12T21:20:24
| 203,446,509
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 496
|
r
|
20190924.R
|
# Class exercise (2019-09-24): dplyr/ggplot2 practice on DNA extraction
# concentrations and graduate admissions data.
# NOTE(review): `DNA_Conc_by_Extraction_Date` and `GradAd` are assumed to be
# already loaded in the session (e.g. via RStudio's import dialog) -- this
# script does not read them from disk. TODO confirm their source.
dat <- DNA_Conc_by_Extraction_Date
library(dplyr)
library(ggplot2)
# Upstairs-lab rows only, keeping just the columns whose names start with "D"
D_Upstairs <- dat%>%
  filter(Lab == "Upstairs") %>%
  select(starts_with("D"))
# Interactive help lookup left in the script (no effect when sourced quietly)
?summarise
# Mean DNA concentration per lab, separately for each technician's readings
Lab <- dat %>%
  group_by(Lab) %>%
  summarise(Ben = mean(DNA_Concentration_Ben),
            Katy = mean(DNA_Concentration_Katy))
# Mean GPA of admitted applicants (admit == 1), grouped by school rank
Avg.GPA <- GradAd %>%
  filter(admit == 1) %>%
  group_by(rank)%>%
  summarise(Average_GPA = mean(gpa))
# Boxplot of GPA by rank, admitted applicants only
GradAd %>%
  filter(admit == 1)%>%
  ggplot(aes(x= as.factor(rank), y= gpa)) +
  geom_boxplot()
|
c066af88ac56c23d470995d32c1e3a75c2b6ae2b
|
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
|
/data/genthat_extracted_code/quantoptr/examples/abso_diff_est.Rd.R
|
7c80b2a5cf972d784c54b44b763529b619648880
|
[] |
no_license
|
surayaaramli/typeRrh
|
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
|
66e6996f31961bc8b9aafe1a6a6098327b66bf71
|
refs/heads/master
| 2023-05-05T04:05:31.617869
| 2019-04-25T22:10:06
| 2019-04-25T22:10:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,320
|
r
|
abso_diff_est.Rd.R
|
library(quantoptr)
### Name: abso_diff_est
### Title: Estimate the Gini's mean difference/mean absolute
### difference(MAD) for a Given Treatment Regime
### Aliases: abso_diff_est
### ** Examples
library(stats)
GenerateData.MAD <- function(n)
{
x1 <- runif(n)
x2 <- runif(n)
tp <- exp(-1+1*(x1+x2))/(1+exp(-1+1*(x1+x2)))
a<-rbinom(n = n, size = 1, prob=tp)
error <- rnorm(length(x1))
y <- (1 + a*0.3*(-1+x1+x2<0) + a*-0.3*(-1+x1+x2>0)) * error
return(data.frame(x1=x1,x2=x2,a=a,y=y))
}
## Don't show:
n <- 50
testData <- GenerateData.MAD(n)
logistic.model.tx <- stats::glm(formula = a~x1+x2, data = testData, family=binomial)
ph <- as.vector(logistic.model.tx$fit)
Cnobs <- combn(1:n, 2)
abso_diff_est(beta=c(1,2,-1),
x=model.matrix(a~x1+x2, testData),
y=testData$y,
a=testData$a,
prob=ph,
Cnobs = Cnobs)
## End(Don't show)
## No test:
n <- 500
testData <- GenerateData.MAD(n)
logistic.model.tx <- glm(formula = a~x1+x2, data = testData, family=binomial)
ph <- as.vector(logistic.model.tx$fit)
Cnobs <- combn(1:n, 2)
abso_diff_est(beta=c(1,2,-1),
x=model.matrix(a~x1+x2, testData),
y=testData$y,
a=testData$a,
prob=ph,
Cnobs = Cnobs)
## End(No test)
|
21bca21435da18de114bd24b53352294d1b06bd1
|
cf606e7a3f06c0666e0ca38e32247fef9f090778
|
/test/integration/example-models/bugs_examples/vol2/cervix/post.R
|
b7386a545b237b53e1a7c50a771829e158695ee8
|
[
"BSD-3-Clause",
"LicenseRef-scancode-free-unknown"
] |
permissive
|
nhuurre/stanc3
|
32599a71d5f82c759fd6768b8b699fb5f2b2d072
|
5612b357c1cd5a08cf2a57db97ce0e789bb87018
|
refs/heads/master
| 2023-07-05T02:27:08.083259
| 2020-11-12T15:37:42
| 2020-11-12T15:37:42
| 222,684,189
| 0
| 0
|
BSD-3-Clause
| 2019-11-19T11:50:39
| 2019-11-19T11:50:38
| null |
UTF-8
|
R
| false
| false
| 409
|
r
|
post.R
|
# Posterior summary for the BUGS vol2 "Cervix" example fit.
library(coda)
library(BUGSExamples);
# Read sampler output, skipping '#' comment lines; the first three columns
# are dropped (presumably sampler bookkeeping, not model parameters -- confirm)
post <- read.csv(file = 'samples.csv', header = TRUE, comment.char = '#')[, -(1:3)]
# Coerce to an mcmc object and print posterior summaries
summary(as.mcmc(post))
# Parameters and settings for a reference JAGS run (currently disabled below)
pars <- c("q", "beta0C", "beta", "phi")
ex <- list(name = "Cervix", parameters = pars,
           nSample = 10000, nBurnin = 1000, nThin = 1,
           nChain = 3)
# jagspost <- runExample(ex, engine = 'JAGS')
# summary(jagspost$coda)
# plot(jagspost$coda);
|
762ea136c7e6fcb370d4df27240e6efeff58a6e8
|
866fba40d302b2fd52bbd92a4fc4c31af3fee6a9
|
/man/atualizar_dados.Rd
|
147fee1894fa7d8603ae0d1bc5ba0cf21e4c06a2
|
[
"MIT"
] |
permissive
|
curso-r/tidynetflix
|
7f498cff9d8e964294347667f6b111f6ba49ba35
|
f64b8e247374e430f6edee021e8ba3a30f96c204
|
refs/heads/master
| 2023-04-06T11:38:41.277488
| 2021-04-22T01:03:22
| 2021-04-22T01:03:22
| 360,337,508
| 2
| 1
| null | null | null | null |
UTF-8
|
R
| false
| true
| 669
|
rd
|
atualizar_dados.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/atualizar-dados.R
\name{atualizar_dados}
\alias{atualizar_dados}
\title{Atualizar base de dados do Tidytuesday}
\usage{
atualizar_dados(file, ...)
}
\arguments{
\item{file}{Caminho para salvar a base (deve ser CSV)}
\item{...}{Outros arguementos passados para \code{\link[readr:write_delim]{readr::write_csv()}}}
}
\value{
Invisivel, a base atualizada
}
\description{
Ler dados da Tidytuesday de 20/04/2021, salvando em um arquivo CSV conforme
o argumento \code{file}. Para mais informações, acesse
\url{https://github.com/rfordatascience/tidytuesday/tree/master/data/2021/2021-04-20}.
}
|
e2a886b7b76d4152a565f9dedda679009d3d57e2
|
c734ca3b2f2eda498461049be11c2c007e99d367
|
/tools/Rscripts/Best_blast.R
|
e62c200d9925845dcb8a0f074ba10bed4de56b26
|
[] |
no_license
|
nnalpas/Proteogenomics_reannotation
|
95b7387f3c81dbe1c9c26409fc5118b2572c79cb
|
b054de55f29bd095001f37db09adfda8b155f2f2
|
refs/heads/master
| 2023-08-22T14:38:55.164844
| 2023-08-08T10:33:31
| 2023-08-08T10:33:31
| 111,789,188
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 4,905
|
r
|
Best_blast.R
|
#!/usr/bin/env Rscript
# This script identifies the best blast hits from blasting results
# for target entries
### Environment set-up ---------------------------------------------------
# Start with clean environment
rm(list = ls())
# Define current time
date_str <- format(Sys.time(), "%Y-%m-%d")
print(paste("Start", format(Sys.time(), "%Y-%m-%d %H:%M:%S")))
# Define the current user
user <- Sys.info()[["user"]]
### List of required packages -----------------------------------------------
# Source the custom user functions
if (interactive()) {
source(
file = paste(
"C:/Users",
user,
"Documents/GitHub/Proteogenomics_reannotation",
"tools/Rscripts/helper.R",
sep = "/"))
} else {
source(
file = paste(
Sys.getenv("HOME"),
"bin/helper.R",
sep = "/"))
}
# Load the required packages (or install if not already in library)
library(plyr)
library(dplyr)
library(magrittr)
library(data.table)
library(splitstackshape)
library(stringr)
library(optparse)
### Parameters setting up ------------------------------------------------
# Define input parameters (interactively or from command line)
if (interactive()) {
# Define the list of input parameters
opt <- list(
input = choose.files(
caption = "Choose input Blast results",
multi = FALSE),
filter = readline(
prompt = paste(
"What filter to use to determine best blast",
"(do not provide value for default)?")),
multi_match = readline(
prompt = paste(
"What to do for multihit entries",
"(either: remove, keep, uniquify)?")),
output = NULL)
} else {
# Define the list of command line parameters
option_list <- list(
make_option(
opt_str = c("-i", "--input"), type = "character",
default = NULL, help = "Blast data file name",
metavar = "character"),
make_option(
opt_str = c("-f", "--filter"), type = "character",
default = NULL, help = "Specific filtering to apply",
metavar = "character"),
make_option(
opt_str = c("-m", "--multi_match"), type = "character",
default = NULL, help = "Filter for multi hits entry",
metavar = "character"),
make_option(
opt_str = c("-o", "--output"), type = "character",
default = ".", help = "Output directory",
metavar = "character"))
# Parse the parameters provided on command line by user
opt_parser <- OptionParser(option_list = option_list)
opt <- parse_args(opt_parser)
}
# Check whether input parameter was provided
if (is.null(opt$input)){
print_help(opt_parser)
stop("At least one argument must be supplied (input file)!")
}
# Check whether output parameter was provided
if (is.null(opt$output)){
opt$output <- dirname(opt$input)
warning(paste("Output results to path: ", opt$output, "!", sep = ""))
}
# If filter and multi_match parameters are undefined, define as null
if (is.null(opt$filter)) {
opt["filter"] <- list(NULL)
} else if (opt$filter == "") {
opt["filter"] <- list(NULL)
} else {
opt$filter <- enquote(opt$filter)
}
if (is.null(opt$multi_match)) {
opt["multi_match"] <- list(NULL)
} else if (opt$multi_match == "") {
opt["multi_match"] <- list(NULL)
}
# Create output directory if not already existing
dir.create(opt$output)
### Data import and processing -------------------------------------------
# Import the Blast results
blast_data <- blast_read(file = opt$input, blast_format = "6")
# Get the best blast match for each query
best_blast_data <- best_blast(
data = blast_data, key = "qseqid",
bb_filter = opt$filter, multi_match = opt$multi_match)
### Results export -------------------------------------------------------
# Export the best hits protein IDs that needs to be reciprocally blasted
write.table(
x = sub("^ref\\|(.+)\\|", "\\1", unique(best_blast_data$sseqid)),
file = paste0(opt$output, "/Reciprocal_id_", basename(opt$input)),
quote = FALSE,
sep = "\t",
row.names = FALSE,
col.names = FALSE)
# Export the best hits results
write.table(
x = best_blast_data,
file = paste0(opt$output, "/Best_blast_", basename(opt$input)),
quote = FALSE,
sep = "\t",
row.names = FALSE,
col.names = TRUE)
# Export the blast ID map results
write.table(
x = best_blast_data %>%
dplyr::select(., qseqid, sseqid),
file = paste0(opt$output, "/Blast_cross-map_", basename(opt$input)),
quote = FALSE,
sep = "\t",
row.names = FALSE,
col.names = TRUE)
# Define end time
print(paste("Complete", format(Sys.time(), "%Y-%m-%d %H:%M:%S")))
|
f4a3456cbe4844d2ac212a65b290a220a522b000
|
7a124a0bd8997e7f08f2f8599a1eb307eca4adcd
|
/R/SL.glm.R
|
eb53858bc3d661e0cf3a89797ca40d87ab7ab9ef
|
[] |
no_license
|
tedwestling/SuperLearner_Old
|
e4303cba6c1d9ee0348dbe8ca049c4f93eb582aa
|
3e328a5c830785b34c4b5d7ee5c2a1352b923a82
|
refs/heads/master
| 2021-05-27T01:38:20.763620
| 2011-05-26T04:32:51
| 2011-05-26T04:32:51
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 958
|
r
|
SL.glm.R
|
# SuperLearner wrapper for generalized linear regression (stats::glm).
#
# Args:
#   Y.temp:     outcome vector.
#   X.temp:     data frame of covariates used for fitting.
#   newX.temp:  data frame of covariates to predict on.
#   family:     a glm family object (e.g. gaussian(), binomial()).
#   obsWeights: observation-level weights forwarded to glm().
#   ...:        ignored; kept for SuperLearner call compatibility.
#
# Returns a list with `out` (response-scale predictions on newX.temp) and
# `fit` (a list of class "SL.glm" wrapping the fitted glm object).
SL.glm <- function(Y.temp, X.temp, newX.temp, family, obsWeights, ...){
  model <- glm(Y.temp ~ ., data = X.temp, family = family, weights = obsWeights)
  predictions <- predict(model, newdata = newX.temp, type = "response")
  result <- list(out = predictions, fit = list(object = model))
  class(result$fit) <- "SL.glm"
  return(result)
}
# S3 predict method for "SL.glm" fits: delegates to predict.glm on the
# stored model object and returns response-scale predictions.
predict.SL.glm <- function(object, newdata, ...){
  predict(object = object$object, newdata = newdata, type = "response")
}
# object$fit.library[[k]]$object
# SuperLearner wrapper for glm on factor-coded covariates: every column of
# X.temp / newX.temp is converted to a factor before fitting.
#
# Args mirror SL.glm; returns the same list(out, fit) structure with
# fit of class "SL.glm" so predict.SL.glm dispatches on it.
#
# NOTE(review): newX.temp factor levels are derived from newX.temp itself,
# so prediction fails if newX.temp contains a level absent from X.temp.
SL.glmFactor <- function (Y.temp, X.temp, newX.temp, family, obsWeights, ...)
{
    X.tempF <- data.frame(lapply(X.temp, factor))
    newX.tempF <- data.frame(lapply(newX.temp, factor))
    # FIX: pass obsWeights to glm(); the original silently ignored the
    # weights argument, inconsistent with the sibling SL.glm wrapper.
    fit.glm <- glm(Y.temp ~ ., data = X.tempF, family = family,
        weights = obsWeights)
    out <- predict(fit.glm, newdata = newX.tempF, type = "response")
    fit <- list(object = fit.glm)
    foo <- list(out = out, fit = fit)
    class(foo$fit) <- c("SL.glm")
    return(foo)
}
|
c461c89ed15c7615fd5fc8926f21a42a95d7cedd
|
f44a214a714ce68e1cbb8fd8cd01fd608fe073f1
|
/Subject_Statistics/intro6.R
|
1fe8ad39062c5ba738b631d38de8a308fd812e49
|
[
"MIT"
] |
permissive
|
mahnooranjum/R_Programming
|
cdc4cffb3decabb07a6ed2e37515cdd055eb2bde
|
3227d95323d7c33644edeb6d21264d50f18a2725
|
refs/heads/master
| 2023-02-17T13:35:11.697968
| 2021-01-18T12:22:12
| 2021-01-18T12:22:12
| 257,075,397
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 189
|
r
|
intro6.R
|
# Load the mtcars dataset (semicolon-separated CSV) from the DataCamp S3
# bucket. FIX: use `<-` for assignment instead of `=` (tidyverse style).
cars <- read.csv(
  "http://s3.amazonaws.com/assets.datacamp.com/course/uva/mtcars_semicolon.csv",
  sep = ";"
)
# Print the first 6 rows of the dataset.
print(head(cars))
|
665149879a8162cddeb62062ae50b3480a0a266c
|
2de4b01d976daf4b4c34f035ea1943b3af0351fa
|
/R/drop_na_rows.R
|
67a0c733355ce5b106bf198583211b17bcf1ce5a
|
[
"BSD-3-Clause"
] |
permissive
|
cytomining/cytominer
|
4143e84f6d87bee70002e046320d5e0f0dc08924
|
d911eb5bc152e0fc8e382ee0492356fd939d2488
|
refs/heads/master
| 2023-07-06T08:08:54.859029
| 2023-06-29T20:02:36
| 2023-06-29T20:02:36
| 39,411,869
| 45
| 21
|
NOASSERTION
| 2023-06-29T19:57:20
| 2015-07-20T22:38:45
|
R
|
UTF-8
|
R
| false
| false
| 1,604
|
r
|
drop_na_rows.R
|
# Silence R CMD check notes about non-standard-evaluation column names.
utils::globalVariables(c("key", "value", "rowname_temp", "rowid", "coalesce"))
#' Drop rows that are \code{NA} in all specified variables.
#'
#' \code{drop_na_rows} drops rows that are \code{NA} in all specified variables.
#' Rows where only some of the specified variables are \code{NA} are kept.
#'
#' @param population tbl with grouping (metadata) and observation variables.
#' @param variables character vector specifying observation variables.
#'
#' @return \code{population} without rows that have \code{NA} in all specified
#' variables.
#'
#' @examples
#' population <- tibble::tibble(
#'   Metadata_group = c(
#'     "control", "control", "control", "control",
#'     "experiment", "experiment", "experiment", "experiment"
#'   ),
#'   Metadata_batch = c("a", "a", "b", "b", "a", "a", "b", "b"),
#'   AreaShape_Area = c(10, 12, NA, 16, 8, 8, 7, 7),
#'   AreaShape_Length = c(2, 3, NA, NA, 4, 5, 1, 5)
#' )
#' variables <- c("AreaShape_Area", "AreaShape_Length")
#' drop_na_rows(population, variables)
#' @importFrom magrittr %>%
#' @importFrom magrittr %<>%
#' @export
drop_na_rows <- function(population, variables) {
  if (is.data.frame(population)) {
    # In-memory path: melt the selected variables to long form, discard NA
    # cells, then widen back. A row whose selected variables are ALL NA
    # loses every long-form cell (its rowid disappears), so it is dropped;
    # partially-NA rows are rebuilt with NAs reinstated by pivot_wider().
    # The final two select() calls drop the temporary row id and restore
    # the original column order.
    population %>%
      tibble::rowid_to_column() %>%
      tidyr::pivot_longer(variables) %>%
      dplyr::filter(!is.na(value)) %>%
      tidyr::pivot_wider(names_from = "name", values_from = "value") %>%
      dplyr::select(-rowid) %>%
      dplyr::select(names(population))
  } else {
    # Backend (non-data.frame tbl) path: coalesce() yields the first
    # non-missing of the selected variables, so the filter keeps rows with
    # at least one non-missing value. NOTE(review): is.null() is presumably
    # relied upon to translate to the backend's NULL test (e.g. SQL
    # IS NULL); it is not meaningful for in-memory vectors — confirm
    # against the supported backends.
    # Coalesce() must have at least 2 arguments.
    if (length(variables) == 1) {
      variables <- c(variables, variables)
    }
    population %>%
      dplyr::filter(!is.null(coalesce(!!!rlang::syms(variables))))
  }
}
|
67fe6f39c02e6325ed8c02b4d04f6aceed315daf
|
298fc5c4e90603412b6ab568a6406d058df95fcd
|
/man/reccsim-package.Rd
|
5682896926bab210e48c80798800ac8b6329f522
|
[] |
no_license
|
yadevi/reccsim
|
0334ef24c696cbad1aaa8c245dd4b09c67fa4eb0
|
03e4b6ba67d79a5d8e1367c03fcaba9230780351
|
refs/heads/master
| 2021-05-26T16:43:24.904127
| 2012-04-20T00:00:00
| 2012-04-20T00:00:00
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,832
|
rd
|
reccsim-package.Rd
|
\name{reccsim-package}
\alias{reccsim-package}
\alias{reccsim}
\docType{package}
\title{
Simulate case-control studies.
}
\description{
This package allows you to simulate case control-studies from a known
population with binary exogenous variables and a binary endogenous variable.
}
\details{
\tabular{ll}{
Package: \tab reccsim\cr
Type: \tab Package\cr
Version: \tab 0.9-1\cr
Date: \tab 2012-09-28\cr
License: \tab GPL (>=2)\cr
}
reccsim's functions are \code{\link{rccs}} and \code{\link{build.population}} as
main workhorses and \code{\link{interactive.population}}
to instruct users in how the \code{PopulationAtRisk} object has to be set up.
For simulating a case-control study you need to feed a
\code{PopulationAtRisk} to \code{\link{rccs}} (for \code{r}andom
\code{c}ase-\code{c}ontrol \code{s}tudy). It will then return a case-control
study which you may use for further analysis.
\code{\link{interactive.population}} will guide you through the construction
of a PopulationAtRisk object, however for repeated simulation with
different population parameters you will usually want to call
\code{\link{build.population}} directly under specification of your
population parameters.
}
\author{
Christian Westphal
Maintainer: Christian Westphal <westphal@staff.uni-marburg.de>
}
\references{
Breslow, N.E. (1996) \emph{Statistics in Epidemiology: The
Case-Control Study}. Journal of the American Statistical
Association, Vol. 91 (433) pp. 14-28.
}
\keyword{ package }
\seealso{
\code{\link{interactive.population}}
}
\examples{
## Create a PopulationAtRisk manually from a risk formula
## where cancer is dependent on smoking and drinking:
## Try this with a population size of 500, 0.2 drinking
## 0.2 smoking and 0.1 smoking and drinking.
## Use 2 and 5 and their product 10 for relative risks in the respective
## groups.
## PaR <- interactive.population( cancer ~ smoking + drinking )
PaR <- build.population( cancer ~ smoking + drinking,
50000000,
.0001,
c(.2,.2,.1),
c(2,5,10)
)
## Now the PopulationAtRisk object stored in PaR may be used
## to construct a case control study, where we use five times
## as many controls as cases:
ccs <- rccs( PaR, ctc = 5)
## This randomized case control study from the PopulationAtRisk
## is now ready for further analysis.
## Using build.population() instead of interactive.population()
## will allow automation for studying how, e.g., the logit
## model estimator behaves for different population parameters.
## Let us have a short summary of this case-control study:
summary( ccs )
}
|
39391162b8c3dadd4d7243747fe42abbb2ae18de
|
c4eac36ff94b667c3465a6a44556db8a615331e0
|
/R/get_hclust.R
|
2e5df20b45b8f6a3493005ffffaab5766997a1e9
|
[
"MIT"
] |
permissive
|
gtonkinhill/fastbaps
|
647b1b15f9a2b9afb95ddd213e7055a812c2d222
|
1c322182eab509c647a0584e9d55df7d50c21823
|
refs/heads/master
| 2022-10-16T03:32:54.563347
| 2022-09-18T13:09:50
| 2022-09-18T13:09:50
| 137,083,307
| 40
| 10
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,745
|
r
|
get_hclust.R
|
#' get_hclust
#'
#' Generate an hclust object from a sparse SNP presence/absence matrix.
#' For small datasets the full pairwise distance matrix is clustered
#' directly; for large datasets (>= 10,000 isolates) a truncated SVD is
#' computed first and clustering is done in the reduced space.
#'
#' @import ape
#' @import Matrix
#'
#' @param sparse.data sparse data object with a \code{snp.matrix} member
#'   (isolates in columns)
#' @param quiet whether to suppress additional printing information
#' @param method which hierarchical clustering method. One of 'ward' or 'genie'. (default='ward')
#' @param n.cores n.cores to use (not implemented)
#'
#' @return an object of class \code{hclust}
# adapted from ape as.phylo
get_hclust <- function(sparse.data, quiet, method="ward", n.cores=1){
  # Above this many isolates the O(n^2) dense distance matrix becomes too
  # expensive, so fall back to the SVD-based path below.
  MAX_CLUSTER_TO_GENIE <- 10000
  n.isolates <- ncol(sparse.data$snp.matrix)
  stopifnot(method %in% c("ward", "genie"))
  if(n.isolates<MAX_CLUSTER_TO_GENIE){
    # Pairwise counts of shared SNPs, rescaled into a dissimilarity in
    # [0, 1] (more shared SNPs -> smaller distance).
    snp.dist <- as.matrix(tcrossprod(t(sparse.data$snp.matrix>0)))
    snp.dist <- stats::as.dist((max(snp.dist)-snp.dist)/max(snp.dist))
    if(method=="genie"){
      h <- genie::hclust2(d=snp.dist, useVpTree=FALSE)
      h$labels <- colnames(sparse.data$snp.matrix)
    } else {
      h <- stats::hclust(snp.dist, method = "ward.D2")
    }
  } else {
    if(!quiet){
      print("Large number of sequences so using an initial PCA and the genie hierarchical clustering algorithm.")
    }
    # Dense 0/1 matrix with isolates in rows for the SVD.
    temp.matrix <- 1*t(sparse.data$snp.matrix>0)
    # pc <- irlba::prcomp_irlba(temp.matrix, n=50)
    # Truncated SVD with implicit column centring; keep 50 components.
    svd <- irlba::irlba(temp.matrix, nv=50, tol=0.1, center=colMeans(temp.matrix))
    # Principal-component scores: left singular vectors scaled by the
    # singular values.
    x <- t(t(svd$u) * svd$d)
    if(method=="genie"){
      h <- genie::hclust2(d="euclidean", objects = x, useVpTree=FALSE, thresholdGini = 1)
      h$labels <- colnames(sparse.data$snp.matrix)
    } else {
      h <- fastcluster::hclust.vector(X = x, method = "ward")
      h$labels <- colnames(sparse.data$snp.matrix)
    }
    gc()
  }
  return(h)
}
|
a827d25a8e8537c226886e21da2c80a3da73d005
|
f5a57fa0d904f9ca6844e72e75d46699852aac5d
|
/Code/07a-vector_evaluation_100k_50d.R
|
cc76ba6fe760a1ecec54c0564489ce6091ce3b9b
|
[] |
no_license
|
rymc9384/Track2Vec
|
2b11b2708b2f121e7888f8c231b136e97f1da9c9
|
850fccc9772c7721ce606ab114fd0d41bb195283
|
refs/heads/master
| 2021-03-16T10:56:47.993501
| 2017-07-21T19:23:17
| 2017-07-21T19:23:17
| 82,110,816
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 5,255
|
r
|
07a-vector_evaluation_100k_50d.R
|
## Author: Ryan McMahon
## File: "~/Track2Vec/Code/07a-vector_evaluation_100k_50d.R"
## Date Created: 03/07/2017
## Date Last Modified: 03/07/2017
##
## Purpose: Explore the vectors produced by a GloVe model with the most frequent
# 100k (99,997) tracks on Spotify; embedding dimension is 50.
#
#
## NOTES:
#
#
#
## EDITS:
#
#
## Program and Hardware Information:
# R 3.1.3 "Smooth Sidewalk"; 64-bit
# Windows 8.1; MSi GE60 2PL Apache Laptop
################################################################################
##############################
### 0) SET UP
##############################
rm(list=ls())
options(stringsAsFactors = F)
set.seed(11769)
setwd("C:/Users/rbm166/Dropbox/Track2Vec/Data/")
# 0a) Cosine Similarity Function:
# Append the cosine similarity between a reference embedding and every row
# of an embedding matrix as a new column of `data`.
#
# Args:
#   x.vec:       reference embedding as a 1 x d row (e.g. a data-frame row).
#   y.vec:       n x d matrix/data frame of embeddings to compare against.
#   data:        data frame with n rows that receives the new column.
#   new_colname: name of the column to store the similarities in.
#
# Returns `data` with the extra similarity column.
cos_similarity <- function(x.vec, y.vec, data, new_colname){
  x.vec <- as.matrix(x.vec)
  y.vec <- as.matrix(y.vec)
  # FIX: the previous version stored raw dot products; cosine similarity
  # requires dividing by the norms of both vectors. (Zero-norm rows yield
  # NaN, matching the undefined similarity in that case.)
  dot <- y.vec %*% t(x.vec)
  x.norm <- sqrt(sum(x.vec^2))
  y.norm <- sqrt(rowSums(y.vec^2))
  data[, new_colname] <- dot[, 1] / (y.norm * x.norm)
  return(data)
}
# 0b) Analogy Function:
# Score every embedding in `y.vec` against the analogy
# "q_vecs[[1]] is to q_vecs[[2]] as q_vecs[[3]] is to ___".
# The query vector (q2 - q1 + q3) is unit-normalised and its dot product
# with each row of y.vec is stored in a new column of `data`.
analogy <- function(q_vecs, y.vec, data, new_colname){
  # 'q_vecs' is an ordered list of the analogy vectors;
  # e.g., "q_vecs[[1]] is to q_vecs[[2]] as q_vecs[[3]] is to ______"
  # FIX: the original used print() + break, and `break` outside a loop
  # raises an obscure "no loop for break/next" error; signal a proper
  # error instead.
  if(length(q_vecs) != 3) {
    stop("Need list of three vectors! In order!", call. = FALSE)
  }
  x.vec <- as.matrix(q_vecs[[2]] - q_vecs[[1]] + q_vecs[[3]])
  # Normalise the query vector to unit length.
  d <- sqrt(sum(x.vec^2))
  x.norm <- t(t(x.vec)/d)
  y.vec <- as.matrix(y.vec)
  dist <- y.vec %*% t(x.norm)
  data[,new_colname] <- dist[,1]
  return(data)
}
##############################
### 1) DATA PROCESSING
##############################
# 1a) Read in the track info and vectors:
df <- read.csv(file = "01-vectors_lookup_combined100k_50d.csv")
# 1b) Rename the first column from 'X' to 'track_id':
colnames(df)[1] <- "track_id"
# 1c) Order the data frames by track frequency:
df <- df[order(df$freq, decreasing=T), ]
# 1d) Put the embeddings into a separate matrix:
X <- df[,grep(pattern = "X[0-9]{1,2}", x = colnames(df), ignore.case = T)]
# 1e) Create a training set for PCA:
train.samp <- sample(x = nrow(X), size = 5000, replace = T)
X.train <- X[train.samp,]
##############################
### 2) PRINCIPAL COMPONENTS
##############################
# 2a) Fit PCA model to the training data:
vec.pca <- prcomp(x = X.train, retx = T, center = F, scale. = F)
# 2b) Inspect the results:
summary(vec.pca)
plot(vec.pca)
biplot(vec.pca)
# 2c) Fit the model to all of the embeddings:
X.pca <- predict(object = vec.pca, newdata = X)
# 2d) Generate new data frame with rotated embeddings:
new.df <- as.data.frame(cbind(df[,1:14], X.pca))
##############################
### 3) ANALYSIS:
##############################
plot(new.df$PC1[1:250], new.df$PC2[1:250], type = "n")
text(x = new.df$PC1[1:250], y = new.df$PC2[1:250], labels = new.df$artist_names[1:250], cex = 0.5)
# 3a) Similarity
similarity.df <- df[,1:14]
# i) 'Me, Myself, and I' - G-Eazy & Bebe Rexha
id.temp <- '40YcuQysJ0KlGQTeGUosTC'
x.vec <- df[df$track_id==id.temp, 15:ncol(df)]
y.vec <- df[, 15:ncol(df)]
new_colname <- "me.my.i.cos"
similarity.df <- cos_similarity(x.vec = x.vec, y.vec = y.vec,
new_colname = new_colname, data = similarity.df)
head(similarity.df[order(similarity.df$me.my.i.cos, decreasing=T),], 20)
# ii) 'Killing in the Name' - Rage Against the Machine
id.temp <- "59WN2psjkt1tyaxjspN8fp"
x.vec <- df[df$track_id==id.temp, 15:ncol(df)]
new_colname <- "kill.in.name.cos"
similarity.df <- cos_similarity(x.vec = x.vec, y.vec = y.vec,
new_colname = new_colname, data = similarity.df)
head(similarity.df[order(similarity.df$kill.in.name.cos, decreasing=T),], 20)
# iii) 'Drunk' - Ed Sheeran
id.temp <- "4RnCPWlBsY7oUDdyruod7Y"
x.vec <- df[df$track_id==id.temp, 15:ncol(df)]
new_colname <- "drunk.sheeran.cos"
similarity.df <- cos_similarity(x.vec = x.vec, y.vec = y.vec,
new_colname = new_colname, data = similarity.df)
head(similarity.df[order(similarity.df$drunk.sheeran.cos, decreasing=T),], 20)
# iv) Average of all 50 Cent songs:
x.vec <- t(apply(df[grep(pattern = '50 Cent', x = df$artist_names), 15:ncol(df)],2,mean))
new_colname <- "fiftycent.avg.cos"
similarity.df <- cos_similarity(x.vec = x.vec, y.vec = y.vec,
new_colname = new_colname, data = similarity.df)
head(similarity.df[order(similarity.df$fiftycent.avg.cos, decreasing=T),], 20)
# 3b) Analogies:
analogy.df <- df[,1:14]
# i) "Photograph" - Ed Sheeran is to "Thinking Out Loud" - Ed Sheeran as "Enter Sandman" - Metallica is to _________
# - the Ed Sheeran songs are on the same album.
ids.temp <- c('1HNkqx9Ahdgi1Ixy2xkKkL', '34gCuhDGsG4bRPIf9bb02f', '1hKdDCpiI9mqz1jVHRKG0E')
q_vecs = list(df[df$track_id==ids.temp[1], 15:ncol(df)],
df[df$track_id==ids.temp[2], 15:ncol(df)],
df[df$track_id==ids.temp[3], 15:ncol(df)])
y.vec <- df[, 15:ncol(df)]
new_colname <- "ed_ed_metallica.analogy"
analogy.df <- analogy(q_vecs = q_vecs, y.vec = y.vec,
new_colname = new_colname, data = analogy.df)
head(analogy.df[order(analogy.df$ed_ed_metallica.analogy, decreasing=T),], 20)
|
b8506d04f843d4931f98226770cf4e4d9e8fbd89
|
51e5e5eca9abc684f9b4d93488af410b456b98d2
|
/script.R
|
a558031b51fd5d668e60e346604bb8a469906269
|
[] |
no_license
|
cndesantana/relatorio_brandwatch
|
376a61db56400d59c90b1414b962e1b594bfd201
|
39f7ac670fd96e53438a4dad4238c8519be021b9
|
refs/heads/master
| 2022-12-25T04:52:21.588321
| 2020-09-30T00:03:04
| 2020-09-30T00:03:04
| 298,928,076
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 27,958
|
r
|
script.R
|
library(tidyverse)
library(plotrix)
library(stringr)
library(quanteda)
library(readtext)
library(tm)
library(dplyr)
library(scales)
library(qdapRegex)
library(grDevices)
library(treemap)
library(stylo)
library(tidytext)
library(tokenizers)
library(rtweet)
library(readxl)
library(lubridate)
library(treemapify)
### Variables and Functions
corpositivo <- "#20B2AA";
cornegativo <- "#c00000";
corneutro <- "#FFA500";
cornovo <- "orange3"
badwords <- c("compartilhado","boa","scontent.xx.fbcdn.net","https","oh","oe","pra","v","y9zuzurte",
"como","para","de","do","da","das","dos","isso","esse",
"nisso","nesse","aquele","nesses","aqueles","aquela",
"aquelas","que","q","é","sr","senhor","comentário","perfil",
"mais","com","está","por","uma","tem","vai","pelo","meu",
"sobre","não","já","nos","sem","quando","xed","xbd","ser",
"xbe","xa0","x8f","xb9","xb2","xb0","xb1","xb8","x8c","xa3",
"xbc","xaa","www.youtube.com","scontent.xx.fbcdn.net","https",
"oh","oe","pra","v","como","para","de","do","da","das","dos",
"isso","esse","nisso","nesse","aquele","nesses","aqueles","aquela",
"aquelas","que","q","é","sr","senhor","comentário","perfil","r","que",
"nao","sim","comentário","feito","comentario","imagem","comentario feito no perfil de secretaria",
"secretaria","foi","photos","http","bit.ly","sou","mais","bahia","vídeo","timeline","video","er",
"enem","vçpt","vç","x","vc", "aqui", "você", "tá", "dia", "amanhã", "ba","aqui","governador",
"com","que","nao","meu","mais","por","uma",
"pra","para","um","mais","mas","clap","para","tone","skin","type","heart","facebook","iticas","munici","3","4",
"unamused","esses","essas","até","são","ate","sao","todas","todos","toda","todo","essa", "esse","2")
palette <- c("#ff9ff3","#feca57","#ff6b6b","#48dbfb","#1dd1a1")
# Compute y-axis label positions for the stacked sentiment bar chart built
# in plotSerieTemporal(). `test` is a data frame with columns Data (day),
# Sentiment and freq (per-day share). Positive labels sit near the bottom
# (0.02), negative labels near the top (0.98), and neutral labels sit just
# above the positive segment of the same day.
#
# FIX: the original compared against lowercase "positive"/"negative"/
# "neutral" while the only caller upper-cases Sentiment, so no position
# was ever matched; the comparison is now case-insensitive.
getPositionY <- function(test){
  sentiments <- toupper(test$Sentiment)
  labelpos <- rep(NA_real_, length(sentiments))
  labelpos[sentiments == "POSITIVE"] <- 0.02
  labelpos[sentiments == "NEGATIVE"] <- 0.98
  # Neutral labels are stacked on top of the same day's positive share
  # (0 when the day has no positive entry).
  for (i in which(sentiments == "NEUTRAL")) {
    pos.row <- which(test$Data == test$Data[i] & sentiments == "POSITIVE")
    base <- if (length(pos.row) > 0) test$freq[pos.row[1]] else 0
    labelpos[i] <- base + 0.02
  }
  return(labelpos)
}
# Tokenise free text into single words (unigrams) after removing Portuguese
# stopwords and the custom `badwords` list, dropping very short words,
# collapsing whitespace and stripping accents.
# Returns a one-column data frame with column `words`.
getUnigram <- function(text){
  text <- removeWords(text,c(stopwords("portuguese"),badwords))
  text <- rm_nchar_words(text, n= "1")#remove words with exactly one character
  text <- rm_nchar_words(text, n="2")#remove words with exactly two characters
  text <- rm_nchar_words(text, n="3")#remove words with exactly three characters
  text <- gsub(" *\\b[[:alpha:]]{1,2}\\b *", " ", text)# Remove 1-2 letter words
  text <- gsub("^ +| +$|( ) +", "\\1", text) # Remove excessive spacing
  text <- stringi::stri_trans_general(text, "latin-ascii")  # strip accents
  unigram <- data.frame(words = unlist(tokenize_ngrams(text, n = 1L, n_min = 1L, simplify = TRUE)))
  return(unigram)
}
# Tokenise free text into three-word sequences (trigrams) using the same
# cleaning pipeline as getUnigram(). Returns a one-column data frame with
# column `words` (each entry is a space-separated trigram).
getTrigram <- function(text){
  text <- removeWords(text,c(stopwords("portuguese"),badwords))
  text <- rm_nchar_words(text, n= "1")#remove words with exactly one character
  text <- rm_nchar_words(text, n="2")#remove words with exactly two characters
  text <- rm_nchar_words(text, n="3")#remove words with exactly three characters
  text <- gsub(" *\\b[[:alpha:]]{1,2}\\b *", " ", text) # Remove 1-2 letter words
  text <- gsub("^ +| +$|( ) +", "\\1", text) # Remove excessive spacing
  text <- stringi::stri_trans_general(text, "latin-ascii")  # strip accents
  unigram <- data.frame(words = unlist(tokenize_ngrams(text, n = 3L, n_min = 3L, simplify = TRUE)))
  return(unigram)
}
# Transliterate a character vector to plain ASCII (accents are mapped to
# their closest ASCII equivalents where the locale's iconv supports
# "//TRANSLIT").
fa <- function(x) {
  iconv(x, to = "ASCII//TRANSLIT")
}
# Build a word-frequency table from free text: lowercase, strip accents,
# drop short words/stopwords/punctuation, then count term occurrences via a
# tm TermDocumentMatrix. Returns a data frame with columns `word` and
# `freq`, sorted by decreasing frequency.
getMatrizDeOcorrencias <- function(text){
  text <- stringi::stri_trans_tolower(text)
  temp <- fa(text)
  temp <- rm_nchar_words(temp, "1")#remove words with only one character
  temp <- rm_nchar_words(temp, "2")#remove words with two characters
  temp <- rm_nchar_words(temp, "3")#remove words with three characters
  # Collapse any run of whitespace / non-printable characters into a
  # single space.
  temp <- stringr::str_replace_all(temp,"[\\s]+", " ")
  temp=str_replace_all(temp,"[^[:graph:]]", " ")
  # Split into individual tokens.
  # NOTE(review): the any(grepl(...)) call below computes a value that is
  # never used — it looks like a leftover encoding sanity check.
  any(grepl("I_WAS_NOT_ASCII", iconv(temp, "latin1", "ASCII", sub="I_WAS_NOT_ASCII")))
  temp <- stringi::stri_trans_general(temp, "latin-ascii")
  temp <- removePunctuation(temp)
  temp <- unlist(stringr::str_split(temp, " "))
  # Get rid of trailing "" if necessary
  indexes <- which(temp == "")
  if(length(indexes) > 0){
    temp <- temp[-indexes]
  }
  # One "document" per token; tm is used only for its cleaning/counting.
  docs <- Corpus(VectorSource(temp))
  docs <- tm_map(docs, removeNumbers)
  # Remove common Portuguese stopwords.
  docs <- tm_map(docs, removeWords, stopwords("portuguese"))
  # Remove the project-specific stopword list.
  docs <- tm_map(docs, removeWords, c("blabla1", "blabla2","que","ser","pelo","tem","o","lhe","por","pra","de","da","do","essa","esse","isso","aquele","aquilo","desse","disso","daquilo","uma","um","NA"))
  # Collapse remaining extra whitespace.
  docs <- tm_map(docs, stripWhitespace)
  #
  dtm <- TermDocumentMatrix(docs)
  m <- as.matrix(dtm)
  v <- sort(rowSums(m),decreasing=TRUE)
  d <- data.frame(word = names(v),freq=v)
  return(d)
}
# Favourability index: the share of comments whose polarity is "POSITIVO"
# (case-insensitive), rounded to two decimals. Returns 0 for empty input.
getIndiceDeFavorabilidade <- function(polarization){
  total <- length(polarization)
  if (total == 0) {
    return(0)
  }
  positives <- sum(toupper(polarization) == "POSITIVO", na.rm = TRUE)
  round(positives / total, digits = 2)
}
# Reproduce ggplot2's default discrete colour scale: n evenly spaced hues
# on the HCL colour wheel (chroma 100, luminance 65).
ggplotColours <- function(n = 6, h = c(0, 360) + 15) {
  # When the hue range wraps the full circle, shrink the end so the first
  # and last colours are not identical.
  if ((diff(h) %% 360) < 1) {
    h[2] <- h[2] - 360 / n
  }
  hues <- seq(h[1], h[2], length = n)
  hcl(h = hues, c = 100, l = 65)
}
# Draw a vertical colour-bar "gauge" (0 to 1) with an arrow marking the
# favourability index of the loaded spreadsheet.
# NOTE(review): despite taking a `file` argument, this function re-reads
# the data from `input$file$datapath` — a leftover from a Shiny server
# context; `input` is undefined when this script runs standalone. Also
# note this definition is overridden by a ggplot-based version of the same
# name later in the script.
plotIndiceFavorabilidade = function(file) {
  filepath <- input$file$datapath
  file <- read_xlsx(filepath)
  allpolarization <- toupper(file$polarization)
  isent <- getIndiceDeFavorabilidade(allpolarization);
  # Gradient legend from positive (bottom) to neutral colour (top).
  colfunc <- colorRampPalette(c(corpositivo,corneutro))
  legend_image <- as.raster(matrix(colfunc(20), ncol=1))
  plot(c(1,20),c(0,10),type = 'n', axes = F,xlab = '', ylab = '', main = '')
  # Arrow (U+25C0) and index value at the height proportional to isent.
  text(x=3, y = 10*signif(isent,2), labels = paste(intToUtf8(9664),paste0(signif(isent,2))),pos=4)
  text(x = 0.45, y=10, labels = 1,pos = 4)
  text(x = 0.4, y=0, labels = 0,pos = 4)
  rasterImage(legend_image, 1,0.1,3,9.9)
}
# Horizontal bar chart of the 20 users with the most comments, bars filled
# by comment polarity (Positivo/Negativo/Neutro).
# NOTE(review): reads from `input$file$datapath` (Shiny leftover; `input`
# is undefined in this standalone script) and ignores having no argument
# to receive the data.
plotDetratoresApoiadores = function() {
  filepath <- input$file$datapath
  file <- read_xlsx(filepath)
  file %>%
    dplyr::select(user_id, polarization) %>%
    group_by(user_id) %>%
    count(user_id, polarization) %>%
    arrange(n, user_id) %>%
    tail(20) %>%
    ggplot() +
    geom_bar(stat = "identity",
             aes(x = reorder(user_id,as.numeric(n)), y = as.numeric(n), fill = polarization)) +
    ylab("Número de comentários") +
    xlab("") +
    scale_fill_manual("Polaridade", values = c("Positivo" = corpositivo, "Negativo" = cornegativo, "Neutro" = corneutro)) +
    geom_text( aes (x = reorder(user_id,as.numeric(n)), y = as.numeric(n), label = as.numeric(n) ) , vjust = 0, hjust = 0, size = 2 ) +
    coord_flip() +
    theme_bw()
}
# Horizontal bar chart of the 30 most frequent words in negative comments.
# NOTE(review): reads from `input$file$datapath` (Shiny leftover; `input`
# is undefined here) and calls getDFMatrix(), which is not defined in this
# script — presumably provided elsewhere; confirm before running.
plotPalavrasDetratores = function(){
  filepath <- input$file$datapath
  file <- read_xlsx(filepath)
  text <- file %>%
    filter(toupper(polarization) == "NEGATIVO") %>%
    dplyr::select(text) %>%
    toupper()
  mydfm <- getDFMatrix(text);
  words_td <- topfeatures(mydfm,30)
  ggplot() +
    geom_bar(stat = "identity",
             aes(x = reorder(names(words_td),as.numeric(words_td)), y = as.numeric(words_td)),
             fill = cornegativo) +
    ylab("Número de ocorrências") +
    xlab("") +
    labs(title = "Palavras mais citadas por detratores")+
    geom_text( aes (x = reorder(names(words_td),as.numeric(words_td)), y = words_td, label = words_td ) , vjust = 0, hjust = 0, size = 2 ) +
    coord_flip() +
    theme_bw()
}
# Horizontal bar chart of the 30 most frequent words in positive comments.
# NOTE(review): same caveats as plotPalavrasDetratores — `input` is a
# Shiny leftover and getDFMatrix() is not defined in this script.
plotPalavrasApoiadores = function(){
  filepath <- input$file$datapath
  file <- read_xlsx(filepath)
  text <- file %>%
    filter(toupper(polarization) == "POSITIVO") %>%
    dplyr::select(text) %>%
    toupper()
  mydfm <- getDFMatrix(text);
  words_td <- topfeatures(mydfm, 30)
  ggplot() +
    geom_bar(stat = "identity",
             aes(x = reorder(names(words_td),as.numeric(words_td)), y = as.numeric(words_td)),
             fill = corpositivo) +
    ylab("Número de ocorrências") +
    xlab("") + ggtitle("Palavras mais citadas por apoiadores") +
    geom_text( aes (x = reorder(names(words_td),as.numeric(words_td)), y = words_td, label = words_td ) , vjust = 0, hjust = 0, size = 2 ) +
    coord_flip() +
    theme_bw()
}
# Stacked bar chart of the daily share of post sentiments (POSITIVE /
# NEGATIVE / NEUTRAL) with percentage labels placed by getPositionY().
# `amostra` must have Date, Sentiment and Domain columns.
# NOTE(review): str_remove_all(..., ".com") treats "." as a regex
# wildcard, so it can strip unintended 4-character substrings — confirm.
plotSerieTemporal = function(amostra) {
  media <- stringr::str_remove_all(amostra$Domain,".com")
  # Per-day sentiment counts converted to within-day fractions.
  df_datas <- amostra %>%
    mutate( Data = ymd_hms(Date) %>%
              as.Date() %>%
              format("%d/%m/%Y"),
            Sentiment = as.factor(toupper(Sentiment))
    ) %>%
    group_by(Data, Sentiment) %>%
    summarise(count = n()) %>%
    group_by(Data) %>%
    mutate(freq = count / sum(count))
  primeirodia <- min(dmy(df_datas$Data));
  ultimodia <- max(dmy(df_datas$Data))
  ggplot(df_datas, aes(x=dmy(Data), y=freq, fill=Sentiment)) +
    geom_bar(position = "stack", stat = "identity") +
    scale_x_date(date_breaks = "1 day",
                 labels = date_format("%d/%m/%Y")) +
    theme(text = element_text(size=6), axis.text.x = element_text(angle=45, hjust=1)) +
    scale_y_continuous(labels=scales::percent) +
    labs (title = paste("Sentimento dos posts - ",media), subtitle = paste("Semana",paste(range(df_datas$Data),collapse = " a ")), x = "", y = "Porcentagem de Posts") +
    theme(text = element_text(size=6), axis.text.x = element_text(angle=45, hjust=1)) +
    coord_cartesian(xlim = c(primeirodia, ultimodia)) +
    scale_fill_manual("Sentimento", values = c("POSITIVE" = corpositivo, "NEGATIVE" = cornegativo, "NEUTRAL" = corneutro)) +
    geom_text(size = 2, col = "white", aes(x = dmy(Data), y = getPositionY(df_datas), label = paste(as.character(100*round(df_datas$freq,2)),"%",sep=""))) + theme_bw();
}
## Treemap
# Treemap of the 50 most frequent unigrams in `text`, with tile area and
# opacity proportional to word frequency and fill colour bucketed into
# five frequency classes.
# NOTE(review): the class thresholds are fifths of the maximum count, so
# the literal labels ("de 1 a 5", etc.) do not match the actual cut
# points — they only order the buckets.
plotTreemap = function(text){
  unigram <- getUnigram(text)
  unigram <- unigram %>%
    filter(!(words %in% badwords))%>%
    filter(!is.na(words)) %>%
    select(words) %>% group_by(words) %>%
    summarise(palavras = n()) %>%
    arrange(palavras) %>% tail(50)
  # Bucket width: one fifth of the highest word count.
  numerodereferencia <- max(unigram$palavras) %/% 5
  unigram <- unigram %>%
    mutate(classe = case_when(palavras < numerodereferencia ~ "de 1 a 5",
                              palavras < 2*numerodereferencia ~ "de 5 a 10",
                              palavras < 3*numerodereferencia ~ "de 10 a 50",
                              palavras < 4*numerodereferencia ~ "de 50 a 100",
                              palavras >= 4*numerodereferencia ~ "mais que 100")) %>%
    mutate(classe = factor(classe, levels = c("de 1 a 5", "de 5 a 10", "de 10 a 50", "de 50 a 100", "mais que 100")))
  ggplot(unigram, aes(area = palavras,
                      fill = palette[as.numeric(classe)],
                      label = words,
                      subgroup=palavras)) +
    geom_treemap(fill = "black") +
    geom_treemap(aes(alpha=palavras)) +
    geom_treemap_text(fontface = "italic", colour = "white", place = "centre",
                      grow = F, reflow=TRUE) +
    geom_treemap_subgroup_text(place = "bottomright", grow = F, alpha = 1,
                               col="white", cex=10) +
    ggtitle("Palavras mais comentadas - #ForaKalil")+
    scale_fill_identity() +
    scale_alpha_continuous(range = c(0.4, 1),guide = 'none')
}
# Treemap of the 50 most frequent unigrams in NEGATIVE comments, tinted
# with the negative colour.
# NOTE(review): filters polarization against "NEGATIVE" while other
# functions in this script use Portuguese labels ("NEGATIVO") — confirm
# which labels this spreadsheet actually carries.
plotTreemapNegativo = function(file){
  media <- stringr::str_remove_all(file$Domain,".com")
  text <- toupper(file$text[which(toupper(file$polarization) == "NEGATIVE")])
  unigram <- getUnigram(text)
  unigram <- unigram %>%
    filter(!(words %in% badwords))%>%
    filter(!is.na(words)) %>%
    select(words) %>% group_by(words) %>%
    summarise(palavras = n()) %>%
    arrange(palavras) %>% tail(50)
  # Bucket width: one fifth of the highest word count (labels are ordinal
  # only; see plotTreemap).
  numerodereferencia <- max(unigram$palavras) %/% 5
  unigram <- unigram %>%
    mutate(classe = case_when(palavras < numerodereferencia ~ "de 1 a 5",
                              palavras < 2*numerodereferencia ~ "de 5 a 10",
                              palavras < 3*numerodereferencia ~ "de 10 a 50",
                              palavras < 4*numerodereferencia ~ "de 50 a 100",
                              palavras >= 4*numerodereferencia ~ "mais que 100")) %>%
    mutate(classe = factor(classe, levels = c("de 1 a 5", "de 5 a 10", "de 10 a 50", "de 50 a 100", "mais que 100")))
  colfunc <- colorRampPalette(c(cornegativo))
  ggplot(unigram, aes(area = palavras,
                      fill = colfunc(5)[as.numeric(classe)],
                      label = words,
                      subgroup=palavras)) +
    geom_treemap(fill = "black") +
    geom_treemap(aes(alpha=palavras)) +
    geom_treemap_text(fontface = "italic", colour = "white", place = "centre",
                      grow = F, reflow=TRUE) +
    geom_treemap_subgroup_text(place = "bottomright", grow = F, alpha = 1,
                               col="white", cex=10) +
    ggtitle(paste("Palavras mais comentadas em comentários Negativos - ",media))+
    scale_fill_identity() +
    scale_alpha_continuous(range = c(0.4, 1),guide = 'none')
}
# Treemap of the 50 most frequent unigrams in POSITIVE comments, tinted
# with the positive colour. Same structure and caveats as
# plotTreemapNegativo (label-vs-threshold mismatch; "POSITIVE" vs
# "POSITIVO" polarity labels — confirm against the data).
plotTreemapPositivo = function(file){
  media <- stringr::str_remove_all(file$Domain,".com")
  text <- toupper(file$text[which(toupper(file$polarization) == "POSITIVE")])
  unigram <- getUnigram(text)
  unigram <- unigram %>%
    filter(!(words %in% badwords))%>%
    filter(!is.na(words)) %>%
    select(words) %>% group_by(words) %>%
    summarise(palavras = n()) %>%
    arrange(palavras) %>% tail(50)
  numerodereferencia <- max(unigram$palavras) %/% 5
  unigram <- unigram %>%
    mutate(classe = case_when(palavras < numerodereferencia ~ "de 1 a 5",
                              palavras < 2*numerodereferencia ~ "de 5 a 10",
                              palavras < 3*numerodereferencia ~ "de 10 a 50",
                              palavras < 4*numerodereferencia ~ "de 50 a 100",
                              palavras >= 4*numerodereferencia ~ "mais que 100")) %>%
    mutate(classe = factor(classe, levels = c("de 1 a 5", "de 5 a 10", "de 10 a 50", "de 50 a 100", "mais que 100")))
  colfunc <- colorRampPalette(c(corpositivo))
  ggplot(unigram, aes(area = palavras,
                      fill = colfunc(5)[as.numeric(classe)],
                      label = words,
                      subgroup=palavras)) +
    geom_treemap(fill = "black") +
    geom_treemap(aes(alpha=palavras)) +
    geom_treemap_text(fontface = "italic", colour = "white", place = "centre",
                      grow = F, reflow=TRUE) +
    geom_treemap_subgroup_text(place = "bottomright", grow = F, alpha = 1,
                               col="white", cex=10) +
    ggtitle(paste("Palavras mais comentadas em comentários Positivos - ",media))+
    scale_fill_identity() +
    scale_alpha_continuous(range = c(0.4, 1),guide = 'none')
}
# Treemap of the 50 most frequent unigrams in NEUTRAL comments, tinted
# with the neutral colour. Same structure and caveats as
# plotTreemapNegativo ("NEUTRAL" vs "NEUTRO" polarity labels — confirm
# against the data).
plotTreemapNeutro = function(file){
  media <- stringr::str_remove_all(file$Domain,".com")
  text <- toupper(file$text[which(toupper(file$polarization) == "NEUTRAL")])
  unigram <- getUnigram(text)
  unigram <- unigram %>%
    filter(!(words %in% badwords))%>%
    filter(!is.na(words)) %>%
    select(words) %>% group_by(words) %>%
    summarise(palavras = n()) %>%
    arrange(palavras) %>% tail(50)
  numerodereferencia <- max(unigram$palavras) %/% 5
  unigram <- unigram %>%
    mutate(classe = case_when(palavras < numerodereferencia ~ "de 1 a 5",
                              palavras < 2*numerodereferencia ~ "de 5 a 10",
                              palavras < 3*numerodereferencia ~ "de 10 a 50",
                              palavras < 4*numerodereferencia ~ "de 50 a 100",
                              palavras >= 4*numerodereferencia ~ "mais que 100")) %>%
    mutate(classe = factor(classe, levels = c("de 1 a 5", "de 5 a 10", "de 10 a 50", "de 50 a 100", "mais que 100")))
  colfunc <- colorRampPalette(c(corneutro))
  ggplot(unigram, aes(area = palavras,
                      fill = colfunc(5)[as.numeric(classe)],
                      label = words,
                      subgroup=palavras)) +
    geom_treemap(fill = "black") +
    geom_treemap(aes(alpha=palavras)) +
    geom_treemap_text(fontface = "italic", colour = "white", place = "centre",
                      grow = F, reflow=TRUE) +
    geom_treemap_subgroup_text(place = "bottomright", grow = F, alpha = 1,
                               col="white", cex=10) +
    ggtitle(paste("Palavras mais comentadas em comentários Neutros - ",media))+
    scale_fill_identity() +
    scale_alpha_continuous(range = c(0.4, 1),guide = 'none')
}
# Line chart of the daily favourability index (share of POSITIVE posts per
# day, rounded to two decimals). This definition overrides the earlier
# gauge-style plotIndiceFavorabilidade defined above in this script.
plotIndiceFavorabilidade = function(file) {
  media <- stringr::str_remove_all(file$Domain,".com")
  file %>% mutate(Date = ymd_hms(Date),
                  Date = floor_date(Date, unit="day"),
                  Sentiment = toupper(Sentiment)) %>%
    group_by(Date) %>%
    # mt = posts per day; mp = positive posts per day.
    mutate(mt = n(),
           ispositive = if_else(Sentiment=="POSITIVE",1,0)) %>%
    group_by(Date, mt) %>% summarise(mp = sum(ispositive)) %>%
    ungroup()%>%
    mutate(isent=ifelse(mt == 0, 0, as.numeric(mp/mt)),
           isent=round((isent),digits=2)) %>%
    ggplot(aes(x = Date, y = isent)) +
    geom_line(cex = 1.5, col = corpositivo) +
    geom_point(cex=5, col = corpositivo)+
    theme_minimal()+
    labs(title = paste("Índice de Favorabilidade - ",media),
         subtitle = paste("Semana",paste(range(ymd_hms(file$Date) %>% format("%d-%m-%Y")),collapse = " a "))) +
    ylim(0,1)
}
# --- Data loading and period filtering ---------------------------------------
# NOTE(review): hard-coded absolute path; the script only runs on this machine.
setwd("/Users/isiscosta/RScript/BadogueHigh/Paiva_BH/")
### read data
# Live Twitter search (rtweet); result depends on when the script is run.
fora_kalil <- search_tweets("#forakalil", n=5000)
# Pre-exported monitoring spreadsheets; `skip` jumps over the report header.
data_ig <- read_xlsx("RP-IG.xlsx", skip = 8)
data_fb <- read_xlsx("RP-FB.xlsx", skip = 8)
data_tw <- read_xlsx("RP - Twitter .xlsx", skip = 7)
data_kalil <- read_xlsx("Kalil - Twitter.xlsx", skip=7)
###
# Profile metadata of the accounts behind the collected tweets.
data_users_fora_kalil <- users_data(fora_kalil)
###
#### select the period of the data
# Restrict all three sources to the week 2020-09-19 .. 2020-09-25 (exclusive
# bounds on both sides).
data_ig <- data_ig %>% filter(ymd_hms(Date) < ymd("2020-09-26"), ymd_hms(Date) > ymd("2020-09-18"))
data_fb <- data_fb %>% filter(ymd_hms(Date) < ymd("2020-09-26"), ymd_hms(Date) > ymd("2020-09-18"))
data_tw <- data_tw %>% filter(ymd_hms(Date) < ymd("2020-09-26"), ymd_hms(Date) > ymd("2020-09-18"))
### Plots
### Palavras mais citadas no Fora Kalil
# Top-30 words in the (de-duplicated) profile descriptions of #ForaKalil users.
comentarios_unicos <- data_users_fora_kalil %>% distinct(user_id, description) %>% select(description)
p1 <- getUnigram(comentarios_unicos$description) %>%
filter(!is.na(words)) %>%
count(words) %>% arrange(n) %>% tail(30) %>%
ggplot(aes(x = reorder(words,n), y = n)) +
geom_bar(stat="identity", fill = cornovo) + coord_flip() +
theme_minimal() +
labs(title = "Palavras mais citadas na descrição do perfil",
subtitle = "#ForaKalil",
y = "Número de menções",
x = "Palavra")
png("palavras_descricao_forakalil.png",width=3200,height=1800,res=300)
print(p1)
dev.off()
### Trigramas mais citados no Fora Kalil
# Same as above but with trigrams instead of single words.
comentarios_unicos <- data_users_fora_kalil %>% distinct(user_id, description) %>% select(description)
p1 <- getTrigram(comentarios_unicos$description) %>%
filter(!is.na(words)) %>%
count(words) %>% arrange(n) %>% tail(30) %>%
ggplot(aes(x = reorder(words,n), y = n)) +
geom_bar(stat="identity", fill = cornovo) + coord_flip() +
theme_minimal() +
labs(title = "Trigramas mais citadas na descrição do perfil",
subtitle = "#ForaKalil",
y = "Número de menções",
x = "Trigramas")
png("trigramas_descricao_forakalil.png",width=3200,height=1800,res=300)
print(p1)
dev.off()
### Location mais comum no Fora Kalil
# Normalise free-text profile locations to canonical labels before counting.
# A named lookup vector replaces the original chain of 13 one-off
# replacements; the mapping reproduces those substitutions exactly.
location_map <- c(
  "Belo Horizonte"                = "Belo Horizonte, Brasil",
  "Belo Horizonte - MG"           = "Belo Horizonte, Brasil",
  "Belo Horizonte MG"             = "Belo Horizonte, Brasil",
  "Belo Horizonte, Brazil"        = "Belo Horizonte, Brasil",
  "Belo Horizonte MG Brasil"      = "Belo Horizonte, Brasil",
  "Belo Horizonte, MG - BRASIL"   = "Belo Horizonte, Brasil",
  "BRASIL"                        = "Brasil",
  "Mineira"                       = "Minas Gerais, Brasil",
  "Minas, Brasil y Latinoamérica" = "Minas Gerais, Brasil",
  "Estados Unidos "               = "Estados Unidos",
  "EUA"                           = "Estados Unidos",
  "United States of America"      = "Estados Unidos"
)
hits <- data_users_fora_kalil$location %in% names(location_map)
data_users_fora_kalil$location[hits] <- location_map[data_users_fora_kalil$location[hits]]
# Catch-all: anything still mentioning "BH" is Belo Horizonte. which() drops
# the NA results str_detect() returns for NA locations; the original
# `x[str_detect(...)] <- v` form errors with "NAs are not allowed in
# subscripted assignments" as soon as a profile has no location.
bh_rows <- which(str_detect(data_users_fora_kalil$location, "BH"))
data_users_fora_kalil$location[bh_rows] <- "Belo Horizonte, Brasil"
# Top-10 locations among non-empty profiles.
p1 <- data_users_fora_kalil %>%
  filter(!is.na(location), location !="") %>%
  count(location) %>% arrange(n) %>% tail(10) %>%
  ggplot(aes(x = reorder(location,n), y = n)) +
  geom_bar(stat="identity", fill = cornovo) + coord_flip() +
  theme_minimal() +
  labs(title = "Cidades mais citadas na descrição do perfil", subtitle = "#ForaKalil",
       y = "Número de menções", x = "Palavra")
png("palavras_location_forakalil.png",width=3200,height=1800,res=300)
print(p1)
dev.off()
# --- Plot batch: each figure is rendered to a 3200x1800 PNG at 300 dpi -------
### treemap no Fora Kalil
png("treemap_forakalil_forakalil.png",width=3200,height=1800,res=300)
plotTreemap(fora_kalil$text)
dev.off()
#### Serie temporal
# Tweet volume per hour for the #ForaKalil search.
p1 <- fora_kalil %>%
mutate(data = ymd_hms(created_at), data = floor_date(data, unit = "hour")) %>%
count(data) %>% ggplot(aes(x = data, y = n)) +
geom_bar(stat="identity", fill = cornovo) + theme_minimal() +
labs(title = "Número de tweets no tempo", subtitle = "#ForaKalil",
x = "Tempo (por hora)", y = "Número de tweets")
png("serie_temporal_forakalil_forakalil.png",width=3200,height=1800,res=300)
print(p1)
dev.off()
###### serie temporal de sentimentos
# Sentiment time series for each platform (helper defined earlier in file).
png("serie_temporal_facebook.png",width=3200,height=1800,res=300)
plotSerieTemporal(data_fb)
dev.off()
png("serie_temporal_instagram.png",width=3200,height=1800,res=300)
plotSerieTemporal(data_ig)
dev.off()
png("serie_temporal_twitter.png",width=3200,height=1800,res=300)
plotSerieTemporal(data_tw)
dev.off()
##### plotar trigramas
# Top-30 trigrams per platform. Note the column name differs per export:
# Facebook uses `Full Text`, Instagram/Twitter use Snippet.
png("trigrama_facebook.png",width=3200,height=1800,res=300)
getTrigram(data_fb$`Full Text`) %>% filter(!is.na(words)) %>%
count(words) %>% arrange(n) %>% tail(30) %>%
ggplot(aes(x = reorder(words,n), y = n)) +
geom_bar(stat="identity", fill = cornovo) + coord_flip() +
theme_minimal() +
labs(title = "Trigramas mais citadas - Facebook",
y = "Número de menções",
x = "Trigramas")
dev.off()
png("trigrama_instagram.png",width=3200,height=1800,res=300)
getTrigram(data_ig$Snippet)%>%filter(!is.na(words)) %>%
count(words) %>% arrange(n) %>% tail(30) %>%
ggplot(aes(x = reorder(words,n), y = n)) +
geom_bar(stat="identity", fill = cornovo) + coord_flip() +
theme_minimal() +
labs(title = "Trigramas mais citadas - Instagram",
y = "Número de menções",
x = "Trigramas")
dev.off()
png("trigrama_twitter.png",width=3200,height=1800,res=300)
getTrigram(data_tw$Snippet)%>%filter(!is.na(words)) %>%
count(words) %>% arrange(n) %>% tail(30) %>%
ggplot(aes(x = reorder(words,n), y = n)) +
geom_bar(stat="identity", fill = cornovo) + coord_flip() +
theme_minimal() +
labs(title = "Trigramas mais citadas - Twitter",
y = "Número de menções",
x = "Trigramas")
dev.off()
###### plotar treemap positivo
# Per-sentiment treemaps; the helpers expect columns text/polarization/Domain.
png("treemap_positivo_facebook.png",width=3200,height=1800,res=300)
plotTreemapPositivo(data.frame(text = data_fb$`Full Text`, polarization = data_fb$Sentiment, Domain = data_fb$Domain))
dev.off()
png("treemap_positivo_instagram.png",width=3200,height=1800,res=300)
plotTreemapPositivo(data.frame(text = data_ig$Snippet, polarization = data_ig$Sentiment, Domain = data_ig$Domain))
dev.off()
png("treemap_positivo_twitter.png",width=3200,height=1800,res=300)
plotTreemapPositivo(data.frame(text = data_tw$Snippet, polarization = data_tw$Sentiment, Domain = data_tw$Domain))
dev.off()
###### plotar treemap negativo
png("treemap_negativo_facebook.png",width=3200,height=1800,res=300)
plotTreemapNegativo(data.frame(text = data_fb$`Full Text`, polarization = data_fb$Sentiment, Domain = data_fb$Domain))
dev.off()
png("treemap_negativo_instagram.png",width=3200,height=1800,res=300)
plotTreemapNegativo(data.frame(text = data_ig$Snippet, polarization = data_ig$Sentiment, Domain = data_ig$Domain))
dev.off()
png("treemap_negativo_twitter.png",width=3200,height=1800,res=300)
plotTreemapNegativo(data.frame(text = data_tw$Snippet, polarization = data_tw$Sentiment, Domain = data_tw$Domain))
dev.off()
###### plotar treemap neutro
png("treemap_neutro_facebook.png",width=3200,height=1800,res=300)
plotTreemapNeutro(data.frame(text = data_fb$`Full Text`, polarization = data_fb$Sentiment, Domain = data_fb$Domain))
dev.off()
png("treemap_neutro_instagram.png",width=3200,height=1800,res=300)
plotTreemapNeutro(data.frame(text = data_ig$Snippet, polarization = data_ig$Sentiment, Domain = data_ig$Domain))
dev.off()
png("treemap_neutro_twitter.png",width=3200,height=1800,res=300)
plotTreemapNeutro(data.frame(text = data_tw$Snippet, polarization = data_tw$Sentiment, Domain = data_tw$Domain))
dev.off()
###### Indice de Favorabilidade
png("favorabilidade_facebook.png",width=3200,height=1800,res=300)
plotIndiceFavorabilidade(data_fb)
dev.off()
png("favorabilidade_instagram.png",width=3200,height=1800,res=300)
plotIndiceFavorabilidade(data_ig)
dev.off()
png("favorabilidade_twitter.png",width=3200,height=1800,res=300)
plotIndiceFavorabilidade(data_tw)
dev.off()
plotTendenciaTemas <- function(df_data){
media <- stringr::str_remove_all(df_data$Domain, ".com")[1]
df_data %>% separate_rows(Tags, sep=",") %>%
filter(!is.na(Tags)) %>%
mutate(Tags = tolower(Tags), Tags = stringr::str_trim(Tags, side = "both")) %>%
group_by(Tags) %>%mutate(total = n()) %>% ungroup()%>%
group_by(Sentiment, Tags, total) %>%
summarise(parcial = n())%>%
ggplot(aes(x = reorder(Tags,total), y = parcial, fill = Sentiment)) +
geom_bar(stat="identity")+
coord_flip()+
theme_minimal()+
labs(title = paste("Temas dos posts - ",media),
x = "Temas",
y = "Número de ocorrências",
fill = "Sentimento",
subtitle = paste("Semana",paste(ymd_hms(df_data$Date) %>% format("%d-%m-%Y") %>% range(),collapse = " a ")))+
theme(text = element_text(size=12)) +
scale_fill_manual("Sentimento", values = c("positive" = corpositivo, "negative" = cornegativo, "neutral" = corneutro))
}
# Render the theme charts for each platform to PNG.
png("tendencia_temas_facebook.png",width=3200,height=1800,res=300)
plotTendenciaTemas(data_fb)
dev.off()
png("tendencia_temas_instagram.png",width=3200,height=1800,res=300)
plotTendenciaTemas(data_ig)
dev.off()
png("tendencia_temas_twitter.png",width=3200,height=1800,res=300)
plotTendenciaTemas(data_tw)
dev.off()
|
e84b8123e653f5c76bf38d4490472d3de997a2a9
|
4ac943bff7e252c3086ead0030847f4c52bc06af
|
/R/calc_rho.R
|
a42b6ca74e77a0975a6243bf37674afa196e7efa
|
[
"MIT"
] |
permissive
|
fboehm/xu2015
|
be2717a6e46acfac29c466b4a225f154b6a30f4f
|
edf08e79adf13ce82f91ab518166cc26d663a66b
|
refs/heads/master
| 2021-01-10T12:12:16.816133
| 2016-02-14T20:18:09
| 2016-02-14T20:18:09
| 50,595,752
| 8
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 5,895
|
r
|
calc_rho.R
|
#' Calculate rho(omega_current, omega_proposal)
#'
#' Acceptance ratio for a split move between a model with K components and one
#' with K + 1: the product of the likelihood ratio, the prior ratios for
#' weights, precisions and means, the proposal-probability terms and the
#' Jacobian determinant of the split transformation.
#'
#' @param omega_small parameter list (components mu, w, kappa, s) from the
#'   model with smaller $K$
#' @param omega_big parameter list (components mu, w, kappa, s) from the
#'   model with bigger $K$
#' @param y a data vector
#' @param theta DPP hyperparameter
#' @param tau DPP hyperparameter
#' @return numeric scalar, the acceptance ratio
#'
#' @export
calc_rho <- function(omega_small, omega_big, y, theta, tau){
#unpack omega_small
mu_small <- omega_small$mu
K_small <- length(mu_small)
w_small <- omega_small$w
kappa_small <- omega_small$kappa
s_small <- omega_small$s
#unpack omega_big
mu_big <- omega_big$mu
K_big <- length(mu_big)
w_big <- omega_big$w
kappa_big <- omega_big$kappa
s_big <- omega_big$s
# calculate qu: density of the auxiliary split variables (u1, u2, u3) under
# Beta(1,1), Beta(1,1), Beta(2,2) -- draws come from define_extra_parameters()
extras <- define_extra_parameters()
qu <- dbeta(extras[1], 1, 1) * dbeta(extras[2], 1, 1) * dbeta(extras[3], 2, 2)
# set qK_small_u: prob. of proposing a split (1 if K = 1, else 1/2)
qK_small_u <- (K_small == 1) + (K_small > 1) / 2
# set qK_small_sj: uniform choice of the component to split
qK_small_s <- 1 / K_small
# set q_K_big_d: prob. of proposing a merge from the bigger model
q_K_big_d <- 1 / 2
# set q_K_big_c: uniform choice of the ordered pair of components to merge
q_K_big_c <- 1 / (K_big * (K_big - 1))
# ratio1: everything except 1. detJ and 2. posterior ratio
ratio1 <- q_K_big_c * q_K_big_d / (K_big * qK_small_u * qK_small_s * qu)
# calc det J
detJ <- calc_detJ(w_big = w_big, w_small = w_small, kappa_big = kappa_big, kappa_small = kappa_small, r = extras[3])
# calc w ratio
w_ratio <- calc_w_ratio(w_big = w_big, w_small = w_small, s_big = s_big, s_small = s_small)
# calc kappa ratio
kappa_ratio <- calc_kappa_ratio(kappa_big = kappa_big, kappa_small = kappa_small, a = extras[1], b = extras[2])
# calc mu ratio
mu_ratio <- calc_mu_ratio(mu_big = mu_big, mu_small = mu_small, theta = theta, tau = tau)
# calc likelihood ratio
lik_ratio <- calc_lik_ratio(s_big = s_big, s_small = s_small, w_big = w_big, w_small = w_small,
mu_big = mu_big, mu_small = mu_small, kappa_big = kappa_big, kappa_small = kappa_small,
y = y)
# posterior ratio
out <- lik_ratio * kappa_ratio * w_ratio * mu_ratio * ratio1 * detJ
return(out)
}
#' Calculate determinant of Jacobian
#'
#' The split/merge move is identified by set differences: the big model has
#' two weights/precisions (the "tilde" pair) that the small model lacks, and
#' the small model has the single component that was split.
#'
#' @param w_big weight vector from model with greater $K$
#' @param w_small weight vector from model with smaller $K$
#' @param kappa_big kappa vector from model with greater $K$
#' @param kappa_small kappa vector from model with smaller $K$
#' @param r auxiliary split variable (third extra parameter)
#' @return numeric scalar, |det J| of the split transformation
#'
#' @export
calc_detJ <- function(w_big, w_small, kappa_big, kappa_small, r){
  ## identify the pair of new weights and the single old weight
  shared_w <- intersect(w_big, w_small)
  new_w <- w_big[!(w_big %in% shared_w)]
  old_w <- w_small[!(w_small %in% shared_w)]
  ## identify the old precision (the one only in kappa_small)
  shared_kappa <- intersect(kappa_small, kappa_big)
  old_kappa <- kappa_small[!(kappa_small %in% shared_kappa)]
  ## determinant formula (w1^4 * kappa1^{3/2} * (1 - r^2) / (w~1 w~2)^{3/2})
  old_w^4 * old_kappa^(3 / 2) * (1 - r^2) / (new_w[1] * new_w[2])^(3 / 2)
}
#' Calculate w ratio
#'
#' Prior/proposal ratio for the mixture weights under a symmetric
#' Dirichlet(delta) prior. The split pair in w_big and the split component in
#' w_small are found by set difference; the counts n1~, n2~ are how many
#' observations s_big allocates to the two new components.
#'
#' @param w_big w vector of weights from model with greater $K$
#' @param w_small w vector of weights from model with smaller $K$
#' @param s_big allocation vector for the bigger model
#' @param s_small allocation vector for the smaller model (currently unused)
#' @param delta Dirichlet concentration parameter (default 1)
#' @return numeric scalar
#'
#' @export
calc_w_ratio <- function(w_big, w_small, s_big, s_small, delta = 1){
  shared <- intersect(w_big, w_small)
  # single weight that was split
  w1 <- w_small[!(w_small %in% shared)]
  # positions of the two new components in w_big (which() is increasing,
  # so the first/last entries are the min/max indices)
  split_idx <- which(!(w_big %in% shared))
  i1 <- split_idx[1]
  i2 <- split_idx[length(split_idx)]
  w1_tilde <- w_big[i1]
  w2_tilde <- w_big[i2]
  # allocation counts for the two new components
  n1_tilde <- sum(s_big == i1)
  n2_tilde <- sum(s_big == i2)
  K_small <- length(w_small)
  top <- w1_tilde^(delta - 1 + n1_tilde) * w2_tilde^(delta - 1 + n2_tilde)
  bottom <- w1^(delta - 1 + n1_tilde + n2_tilde) * beta(delta, K_small * delta)
  top / bottom
}
#' Calculate mu ratio
#'
#' Ratio of DPP prior normalising determinants for the two mean vectors,
#' det(C(mu_big, theta, tau)) / det(C(mu_small, theta, tau)), where the
#' kernel matrix C is built by calc_C().
#'
#' @param mu_big mean vector for model with greater $K$
#' @param mu_small mean vector for model with smaller $K$
#' @param theta hyperparameter theta for DPP
#' @param tau hyperparameter tau for DPP
#' @return numeric scalar, the determinant ratio
#' @export
calc_mu_ratio <- function(mu_big, mu_small, theta, tau){
  det(calc_C(mu_big, theta, tau)) / det(calc_C(mu_small, theta, tau))
}
#' Calculate kappa ratio
#'
#' Prior ratio for the precisions under a Gamma(a/2, b/2) prior: the pair of
#' new precisions in kappa_big versus the single old precision in
#' kappa_small, identified by set difference.
#'
#' @param kappa_big kappa vector from the model with a greater $K$
#' @param kappa_small kappa vector from the model with smaller $K$
#' @param a hyperparameter a
#' @param b hyperparameter b
#' @return numeric scalar
#' @export
calc_kappa_ratio <- function(kappa_big, kappa_small, a, b){
  shared <- intersect(kappa_big, kappa_small)
  # old precision (only in the small model)
  kappa1 <- kappa_small[!(kappa_small %in% shared)]
  # new pair (only in the big model), in order of appearance
  new_pair <- kappa_big[!(kappa_big %in% shared)]
  k1_tilde <- new_pair[1]
  k2_tilde <- new_pair[2]
  # density-ratio factor and the exponential factor, combined at the end
  prior_part <- k1_tilde^(1 - a / 2) * k2_tilde * (b / (2 * k2_tilde))^(a / 2) /
    (kappa1^(1 - a / 2) * gamma(a / 2))
  exp_part <- exp(-b / 2 * (1 / k1_tilde + 1 / k2_tilde - 1 / kappa1))
  prior_part * exp_part
}
#' Calculate likelihood ratio
#'
#' Normal-mixture likelihood ratio between the big and small models, computed
#' on the log scale for numerical stability and exponentiated at the end.
#' Each observation's component is fixed by its allocation vector (s_big /
#' s_small), so the likelihood is a product of per-observation normal
#' densities with component mean mu[s] and sd 1/sqrt(kappa[s]).
#'
#' Note: w_big and w_small are accepted for interface compatibility with the
#' caller (calc_rho) but are not used in the computation.
#'
#' @param s_big s for big model
#' @param s_small s for small model
#' @param w_big w for big model (unused)
#' @param w_small w for small model (unused)
#' @param mu_big mu for big model
#' @param mu_small mu for small model
#' @param kappa_big kappa for big model
#' @param kappa_small kappa for small model
#' @param y data vector
#' @return numeric scalar, exp(loglik_big - loglik_small)
#' @export
calc_lik_ratio <- function(s_big, s_small, w_big, w_small,
                           mu_big, mu_small, kappa_big, kappa_small,
                           y){
  # The original signature used self-referential defaults (s_big = s_big,
  # ...), which raise "promise already under evaluation" if any argument is
  # omitted; the defaults were unusable and have been removed. All callers
  # pass every argument by name, so this is backward compatible.
  sd_big <- sqrt(1 / kappa_big)
  sd_small <- sqrt(1 / kappa_small)
  log_lik_big <- dnorm(y, mean = mu_big[s_big], sd = sd_big[s_big], log = TRUE)
  log_lik_small <- dnorm(y, mean = mu_small[s_small], sd = sd_small[s_small], log = TRUE)
  log_diff <- sum(log_lik_big) - sum(log_lik_small)
  return(exp(log_diff))
}
|
aea7070c7c888ed26c495cf2daa869ab1858af56
|
759251b2620a2ad8674f157e2ff9a66603f28b39
|
/run_analysis.R
|
9a7103583498a4c33a89b3f96ce2f1edb15ebfd5
|
[] |
no_license
|
lutfor3737/Tidy-data
|
e152447dcdc908d8f565d419676f156fedc0c600
|
8178345e4f1d588e835e0eb8862c1ef928728c2e
|
refs/heads/master
| 2021-01-10T01:02:23.084504
| 2015-08-23T21:05:42
| 2015-08-23T21:05:42
| 41,262,964
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,161
|
r
|
run_analysis.R
|
# Getting and Cleaning Data course project: merge the UCI HAR train/test sets,
# keep mean()/std() features, relabel activities, and write two tidy outputs.
# NOTE(review): setwd() hard-codes a user-specific path; the script assumes
# the "UCI HAR Dataset" folder already exists below it.
setwd("~/R/Getting and Cleaning data")
# NOTE(review): require() inside if() for install-on-demand; library() would
# be the idiomatic loader once the package is known to be present.
if (!require("data.table")) {
install.packages("data.table",dependencies = TRUE)
}
if (!require("reshape2")) {
install.packages("reshape2",dependencies = TRUE)
}
require("data.table")
require("reshape2")
#Merges the training and the test sets to create one data set.
X_train_data <- read.table("./UCI HAR Dataset/train/X_train.txt", header = FALSE)
y_train_data <- read.table("./UCI HAR Dataset/train/y_train.txt", header = FALSE)
X_test_data <- read.table("./UCI HAR Dataset/test/X_test.txt", header = FALSE)
y_test_data <- read.table("./UCI HAR Dataset/test/y_test.txt", header = FALSE)
subject_train_data <- read.table("./UCI HAR Dataset/train/subject_train.txt", header = FALSE)
subject_test_data <- read.table("./UCI HAR Dataset/test/subject_test.txt", header = FALSE)
# Stack train on top of test for features, labels and subjects alike.
X_data <- rbind(X_train_data, X_test_data)
y_data <- rbind(y_train_data, y_test_data)
subject_data <- rbind(subject_train_data, subject_test_data)
#Uses descriptive activity names to name the activities in the data set
#Appropriately labels the data set with descriptive variable names.
features <- read.table("./UCI HAR Dataset/features.txt")
names(features) <- c('feature_id', 'feature_name')
# Keep only -mean() and -std() features; strip the parentheses from names.
index_features <- grep("-mean\\(\\)|-std\\(\\)", features$feature_name)
X_data <- X_data[, index_features]
names(X_data) <- gsub("\\(|\\)", "", (features[index_features, 2]))
activities <- read.table("./UCI HAR Dataset/activity_labels.txt")
names(activities) <- c('activity_id', 'activity_name')
# Map numeric activity codes to their descriptive labels.
y_data[, 1] = activities[y_data[, 1], 2]
names(y_data) <- "Activity"
names(subject_data) <- "Subject"
# bind tidy data
tidy_data <- cbind(subject_data, y_data, X_data)
#creates a second, independent tidy data set with the average of each variable for each activity and each subject.
# Columns 3..end are the measurements (1 = Subject, 2 = Activity).
tidy_2nd <- tidy_data[, 3:dim(tidy_data)[2]]
tidy_data_Avg <- aggregate(tidy_2nd,list(tidy_data$Subject, tidy_data$Activity), mean)
names(tidy_data_Avg)[1] <- "Subject"
names(tidy_data_Avg)[2] <- "Activity"
# NOTE(review): write.csv to .txt files produces comma-separated output.
write.csv(tidy_data, file = "tidy_data.txt",row.names = FALSE)
write.csv(tidy_data_Avg, file = "tidy_data_mean.txt",row.names = FALSE)
|
fcb1bfe8e1542b6669c0b749d2fccbff0f425e7f
|
3925dcd72c626dd7979b65a3bc02dc997bdf52f1
|
/run_analysis.R
|
3bfe15bcc1b1f68604de3dac89bf60dda13d57f2
|
[] |
no_license
|
visheshtayal/ProjectCourse3Week4
|
ed01c3c28db429543e583ce84246849682d0e265
|
dd2102242878fa2efc2531853c60451838bbf809
|
refs/heads/master
| 2023-06-09T00:12:19.737040
| 2023-05-29T01:51:21
| 2023-05-29T01:51:21
| 286,048,788
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,153
|
r
|
run_analysis.R
|
# Getting and Cleaning Data course project: download the UCI HAR dataset,
# keep the mean()/std() features, and write a tidy per-subject/per-activity
# average to tidyData.txt.
#Loading packages and getting the data
# Only install reshape2 when it is missing -- the original script reinstalled
# it (and hit the network) on every run.
if (!requireNamespace("reshape2", quietly = TRUE)) {
  install.packages("reshape2")
}
library(data.table)
library(reshape2)
path <- getwd()
url <- "https://d396qusza40orc.cloudfront.net/getdata%2Fprojectfiles%2FUCI%20HAR%20Dataset.zip"
download.file(url, file.path(path, "datafiles.zip"))
unzip("datafiles.zip")
#Loading activity and features
activityLabels <- fread(file.path(path, "UCI HAR Dataset/activity_labels.txt"), col.names = c("classLabels", "activityName"))
features <- fread(file.path(path, "UCI HAR Dataset/features.txt"), col.names = c("index", "featureNames"))
# Keep only the -mean() and -std() features; drop parentheses from the names.
featuresWanted <- grep("(mean|std)\\(\\)", features[, featureNames])
measurements <- features[featuresWanted, featureNames]
measurements <- gsub('[()]', '', measurements)
#loading train datasets
# Bug fix: the label files in the UCI archive are lower-case
# ("y_train.txt" / "y_test.txt"); the upper-case "Y_..." paths in the
# original fail on case-sensitive file systems (e.g. Linux).
train <- fread(file.path(path, "UCI HAR Dataset/train/X_train.txt"))[, featuresWanted, with = FALSE]
data.table::setnames(train, colnames(train), measurements)
trainActivities <- fread(file.path(path, "UCI HAR Dataset/train/y_train.txt"), col.names = c("Activity"))
trainSubjects <- fread(file.path(path, "UCI HAR Dataset/train/subject_train.txt"), col.names = c("SubjectNum"))
train <- cbind(trainSubjects, trainActivities, train)
#loading test datasets
test <- fread(file.path(path, "UCI HAR Dataset/test/X_test.txt"))[, featuresWanted, with = FALSE]
data.table::setnames(test, colnames(test), measurements)
testActivities <- fread(file.path(path, "UCI HAR Dataset/test/y_test.txt"), col.names = c("Activity"))
testSubjects <- fread(file.path(path, "UCI HAR Dataset/test/subject_test.txt"), col.names = c("SubjectNum"))
test <- cbind(testSubjects, testActivities, test)
#combining train and test datasets
combined <- rbind(train, test)
#tidying data to more extent
# Replace numeric codes with labels, melt to long form, then average each
# variable per subject and activity.
combined[["Activity"]] <- factor(combined[, Activity], levels = activityLabels[["classLabels"]], labels = activityLabels[["activityName"]])
combined[["SubjectNum"]] <- as.factor(combined[, SubjectNum])
combined <- reshape2::melt(data = combined, id = c("SubjectNum", "Activity"))
combined <- reshape2::dcast(data = combined, SubjectNum + Activity ~ variable, fun.aggregate = mean)
data.table::fwrite(x = combined, file = "tidyData.txt", quote = FALSE)
|
459489ebdaa711582ce08b1cb34a0e8b67e8b0be
|
878f7f98050ff52ef50a168e7a2452e03c1f5bac
|
/R/get0.R
|
1fd7b144a59f1af11011275631c358782e69c201
|
[] |
no_license
|
dmurdoch/backports
|
c87654f818830114f87a5978ee2f96d6c3156039
|
4b85d90a3ad11a5ee4e61d1c113991526b4d1cc2
|
refs/heads/main
| 2023-04-11T12:15:23.749190
| 2023-03-20T13:02:45
| 2023-03-20T13:02:45
| 284,331,893
| 0
| 0
| null | 2020-08-01T20:13:42
| 2020-08-01T20:13:42
| null |
UTF-8
|
R
| false
| false
| 1,024
|
r
|
get0.R
|
#' @title Backport of get0 for R < 3.2.0
#' @rdname get0
#'
#' @description
#' Drop-in replacement for \code{base::get0}: look up \code{x[1]} in
#' \code{envir} and return its value, or \code{ifnotfound} when it is absent.
#' Implemented on top of \code{mget}, which supports \code{ifnotfound}.
#'
#' @keywords internal
#' @rawNamespace if (getRversion() < "3.2.0") export(get0)
#' @examples
#' # get function from namespace instead of possibly getting
#' # implementation shipped with recent R versions:
#' bp_get0 = getFromNamespace("get0", "backports")
#'
#' bp_get0("a")
#' bp_get0("a", ifnotfound = 0)
#'
#' foo = 12
#' bp_get0("foo")
get0 = function(x, envir = pos.to.env(-1L), mode = "any", inherits = TRUE, ifnotfound = NULL) {
  if (!is.character(x) || length(x) == 0L)
    stop("Invalid first argument")
  # base::get0 rejects length > 1 inputs since R 4.1.0; mirror that behaviour
  # only on those versions so the backport matches base on every R release.
  # (Guard removable once all supported R versions are >= 4.1.0.)
  if (getRversion() >= "4.1.0" && length(x) > 1L)
    stop("first argument has length > 1")
  found = mget(x[1L], envir = envir, mode = mode, inherits = inherits,
               ifnotfound = list(ifnotfound))
  found[[1L]]
}
|
7d64a22504b343627f71ffbda5ec6c9a8f3b0572
|
e26420970229a0c55ec092168797ed6e42f5708f
|
/tests/run_tests.R
|
6c3aaa82f9ba44eb3f3ef9196fd1d84246a699df
|
[
"MIT"
] |
permissive
|
kcha/psiplot
|
cd26b3d39301d20d82e453e4882b60a214afb600
|
7c840f69f7b321b8690e9f3f03191a0cd699a371
|
refs/heads/master
| 2022-06-03T02:54:17.403767
| 2022-03-18T15:30:05
| 2022-03-18T15:30:05
| 27,505,866
| 2
| 1
|
MIT
| 2022-03-18T15:30:06
| 2014-12-03T20:20:07
|
R
|
UTF-8
|
R
| false
| false
| 81
|
r
|
run_tests.R
|
#!/usr/bin/env Rscript
# Test runner: load the package and execute its testthat suite.
library(testthat)
library(psiplot)
test_check("psiplot")
|
9a9d59906ffcc47d0996840777444b5791248ab9
|
c799d05e76440b5224e2960946b80c567571eaeb
|
/pp_qrf.R
|
8f60a25af77cb62aeb0e557f96b11a062aaf4fd2
|
[
"MIT"
] |
permissive
|
XuetongWang-DRR/paper_pp_wind_gusts
|
6c035786efbe8bc90bc5234e83e2d8dcd660f730
|
7aa4a2f9f69dd4474d7291f40213f73e8ccc4a0c
|
refs/heads/main
| 2023-08-28T00:36:12.464457
| 2021-10-27T07:08:55
| 2021-10-27T07:08:55
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 9,622
|
r
|
pp_qrf.R
|
## File containing functions for postprocessing ensemble forecasts via QRF
#### Import ####
# Import basic functions
# fn_data.R / fn_eval.R are sibling scripts; they presumably provide the
# helpers used below (update_hpar, rm_const, fn_scores_ens) -- both must sit
# in the working directory for these source() calls to succeed.
source(paste0(getwd(), "/fn_data.R"))
source(paste0(getwd(), "/fn_eval.R"))
# Function for prediction based on QRF #
qrf_pred <- function(X, qrf_train, pred_vars,
q_levels = NULL, n_ens = 20, n_cores = NULL,
scores_pp = TRUE, scores_ens = TRUE){
###-----------------------------------------------------------------------------
###Input
#X...............Ensemble data for prediction including predictors (and obs.) (n x n_preds (+ 1) data.frame)
#qrf_train.......Output of qrf function used for prediction
#pred_vars.......Predictors used for QRF (vector of strings)
#q_levels........Quantile levels used for output and evaluation (probability vector)
#................Default: NULL -> At least 100 member, incl. median and COSMO coverage
#n_ens...........Ensemble size (integer)
#................Default: 20 member (COSMO)
#n_cores.........Number of cores used in predict.ranger (integer)
#................Default: NULL -> Use one less than available
#scores_ens/pp...Should scores of ensemble and QRF forecasts, interval lengths, ranks be calculated? (logical)
#................Data needs to include variables 'ens_1',...,'ens_(n_ens)', 'obs'
###-----------------------------------------------------------------------------
###Output
#res...List containing:
#......f...............QRF forecasts (i.e. quantiles) based on qrf_train (n x n_q matrix)
#......runtime.........Prediction time (numeric)
#......n_test..........Number of test samples (integer)
#......scores_ens/pp...Data frames containing (n x 6 data frame):
#.........rank.........Ranks of ensemble/QRF forecasts (n vector)
#.........crps.........CRPS of ensemble/QRF forecasts (n vector)
#.........logs.........Log-Score of ensemble/QRF forecasts (n vector)
#.........lgt..........Ensemble range / Length of QRF prediction interval (n vector)
#.........e_md.........Bias of median forecast (n vector)
#.........e_me.........Bias of mean forecast (n vector)
###-----------------------------------------------------------------------------
#### Initiation ####
# Load packages
library(ranger)
library(scoringRules)
# Relevant variables for prediction
test_vars <- pred_vars
# Input check
if(scores_ens){
# Ensemble members
ens_str <- paste0("ens_", 1:n_ens)
# Check
if(any(!is.element(ens_str, names(X)))){
print(paste0("QRF-pred: Data does not include all ensemble members!
CRPS, Log-Score of raw ensemble and ranks can therefore not be calculated!"))
scores_ens <- FALSE
}
}
# Observations for scores
# (scalar logical flags -> use short-circuit ||, not elementwise |)
if(scores_pp || scores_ens){ test_vars <- c(test_vars, "obs") }
# Input check
if(any(!is.element(test_vars, names(X)))){
print("QRF-pred: Data does not include all of the relevant variables.") }
# Cut data to relevant variables
if(scores_ens){ X <- X[,unique(c(test_vars, ens_str))] }
else{ X <- X[,test_vars] }
# Input check
if(any(is.na(X))){
print("QRF-pred: Data includes missing values! Missing values are left out!")
X <- na.omit(X)
}
if(is.element("ens_sd", test_vars)){ if(any(X[["ens_sd"]] < 0)){
print("QRF-pred: At least one ensemble standard deviation is negative!") }}
# Number of cores
if(is.null(n_cores)){ n_cores <- parallel::detectCores() - 1 }
# If not given use equidistant quantiles (multiple of ensemble coverage, incl. median)
if(is.null(q_levels)){ q_levels <- seq(from = 1/(6*(n_ens + 1)),
to = (6*(n_ens + 1) - 1)/(6*(n_ens + 1)),
by = 1/(6*(n_ens + 1))) }
#### Prediction ####
# Take time
start_tm <- Sys.time()
# Calculate quantiles
q <- predict(object = qrf_train,
data = X,
type = "quantiles",
quantiles = q_levels,
num.threads = n_cores)$predictions
# Take time
end_tm <- Sys.time()
# Time needed
runtime <- as.numeric(difftime(end_tm, start_tm, units = "mins"))
#### Evaluation ####
# Calculate evaluation measure of QRF forecasts
# (the logical flags scores_pp/scores_ens are replaced by the score data here)
scores_pp <- fn_scores_ens(ens = q,
y = X[["obs"]],
scores_ens = scores_pp)
# Calculate evaluation measure of ensemble forecasts
scores_ens <- fn_scores_ens(ens = as.matrix(X[,paste0("ens_", 1:n_ens)]),
y = X[["obs"]],
scores_ens = scores_ens)
# Transform ranks to n_(ens + 1) bins (for multiples of (n_ens + 1) exact)
if(ncol(q) != n_ens){ scores_pp[["rank"]] <- ceiling(scores_pp[["rank"]]*(n_ens + 1)/(ncol(q) + 1)) }
#### Output ####
return(list(f = q,
scores_pp = scores_pp,
scores_ens = scores_ens,
n_test = nrow(X),
runtime = runtime))
}
#### Estimation ####
# Function for estimating QRF #
qrf_est <- function(train, pred_vars = c("ens_mean", "ens_sd"),
                    n_ens = 20, n_cores = NULL, qrf_ls = list()){
  ###-----------------------------------------------------------------------------
  ###Input
  #train...........Training data including predictors and obs. (n_train x (n_preds + 1) data.frame)
  #pred_vars.......Predictors used for QRF (vector of strings)
  #................Default: c("ens_mean", "ens_sd") -> Use only mean and variance
  #n_ens...........Ensemble size (integer)
  #................Default: 20 member (COSMO)
  #n_cores.........Number of cores used in ranger (integer)
  #................Default: NULL -> Use one less than available
  #qrf_ls..........List that may contain the following variables:
  #...console......Query, if output should be shown (logical)
  #................Default: FALSE
  #...importance...Importance setting for ranger (string)
  #................Default: "permutation"
  #...n_mtry.......Number of variables considered at each split (1,...,length(pred_vars))
  #................Default: -1 -> NULL (< 10 preds) resp. half of predictors
  #...n_trees......Number of trees (integer)
  #................Default: 1,000
  #...min_node.....Minimal node size (integer)
  #................Default: 10
  #...max_depth....Maximal tree depth (integer)
  #................Default: 0 (default) -> unlimited
  ###-----------------------------------------------------------------------------
  ###Output
  #res...List containing:
  #......qrf_train.......Estimated QRF ('ranger'-object)
  #......qrf_ls..........Hyperparameters (list)
  #......pred_vars.......Predictors (string vector)
  #......n_preds.........Number of predictors used (integer)
  #......n_train.........Number of training samples (integer)
  #......runtime.........Estimation time (numeric)
  ###-----------------------------------------------------------------------------
  #### Initiation ####
  # Load packages
  library(ranger)

  # Relevant variables for training
  train_vars <- c("obs", pred_vars)

  # Input check: abort with an informative error instead of print()-ing and
  # then failing on the column subscript below with a cryptic message
  if(any(!is.element(train_vars, names(train)))){
    stop("QRF-est: Training data does not include relevant variables.") }

  # Cut data to relevant variables
  train <- train[,train_vars]

  # Input check: use warning() (not print()) so the message also surfaces in
  # non-interactive runs and in logs; rows with missing values are dropped
  if(any(is.na(train))){
    warning("QRF-est: Training data includes missing values! Missing values are left out!")
    train <- na.omit(train)
  }
  if(is.element("ens_sd", train_vars)){ if(any(train[["ens_sd"]] < 0)){
    warning("QRF-est: At least one ensemble standard deviation is negative!") }}

  # Number of cores (default: leave one core free)
  if(is.null(n_cores)){ n_cores <- parallel::detectCores() - 1 }

  #### Hyperparameter ####
  # Hyperparameters and their default values (for global QRF)
  hpar_ls <- list(console = 0,
                  importance = "permutation",
                  n_trees = 1000,
                  min_node = 10,
                  max_depth = 0,
                  n_mtry = -1) # -2 for ranger-default, -1 for own default

  # Update hyperparameters with user-supplied values
  qrf_ls <- update_hpar(hpar_ls = hpar_ls,
                        in_ls = qrf_ls)

  # Set mtry to ranger-default (-2 -> NULL) or half of predictors (-1)
  if(qrf_ls$n_mtry == -1){
    qrf_ls$n_mtry <- floor(length(pred_vars)/2)
  } else if(qrf_ls$n_mtry == -2){
    qrf_ls$n_mtry <- NULL
  }

  #### Data preparation ####
  # Remove constant predictors (they carry no split information)
  pred_vars <- rm_const(data = train,
                        cols = pred_vars,
                        t_c = 0)

  #### Estimation ####
  # Get formula from predictors
  qrf_formula <- paste0("obs ~ ", paste0(pred_vars, collapse = " + "))

  # Take time
  start_tm <- Sys.time()

  # Quantile regression forest (quantreg = TRUE keeps terminal-node values)
  est <- ranger(formula = qrf_formula,
                data = train,
                num.trees = qrf_ls$n_trees,
                mtry = qrf_ls$n_mtry,
                min.node.size = qrf_ls$min_node,
                max.depth = qrf_ls$max_depth,
                quantreg = TRUE,
                num.threads = n_cores,
                importance = qrf_ls$importance,
                verbose = qrf_ls$console)

  # Take time
  end_tm <- Sys.time()

  # Time needed
  runtime <- as.numeric(difftime(end_tm, start_tm, units = "mins"))

  #### Output ####
  return(list(qrf_train = est,
              qrf_ls = qrf_ls,
              pred_vars = pred_vars,
              n_preds = length(pred_vars),
              n_train = nrow(train),
              runtime = runtime))
}
|
18edf2afba05498057f03d0bd9cbc2e70487840c
|
d0108c3f8d99cf84d0227c57b10dbd8236585192
|
/Fig.1D.venn.diagram.R
|
ca366a1d9580693f0bb792940f1aba0f7822d89b
|
[] |
no_license
|
Webb-Laboratory/Maybury-Lewis_et_al_2021
|
f2ef2f9e4571427571bfbf83f5242cc88254b51a
|
77f353d6c5ae91fa0cb260abe0d674fbed946bc4
|
refs/heads/main
| 2023-06-05T10:24:03.584835
| 2021-06-30T19:55:13
| 2021-06-30T19:55:13
| 346,836,008
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 390
|
r
|
Fig.1D.venn.diagram.R
|
###################################################
# Venn diagram for AQ, AA, stable peak numbers    #
# R version 3.6.1                                 #
# Requires the 'venneuler' package                #
###################################################
# NOTE(review): setwd() to a user-specific absolute path makes this script
# non-portable; adjust the path before running elsewhere
setwd("~/Dropbox/Desktop/ATAC-seq-PEAKS/DiffBind/results.diffbind.2019/v2_AvsQ")
library(venneuler)
# Venn diagram for Activated (AA) vs Quiescent (AQ):
# counts are peaks unique to AA, unique to AQ, and shared ("AA&AQ")
# -- presumably taken from the DiffBind results in the directory above;
# confirm against the source data before reuse
dat <- venneuler(c(AA=6777, AQ=3152, "AA&AQ"=19976))
plot(dat)
|
791a0da09dd99558be2d6f824166257f202171c0
|
ab5efddbc1a7d58c8666a5ae17faf6b8420f6dcd
|
/inst/runit_tests/runit-throw.R
|
24c337f85df65beeea582eda71d553acf2128114
|
[] |
no_license
|
cran/fakemake
|
c509062cae30a17645d3d86fc8e3d152e71cb39a
|
05aabfdfd2c8449f650701eac6be1d6cabbd5f21
|
refs/heads/master
| 2023-08-17T14:49:30.136027
| 2023-08-15T22:10:01
| 2023-08-16T01:26:42
| 111,586,682
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 93
|
r
|
runit-throw.R
|
# Verify that the internal fakemake:::throw() helper signals an R error:
# RUnit::checkException() fails this test if the expression completes normally.
test_exception <- function() {
    RUnit::checkException(fakemake:::throw("Hello, error"))
}
|
76061df711d21ae9f0a78df4b71360e9c132d67a
|
54bd3fd47a43afce71a194225486325f1a1488ff
|
/R/fars_functions.R
|
a39e8258b5b0326b9af71dedb50c2f74168d7142
|
[] |
no_license
|
shaowei72/MyFirstPackage
|
5c5589a660bc733523d1dad63deb98c9b7f54bf2
|
df35e1dca53740024580658edcdfa8cff81513de
|
refs/heads/master
| 2022-11-12T08:00:02.235744
| 2020-06-20T09:13:30
| 2020-06-20T09:13:30
| 271,950,089
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 4,290
|
r
|
fars_functions.R
|
#' Read data from file into a tibble
#'
#' Reads a csv file and returns its contents as a tibble.
#'
#' @param filename Name of the csv file
#'
#' @return This function returns a tibble.
#'
#' @details Function aborts and prints error message if the file filename
#'   cannot be found. Parsing messages from \code{readr} are suppressed.
#'
#' @examples
#' \dontrun{
#' fars_read("input_file.csv")
#' }
#'
#' @importFrom dplyr as_tibble
#' @importFrom readr read_csv
#'
#' @export
fars_read <- function(filename) {
        if(!file.exists(filename))
                stop("file '", filename, "' does not exist")
        data <- suppressMessages({
                readr::read_csv(filename, progress = FALSE)
        })
        # tbl_df() was deprecated in dplyr 1.0.0; as_tibble() (re-exported by
        # dplyr, so no new dependency) returns the same tibble class
        dplyr::as_tibble(data)
}
#' Generate string with defined format for denoting a filename
#'
#' Builds the standard FARS data file name \code{accident_XXXX.csv.bz2},
#' where \code{XXXX} is the (integer) year.
#'
#' @param year Year to tag the filename with
#'
#' @return This function returns a string
#'
#' @examples
#' \dontrun{
#' make_filename("2020")
#' }
#'
#' @export
make_filename <- function(year) {
        # Coerce inline: accepts both numeric and character years
        sprintf("accident_%d.csv.bz2", as.integer(year))
}
#' Read yearly FARS data from list of files into a tibble list
#'
#' Reads the yearly FARS data file for each requested year and keeps only the
#' MONTH and year columns.
#'
#' @param years Vector denoting the years for which the yearly FARS data is required
#'
#' @return This function returns a list of tibble, with each element containing
#'   the yearly FARS data (columns MONTH and year), or \code{NULL} (with a
#'   warning) for years without data.
#'
#' @examples
#' \dontrun{
#' fars_read_years(c("2020", "2019", "2018"))
#' }
#'
#' @importFrom dplyr mutate select
#'
#' @note Makes use of make_filename() and fars_read() functions
#'
#' @export
fars_read_years <- function(years) {
        lapply(years, function(year) {
                file <- make_filename(year)
                tryCatch({
                        dat <- fars_read(file)
                        dat <- dplyr::mutate(dat, year = year)
                        dplyr::select(dat, MONTH, year)
                }, error = function(e) {
                        warning("invalid year: ", year)
                        NULL
                })
        })
}
#' Presents number of fatal injuries from FARS by month for multiple years
#'
#' Function that reads in yearly FARS data, and tabulates the number of fatal
#' injuries by month (rows) and year (columns), for the years of interest
#'
#' @param years Vector denoting the years of interest, i.e., when the yearly FARS data is required
#'
#' @return This function returns a table showing the number of fatal injuries by month, for the years of interest
#'
#' @note Makes use of fars_read_years() functions
#'
#' @importFrom magrittr %>%
#'
#' @importFrom dplyr bind_rows group_by summarize n
#'
#' @importFrom tidyr spread
#'
#' @examples
#' \dontrun{
#' fars_summarize_years(c("2020", "2019", "2015"))
#' }
#'
#' @export
fars_summarize_years <- function(years) {
        dat_list <- fars_read_years(years)
        dplyr::bind_rows(dat_list) %>%
                dplyr::group_by(year, MONTH) %>%
                # FIX: n() was previously called bare (and never imported), so
                # this failed when dplyr was not attached; namespace it and use
                # summarize() (exact alias of summarise()) to match @importFrom
                dplyr::summarize(n = dplyr::n()) %>%
                tidyr::spread(year, n)
}
#' Plots fatal accidents in a State for a particular year
#'
#' Function that renders on a map the location of fatal accidents that occurred in a specific State in a particular year
#'
#' @param year Integer representing the year of interest
#' @param state.num Integer denoting the state
#'
#' @return This function renders and returns a map object
#'
#' @details Function aborts if the state number state.num is invalid
#'
#' @note Makes use of fars_read() and make_filename() functions
#'
#' @importFrom maps map
#' @importFrom graphics points
#' @importFrom dplyr filter
#'
#' @examples
#' \dontrun{
#' fars_map_state(20, 2013)
#' }
#'
#' @export
fars_map_state <- function(state.num, year) {
        # Load the FARS data for the requested year
        filename <- make_filename(year)
        data <- fars_read(filename)
        state.num <- as.integer(state.num)
        if(!(state.num %in% unique(data$STATE)))
                stop("invalid STATE number: ", state.num)
        data.sub <- dplyr::filter(data, STATE == state.num)
        # Nothing to draw for this state/year combination
        if(nrow(data.sub) == 0L) {
                message("no accidents to plot")
                return(invisible(NULL))
        }
        # Coordinates above 900 (longitude) / 90 (latitude) look like FARS
        # sentinel codes for unknown locations -- TODO confirm against the FARS
        # codebook. Marking them NA excludes them from both the map extent and
        # the plotted points.
        is.na(data.sub$LONGITUD) <- data.sub$LONGITUD > 900
        is.na(data.sub$LATITUDE) <- data.sub$LATITUDE > 90
        # Draw the state outline sized to the valid accident locations, then
        # overlay one point (pch = 46, a pixel-sized dot) per accident
        with(data.sub, {
                maps::map("state", ylim = range(LATITUDE, na.rm = TRUE),
                        xlim = range(LONGITUD, na.rm = TRUE))
                graphics::points(LONGITUD, LATITUDE, pch = 46)
        })
}
|
a2843d58ee3366e19a62e86d7a36396f7611714c
|
1e4c9ffd2979c793edf1312b220abeecb3d1a25b
|
/man/plotLocalMethylationProfile.Rd
|
e59878335d7942bf19dea1c7e42cec19e0b1fa8d
|
[] |
no_license
|
nrzabet/DMRcaller
|
e21e2919f191fbb9b5db4a8a91be15526c288c22
|
6191d9e68dee6e0a2daedc07485bdc939e84e14e
|
refs/heads/master
| 2020-03-11T13:32:07.018510
| 2019-02-15T18:16:10
| 2019-02-15T18:16:10
| 130,027,757
| 5
| 3
| null | null | null | null |
UTF-8
|
R
| false
| false
| 4,614
|
rd
|
plotLocalMethylationProfile.Rd
|
% Generated by roxygen2 (4.1.0): do not edit by hand
% Please edit documentation in R/localProfile.R
\name{plotLocalMethylationProfile}
\alias{plotLocalMethylationProfile}
\title{Plot local methylation profile}
\usage{
plotLocalMethylationProfile(methylationData1, methylationData2, region,
DMRs = NULL, conditionsNames = NULL, gff = NULL, windowSize = 150,
context = "CG", labels = NULL, col = NULL, main = "",
plotMeanLines = TRUE, plotPoints = TRUE)
}
\arguments{
\item{methylationData1}{the methylation data in condition 1
(see \code{\link{methylationDataList}}).}
\item{methylationData2}{the methylation data in condition 2
(see \code{\link{methylationDataList}}).}
\item{region}{a \code{\link{GRanges}} object with the region where to plot
the high resolution profile.}
\item{DMRs}{a \code{\link{GRangesList}} object or a list with the list of
DMRs (see \code{\link{computeDMRs}} or \code{\link{filterDMRs}}.}
\item{conditionsNames}{the names of the two conditions. This will be used to
plot the legend.}
\item{gff}{a \code{\link{GRanges}} object with all elements usually imported
from a GFF3 file. The gff file needs to have an metafield \code{"type"}. Only
the elements of type \code{"gene"}, \code{"exon"} and
\code{"transposable_element"} are plotted. Genes are represented as
horizontal black lines, exons as a black rectangle and transposable elements
as a grey rectangle. The elements are plotted on the corresponding strand
(\code{+} or \code{-}).}
\item{windowSize}{the size of the triangle base used to smooth the average
methylation profile.}
\item{context}{the context in which the DMRs are computed (\code{"CG"},
\code{"CHG"} or \code{"CHH"}).}
\item{labels}{a \code{vector} of \code{character} used to add a subfigure
characters to the plot. If \code{NULL} nothing is added.}
\item{col}{a \code{character} vector with the colors. It needs to contain a
minimum of \code{4*length(DMRs)} colors. If not or if \code{NULL}, the
default colors will be used.}
\item{main}{a \code{character} with the title of the plot}
\item{plotMeanLines}{a \code{logical} value indicating whether to plot the
mean lines or not.}
\item{plotPoints}{a \code{logical} value indicating whether to plot the
points or not.}
}
\value{
Invisibly returns \code{NULL}
}
\description{
This function plots the methylation profile at one locus for the bisulfite
sequencing data. The points on the graph represent methylation proportion of
individual cytosines, their colour indicates which sample they belong to and the
intensity of the colour how many reads that particular cytosine had. This
means that darker colors indicate stronger evidence that the corresponding
cytosine has the corresponding methylation proportion, while lighter colors
indicate a weaker evidence. The solid lines represent the smoothed profiles
and the intensity of the line the coverage at the corresponding position
(darker colors indicate more reads while lighter ones less reads). The boxes
on top represent the DMRs, where a filled box will represent a DMR which
gained methylation while a box with a pattern represents a DMR that lost
methylation. The DMRs need to have a metadata field \code{"regionType"} which
can be either \code{"gain"} (where there is more methylation in condition 2
compared to condition 1) or \code{"loss"} (where there is less methylation in
condition 2 compared to condition 1). In case this metadata field is missing all
DMRs are drawn using a filled box. Finally, we also allow annotation of the
DNA sequence. We represent by black boxes all the exons, which are joined
by a horizontal black line, thus, marking the full body of the gene. With
grey boxes we mark the transposable elements. Both for genes and transposable
elements we plot them over a mid line if they are on the positive strand and
under the mid line if they are on the negative strand.
}
\examples{
# load the methylation data
data(methylationDataList)
# load the gene annotation data
data(GEs)
#select the genes
genes <- GEs[which(GEs$type == "gene")]
# the coordinates of the area to be plotted
chr3Reg <- GRanges(seqnames = Rle("Chr3"), ranges = IRanges(510000,530000))
# load the DMRs in CG context
data(DMRsNoiseFilterCG)
DMRsCGlist <- list("noise filter"=DMRsNoiseFilterCG)
# plot the CG methylation
par(mar=c(4, 4, 3, 1)+0.1)
par(mfrow=c(1,1))
plotLocalMethylationProfile(methylationDataList[["WT"]],
methylationDataList[["met1-3"]], chr3Reg,
DMRsCGlist, c("WT", "met1-3"), GEs,
windowSize=100, main="CG methylation")
}
\author{
Nicolae Radu Zabet
}
|
6211fad128f61ebe47d7fde5774a6b43d12a390b
|
3ee34bf19343757c8661ee4a0eeb8d6ca33f1208
|
/R/PLNmixture.R
|
dea6e9afff33002d6b2c8c83da457a1731aa7606
|
[] |
no_license
|
cran/PLNmodels
|
48c0b954fb9aca6b8824d89a348bc9b923216621
|
57733cce7d9c585feada02a6ee99c21f2da667a7
|
refs/heads/master
| 2023-09-03T14:47:28.812155
| 2023-08-24T15:10:02
| 2023-08-24T16:35:05
| 187,254,106
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 8,700
|
r
|
PLNmixture.R
|
#' Poisson lognormal mixture model
#'
#' Fit the mixture variants of the Poisson lognormal with a variational algorithm. Use the (g)lm syntax for model specification (covariates, offsets).
#'
#' @param formula an object of class "formula": a symbolic description of the model to be fitted.
#' @param data an optional data frame, list or environment (or object coercible by as.data.frame to a data frame) containing the variables in the model. If not found in data, the variables are taken from environment(formula), typically the environment from which lm is called.
#' @param subset an optional vector specifying a subset of observations to be used in the fitting process.
#' @param clusters a vector of integer containing the successive number of clusters (or components) to be considered
#' @param control a list-like structure for controlling the optimization, with default generated by [PLNmixture_param()]. See the associated documentation
#' for details.
#'
#' @return an R6 object with class [`PLNmixturefamily`], which contains
#' a collection of models with class [`PLNmixturefit`]
#'
#' @rdname PLNmixture
#' @examples
#' ## Use future to dispatch the computations on 2 workers
#' \dontrun{
#' future::plan("multisession", workers = 2)
#' }
#'
#' data(trichoptera)
#' trichoptera <- prepare_data(trichoptera$Abundance, trichoptera$Covariate)
#' myMixtures <- PLNmixture(Abundance ~ 1 + offset(log(Offset)), clusters = 1:4, data = trichoptera,
#' control = PLNmixture_param(smoothing = 'none'))
#'
#' # Shut down parallel workers
#' \dontrun{
#' future::plan("sequential")
#' }
#' @seealso The classes [`PLNmixturefamily`], [`PLNmixturefit`] and [PLNmixture_param()]
#' @importFrom stats model.frame model.matrix model.response model.offset update.formula
#' @export
PLNmixture <- function(formula, data, subset, clusters = 1:5, control = PLNmixture_param()) {

  ## Temporary test for deprecated use of list()
  if (!inherits(control, "PLNmodels_param"))
    stop("We now use the function PLNmixture_param() to generate the list of parameters that controls the fit:
  replace 'list(my_arg = xx)' by PLN_param(my_arg = xx) and see the documentation of PLNmixture_param().")

  # Capture the user's call so the model frame can be rebuilt later in the
  # caller's environment (same non-standard evaluation scheme as stats::lm),
  # then remove the intercept term if any (will be used to deal with group means)
  the_call <- match.call(expand.dots = FALSE)
  the_call$formula <- update.formula(formula(the_call), ~ . -1)

  ## extract the data matrices and weights
  args <- extract_model(the_call, parent.frame())

  ## Instantiate the collection of PLN models, one per entry of `clusters`
  if (control$trace > 0) cat("\n Initialization...")
  if (control$trace > 0) cat("\n\n Adjusting", length(clusters), "PLN mixture models.\n")
  myPLN <- PLNmixturefamily$new(clusters, args$Y, args$X, args$O, args$formula, control)

  ## Now adjust the PLN models
  myPLN$optimize(control$config_optim)

  ## Smoothing to avoid local minima (forward/backward passes over cluster sizes)
  if (control$smoothing != "none" & control$trace > 0) cat("\n\n Smoothing PLN mixture models.\n")
  myPLN$smooth(control)

  ## Post-treatments: Compute pseudo-R2, rearrange criteria and the visualization for PCA
  if (control$trace > 0) cat("\n Post-treatments")
  config_post <- config_post_default_PLNmixture; config_post$trace <- control$trace
  myPLN$postTreatment(config_post)

  if (control$trace > 0) cat("\n DONE!\n")
  myPLN
}
#' Control of a PLNmixture fit
#'
#' Helper to define list of parameters to control the PLNmixture fit. All arguments have defaults.
#'
#' @param backend optimization back used, either "nlopt" or "torch". Default is "nlopt"
#' @param covariance character setting the model for the covariance matrices of the mixture components. Either "full", "diagonal" or "spherical". Default is "spherical".
#' @param smoothing The smoothing to apply. Either, 'none', forward', 'backward' or 'both'. Default is 'both'.
#' @param init_cl The initial clustering to apply. Either, 'kmeans', CAH' or a user defined clustering given as a list of clusterings, the size of which is equal to the number of clusters considered. Default is 'kmeans'.
#' @param config_optim a list for controlling the optimizer (either "nlopt" or "torch" backend). See details
#' @param trace a integer for verbosity.
#' @param inception Set up the parameters initialization: by default, the model is initialized with a multivariate linear model applied on
#' log-transformed data, and with the same formula as the one provided by the user. However, the user can provide a PLNfit (typically obtained from a previous fit),
#' which sometimes speeds up the inference.
#'
#' @return list of parameters configuring the fit.
#'
#' @details The list of parameters `config_optim` controls the optimizers. When "nlopt" is chosen the following entries are relevant
#' * "it_smooth" number of forward/backward iteration of smoothing. Default is 2.
#' * "algorithm" the optimization method used by NLOPT among LD type, e.g. "CCSAQ", "MMA", "LBFGS". See NLOPT documentation for further details. Default is "CCSAQ".
#' * "maxeval" stop when the number of iteration exceeds maxeval. Default is 10000
#' * "ftol_rel" stop when an optimization step changes the objective function by less than ftol multiplied by the absolute value of the parameter. Default is 1e-8
#' * "xtol_rel" stop when an optimization step changes every parameters by less than xtol multiplied by the absolute value of the parameter. Default is 1e-6
#' * "ftol_out" outer solver stops when an optimization step changes the objective function by less than xtol multiply by the absolute value of the parameter. Default is 1e-6
#' * "maxit_out" outer solver stops when the number of iteration exceeds out.maxit. Default is 50
#' * "ftol_abs" stop when an optimization step changes the objective function by less than ftol_abs. Default is 0.0 (disabled)
#' * "xtol_abs" stop when an optimization step changes every parameters by less than xtol_abs. Default is 0.0 (disabled)
#' * "maxtime" stop when the optimization time (in seconds) exceeds maxtime. Default is -1 (disabled)
#'
#' When "torch" backend is used, with the following entries are relevant:
#' * "maxeval" stop when the number of iteration exceeds maxeval. Default is 10000
#' * "ftol_rel" stop when an optimization step changes the objective function by less than ftol multiplied by the absolute value of the parameter. Default is 1e-8
#' * "xtol_rel" stop when an optimization step changes every parameters by less than xtol multiplied by the absolute value of the parameter. Default is 1e-6
#' * "ftol_out" outer solver stops when an optimization step changes the objective function by less than xtol multiply by the absolute value of the parameter. Default is 1e-6
#' * "maxit_out" outer solver stops when the number of iteration exceeds out.maxit. Default is 50
#'
#' The list of parameters `config_post` controls the post-treatment processing, with the following entries:
#' * jackknife boolean indicating whether jackknife should be performed to evaluate bias and variance of the model parameters. Default is FALSE.
#' * bootstrap integer indicating the number of bootstrap resamples generated to evaluate the variance of the model parameters. Default is 0 (inactivated).
#' * variational_var boolean indicating whether variational Fisher information matrix should be computed to estimate the variance of the model parameters (highly underestimated). Default is FALSE.
#' * rsquared boolean indicating whether approximation of R2 based on deviance should be computed. Default is FALSE
#'
#' @export
PLNmixture_param <- function(
    backend       = "nlopt"    ,
    trace         = 1          ,
    covariance    = "spherical",
    init_cl       = "kmeans"   ,
    smoothing     = "both"     ,
    config_optim  = list()     ,
    inception     = NULL     # pretrained PLNfit used as initialization
) {

  if (!is.null(inception)) stopifnot(isPLNfit(inception))

  ## optimization config
  ## FIX: the original called match.arg(backend) without explicit choices, so
  ## the candidate set was taken from the formal default (the single string
  ## "nlopt") and backend = "torch" could never be selected. Spelling out the
  ## valid backends also makes the subsequent membership check redundant.
  backend <- match.arg(backend, choices = c("nlopt", "torch"))
  if (backend == "nlopt") {
    ## 'algorithm' may be absent from config_optim: NULL %in% x is logical(0),
    ## which stopifnot() accepts, and the backend default is then used below
    stopifnot(config_optim$algorithm %in% available_algorithms_nlopt)
    config_opt <- config_default_nlopt
  }
  if (backend == "torch") {
    stopifnot(config_optim$algorithm %in% available_algorithms_torch)
    config_opt <- config_default_torch
  }
  ## mixture-specific defaults, then user-supplied overrides take precedence
  config_opt$ftol_out  <- 1e-3
  config_opt$maxit_out <- 50
  config_opt$it_smooth <- 1
  config_opt[names(config_optim)] <- config_optim
  config_opt$trace <- trace
  structure(list(
    backend       = backend     ,
    trace         = trace       ,
    covariance    = covariance  ,
    init_cl       = init_cl     ,
    smoothing     = smoothing   ,
    config_optim  = config_opt  ,
    inception     = inception   ), class = "PLNmodels_param")
}
|
09bd74896d053a20a9e7a008e40b24170f8015ed
|
0a997668786ab68d7bed3c4b5aec0fa78899f124
|
/.Rprofile
|
c8e6b1e4e10fe0ea0878d4b60a917918b72a2b4e
|
[] |
no_license
|
noamross/reprotemplate
|
70578bd7e9ec9e42de04487356c5b310305f5283
|
2cb5cb2706e6967a29870636d6ba23f566f04269
|
refs/heads/main
| 2023-07-15T15:54:02.946685
| 2021-08-28T19:23:03
| 2021-08-28T21:55:50
| 389,652,556
| 4
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,613
|
rprofile
|
.Rprofile
|
# Project .Rprofile: runs at R startup in this directory.
# Load project-level environment variables if present; try() keeps startup
# going even if the file is malformed.
if (file.exists(".env")) {
  try(readRenviron(".env"))
} else {
  message("No .env file")
}
# Put the project library *outside* the project
Sys.setenv(RENV_PATHS_LIBRARY_ROOT = file.path(normalizePath("~/.renv-project-libraries", mustWork = FALSE)))
# Activate renv's project-local package library, if the project uses renv
if (file.exists("renv/activate.R")) {
  source("renv/activate.R")
} else {
  message("No renv/activate.R")
}
# Use the local user's .Rprofile when interactive.
# Good for keeping local preferences, but not always reproducible.
user_rprof <- Sys.getenv("R_PROFILE_USER", normalizePath("~/.Rprofile", mustWork = FALSE))
if(interactive() && file.exists(user_rprof)) {
  source(user_rprof)
}
options(
  renv.config.auto.snapshot = TRUE,     ## Attempt to keep renv.lock updated automatically
  renv.config.rspm.enabled = TRUE,      ## Use RStudio Package manager for pre-built package binaries
  renv.config.install.shortcuts = TRUE, ## Use the existing local library to fetch copies of packages for renv
  renv.config.cache.enabled = TRUE,     ## Use the renv build cache to speed up install times
  renv.config.cache.symlinks = FALSE    ## Keep full copies of packages locally than symlinks to make the project portable in/out of containers
)
# If project packages have conflicts define them here
# (conflicted is optional; requireNamespace() avoids an error when absent)
if(requireNamespace("conflicted", quietly = TRUE)) {
  conflicted::conflict_prefer("filter", "dplyr", quiet = TRUE)
  conflicted::conflict_prefer("count", "dplyr", quiet = TRUE)
  conflicted::conflict_prefer("geom_rug", "ggplot2", quiet = TRUE)
  conflicted::conflict_prefer("set_names", "magrittr", quiet = TRUE)
  conflicted::conflict_prefer("View", "utils", quiet = TRUE)
}
|
f7fa25c741c741f2a761c82f15cc222a6144fcbc
|
44f613b633e9b6e797cddab9a1e9f825b601c8f4
|
/R/check.R
|
c887b2cd3f73b2e9200de5f7dd4f1dfc3196848c
|
[
"MIT"
] |
permissive
|
ktargows/rhub
|
5e7801c774b2384f9dbbdee193cc8aaf5c96e727
|
84476e5e935471a2e25a29571f4e8b397c03e48b
|
refs/heads/master
| 2020-12-30T12:55:38.296861
| 2016-10-04T18:55:34
| 2016-10-04T18:55:34
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,130
|
r
|
check.R
|
#' Check an R package on r-hub
#'
#' @param path Path to a directory containing an R package, or path to
#'   source R package tarball built with `R CMD check`.
#' @param platform Platform to build/check the package on. See
#'   [platforms()] for the available platforms.
#' @param email Email address to send notification to about the build.
#'   It must be a validated email address, see [validate_email()]. If
#'   `NULL`, then the email address of the maintainer is used, as defined
#'   in the `DESCRIPTION` file of the package.
#' @param valgrind Whether to run the check in valgrind. Only supported on
#'   Linux currently, and ignored on other platforms.
#' @param check_args Extra arguments for the `R CMD check` command.
#' @param show_status Whether to show the status of the build as it is
#'   happening.
#' @return Return the response from r-hub, invisibly. It contains the
#'   URL of the build's status page on r-hub.
#'
#' @export
#' @examples
#' \dontrun{
#' check(".")
#' check("mypackage_1.0.0.tar.gz", platform = "fedora-clang-devel")
#' }
check <- function(path = ".", platform = platforms()$name[1],
                  email = NULL, valgrind = FALSE, check_args = character(),
                  show_status = interactive()) {

  ## Validate inputs: must be a package dir or tarball, valgrind is a flag
  path <- normalizePath(path)
  assert_pkg_dir_or_tarball(path)
  assert_flag(valgrind)

  ## Resolve the notification email and make sure it has been validated
  if (is.null(email)) {
    email <- get_maintainer_email(path)
  }
  if (is.na(email)) {
    stop("Cannot get email address from package")
  }
  assert_validated_email(email)

  ## Build the source tarball when given a directory; tarballs pass through
  if (file.info(path)$isdir) {
    if (show_status) header_line("Building package")
    tmpdir <- tempfile()
    pkg_targz <- build_package(path, tmpdir)
  } else {
    pkg_targz <- path
  }

  ## Add valgrind to check_args (no-op when valgrind is FALSE)
  if (valgrind) {
    check_args <- c(check_args, "--use-valgrind")
  }

  ## Submit to r-hub
  id <- submit_package(
    email,
    pkg_targz,
    platform = platform,
    check_args = check_args,
    show_status = show_status
  )

  ## Show the status
  check_status(id, interactive = show_status)
}
|
68119b33ee3946369602f09db10c4ec54d980882
|
c2b840bbebe54492a64f91d16cc7023998a686d0
|
/import_questemp.R
|
4729788530de62143c823adcdc5baff5ffe71945
|
[
"MIT"
] |
permissive
|
yutamasuda/heatillness
|
29886cdccff2dc665833dba435a20c1e84382850
|
d6626655158d6bb7f3995db29608786933794377
|
refs/heads/master
| 2021-05-03T10:26:26.617892
| 2018-09-19T20:50:46
| 2018-09-19T20:50:46
| 120,534,210
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 6,713
|
r
|
import_questemp.R
|
# Import core body temperature (Questemp) data
# Repository: https://github.com/yutamasuda/heatillness/
# License: MIT - See the LICENSE file for more details
# ------
# Setup
# ------
# Clear workspace of all objects and unload all extra (non-base) packages
# NOTE(review): rm(list = ls()) and force-detaching packages are destructive
# for an interactive session; acceptable only because this file is intended
# to run as a standalone script
rm(list = ls(all = TRUE))
if (!is.null(sessionInfo()$otherPkgs)) {
    res <- suppressWarnings(
        lapply(paste('package:', names(sessionInfo()$otherPkgs), sep=""),
               detach, character.only=TRUE, unload=TRUE, force=TRUE))
}
# Install pacman if needed
my_repo <- 'http://cran.r-project.org'
if (!require("pacman")) {install.packages("pacman", repos = my_repo)}
# Load the other packages, installing as needed
pacman::p_load(data.table, stringr, chron, lubridate, gdata)
# Set timezone
# Timezone: https://en.wikipedia.org/wiki/List_of_tz_database_time_zones#List
# CC	Coordinates	Timezone	Offset	DST Offset
# ID	−0507+11924	Asia/Makassar	+08:00	+08:00
sampling_tz <- 'Asia/Makassar'
# -----------------------------------
# Import active, start and end times
# -----------------------------------
col_classes = c('character', 'numeric', 'character', 'character')
start_end <- fread('data/start and end time.csv', colClasses = col_classes)
# Zero-pad participant IDs to 6 characters so they match the Questemp
# file names (used as lookup keys by get_questemp_data() below)
start_end$noquestioner <- str_pad(start_end$noquestioner, 6, pad = '0')
# Convert the "HH:MM:SS" strings to chron 'times' objects, then drop the
# raw string columns
start_end$starttime = times(start_end$starttime_str)
start_end$endtime = times(start_end$endtime_str)
start_end[, c('starttime_str', 'endtime_str')] <- NULL
# ---------------------
# Import Questemp data
# ---------------------
# We will import the XLS files as provided to us, converting the timestamp
# format, omitting extra columns and then write this to a separate CSV for each
# input CSV, plus a combined CSV. We will also write out a plot file for each.
# There are several different formats for the input files due to variations
# in the number of columns and presence or absence of a title row above the
# column headings row. Also, there may be values reported for eartemp which
# are ten times higher than they should be due to a missing decimal point.
# Further, the eartemp values may have a comma instead of the decimal
# point, so we make this substitution if necessary. Lastly, the reported
# timestamps are not trusted, so alternate timestamps will be generated using
# a separate file containing start times, end times, and durations. The last
# observation will be assumed to occur at the correct end time as reported in
# the separate file and all other timestamps will be based on this end time
# and a 10 second interval between observations assumed to be chronologically
# sequential. Thus, if the end time is t, then observation n occurs at time t
# and observation n-1 occurs at time t-10s, n-2 at t-20s, n-3 at t-30s, etc.
# Set paths
data_path <- 'data/Questemp'
output_data_path <- 'output_data/Questemp'
# Create output folder(s) if necessary
dir.create(file.path(output_data_path),
           showWarnings=FALSE, recursive=TRUE)
dir.create(file.path(output_data_path, 'csv'),
           showWarnings=FALSE, recursive=TRUE)
dir.create(file.path(output_data_path, 'png'),
           showWarnings=FALSE, recursive=TRUE)
# Get list of input filenames
file_names <- dir(data_path, pattern ='.xls')
# Define a function to read a Questemp XLS file to get core body temp data
# Given one XLS file name, returns a data.frame with columns
# (timestamp, eartemp, noquestioner); also writes a cleaned per-file CSV and
# a PNG plot as side effects. Timestamps are reconstructed from the reported
# end time in start_end (see the long comment above), not read from the file.
# Relies on script-level globals: data_path, output_data_path, start_end,
# sampling_tz.
get_questemp_data <- function(file_name) {
    # Set file paths
    file_path <- file.path(data_path, file_name)
    csv_file_path <- file.path(output_data_path, 'csv',
                               gsub('\\.xls$', '\\.csv', file_name))
    png_file_path <- file.path(output_data_path, 'png',
                               gsub('\\.xls$', '\\.png', file_name))
    # Import raw data file, checking first 3 rows to determine table format.
    # Four layouts were observed: timestamp/eartemp live in columns 1:2 or
    # 4:5, with 1 or 3 header rows to skip -- TODO confirm if a new layout
    # ever appears in the raw files
    df = read.xls(file_path, sheet = 1, colClasses = 'character',
                  header = FALSE, stringsAsFactors=FALSE, nrows=3)
    if (df[1, 1] == 'Logged Data Chart') {
        df = read.xls(file_path, sheet = 1, colClasses = 'character',
                      header = FALSE, stringsAsFactors=FALSE, skip=3)[1:2]
    } else if (df[1, 1] == 'Timestamp') {
        df = read.xls(file_path, sheet = 1, colClasses = 'character',
                      header = FALSE, stringsAsFactors=FALSE, skip=1)[1:2]
    } else if (df[2, 1] == 'Village Id') {
        df = read.xls(file_path, sheet = 1, colClasses = 'character',
                      header = FALSE, stringsAsFactors=FALSE, skip=3)[4:5]
    } else {
        df = read.xls(file_path, sheet = 1, colClasses = 'character',
                      header = FALSE, stringsAsFactors=FALSE, skip=1)[4:5]
    }
    # Replace variable names
    names(df) <- c('timestamp', 'eartemp')
    # Use the filename to find the "noquestioner" id
    id <- as.character(gsub('\\.xls$', '', file_name))
    # Use the first observation's timestamp to find the sampling date
    questdate <- as.Date(df$timestamp, '%m/%d/%Y %H:%M:%S')[1]
    # Use the end time as reported in the "start and end time.csv" file
    endtime <- start_end[noquestioner == id, endtime]
    # Calculate number of seconds until the "end time" for each observation
    # assuming a chronological sequence of observations at 10 second interval
    time_offset <- seq(nrow(df)*10-10, 0, -10)
    # Create POSIX timestamp based on sampling date and reported end time
    df$timestamp <- as.POSIXct(
        strptime(paste(questdate, endtime),
                 "%Y-%m-%d %H:%M:%S", tz = sampling_tz) - time_offset)
    # Replace commas in "eartemp" with decimal points and convert to number
    df$eartemp <- as.numeric(gsub(',', '\\.', df$eartemp))
    # Correct for temperatures off by a factor of 10, ignoring NA values
    # (> 100 implies a missing decimal point in the source data)
    df$eartemp <- sapply(df$eartemp, function(x) {
        if (!is.na(x) & x > 100) x/10 else x })
    # Plot
    png(filename=png_file_path)
    plot(df, type='l',
         main=paste('Core Body Temp [Questemp]', file_name),
         xlab='Timestamp', ylab='Core Body Temp (°C)')
    dev.off()
    # Write to CSV
    write.csv(df, csv_file_path, row.names = FALSE)
    # Add column for 'noquestioner' and combine into single data frame
    df$noquestioner <- id
    df
}
# Read, clean, write, and combine Questemp data into a single data table
# NOTE(review): sapply() + as.data.table() relies on sapply's simplification
# behavior to combine the per-file data frames;
# rbindlist(lapply(file_names, get_questemp_data)) is the conventional way to
# row-bind a list of data frames -- verify outputs match before changing.
questemp_data <- rbindlist(as.data.table(
    sapply(file_names, function(x) get_questemp_data(x))))
# Do final clean-up on this single Questemp data table
names(questemp_data) <- c('timestamp', 'eartemp', 'noquestioner')
# Write this single Questemp data table to a single CSV file
write.csv(questemp_data, file.path(output_data_path, 'questemp_data.csv'),
          row.names = FALSE)
|
341c596e8f6a310905bf82944c196ead0b36c203
|
206aab419371d80bab70ab01ef9a7f78e3d8232f
|
/inst/examples/ui.R
|
482d2bef8bb8b2e7cd475cb3bd1ca8e6e1153458
|
[] |
no_license
|
anubhav-dikshit/rLab5
|
c09b5e4913887b38849b5e6deddefc983191c969
|
c6ab9f3dc2381206960b2b6221fdbd3670652b80
|
refs/heads/master
| 2020-03-29T23:54:20.340409
| 2018-10-01T10:01:19
| 2018-10-01T10:01:19
| 150,486,389
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 515
|
r
|
ui.R
|
#' UI definition for the Shiny app
#'
#' A single-page layout: one free-text input (the city to query, default
#' "Bangalore") in the sidebar, and a table output named 'table_output'
#' (filled by the matching server function) in the main panel.
#'
#' @importFrom shiny fluidPage sidebarLayout sidebarPanel textInput mainPanel tableOutput
#'
#'
ui <- shiny::fluidPage(
  # Sidebar layout with input and output definitions
  shiny::sidebarLayout(
    shiny::sidebarPanel(
      # Inputs: read on the server side as input$city
      shiny::textInput(inputId = "city",
                label = "Which city?",
                value = "Bangalore")
    ),
    # Outputs: rendered on the server side as output$table_output
    shiny::mainPanel(
      shiny::tableOutput('table_output')
    )
  )
)
|
8c10428322304e04e27b916303641d803c9d7d0b
|
29585dff702209dd446c0ab52ceea046c58e384e
|
/IntClust/R/FindElement.R
|
9fe15b8e2a56867151734377f6537f3e22520a25
|
[] |
no_license
|
ingted/R-Examples
|
825440ce468ce608c4d73e2af4c0a0213b81c0fe
|
d0917dbaf698cb8bc0789db0c3ab07453016eab9
|
refs/heads/master
| 2020-04-14T12:29:22.336088
| 2016-07-21T14:01:14
| 2016-07-21T14:01:14
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,485
|
r
|
FindElement.R
|
# Recursively search a nested object for a named element.
#
# Looks for `What` among the column names (then row names) of a data.frame,
# or among the names of a (possibly nested) list. Each hit is appended to
# `Element` under the name "<What>_<k>", where k is the running hit count.
#
# Args:
#   What    - name to search for (string)
#   Element - accumulator list, used internally during recursion
# Returns the accumulator list of matches (possibly empty).
FindElement <- function(What, Object, Element = list()){
  # FIX: use inherits() instead of class(x) == "...": the equality test breaks
  # (length-> 1 condition error in R >= 4.2) on multi-class objects such as
  # tibbles or data.tables, which now correctly take the data.frame branch.
  if(inherits(Object, "data.frame")){
    # Search in columns first, then fall back to row names
    if(What %in% colnames(Object)){
      Element[[length(Element)+1]] <- Object[,What]
      names(Element)[length(Element)] <- paste(What, "_", length(Element), sep="")
    }
    else if(What %in% rownames(Object)){
      Element[[length(Element)+1]] <- Object[What,]
      names(Element)[length(Element)] <- paste(What, "_", length(Element), sep="")
    }
  }
  if(inherits(Object, "list")){
    # i == 0 inspects the names of the list itself; i >= 1 recurses into
    # list/data.frame elements
    for(i in 0:length(Object)){
      if(i == 0){
        Names <- names(Object)
        if(What %in% Names){
          for(j in which(What == Names)){
            Element[length(Element)+1] <- Object[j]
            names(Element)[length(Element)] <- paste(What, "_", length(Element), sep="")
            # NOTE: returns after the first name match, as in the original
            return(Element)
          }
        }
      }
      else if(inherits(Object[[i]], "list")){
        Element <- FindElement(What, Object[[i]], Element = Element)
      }
      else if(inherits(Object[[i]], "data.frame")){
        Element <- FindElement(What, Object[[i]], Element = Element)
      }
    }
  }
  return(Element)
}
|
8feb2559415b7a3d0b0e4ed583f3f7724fd53dd8
|
95e50cf75ded646b9664ff08977e95726301272b
|
/ARIC/split.ARIC.R
|
1a94070de11f435128aa2984b20149d3de59de2a
|
[] |
no_license
|
gracekatherine/hacking-NHLBI
|
206b4b93298922c83e197f3d5e901c1eb2e905a3
|
674a49bde39f51cab34be43e9aab70820c2b106a
|
refs/heads/master
| 2021-09-06T00:16:58.506092
| 2018-01-31T22:56:43
| 2018-01-31T22:56:43
| 119,752,799
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,051
|
r
|
split.ARIC.R
|
## Split up large ARIC files into smaller ones.
## Reads one wide phenotype CSV, keeps the first 1330 columns, and writes it
## back out as three narrower CSVs. Every output file repeats columns 1 and
## 1330 (so the pieces can be re-linked) plus one third of the remaining
## columns, and gains a FileName column recording its own file name.
csv_name <- "phs000280.v3.pht004208.v1.p1.c1.UBMDALL.HMB-IRB.csv"
data <- read.csv(csv_name)
data <- data[, 1:1330]

# Base name without the ".csv" extension.
csv_name_short <- substr(csv_name, 1, nchar(csv_name) - 4)

# Column ranges for the three output files.
col_ranges <- list(2:444, 445:887, 888:1329)

# One loop replaces the original three copy/paste subset-and-write stanzas;
# output file names and contents are unchanged (<base>_1.csv .. <base>_3.csv).
for (i in seq_along(col_ranges)) {
  out_name <- paste(csv_name_short, "_", i, ".csv", sep = "")
  piece <- data[, c(1, 1330, col_ranges[[i]])]
  piece$FileName <- rep(out_name, nrow(piece))
  write.table(piece, file = out_name, qmethod = "double", sep = ",",
              row.names = FALSE)
}
|
e23306f27715d53031197978002ea338cf623a79
|
2f4917898af7f531467820a9ed8b878a091e856d
|
/scripts/network_plot.R
|
6daf9472c14d08e129b5fc0d67bc8d715ffb13e8
|
[] |
no_license
|
more1/NETWORK_MA_FRONTIERS_TUTORIAL
|
52cfbd6af45600892975b798939d347d1a6ff726
|
a047edad0bd957e5e7d86806d187d00b90b25c8a
|
refs/heads/master
| 2023-06-02T16:18:32.039042
| 2019-12-13T20:56:53
| 2019-12-13T20:56:53
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,640
|
r
|
network_plot.R
|
## Plot the treatment network for a network meta-analysis.
## Input:
##   MTCdata:  MTCdata file
##   map_txt:  a dataframe which maps each treatment to a number
##   dataType: the type of your MTCdata; available options are "Arm" and
##             "Contrast"
##   for the remaining arguments, see mtm.networkplot.fun()
## Return:
##   the network plot
network_plot <- function(MTCdata, map_txt, dataType = "Arm",
                         percomparison = T, nodetextsize = .8, nodesize = 0.8,
                         graphtitle = '', vertex.col = "red", ...) {
  # Reject anything other than the two supported data layouts up front.
  if (!(dataType %in% c("Arm", "Contrast"))) {
    return(cat("Please input the correct data type, available options are \"Arm\" and \"Contrast\""))
  }

  # Pairwise comparison table and node labels derived from the input data.
  edges <- generateMTMNetWork(MTCdata, dataType = dataType)
  node_info <- mtmNetWorkNodeName(MTCdata, map_txt = map_txt)
  labels <- node_info$treat_new

  if (dataType == "Arm") {
    # Arm-level data: node size is driven by total subject counts (n1 + n2).
    arm_totals <- apply(cbind(edges$n1, edges$n2), 1, sum)
    mtm.networkplot.fun(edges$t1, edges$t2, percomparison = percomparison,
                        nameoftreatments = labels,
                        VAR2 = arm_totals, nodetextsize = nodetextsize,
                        nodesize = nodesize,
                        graphtitle = graphtitle, vertex.col = vertex.col)
  } else {
    # Contrast-level data: no per-arm counts are available.
    mtm.networkplot.fun(edges$t1, edges$t2, percomparison = percomparison,
                        nameoftreatments = labels,
                        nodetextsize = nodetextsize, nodesize = nodesize,
                        graphtitle = graphtitle, vertex.col = vertex.col)
  }
}
|
92cdc396ea9dd02d344de038de4c8603ae9521f3
|
7a6a8c1980d0d61e984cbd6474513520621ccad6
|
/kaggle-pakdd2014-initial-research.R
|
d1a9ea610b5852f285a18312a168d5f041e3cd32
|
[] |
no_license
|
Convalytics/kaggle-pakdd2014
|
e2d33aa32897be9b33b87e16db1d87150642ebb9
|
b62b647f7d9e97af33ff9f87f366114b3dd247e6
|
refs/heads/master
| 2016-09-05T18:47:37.969058
| 2014-03-28T21:22:36
| 2014-03-28T21:22:36
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 4,181
|
r
|
kaggle-pakdd2014-initial-research.R
|
######################################################
# Kaggle PAKDD 2014 ASUS Challenge
# Jason Green
# March 18th, 2014
# https://github.com/Convalytics/kaggle-pakdd2014
# Last Updated: 3/18/2014
######################################################
# Exploratory script: loads the competition CSVs, summarises sales/repairs
# per module+component, and writes a rank-based submission column.
#install.packages("Hmisc")
# Load Packages
library(plyr)
library(ggplot2)
library(gridExtra)
library(Hmisc)
# Set Working Directory
setwd("~/GitHub/kaggle-pakdd2014")
# Import Data
sampleSubmission <- read.csv("~/GitHub/kaggle-pakdd2014/SampleSubmission.csv")
RepairTrain <- read.csv("~/GitHub/kaggle-pakdd2014/RepairTrain.csv")
SaleTrain <- read.csv("~/GitHub/kaggle-pakdd2014/SaleTrain.csv")
TargetMap <- read.csv("~/GitHub/kaggle-pakdd2014/Output_TargetID_Mapping.csv")
# Quick interactive inspection of each table (auto-printed).
head(sampleSubmission, n=5)
head(RepairTrain, n=5)
summary(RepairTrain)
hist(subset(RepairTrain$number_repair, RepairTrain$number_repair > 3))
table(RepairTrain$number_repair)
# Very few number_repair > 2
head(TargetMap, n=5)
summary(TargetMap)
head(SaleTrain, n=5)
summary(SaleTrain)
#plot(SaleTrain$number_sale ~ SaleTrain$module_category)
# 2x2 grid of count histograms: sales (top) vs repairs (bottom).
moduleRepairs <- qplot(module_category,data=RepairTrain, binwidth = 1, geom="histogram", na.rm=T, ylab="Repair Count")
compRepairs <- qplot(component_category,data=RepairTrain, binwidth = 1, geom="histogram", na.rm=T, ylab="RepairCount")
moduleSales <- qplot(module_category,data=SaleTrain, binwidth = 1, geom="histogram", na.rm=T, ylab="Sales Count")
compSales <- qplot(component_category,data=SaleTrain, binwidth = 1, geom="histogram", na.rm=T, ylab="Sales Count")
# ??? Every component was sold the exact same number of times???
grid.arrange(moduleSales, compSales, moduleRepairs, compRepairs,ncol=2)
# Per-month sale counts by module+component.
SaleSummary <- ddply(SaleTrain, c("module_category","component_category","year.month"), summarize,
#TotalCount = length(module_category),
SaleCount = sum(number_sale)
)
# Per-month repair counts by module+component.
RepairSummary <- ddply(RepairTrain, c("module_category","component_category","year.month.repair."), summarize,
#TotalCount = length(module_category),
RepairCount = sum(number_repair)
)
# All-time repair counts by module+component (used for ranking below).
TotalRepairs <- ddply(RepairTrain, c("module_category","component_category"), summarize,
#TotalCount = length(module_category),
RepairCount = sum(number_repair)
)
# write.csv(SaleSummary, file="SaleSummary.csv")
# write.csv(RepairSummary, file="RepairSummary.csv")
# Outer join of monthly sales and repairs on module+component+month.
AllSummary <- merge(x=SaleSummary, y=RepairSummary,
by.x=c("module_category","component_category","year.month"),
by.y=c("module_category","component_category","year.month.repair."),
all=TRUE
)
head(AllSummary)
# year.month is "YYYY/MM"-style text; split into year and month strings.
AllSummary$year <- substr(AllSummary$year.month,1,4)
AllSummary$month <- substr(AllSummary$year.month,6,7)
#
# M1_P02_Sales <- subset(SaleTrain, module_category=="M1" & component_category == "P02")
# M1_P02_Sales <- ddply(M1_P02_Sales, c("module_category","component_category","year.month"), summarize,
# TotalCount = length(module_category),
# SaleCount = sum(number_sale)
# )
#
#
# M1_P02_Repairs <- subset(RepairTrain, module_category=="M1" & component_category == "P02")
# M1_P02_Repairs <- ddply(M1_P02_Repairs, c("module_category","component_category","year.month.repair."), summarize,
# TotalCount = length(module_category),
# RepairCount = sum(number_repair)
# )
#
#
####
####
##########################
# Map to target
TargetMap$Target <- NA
head(TargetMap)
# Put repairs into ranks
# cut2(..., g=5) bins all-time repair counts into quintile groups 1..5.
TotalRepairs$Group <- as.numeric(cut2(TotalRepairs$RepairCount, g=5))
TargetMap <- merge(x=TargetMap, y=TotalRepairs,
by.x=c("module_category","component_category"),
by.y=c("module_category","component_category"),
all.x=TRUE
)
# Shift groups to 0..4, dock 2011 rows one more, clamp at 0.
# NOTE(review): this assumes TargetMap carries a `year` column from the
# mapping file and that it compares cleanly with 2011 -- verify.
TargetMap$Group <- TargetMap$Group - 1
TargetMap$Group <- ifelse(TargetMap$year == 2011, TargetMap$Group - 1, TargetMap$Group)
TargetMap$Group <- ifelse(TargetMap$Group < 0, 0, TargetMap$Group)
# Write only the Group column as the submission file.
write.csv(TargetMap[,"Group"],file="convalytics_pakdd_xx.csv")
|
a2451db81517cf61edec1e8521b67e784396fa3a
|
62e9ec62c910bac5eeb77c6cf9719a1fb30f566e
|
/R/GraphP1.R
|
5c5b8fe63f7befc8dab815062daa8bba52642191
|
[
"MIT"
] |
permissive
|
bhupendpatil/Practice
|
25dd22ccca706359aabe135a3cbfb813e2611cef
|
cae96a3636527100e4b98880e5d3e9ce9399335a
|
refs/heads/master
| 2023-09-01T13:58:53.857928
| 2023-08-21T06:16:41
| 2023-08-21T06:16:41
| 112,185,402
| 5
| 1
|
MIT
| 2023-08-21T06:16:43
| 2017-11-27T11:02:33
|
Jupyter Notebook
|
UTF-8
|
R
| false
| false
| 522
|
r
|
GraphP1.R
|
library(igraph)
library(igraphdata)
# Directed graph on 7 vertices; `edges` is a flat vector of successive
# (from, to) pairs: 1->2, 3->4, 2->3, ...
dir = graph(edges = c(1,2,3,4,2,3,1,3,4,2,4,5,2,5,6,2,2,7),n = 7,directed = T)
# Undirected graph on 8 vertices; repeated pairs produce multi-edges.
undir = graph(edges = c(1,2,3,5,2,3,2,4,5,6,3,2,5,4,3,4,5,8,6,4,4,2,4,6,4,2,1,7,4,2),n=8,directed = F)
plot(dir)
plot(undir)
# i) number of edges (gsize and ecount are equivalent accessors)
gsize(undir)
ecount(undir)
# ii) number of nodes
vcount(dir)
# iii) degree of nodes
degree(dir)
centr_degree(dir) #centrality (degree centralization of the whole graph)
# iv) node with lowest degree
# NOTE(review): this prints the minimum degree VALUE, not which node attains
# it; which.min(degree(dir)) would give the node index -- confirm intent.
min(degree(dir))
# v) adjacency list
# NOTE(review): get.adjlist()/get.adjacency() are deprecated igraph names
# (as_adj_list()/as_adjacency_matrix() in current releases) -- verify the
# igraph version in use.
get.adjlist(dir)
# vi) matrix of graph
get.adjacency(undir)
|
229013074a188fc6844aa4fbacbc7676df3ed0df
|
407350d22d3a58e93dd9b02bdb5e2213d8c55cc8
|
/Lab 1/Solutions/assignment2.R
|
4795189831fbd6842b24002251c4d816bbb1d734
|
[] |
no_license
|
vonElfvin/Machine-Learning-Practice
|
e679fa2b111c8178273c9b3e375abe528ceac213
|
65334ae79d29673c7ae9e5e4833728cf309be22e
|
refs/heads/master
| 2021-05-14T16:23:43.328273
| 2018-11-23T19:46:05
| 2018-11-23T19:46:05
| 116,019,701
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,932
|
r
|
assignment2.R
|
# APPENDIX 2
# Log-likelihood of an exponential sample x at rate θ: l(θ) = n·log(θ) − θ·Σx
loglikelihood <- function(x, θ) {
  n <- length(x)
  n * log(θ) - θ * sum(x)
}
# Closed-form maximum-likelihood estimate of the exponential rate: θ* = n / Σx
max_loglikelihood <- function(x) {
  n <- length(x)
  total <- sum(x)
  n / total
}
# Log of the (unnormalised) posterior for an exponential likelihood with an
# exponential(λ) prior on θ: n·log(θ) − θ·Σx + log(λ) − λ·θ
log_bayesian <- function(x, θ, λ) {
  log_lik <- length(x) * log(θ) - θ * sum(x)
  log_prior <- log(λ) - λ * θ
  log_lik + log_prior
}
# Posterior mode for the Bayesian model above: θ* = n / (Σx + λ)
max_bayesian <- function(x, λ) {
  length(x) / (sum(x) + λ)
}
# Task 1
# Data preparation: machine lifetimes, assumed exponentially distributed.
dataframe = read.csv("machines.csv", dec=',')
X = dataframe[1]$Length # X vector of data
X_6 = X[1:6] # X vector of first 6 values of the data
θ = seq(from=0, to=20, by=0.025) # grid of θ values to evaluate
m = length(θ) # Amount of θs to be tested
loglikelihood_n = numeric(m) # Empty vector for loglikelihood values θ when using all values
loglikelihood_6 = numeric(m) # Empty vector for loglikelihood values θ when using the first 6 values
# Task 2
# Calculating the loglikelihoods over the θ grid for both data vectors
for(i in 1:m){
loglikelihood_n[i] = loglikelihood(X, θ[i])
loglikelihood_6[i] = loglikelihood(X_6, θ[i])
}
# Task 3
# Plot the loglikelihoods for both all x values and first 6 for comparison
plot(θ, loglikelihood_6, xlim=c(0,7), ylim=c(-60, 0), type="l", main="Dependence of Log-Likelihood and θ", xlab="θ", ylab="Log-Likelihood", col="green")
lines(θ, loglikelihood_n, col="blue")
# NOTE(review): both legend labels read "loglikelihood for n values"; the
# green entry presumably should say "6 values" -- confirm before changing
# the runtime string.
legend("bottomright", legend=c("(green) loglikelihood for n values", "(blue) loglikelihood for n values"))
# θ values for maximum loglikelihoods for all x values and first 6
θstar_n = max_loglikelihood(X)
θstar_6 = max_loglikelihood(X_6)
# Task 4
λ = 10; # given λ value for the prior
log_bayesian_model = numeric(m) # empty vector to be filled with the log values of the proportional probability to P(θ|x)
# Calculating the log values of the proportional probability for each θ
for(i in 1:m){
log_bayesian_model[i] = log_bayesian(X, θ[i], λ)
}
# Plot the Bayesian Model
plot(θ, log_bayesian_model, type="l", xlim=c(0,5), ylim=c(-500, 0), main="Dependance of l(θ) on θ", xlab="θ", ylab="l(θ)", col="green")
θstar_b = max_bayesian(X, λ) # Calculate the argmax θ of the proportional probability
# Task 5
# Compare new generated observations to original ones
set.seed(12345)
new_observations = rexp(n=50, rate = θstar_n) # Generate new observations with optimal θ from Task 3
#data.frame(X, new_observations)
hist(X, col="green", xlim=c(0,6), ylim=c(0,30), main="Original observations", xlab="x") # Histogram of the original obs
x11()
hist(new_observations, col="blue", breaks=12, xlim=c(0,6), ylim=c(0,30), main="New observations", xlab="x") #Histogram of the new obs
|
4107a8e4350c658d9ba0000379a56d48161caffe
|
7eb915ede2984068148d50b538c6a4c67ae103bd
|
/man/wig2rle.Rd
|
f9a0853e2997b54196aec8afd32e269a3847c2e7
|
[] |
no_license
|
lbarquist/norclip
|
8867b946a128016e97722e422c31bc9c91023cd0
|
28e6297a9ed402e03be2b82166c95ac8f9bd7f3a
|
refs/heads/master
| 2020-05-22T00:04:20.781365
| 2016-11-23T11:00:44
| 2016-11-23T11:00:44
| 60,686,085
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 727
|
rd
|
wig2rle.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/wig2rle.R
\name{wig2rle}
\alias{wig2rle}
\title{Import wig files into rle coverage vectors.}
\usage{
wig2rle(forward_path, reverse_path)
}
\arguments{
\item{forward_path}{Path to forward strand wig file.}
\item{reverse_path}{Path to reverse strand wig file.}
}
\value{
A concatenated IRanges rle coverage vector for the forward and reverse strands.
}
\description{
Imports wiggle files for forward and reverse strands of a CLIP-seq
experiment. These are then converted to rle coverage vectors, and the
absolute value of the reverse strand is taken to guard against negative
values before concatenation.
}
\examples{
}
\seealso{
\code{\link{import}}, \code{\link{loadData}}
}
|
6b5ba587fb6aebe8836ded5fcb32723ff38d167f
|
a9b4425bb81547dd57939a29f1a635ce5f653da8
|
/Plot1.R
|
28ed61640d3f30d03a59213a90572957c19518c4
|
[] |
no_license
|
ScJavier/ExData_Plotting1
|
c688bef85f5611f07eaadb5da66063cc9824f9ed
|
6f1e047b043438cffcf6979f27d3972307388d5d
|
refs/heads/master
| 2021-01-14T10:54:49.617907
| 2014-09-07T00:58:03
| 2014-09-07T00:58:03
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 662
|
r
|
Plot1.R
|
# Plot 1: histogram of Global Active Power for 1/2/2007 and 2/2/2007.
# Paths are machine-specific; the raw file is ';'-separated.
dir <- "C:/Users/demyc 13/Google Drive/Coursera/Data Science Specialization/04 - Exploratory Data Analysis/Prog.Assign1/ExData_Plotting1/"
file <- "C:/Users/demyc 13/Google Drive/Coursera/Data Science Specialization/04 - Exploratory Data Analysis/Prog.Assign1/household_power_consumption.txt"
setwd(dir)

# Load the full data set, then keep only the two days of interest.
power_data <- read.table(file, header = TRUE, sep = ";", stringsAsFactors = FALSE)
feb_days <- power_data[power_data$Date %in% c("1/2/2007", "2/2/2007"), ]

# Draw the histogram straight into a 480x480 PNG device.
png("Plot1.png", width = 480, height = 480)
hist(as.numeric(feb_days$Global_active_power),
     main = "Global Active Power",
     col = "red",
     xlab = "Global Active Power (kilowatts)")
dev.off()
|
15ed99f2863d1632f0f1688c1b44207e98cd51c7
|
2b2aee3352f8a10c121fe74036eddec01b3ee595
|
/man/slim_extract_genlight.Rd
|
1634f2f617f5977b6264c30d2ac6bd8c8e7a28b6
|
[
"MIT"
] |
permissive
|
rdinnager/slimr
|
56f1fef0a83198bce292dd92dc1014df87c2d686
|
e2fbb7115c7cca82dabd26dc6560e71a8cd0958b
|
refs/heads/master
| 2023-08-21T14:00:36.089104
| 2023-07-31T03:11:09
| 2023-07-31T03:11:09
| 226,999,099
| 8
| 1
|
NOASSERTION
| 2023-08-03T05:44:32
| 2019-12-10T01:04:16
|
R
|
UTF-8
|
R
| false
| true
| 536
|
rd
|
slim_extract_genlight.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/slim_extract.R
\name{slim_extract_genlight}
\alias{slim_extract_genlight}
\title{Extract data into a genlight object}
\usage{
slim_extract_genlight(x, ...)
}
\arguments{
\item{x}{\code{slimr_results} object containing data generated by \code{slimr_output} using \code{outputVCF()} or \code{outputFull()} in SLiM}

\item{...}{Arguments passed to or from other methods.}
}
\value{
A \code{genlight} object
}
\description{
Extract data into a genlight object
}
|
459369dec5c28285355d0a6e08650dc297ac7b94
|
9b76f92dfecfc84e2a43da24b9e4aa678a2de356
|
/bootcamp/022RBasicsExercise.R
|
a65c9f5e9f16ffdce33e6d4279dcb0c7c85e0eb8
|
[] |
no_license
|
rathanDev/r
|
fa9d82582a83271b1f771c3bc9dd4348b0b28f73
|
2c4871f13de7cde82df7e0e63a253fa4a575a23b
|
refs/heads/master
| 2022-12-13T17:56:46.669651
| 2020-09-10T12:32:49
| 2020-09-10T12:32:49
| 264,051,092
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 473
|
r
|
022RBasicsExercise.R
|
# R basics exercise
# /udemyR/R-Course-HTML-Notes/R-for-Data-Science-and-Machine-Learning/TrainingExercises/Exercises/RBasicsExercise.html

# Two to the fifth power
print(2 ^ 5)

# A week of stock prices
stock.prices <- c(23, 27, 23, 21, 34)
print(stock.prices)

# Label each price with its day of the week
daysOfWeek <- c("Mon", "Tue", "Wed", "Thu", "Fri")
names(stock.prices) <- daysOfWeek
print(stock.prices)

# Average price across the week
print(mean(stock.prices))

# Mask of the days where the price exceeded 23, and those prices
over.23 <- stock.prices > 23
print(over.23)
print(stock.prices[over.23])

# Highest price, and the day on which it occurred
print(max(stock.prices))
maxPrice <- stock.prices == max(stock.prices)
print(maxPrice)
print(stock.prices[maxPrice])
|
b607703b96eb80b406e6f6a3a9dd8869ed7c5976
|
a2718fd2bab9eb1b86b77b8c9b0d776973d11315
|
/extras/basicShinyApp/ui.R
|
6427cbf00eea934bb990dbe65ad770928e478250
|
[
"Apache-2.0"
] |
permissive
|
OHDSI/SelfControlledCaseSeries
|
df61f7622fa19bb7b24a909a8da4f18c626cf33c
|
28cd3a6cb6f67be5c6dfba0682b142ff3853e94c
|
refs/heads/main
| 2023-08-31T02:00:48.909626
| 2023-04-13T11:42:06
| 2023-04-13T11:42:06
| 20,701,289
| 13
| 12
| null | 2023-09-07T04:50:44
| 2014-06-10T20:53:47
|
R
|
UTF-8
|
R
| false
| false
| 8,394
|
r
|
ui.R
|
library(shiny)
library(DT)
# SCCS Evidence Explorer UI.
# Left column (width 3): pickers for the exposures-outcome pair, data
# sources, and analyses (choices come from exposuresOutcomeNames, databases
# and sccsAnalyses, which must exist in the app environment). Right column
# (width 9): a results table plus a detail tab set shown once a row is
# selected (the server sets output.rowIsSelected).
shinyUI(
fluidPage(style = "width:1500px;",
titlePanel("SCCS Evidence Explorer"),
# Inline CSS for the busy banner (#loadmessage) shown while Shiny computes.
tags$head(tags$style(type = "text/css", "
#loadmessage {
position: fixed;
top: 0px;
left: 0px;
width: 100%;
padding: 5px 0px 5px 0px;
text-align: center;
font-weight: bold;
font-size: 100%;
color: #000000;
background-color: #ADD8E6;
z-index: 105;
}
")),
# Show the banner whenever the Shiny session reports itself busy.
conditionalPanel(condition = "$('html').hasClass('shiny-busy')",
tags$div("Processing...",id = "loadmessage")),
fluidRow(
# Selection controls.
column(3,
selectInput("exposuresOutcome", "Exposures-outcome", exposuresOutcomeNames$name),
checkboxGroupInput("database", "Data source", databases$cdmSourceAbbreviation, selected = databases$cdmSourceAbbreviation),
checkboxGroupInput("analysis", "Analysis", sccsAnalyses$description, selected = sccsAnalyses$description)
),
# Results table and per-row detail tabs.
column(9,
dataTableOutput("mainTable"),
conditionalPanel("output.rowIsSelected == true",
tabsetPanel(id = "detailsTabsetPanel",
tabPanel("Power",
div(strong("Table 1."), "For each variable of interest: the number of cases (people with at least one outcome), the number of years those people were observed, the number of outcomes, the number of subjects with at least one exposure, the number of patient-years exposed, the number of outcomes while exposed, and the minimum detectable relative risk (MDRR)."),
tableOutput("powerTable")
),
tabPanel("Attrition",
plotOutput("attritionPlot", width = 600, height = 500),
div(strong("Figure 1."), "Attrition, showing the number of cases (number of subjects with at least one outcome), and number of outcomes (number of ocurrences of the outcome) after each step in the study.")
),
tabPanel("Model",
tabsetPanel(id = "modelTabsetPanel",
tabPanel("Model coefficients",
div(strong("Table 2."), "The fitted non-zero coefficent (incidence rate ratio) and 95 percent confidence interval for all variables in the model."),
tableOutput("modelTable")
),
tabPanel("Age spline",
plotOutput("ageSplinePlot"),
div(strong("Figure 2a."), "Spline fitted for age.")
),
tabPanel("Season spline",
plotOutput("seasonSplinePlot"),
div(strong("Figure 2b."), "Spline fitted for season")
),
tabPanel("Calendar time spline",
plotOutput("calendarTimeSplinePlot"),
div(strong("Figure 2c."), "Spline fitted for calendar time")
)
)
),
tabPanel("Spanning",
radioButtons("spanningType", label = "Type:", choices = c("Age", "Calendar time")),
plotOutput("spanningPlot"),
div(strong("Figure 3."), "Number of subjects observed for 3 consecutive months, centered on the indicated month.")
),
tabPanel("Time trend",
plotOutput("timeTrendPlot", height = 600),
div(strong("Figure 4."), "Per calendar month the number of people observed, the unadjusted rate of the outcome, and the rate of the outcome after adjusting for age, season, and calendar time, if specified in the model. Red indicates months where the adjusted rate was significantly different from the mean adjusted rate.")
),
tabPanel("Time to event",
plotOutput("timeToEventPlot"),
div(strong("Figure 5."), "The number of events and subjects observed per week relative to the start of the first exposure (indicated by the thick vertical line).")
),
tabPanel("Event dep. observation",
plotOutput("eventDepObservationPlot"),
div(strong("Figure 6."), "Histograms for the number of months between the first occurrence of the outcome and the end of observation, stratified by whether the end of observation was censored (inferred as not being equal to the end of database time), or uncensored (inferred as having the subject still be observed at the end of database time).")
),
tabPanel("Systematic error",
plotOutput("controlEstimatesPlot"),
div(strong("Figure 7."),"Systematic error. Effect size estimates for the negative controls (true incidence rate ratio = 1)
and positive controls (true incidence rate ratio > 1), before and after calibration. Estimates below the diagonal dashed
lines are statistically significant (alpha = 0.05) different from the true effect size. A well-calibrated
estimator should have the true effect size within the 95 percent confidence interval 95 percent of times.")
),
tabPanel("Diagnostics summary",
tableOutput("diagnosticsSummary")
)
)
)
)
)
)
)
|
69d92ff9dcf0a2d6e936d61b0ad2ed88ee284174
|
c7f693112b1019b0017f94fcdb3f1dba79749016
|
/Rfunction/CellSIUS_final_cluster_assignment.R
|
18ef7133f7bba5e1884c62b0cd49cc00cf892048
|
[] |
no_license
|
fabotao/GapClust-rge-paper
|
d08d13dfc14d730a6c77459a1a353f5c7de4443b
|
bc845328fe1196f2b4aac380a8f0192372481f10
|
refs/heads/master
| 2022-12-09T08:47:44.750004
| 2020-09-06T12:58:22
| 2020-09-06T12:58:22
| 291,741,399
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,368
|
r
|
CellSIUS_final_cluster_assignment.R
|
############################################
# OPTIONAL: Final assignment to unique clusters
# Note: This is different from the previous subcluster asignemnt, where a cell can potentially be
# a member of multiple subgroups.
############################################
#' Final assignment to unique clusters
#'
#'
#'
#' @description
#'
#'
#' \code{CellSIUS_final_cluster_assignment} merges the main clusters with the sub clusters
#' identified by \code{\link[CellSIUS]{CellSIUS}} and returns the final cluster assignment.
#'
#'
#' @param CellSIUS.out data.table: Output of \code{\link[CellSIUS]{CellSIUS}}.
#' @param group_id Character: vector with cluster cell assignment.
#' Make sure that the order of cell cluster
#' assignment reflects the order of the columns of norm.mat.
#' @param min_n_genes Integer: Minimum number of genes in a signature.
#'
#' @details
#'
#' \code{CellSIUS_final_cluster_assignment} returns a \code{character}.
#' Note that a cell can potentially be a member of multiple subgroups.
#'
#' @seealso
#'
#' Main CellSIUS function: \code{\link[CellSIUS]{CellSIUS}}.
#' @import data.table
#' @importFrom data.table ":="
#' @export
CellSIUS_final_cluster_assignment = function(CellSIUS.out, group_id, min_n_genes = 3){
# Signature size per sub-cluster. NOTE(review): `:=` modifies the caller's
# CellSIUS.out by reference (data.table semantics) -- side effect on input.
CellSIUS.out[,n_genes:=length(unique(gene_id)),by='sub_cluster']
# One row per cell: the cell id and its main-cluster assignment as character.
assignments = data.table::data.table(cell_idx = names(group_id), group=as.character(group_id))
names(assignments) = c('cell_idx', 'group')
assignments$group = as.character(assignments$group)
# Attach sub-cluster info, keeping only sub-clusters backed by at least
# min_n_genes signature genes; outer join so unassigned cells get NA.
assignments = merge(assignments, CellSIUS.out[n_genes>=min_n_genes,c('cell_idx','main_cluster','sub_cluster')],by='cell_idx',all=T)
assignments = unique(assignments)
# Collapse a cell's main/sub cluster labels into one final label:
# - single NA or "_0"-suffixed sub-cluster -> keep the main cluster
# - single "_1"-style sub-cluster -> strip the trailing "_<digit>"
# - several sub-clusters -> join the "1$"-suffixed ones with "-",
#   falling back to the main cluster when none qualify.
final_assignment = function(main_cluster,sub_cluster){
if(length(sub_cluster)==1){
if(is.na(sub_cluster) || grepl("0$",sub_cluster)){
out = main_cluster
} else {
out = gsub('_\\d$','',sub_cluster)
}
} else {
subclusts = gsub('_\\d$', '',sub_cluster[grepl("1$",sub_cluster)])
out = paste(subclusts,collapse='-')
if(out == ''){out = main_cluster}
}
return(out)
}
assignments[,final:=final_assignment(group,sub_cluster),by="cell_idx"]
assignments = unique(assignments[,c('cell_idx','final')])
# Return a character vector of final labels named by cell id, in the same
# order as the input group_id vector.
out = as.character(assignments$final)
names(out)=assignments$cell_idx
out = out[names(group_id)]
return(out)
}
|
3aa24f6b7bdd0dd29144b7ba4ab70864f1995aba
|
1fe8e63f792ec46de431b05f8a43ef85d4374a69
|
/scripts/Merged quantification and stats/Spectre - merged quantification and stats.R
|
233d87526c3da339ea83da5e39f3d5cb8675e505
|
[
"MIT"
] |
permissive
|
tomashhurst/Spectre
|
4b470b7f366037e9b9b00b6c75af2cafd1c64236
|
b24cf382ea9b0d98c28fc4b5d60ab3339447d220
|
refs/heads/master
| 2021-07-16T18:07:55.542076
| 2021-06-27T04:42:54
| 2021-06-27T04:42:54
| 199,840,725
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 15,414
|
r
|
Spectre - merged quantification and stats.R
|
##########################################################################################################
#### Spectre discovery workflow - C - differential quantiative and statistical analysis
##########################################################################################################
# Spectre R package: https://sydneycytometry.org.au/spectre
# Thomas Myles Ashhurst, Felix Marsh-Wakefield, Givanna Putri
##########################################################################################################
#### Analysis session setup
##########################################################################################################
### Load packages
library(Spectre)
Spectre::package.check() # Check that all required packages are installed
Spectre::package.load() # Load required packages
### Set DT threads
getDTthreads()
### Set primary directory
dirname(rstudioapi::getActiveDocumentContext()$path) # Finds the directory where this script is located
setwd(dirname(rstudioapi::getActiveDocumentContext()$path)) # Sets the working directory to where the script is located
getwd()
PrimaryDirectory <- getwd()
PrimaryDirectory
### Set input directory
setwd(PrimaryDirectory)
setwd("Output B - discovery analysis/Output - summary data/")
InputDirectory <- getwd()
InputDirectory
setwd(PrimaryDirectory)
### Set metadata directory
setwd(PrimaryDirectory)
setwd("metadata/")
MetaDirectory <- getwd()
MetaDirectory
setwd(PrimaryDirectory)
### Set output directory
setwd(PrimaryDirectory)
dir.create("Output C - quantitative analysis", showWarnings = FALSE)
setwd("Output C - quantitative analysis")
OutputDirectory <- getwd()
setwd(PrimaryDirectory)
### Save session info
setwd(OutputDirectory)
dir.create("Output - info")
setwd("Output - info")
sink(file = "session_info.txt", append=TRUE, split=FALSE, type = c("output", "message"))
session_info()
sink()
setwd(PrimaryDirectory)
##########################################################################################################
#### Import data
##########################################################################################################
setwd(InputDirectory)
### Read in files
sum.list <- list()
files <- list.files(getwd(), ".csv")
files
for(i in files){
sum.list[[i]] <- fread(i)
}
sum.list[[1]]
### Merge into single data.table
as.matrix(names(sum.list[[1]]))
checks <-do.list.summary(sum.list)
checks$name.table
metric.col <- c(1)
essential.cols <- c(2,3,4)
rmv.cols <- c(5)
sum.dat <- sum.list[[1]][,c(essential.cols), with = FALSE]
sum.dat
for(i in files){
temp <- sum.list[[i]]
metric <- temp[1,metric.col, with = FALSE]
temp <- temp[,-c(metric.col, essential.cols, rmv.cols), with = FALSE]
names(temp) <- paste0(metric, " -- ", names(temp))
sum.dat <- cbind(sum.dat, temp)
}
### Check reults
sum.dat
as.matrix(names(sum.dat))
##########################################################################################################
#### Preferences
##########################################################################################################
### Preferences
as.matrix(names(sum.dat))
sample.col <- "Sample"
group.col <- "Group"
batch.col <- "Batch"
annot.cols <- c(group.col, batch.col)
annot.cols
plot.cols <- names(sum.dat)[c(4:9,64:69)]
plot.cols
### Comparisons
variance.test <- NULL # 'kruskal.test'
pairwise.test <- "wilcox.test"
comparisons <- list(c("Mock", "WNV"))
grp.order <- c("Mock", "WNV")
### Other adjustments
sum.dat$Batch <- as.factor(sum.dat$Batch)
##########################################################################################################
#### Stats calculations
##########################################################################################################
setwd(OutputDirectory)
dir.create("Output - summary data tables")
setwd("Output - summary data tables")
### Z score
# Standardise a numeric vector: subtract its mean and divide by its sample
# standard deviation, giving mean 0 and sd 1.
zscore <- function(x) {
  centred <- x - mean(x)
  centred / sd(x)
}
sum.dat.z <- sum.dat
res <- scale(sum.dat.z[,plot.cols, with = FALSE])
res <- as.data.table(res)
sum.dat.z[,plot.cols] <- res
sum.dat.z
fwrite(sum.dat, "Summary data.csv")
fwrite(sum.dat.z, "Summary data - z-score.csv")
### Statistical tests
sum.dat.stats.raw <- do.stats(sum.dat,
use.cols = plot.cols,
sample.col = sample.col,
grp.col = group.col,
comparisons = comparisons,
corrections = NULL,
variance.test = variance.test,
pairwise.test = pairwise.test)
sum.dat.stats.FDR <- do.stats(sum.dat,
use.cols = plot.cols,
sample.col = sample.col,
grp.col = group.col,
comparisons = comparisons,
corrections = 'fdr',
variance.test = variance.test,
pairwise.test = pairwise.test)
## Echo the raw and FDR-corrected summary-statistics tables, then write both to CSV
## in the current working directory (set earlier in the script).
sum.dat.stats.raw
sum.dat.stats.FDR
sum.dat.stats.raw[,c(1:3)]
sum.dat.stats.FDR[,c(1:3)]
fwrite(sum.dat.stats.raw, "Summary data - stats - uncorrected.csv")
fwrite(sum.dat.stats.FDR, "Summary data - stats - FDR.csv")
### Sig tables
## Raw p-values: keep only pairwise 'p-value' rows (drop the Kruskal omnibus row),
## then label each feature 'Significant' / 'NS' by thresholding at p < 0.05.
raw <- sum.dat.stats.raw[sum.dat.stats.raw[["Type"]] == 'p-value',]
raw <- raw[raw[["Comparison"]] != 'Kruskal',]
pval.compars <- raw[["Comparison"]]
pval <- raw[,plot.cols, with = FALSE]
# NOTE(review): growing a matrix with rbind() inside a loop is O(n^2); a
# vectorised ifelse() over as.matrix(pval) would be equivalent and faster.
pval.sig <- matrix(nrow = 0, ncol = length(plot.cols))
for(i in c(1:nrow(pval))){
temp <- pval[i,]
temp <- temp < 0.05
# gsub() coerces the logical row to character, mapping TRUE/FALSE to labels.
temp <- gsub(TRUE, "Significant", temp)
temp <- gsub(FALSE, "NS", temp)
pval.sig <- rbind(pval.sig, temp)
}
pval.sig <- as.data.frame(pval.sig)
names(pval.sig) <- plot.cols
rownames(pval.sig) <- paste0("p-value - ", pval.compars)
# One colour mapping per comparison row: uncorrected significance shown in blue.
p.val.annots <- list()
for(i in rownames(pval.sig)){
p.val.annots[[i]] <- c('NS' = "Black", "Significant" = "Blue")
}
## P-values FDR: same construction as above but on the FDR-corrected table.
fdr <- sum.dat.stats.FDR[sum.dat.stats.FDR[["Type"]] == 'p-value_fdr',]
fdr <- fdr[fdr[["Comparison"]] != 'Kruskal',]
pval.fdr.compars <- fdr[["Comparison"]]
pval.fdr <- fdr[,plot.cols, with = FALSE]
pval.fdr.sig <- matrix(nrow = 0, ncol = length(plot.cols))
for(i in c(1:nrow(pval.fdr))){
temp <- pval.fdr[i,]
temp <- temp < 0.05
temp <- gsub(TRUE, "Significant", temp)
temp <- gsub(FALSE, "NS", temp)
pval.fdr.sig <- rbind(pval.fdr.sig, temp)
}
pval.fdr.sig <- as.data.frame(pval.fdr.sig)
names(pval.fdr.sig) <- plot.cols
rownames(pval.fdr.sig) <- paste0("p-value_fdr - ", pval.fdr.compars)
# FDR-corrected significance shown in red, to distinguish it from the raw rows.
p.val.fdr.annots <- list()
for(i in rownames(pval.fdr.sig)){
p.val.fdr.annots[[i]] <- c('NS' = "Black", "Significant" = "Red")
}
## Create annotation data.frame: features as rows, one column per
## comparison x correction, used later as heatmap column annotations.
x <- rbind(pval.sig, pval.fdr.sig)
# x <- data.frame("p_value" = pval,
#                 "p_value_FDR" = pval.fdr)
x <- t(x)
x <- as.data.frame(x)
# x <- as.matrix(x)
# feature.annots <- as.data.frame(x)
# rownames(feature.annots) <- plot.cols
# feature.annots
#str(feature.annots)
#str(my_sample_col)
str(x)
feature.annots <- x
p.val.annots
p.val.fdr.annots
annotation_colors <- c(p.val.annots, p.val.fdr.annots)
# annotation_colors <- list('p_value' = c('NS' = "Black", "Significant" = "Blue"),
#                           'p_value_FDR' = c('NS' = "Black", "Significant" = "Red"))
##########################################################################################################
#### Differential heatmap
##########################################################################################################
## Two z-score heatmaps of all features, annotated with the significance
## labels (feature.annots / annotation_colors) built in the previous section.
## Output PNGs go to "Output - heatmaps" under OutputDirectory.
setwd(OutputDirectory)
dir.create("Output - heatmaps")
setwd("Output - heatmaps")
sum.dat.z[[group.col]]
# Version 1: rows kept in input order ("static rows"), only columns clustered.
# row.sep = 6 draws a separator after the sixth row.
make.pheatmap(sum.dat.z,
sample.col = sample.col,
plot.cols = plot.cols,
annot.cols = annot.cols,
feature.annots = feature.annots,
annotation_colors = annotation_colors,
is.fold = TRUE,
fold.range = c(3, -3),
dendrograms = 'column',
row.sep = 6,
#cutree_rows = 2,
cutree_cols = 2,
plot.title = "All features - z-score (static rows)",
file.name = "All features - z-score (static rows).png")
# Version 2: rows and columns both clustered, each cut into 2 groups.
make.pheatmap(sum.dat.z,
sample.col = sample.col,
plot.cols = plot.cols,
annot.cols = annot.cols,
feature.annots = feature.annots,
annotation_colors = annotation_colors,
is.fold = TRUE,
fold.range = c(3, -3),
cutree_rows = 2,
cutree_cols = 2,
plot.title = "All features - z-score",
file.name = "All features - z-score.png")
##########################################################################################################
#### PCA
##########################################################################################################
## Principal component analysis of the per-sample summary data; outputs go to
## "Output - PCA" under OutputDirectory.
setwd(OutputDirectory)
dir.create("Output - PCA")
setwd("Output - PCA")
### Select for relevant columns
for.pca <- sum.dat[, c(sample.col, annot.cols, plot.cols), with = FALSE]
for.pca
any(is.na(for.pca))
### Remove NAs
## Remove columns with NA
# for.pca <- for.pca[ , colSums(is.na(for.pca)) == 0 , with = FALSE]
# pca.cols <- names(for.pca)[c(3:length(names(for.pca)))]
## Remove ROWS with NA
# for.pca <- for.pca[complete.cases(for.pca), ]
# pca.cols <- names(for.pca)[c(4:length(names(for.pca)))]
any(is.na(for.pca))
### Simple PCA plot: prcomp on the (scaled) feature columns, then plot the
### first two PCs coloured by the grouping column.
pca_out <- stats::prcomp(for.pca[,plot.cols,with = FALSE],
scale = TRUE)
# First ten principal component scores, bound back onto the sample metadata.
pcs <- colnames(pca_out$x)[c(1:10)]
pcs
pca.plotting <- cbind(for.pca, pca_out$x[,c(1:10)])
make.colour.plot(pca.plotting, 'PC1', 'PC2', group.col, dot.size = 5)
### Complex PCA: Spectre's wrapper, which produces scree/loading plots as well.
Spectre::run.pca(dat = for.pca,
use.cols = plot.cols,
# scale = FALSE,
# plot.ind.label = c("point", "text"),
# row.names = patient.id,
plot.ind.group = TRUE,
group.ind = group.col,
repel = FALSE)
##########################################################################################################
#### Volcano plots
##########################################################################################################
## One volcano plot per pairwise comparison, produced twice: once from the
## uncorrected p-values and once from the FDR-corrected p-values. The two
## passes previously duplicated identical extraction/plotting code; that
## logic now lives in one local helper so the two passes cannot drift apart.
setwd(OutputDirectory)
dir.create("Output - volcano plots")
setwd("Output - volcano plots")
### Setup: build the "A to B" labels that match the 'Comparison' column of the
### stats tables, one per entry of `comparisons`.
comps <- list()
for(i in c(1:length(comparisons))){
temp <- comparisons[[i]]
strg <- paste0(temp[[1]], " to ", temp[[2]])
comps[[i]] <- strg
}
comps
### Helper: extract the p-value and log2 fold-change rows for one comparison
### from a stats table and draw the volcano plot into the current directory.
# stats.dat   summary-stats table (data.table) with 'Comparison' and 'Type' columns
# p.type      which p-value row to use: "p-value" or "p-value_fdr"
# comparison  one label from `comps`, also used as the plot title
plot.comparison.volcano <- function(stats.dat, p.type, comparison){
temp <- stats.dat[stats.dat[["Comparison"]] == comparison,]
# p-values: drop the two leading annotation columns, keep feature columns only.
p.dat <- temp[temp[["Type"]] == p.type,]
p.dat <- p.dat[,names(p.dat)[c(3:length(names(p.dat)))], with = FALSE]
# log2 fold-changes for the same comparison.
fc.dat <- temp[temp[["Type"]] == "FClog2",]
fc.dat <- fc.dat[,names(fc.dat)[c(3:length(names(fc.dat)))], with = FALSE]
nms <- names(fc.dat)
make.volcano.plot(dat.p = p.dat,
dat.fc = fc.dat,
vars = nms,
title = comparison,
xlim = c(-3.5, 3.5))
}
### Uncorrected volcanos
setwd(OutputDirectory)
dir.create("Output - volcano plots")
setwd("Output - volcano plots")
dir.create("Uncorrected p-values")
setwd("Uncorrected p-values")
for(i in comps){
plot.comparison.volcano(sum.dat.stats.raw, "p-value", i)
}
### Corrected (FDR-adjusted) volcanos
setwd(OutputDirectory)
dir.create("Output - volcano plots")
setwd("Output - volcano plots")
dir.create("Corrected p-values")
setwd("Corrected p-values")
for(i in comps){
plot.comparison.volcano(sum.dat.stats.FDR, "p-value_fdr", i)
}
##########################################################################################################
#### AutoGraphs
##########################################################################################################
## One dot/box plot per feature column, comparing groups with the variance and
## pairwise tests chosen earlier. Outputs go to "Output - autographs".
setwd(OutputDirectory)
dir.create("Output - autographs")
setwd("Output - autographs")
# meas.type <- unique(sub(" -- .*", "", names(sum.dat[,..plot.cols])))
# meas.type
# Column names follow the "measurement -- population" convention; split each
# name into its two halves for the plot title and axis label.
for(i in plot.cols){
pop <- sub(".* -- ", "", i) # population
meas <- sub(" -- .*", "", i) # measurement
make.autograph(sum.dat,
x.axis = group.col,
y.axis = i,
y.axis.label = meas,
grp.order = grp.order,
my_comparisons = comparisons,
Variance_test = variance.test,
Pairwise_test = pairwise.test,
title = pop,
subtitle = meas
)
}
|
e9f2d6f32e326f08f4fab55da123db5ee28e9279
|
7f3f81bdde73f1aa8f2f5a5baf208659c35925a8
|
/plot1.R
|
cb90cbff95ab56420852b810264ff48aa9be0a1b
|
[] |
no_license
|
eranmanor/Exploratory-Data-Analysis---Course-Project-1
|
f64e4db4e18e32e18689852a99aff5cf8b0c12a5
|
6c135b10f85c1e4916972374a67eefa7fb23f628
|
refs/heads/master
| 2020-12-03T14:59:02.320312
| 2016-09-01T08:22:31
| 2016-09-01T08:22:31
| 67,109,578
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 837
|
r
|
plot1.R
|
# Plot 1: histogram of Global Active Power over 1-2 Feb 2007.
# Reads the full household power consumption file; the header row is skipped
# and column names are assigned explicitly. Missing values are coded "?".
hhPowerConsum <- read.table("household_power_consumption.txt", skip = 1, sep = ";", na.strings = "?")
colnames(hhPowerConsum) <- c("Date", "Time", "Global_active_power", "Global_reactive_power",
                             "Voltage", "Global_intensity", "Sub_metering_1",
                             "Sub_metering_2", "Sub_metering_3")
# Keep only the two days of interest (dates are stored as d/m/Y strings).
plotData <- hhPowerConsum[hhPowerConsum$Date %in% c("1/2/2007", "2/2/2007"), ]
# Open the PNG device and draw the histogram.
# NOTE(review): width is 489 px while height is 480 — confirm 489 is intended.
png(filename = "plot1.png", width = 489, height = 480, units = "px")
hist(plotData$Global_active_power, col = "red", main = "Global Active Power",
     xlab = "Global Active Power (kilowatts)")
dev.off()
|
6a1dc24c46b02081ade7b1504ee0a4d91a22c1e3
|
7f4bbb0dd5be6a704b618cf0fbf0c80fff699235
|
/man/load_tab_rachat.Rd
|
25d4dafa390c542755e2f6731e765d74d18097f4
|
[] |
no_license
|
DTichit/ALModel
|
67c7b041a84dd9fdaec3c084d3cbd1f322b5a827
|
820faab78ff29d404b522b737170393930132c77
|
refs/heads/master
| 2023-03-08T09:40:48.628155
| 2021-02-10T18:24:52
| 2021-02-10T18:24:52
| 337,813,431
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 789
|
rd
|
load_tab_rachat.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/TabRachat-load.R
\docType{methods}
\name{load_tab_rachat}
\alias{load_tab_rachat}
\title{Fonction \code{load_tab_rachat}.}
\usage{
load_tab_rachat(address)
}
\arguments{
\item{address}{est un objet de type \code{character} indiquant le dossier dans lequel se situe l'ensemble des donnees necessaires
pour la construction de l'objet.}
}
\description{
Cette fonction permet de charger les donnees pour un objet de type \code{\link{TabRachat}}. Les donnees auront ete prealablement disposees dans
une architecture propre a \code{SiALM}.
}
\details{
La creation d'un objet \code{\link{TabRachat}} necessite des donnees presentes dans un fichier nomme \code{Rachat.csv}.
}
\author{
Damien Tichit pour Sia Partners
}
|
f0688457e784fc4006bba2b76ef3b51d243442dc
|
0392e3ed22ce0e014c04634f2adb018325c0025b
|
/inst/docs/examples_for_paper.R
|
f2ffef7808666a9c420e13035dd6d95543ab88c4
|
[] |
no_license
|
swang87/fc
|
cff78468ac1cdb1ed2922f94a29fd2e4eb682c92
|
f031539b06b26016560f47303ebef28dec90808d
|
refs/heads/master
| 2021-03-27T14:53:00.290061
| 2019-10-20T22:53:12
| 2019-10-20T22:53:12
| 114,667,105
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 7,735
|
r
|
examples_for_paper.R
|
### Benchmark
library(fc)
library(purrr)
library(magrittr)
library(microbenchmark)
#' Format microbenchmark timings as LaTeX "$mean (\pm SE)$" strings.
#'
#' @param x A microbenchmark result (anything with numeric `x$time` in
#'   nanoseconds and a grouping factor `x$expr`).
#' @param units Output units: "us" (microseconds, default) or "ms"
#'   (milliseconds); any other value leaves the times in nanoseconds.
#' @param n_iter Number of benchmark iterations, used as the standard-error
#'   denominator. Previously hard-coded to 10000; the default preserves the
#'   old behaviour.
#' @return Named character vector, one "$mean (\pm SE)$" entry per expression.
make_res_tbl <- function(x, units = "us", n_iter = 10000) {
  # Nanosecond -> requested-unit divisor (same values as the old if-chain).
  div <- switch(units, us = 10^3, ms = 10^6, 1)
  # Aggregate once per statistic instead of calling aggregate() three times.
  means <- aggregate(x$time ~ x$expr, FUN = mean)
  sds <- aggregate(x$time ~ x$expr, FUN = sd)
  res <- paste0("$", round(means[, 2] / div, 3), " (\\pm",
                round(sds[, 2] / sqrt(n_iter) / div, 3), ")$")
  names(res) <- means[, 1]
  res
}
## 1
## Example 1: compose log(sqrt(x)) five ways (base, purrr, magrittr, fc,
## fc pipe) and benchmark them. `%>%` is deliberately rebound before each
## pipe-based definition, so statement order matters here.
log_sqrt_base <- function(x) log(x=sqrt(x))
log_sqrt_purrr <- purrr::compose(log, sqrt)
`%>%` <- magrittr::`%>%`
log_sqrt_mag <- . %>% sqrt %>% log
log_sqrt_fc <- fc(log, x=sqrt(x))
`%>%` <- fc::`%>%`
log_sqrt_fc_pipe <- log %>% sqrt
tmp <- microbenchmark::microbenchmark(log_sqrt_base(10),
log_sqrt_purrr(10), log_sqrt_mag(10),
log_sqrt_fc(10), log_sqrt_fc_pipe(10),
times = 10000)
# Mean +/- SE in microseconds, formatted for the paper's table.
res <- make_res_tbl(tmp, "us")
## 2
## Example 2: grep HTML-ish cells, strip the tags with gsub, trim whitespace —
## again implemented in base, magrittr, purrr, fc, and fc-pipe styles.
x <- c("<td class = 'address'>24 Hillhouse Ave.</td>",
"<td class = 'city'>New Haven</td>",
"</table>")
search_trim_base <- function(v) {
trimws(gsub(grep(v, pattern="<[^/]*>", value=TRUE),
pattern=".*>(.*)<.*", replacement = "\\1"))
}
`%>%` <- magrittr::`%>%`
search_trim_mag <- . %>% grep(pattern="<[^/]*>", x=., value=TRUE) %>%
gsub(".*>(.*)<.*", "\\1", x=.) %>%
trimws
search_trim_purrr <- purrr::compose(trimws, partial(gsub, pattern=".*>(.*)<.*",
replacement = "\\1"),
partial(grep, pattern="<[^/]*>", value=TRUE))
search_trim_fc <- fc(trimws,
x = fc(gsub, pattern=".*>(.*)<.*",
replacement = "\\1",
x = fc(grep, pattern="<[^/]*>", value=TRUE)(x))(x))
`%>%` <- fc::`%>%`
search_trim_fc_pipe <- fc(grep, pattern="<[^/]*>", value=TRUE) %>%
fc(gsub, pattern=".*>(.*)<.*", replacement = "\\1") %>% trimws
tmp2 <- microbenchmark::microbenchmark(search_trim_base(x),
search_trim_mag(x),
search_trim_purrr(x),
search_trim_fc(x),
search_trim_fc_pipe(x), times = 10000)
print(tmp2, digits = 3)
# Append Example 2's formatted results to Example 1's.
res <- rbind(res, make_res_tbl(tmp2, units = "us"))
## 3
## Example 3: a dplyr group/summarise/filter pipeline on an SQLite-backed
## copy of nycflights13, built five ways. Uses the underscored standard-
## evaluation verbs (group_by_/summarize_/filter_) so the pipeline can be
## expressed with .dots lists.
library(fc)
library(dplyr)
library(dbplyr)
my_db <- DBI::dbConnect(RSQLite::SQLite(), path = ":memory:")
library(nycflights13)
# Copy the flights table into SQLite with indexes on the join/group keys.
copy_to(my_db,
flights,
temporary = FALSE,
indexes = list(
c("year", "month", "day"),
"carrier",
"tailnum"
)
)
flights_db <- tbl(my_db, "flights")
flights_db
flight_summary_base <- function(x) {
filter_(summarize_(group_by_(x, .dots = list('tailnum')),
.dots = list(count = 'n()',
dist='mean(distance, na.rm=TRUE)',
delay='mean(arr_delay, na.rm=TRUE)')),
.dots = list('count > 20', 'dist < 2000'))
}
`%>%` <- magrittr::`%>%`
flight_summary_mag <- . %>% group_by_(.dots = list('tailnum')) %>%
summarize_(.dots = list(count = 'n()',
dist='mean(distance, na.rm=TRUE)',
delay='mean(arr_delay, na.rm=TRUE)')) %>%
filter_(.dots = list('count > 20', 'dist < 2000'))
# flight_summary_mag <- . %>% group_by(tailnum) %>%
#   summarize(count = n(),
#             dist = mean(distance, na.rm=TRUE),
#             delay = mean(arr_delay, na.rm = TRUE)) %>%
#   filter(count > 20, dist < 2000)
library(fc)
flight_summary_purrr <- compose(partial(filter_, .dots = list('count > 20', 'dist < 2000')),
compose(partial(summarize_, .dots = list(count = 'n()',
dist='mean(distance, na.rm=TRUE)',
delay='mean(arr_delay, na.rm=TRUE)')),
partial(group_by_, .dots = list('tailnum'))))
flight_summary_fc <- fc(filter_, .dots = list('count > 20', 'dist < 2000'),
.data = fc(summarize_, .dots = list(count = 'n()',
dist='mean(distance, na.rm=TRUE)',
delay='mean(arr_delay, na.rm=TRUE)'),
.data = fc(group_by_, .dots = list('tailnum'))(.data))(.data))
`%>%` <- fc::`%>%`
flight_summary_fc_pipe <- fc(group_by_, .dots = list('tailnum')) %>%
fc(summarize_, .dots = list(count = 'n()',
dist='mean(distance, na.rm=TRUE)',
delay='mean(arr_delay, na.rm=TRUE)')) %>%
fc(filter_, .dots = list('count > 20', 'dist < 2000'))
# Sanity-check that the variants agree before benchmarking.
flight_summary_base(flights_db)
flight_summary_mag(flights_db)
flight_summary_fc_pipe(flights_db)
tmp3 <- microbenchmark(flight_summary_base(flights_db),
flight_summary_mag(flights_db),
flight_summary_purrr(flights_db),
flight_summary_fc(flights_db),
flight_summary_fc_pipe(flights_db), times = 10000)
print(tmp3, digits=3)
## Example 4
## Shuffle iris rows, keep the Sepal columns, take the first 10 rows, and
## summarise — demonstrating anonymous-function composition in each framework.
get_random_sepal <- function(x) head(x[sample(1:nrow(x)), grep("Sepal", colnames(x))], n=10)
get_random_sepal(iris)
`%>%` <- magrittr::`%>%`
get_random_sepal_mag <- . %>% (function(x) x[sample(1:nrow(x)), grep("Sepal", colnames(x))]) %>% head(n = 10) %>% summary
get_random_sepal_mag(iris)
get_sepal1 <- fc(summary, object = fc(head, x = (function(x) {
x[sample(1:nrow(x)),
grep("Sepal", colnames(x))]
}) (x), n=10)(x))
# NOTE(review): this variant fixes cols = grep(...) at definition time via fc().
get_random_sepal_fc <- fc(summary, object=fc(head, x =
fc(function(x, cols) {x[sample(1:nrow(x)), cols]},
cols = grep("Sepal", colnames(x)))(x),
n=10)(x))
get_random_sepal_fc_2 <- fc(summary, object = fc(head, x =
(function(x) {
x[sample(1:nrow(x)),
grep("Sepal", colnames(x))]
}) (x), n=10)(x) )
`%>%` <- fc::`%>%`
get_random_sepal_fc_2_pipe <- { function(x) {
x[sample(1:nrow(x)),
grep("Sepal", colnames(x))]
}} %>% fc(head, n=10) %>% summary
get_random_sepal_fc_2_pipe(iris)
# `%>%` <- magrittr::`%>%`
# get_random_sepal_mag <- . %>% (function(x) {
#   x[sample(1:nrow(x)),
#     grep("Sepal", colnames(x))]
# } ) %>% head
get_random_sepal_mag(iris)
library(purrr)
get_random_sepal_purrr <- compose(function(x) {
x[sample(1:nrow(x)),
grep("Sepal", colnames(x))]
}, partial(head, n=10), summary)
get_random_sepal_purrr(iris)
tmp4 <- microbenchmark::microbenchmark(get_random_sepal(iris),
get_random_sepal_mag(iris),
get_random_sepal_purrr(iris),
get_sepal1(iris),
#get_random_sepal_fc(iris),
#get_random_sepal_fc_2(iris),
get_random_sepal_fc_2_pipe(iris),times = 10000)
# compile output
## Rebuild `res` from scratch as a 5x4 table (one column per example, one row
## per framework) and emit the LaTeX table; the earlier incremental `res`
## assignments are superseded here.
res <- cbind(make_res_tbl(tmp, units="us"),
make_res_tbl(tmp2, units = "us"),
make_res_tbl(tmp3, units = "ms"),
make_res_tbl(tmp4, units = "ms"))
colnames(res) <- paste("Example", 1:4, paste0("(", c("us", "us", "ms", "ms"), ")"))
rownames(res) <- c("base R", "magrittr", "purrr", "fc", "fc_pipe")
library(xtable)
# sanitize.text.function = identity: cell contents are already LaTeX.
print(xtable(res), sanitize.text.function=function(x){x})
library(ggplot2)
autoplot(tmp)
f423aea1ad117efd8696dd2a9568f9e6801748c8
|
574a4b779bc2fe69eb19113cf7130f9ae9100ee6
|
/man/SCDC_prop.Rd
|
57e67fe44748553a93a6708dd3d06147e1016043
|
[
"MIT"
] |
permissive
|
meichendong/SCDC
|
472a3bc0bd676446f57795ce2106d5de40e49dce
|
890c604eebd7fffa4a08d7344fbd516df6efcf8d
|
refs/heads/master
| 2023-01-27T12:42:41.558530
| 2023-01-09T02:31:48
| 2023-01-09T02:31:48
| 187,677,545
| 40
| 14
| null | 2022-06-09T07:51:14
| 2019-05-20T16:33:20
|
R
|
UTF-8
|
R
| false
| true
| 1,569
|
rd
|
SCDC_prop.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/Deconvolution.R
\name{SCDC_prop}
\alias{SCDC_prop}
\title{Proportion estimation}
\usage{
SCDC_prop(
bulk.eset,
sc.eset,
ct.varname,
sample,
ct.sub,
iter.max = 1000,
nu = 1e-04,
epsilon = 0.01,
truep = NULL,
weight.basis = T,
ct.cell.size = NULL,
Transform_bisque = F,
...
)
}
\arguments{
\item{bulk.eset}{ExpressionSet object for bulk samples}
\item{sc.eset}{ExpressionSet object for single cell samples}
\item{ct.varname}{variable name for 'cell types'}
\item{sample}{variable name for subject/samples}
\item{ct.sub}{a subset of cell types that are selected to construct basis matrix}
\item{iter.max}{the maximum number of iteration in WNNLS}
\item{nu}{a small constant to facilitate the calculation of variance}
\item{epsilon}{a small constant number used for convergence criteria}
\item{truep}{true cell-type proportions for bulk samples if known}
\item{ct.cell.size}{default is NULL, which means the "library size" is calculated based on the data. Users can specify a vector of cell size factors corresponding to the ct.sub according to prior knowledge. The vector should be named: names() of the ct.cell.size input should not be NULL.}
\item{Transform_bisque}{The bulk sample transformation from bisqueRNA, aiming to reduce the systematic difference between single cells and bulk samples.}
}
\value{
Estimated proportion, basis matrix, predicted gene expression levels for bulk samples
}
\description{
Proportion estimation function for multi-subject case
}
|
0e7dc72e79e9714f4140cb035bf9145b645b3dc4
|
411beb364f61f99d7ac3c3747cf73a864fc71d26
|
/EstimatingActivity_example_Nepal2019.R
|
2abc25842930e78a1123fe2905ecc45dbdd8a158
|
[] |
no_license
|
GBFerr/CameraTrapActivity_tutorial
|
6a67c87543da4cbd0f64a75b2c2d0e4badbc4f46
|
16df1ba1c6167b747f2c64c8cb9d14ef46066698
|
refs/heads/main
| 2022-12-30T23:27:34.571775
| 2020-10-23T11:01:15
| 2020-10-23T11:01:15
| 306,605,422
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 10,194
|
r
|
EstimatingActivity_example_Nepal2019.R
|
########################################################################
# Estimating activity level and activity pattern from camera trap data #
# Guilherme Ferreira - 14/July/2020
########################################################################
# Key papers:
# 1) Rowcliffe et al. Methods in Ecology and Evolution 5.11 (2014):1170-1179.
# 2) Ridout & Linkie. Journal of Agricultural, Biological, and Environmental Statistics 14.3 (2009):322-337.
##### Function to transform time #####
# gettime function from Marcus Rowcliffe's Github - activity
#Converts character, POSIXct or POSIXlt time of day data to numeric
#ARGUMENTS
# x: vector of character, POSIXct or POSIXlt time data to convert
# format: used only if x is character, see strptime
# scale: scale on which to return times (see below)
#VALUE
#A vector of numeric times of day in units defined by scale:
# radian: [0,2*pi]
# hours: [0,24]
# proportion: [0,1]
# example: ptime <- gettime(BCItime$date, "%d/%m/%Y %H:%M", "proportion")
# SOLAR TIME FUNCTIONS FROM ROWCLIFFE ####
# Install 'insol' only when it is actually missing, then attach it.
# (The previous unconditional install.packages() hit CRAN on every run, and
# require() is the wrong tool for a mandatory load — it only warns on failure,
# whereas library() stops with an error.)
if (!requireNamespace("insol", quietly = TRUE)) {
install.packages("insol")
}
library(insol)
#' Convert character/POSIXct/POSIXlt time-of-day data to numeric.
#'
#' @param x Vector of character, POSIXct or POSIXlt time data.
#' @param format strptime() format string; used only when `x` is character.
#' @param scale Output scale: "radian" ([0, 2*pi], default), "hour" ([0, 24])
#'   or "proportion" ([0, 1]).
#' @return Numeric vector of times of day on the requested scale.
gettime <- function(x, format = "%Y-%m-%d %H:%M:%S",
                    scale = c("radian", "hour", "proportion")) {
  # Normalise input to POSIXlt so hour/min/sec components can be extracted.
  # inherits() replaces the fragile class(x)[1] == "..." comparisons.
  if (inherits(x, "character")) {
    x <- strptime(x, format, "UTC")
  } else if (inherits(x, "POSIXct")) {
    x <- as.POSIXlt(x)
  } else if (!inherits(x, "POSIXlt")) {
    stop("x must be character or POSIXt class")
  }
  scale <- match.arg(scale)
  res <- x$hour + x$min / 60 + x$sec / 3600   # decimal hours in [0, 24)
  if (scale == "radian") res <- res * pi / 12
  if (scale == "proportion") res <- res / 24
  # An all-zero result usually means parsing silently defaulted the time of
  # day to midnight (e.g. the format only matched the date part).
  if (all(res == 0, na.rm = TRUE)) {
    warning("All times are 0: may be just strptime default?")
  }
  res
}
##### loading camera trap data ####
## Reads the tagged camera-trap image metadata, collapses the three site
## categories into two protection levels (NP vs outside), and converts the
## capture time to radians / proportion-of-day for the activity analyses.
setwd("Z:/biome_health_project_files/country_files/nepal/tagging_photos")
d <- read.csv("Latest_species_meta.csv") # file with CT image metadata and species tags
names(d)
d2 <- d[,c("site_cam.x", "site_id", "CommonName", "date_fixed", "Time", "Image_nam" )] # keeping only cols needed
head(d2)
d2$protection <- d2$site_id # col indicating National Park or outside NP, 2 categories i.o. 3
# Buffer zone (BZ) and outside-buffer-zone (OBZ) sites are both "outside".
d2[d2$protection== "BZ", "protection"] <- "outside"
head(d2)
d2[d2$protection== "OBZ", "protection"] <- "outside"
d2$Time_rad <- gettime(d2$Time, "%H:%M:%S") # transforming time in radians
d2$Time_prop <- gettime(d2$Time, "%H:%M:%S", "proportion") # transforming time in proportion
range(d2$Time_rad) # just checking - should be between ca. 0 and 6.28
range(d2$Time_prop) # just checking - should be between 0 and 1
d2[is.na(d2$CommonName), "CommonName"] <- "blank" # replacing NAs for blank on species names
which(is.na(d2$CommonName)) # checking
table(d2$CommonName, d2$protection) # number of images per spp
# wild boar and barking deer have a decent number of images in both protection levels (65/243; 130/171, respectively )
library(activity)
# Wild boar: fit activity-level models (Rowcliffe et al. 2014) overall and
# per protection level, then compare the levels with a Wald test.
# just checking if selection is ok
length(d2[d2$CommonName == "Wild Boar" & d2$protection == "NP" , "Time_rad"])
length(d2[d2$CommonName == "Wild Boar" & d2$protection == "outside" , "Time_rad"])
# estimating proportion of time active for wild boar for the whole dataset (overall)
boar_overall<- fitact(d2[d2$CommonName == "Wild Boar", "Time_rad"],
reps=1000,sample="model",show=TRUE)
#now using only records in Bardiya National Park
boar_NP<- fitact(d2[d2$CommonName == "Wild Boar" & d2$protection == "NP" , "Time_rad"],
reps=1000,sample="model",show=TRUE)
#now using only records outside Bardiya National Park (i.e. from buffer zone and outside BZ)
boar_outside<- fitact(d2[d2$CommonName == "Wild Boar" & d2$protection == "outside" , "Time_rad"],
reps=1000,sample="model",show=TRUE)
# view estimates of proportion of time active
boar_overall@act
boar_NP@act
boar_outside@act
# Wald test to compare activity level between management types
compareAct(list(boar_NP, boar_outside))
#### No need to run - just testing "sample= 'data'"; no difference observed ####
boar_overallD<- fitact(d2[d2$CommonName == "Wild Boar", "Time_rad"],
reps=10000,sample="data",show=TRUE) # with 10K reps parameters were very similar as "model"
boar_NPD<- fitact(d2[d2$CommonName == "Wild Boar" & d2$protection == "NP" , "Time_rad"],
reps=10000,sample="data",show=TRUE)
boar_outsideD<- fitact(d2[d2$CommonName == "Wild Boar" & d2$protection == "outside" , "Time_rad"],
reps=1000,sample="data",show=TRUE)
boar_overallD@act
boar_NPD@act
boar_outsideD@act
#####
# activity pattern graph (night-centred), one per subset
plot(boar_overall, centre= "night", tline=list(lty=1,lwd=2, col="blue"),
cline=list(lty=2,lwd=1.5, col="black"), dline=list(col="dark gray"),main= "Wild boar", cex.main=1)
plot(boar_NP, centre= "night", tline=list(lty=1,lwd=2, col="blue"),
cline=list(lty=2,lwd=1.5, col="black"), dline=list(col="dark gray"),main= "Wild boar_NP", cex.main=1)
plot(boar_outside, centre= "night", tline=list(lty=1,lwd=2, col="blue"),
cline=list(lty=2,lwd=1.5, col="black"), dline=list(col="dark gray"),main= "Wild boar_outside", cex.main=1)
#### Wild Boar overlap estimates ####
## Overlap between NP and outside activity patterns (Ridout & Linkie 2009),
## with a smoothed-bootstrap CI on Dhat4 and a Watson two-sample test.
library(overlap)
# estimating Dhats - overlap metric
(DhatsWB<-overlapEst(d2[d2$CommonName == "Wild Boar" & d2$protection == "NP" , "Time_rad"],
d2[d2$CommonName == "Wild Boar" & d2$protection == "outside" , "Time_rad"]))
#Do smoothed bootstrap for each protection level
bsWB.NP<-resample(d2[d2$CommonName == "Wild Boar" & d2$protection == "NP" , "Time_rad"],10000)
bsWB.outside<-resample(d2[d2$CommonName == "Wild Boar" & d2$protection == "outside" , "Time_rad"],10000)
#Analyse with bootEst:
# Ridout and Linkie (2009) recommend using adjust=0.8 to estimate Dhat1, adjust=1 for Dhat 4, and adjust=4 for Dhat 5.
# When smaller sample <50, Dhat1 performed best, while Dhat4 was better when smaller sample >75 (Meredith & Ridout 2018; overview of overlap)
bsWB<-bootEst(bsWB.NP,bsWB.outside,adjust=c(NA,1,NA)) #Dhat 4 only (others disabled via NA)
bsWB.mean <-as.vector(colMeans(bsWB))
bsWB.mean <-bsWB.mean[2]
#Convert column with Dhat4 to vector and get CIs
bsWBVec<-as.vector(bsWB[,2])#2 because it's Dhat 4
#bootCI(DhatsPtaj[2],bsPtajVec)['norm0',] #2 because it's Dhat 4
WB.CI <- as.vector(bootCI(DhatsWB[2],bsWBVec)['basic0',]) #2 because it's Dhat 4
WB.overlap <- cbind(bsWB.mean,WB.CI)
#plotting overlapping activities
overlapPlot(d2[d2$CommonName == "Wild Boar" & d2$protection == "NP" , "Time_rad"],
d2[d2$CommonName == "Wild Boar" & d2$protection == "outside" , "Time_rad"],
xcenter= "midnight",
rug=TRUE,
main="Wild Boar")
legend('topright',c("NP","outside"),lty=c(1,2),col=c("black","blue"),bty='n')
#Watson two Test to compare whether 2 circular data came from the same distribution
# i.e. is WB activity patterns different in the two protection levels?
# Refs:Jammalamadaka & SenGupta 2001; Oliveira-Santos et al. 2013
library(circular)
watson.two.test(d2[d2$CommonName == "Wild Boar" & d2$protection == "NP" , "Time_rad"],
d2[d2$CommonName == "Wild Boar" & d2$protection == "outside" , "Time_rad"])
##### Barking Deer activity level####
## Same analysis as for wild boar: activity levels per protection level,
## Wald comparison, activity-pattern plots, overlap plot and Watson test.
length(d2[d2$CommonName == "Barking Deer" & d2$protection == "NP" , "Time_rad"])
length(d2[d2$CommonName == "Barking Deer" & d2$protection == "outside" , "Time_rad"])
deer_overall<- fitact(d2[d2$CommonName == "Barking Deer", "Time_rad"],
reps=1000,sample="model",show=TRUE)
deer_NP<- fitact(d2[d2$CommonName == "Barking Deer" & d2$protection == "NP" , "Time_rad"],
reps=1000,sample="model",show=TRUE)
deer_outside<- fitact(d2[d2$CommonName == "Barking Deer" & d2$protection == "outside" , "Time_rad"],
reps=1000,sample="model",show=TRUE)
deer_overall@act
deer_NP@act
deer_outside@act
# Wald test to compare activity level between management types
compareAct(list(deer_NP, deer_outside))
# activity pattern graph
plot(deer_overall, centre= "night", tline=list(lty=1,lwd=2, col="blue"),
cline=list(lty=2,lwd=1.5, col="black"), dline=list(col="dark gray"),main= "Barking Deer", cex.main=1)
plot(deer_NP, centre= "night", tline=list(lty=1,lwd=2, col="blue"),
cline=list(lty=2,lwd=1.5, col="black"), dline=list(col="dark gray"),main= "Barking Deer_NP", cex.main=1)
plot(deer_outside, centre= "night", tline=list(lty=1,lwd=2, col="blue"),
cline=list(lty=2,lwd=1.5, col="black"), dline=list(col="dark gray"),main= "Barking Deer_outside", cex.main=1)
#plotting overlap graph
overlapPlot(d2[d2$CommonName == "Barking Deer" & d2$protection == "NP" , "Time_rad"],
d2[d2$CommonName == "Barking Deer" & d2$protection == "outside" , "Time_rad"],
xcenter= "midnight",
rug=TRUE,
#abline(v=c(1.57,4.71),lty=3),
main="Barking Deer")
legend('topright',c("NP","outside"),lty=c(1,2),col=c("black","blue"),bty='n')
#Watson two Test to compare whether 2 circular data came from the same distribution
library(circular)
watson.two.test(d2[d2$CommonName == "Barking Deer" & d2$protection == "NP" , "Time_rad"],
d2[d2$CommonName == "Barking Deer" & d2$protection == "outside" , "Time_rad"])
#### exploring other plots ####
## Ad-hoc cross-species overlap plots (predator vs prey, sympatric ungulates).
overlapPlot(d2[d2$CommonName == "Tiger" & d2$protection == "NP" , "Time_rad"],
d2[d2$CommonName == "Wild Boar" & d2$protection == "NP" , "Time_rad"],
xcenter= "midnight",
rug=TRUE,
#abline(v=c(1.57,4.71),lty=3),
main="Tiger vs Wild Boar")
legend('topright',c("Tg","WB"),lty=c(1,2),col=c("black","blue"),bty='n')
# NOTE(review): "Niglai" in the title below is likely a typo for "Nilgai".
overlapPlot(d2[d2$CommonName == "Nilgai" & d2$protection == "outside" , "Time_rad"],
d2[d2$CommonName == "Chital" & d2$protection == "outside" , "Time_rad"],
#xcenter= "midnight",
rug=TRUE,
#abline(v=c(1.57,4.71),lty=3),
main="Niglai vs Chital")
legend('topright',c("Ng","Ch"),lty=c(1,2),col=c("black","blue"),bty='n')
e841f7dfbd5ce2bccb7b77984264df8a46165094
|
c79acbf4db62c5e2fecde2931a437ae626766633
|
/apply family.R
|
8b572e03550dfb65126c6b8c70cd51e5488cd8a6
|
[] |
no_license
|
gonggaobushang/R-Base
|
5e999750f49227051f6d890cad25a7ce53b7540e
|
a5c266af6bf59a8c55e79a6d1eae7ffeefeb5439
|
refs/heads/master
| 2020-05-31T13:49:50.510290
| 2019-11-26T08:36:48
| 2019-11-26T08:36:48
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 4,427
|
r
|
apply family.R
|
########################################################################apply
#apply(X, MARGIN, FUN, ...) MARGIN: compute by row or by column; 1 = rows, 2 = columns
#Works on matrices, data frames and (multi-dimensional) arrays, looping over rows or columns
x<-matrix(1:12,ncol=3)
apply(x,1,sum)
apply(x,2,mean)
x <- cbind(x1 = 3, x2 = c(4:1, 2:5)); x
# Extra arguments after FUN (c1, c2 here) are forwarded to each call of myFUN.
myFUN<- function(x, c1, c2) {
c(sum(x[c1],1), mean(x[c2]))
}
t1=apply(x,1,myFUN,c1='x1',c2=c('x1','x2'))
data.frame(x1=x[,1]+1,x2=rowMeans(x))
system.time(t1)
#R's built-in vectorised operations such as rowMeans are faster than apply, which is faster than for/while loops
########################################################################lapply
#lapply(X, FUN, ...) works on lists and data frames
x <- list(a = 1:10, b = rnorm(6,10,5), c = c(TRUE,FALSE,FALSE,TRUE));x
lapply(x,fivenum)# five-number summary (quartiles)
#lapply loops over list elements; on a data.frame it loops column by column
#But if the input is a vector or matrix, lapply alone does not give the intended result
x <- cbind(x1=3, x2=c(2:1,4:5))
lapply(x,sum)# poor result on a matrix (iterates element-wise)
lapply(data.frame(x), sum)# works well on a data frame, but always combines by column
########################################################################sapply
#Simplified lapply: returns a vector instead of a list object.
#sapply(X, FUN, ..., simplify=TRUE, USE.NAMES = TRUE)
#simplify: whether to simplify to an array; with 'array' the output is grouped as an array
#USE.NAMES: if X is character, TRUE uses the strings as names, FALSE does not
x <- cbind(x1=3, x2=c(2:1,4:5))
apply(x,2,sum)
sapply(x,sum) # wrong (iterates over matrix elements, not columns)
sapply(data.frame(x), sum)
a<-1:2
sapply(a,function(x) matrix(x,2,2), simplify='array')
sapply(a,function(x) matrix(x,2,2))
val<-head(letters)
sapply(val,paste,USE.NAMES=TRUE)# names are set by default
sapply(val,paste,USE.NAMES=FALSE)# no names set
########################################################################vapply
#Similar to sapply, but lets you control the row names of the return value
#vapply(X, FUN, FUN.VALUE, ..., USE.NAMES = TRUE)
#FUN.VALUE: template defining the row.names (and type/length) of the return value
x <- data.frame(cbind(x1=3, x2=c(2:1,4:5)))
vapply(x,cumsum,FUN.VALUE=c('a'=123123,'b'=0,'c'=0,'d'=0))
# Equivalent result with sapply requires setting the row names manually:
y=sapply(x, cumsum);y
row.names(y)<-c("a","b","c","d");y
# Map each earthquake magnitude to a colour label.
data(quakes)
getColor <- function(quakes) {
sapply(quakes$mag, function(mag) {
if(mag <= 4) {
"green"
} else if(mag <= 5) {
"orange"
} else {
"red"
} } )
}
getColor(quakes[c(1:20),])
# Encode iris species as "1"/"2"/3 labels.
data(iris)
irisiri<-function(iris){
sapply(iris$Species,function(Specise){
if(Specise=="setosa" ){
"1"
}else if(Specise=="versicolor"){
"2"
}else{
3
}
}
)
}
irisiri(iris[c(1:12),])
########################################################################mapply
#A multivariate variant of sapply: iterates over several argument vectors in parallel
#mapply(FUN, ..., MoreArgs = NULL, SIMPLIFY = TRUE,USE.NAMES = TRUE)
set.seed(1)
x<-1:10
y<-5:-4
z<-round(runif(10,-5,5))
mapply(max,x,y,z)
n<-rep(4,4)
m<-v<-c(1,10,100,1000)
mapply(rnorm,n,m,v)
########################################################################tapply
#Grouped (by-index) looping computation
#tapply(X, INDEX, FUN = NULL, ..., simplify = TRUE)
#INDEX: the index used for grouping
tapply(iris$Petal.Length,iris$Species,mean)
x<-y<-1:10;x;y
t<-round(runif(10,1,100)%%2);t
tapply(x,t,sum)# i.e. sums x within the groups defined by t
########################################################################rapply
#Handles list data only: recursively traverses every element of the list, descending into sub-lists
#rapply(object,f,classes="ANY",deflt = NULL,how = c("unlist", "replace", "list"))
#how="replace": replace the matching elements of the original list with f's result
#how="list": build a new list; matching classes get f applied, non-matching get deflt
#how="unlist": the result is additionally unlist(recursive = TRUE)-ed
x=list(a=12,b=1:4,c=c('b','a'))
y=pi
z=data.frame(a=rnorm(10),b=1:10)
a <- list(x=x,y=y,z=z)
# Sort the numeric elements and replace them in place in the original list
rapply(a,sort, classes='numeric',how='replace')
class(a$z$b)
rapply(a,function(x) paste(x,'++++'),
classes="character",deflt=NA, how = "list")
########################################################################eapply
#Iterates over all variables in an environment
#eapply(env, FUN, ..., all.names = FALSE, USE.NAMES = TRUE)
|
957e9a777ce2f7950ba5cf9e5dfc8bdab7dac15b
|
3b65ab1fc7af3b4711085c05f21dd28ef4bcb98a
|
/original.R
|
a96688d0f5b9f12a7acd449ce7304795b7466752
|
[] |
no_license
|
llecowen/Mark-Resight-Recovery-HMM
|
56a16e9e03c157291f09e08a936486ba23be45cb
|
662a186c2e4f486db5f351ac722a2748075f0d85
|
refs/heads/master
| 2020-06-11T07:30:03.262890
| 2018-07-13T21:57:20
| 2018-07-13T21:57:20
| 75,733,909
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 7,475
|
r
|
original.R
|
#This is the version of the HMM model that works with constant parameters.
# Negative log-likelihood of the multievent (hidden Markov) mark-resight-recovery
# model, evaluated at logit-scale parameter vector b; minimized by optim() below.
devMULTIEVENT <- function(b,data,eff,e, nh,km1){
# data= encounter histories, eff= counts/frequency
# e vector of dates of first captures
# garb vector of initial states
# km1 number of recapture occasions
# nh number individuals
# OBSERVATIONS (+1)
# 0 = non-detected
# 1 = not resighted/recovered, captured
# 2 = resighted not recovered, not captured
# 3 = resighted not recovered, captured
# 4 = resighted and recovered (Note that these are assummed not possible in Barker's model)
# 5 = not resighted, recovered
# OBSERVATIONS for last time period
# 6 = non-detected
# 7 = resighted not recovered
# 8 = not resighted, recovered
# STATES
# 1 = alive
# 2 = newly dead
# 3 = dead
# PARAMETERS
# phi survival prob
# p detection prob
# r recovery prob
# R resight prob given alive at next time (see Barker 1997, parameter R)
# Rr resight (and not recovered) prob given not alive at next time (see Barker 1997, parameter R')
# logit link for all parameters
# note: below, we decompose the state and obs process in two steps composed of binomial events,
# which makes the use of the logit link appealing;
# if not, a multinomial (aka generalised) logit link should be used
# Back-transform each element of b from the logit scale to a probability.
phi <- 1/(1+exp(-b[1]))
p <- 1/(1+exp(-b[2]))
R <- 1/(1+exp(-b[3]))
Rr <- 1/(1+exp(-b[4]))
r <- 1/(1+exp(-b[5]))
# prob of obs (rows) cond on states (col)
P0 = matrix(c((1-R)*(1-p),0,0,0,(1-Rr)*(1-r),0,0,0,1),nrow=3,ncol=3,byrow=T)
P1 = matrix(c((1-R)*p,0,0,0,0,0,0,0,0),nrow=3,ncol=3,byrow=T)
P2 = matrix(c(R*(1-p),0,0,0,Rr*(1-r),0,0,0,0),nrow=3,ncol=3,byrow=T)
P3 = matrix(c(R*p,0,0,0,0,0,0,0,0),nrow=3,ncol=3,byrow=T)
P4 = matrix(c(0,0,0,0,Rr*r,0,0,0,0),nrow=3,ncol=3,byrow=T)
P5 = matrix(c(0,0,0,0,(1-Rr)*r,0,0,0,0),nrow=3,ncol=3,byrow=T)
P6 = matrix(c((1-R),0,0,0,(1-Rr)*(1-r),0,0,0,1),nrow=3,ncol=3,byrow=T)
P7 = matrix(c(0,0,0,0,(1-Rr)*r,0,0,0,0),nrow=3,ncol=3,byrow=T)
P8 = matrix(c(R,0,0,0,Rr*(1-r),0,0,0,0),nrow=3,ncol=3,byrow=T)
# Stack the per-observation matrices: P[,,k] is the matrix for observation
# code k-1 (so it can be indexed directly by the shifted event codes below).
P =array(rep(0,3*3*9), dim=c(3,3,9))
P[,,1]=P0
P[,,2]=P1
P[,,3]=P2
P[,,4]=P3
P[,,5]=P4
P[,,6]=P5
P[,,7]=P6
P[,,8]=P7
P[,,9]=P8
# prob of states at t+1 given states at t
A <- matrix(c(phi,1-phi,0,0,0,1,0,0,1),nrow=3,ncol=3,byrow=T)
# init states (all animals are alive at release and we condition on release)
# c(prob(alive), prob(newly dead), prob(dead))
PI <- c(1,0,0)
# likelihood
l <- 0
for (i in 1:nh) # loop on ind
{
ei <- e[i] # date of first det
# NOTE(review): ei is computed but the forward recursion below runs over
# 1:km1 rather than ei:km1 -- confirm histories are already aligned so the
# pre-capture occasions contribute no information (see comment below).
evennt <- data[,i] + 1 # add 1 to obs to avoid 0s in indexing
ALPHA=PI
# cond on first capture, might have to change km1 depending on how structure the histories, also might have to change from ei:km1 for loop.
for (j in 1:km1)
{
# if ((ei+1)>(km1+1)) {break} # in MATLAB the command >> 8:7 returns empty, whereas in R it returns the vector c(8,7)!
# Forward step of the HMM: propagate state probabilities, then weight by
# the observation probabilities for this occasion's event code.
ALPHA <- (ALPHA %*% A)%*%P[,,evennt[j]]
}
# logprot() clamps tiny likelihood contributions so log() never hits -Inf.
l <- l + logprot(sum(ALPHA))*eff[i]
}
# Return the NEGATIVE log-likelihood (optim minimizes by default).
l <- -l
l
}
# Numerically safe log: inputs at or below a machine-epsilon threshold are
# clamped to log(eps), so tiny likelihood values never produce -Inf.
logprot <- function(v){
eps <- 2.2204e-016
out <- rep(log(eps), length(v))  # default: the clamped floor value
big <- (v > eps)                 # entries large enough to log directly
out[big] <- log(v[big])
out
}
# read in data
# Read in each capture history as a character string; it is split into a
# per-digit numeric matrix below.
oyster.inp=read.table(file="oystercatchers.txt", colClasses="character", header=T)
sex<- as.numeric(unlist(oyster.inp$Male)) #sex where Male=1, Female=0
# One row per animal, one column per LD digit of the history string.
# NOTE(review): ncol=nchar(...) passes a whole vector; matrix() uses only its
# first element -- fine only if every history has the same length, confirm.
LD.history <- matrix(as.numeric(unlist(strsplit(rep(oyster.inp$history,times=1), ""))),ncol=nchar(oyster.inp$history), byrow=TRUE)
nh= dim(LD.history)[1] #number of animals observed in experiment
nsample= dim(LD.history)[2]/2 #number of sample times used in experiment
nsample
# The "first" vector gives, for each animal, the first column of LD.history
# at which it was observed (its first-capture position).
# Fix: the original looped over `1:nobs` but `nobs` was never defined -- the
# number of observed animals is `nh` (rows of LD.history). `nobs` is defined
# here as an alias so downstream code that refers to it also works.
nobs <- nh
first <- integer(nobs)  # preallocate instead of growing with c()
for(i in seq_len(nobs)){
  first[i] <- min(which(LD.history[i,]!=0))
}
# Create event history matrix
# Fix: the original used an undefined `nobs`; the number of animals is `nh`.
history=matrix(rep(0,nh*nsample), nrow=nh, ncol=nsample)
nspot=nsample*2 #number of spots in an LiDi type history
# Convert LiDi type event history with x=0,1,2,3,4,5,6,7,8 type history, conditional on first release.
# Thus actually looking at the DiLi+1 events and mapping these to x.
# For example LiDi history of 10 12 01 would map to event history of 128
# (Pairs LD.history[i,j], LD.history[i,j+1] are the Di and Li+1 digits; the
# combination 1,1 -- recovered AND recaptured -- is left at 0, matching the
# model's assumption that event 4 cannot occur.)
for(i in seq_len(nh)){
index=(first[i]+1)/2
j<-first[i]+1
while(j<nspot){
if(LD.history[i,j]==0 & LD.history[i,j+1]==0){history[i,index]=0 #not observed
}else if(LD.history[i,j]==0 & LD.history[i,j+1]==1){history[i,index]=1 #not resighted, recapture
}else if(LD.history[i,j]==2 & LD.history[i,j+1]==0){history[i,index]=2 #resight, not recaptured
}else if(LD.history[i,j]==2 & LD.history[i,j+1]==1){history[i,index]=3 #resight, recapture
}else if(LD.history[i,j]==1 & LD.history[i,j+1]==0){history[i,index]=5 #recovered
}
j<-j+2
index=index+1
}#endwhile
if(j==nspot){#Final time period only has resight and recovery possibility, thus x=6,7, or 8
if(LD.history[i,j]==0){history[i,index]<-6 #no observation
}else if(LD.history[i,j]==1){history[i,index]<- 7 #recovery
} else if(LD.history[i,j]==2){history[i,index]<-8} #resight
}
}
# define various quantities#
# NOTE(review): devMULTIEVENT documents km1 as the number of RECAPTURE
# occasions, but here it is set to nsample (all occasions) -- confirm the
# 1:km1 recursion in devMULTIEVENT is consistent with this choice.
km1 <- nsample
# counts (each history row appears once, so all frequencies are 1)
eff <- rep(1,nh)
# compute the date of first capture fc, and state at initial capture init.state
fc <- first
# initial values
binit <- runif(5)# where 5 is the number of parameters phi, p, R, Rr, r
# transpose data
data <- t(history)
# fit model
deb=Sys.time()
tmpmin <- optim(binit,devMULTIEVENT,NULL,hessian=TRUE,data,eff,fc,nh,km1,method="BFGS",control=list(trace=1, REPORT=1))
fin=Sys.time()
fin-deb
# Extract the MLEs and back-transform them from the logit scale to
# probabilities (inverse-logit applied to all five betas at once).
x <- tmpmin$par
est <- 1/(1+exp(-x))
phi <- est[1]  # survival
p <- est[2]    # capture
R <- est[3]    # resight given alive
Rr <- est[4]   # resight given newly dead
r <- est[5]    # recovery
# Hessian at the optimum, used for the delta-method standard errors below.
H=tmpmin$hessian
# Calculate delta-method variances for back-transformed (inverse-logit)
# parameter estimates.
#
# design_matrix: maps the beta vector to the real-scale parameters
#                (identity for the constant model).
# Hess:          Hessian of the negative log-likelihood at the MLE.
# stdparms:      back-transformed estimates (probabilities).
# Returns one variance per row of design_matrix.
# NOTE: assumes length(stdparms) > 1; diag() on a length-1 numeric would
# build an identity matrix instead (pre-existing behavior).
VAR<- function(design_matrix,Hess,stdparms)
{ library(MASS)  # for ginv(): generalized inverse tolerates a near-singular Hessian
  inv_Hess <- ginv(Hess, tol = sqrt(.Machine$double.eps))
  Cov_XB <- design_matrix %*% inv_Hess %*% t(design_matrix)
  # Jacobian of the inverse-logit transform: d/dx plogis(x) = p*(1-p)
  std.vect <- stdparms*(1-stdparms)
  COV <- diag(std.vect) %*% Cov_XB %*% diag(std.vect)
  # The original extracted the diagonal element-by-element in a loop;
  # diag() does the same in one step.
  diag(COV)
}
# Design matrix for the constant model: the 5x5 identity (each beta maps
# one-to-one to a parameter).
X <- diag(5)
stdparms <- 1/(1+exp(-x))   # back-transformed estimates on the probability scale
std.VAR <- VAR(X, H, stdparms)
std.se <- sqrt(std.VAR)     # delta-method standard errors
# Print the back-transformed MLEs alongside their delta-method standard errors.
cat("Parameter estimates and estimated standard errors","\n",
    "phi=", round(phi, digits=3), "se=", round(std.se[1], digits=3),"\n",
    "p  =", round(p, digits=3), "se=", round(std.se[2], digits=3),"\n",
    "R  =", round(R, digits=3), "se=", round(std.se[3], digits=3),"\n",
    "R' =", round(Rr, digits=3), "se=", round(std.se[4], digits=3),"\n",
    "r  =", round(r, digits=3), "se=", round(std.se[5], digits=3),"\n")
Parameter estimates and estimated standard errors
phi= 0.942 se= 0.006
p = 0.418 se= 0.012
R = 0.088 se= 0.007
R' = 0.065 se= 0.043
r = 0.196 se= 0.036
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.