blob_id stringlengths 40 40 | directory_id stringlengths 40 40 | path stringlengths 2 327 | content_id stringlengths 40 40 | detected_licenses listlengths 0 91 | license_type stringclasses 2 values | repo_name stringlengths 5 134 | snapshot_id stringlengths 40 40 | revision_id stringlengths 40 40 | branch_name stringclasses 46 values | visit_date timestamp[us]date 2016-08-02 22:44:29 2023-09-06 08:39:28 | revision_date timestamp[us]date 1977-08-08 00:00:00 2023-09-05 12:13:49 | committer_date timestamp[us]date 1977-08-08 00:00:00 2023-09-05 12:13:49 | github_id int64 19.4k 671M ⌀ | star_events_count int64 0 40k | fork_events_count int64 0 32.4k | gha_license_id stringclasses 14 values | gha_event_created_at timestamp[us]date 2012-06-21 16:39:19 2023-09-14 21:52:42 ⌀ | gha_created_at timestamp[us]date 2008-05-25 01:21:32 2023-06-28 13:19:12 ⌀ | gha_language stringclasses 60 values | src_encoding stringclasses 24 values | language stringclasses 1 value | is_vendor bool 2 classes | is_generated bool 2 classes | length_bytes int64 7 9.18M | extension stringclasses 20 values | filename stringlengths 1 141 | content stringlengths 7 9.18M |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
fdf37106c00d92b55919241f3da00e189004eba8 | cb317be804f5836c29f5cf20a9186b4a48d14009 | /man/split_merge.Rd | 3b9570b5bc75148f305d5ec5aa2d476de8bda831 | [
"MIT"
] | permissive | bahaeomid/tabulizer | fcae1e1018dd89eacc9c8130e2e69b995034d79b | 3aad163d7c5dc04739c0cab4b60b3e1f4c41d6cd | refs/heads/master | 2021-07-10T01:34:06.199683 | 2017-10-10T12:35:53 | 2017-10-10T12:35:53 | 109,482,433 | 1 | 0 | null | 2017-11-04T09:30:03 | 2017-11-04T09:30:03 | null | UTF-8 | R | false | true | 1,898 | rd | split_merge.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/split_merge.R
\name{split_pdf}
\alias{merge_pdfs}
\alias{split_pdf}
\title{Split and merge PDFs}
\usage{
split_pdf(file, outdir = NULL, password = NULL)
merge_pdfs(file, outfile)
}
\arguments{
\item{file}{For \code{merge_pdfs}, a character vector specifying the path to one or more \emph{local} PDF files. For \code{split_pdf}, a character string specifying the path or URL to a PDF file.}
\item{outdir}{For \code{split_pdf}, an optional character string specifying a directory into which to split the resulting files. If \code{NULL}, the directory of the original PDF is used, unless \code{file} is a URL in which case a temporary directory is used.}
\item{password}{Optionally, a character string containing a user password to access a secured PDF. Currently, encrypted PDFs cannot be merged with \code{merge_pdfs}.}
\item{outfile}{For \code{merge_pdfs}, a character string specifying the path to the PDF file to create from the merged documents.}
}
\value{
For \code{split_pdf}, a character vector specifying the output file names, which are patterned after the value of \code{file}. For \code{merge_pdfs}, the value of \code{outfile}.
}
\description{
Split PDF into separate pages or merge multiple PDFs into one.
}
\details{
\code{\link{split_pdf}} splits the file listed in \code{file} into separate one-page documents. \code{\link{merge_pdfs}} creates a single PDF document from multiple separate PDF files.
}
\examples{
\dontrun{
# simple demo file
f <- system.file("examples", "data.pdf", package = "tabulizer")
get_n_pages(file = f)
# split PDF by page
sf <- split_pdf(f)
# merge pdf
merge_pdfs(sf, "merged.pdf")
get_n_pages(file = "merged.pdf")
}
}
\author{
Thomas J. Leeper <thosjleeper@gmail.com>
}
\seealso{
\code{\link{extract_areas}}, \code{\link{get_page_dims}}, \code{\link{make_thumbnails}}
}
|
400b4643ef5e6af1e53a5f56f8702cc3e81d038f | d42ebe8fff3082dda512919bbc32e4bc55b6296d | /generated_code_examples/r/regression/svm.r | 00982938b8ca63598ab66fb7f0b3887b572b169d | [
"MIT",
"Python-2.0"
] | permissive | goldv/m2cgen | d4eb093eb738af102912f5720331a7504fc54a19 | d3dcedbc976917b4e205cd3641914f957feade34 | refs/heads/master | 2022-12-22T08:10:47.062447 | 2020-09-23T17:50:49 | 2020-09-23T17:50:49 | 298,049,601 | 0 | 0 | MIT | 2020-09-23T17:50:50 | 2020-09-23T17:46:44 | null | UTF-8 | R | false | false | 33,210 | r | svm.r | score <- function(input) {
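    # m2cgen-exported support-vector regression scorer: the prediction is the
    # intercept (26.85...) plus a weighted sum of one RBF kernel term per support
    # vector; var1/var2/var0/var3 exist only to keep the generated expressions short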
var1 <- ((((((26.85177874216177) + ((subroutine0(input)) * (-0.12034962779157432))) + ((subroutine1(input)) * (1.0))) + ((subroutine2(input)) * (-1.0))) + ((subroutine3(input)) * (-1.0))) + ((subroutine4(input)) * (-1.0))) + ((subroutine5(input)) * (0.6171875007313155))
var2 <- (subroutine6(input)) * (-1.0)
var0 <- (((((((((((((((((((((((((var1) + (var2)) + ((subroutine7(input)) * (1.0))) + ((subroutine8(input)) * (-1.0))) + ((subroutine9(input)) * (1.0))) + ((subroutine10(input)) * (0.3164062486215933))) + ((subroutine11(input)) * (-1.0))) + ((subroutine12(input)) * (1.0))) + ((subroutine13(input)) * (-1.0))) + ((subroutine14(input)) * (1.0))) + ((subroutine15(input)) * (-1.0))) + ((subroutine16(input)) * (1.0))) + ((subroutine17(input)) * (-1.0))) + ((subroutine18(input)) * (-1.0))) + ((subroutine19(input)) * (-0.3201043650830524))) + ((subroutine20(input)) * (-1.0))) + ((subroutine21(input)) * (-1.0))) + ((subroutine22(input)) * (1.0))) + ((subroutine23(input)) * (-0.7715023625371545))) + ((subroutine24(input)) * (-1.0))) + ((subroutine25(input)) * (1.0))) + ((subroutine26(input)) * (-1.0))) + ((subroutine27(input)) * (-0.006346611962003479))) + ((subroutine28(input)) * (1.0))) + ((subroutine29(input)) * (-1.0))) + ((subroutine30(input)) * (-1.0))
var3 <- (subroutine31(input)) * (-0.17130203879318218)
return((((((((((((((((((((((((((var0) + (var3)) + ((subroutine32(input)) * (1.0))) + ((subroutine33(input)) * (1.0))) + ((subroutine34(input)) * (1.0))) + ((subroutine35(input)) * (-0.32034025068626093))) + ((subroutine36(input)) * (-0.9199503780639393))) + ((subroutine37(input)) * (1.0))) + ((subroutine38(input)) * (-1.0))) + ((subroutine39(input)) * (1.0))) + ((subroutine40(input)) * (-0.12010436508304956))) + ((subroutine41(input)) * (1.0))) + ((subroutine42(input)) * (1.0))) + ((subroutine43(input)) * (1.0))) + ((subroutine44(input)) * (-1.0))) + ((subroutine45(input)) * (-1.0))) + ((subroutine46(input)) * (1.0))) + ((subroutine47(input)) * (1.0))) + ((subroutine48(input)) * (0.816406250647308))) + ((subroutine49(input)) * (1.0))) + ((subroutine50(input)) * (1.0))) + ((subroutine51(input)) * (1.0))) + ((subroutine52(input)) * (-1.0))) + ((subroutine53(input)) * (1.0))) + ((subroutine54(input)) * (-1.0))) + ((subroutine55(input)) * (-1.0)))
}
subroutine0 <- function(input) {
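    # each subroutine evaluates one RBF kernel term against a hard-coded support
    # vector; var0 is -gamma, with gamma = 0.0769... = 1/13 (one over the 13 input features)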
var0 <- (0) - (0.07692307692307693)
return(exp((var0) * (((((((((((((((25.9406) - (input[1])) ^ (2)) + (((0.0) - (input[2])) ^ (2))) + (((18.1) - (input[3])) ^ (2))) + (((0.0) - (input[4])) ^ (2))) + (((0.679) - (input[5])) ^ (2))) + (((5.304) - (input[6])) ^ (2))) + (((89.1) - (input[7])) ^ (2))) + (((1.6475) - (input[8])) ^ (2))) + (((24.0) - (input[9])) ^ (2))) + (((666.0) - (input[10])) ^ (2))) + (((20.2) - (input[11])) ^ (2))) + (((127.36) - (input[12])) ^ (2))) + (((26.64) - (input[13])) ^ (2)))))
}
subroutine1 <- function(input) {
var0 <- (0) - (0.07692307692307693)
return(exp((var0) * (((((((((((((((6.53876) - (input[1])) ^ (2)) + (((0.0) - (input[2])) ^ (2))) + (((18.1) - (input[3])) ^ (2))) + (((1.0) - (input[4])) ^ (2))) + (((0.631) - (input[5])) ^ (2))) + (((7.016) - (input[6])) ^ (2))) + (((97.5) - (input[7])) ^ (2))) + (((1.2024) - (input[8])) ^ (2))) + (((24.0) - (input[9])) ^ (2))) + (((666.0) - (input[10])) ^ (2))) + (((20.2) - (input[11])) ^ (2))) + (((392.05) - (input[12])) ^ (2))) + (((2.96) - (input[13])) ^ (2)))))
}
subroutine2 <- function(input) {
var0 <- (0) - (0.07692307692307693)
return(exp((var0) * (((((((((((((((22.5971) - (input[1])) ^ (2)) + (((0.0) - (input[2])) ^ (2))) + (((18.1) - (input[3])) ^ (2))) + (((0.0) - (input[4])) ^ (2))) + (((0.7) - (input[5])) ^ (2))) + (((5.0) - (input[6])) ^ (2))) + (((89.5) - (input[7])) ^ (2))) + (((1.5184) - (input[8])) ^ (2))) + (((24.0) - (input[9])) ^ (2))) + (((666.0) - (input[10])) ^ (2))) + (((20.2) - (input[11])) ^ (2))) + (((396.9) - (input[12])) ^ (2))) + (((31.99) - (input[13])) ^ (2)))))
}
subroutine3 <- function(input) {
var0 <- (0) - (0.07692307692307693)
return(exp((var0) * (((((((((((((((45.7461) - (input[1])) ^ (2)) + (((0.0) - (input[2])) ^ (2))) + (((18.1) - (input[3])) ^ (2))) + (((0.0) - (input[4])) ^ (2))) + (((0.693) - (input[5])) ^ (2))) + (((4.519) - (input[6])) ^ (2))) + (((100.0) - (input[7])) ^ (2))) + (((1.6582) - (input[8])) ^ (2))) + (((24.0) - (input[9])) ^ (2))) + (((666.0) - (input[10])) ^ (2))) + (((20.2) - (input[11])) ^ (2))) + (((88.27) - (input[12])) ^ (2))) + (((36.98) - (input[13])) ^ (2)))))
}
subroutine4 <- function(input) {
var0 <- (0) - (0.07692307692307693)
return(exp((var0) * (((((((((((((((11.8123) - (input[1])) ^ (2)) + (((0.0) - (input[2])) ^ (2))) + (((18.1) - (input[3])) ^ (2))) + (((0.0) - (input[4])) ^ (2))) + (((0.718) - (input[5])) ^ (2))) + (((6.824) - (input[6])) ^ (2))) + (((76.5) - (input[7])) ^ (2))) + (((1.794) - (input[8])) ^ (2))) + (((24.0) - (input[9])) ^ (2))) + (((666.0) - (input[10])) ^ (2))) + (((20.2) - (input[11])) ^ (2))) + (((48.45) - (input[12])) ^ (2))) + (((22.74) - (input[13])) ^ (2)))))
}
subroutine5 <- function(input) {
var0 <- (0) - (0.07692307692307693)
return(exp((var0) * (((((((((((((((0.08187) - (input[1])) ^ (2)) + (((0.0) - (input[2])) ^ (2))) + (((2.89) - (input[3])) ^ (2))) + (((0.0) - (input[4])) ^ (2))) + (((0.445) - (input[5])) ^ (2))) + (((7.82) - (input[6])) ^ (2))) + (((36.9) - (input[7])) ^ (2))) + (((3.4952) - (input[8])) ^ (2))) + (((2.0) - (input[9])) ^ (2))) + (((276.0) - (input[10])) ^ (2))) + (((18.0) - (input[11])) ^ (2))) + (((393.53) - (input[12])) ^ (2))) + (((3.57) - (input[13])) ^ (2)))))
}
subroutine6 <- function(input) {
var0 <- (0) - (0.07692307692307693)
return(exp((var0) * (((((((((((((((7.67202) - (input[1])) ^ (2)) + (((0.0) - (input[2])) ^ (2))) + (((18.1) - (input[3])) ^ (2))) + (((0.0) - (input[4])) ^ (2))) + (((0.693) - (input[5])) ^ (2))) + (((5.747) - (input[6])) ^ (2))) + (((98.9) - (input[7])) ^ (2))) + (((1.6334) - (input[8])) ^ (2))) + (((24.0) - (input[9])) ^ (2))) + (((666.0) - (input[10])) ^ (2))) + (((20.2) - (input[11])) ^ (2))) + (((393.1) - (input[12])) ^ (2))) + (((19.92) - (input[13])) ^ (2)))))
}
subroutine7 <- function(input) {
var0 <- (0) - (0.07692307692307693)
return(exp((var0) * (((((((((((((((1.46336) - (input[1])) ^ (2)) + (((0.0) - (input[2])) ^ (2))) + (((19.58) - (input[3])) ^ (2))) + (((0.0) - (input[4])) ^ (2))) + (((0.605) - (input[5])) ^ (2))) + (((7.489) - (input[6])) ^ (2))) + (((90.8) - (input[7])) ^ (2))) + (((1.9709) - (input[8])) ^ (2))) + (((5.0) - (input[9])) ^ (2))) + (((403.0) - (input[10])) ^ (2))) + (((14.7) - (input[11])) ^ (2))) + (((374.43) - (input[12])) ^ (2))) + (((1.73) - (input[13])) ^ (2)))))
}
subroutine8 <- function(input) {
var0 <- (0) - (0.07692307692307693)
return(exp((var0) * (((((((((((((((20.0849) - (input[1])) ^ (2)) + (((0.0) - (input[2])) ^ (2))) + (((18.1) - (input[3])) ^ (2))) + (((0.0) - (input[4])) ^ (2))) + (((0.7) - (input[5])) ^ (2))) + (((4.368) - (input[6])) ^ (2))) + (((91.2) - (input[7])) ^ (2))) + (((1.4395) - (input[8])) ^ (2))) + (((24.0) - (input[9])) ^ (2))) + (((666.0) - (input[10])) ^ (2))) + (((20.2) - (input[11])) ^ (2))) + (((285.83) - (input[12])) ^ (2))) + (((30.63) - (input[13])) ^ (2)))))
}
subroutine9 <- function(input) {
var0 <- (0) - (0.07692307692307693)
return(exp((var0) * (((((((((((((((1.83377) - (input[1])) ^ (2)) + (((0.0) - (input[2])) ^ (2))) + (((19.58) - (input[3])) ^ (2))) + (((1.0) - (input[4])) ^ (2))) + (((0.605) - (input[5])) ^ (2))) + (((7.802) - (input[6])) ^ (2))) + (((98.2) - (input[7])) ^ (2))) + (((2.0407) - (input[8])) ^ (2))) + (((5.0) - (input[9])) ^ (2))) + (((403.0) - (input[10])) ^ (2))) + (((14.7) - (input[11])) ^ (2))) + (((389.61) - (input[12])) ^ (2))) + (((1.92) - (input[13])) ^ (2)))))
}
subroutine10 <- function(input) {
var0 <- (0) - (0.07692307692307693)
return(exp((var0) * (((((((((((((((0.5405) - (input[1])) ^ (2)) + (((20.0) - (input[2])) ^ (2))) + (((3.97) - (input[3])) ^ (2))) + (((0.0) - (input[4])) ^ (2))) + (((0.575) - (input[5])) ^ (2))) + (((7.47) - (input[6])) ^ (2))) + (((52.6) - (input[7])) ^ (2))) + (((2.872) - (input[8])) ^ (2))) + (((5.0) - (input[9])) ^ (2))) + (((264.0) - (input[10])) ^ (2))) + (((13.0) - (input[11])) ^ (2))) + (((390.3) - (input[12])) ^ (2))) + (((3.16) - (input[13])) ^ (2)))))
}
subroutine11 <- function(input) {
var0 <- (0) - (0.07692307692307693)
return(exp((var0) * (((((((((((((((73.5341) - (input[1])) ^ (2)) + (((0.0) - (input[2])) ^ (2))) + (((18.1) - (input[3])) ^ (2))) + (((0.0) - (input[4])) ^ (2))) + (((0.679) - (input[5])) ^ (2))) + (((5.957) - (input[6])) ^ (2))) + (((100.0) - (input[7])) ^ (2))) + (((1.8026) - (input[8])) ^ (2))) + (((24.0) - (input[9])) ^ (2))) + (((666.0) - (input[10])) ^ (2))) + (((20.2) - (input[11])) ^ (2))) + (((16.45) - (input[12])) ^ (2))) + (((20.62) - (input[13])) ^ (2)))))
}
subroutine12 <- function(input) {
var0 <- (0) - (0.07692307692307693)
return(exp((var0) * (((((((((((((((0.33147) - (input[1])) ^ (2)) + (((0.0) - (input[2])) ^ (2))) + (((6.2) - (input[3])) ^ (2))) + (((0.0) - (input[4])) ^ (2))) + (((0.507) - (input[5])) ^ (2))) + (((8.247) - (input[6])) ^ (2))) + (((70.4) - (input[7])) ^ (2))) + (((3.6519) - (input[8])) ^ (2))) + (((8.0) - (input[9])) ^ (2))) + (((307.0) - (input[10])) ^ (2))) + (((17.4) - (input[11])) ^ (2))) + (((378.95) - (input[12])) ^ (2))) + (((3.95) - (input[13])) ^ (2)))))
}
subroutine13 <- function(input) {
var0 <- (0) - (0.07692307692307693)
return(exp((var0) * (((((((((((((((25.0461) - (input[1])) ^ (2)) + (((0.0) - (input[2])) ^ (2))) + (((18.1) - (input[3])) ^ (2))) + (((0.0) - (input[4])) ^ (2))) + (((0.693) - (input[5])) ^ (2))) + (((5.987) - (input[6])) ^ (2))) + (((100.0) - (input[7])) ^ (2))) + (((1.5888) - (input[8])) ^ (2))) + (((24.0) - (input[9])) ^ (2))) + (((666.0) - (input[10])) ^ (2))) + (((20.2) - (input[11])) ^ (2))) + (((396.9) - (input[12])) ^ (2))) + (((26.77) - (input[13])) ^ (2)))))
}
subroutine14 <- function(input) {
var0 <- (0) - (0.07692307692307693)
return(exp((var0) * (((((((((((((((0.57834) - (input[1])) ^ (2)) + (((20.0) - (input[2])) ^ (2))) + (((3.97) - (input[3])) ^ (2))) + (((0.0) - (input[4])) ^ (2))) + (((0.575) - (input[5])) ^ (2))) + (((8.297) - (input[6])) ^ (2))) + (((67.0) - (input[7])) ^ (2))) + (((2.4216) - (input[8])) ^ (2))) + (((5.0) - (input[9])) ^ (2))) + (((264.0) - (input[10])) ^ (2))) + (((13.0) - (input[11])) ^ (2))) + (((384.54) - (input[12])) ^ (2))) + (((7.44) - (input[13])) ^ (2)))))
}
subroutine15 <- function(input) {
var0 <- (0) - (0.07692307692307693)
return(exp((var0) * (((((((((((((((16.8118) - (input[1])) ^ (2)) + (((0.0) - (input[2])) ^ (2))) + (((18.1) - (input[3])) ^ (2))) + (((0.0) - (input[4])) ^ (2))) + (((0.7) - (input[5])) ^ (2))) + (((5.277) - (input[6])) ^ (2))) + (((98.1) - (input[7])) ^ (2))) + (((1.4261) - (input[8])) ^ (2))) + (((24.0) - (input[9])) ^ (2))) + (((666.0) - (input[10])) ^ (2))) + (((20.2) - (input[11])) ^ (2))) + (((396.9) - (input[12])) ^ (2))) + (((30.81) - (input[13])) ^ (2)))))
}
subroutine16 <- function(input) {
var0 <- (0) - (0.07692307692307693)
return(exp((var0) * (((((((((((((((0.31533) - (input[1])) ^ (2)) + (((0.0) - (input[2])) ^ (2))) + (((6.2) - (input[3])) ^ (2))) + (((0.0) - (input[4])) ^ (2))) + (((0.504) - (input[5])) ^ (2))) + (((8.266) - (input[6])) ^ (2))) + (((78.3) - (input[7])) ^ (2))) + (((2.8944) - (input[8])) ^ (2))) + (((8.0) - (input[9])) ^ (2))) + (((307.0) - (input[10])) ^ (2))) + (((17.4) - (input[11])) ^ (2))) + (((385.05) - (input[12])) ^ (2))) + (((4.14) - (input[13])) ^ (2)))))
}
subroutine17 <- function(input) {
var0 <- (0) - (0.07692307692307693)
return(exp((var0) * (((((((((((((((67.9208) - (input[1])) ^ (2)) + (((0.0) - (input[2])) ^ (2))) + (((18.1) - (input[3])) ^ (2))) + (((0.0) - (input[4])) ^ (2))) + (((0.693) - (input[5])) ^ (2))) + (((5.683) - (input[6])) ^ (2))) + (((100.0) - (input[7])) ^ (2))) + (((1.4254) - (input[8])) ^ (2))) + (((24.0) - (input[9])) ^ (2))) + (((666.0) - (input[10])) ^ (2))) + (((20.2) - (input[11])) ^ (2))) + (((384.97) - (input[12])) ^ (2))) + (((22.98) - (input[13])) ^ (2)))))
}
subroutine18 <- function(input) {
var0 <- (0) - (0.07692307692307693)
return(exp((var0) * (((((((((((((((0.18337) - (input[1])) ^ (2)) + (((0.0) - (input[2])) ^ (2))) + (((27.74) - (input[3])) ^ (2))) + (((0.0) - (input[4])) ^ (2))) + (((0.609) - (input[5])) ^ (2))) + (((5.414) - (input[6])) ^ (2))) + (((98.3) - (input[7])) ^ (2))) + (((1.7554) - (input[8])) ^ (2))) + (((4.0) - (input[9])) ^ (2))) + (((711.0) - (input[10])) ^ (2))) + (((20.1) - (input[11])) ^ (2))) + (((344.05) - (input[12])) ^ (2))) + (((23.97) - (input[13])) ^ (2)))))
}
subroutine19 <- function(input) {
var0 <- (0) - (0.07692307692307693)
return(exp((var0) * (((((((((((((((14.3337) - (input[1])) ^ (2)) + (((0.0) - (input[2])) ^ (2))) + (((18.1) - (input[3])) ^ (2))) + (((0.0) - (input[4])) ^ (2))) + (((0.7) - (input[5])) ^ (2))) + (((4.88) - (input[6])) ^ (2))) + (((100.0) - (input[7])) ^ (2))) + (((1.5895) - (input[8])) ^ (2))) + (((24.0) - (input[9])) ^ (2))) + (((666.0) - (input[10])) ^ (2))) + (((20.2) - (input[11])) ^ (2))) + (((372.92) - (input[12])) ^ (2))) + (((30.62) - (input[13])) ^ (2)))))
}
subroutine20 <- function(input) {
var0 <- (0) - (0.07692307692307693)
return(exp((var0) * (((((((((((((((0.20746) - (input[1])) ^ (2)) + (((0.0) - (input[2])) ^ (2))) + (((27.74) - (input[3])) ^ (2))) + (((0.0) - (input[4])) ^ (2))) + (((0.609) - (input[5])) ^ (2))) + (((5.093) - (input[6])) ^ (2))) + (((98.0) - (input[7])) ^ (2))) + (((1.8226) - (input[8])) ^ (2))) + (((4.0) - (input[9])) ^ (2))) + (((711.0) - (input[10])) ^ (2))) + (((20.1) - (input[11])) ^ (2))) + (((318.43) - (input[12])) ^ (2))) + (((29.68) - (input[13])) ^ (2)))))
}
subroutine21 <- function(input) {
var0 <- (0) - (0.07692307692307693)
return(exp((var0) * (((((((((((((((41.5292) - (input[1])) ^ (2)) + (((0.0) - (input[2])) ^ (2))) + (((18.1) - (input[3])) ^ (2))) + (((0.0) - (input[4])) ^ (2))) + (((0.693) - (input[5])) ^ (2))) + (((5.531) - (input[6])) ^ (2))) + (((85.4) - (input[7])) ^ (2))) + (((1.6074) - (input[8])) ^ (2))) + (((24.0) - (input[9])) ^ (2))) + (((666.0) - (input[10])) ^ (2))) + (((20.2) - (input[11])) ^ (2))) + (((329.46) - (input[12])) ^ (2))) + (((27.38) - (input[13])) ^ (2)))))
}
subroutine22 <- function(input) {
var0 <- (0) - (0.07692307692307693)
return(exp((var0) * (((((((((((((((1.51902) - (input[1])) ^ (2)) + (((0.0) - (input[2])) ^ (2))) + (((19.58) - (input[3])) ^ (2))) + (((1.0) - (input[4])) ^ (2))) + (((0.605) - (input[5])) ^ (2))) + (((8.375) - (input[6])) ^ (2))) + (((93.9) - (input[7])) ^ (2))) + (((2.162) - (input[8])) ^ (2))) + (((5.0) - (input[9])) ^ (2))) + (((403.0) - (input[10])) ^ (2))) + (((14.7) - (input[11])) ^ (2))) + (((388.45) - (input[12])) ^ (2))) + (((3.32) - (input[13])) ^ (2)))))
}
subroutine23 <- function(input) {
var0 <- (0) - (0.07692307692307693)
return(exp((var0) * (((((((((((((((11.5779) - (input[1])) ^ (2)) + (((0.0) - (input[2])) ^ (2))) + (((18.1) - (input[3])) ^ (2))) + (((0.0) - (input[4])) ^ (2))) + (((0.7) - (input[5])) ^ (2))) + (((5.036) - (input[6])) ^ (2))) + (((97.0) - (input[7])) ^ (2))) + (((1.77) - (input[8])) ^ (2))) + (((24.0) - (input[9])) ^ (2))) + (((666.0) - (input[10])) ^ (2))) + (((20.2) - (input[11])) ^ (2))) + (((396.9) - (input[12])) ^ (2))) + (((25.68) - (input[13])) ^ (2)))))
}
subroutine24 <- function(input) {
var0 <- (0) - (0.07692307692307693)
return(exp((var0) * (((((((((((((((14.2362) - (input[1])) ^ (2)) + (((0.0) - (input[2])) ^ (2))) + (((18.1) - (input[3])) ^ (2))) + (((0.0) - (input[4])) ^ (2))) + (((0.693) - (input[5])) ^ (2))) + (((6.343) - (input[6])) ^ (2))) + (((100.0) - (input[7])) ^ (2))) + (((1.5741) - (input[8])) ^ (2))) + (((24.0) - (input[9])) ^ (2))) + (((666.0) - (input[10])) ^ (2))) + (((20.2) - (input[11])) ^ (2))) + (((396.9) - (input[12])) ^ (2))) + (((20.32) - (input[13])) ^ (2)))))
}
subroutine25 <- function(input) {
var0 <- (0) - (0.07692307692307693)
return(exp((var0) * (((((((((((((((9.2323) - (input[1])) ^ (2)) + (((0.0) - (input[2])) ^ (2))) + (((18.1) - (input[3])) ^ (2))) + (((0.0) - (input[4])) ^ (2))) + (((0.631) - (input[5])) ^ (2))) + (((6.216) - (input[6])) ^ (2))) + (((100.0) - (input[7])) ^ (2))) + (((1.1691) - (input[8])) ^ (2))) + (((24.0) - (input[9])) ^ (2))) + (((666.0) - (input[10])) ^ (2))) + (((20.2) - (input[11])) ^ (2))) + (((366.15) - (input[12])) ^ (2))) + (((9.53) - (input[13])) ^ (2)))))
}
subroutine26 <- function(input) {
var0 <- (0) - (0.07692307692307693)
return(exp((var0) * (((((((((((((((9.91655) - (input[1])) ^ (2)) + (((0.0) - (input[2])) ^ (2))) + (((18.1) - (input[3])) ^ (2))) + (((0.0) - (input[4])) ^ (2))) + (((0.693) - (input[5])) ^ (2))) + (((5.852) - (input[6])) ^ (2))) + (((77.8) - (input[7])) ^ (2))) + (((1.5004) - (input[8])) ^ (2))) + (((24.0) - (input[9])) ^ (2))) + (((666.0) - (input[10])) ^ (2))) + (((20.2) - (input[11])) ^ (2))) + (((338.16) - (input[12])) ^ (2))) + (((29.97) - (input[13])) ^ (2)))))
}
subroutine27 <- function(input) {
var0 <- (0) - (0.07692307692307693)
return(exp((var0) * (((((((((((((((22.0511) - (input[1])) ^ (2)) + (((0.0) - (input[2])) ^ (2))) + (((18.1) - (input[3])) ^ (2))) + (((0.0) - (input[4])) ^ (2))) + (((0.74) - (input[5])) ^ (2))) + (((5.818) - (input[6])) ^ (2))) + (((92.4) - (input[7])) ^ (2))) + (((1.8662) - (input[8])) ^ (2))) + (((24.0) - (input[9])) ^ (2))) + (((666.0) - (input[10])) ^ (2))) + (((20.2) - (input[11])) ^ (2))) + (((391.45) - (input[12])) ^ (2))) + (((22.11) - (input[13])) ^ (2)))))
}
subroutine28 <- function(input) {
var0 <- (0) - (0.07692307692307693)
return(exp((var0) * (((((((((((((((0.61154) - (input[1])) ^ (2)) + (((20.0) - (input[2])) ^ (2))) + (((3.97) - (input[3])) ^ (2))) + (((0.0) - (input[4])) ^ (2))) + (((0.647) - (input[5])) ^ (2))) + (((8.704) - (input[6])) ^ (2))) + (((86.9) - (input[7])) ^ (2))) + (((1.801) - (input[8])) ^ (2))) + (((5.0) - (input[9])) ^ (2))) + (((264.0) - (input[10])) ^ (2))) + (((13.0) - (input[11])) ^ (2))) + (((389.7) - (input[12])) ^ (2))) + (((5.12) - (input[13])) ^ (2)))))
}
subroutine29 <- function(input) {
var0 <- (0) - (0.07692307692307693)
return(exp((var0) * (((((((((((((((10.8342) - (input[1])) ^ (2)) + (((0.0) - (input[2])) ^ (2))) + (((18.1) - (input[3])) ^ (2))) + (((0.0) - (input[4])) ^ (2))) + (((0.679) - (input[5])) ^ (2))) + (((6.782) - (input[6])) ^ (2))) + (((90.8) - (input[7])) ^ (2))) + (((1.8195) - (input[8])) ^ (2))) + (((24.0) - (input[9])) ^ (2))) + (((666.0) - (input[10])) ^ (2))) + (((20.2) - (input[11])) ^ (2))) + (((21.57) - (input[12])) ^ (2))) + (((25.79) - (input[13])) ^ (2)))))
}
subroutine30 <- function(input) {
var0 <- (0) - (0.07692307692307693)
return(exp((var0) * (((((((((((((((15.8603) - (input[1])) ^ (2)) + (((0.0) - (input[2])) ^ (2))) + (((18.1) - (input[3])) ^ (2))) + (((0.0) - (input[4])) ^ (2))) + (((0.679) - (input[5])) ^ (2))) + (((5.896) - (input[6])) ^ (2))) + (((95.4) - (input[7])) ^ (2))) + (((1.9096) - (input[8])) ^ (2))) + (((24.0) - (input[9])) ^ (2))) + (((666.0) - (input[10])) ^ (2))) + (((20.2) - (input[11])) ^ (2))) + (((7.68) - (input[12])) ^ (2))) + (((24.39) - (input[13])) ^ (2)))))
}
subroutine31 <- function(input) {
var0 <- (0) - (0.07692307692307693)
return(exp((var0) * (((((((((((((((17.8667) - (input[1])) ^ (2)) + (((0.0) - (input[2])) ^ (2))) + (((18.1) - (input[3])) ^ (2))) + (((0.0) - (input[4])) ^ (2))) + (((0.671) - (input[5])) ^ (2))) + (((6.223) - (input[6])) ^ (2))) + (((100.0) - (input[7])) ^ (2))) + (((1.3861) - (input[8])) ^ (2))) + (((24.0) - (input[9])) ^ (2))) + (((666.0) - (input[10])) ^ (2))) + (((20.2) - (input[11])) ^ (2))) + (((393.74) - (input[12])) ^ (2))) + (((21.78) - (input[13])) ^ (2)))))
}
subroutine32 <- function(input) {
var0 <- (0) - (0.07692307692307693)
return(exp((var0) * (((((((((((((((8.26725) - (input[1])) ^ (2)) + (((0.0) - (input[2])) ^ (2))) + (((18.1) - (input[3])) ^ (2))) + (((1.0) - (input[4])) ^ (2))) + (((0.668) - (input[5])) ^ (2))) + (((5.875) - (input[6])) ^ (2))) + (((89.6) - (input[7])) ^ (2))) + (((1.1296) - (input[8])) ^ (2))) + (((24.0) - (input[9])) ^ (2))) + (((666.0) - (input[10])) ^ (2))) + (((20.2) - (input[11])) ^ (2))) + (((347.88) - (input[12])) ^ (2))) + (((8.88) - (input[13])) ^ (2)))))
}
subroutine33 <- function(input) {
var0 <- (0) - (0.07692307692307693)
return(exp((var0) * (((((((((((((((0.52693) - (input[1])) ^ (2)) + (((0.0) - (input[2])) ^ (2))) + (((6.2) - (input[3])) ^ (2))) + (((0.0) - (input[4])) ^ (2))) + (((0.504) - (input[5])) ^ (2))) + (((8.725) - (input[6])) ^ (2))) + (((83.0) - (input[7])) ^ (2))) + (((2.8944) - (input[8])) ^ (2))) + (((8.0) - (input[9])) ^ (2))) + (((307.0) - (input[10])) ^ (2))) + (((17.4) - (input[11])) ^ (2))) + (((382.0) - (input[12])) ^ (2))) + (((4.63) - (input[13])) ^ (2)))))
}
subroutine34 <- function(input) {
var0 <- (0) - (0.07692307692307693)
return(exp((var0) * (((((((((((((((0.0351) - (input[1])) ^ (2)) + (((95.0) - (input[2])) ^ (2))) + (((2.68) - (input[3])) ^ (2))) + (((0.0) - (input[4])) ^ (2))) + (((0.4161) - (input[5])) ^ (2))) + (((7.853) - (input[6])) ^ (2))) + (((33.2) - (input[7])) ^ (2))) + (((5.118) - (input[8])) ^ (2))) + (((4.0) - (input[9])) ^ (2))) + (((224.0) - (input[10])) ^ (2))) + (((14.7) - (input[11])) ^ (2))) + (((392.78) - (input[12])) ^ (2))) + (((3.81) - (input[13])) ^ (2)))))
}
subroutine35 <- function(input) {
var0 <- (0) - (0.07692307692307693)
return(exp((var0) * (((((((((((((((12.2472) - (input[1])) ^ (2)) + (((0.0) - (input[2])) ^ (2))) + (((18.1) - (input[3])) ^ (2))) + (((0.0) - (input[4])) ^ (2))) + (((0.584) - (input[5])) ^ (2))) + (((5.837) - (input[6])) ^ (2))) + (((59.7) - (input[7])) ^ (2))) + (((1.9976) - (input[8])) ^ (2))) + (((24.0) - (input[9])) ^ (2))) + (((666.0) - (input[10])) ^ (2))) + (((20.2) - (input[11])) ^ (2))) + (((24.65) - (input[12])) ^ (2))) + (((15.69) - (input[13])) ^ (2)))))
}
subroutine36 <- function(input) {
var0 <- (0) - (0.07692307692307693)
return(exp((var0) * (((((((((((((((14.4208) - (input[1])) ^ (2)) + (((0.0) - (input[2])) ^ (2))) + (((18.1) - (input[3])) ^ (2))) + (((0.0) - (input[4])) ^ (2))) + (((0.74) - (input[5])) ^ (2))) + (((6.461) - (input[6])) ^ (2))) + (((93.3) - (input[7])) ^ (2))) + (((2.0026) - (input[8])) ^ (2))) + (((24.0) - (input[9])) ^ (2))) + (((666.0) - (input[10])) ^ (2))) + (((20.2) - (input[11])) ^ (2))) + (((27.49) - (input[12])) ^ (2))) + (((18.05) - (input[13])) ^ (2)))))
}
subroutine37 <- function(input) {
var0 <- (0) - (0.07692307692307693)
return(exp((var0) * (((((((((((((((0.29819) - (input[1])) ^ (2)) + (((0.0) - (input[2])) ^ (2))) + (((6.2) - (input[3])) ^ (2))) + (((0.0) - (input[4])) ^ (2))) + (((0.504) - (input[5])) ^ (2))) + (((7.686) - (input[6])) ^ (2))) + (((17.0) - (input[7])) ^ (2))) + (((3.3751) - (input[8])) ^ (2))) + (((8.0) - (input[9])) ^ (2))) + (((307.0) - (input[10])) ^ (2))) + (((17.4) - (input[11])) ^ (2))) + (((377.51) - (input[12])) ^ (2))) + (((3.92) - (input[13])) ^ (2)))))
}
subroutine38 <- function(input) {
var0 <- (0) - (0.07692307692307693)
return(exp((var0) * (((((((((((((((38.3518) - (input[1])) ^ (2)) + (((0.0) - (input[2])) ^ (2))) + (((18.1) - (input[3])) ^ (2))) + (((0.0) - (input[4])) ^ (2))) + (((0.693) - (input[5])) ^ (2))) + (((5.453) - (input[6])) ^ (2))) + (((100.0) - (input[7])) ^ (2))) + (((1.4896) - (input[8])) ^ (2))) + (((24.0) - (input[9])) ^ (2))) + (((666.0) - (input[10])) ^ (2))) + (((20.2) - (input[11])) ^ (2))) + (((396.9) - (input[12])) ^ (2))) + (((30.59) - (input[13])) ^ (2)))))
}
subroutine39 <- function(input) {
var0 <- (0) - (0.07692307692307693)
return(exp((var0) * (((((((((((((((0.06129) - (input[1])) ^ (2)) + (((20.0) - (input[2])) ^ (2))) + (((3.33) - (input[3])) ^ (2))) + (((1.0) - (input[4])) ^ (2))) + (((0.4429) - (input[5])) ^ (2))) + (((7.645) - (input[6])) ^ (2))) + (((49.7) - (input[7])) ^ (2))) + (((5.2119) - (input[8])) ^ (2))) + (((5.0) - (input[9])) ^ (2))) + (((216.0) - (input[10])) ^ (2))) + (((14.9) - (input[11])) ^ (2))) + (((377.07) - (input[12])) ^ (2))) + (((3.01) - (input[13])) ^ (2)))))
}
subroutine40 <- function(input) {
var0 <- (0) - (0.07692307692307693)
return(exp((var0) * (((((((((((((((88.9762) - (input[1])) ^ (2)) + (((0.0) - (input[2])) ^ (2))) + (((18.1) - (input[3])) ^ (2))) + (((0.0) - (input[4])) ^ (2))) + (((0.671) - (input[5])) ^ (2))) + (((6.968) - (input[6])) ^ (2))) + (((91.9) - (input[7])) ^ (2))) + (((1.4165) - (input[8])) ^ (2))) + (((24.0) - (input[9])) ^ (2))) + (((666.0) - (input[10])) ^ (2))) + (((20.2) - (input[11])) ^ (2))) + (((396.9) - (input[12])) ^ (2))) + (((17.21) - (input[13])) ^ (2)))))
}
subroutine41 <- function(input) {
var0 <- (0) - (0.07692307692307693)
return(exp((var0) * (((((((((((((((0.05602) - (input[1])) ^ (2)) + (((0.0) - (input[2])) ^ (2))) + (((2.46) - (input[3])) ^ (2))) + (((0.0) - (input[4])) ^ (2))) + (((0.488) - (input[5])) ^ (2))) + (((7.831) - (input[6])) ^ (2))) + (((53.6) - (input[7])) ^ (2))) + (((3.1992) - (input[8])) ^ (2))) + (((3.0) - (input[9])) ^ (2))) + (((193.0) - (input[10])) ^ (2))) + (((17.8) - (input[11])) ^ (2))) + (((392.63) - (input[12])) ^ (2))) + (((4.45) - (input[13])) ^ (2)))))
}
subroutine42 <- function(input) {
var0 <- (0) - (0.07692307692307693)
return(exp((var0) * (((((((((((((((0.01501) - (input[1])) ^ (2)) + (((90.0) - (input[2])) ^ (2))) + (((1.21) - (input[3])) ^ (2))) + (((1.0) - (input[4])) ^ (2))) + (((0.401) - (input[5])) ^ (2))) + (((7.923) - (input[6])) ^ (2))) + (((24.8) - (input[7])) ^ (2))) + (((5.885) - (input[8])) ^ (2))) + (((1.0) - (input[9])) ^ (2))) + (((198.0) - (input[10])) ^ (2))) + (((13.6) - (input[11])) ^ (2))) + (((395.52) - (input[12])) ^ (2))) + (((3.16) - (input[13])) ^ (2)))))
}
subroutine43 <- function(input) {
var0 <- (0) - (0.07692307692307693)
return(exp((var0) * (((((((((((((((0.02009) - (input[1])) ^ (2)) + (((95.0) - (input[2])) ^ (2))) + (((2.68) - (input[3])) ^ (2))) + (((0.0) - (input[4])) ^ (2))) + (((0.4161) - (input[5])) ^ (2))) + (((8.034) - (input[6])) ^ (2))) + (((31.9) - (input[7])) ^ (2))) + (((5.118) - (input[8])) ^ (2))) + (((4.0) - (input[9])) ^ (2))) + (((224.0) - (input[10])) ^ (2))) + (((14.7) - (input[11])) ^ (2))) + (((390.55) - (input[12])) ^ (2))) + (((2.88) - (input[13])) ^ (2)))))
}
subroutine44 <- function(input) {
var0 <- (0) - (0.07692307692307693)
return(exp((var0) * (((((((((((((((15.1772) - (input[1])) ^ (2)) + (((0.0) - (input[2])) ^ (2))) + (((18.1) - (input[3])) ^ (2))) + (((0.0) - (input[4])) ^ (2))) + (((0.74) - (input[5])) ^ (2))) + (((6.152) - (input[6])) ^ (2))) + (((100.0) - (input[7])) ^ (2))) + (((1.9142) - (input[8])) ^ (2))) + (((24.0) - (input[9])) ^ (2))) + (((666.0) - (input[10])) ^ (2))) + (((20.2) - (input[11])) ^ (2))) + (((9.32) - (input[12])) ^ (2))) + (((26.45) - (input[13])) ^ (2)))))
}
subroutine45 <- function(input) {
var0 <- (0) - (0.07692307692307693)
return(exp((var0) * (((((((((((((((18.0846) - (input[1])) ^ (2)) + (((0.0) - (input[2])) ^ (2))) + (((18.1) - (input[3])) ^ (2))) + (((0.0) - (input[4])) ^ (2))) + (((0.679) - (input[5])) ^ (2))) + (((6.434) - (input[6])) ^ (2))) + (((100.0) - (input[7])) ^ (2))) + (((1.8347) - (input[8])) ^ (2))) + (((24.0) - (input[9])) ^ (2))) + (((666.0) - (input[10])) ^ (2))) + (((20.2) - (input[11])) ^ (2))) + (((27.25) - (input[12])) ^ (2))) + (((29.05) - (input[13])) ^ (2)))))
}
subroutine46 <- function(input) {
var0 <- (0) - (0.07692307692307693)
return(exp((var0) * (((((((((((((((0.01381) - (input[1])) ^ (2)) + (((80.0) - (input[2])) ^ (2))) + (((0.46) - (input[3])) ^ (2))) + (((0.0) - (input[4])) ^ (2))) + (((0.422) - (input[5])) ^ (2))) + (((7.875) - (input[6])) ^ (2))) + (((32.0) - (input[7])) ^ (2))) + (((5.6484) - (input[8])) ^ (2))) + (((4.0) - (input[9])) ^ (2))) + (((255.0) - (input[10])) ^ (2))) + (((14.4) - (input[11])) ^ (2))) + (((394.23) - (input[12])) ^ (2))) + (((2.97) - (input[13])) ^ (2)))))
}
subroutine47 <- function(input) {
var0 <- (0) - (0.07692307692307693)
return(exp((var0) * (((((((((((((((5.66998) - (input[1])) ^ (2)) + (((0.0) - (input[2])) ^ (2))) + (((18.1) - (input[3])) ^ (2))) + (((1.0) - (input[4])) ^ (2))) + (((0.631) - (input[5])) ^ (2))) + (((6.683) - (input[6])) ^ (2))) + (((96.8) - (input[7])) ^ (2))) + (((1.3567) - (input[8])) ^ (2))) + (((24.0) - (input[9])) ^ (2))) + (((666.0) - (input[10])) ^ (2))) + (((20.2) - (input[11])) ^ (2))) + (((375.33) - (input[12])) ^ (2))) + (((3.73) - (input[13])) ^ (2)))))
}
subroutine48 <- function(input) {
var0 <- (0) - (0.07692307692307693)
return(exp((var0) * (((((((((((((((0.01538) - (input[1])) ^ (2)) + (((90.0) - (input[2])) ^ (2))) + (((3.75) - (input[3])) ^ (2))) + (((0.0) - (input[4])) ^ (2))) + (((0.394) - (input[5])) ^ (2))) + (((7.454) - (input[6])) ^ (2))) + (((34.2) - (input[7])) ^ (2))) + (((6.3361) - (input[8])) ^ (2))) + (((3.0) - (input[9])) ^ (2))) + (((244.0) - (input[10])) ^ (2))) + (((15.9) - (input[11])) ^ (2))) + (((386.34) - (input[12])) ^ (2))) + (((3.11) - (input[13])) ^ (2)))))
}
subroutine49 <- function(input) {
var0 <- (0) - (0.07692307692307693)
return(exp((var0) * (((((((((((((((4.89822) - (input[1])) ^ (2)) + (((0.0) - (input[2])) ^ (2))) + (((18.1) - (input[3])) ^ (2))) + (((0.0) - (input[4])) ^ (2))) + (((0.631) - (input[5])) ^ (2))) + (((4.97) - (input[6])) ^ (2))) + (((100.0) - (input[7])) ^ (2))) + (((1.3325) - (input[8])) ^ (2))) + (((24.0) - (input[9])) ^ (2))) + (((666.0) - (input[10])) ^ (2))) + (((20.2) - (input[11])) ^ (2))) + (((375.52) - (input[12])) ^ (2))) + (((3.26) - (input[13])) ^ (2)))))
}
subroutine50 <- function(input) {
var0 <- (0) - (0.07692307692307693)
return(exp((var0) * (((((((((((((((2.01019) - (input[1])) ^ (2)) + (((0.0) - (input[2])) ^ (2))) + (((19.58) - (input[3])) ^ (2))) + (((0.0) - (input[4])) ^ (2))) + (((0.605) - (input[5])) ^ (2))) + (((7.929) - (input[6])) ^ (2))) + (((96.2) - (input[7])) ^ (2))) + (((2.0459) - (input[8])) ^ (2))) + (((5.0) - (input[9])) ^ (2))) + (((403.0) - (input[10])) ^ (2))) + (((14.7) - (input[11])) ^ (2))) + (((369.3) - (input[12])) ^ (2))) + (((3.7) - (input[13])) ^ (2)))))
}
subroutine51 <- function(input) {
var0 <- (0) - (0.07692307692307693)
return(exp((var0) * (((((((((((((((0.52014) - (input[1])) ^ (2)) + (((20.0) - (input[2])) ^ (2))) + (((3.97) - (input[3])) ^ (2))) + (((0.0) - (input[4])) ^ (2))) + (((0.647) - (input[5])) ^ (2))) + (((8.398) - (input[6])) ^ (2))) + (((91.5) - (input[7])) ^ (2))) + (((2.2885) - (input[8])) ^ (2))) + (((5.0) - (input[9])) ^ (2))) + (((264.0) - (input[10])) ^ (2))) + (((13.0) - (input[11])) ^ (2))) + (((386.86) - (input[12])) ^ (2))) + (((5.91) - (input[13])) ^ (2)))))
}
subroutine52 <- function(input) {
var0 <- (0) - (0.07692307692307693)
return(exp((var0) * (((((((((((((((9.33889) - (input[1])) ^ (2)) + (((0.0) - (input[2])) ^ (2))) + (((18.1) - (input[3])) ^ (2))) + (((0.0) - (input[4])) ^ (2))) + (((0.679) - (input[5])) ^ (2))) + (((6.38) - (input[6])) ^ (2))) + (((95.6) - (input[7])) ^ (2))) + (((1.9682) - (input[8])) ^ (2))) + (((24.0) - (input[9])) ^ (2))) + (((666.0) - (input[10])) ^ (2))) + (((20.2) - (input[11])) ^ (2))) + (((60.72) - (input[12])) ^ (2))) + (((24.08) - (input[13])) ^ (2)))))
}
subroutine53 <- function(input) {
var0 <- (0) - (0.07692307692307693)
return(exp((var0) * (((((((((((((((0.03578) - (input[1])) ^ (2)) + (((20.0) - (input[2])) ^ (2))) + (((3.33) - (input[3])) ^ (2))) + (((0.0) - (input[4])) ^ (2))) + (((0.4429) - (input[5])) ^ (2))) + (((7.82) - (input[6])) ^ (2))) + (((64.5) - (input[7])) ^ (2))) + (((4.6947) - (input[8])) ^ (2))) + (((5.0) - (input[9])) ^ (2))) + (((216.0) - (input[10])) ^ (2))) + (((14.9) - (input[11])) ^ (2))) + (((387.31) - (input[12])) ^ (2))) + (((3.76) - (input[13])) ^ (2)))))
}
subroutine54 <- function(input) {
var0 <- (0) - (0.07692307692307693)
return(exp((var0) * (((((((((((((((24.8017) - (input[1])) ^ (2)) + (((0.0) - (input[2])) ^ (2))) + (((18.1) - (input[3])) ^ (2))) + (((0.0) - (input[4])) ^ (2))) + (((0.693) - (input[5])) ^ (2))) + (((5.349) - (input[6])) ^ (2))) + (((96.0) - (input[7])) ^ (2))) + (((1.7028) - (input[8])) ^ (2))) + (((24.0) - (input[9])) ^ (2))) + (((666.0) - (input[10])) ^ (2))) + (((20.2) - (input[11])) ^ (2))) + (((396.9) - (input[12])) ^ (2))) + (((19.77) - (input[13])) ^ (2)))))
}
subroutine55 <- function(input) {
var0 <- (0) - (0.07692307692307693)
return(exp((var0) * (((((((((((((((13.6781) - (input[1])) ^ (2)) + (((0.0) - (input[2])) ^ (2))) + (((18.1) - (input[3])) ^ (2))) + (((0.0) - (input[4])) ^ (2))) + (((0.74) - (input[5])) ^ (2))) + (((5.935) - (input[6])) ^ (2))) + (((87.9) - (input[7])) ^ (2))) + (((1.8206) - (input[8])) ^ (2))) + (((24.0) - (input[9])) ^ (2))) + (((666.0) - (input[10])) ^ (2))) + (((20.2) - (input[11])) ^ (2))) + (((68.95) - (input[12])) ^ (2))) + (((34.02) - (input[13])) ^ (2)))))
}
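# All 56 subroutines above share one shape, so a hand-written equivalent only
# needs the kernel in one place. A minimal sketch (hypothetical helper, not part
# of the generated API; `svs` would be a matrix holding the hard-coded support
# vectors and `coefs` the per-vector weights copied from the calls in score()):
rbf_score <- function(input, svs, coefs, intercept, gamma = 1 / 13) {
  # one RBF kernel value per support vector (rows of svs), then the weighted sum
  k <- apply(svs, 1, function(sv) exp(-gamma * sum((sv - input)^2)))
  intercept + sum(coefs * k)
}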
|
d18777ee6734420882ca7bf80119673c7398c1df | ffdea92d4315e4363dd4ae673a1a6adf82a761b5 | /data/genthat_extracted_code/mistral/examples/S2MART.Rd.R | e4def209c002359ee541d95048e260a25fc639f3 | [] | no_license | surayaaramli/typeRrh | d257ac8905c49123f4ccd4e377ee3dfc84d1636c | 66e6996f31961bc8b9aafe1a6a6098327b66bf71 | refs/heads/master | 2023-05-05T04:05:31.617869 | 2019-04-25T22:10:06 | 2019-04-25T22:10:06 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,183 | r | S2MART.Rd.R | library(mistral)
### Name: S2MART
### Title: Subset by Support vector Margin Algorithm for Reliability
### esTimation
### Aliases: S2MART
### ** Examples
## Not run:
##D res = S2MART(dimension = 2,
##D lsf = kiureghian,
##D N1 = 1000, N2 = 5000, N3 = 10000,
##D plot = TRUE)
##D
##D #Compare with crude Monte-Carlo reference value
##D reference = MonteCarlo(2, kiureghian, N_max = 500000)
## End(Not run)
#See impact of metamodel-based subset simulation with Waarts function :
## Not run:
##D res = list()
##D # SMART stands for the pure metamodel-based algorithm targeting the failure
##D # domain directly. This is not recommended by its authors, who for this
##D # purpose designed S2MART : Subset-SMART
##D res$SMART = mistral:::SMART(dimension = 2, lsf = waarts, plot=TRUE)
##D res$S2MART = S2MART(dimension = 2,
##D lsf = waarts,
##D N1 = 1000, N2 = 5000, N3 = 10000,
##D plot=TRUE)
##D res$SS = SubsetSimulation(dimension = 2, waarts, n_init_samples = 10000)
##D res$MC = MonteCarlo(2, waarts, N_max = 500000)
## End(Not run)
|
3c649d47674d29cfdb08c22e9aac93856345b6df | faee77c07264c2c9fa2868effb9db5537451de4f | /scripts/clean_rmats.R | 77037752cd1ed59a5e92e355270d442d0f6e1adb | [] | no_license | davemcg/EiaD_build | 46773d3408ce548de45cabdfa76718248fd05615 | 81ef03f67d2d6c5edd58112cba99d60d4d76cb17 | refs/heads/master | 2023-08-18T17:34:58.093662 | 2021-03-05T15:34:27 | 2021-03-05T15:34:27 | 132,913,205 | 7 | 2 | null | 2023-09-03T13:55:37 | 2018-05-10T14:38:35 | R | UTF-8 | R | false | false | 12,703 | r | clean_rmats.R | notes <- '
-for a single subtissue, there are different sites present in different comparisons, might want to look into that
combination=i_combination
event <- i_event
files <- i_files
event_header <- i_event_header
- script won't run on its own, need to remove and move some files, gotta fix that
> files <- i_files
> event <- i_event
subtissue <- i_subtissue
'
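# pipeline overview: rmats_out/ holds one folder per pairwise rMATS comparison
# (paired-end and single-end runs kept separate); combine_PE_SE() merges each
# PE/SE pair into rmats_comb/, then combine_rmats_output() collapses every
# comparison involving a given subtissue into one table under rmats_final/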
#setwd('~/NIH/autoRNAseq/')
library(dplyr)
# i_event <- 'MXE.MATS.JC.txt'
#combination=k[[1]]
#somehow this will fail sometimes as a function but will run fine line by line
combine_PE_SE <- function(combination,event,files,event_header){
target_files <- files[grepl(combination[1],files)]%>%.[grepl(combination[2],.)]
if(length(target_files)==1){
countsCol<-c('IJC_SAMPLE_1','SJC_SAMPLE_1','IJC_SAMPLE_2','SJC_SAMPLE_2')
tmp <- paste('rmats_out',target_files,event, sep = '/')%>%read.table(header = T,sep = '\t',stringsAsFactors = F)
tmp[,countsCol] <- apply(tmp[,countsCol],2, function(x) sapply(x,function(y) strsplit(y,',')%>%unlist%>%as.numeric%>%sum) )
path <- paste0('rmats_comb/',combination[1],'_VS_',combination[2])
dir.create(path = path)
write.table(tmp,paste(path,event,sep='/'),row.names = F,col.names = T, quote = F,sep = '\t')
#if(event=='A3SS.MATs.JC.txt') unlink(target_files,recursive = T)# after all events, remove the folders
#if(event=='A3SS.MATS.JC.txt') unlink(paste0('rmats_out/',target_files),recursive = T)
return(0)
}else if(length(target_files)==0){
    print(paste('no rMATS output found for comparison', combination[1], 'VS', combination[2]))
return(1)
}
countsCol<-c('IJC_SAMPLE_1','SJC_SAMPLE_1','IJC_SAMPLE_2','SJC_SAMPLE_2')
names(target_files) <- grepl('_PE',target_files)%>%ifelse('PE','SE')
samp_PE <- paste('rmats_out',target_files[grep('_PE',target_files)],event,sep = '/')%>%read.table(header = T,sep = '\t',stringsAsFactors = F)
samp_SE <- paste('rmats_out',target_files[grep('_SE',target_files)],event,sep = '/')%>%read.table(,header = T,sep = '\t',stringsAsFactors = F)
if(nrow(samp_SE)==0 || nrow(samp_PE)==0){
    # rough error handling: one of the PE/SE runs is empty, so skip the merge and
    # write each table out as-is (both writes target the same file, so the SE
    # table ends up overwriting the PE one)
    print(combination)
    path <- paste0('rmats_comb/', combination[1], '_VS_', combination[2], '/')
    dir.create(path, showWarnings = F)
    write.table(samp_PE, paste0(path, event), row.names = F, col.names = T, quote = F, sep = '\t')
    write.table(samp_SE, paste0(path, event), row.names = F, col.names = T, quote = F, sep = '\t')
return(1)
}
samp_PE[,countsCol] <- apply(samp_PE[,countsCol],2, function(x) sapply(x,function(y) strsplit(y,',')%>%unlist%>%as.numeric%>%sum) )
samp_SE[,countsCol] <- apply(samp_SE[,countsCol],2, function(x) sapply(x,function(y) strsplit(y,',')%>%unlist%>%as.numeric%>%sum) )
#the first tissue is the first one in combination, the second tissue is the second
st1_se <- paste0(c('IJC_SAMPLE_','SJC_SAMPLE_'), grep(combination[1],strsplit(target_files['SE'],'VS')%>%unlist))
st2_se <- paste0(c('IJC_SAMPLE_','SJC_SAMPLE_'), grep(combination[2],strsplit(target_files['SE'],'VS')%>%unlist))
st1_pe <- paste0(c('IJC_SAMPLE_','SJC_SAMPLE_'), grep(combination[1],strsplit(target_files['PE'],'VS')%>%unlist))
st2_pe <- paste0(c('IJC_SAMPLE_','SJC_SAMPLE_'), grep(combination[2],strsplit(target_files['PE'],'VS')%>%unlist))
#test <- full_join(samp_SE,samp_PE, by=c("chr","strand","exonStart_0base","exonEnd","upstreamES","upstreamEE","downstreamES","downstreamEE"))
good_cols <- c("GeneID","geneSymbol",event_header[[event]],'IJC_SAMPLE_1','SJC_SAMPLE_1','IJC_SAMPLE_2','SJC_SAMPLE_2',"PValue","FDR")
samp_PE <- samp_PE[,c("GeneID","geneSymbol",event_header[[event]],st1_pe,st2_pe,"PValue","FDR")]
colnames(samp_PE) <- good_cols
samp_SE <- samp_SE[,c("GeneID","geneSymbol",event_header[[event]],st1_se,st2_se,"PValue","FDR")]
colnames(samp_SE) <- good_cols
event_header[event]
#z_merge <- full_join(samp_PE,samp_SE,by=c("chr","strand","exonStart_0base","exonEnd","upstreamES","upstreamEE","downstreamES","downstreamEE"))
z_merge <- full_join(samp_PE,samp_SE,by=event_header[[event]])
# fill in na values for info
mergeCols.x <- c("GeneID.x","geneSymbol.x",'IJC_SAMPLE_1.x','SJC_SAMPLE_1.x','IJC_SAMPLE_2.x','SJC_SAMPLE_2.x')
mergeCols.y <- c("GeneID.y","geneSymbol.y",'IJC_SAMPLE_1.y','SJC_SAMPLE_1.y','IJC_SAMPLE_2.y','SJC_SAMPLE_2.y')
z_merge[is.na(z_merge$IJC_SAMPLE_1.x),mergeCols.x] <-z_merge[is.na(z_merge$IJC_SAMPLE_1.x),mergeCols.y]
# fill in na p-values by just replicatng p-value from sample with valid values, so when we average, it will stay the same
z_merge$PValue.x[is.na(z_merge$PValue.x)] <- z_merge$PValue.y[is.na(z_merge$PValue.x)]
z_merge$PValue.y[is.na(z_merge$PValue.y)] <- z_merge$PValue.x[is.na(z_merge$PValue.y)]
new_pvalue <- rowMeans(z_merge[c('PValue.x','PValue.y')])
new_fdr <- p.adjust(new_pvalue,method = "BH")
final <- data.frame(z_merge[,c("GeneID.x","geneSymbol.x",event_header[[event]],'IJC_SAMPLE_1.x','SJC_SAMPLE_1.x','IJC_SAMPLE_2.x','SJC_SAMPLE_2.x')],new_pvalue,new_fdr,stringsAsFactors = F)
colnames(final) <- good_cols
path <- paste0('rmats_comb/',combination[1],'_VS_',combination[2])
dir.create(path = path)
write.table(final,paste(path,event,sep='/'),row.names = F,col.names = T, quote = F,sep = '\t')
}
##Combine all different comparisons for a specific tissue
#needs to be cleaned up a little bit, there are soe redundant parts
combine_rmats_output <- function(files,subtissue,event,first=TRUE,event_header){
files.st <- files[grep(subtissue,files)]
for(comparison in files.st){
#generate the first comparison
#comparison <- files.st[1]
#print(comparison)
if(first==TRUE){
      print('in first')
      first <- FALSE
      test1 <- paste('rmats_comb',comparison,event, sep = '/')%>% read.table(header = T,sep = '\t',stringsAsFactors = F)
st_counts <- c('IJC_SAMPLE_1','SJC_SAMPLE_1')
comp <- paste0(c("PValue","FDR"),'.',comparison )
if(strsplit(comparison,'VS')%>%unlist%>%grepl(subtissue,.)%>%.[2]) st_counts <- c('IJC_SAMPLE_2','SJC_SAMPLE_2')
cols <- c( "GeneID","geneSymbol",event_header[[event]],st_counts,"PValue","FDR" )
test1 <- test1[,cols]
# files generated above already have cleaned count, so account for that
comp <- paste0(c("PValue","FDR"),'.',comparison)
colnames(test1)<- c( "GeneID","geneSymbol",event_header[[event]], 'IJC_SAMPLE_1','SJC_SAMPLE_1',comp)
st_counts <- c('IJC_SAMPLE_1','SJC_SAMPLE_1')
if(grepl(',', test1[,'IJC_SAMPLE_1'])%>%any) test1[,st_counts] <- apply(test1[,st_counts],2, function(x) sapply(x,function(y) strsplit(y,',')%>%unlist%>%as.numeric%>%sum) )
colnames(test1)<- c( "GeneID","geneSymbol",event_header[[event]], 'IJC_SAMPLE_1','SJC_SAMPLE_1',comp)
} else{# now add rest of comparisons to first
#comparison <- files.st[2]
      test2 <- paste('rmats_comb',comparison,event, sep = '/')%>% read.table(header = T,sep = '\t',stringsAsFactors = F)
st_counts <- c('IJC_SAMPLE_1','SJC_SAMPLE_1')
      # the subtissue may be the first or second sample, so account for that
if(strsplit(comparison,'VS')%>%unlist%>%grepl(subtissue,.)%>%.[2]) st_counts <- c('IJC_SAMPLE_2','SJC_SAMPLE_2')
cols <- c( "GeneID","geneSymbol",event_header[[event]],st_counts,"PValue","FDR" )
test2 <- test2[,cols]
# counts are presented as a comma sep list, so split and sum for total count for a tissue
comp <- paste0(c("PValue","FDR"),'.',comparison)
colnames(test2)<- c( "GeneID","geneSymbol",event_header[[event]], 'IJC_SAMPLE_1','SJC_SAMPLE_1',comp)
st_counts <- c('IJC_SAMPLE_1','SJC_SAMPLE_1')
if(grepl(',', test1[,'IJC_SAMPLE_1'])%>%any) test2[,st_counts] <- apply(test2[,st_counts],2, function(x) sapply(x,function(y) strsplit(y,',')%>%unlist%>%as.numeric%>%sum) )
#test2[,st_counts] <- apply(test2[,st_counts],2, function(x) sapply(x,function(y) strsplit(y,',')%>%unlist%>%as.numeric%>%sum) )
      #join old and new dfs together, then fill in any events only found in new, and then format
test_join <- full_join(test1,test2, by= event_header[[event]])
end=ncol(test_join)
mergeCols.x <- c("GeneID.x","geneSymbol.x","IJC_SAMPLE_1.x", "SJC_SAMPLE_1.x")
mergeCols.y <- c("GeneID.y", "geneSymbol.y","IJC_SAMPLE_1.y" ,"SJC_SAMPLE_1.y" )
test_join[is.na(test_join$IJC_SAMPLE_1.x),mergeCols.x] <-test_join[is.na(test_join$IJC_SAMPLE_1.x),mergeCols.y]
test_join <- select(test_join,-mergeCols.y)
colnames(test_join) <- c(colnames(test1),comp)
#consider adding the fold change here
test1 <- test_join
}
}
path <- paste('rmats_final',subtissue,sep = '/')
dir.create(path = path)
test1[,c('IJC_SAMPLE_1','SJC_SAMPLE_1')] <- apply(test1[,c('IJC_SAMPLE_1','SJC_SAMPLE_1')],2, function(x) sapply(x,function(y) strsplit(y,',')%>%unlist%>%as.numeric%>%sum) )
test1[is.na(test1)] <- 1
write.table(test1,paste(path,event,sep = '/'), col.names = T, row.names = F, quote = F, sep = '\t')
}
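# the per-subtissue table written above has one row per splicing event: gene info,
# the event coordinates from event_header, the subtissue's summed inclusion/skipping
# junction counts, and one PValue.<comparison>/FDR.<comparison> column pair per
# comparison involving the subtissue (events missing from a comparison get p = FDR = 1)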
combine_fromGTF.novel <- function(event,files,first=TRUE){
for(path in files){
if (first==TRUE){
if(nrow(read.table(paste0(path,'/fromGTF.novelEvents.',event,'.txt'),sep = '\t',header = F,stringsAsFactors = F))>1){
prev <- read.table(paste0(path,'/fromGTF.novelEvents.',event,'.txt'),sep = '\t',header = T,stringsAsFactors = F)
first <- FALSE
}
}else{
next1 <- read.table(paste0(path,'/fromGTF.novelEvents.',event,'.txt'),sep = '\t',header = T,stringsAsFactors = F)
      prev <- anti_join(next1,prev)%>%rbind(.,prev) # keep only rows not already seen, then stack; joins on all shared columns (slow, but works)
}
}
return(prev)
}
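# a roughly equivalent, simpler accumulation (assuming every file has a header
# and the same columns) would be:
# lapply(files, function(p) read.table(paste0(p, '/fromGTF.novelEvents.', event, '.txt'),
#                                      sep = '\t', header = TRUE, stringsAsFactors = FALSE)) %>%
#   bind_rows() %>% distinct()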
# generate tables with all novel events
t <- c('SE','RI','MXE','A5SS','A3SS')
files <- dir('~/NIH/autoRNAseq/old_rmats_out',full.names = T)
for(i in t){
  all_ev <- combine_fromGTF.novel(i, files) # was hard-coded to 'SE', which wrote SE events into every output file
write.table(all_ev,paste0('all.',i,'.novelevents.txt'),quote = F,col.names = T,row.names = F,sep = '\t')
}
i_event_header <- list(SE.MATS.JC.txt=c('chr' ,'strand', 'exonStart_0base', 'exonEnd', 'upstreamES', 'upstreamEE', 'downstreamES', 'downstreamEE'),
RI.MATS.JC.txt=c('chr' ,'strand', 'riExonStart_0base', 'riExonEnd' ,'upstreamES' ,'upstreamEE' ,'downstreamES' ,'downstreamEE'),
MXE.MATS.JC.txt=c('chr', 'strand', 'X1stExonStart_0base', 'X1stExonEnd', 'X2ndExonStart_0base', 'X2ndExonEnd' ,'upstreamES', 'upstreamEE', 'downstreamES', 'downstreamEE'),
A5SS.MATS.JC.txt=c('chr', 'strand', 'longExonStart_0base', 'longExonEnd', 'shortES', 'shortEE', 'flankingES', 'flankingEE'),
A3SS.MATS.JC.txt=c('chr', 'strand', 'longExonStart_0base', 'longExonEnd' ,'shortES', 'shortEE' ,'flankingES', 'flankingEE')
)
events <- names(i_event_header)
i_files <- dir('rmats_out')
subtissues_PE <- c("Retina_Adult.Tissue", "RPE_Cell.Line", "ESC_Stem.Cell.Line" , "RPE_Adult.Tissue" )# add body back in at some point
k <- combn(subtissues_PE,2,simplify = F)
for (i in 1:length(k)){
i_combination <- k[[i]]
for(j in 1:length(events)){
i_event <- events[j]
combine_PE_SE(combination = i_combination,event = i_event,files = i_files,event_header = i_event_header)
}
}# add PESE
for (combination in k){
target_files <- i_files[grepl(combination[1],i_files)]%>%.[grepl(combination[2],.)]%>%paste0('rmats_out/',.)
print(target_files)
unlink(target_files,recursive = T)
} # remove the per-comparison folders from rmats_out/ once their merged output exists
#system2('mv rmats_out/* rmats_comb/')
#couldn't get that^ to work, might have to just run it separately
#now combine everything together
#check to see if body files are alive
i_files <- dir('rmats_comb/')
subtissues <- c("RPE_Stem.Cell.Line","RPE_Cell.Line","Retina_Adult.Tissue","RPE_Fetal.Tissue","ESC_Stem.Cell.Line","Cornea_Adult.Tissue","Cornea_Fetal.Tissue",
"Cornea_Cell.Line","Retina_Stem.Cell.Line","RPE_Adult.Tissue")
for(j in 1:length(subtissues)){
i_subtissue <- subtissues[j]
for( i in 1:length(events)){
i_event=events[i]
combine_rmats_output(files = i_files,subtissue = i_subtissue,event = i_event,event_header = i_event_header)
}
}
#strategy: combine all novel events per type in one master file, then select specific ones
|
1cc47b38899dd98cc91e0863ebc6476319cb55e8 | ff73c5d42378641361604368ff62ebc3438dc967 | /R/tableau.R | c00c259a01982e688a0ddf2c9d6042db42966440 | [
"MIT"
] | permissive | ErandM50/tableau-scraping | 7a55988571121b7a9ae0ce31788c0299d6f62029 | 812b2f7606b5a913a37ace0c057225f676c1a6a3 | refs/heads/master | 2023-08-16T23:20:20.959855 | 2021-10-14T21:53:19 | 2021-10-14T21:53:19 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 4,312 | r | tableau.R | # The following script will get the session token, get the data,
# prompt the user to select a worksheet, parse the data into a dataframe
library(rvest)
library(rjson)
library(httr)
library(stringr)
#replace the hostname and the path if necessary
host_url <- "https://public.tableau.com"
path <- "/views/COVID-19inMissouri/COVID-19inMissouri"
body <- read_html(modify_url(host_url,
path = path,
query = list(":embed" = "y",":showVizHome" = "no")
))
data <- body %>%
html_nodes("textarea#tsConfigContainer") %>%
html_text()
json <- fromJSON(data)
url <- modify_url(host_url, path = paste(json$vizql_root, "/bootstrapSession/sessions/", json$sessionid, sep =""))
resp <- POST(url, body = list(sheet_id = json$sheetId), encode = "form")
data <- content(resp, "text")
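# the bootstrap response is two length-prefixed JSON chunks, "<n>;{...}<n>;{...}":
# the first holds session/config info, the second the actual presentation model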
extract <- str_match(data, "\\d+;(\\{.*\\})\\d+;(\\{.*\\})")
info <- fromJSON(extract[1,2]) # column 1 of str_match is the full match; the first capture group is the info payload
data <- fromJSON(extract[1,3])
worksheets = names(data$secondaryInfo$presModelMap$vizData$presModelHolder$genPresModelMapPresModel$presModelMap)
for(i in 1:length(worksheets)){
print(paste("[",i,"] ",worksheets[i], sep=""))
}
cat("select worksheet by index: ")
selected <- readLines("stdin",n=1);
worksheet <- worksheets[as.integer(selected)]
print(paste("you selected :", worksheet, sep=" "))
columnsData <- data$secondaryInfo$presModelMap$vizData$presModelHolder$genPresModelMapPresModel$presModelMap[[worksheet]]$presModelHolder$genVizDataPresModel$paneColumnsData
i <- 1
result <- list();
for(t in columnsData$vizDataColumns){
if (is.null(t[["fieldCaption"]]) == FALSE) {
paneIndex <- t$paneIndices
columnIndex <- t$columnIndices
if (length(t$paneIndices) > 1){
paneIndex <- t$paneIndices[1]
}
if (length(t$columnIndices) > 1){
columnIndex <- t$columnIndices[1]
}
result[[i]] <- list(
fieldCaption = t[["fieldCaption"]],
valueIndices = columnsData$paneColumnsList[[paneIndex + 1]]$vizPaneColumns[[columnIndex + 1]]$valueIndices,
aliasIndices = columnsData$paneColumnsList[[paneIndex + 1]]$vizPaneColumns[[columnIndex + 1]]$aliasIndices,
dataType = t[["dataType"]],
stringsAsFactors = FALSE
)
i <- i + 1
}
}
dataFull = data$secondaryInfo$presModelMap$dataDictionary$presModelHolder$genDataDictionaryPresModel$dataSegments[["0"]]$dataColumns
cstring <- list();
for(t in dataFull) {
if(t$dataType == "cstring"){
cstring <- t
break
}
}
data_index <- 1
name_index <- 1
frameData <- list()
frameNames <- c()
for(t in dataFull) {
for(index in result) {
if (t$dataType == index["dataType"]){
if (length(index$valueIndices) > 0) {
j <- 1
vector <- character(length(index$valueIndices))
for (it in index$valueIndices){
vector[j] <- t$dataValues[it+1]
j <- j + 1
}
frameData[[data_index]] <- vector
frameNames[[name_index]] <- paste(index$fieldCaption, "value", sep="-")
data_index <- data_index + 1
name_index <- name_index + 1
}
if (length(index$aliasIndices) > 0) {
j <- 1
vector <- character(length(index$aliasIndices))
for (it in index$aliasIndices){
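        # negative alias indices point into the shared string table (the "cstring"
        # column found above) rather than into this column's own dataValues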
if (it >= 0){
vector[j] <- t$dataValues[it+1]
} else {
vector[j] <- cstring$dataValues[abs(it)]
}
j <- j + 1
}
frameData[[data_index]] <- vector
frameNames[[name_index]] <- paste(index$fieldCaption, "alias", sep="-")
data_index <- data_index + 1
name_index <- name_index + 1
}
}
}
}
df <- NULL
lengthList <- c()
for(i in 1:length(frameNames)){
lengthList[[i]] <- length(frameData[[i]])
}
max_len <- max(lengthList)
for(i in 1:length(frameNames)){
  if (length(frameData[[i]]) < max_len){
    len <- length(frameData[[i]])
    frameData[[i]][(len+1):max_len] <- ""
  }
  df[frameNames[i]] <- frameData[i]
}
options(width = 1200)
df <- as.data.frame(df, stringsAsFactors = FALSE)
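# optionally persist the scraped worksheet, e.g.:
# write.csv(df, "worksheet.csv", row.names = FALSE)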
print(df) |
de501b19e258c363b11fc0525d67329e0b5dea6c | 50ea9fdc986b4e919cb09525be4e50fbbaf4f97a | /R/Agg.R | 2d5a3dc09b9b99b48bb875285e1b0a68a60d51b4 | [] | no_license | cran/EmiStatR | 90e991b998bb100b8344447399d6bb952380cfad | 2bc9c4a7ec754ec720dd81ea079784a39359ac9d | refs/heads/master | 2021-10-06T11:31:48.483290 | 2021-09-28T06:30:06 | 2021-09-28T06:30:06 | 63,985,573 | 1 | 0 | null | null | null | null | UTF-8 | R | false | false | 2,220 | r | Agg.R | # aggregation of data.frame function
# author: J.A. Torres-Matallana
# organization: LIST
# date: 15.07.2015 - 19.07.2016
# data <- P1
# nameData <- deparse(substitute(P1))
# delta <- 1
# func <- "sum"
# data <- wlt_obs
# nameData <- "wlt_obs"
# delta <- 1
# func <- "mean"
Agg <- function(data, nameData, delta, func, namePlot){
# data <- var; nameData <- var.name; delta <- 60; func <- "mean"; namePlot <- "hourly"
#---------------------------------------------------------------------------------------------------------
# aggregating to 10, 30, 60 min resolution
#---------------------------------------------------------------------------------------------------------
tt <- as.POSIXct(data[,1], tz="UTC")
# delta min
dt <- 60/1*delta # 60_s/1_min * delta_min = dt_s
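  # floor each timestamp to the start of its dt-second window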
  bucket <- tt - as.numeric(tt) %% dt
namePlot <- paste(namePlot, "(res =", delta, "min)", sep=" ")
if(nameData == "P1"){
P1 <- data
head(P1)
ts <- aggregate(P1$rainfall, list(bucket), func)
ts[,3] <- NA
colnames(ts) <- c("time", "rainfall", "intensity")
#length(P1$Rainfall)
#length(ts$Rainfall)
# head(ts)
par(mfrow = c(2, 1))
par(mar = rep(2, 4)) #------------------------------------------ added after MC set-up
plot(P1$rainfall, type="l", main=namePlot) #------------------------------------------ commented after MC set-up
plot(ts$rainfall, type="l")
P1 <- ts
# head(P1)
# save(P1, file="P1.RData")
return(P1)
}else{
obs <- data
head(obs)
ts <- aggregate(data$value, list(bucket), func)
head(ts)
head(obs)
length(obs$value)
length(ts$x)
# commented out to avoid creation of local file (pdf plot)
#pdf(paste(namePlot, ".pdf", sep=""), pointsize=10)
#par(mfrow = c(2,1))
#par(cex.lab=1, cex.axis=1., cex.main = 1.5)
#plot(obs$time,obs$value, type="l", main="Original time series", xlab = "Time", ylab = nameData)#------ commented after MC set-up
#plot(ts[,1],ts$x, type="l", main=namePlot, xlab = "Time", ylab = nameData)
#dev.off()
colnames(ts) <- c("time", "value")
obs <- ts
#save(obs, file="obs.RData")
return(obs)
}
} |
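
# Illustrative calls, mirroring the commented test inputs above:
# P1.10min   <- Agg(data = P1, nameData = "P1", delta = 10, func = "sum", namePlot = "10-min")
# obs.hourly <- Agg(data = wlt_obs, nameData = "wlt_obs", delta = 60, func = "mean", namePlot = "hourly")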
0815abfc1ddd7e0c85186b1883a70dd327c021c1 | c13fd87cbb4066729f529a09f06db1649b7d1d6f | /man/int.moran.Rd | 9ab1fc0a831cba33b50bcc55f76d34c1c66e6db1 | [] | no_license | cran/EcoGenetics | 05c6bd845a714051c5317f3e70ececf8b2dbc40b | 46b904508a5958f3cb11513b83cc6b69c3c0a3ab | refs/heads/master | 2021-01-17T14:00:47.875370 | 2020-05-24T14:20:17 | 2020-05-24T14:20:17 | 30,083,571 | 2 | 4 | null | 2018-01-12T04:31:38 | 2015-01-30T17:44:07 | R | UTF-8 | R | false | true | 1,015 | rd | int.moran.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/int.moran.R
\name{int.moran}
\alias{int.moran}
\title{Moran internal.}
\usage{
int.moran(
Z,
con,
nsim,
alternative,
test = "permutation",
adjust.n = FALSE,
plotit
)
}
\arguments{
\item{Z}{Vector, matrix or data frame.}
\item{con}{Connection network.}
\item{nsim}{Number of Monte-Carlo simulations.}
\item{alternative}{The alternative hypothesis. If "auto" is selected (default), the
program determines the hypothesis from the difference between the median of the
simulations and the observed value. Other options are: "two.sided", "greater" and "less".
If test == "cross", for the first interval (d == 0) the p-value and CI are computed with cor.test.}
\item{adjust.n}{Should the number of individuals be adjusted? (Warning: this would
change the variances.)}
\item{plotit}{Should a plot of the simulations be generated?}
}
\description{
Moran internal.
}
\author{
Leandro Roser \email{leandroroser@ege.fcen.uba.ar}
}
\keyword{internal}
|
e8593d5a82f8d6354f0fb43c24c4d4008b1af40c | 9df16ef128620c9d4ddc2c94cf5bd348641b4256 | /R/backshift.R | bb36759def330b86ecbefe299418cabdfa482498 | [] | no_license | aushaff/echanFuncs | 4fad7a82a5eba78ed2a9b785994d569318518e6d | 26fcb9b7f98478e7be551a5f7c20cf80e6dba863 | refs/heads/master | 2020-03-07T10:37:46.157249 | 2018-05-04T15:28:47 | 2018-05-04T15:28:47 | 127,436,226 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 231 | r | backshift.R | backshift <- function(day, x) {
  stopifnot(day >= 0)
  # pad the front with `day` NaNs and drop the last `day` observations
  y <- c(rep(NaN, day), x[seq_len(length(x) - day)])
  y
}
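
# Example: backshift(2, 1:5) returns NaN NaN 1 2 3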
# function y=backshift(day,x)
# % y=backshift(day,x)
# assert(day>=0);
# y=[NaN(day,size(x,2), size(x, 3));x(1:end-day,:, :)];
|
20a5da5234ebd62e9e8796c4cf9de0359632ab15 | 475856e028f1ab7e8259c8d34f02bdcaefcf4db0 | /R/phalfcauchy.R | 8c252a7e1211f8af51666e45420ce7f333bd0c5d | [] | no_license | cran/BNPdensity | 44ee5d2ebc9aae83b243574090d211514014a9d1 | 314c943b35f83098fa87c7b15a16757c7334d346 | refs/heads/master | 2023-04-07T12:52:52.293810 | 2023-03-24T14:10:02 | 2023-03-24T14:10:02 | 17,677,929 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 559 | r | phalfcauchy.R | #' Distribution function half Cauchy
#'
#' Computes the cdf.
#'
#' For internal use
#'
#' @keywords internal
#' @examples
#'
#' ## The function is currently defined as
#' function(q, location = 0, scale = 1) {
#'   ifelse(q < 0, 0, 1) * (pcauchy(q, location, scale) - pcauchy(
#' 0,
#' location, scale
#' )) / (1 - pcauchy(0, location, scale))
#' }
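#'
#' # sanity checks: phalfcauchy(0) == 0 and phalfcauchy(Inf) == 1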
phalfcauchy <-
function(q, location = 0, scale = 1) {
ifelse(q < 0, 0, 1) * (pcauchy(q, location, scale) - pcauchy(
0,
location, scale
)) / (1 - pcauchy(0, location, scale))
}
|
55d8b4815bcf23eaf2930f02d5f82607b75657e8 | 195bdc0c32027ee636aca6e1cd31f756b5e37571 | /code/DE-Workflow-Implementation/ref_script/COV_edgeR.R | 9ac097264152ddbdd93486b18b1e0f7fd9b63bf8 | ["MIT"] | permissive | noobCoding/Benchmarking-integration-of-differential-expression | e59a86413e2f50bac8150d356fe6538527f41868 | fc715ff54f56652e84dcacf9ab7f4fe93e7cd7f0 | refs/heads/main | 2023-05-11T13:36:37.736002 | 2023-05-09T08:24:54 | 2023-05-09T08:24:54 | 451,292,650 | 8 | 1 | null | null | null | null | UTF-8 | R | false | false | 1,419 | r | COV_edgeR.R | # input: raw-count or pseudobulk data; `processed` receives the raw count matrix
run_edgeR<-function(processed,cellinfo,cov=T,Det=F,former.meth=''){
  library(edgeR)
  library(magrittr)  # provides the %<>% compound-assignment pipe used below
count_df<-processed
rownames(cellinfo)=cellinfo$Cell
cellinfo<-cellinfo[colnames(processed),]
cellinfo$Group%<>%factor()
cellinfo$Batch%<>%factor()
cellinfo.cov<-cellinfo[,c('Group','Batch')]
y <- DGEList(counts=count_df, group=cellinfo.cov$Group)
y <- calcNormFactors(y)
cellGroup <- factor(cellinfo.cov$Group)
cellBatch <- factor(cellinfo.cov$Batch)
cdr <- scale(colMeans(count_df > 0))
if(Det){
if(cov){
design<-model.matrix(~cellGroup+cdr+cellBatch)
}else{
design <- model.matrix(~cellGroup+cdr)
}
}else{
if(cov){
design<-model.matrix(~cellGroup+cellBatch)
}else{
design <- model.matrix(~cellGroup)
}
}
rownames(design) <- colnames(y)
y <- estimateDisp(y, design, robust=TRUE)
fit <- glmQLFit(y, design, robust=TRUE, prior.df = 0)
qlf <- glmQLFTest(fit, coef=2)
FDR<-p.adjust(qlf$table$PValue,method = "BH")
qlf$table$FDR <- FDR
res <- data.frame('pvalue' = qlf$table$PValue, 'adjpvalue' = qlf$table$FDR, 'logFC' = qlf$table$logFC)
rownames(res) <- rownames(qlf)
res_name<-paste0(ifelse(former.meth=='','',paste0(former.meth,'+')),'edgeR',ifelse(Det,'_Detrate',''),ifelse(cov,'_Cov',''))
save(res, cellinfo, file=paste0('./',res_name,'.rda'))
return(res_name)
}
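
# Illustrative call (hypothetical objects): `counts` is a gene-by-cell count
# matrix and `cellinfo` a data frame with Cell, Group and Batch columns whose
# Cell values match colnames(counts):
# res_name <- run_edgeR(processed = counts, cellinfo = cellinfo, cov = TRUE, Det = FALSE)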
|
66833bf72d8263582e97d07926e00f53eda8499f | 35f844f6f5145265ffdd48c14522756030263dba | /man/TableStyle.Rd | c5a4f6d97c699a4ae187573baf23c1850c5c86fa | [] | no_license | cbailiss/basictabler | 708aa9e2da21b65ef65f529be868ebcbfee4373c | 63486f6acd7b163c28839dd607a98f4bd0e7920d | refs/heads/master | 2021-07-16T05:12:03.971848 | 2021-07-01T20:48:46 | 2021-07-01T20:48:46 | 104,359,939 | 33 | 2 | null | null | null | null | UTF-8 | R | false | true | 7,325 | rd | TableStyle.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/TableStyle.R
\docType{class}
\name{TableStyle}
\alias{TableStyle}
\title{R6 class that specifies styling.}
\format{
\code{\link{R6Class}} object.
}
\description{
The `TableStyle` class specifies the styling for headers and cells in a
table. Styles are specified in the form of Cascading Style Sheet (CSS)
name-value pairs.
}
\examples{
# TableStyle objects are normally created indirectly via one of the helper
# methods.
# For an example, see the `TableStyles` class.
}
\section{Active bindings}{
\if{html}{\out{<div class="r6-active-bindings">}}
\describe{
\item{\code{name}}{The unique name of the style (must be unique among the style
names in the table theme).}
\item{\code{declarations}}{A list containing CSS style declarations.
Example: `declarations = list(font="...", color="...")`}
}
\if{html}{\out{</div>}}
}
\section{Methods}{
\subsection{Public methods}{
\itemize{
\item \href{#method-new}{\code{TableStyle$new()}}
\item \href{#method-setPropertyValue}{\code{TableStyle$setPropertyValue()}}
\item \href{#method-setPropertyValues}{\code{TableStyle$setPropertyValues()}}
\item \href{#method-getPropertyValue}{\code{TableStyle$getPropertyValue()}}
\item \href{#method-asCSSRule}{\code{TableStyle$asCSSRule()}}
\item \href{#method-asNamedCSSStyle}{\code{TableStyle$asNamedCSSStyle()}}
\item \href{#method-getCopy}{\code{TableStyle$getCopy()}}
\item \href{#method-asList}{\code{TableStyle$asList()}}
\item \href{#method-asJSON}{\code{TableStyle$asJSON()}}
\item \href{#method-clone}{\code{TableStyle$clone()}}
}
}
\if{html}{\out{<hr>}}
\if{html}{\out{<a id="method-new"></a>}}
\if{latex}{\out{\hypertarget{method-new}{}}}
\subsection{Method \code{new()}}{
Create a new `TableStyle` object.
\subsection{Usage}{
\if{html}{\out{<div class="r">}}\preformatted{TableStyle$new(parentTable, styleName = NULL, declarations = NULL)}\if{html}{\out{</div>}}
}
\subsection{Arguments}{
\if{html}{\out{<div class="arguments">}}
\describe{
\item{\code{parentTable}}{Owning table.}
\item{\code{styleName}}{A unique name for the style.}
\item{\code{declarations}}{A list containing CSS style declarations.
Example: `declarations = list(font="...", color="...")`}
}
\if{html}{\out{</div>}}
}
\subsection{Returns}{
No return value.
}
}
\if{html}{\out{<hr>}}
\if{html}{\out{<a id="method-setPropertyValue"></a>}}
\if{latex}{\out{\hypertarget{method-setPropertyValue}{}}}
\subsection{Method \code{setPropertyValue()}}{
Set the value of a single style property.
\subsection{Usage}{
\if{html}{\out{<div class="r">}}\preformatted{TableStyle$setPropertyValue(property = NULL, value = NULL)}\if{html}{\out{</div>}}
}
\subsection{Arguments}{
\if{html}{\out{<div class="arguments">}}
\describe{
\item{\code{property}}{The CSS style property name, e.g. color.}
\item{\code{value}}{The value of the style property, e.g. red.}
}
\if{html}{\out{</div>}}
}
\subsection{Returns}{
No return value.
}
}
\if{html}{\out{<hr>}}
\if{html}{\out{<a id="method-setPropertyValues"></a>}}
\if{latex}{\out{\hypertarget{method-setPropertyValues}{}}}
\subsection{Method \code{setPropertyValues()}}{
Set the values of multiple style properties.
\subsection{Usage}{
\if{html}{\out{<div class="r">}}\preformatted{TableStyle$setPropertyValues(declarations = NULL)}\if{html}{\out{</div>}}
}
\subsection{Arguments}{
\if{html}{\out{<div class="arguments">}}
\describe{
\item{\code{declarations}}{A list containing CSS style declarations.
Example: `declarations = list(font="...", color="...")`}
}
\if{html}{\out{</div>}}
}
\subsection{Returns}{
No return value.
}
}
\if{html}{\out{<hr>}}
\if{html}{\out{<a id="method-getPropertyValue"></a>}}
\if{latex}{\out{\hypertarget{method-getPropertyValue}{}}}
\subsection{Method \code{getPropertyValue()}}{
Get the value of a single style property.
\subsection{Usage}{
\if{html}{\out{<div class="r">}}\preformatted{TableStyle$getPropertyValue(property = NULL)}\if{html}{\out{</div>}}
}
\subsection{Arguments}{
\if{html}{\out{<div class="arguments">}}
\describe{
\item{\code{property}}{The CSS style property name, e.g. color.}
}
\if{html}{\out{</div>}}
}
\subsection{Returns}{
The value of the specified style property.
}
}
\if{html}{\out{<hr>}}
\if{html}{\out{<a id="method-asCSSRule"></a>}}
\if{latex}{\out{\hypertarget{method-asCSSRule}{}}}
\subsection{Method \code{asCSSRule()}}{
Generate a CSS style rule from this table style.
\subsection{Usage}{
\if{html}{\out{<div class="r">}}\preformatted{TableStyle$asCSSRule(selector = NULL)}\if{html}{\out{</div>}}
}
\subsection{Arguments}{
\if{html}{\out{<div class="arguments">}}
\describe{
\item{\code{selector}}{The CSS selector name. Default value `NULL`.}
}
\if{html}{\out{</div>}}
}
\subsection{Returns}{
The CSS style rule, e.g. { text-align: center; color: red; }
}
}
\if{html}{\out{<hr>}}
\if{html}{\out{<a id="method-asNamedCSSStyle"></a>}}
\if{latex}{\out{\hypertarget{method-asNamedCSSStyle}{}}}
\subsection{Method \code{asNamedCSSStyle()}}{
Generate a named CSS style from this table style.
\subsection{Usage}{
\if{html}{\out{<div class="r">}}\preformatted{TableStyle$asNamedCSSStyle(styleNamePrefix = NULL)}\if{html}{\out{</div>}}
}
\subsection{Arguments}{
\if{html}{\out{<div class="arguments">}}
\describe{
\item{\code{styleNamePrefix}}{A character variable specifying a prefix for all named
CSS styles, to avoid style name collisions where multiple tables exist.}
}
\if{html}{\out{</div>}}
}
\subsection{Returns}{
The CSS style rule, e.g. cell { text-align: center; color: red; }
}
}
\if{html}{\out{<hr>}}
\if{html}{\out{<a id="method-getCopy"></a>}}
\if{latex}{\out{\hypertarget{method-getCopy}{}}}
\subsection{Method \code{getCopy()}}{
Create a copy of this `TableStyle` object.
\subsection{Usage}{
\if{html}{\out{<div class="r">}}\preformatted{TableStyle$getCopy(newStyleName = NULL)}\if{html}{\out{</div>}}
}
\subsection{Arguments}{
\if{html}{\out{<div class="arguments">}}
\describe{
\item{\code{newStyleName}}{The name of the new style.}
}
\if{html}{\out{</div>}}
}
\subsection{Returns}{
The new `TableStyle` object.
}
}
\if{html}{\out{<hr>}}
\if{html}{\out{<a id="method-asList"></a>}}
\if{latex}{\out{\hypertarget{method-asList}{}}}
\subsection{Method \code{asList()}}{
Return the contents of this object as a list for debugging.
\subsection{Usage}{
\if{html}{\out{<div class="r">}}\preformatted{TableStyle$asList()}\if{html}{\out{</div>}}
}
\subsection{Returns}{
A list of various object properties.
}
}
\if{html}{\out{<hr>}}
\if{html}{\out{<a id="method-asJSON"></a>}}
\if{latex}{\out{\hypertarget{method-asJSON}{}}}
\subsection{Method \code{asJSON()}}{
Return the contents of this object as JSON for debugging.
\subsection{Usage}{
\if{html}{\out{<div class="r">}}\preformatted{TableStyle$asJSON()}\if{html}{\out{</div>}}
}
\subsection{Returns}{
A JSON representation of various object properties.
}
}
\if{html}{\out{<hr>}}
\if{html}{\out{<a id="method-clone"></a>}}
\if{latex}{\out{\hypertarget{method-clone}{}}}
\subsection{Method \code{clone()}}{
The objects of this class are cloneable with this method.
\subsection{Usage}{
\if{html}{\out{<div class="r">}}\preformatted{TableStyle$clone(deep = FALSE)}\if{html}{\out{</div>}}
}
\subsection{Arguments}{
\if{html}{\out{<div class="arguments">}}
\describe{
\item{\code{deep}}{Whether to make a deep clone.}
}
\if{html}{\out{</div>}}
}
}
}
|
442801dc4f170d3b9496628b75cb2869b2bf4872 | 39597a6d5fb41d0caa9e0e7cd5dd19331556f440 | /man/lsid.Rd | 2d963355191f938dafdb3ceb6783f7966be0a2a0 | [] | no_license | cran/spidR | 2444e78e3b6c89a4349522ef95be483f23ed83ed | 8d4b6d0afa3cae7bbdd9cd96ff1445c3f6f83b12 | refs/heads/master | 2023-08-21T00:44:58.438942 | 2021-10-18T06:20:02 | 2021-10-18T06:20:02 | 349,128,766 | 0 | 0 | null | null | null | null | UTF-8 | R | false | true | 865 | rd | lsid.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/spidR.R
\name{lsid}
\alias{lsid}
\title{Get species LSID from WSC.}
\usage{
lsid(tax, order = FALSE)
}
\arguments{
\item{tax}{A taxon name or vector with taxa names.}
\item{order}{Order taxa names alphabetically or keep as in tax.}
}
\value{
A data.frame with species and LSID.
}
\description{
Get species LSID from the World Spider Catalogue.
}
\details{
This function will get species LSID from the World Spider Catalogue (2021). Family and genera names will be converted to species.
}
\examples{
\dontrun{
lsid("Anapistula")
lsid(tax = c("Iberesia machadoi", "Nemesia bacelarae", "Amphiledorus ungoliantae"), order = TRUE)
}
}
\references{
World Spider Catalog (2021). World Spider Catalog. Version 22.0. Natural History Museum Bern, online at http://wsc.nmbe.ch. doi: 10.24436/2.
}
|
122687c8943235bab5505049c1690498e41be870 | 61fafe1ef929fcde59954f0ec5a7acd341ac5148 | /R/getVarExpSim.R | 012a638d0319054570e9694953405a9395d3df06 | [] | no_license | cran/Mangrove | fc1d63bd9dbc237d57540acd7fc5151f2f4cd205 | a302411add434c6056126a3fd3eded27339d47aa | refs/heads/master | 2021-01-23T11:48:12.462119 | 2017-02-22T16:15:37 | 2017-02-22T16:15:37 | 17,680,818 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 358 | r | getVarExpSim.R | getVarExpSim <-
function(ORs,K,iter=1000){
# calculate variance explained by simulation:
# each replicate draws two Bernoulli(ORs[,5]) allele copies per variant,
# giving 0, 1 or 2 risk-allele copies
sam <- replicate(iter, (runif(ORs[,5]) < ORs[,5]) + (runif(ORs[,5]) < ORs[,5]))
# per-replicate risk score: 1 for no copies, ORs[,3] for one, ORs[,4] for two,
# each scaled by ORs[,6]
temp <- apply(sam, 2, function(x) prod((1*(x == 0) + ORs[,3]*(x == 1) + ORs[,4]*(x == 2))/ORs[,6]))
post <- applyORs(temp, K)
# convert posterior risks to liability-scale shifts and return their variance
thresh <- qnorm(1 - K)  # renamed from `T`, which masks base::T (TRUE)
mu <- thresh - qnorm(1 - post)
return(var(mu))
}
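
# Illustrative call (hypothetical `ORs` table laid out as assumed above; K is
# the population prevalence):
# h2.liability <- getVarExpSim(ORs, K = 0.01, iter = 5000)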
|
aa187644f6833fb3e399a15eb43098e8fbbfeb6d | 249ab3aea059a14d18d9dfcb4a1b9558ce09608c | /4 - Exploratory Data Analysis/Week 4/plot1.R | b257b55d9378a387780e10ac595b7149e0d3c67c | [] | no_license | sawyerWeld/DataScience-Coursera | e64cd3556a81d73f3e957d9915345771a79d9b2a | 9ee5959684c1d95c2007a8aa60012fdecd726afd | refs/heads/master | 2021-06-22T14:47:38.903318 | 2017-08-30T23:29:19 | 2017-08-30T23:29:19 | 93,285,651 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 416 | r | plot1.R | # Load in data
NEI <- readRDS("summarySCC_PM25.rds")
SCC <- readRDS("Source_Classification_Code.rds")
# Aggregate data necessary for plot
emissions.by.Year <- aggregate(Emissions ~ year, NEI, sum)
# Plot
png('plot1.png')
barplot(height=emissions.by.Year$Emissions, names.arg=emissions.by.Year$year, xlab="Year", ylab=expression('Total PM'[2.5]*''),main=expression('Total PM'[2.5]*' by Year 1999 - 2008'))
dev.off() |
772b1c3568628b4dbf2ba1e26d04803b7c54a880 | 3edcc98f5a87b4bfe1f395588fb8c5a78e7d76c8 | /plot2.R | c978353e2c33fa95b418f976cfd9477c60e0a84f | [] | no_license | Nid0/ExData_Plotting1 | ae5403672566ae5509103162e5dd574d31daee5c | 0069ac433e9cc0865285e6df6aa0034f37e6e812 | refs/heads/master | 2020-12-31T03:56:16.772934 | 2015-01-11T14:50:27 | 2015-01-11T14:50:27 | 28,992,756 | 0 | 0 | null | 2015-01-09T00:15:49 | 2015-01-09T00:15:49 | null | UTF-8 | R | false | false | 862 | r | plot2.R | ## Reading dataset from .txt file.
dataset <- read.csv("~/downloads/household_power_consumption.txt", header= TRUE, sep =";",
na.strings="?" ,nrows= 2075259, check.names=F, stringsAsFactors=F, comment.char="", quote='\"')
## Defining date format.
dataset$Date <- as.Date(dataset$Date, format="%d/%m/%Y")
## Subsetting data to the dates between 2007-02-01 and 2007-02-02.
data <- subset(dataset, subset=(Date >= "2007-02-01" & Date <= "2007-02-02"))
rm(dataset)
## Converting date.
datetime <- paste(as.Date(data$Date), data$Time)
data$datetime <- as.POSIXct(datetime)
## Plotting: Plot2.
plot(data$Global_active_power~data$datetime, type="l",
xlab="", ylab="Global Active Power (kilowatts)")
## Saving the plot to a .png file.
dev.copy(png, file="~/documents/ExData_Plotting1/plot2.png", height=480, width=480)
dev.off()
|
5f35f4bf9beccd392abe4192f753796a17b35f4b | d11dba6dafe5f5204743e03662d8d6d216672393 | /man/ip_in_any.Rd | 90c0304989b57af2830c1c9d6496444873fd5fbd | [] | no_license | ktargows/iptools | d7b6e260296750198444b0edde26a09df4ad3630 | d3d85680cd85d276672a42f4bbdeb8fac3d8758e | refs/heads/master | 2021-01-11T01:55:03.682784 | 2016-10-06T01:54:41 | 2016-10-06T01:54:41 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | true | 1,127 | rd | ip_in_any.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/RcppExports.R
\name{ip_in_any}
\alias{ip_in_any}
\title{check if IP address falls within any of the ranges specified}
\usage{
ip_in_any(ip_addresses, ranges)
}
\arguments{
\item{ip_addresses}{character vector of IP addresses}
\item{ranges}{character vector of CIDR ranges}
}
\value{
a logical vector of whether a given IP was in any of the ranges
}
\description{
\code{ip_in_any} checks whether each IP address in a vector
falls within any of the specified ranges.
}
\examples{
\dontrun{
north_america <- unlist(country_ranges(countries=c("US", "CA", "MX")))
germany <- unlist(country_ranges("DE"))
set.seed(1492)
targets <- ip_random(1000)
for_sure <- range_generate(sample(north_america, 1))
all(ip_in_any(for_sure, north_america)) # should be TRUE
## [1] TRUE
absolutely_not <- range_generate(sample(germany, 1))
any(ip_in_any(absolutely_not, north_america)) # should be FALSE
## [1] FALSE
who_knows_na <- ip_in_any(targets, north_america)
who_knows_de <- ip_in_any(targets, germany)
sum(who_knows_na)
## [1] 464
sum(who_knows_de)
## [1] 43
}
}
|
f134b43c686aaec53a5eeb78ae5bb7a6c020f8ba | 782239ceca3eda22f993f8421f47566ca92955c1 | /09-12-18_notes341.R | 3ff631e37d5ace29db04ecc2cd61248e80829f99 | [] | no_license | adraper2/stats-code | c1c55b69468dfa06141c45d902083ebb13e60579 | c333ff462eb76ac193f5bc8039ce09208b5c4e58 | refs/heads/master | 2020-03-30T14:40:14.932382 | 2018-12-22T23:47:20 | 2018-12-22T23:47:20 | 151,329,629 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 164 | r | 09-12-18_notes341.R | #pmf <- vector(mode="list", length=4)
#names(pmf) <- c("2","3","5","7")
#pmf[[1]]
k <- 1:100
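# Binomial(n = 100, p = 0.75) pmf evaluated at k = 1..100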
pmf <- choose(100,k) * .75**k * (1-.75)**(100-k)
hist(pmf)
plot(pmf)
|
139f8eb0fbb7fb30403346509fad122b6eed9c28 | eee770b9f658ede3f74d365e86111a7edb99e764 | /import.R | 5c5726e49b2a8ea9933483dce0504b8028dcea8f | [] | no_license | maxheld83/unicorn | 7cf51d20b332df35f12d0015894e128ba8912599 | 64e4bcc5de9d5efcbc9c52409ae172a0c78be35f | refs/heads/master | 2020-06-18T18:50:55.575882 | 2019-07-12T08:41:47 | 2019-07-12T08:41:47 | 196,407,785 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 370 | r | import.R | if (FALSE) {
library(googlesheets)
unicorn <- gs_key(x = "1WEvq7XKALZcNlKc09rPYI9vcIFTAJUXrRwHCRhQ6GYI")
votes <- gs_read(ss = unicorn, ws = "votes", col_types = "ccdc")
tweets <- gs_read(ss = unicorn, ws = "tweets", col_types = "ciii")
readr::write_rds(x = list(votes = votes, tweets = tweets), path = "data.rds")
}
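# the guarded block above is run manually once to refresh data.rds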
data <- readr::read_rds(path = "data.rds")
|
600c2bf27d697950650e3f095a3a6d5e54b970be | 110096cf62a7ac937f3cf84308a430942641629e | /data-collection/process-comments.journal.R | 72c218286a8b188c1ef953e05c9b262d1d892cd3 | [] | no_license | s-ben/pi-research | e9fc10b6036a271105ff7dccd333fda534b3d75b | 98d4c30a97580d093368fd78480b24902627a04e | refs/heads/master | 2020-04-07T06:32:41.963810 | 2018-11-21T12:02:05 | 2018-11-21T12:02:05 | 158,140,080 | 0 | 0 | null | 2018-11-19T00:17:28 | 2018-11-19T00:17:28 | null | UTF-8 | R | false | false | 2,290 | r | process-comments.journal.R |
library(jsonlite)
library(RCurl)
props =read.csv("prop-urls.csv", stringsAsFactors = FALSE )
get.comments = function(url)
{
url = paste(url, "/plugins/decred/comments.journal", sep="")
#fetch the prop's comments.journal
prop.input = getURL(url)
#processing to make this a valid json object
prop.input = gsub("}{", ",", prop.input, fixed = TRUE)
prop.input = gsub("}", "},", prop.input, fixed = TRUE)
prop.input = gsub("\n", "", prop.input, fixed = TRUE)
prop.input = gsub("\t", "", prop.input, fixed = TRUE)
prop.input = gsub("\\.", "", prop.input, fixed = TRUE)
prop.input = gsub("\"action\":\"-1\"", "\"vote\": \"-1\"", prop.input, fixed = TRUE)
prop.input = gsub("\"action\":\"1\"", "\"vote\": \"1\"", prop.input, fixed = TRUE)
prop.input = paste("{\"proposals\": [", prop.input, sep="")
prop.input = paste(prop.input, "}", sep="")
prop.input = gsub(",}", "]}", prop.input, fixed = TRUE)
#read the json
prop = fromJSON(prop.input, flatten = TRUE)
prop1 = prop[[1]]
prop = as.data.frame(prop1)
#split comments and votes into different data frames
prop.comments = prop[prop$action == "add",]
prop.comment.votes = prop[prop$action == "addlike",]
proposal = prop$token[1]
#write.csv(prop.comments, file = paste(proposal, "-comments.csv", sep=""), row.names = FALSE)
#write.csv(prop.comment.votes, file = paste(proposal, "-votes.csv", sep=""), row.names = FALSE)
return(prop)
}
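
# Build `df`, the combined journal across all proposals, used below.
# Assumption: prop-urls.csv exposes each proposal URL in a column named `url`;
# adjust the column name if the actual header differs.
df = do.call(rbind, lapply(props$url, get.comments))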
df.comments = df[df$action == "add",]
df.comment.votes = df[df$action == "addlike",]
df.comments$score = 0
df.comments$votes = 0
for(p in unique(df.comments$token))
{
votes = df.comment.votes[df.comment.votes$token == p,]
comments = unique(votes$commentid)
for(c in comments)
{
relvotes = votes[votes$commentid == c,]
score = sum(as.numeric(relvotes$vote))
commentvotes = length(as.numeric(relvotes$vote))
df.comments$score[df.comments$token == p & df.comments$commentid == c] = score
df.comments$votes[df.comments$token == p & df.comments$commentid == c] = commentvotes
}
}
write.csv(df.comments, file = paste("pi-comments.csv", sep=""), row.names = FALSE)
write.csv(df.comment.votes, file = paste("pi-comment-votes.csv", sep=""), row.names = FALSE)
|
0e6cb5d19e5731073f581bffeeb325bc6679763d | 29585dff702209dd446c0ab52ceea046c58e384e | /magclass/R/magpieResolution.R | 9f86e7ee964e91b61eaa01b329910883033faced | [] | no_license | ingted/R-Examples | 825440ce468ce608c4d73e2af4c0a0213b81c0fe | d0917dbaf698cb8bc0789db0c3ab07453016eab9 | refs/heads/master | 2020-04-14T12:29:22.336088 | 2016-07-21T14:01:14 | 2016-07-21T14:01:14 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 403 | r | magpieResolution.R | magpieResolution<- function(object) {
  if (!is.magpie(object)) {
    stop("Object is not a magpie object")
} else {
n_magpie_regions <-length(getRegions(object))
n_magpie_cells <-dim(object)[[1]]
if (n_magpie_cells==1) {
resolution<-"glo"
} else if(n_magpie_cells==n_magpie_regions) {
resolution<-"reg"
} else {
resolution<-"cell"
}
}
return(resolution)
}
|
4659a2ad4c68875ec1047e1787512a4c768a0a8f | 311244ce857f5abc9b5135f7d7cab5e1019dfc34 | /R/tutorial/calmap.r | 33b5095b605ead240ca67b0522293003620ed6bc | ["MIT"] | permissive | REFRAME/betacal | c4d2231ff26dd4128b602004be2f7d952527d6f6 | 7c4a733a1f5b52a8a1700a8e793ac75ec16c9177 | refs/heads/master | 2021-07-14T00:04:04.805961 | 2021-03-11T19:58:21 | 2021-03-11T19:58:21 | 80,637,377 | 10 | 3 | null | null | null | null | UTF-8 | R | false | false | 1,086 | r | calmap.r | library(ggplot2)
library(latex2exp)
library(reshape2)
plot_calibration_map <- function(scores_set, info, legend_set, color_set, alpha=1){
n_lines <- length(legend_set)
sizes <- seq(1.5, 0.5, length.out = n_lines)
bins <- seq(0, 1, length.out = 11)
hist_tot <- hist(info$prob, breaks=bins, plot = FALSE)
hist_pos <- hist(info$prob[info$labels == 1], breaks=bins, plot = FALSE)
centers <- hist_tot$mids
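  # per-bin empirical event rate with additive (Laplace-style) smoothing via alpha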
empirical <- (hist_pos$counts+alpha) / (hist_tot$counts+2*alpha)
pdata <- melt(scores_set, id="linspace")
i <- 1
g <- ggplot(pdata, aes(x=linspace, y=value, colour=variable))
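  # add one geom_line layer per entry in legend_set, with decreasing line width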
for (legend in legend_set){
g <- g + geom_line(size=sizes[i])
i <- i + 1
}
df <- data.frame(centers, empirical)
d <- melt(df, id="centers")
g <- g + geom_point(data=d, aes(x=centers, y=value, colour=variable))
g <- g + scale_colour_manual(values=c(color_set,'black'))
g <- g + labs(x=TeX("$s$"),y=TeX("$\\hat{p}$"), title="Calibration map")
g <- g + theme(plot.title = element_text(hjust = 0.5))
g <- g + guides(colour = guide_legend("Method"))
print(g)
}
|
7ee5a56076591d542a6637bc11c6bc36341eb98a | 01411d60b66af197744af1cb885e77bebe13b912 | /tests/testthat/test-unicode.R | bc6f9126226a1bd37c9bf3b4cd23a2f8740c2094 | [] | no_license | LaAzteca/re2r | a7c3a272860b58a902696947f08d01976336f5d6 | 1b1f4f963d9e3fa00b6259f1fe1eafb414eea734 | refs/heads/master | 2017-12-04T08:22:42.267522 | 2016-12-19T16:54:52 | 2016-12-19T16:54:52 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 2,582 | r | test-unicode.R | context("Unicode")
library(stringi)
test_that("unicode match with native string",{
# the Unicode codepoint cannot be converted to destination encoding
skip_on_os("windows")
letters <- stri_c(stri_enc_fromutf32(list(174L, 173L,182L,190L)), collapse = "")
x <- stri_encode(letters,"UTF-8","")
expect_true(re2_detect(x,letters))
expect_true(re2_detect(x,letters, parallel = T, grain_size = 1))
})
test_that("unicode match",{
expect_identical(re2_detect(c("\u0105\u0106\u0107", "\u0105\u0107"), "\u0106*"), c(TRUE,TRUE))
expect_identical(re2_detect(c("\u0105\u0106\u0107", "\u0105\u0107"), "\u0106*", parallel = T, grain_size = 1), c(TRUE,TRUE))
})
test_that("Chinese",{
expect_true(re2_detect("A", "\\p{L}"));
expect_true(re2_detect("A", "\\p{Lu}"));
expect_true(!re2_detect("A", "\\p{Ll}"));
expect_true(!re2_detect("A", "\\P{L}"));
expect_true(!re2_detect("A", "\\P{Lu}"));
expect_true(re2_detect("A", "\\P{Ll}"));
tan = stri_enc_fromutf32(35674)
expect_true(re2_detect(tan , "\\p{L}"));
expect_true(!re2_detect(tan , "\\p{Lu}"));
expect_true(!re2_detect(tan , "\\p{Ll}"));
expect_true(!re2_detect(tan , "\\P{L}"));
expect_true(re2_detect(tan , "\\P{Lu}"));
expect_true(re2_detect(tan , "\\P{Ll}"));
tan = stri_enc_fromutf32(27704)
expect_true(re2_detect(tan , "\\p{L}"));
expect_true(!re2_detect(tan , "\\p{Lu}"));
expect_true(!re2_detect(tan , "\\p{Ll}"));
expect_true(!re2_detect(tan , "\\P{L}"));
expect_true(re2_detect(tan , "\\P{Lu}"));
expect_true(re2_detect(tan , "\\P{Ll}"));
tan = stri_enc_fromutf32(37586)
expect_true(re2_detect(tan , "\\p{L}"));
expect_true(!re2_detect(tan , "\\p{Lu}"));
expect_true(!re2_detect(tan , "\\p{Ll}"));
expect_true(!re2_detect(tan , "\\P{L}"));
expect_true(re2_detect(tan , "\\P{Lu}"));
expect_true(re2_detect(tan , "\\P{Ll}"));
tan = stri_enc_fromutf32(c(65L, 66L, 67L, 68L, 69L, 70L, 71L, 72L, 73L, 35674L, 27704L, 37586L))
expect_identical(structure(c("ABC","A", "B", "C"), .Dim = c(1L, 4L), .Dimnames = list(NULL, c(".match",".1", ".2", ".3"))),re2_match(tan,"(.).*?(.).*?(.)"))
expect_identical(structure(c("ABC","A", "B", "C"), .Dim = c(1L, 4L), .Dimnames = list(NULL, c(".match",".1", ".2", ".3"))),re2_match(tan,"(.).*?([\\p{L}]).*?(.)"))
expect_identical(structure(c(tan,stri_enc_fromutf32(list( 35674L, 27704L, 37586L))), .Dim = c(1L, 4L), .Dimnames = list( NULL, c(".match",".1", ".2", ".3"))),re2_match(tan,".*(.).*?([\\p{Lu}\\p{Lo}]).*?(.)"))
})
|
62b15fbdbdc7ccb7d5a63b7dd3613e3c12e46334 | 31eff34979bcec6a1e49922d75afd8b562fc6551 | /man/ASvisualization.Rd | a418d985b5e1bbc90db9e00884413c4227d61029 | [] | no_license | hangost/IMAS | ec7c9b49a9d6abeeea5ab3f055965285d47f05ad | b6e8cd7c4972d49f77708e139560cfec92b36281 | refs/heads/master | 2021-01-18T21:35:13.117367 | 2017-04-07T04:33:15 | 2017-04-07T04:33:15 | 84,370,059 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 2,956 | rd | ASvisualization.Rd | \name{ASvisualization}
\alias{ASvisualization}
\title{
Visualize the results of the ASdb object.
}
\description{
This function makes a pdf file consisting of plots for results in the ASdb object.
}
\usage{
ASvisualization(ASdb,CalIndex=NULL,txTable=NULL,exon.range=NULL,snpdata=NULL,
snplocus=NULL,methyldata=NULL,methyllocus=NULL,GroupSam=NULL,
ClinicalInfo=NULL,out.dir=NULL)
}
\arguments{
\item{ASdb}{
A ASdb object.
}
\item{CalIndex}{
An index number in the ASdb object which will be tested in this function.
}
\item{txTable}{
A data frame of transcripts including transcript IDs, Ensembl gene names, Ensembl transcript names, transcript start sites, and transcript end sites.
}
\item{exon.range}{
A list of GRanges objects including total exon ranges in each transcript resulted from the \code{\link{exonsBy}} function in \pkg{GenomicFeatures}.
}
\item{snpdata}{
A data frame of genotype data.
}
\item{snplocus}{
A data frame consisting of locus information of SNP markers in the snpdata.
}
\item{methyldata}{
A data frame consisting of methylation levels.
}
\item{methyllocus}{
A data frame consisting of methylation locus.
}
\item{GroupSam}{
A list object assigning each sample to a group.
}
\item{ClinicalInfo}{
A data frame consisting of a path of bam file and identifier of each sample.
}
\item{out.dir}{
An output directory
}
}
\value{
This function generates a pdf file containing the plots.
}
\author{
Seonggyun Han, Younghee Lee
}
\examples{
data(sampleGroups)
data(samplemethyl)
data(samplemethyllocus)
data(samplesnp)
data(samplesnplocus)
data(sampleclinical)
data(bamfilestest)
ext.dir <- system.file("extdata", package="IMAS")
samplebamfiles[,"path"] <- paste(ext.dir,"/samplebam/",samplebamfiles[,"path"],".bam",sep="")
sampleDB <- system.file("extdata", "sampleDB", package="IMAS")
transdb <- loadDb(sampleDB)
ASdb <- Splicingfinder(transdb,Ncor=1)
ASdb <- ExonsCluster(ASdb,transdb)
ASdb <- RatioFromReads(ASdb,samplebamfiles,"paired",50,40,3,CalIndex="ES3")
ASdb <- sQTLsFinder(ASdb,samplesnp,samplesnplocus,method="lm")
ASdb <- CompGroupAlt(ASdb,GroupSam,CalIndex="ES3")
ASdb <- MEsQTLFinder(ASdb,sampleMedata,sampleMelocus,CalIndex="ES3",GroupSam=GroupSam,out.dir=NULL)
ASdb <- ClinicAnalysis(ASdb,Clinical.data,CalIndex="ES3",out.dir=NULL)
exon.range <- exonsBy(transdb,by="tx")
sel.cn <- c("TXCHROM","TXNAME","GENEID","TXSTART","TXEND","TXSTRAND")
txTable <- select(transdb, keys=names(exon.range),columns=sel.cn,keytype="TXID")
ASvisualization(ASdb,CalIndex="ES3",txTable,exon.range,samplesnp,samplesnplocus,
sampleMedata,sampleMelocus,GroupSam,Clinical.data,out.dir="./")
} |
ca856c1a7a14f66a7dab59a1a3c29e6f511742d1 | 26fb8c0b5fadde0d5499f334025f4e709391e23e | /Predict.R | a1d7b1aff78d6c0d10adc7b3835fa9160302861f | [] | no_license | ryq1230/ECG | d0c003ac8c9f9187e701516a0e503d52b90671af | 53820e32c007f249d07188768c953d49eec56406 | refs/heads/master | 2020-09-14T11:38:48.719167 | 2016-09-09T02:20:33 | 2016-09-09T02:20:33 | 67,757,683 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 2,051 | r | Predict.R | AOI_data_frame <- read.csv('C:\\Users\\lenovo\\Desktop\\ecg\\baseline\\P36F.csv')
ECGGroup <- c('Normal Sinus Rhythm.JPG','SVT.JPG','Ventricular Fibrillation.jpg','Myocarditis - sinus tachy non specific ST changes.JPG','VT.JPG','Anterior STEMI.jpg','AF, LBBB.JPG','WPW.jpg','Hyperkalaemia.jpg','Atrial Fluter.jpg','Bivent Pacer.JPG')
difference_vector <- NULL
baseline_vector <-NULL
AOI_data_frame$Predict<-as.integer(0)
training <- sample(1:11,6)
testing <- setdiff(1:11,training)
for (ECG_index in testing) {
AOI_subset <- AOI_data_frame[AOI_data_frame$Medianame == ECGGroup[ECG_index],]
cluster_ave <- NULL
summary_cluster <- names(rev(sort(table(AOI_subset$ClusterLabel))))
baseline <- summary_cluster[1]
for (y in 1:length(summary_cluster)) {
baseCluster <- AOI_subset[AOI_subset$ClusterLabel==summary_cluster[y],]
baseCluster_sd <- mean(baseCluster$Intersd)
cluster_ave <- c(cluster_ave,baseCluster_sd)
}
cluster_ave <- data.frame(cluster=summary_cluster, duration=cluster_ave)
target_cluster <- AOI_subset[AOI_subset$Label==1,]$ClusterLabel
difference <- cluster_ave[cluster_ave$cluster==target_cluster,]$duration/cluster_ave[1,]$duration
difference_vector <- c(difference_vector,difference)
baseline_vector <- c(baseline_vector,cluster_ave[1,]$duration)
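  # flag clusters whose mean sd is 2.2-3.6x that of the baseline (most common) cluster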
for (test_cluster in 1:length(summary_cluster)) {
criterion <- cluster_ave[cluster_ave$cluster==test_cluster,]$duration/cluster_ave[1,]$duration
if(2.2<criterion && criterion<3.6){
AOI_subset[AOI_subset$ClusterLabel==test_cluster,]$Predict <- 1
}
}
for (index in 1:nrow(AOI_subset)) {
if(AOI_subset[index,]$Predict == 1){
AOI_data_frame[AOI_data_frame$Index==AOI_subset[index,]$Index,]$Predict <- 1
}else{
AOI_data_frame[AOI_data_frame$Index==AOI_subset[index,]$Index,]$Predict <- 0
}
}
}
#write.csv(AOI_data_frame,file = "C:\\Users\\lenovo\\Desktop\\ecg\\baseline\\PF.csv",row.names = FALSE,col.names = FALSE,quote = TRUE)
069d6099633a66c236ce6a7a15ec9af20517fb20 | f7fd50a127978fe49dabeb3d4a1bbdc3c3ba40a3 | /LIFE toolbox/s01_findingWearTimes_excludingOutlierMinutes.R | 86207b0ebeec35b84a01928294705a4a5dd3249e | [] | no_license | matin-ufl/mobility-signature | 75ef109c4a14c336b637082e484d39d87002ed71 | 14ff228721aa84e0c1c92fc4f60797f1a236a1dc | refs/heads/master | 2021-01-21T04:44:32.172140 | 2016-07-26T14:40:06 | 2016-07-26T14:40:06 | 52,101,214 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,357 | r | s01_findingWearTimes_excludingOutlierMinutes.R | library(PhysicalActivity)
setwd("~/Workspaces/R workspace/Mobility Signature Paper/mobility-signature/LIFE toolbox/")
source("f01_functions.R")
# Data File selections ------------------------------------------
# Temporary for easier file selection
setwd("~/../../Volumes/aging/SHARE/ARRC/Active_Studies/ANALYSIS_ONGOING/LIFE Main data/LIFE accelerometry - second data - 10_26_15/")
# Select PID_VC_HID
clr()
load("PID_VC_HID.Rdata")
valid.files <- valid_participants(PID_VC_HID = REF, valid.days = 5)
rm(REF)
# Creating new files without outlier points
out.table <- data.frame(matrix(nrow = 0, ncol = 8))
colnames(out.table) <- c("PID", "startTimeStamp", "endTimeStamp", "days", "weekday", "start", "end", "duration")
for (i in 1:nrow(valid.files)) {
PID <- valid.files$pid[i]
HID <- paste("HID", valid.files$HID[i], ".RData", sep = "")
load(HID)
AC.1s <- append.VM(AC.1s, HID)
wearTimes.info <- find.wearTime.exludeOutlier(AC.1s, sample.per.min = 60)
out.table <- rbind(out.table, wearTimes.info)
print(paste(i, " out of ", nrow(valid.files), " - Being processed... ", HID, " PID (", PID, ")", sep = ""))
}
colnames(out.table) <- c("PID", "startTimeStamp", "endTimeStamp", "days", "weekday", "start", "end", "duration")
save(out.table, file = "../Baseline weartimes - outliers excluded/wearTimes_table.RData") |
02bd9bfd137adeea9bd7934be33c42cb6162018e | 8d78b83bbd4a661d1891ea5f9c181de04d654321 | /tests/testthat.R | 69f830fc0f2e0f84d345d91ad398a8c9ab0224e2 | ["MIT"] | permissive | dynverse/dynwrap | ef84d0a123f4264c1013b868fc7b37a5a5c213d4 | 852535635765cfd7db9938c83da96b1e483ec513 | refs/heads/master | 2023-07-19T18:35:10.780205 | 2023-07-18T10:00:41 | 2023-07-18T10:00:41 | 124,226,016 | 15 | 7 | NOASSERTION | 2021-03-23T23:11:26 | 2018-03-07T11:27:02 | R | UTF-8 | R | false | false | 59 | r | testthat.R | library(testthat)
library(dynwrap)
test_check("dynwrap")
|
ae0b5da821806e0f0a3f14e5ce437216e9b91580 | 21b4913b611e0c16404655374372a233ae8b74af | /man/exploregraph.Rd | 3b65cd6ba3d2f92e3d00d6b4a9048a8c748ebd11 | [] | no_license | cran/rope | 2f5e6cbbe4ff44089db67e311a369f4aed1313db | 7dd0192257fea85ea9c7d24c199415dab591a6aa | refs/heads/master | 2021-01-19T09:48:44.694796 | 2017-02-16T06:55:41 | 2017-02-16T06:55:41 | 82,150,015 | 0 | 0 | null | null | null | null | UTF-8 | R | false | true | 833 | rd | exploregraph.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/rope.R
\name{exploregraph}
\alias{exploregraph}
\title{Convenience wrapper for \code{explore} for adjacency matrices}
\usage{
exploregraph(data, B, ...)
}
\arguments{
\item{data}{List of symmetric matrices, one matrix for each penalization
level}
\item{B}{Number of bootstraps used to construct \code{data}. At least 21 are
needed for u-shape test heuristic to work, but in general it is recommended
to use many more.}
\item{...}{Additional arguments are passed on to \code{explore}.}
}
\value{
A list with components
\item{pop.sep}{vector of values saying how separated true and false
variables are for each level of penalization}
}
\description{
When modeling graphs it may be more convenient to store data as matrices
instead of row vectors.
}
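\examples{
\dontrun{
## Illustrative sketch only: assumes each matrix holds bootstrap
## edge-selection counts (0..B), one symmetric matrix per penalization level.
B <- 50
p <- 20
data <- replicate(5, {
  m <- matrix(rbinom(p * p, B, 0.1), p, p)
  m[lower.tri(m)] <- t(m)[lower.tri(m)]
  diag(m) <- 0
  m
}, simplify = FALSE)
fit <- exploregraph(data, B)
fit$pop.sep
}
}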
|
27e9c9b0aa11b3d6f0d9f67b456253b980eeafa1 | 736281b44e7f46705960e14ba97c98acdeecd6b1 | /man/ggcormat.Rd | a53823ebe2f10284480a565c1bb6fd8c6d74b13d | [] | no_license | abusjahn/wrappedtools | 97dd0d77514cd42affbaab7679d2fc2eab8f8358 | aca66340603ef4a5ba0e0ebee76a06ba6610a33f | refs/heads/main | 2023-08-24T12:09:11.635831 | 2023-08-04T16:37:30 | 2023-08-04T16:37:30 | 132,574,321 | 3 | 2 | null | 2023-09-01T18:01:05 | 2018-05-08T07:55:20 | R | UTF-8 | R | false | true | 1,705 | rd | ggcormat.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/plots.R
\name{ggcormat}
\alias{ggcormat}
\title{Print graphical representation of a correlation matrix.}
\usage{
ggcormat(
cor_mat,
p_mat = NULL,
method = "Correlation",
title = "",
maxpoint = 2.1,
textsize = 5,
axistextsize = 2,
titlesize = 3,
breaklabels = NULL,
lower_only = TRUE,
.low = "blue3",
.high = "red2",
.legendtitle = NULL
)
}
\arguments{
\item{cor_mat}{correlation matrix as produced by cor.}
\item{p_mat}{Optional matrix of p-values; if provided, this is used to define
size of dots rather than absolute correlation.}
\item{method}{text specifying type of correlation.}
\item{title}{plot title.}
\item{maxpoint}{maximum for scale_size_manual, may need adjustment depending on plotsize.}
\item{textsize}{for theme text.}
\item{axistextsize}{relative text size for axes.}
\item{titlesize}{as you already guessed, relative text size for title.}
\item{breaklabels}{currently not used, intended for str_wrap.}
\item{lower_only}{should only lower triangle be plotted?}
\item{.low}{Color for heatmap.}
\item{.high}{Color for heatmap.}
\item{.legendtitle}{Optional name for color legend.}
}
\value{
A ggplot object, allowing further styling.
}
\description{
\code{ggcormat} makes the same correlation matrix as \link{cortestR}
and graphically represents it in a plot
}
\examples{
coeff_pvalues <- cortestR(mtcars[, c("wt", "mpg", "qsec", "hp")],
split = TRUE, sign_symbol = FALSE
)
# focus on coefficients:
ggcormat(cor_mat = coeff_pvalues$corout, maxpoint = 5)
# size taken from p-value:
ggcormat(
cor_mat = coeff_pvalues$corout,
p_mat = coeff_pvalues$pout, maxpoint = 5)
}
|
55139c8394ceb025fc515ccee8843261859b22ad | 70be1f3ab42b122e0523c49f0615705b675aa2bd | /tests/testthat/test_kmeans_init.R | 5ad84403c49d34cf45dfb703cf8e06e1ba334c89 | ["MIT"] | permissive | panntingg/kmeans_R | c23249898fd980c1acddce79d0747461bba9ea66 | 6cb68e13944770e6860225f8038bd8bac9954b87 | refs/heads/master | 2020-04-25T13:34:10.636732 | 2018-03-17T21:23:50 | 2018-03-17T21:23:50 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 4,074 | r | test_kmeans_init.R | # Kmeans initialization tests
#
# Testing includes:
# - graceful failure when handed null data
# - graceful failure when handed null number of clusters
# - defaults to 0 when no data and zero clusters are provided
# - checks that returns object of type matrix
# - checks that the dimension of returned matrix is correct
library(kmeansR)
context("kmeans initialization")
# initialize variables
set.seed(1234)
data_df <- data.frame(x = runif(100, min = 0, max = 10) + rep(c(0, 10), 50),
y = rnorm(100, 5, 1) + rep(c(0, 10), 50))
cluster_borders <- list('x' = quantile(data_df$x, probs = c(0, 0.5, 1)),
'y' = quantile(data_df$y, probs = c(0, 0.5, 1)))
init_vals <- kmeans_init(data = data_df, K = 2)
test_that("Correct error handling if no data object is given as input", {
expect_error(kmeans_init(data = NULL),
"Data object is missing or in the wrong format.")
})
test_that("Correct error handling if no K value is given as input", {
expect_error(kmeans_init(data = data.frame(), K = NULL),
"K value is missing or not a numeric integer.")
})
test_that("Correct error handling if K is larger than the number of data rows", {
expect_error(kmeans_init(data = data_df, K = nrow(data_df) + 1),
"Cannot generate more initializing values than available data points.")
})
test_that("test for correct error handling if invalid method is given as input", {
expect_error(kmeans_init(data = data_df, K = 2, method = "blah"),
"Please choose a valid method or revert to default.")
})
test_that("test for correct error handling if K value is zero.", {
expect_error(kmeans_init(data = data.frame(), K = 0),
"K value cannot be 0.")
})
# test_that("test that no columns are returned where empty data object is given as input with zero K value", {
# expect_equal(ncol(kmeans_init(data = data.frame(), K = 0)), 0)
# })
test_that("test if returned object is matrix given valid input", {
expect_equal(is.matrix(kmeans_init(data = data_df,
K = 2)), TRUE)
})
test_that("test if returned object has same number of rows as input K value for K = 1", {
expect_equal(nrow(kmeans_init(data = data_df,
K = 1)), 1)
})
test_that("test if returned object has same number of rows as input K value", {
expect_equal(nrow(kmeans_init(data = data_df,
K = 2)), 2)
})
test_that("test if returned object has same number of columns as input data object", {
expect_equal(ncol(kmeans_init(data = data_df,
K = 2)), 2)
})
test_that("test if initialization values fall within the logical clusters", {
expect_equal(all(c(min(init_vals[ ,1]) >= cluster_borders$x[1],
min(init_vals[ ,1]) <= cluster_borders$x[2])), TRUE)
expect_equal(all(c(max(init_vals[ ,1]) >= cluster_borders$x[2],
max(init_vals[ ,1]) <= cluster_borders$x[3])), TRUE)
expect_equal(all(min(init_vals[ ,2]) >= cluster_borders$y[1],
min(init_vals[ ,2]) <= cluster_borders$y[2]), TRUE)
expect_equal(all(max(init_vals[ ,2]) >= cluster_borders$y[2],
max(init_vals[ ,2]) <= cluster_borders$y[3]), TRUE)
})
test_that("test for correct error handling if invalid seed is provided", {
expect_error(kmeans_init(data = data_df,
K = 2, method = "rp", seed = 12.12),
"Invalid seed has been provided. Please specify seed as integer or omit.")
})
test_that("test if same seed gives same result", {
expect_equal(identical(kmeans_init(data = data_df,
K = 2, method = "rp", seed = 1234), kmeans_init(data = data_df,
K = 2, method = "rp", seed = 1234)), TRUE)
})
test_that("test if different seeds give different results", {
expect_equal(identical(kmeans_init(data = data_df,
K = 2, method = "rp", seed = 1234), kmeans_init(data = data_df,
K = 2, method = "rp", seed = 2)), FALSE)
})
|
12a2ec6b2ed69e1d07799574ddecc8f0c82334f0 | 25294c318866d0c8f3e9ab81e5e594afa1e8fd96 | /timeSeriesForecastMethods.r | 6e5a4951bdd71d3feb7c6c9587643a632759895d | [] | no_license | jaidprakash/timeseries | 35f6f81c92f318c6d961859d67743472adac3d34 | b75584bdeb926767993aaec20daf9692e00ca8d4 | refs/heads/master | 2021-07-05T23:26:27.385457 | 2020-11-27T17:06:26 | 2020-11-27T17:06:26 | 209,422,297 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 2,288 | r | timeSeriesForecastMethods.r | install.packages("forecast")
library(forecast)
#Package has sample data
install.packages("fma")
library(fma)
# Using the beer data set
head(beer)
plot(beer)
str(beer)
summary(beer)
autoplot(beer)
###############################################################
#Forecast for 5 periods using Average Method
meanf(beer,5)
###############################################################
# Naive Method forecast uses the most recent observation as forecast
naive(beer,5)
# Random walk forecast is similar to Naive
rwf(beer,5)
###############################################################
#Simple exponential smoothing forecast method
## Used when there is no trend or seasonality. Puts more weight on the recent past.
## Using alpha of 0.1, 0.5 and 0.9 and checking when the RMSE (root mean squared error) is lowest
beer1 <- ses(beer,h=25, level = c(80,95), alpha = .1)
summary(beer1)
accuracy(beer1)
autoplot(beer1)
beer5 <- ses(beer,h=25, level = c(80,95), alpha = .5)
summary(beer5)
accuracy(beer5)
autoplot(beer5)
beer9 <- ses(beer,h=10, alpha = .9)
summary(beer9)
accuracy(beer9)
autoplot(beer9)
###############################################################
#Holt's linear trend method
## Good with trending data
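## A minimal sketch of the call (holt() extends ses() with a trend term);
## the beer series is used here purely for illustration.
holt1 <- holt(beer, h=10)
summary(holt1)
autoplot(holt1)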
###############################################################
#Linear Regression forecast method
head(books)
plot(books)
str(books)
summary(books)
autoplot(books)
### Paperback is dependent, Hardcover is independent. Store results in fit variable
fit <- lm(Paperback ~ Hardcover, data = books)
### Slope is 0.19, Intercept is 147.8
summary(fit)
plot(Paperback ~ Hardcover, data=books, pch =19)
abline(fit)
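### Illustrative only: point prediction for a hypothetical day with Hardcover sales of 200
# predict(fit, newdata = data.frame(Hardcover = 200))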
###############################################################
##############################
# Holt's Seasonal Trend Method
##############################
hw1 <-hw(airpass, seasonal = "additive")
hw2 <-hw(airpass, seasonal = "multiplicative")
autoplot(airpass) +
autolayer(hw1, series="HW additive forecasts", PI=FALSE) +
autolayer(hw2, series="HW multiplicative forecasts",
PI=FALSE) +
#ggtitle("International visitors nights in Australia") +
guides(colour=guide_legend(title="Forecast"))
###############################################################
|
822ab98e52365e52e53c82ee699a3e906aca50fc | 52671f54fac60d724612ff353d589a5ebbce7ad8 | /R/setup.R | edba2bf019684409750324c81ccac11f1f4e88b5 | ["CC0-1.0"] | permissive | wdwatkins/trainR | 4a076485cf1896b6e0cd8d94824b817a7ec117e0 | 1562b596a987bc9c06198bfebfca57a18ae467a8 | refs/heads/master | 2021-01-23T12:38:19.854058 | 2017-06-06T15:19:55 | 2017-06-06T15:19:55 | 93,183,704 | 0 | 0 | null | 2017-06-02T16:20:54 | 2017-06-02T16:20:54 | null | UTF-8 | R | false | false | 1,300 | r | setup.R | #'
#' Automatically create issues on the repo
#'
#' @description This function should be run by instructors to setup the issues
#' that will be created for students in each class. The idea is that they fix and
#' close out the issues in each instance of the course, and we reset the code to
#' have errors before the next course. We also need to reinstate the issues
#' associated with the errors. This function should automate that.
#'
#' @param repo.name string, name for the new repository
#' @param issue.json file path indicating the JSON file to be used to define what
#' issues to create. Defaults to the `issuetemplates.json` file in this package.
#' @param org string, GitHub organization to create repository. Defaults to "USGS-R"
#' @param ctx GitHub context for authentication, see \link[grithub]{get.github.context}
#'
#' @importFrom grithub get.github.context
#' @importFrom grithub create.issue
#'
#' @export
create.new.issues <- function(repo.name, issue.json="inst/extdata/issuetemplates.json",
org="USGS-R", ctx = get.github.context()){
# make issues from the issue template JSON file
issue.content <- readLines(issue.json)
new.issues <- lapply(issue.content, create.issue, owner=org, repo=repo.name, ctx=ctx)
return(new.issues)
}
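
# Illustrative call (hypothetical repository name; requires an authenticated
# grithub context):
# new.issues <- create.new.issues(repo.name = "r-training-course")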
|
12aee3c339157b7af78882088fe81da74d59b3e1 | 93079ba3946266d77b8b190aab61c5f5717f7902 | /week_1/homework_1_Q2.3.R | a868e85a64b42397332dcd54abfd49eac88486a9 | [] | no_license | aten2001/GT_ISYE6501-3 | df8df45205ceaf0029b77a689e6bf3ee440e8f14 | 6d6a2373659a8bee9a2d55a839d7a4235c4e6997 | refs/heads/master | 2021-09-08T08:33:24.409863 | 2018-03-08T18:47:55 | 2018-03-08T18:47:55 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 2,161 | r | homework_1_Q2.3.R | #clear the global environment
rm(list=ls())
#load knn library
library(kknn)
#set wd
setwd("~/Desktop/courses/ISYE6501_Intro_to_Analytics_modeling/week_1")
#load data
cc_data <- read.table('credit_card_data-headers.txt', header = TRUE, sep='')
#preallocate prediction matrix
prediction = matrix(0, ncol = 1, nrow = nrow(cc_data))
#preallocate prediction_accuracy
prediction_accuracy = matrix(0, ncol = 1, nrow = 100)
k_val_matrix = matrix(0, ncol = 1, nrow = 100)
#this loop tries different values for k
for (i in 1 : 100){
#this loop applies the kknn model to every data point (row of cc_data)
for (j in 1 : nrow(cc_data)) {
#implement the kknn model
model = kknn(R1 ~., cc_data[-j,], cc_data[j,], k = i, scale = TRUE, distance = 1)
#round to the nearest integer (since continuous values are returned)
prediction[j] = as.integer(fitted(model)+0.5)
}
#find the prediction accuracy for k = i
temp_prediction <- sum(prediction == cc_data[,11]) / nrow(cc_data)
#update matrices
k_val_matrix[i] <- i
prediction_accuracy[i] <- temp_prediction
}
#bind results into single matrix and name
kknn_results <- cbind(k_val_matrix, prediction_accuracy)
colnames(kknn_results) <- c("k", "prediction_accuracy")
#plot the results
#---------NOTE: plot window should be opened (large) to prevent a potential insufficient margins error-----
#----------------sweep the plots and increase the plot window if you still get an error--------------------
plot(kknn_results[,2], xlab = "k (# of neighbors)", ylab = "accuracy", main = "Accuracy vs. k")
#find the maximum value
max_accuracy <- max(kknn_results[,2])
#find the highest accuracy indices
max_indices <- as.matrix(which(kknn_results[,2] == max(kknn_results[,2])))
#find the number of maxima
num_max <- nrow(max_indices)
#preallocate max_accuracy matrix
max_accuracy_matrix <- matrix(0, ncol = 2, nrow = num_max)
max_accuracy_matrix[,2] <- max_accuracy
#loop through max_indices and insert the corresponding value of k
for(h in 1 : num_max){
max_accuracy_matrix[h,1] <- kknn_results[max_indices[h], 1]
}
colnames(max_accuracy_matrix) <- c("k", "prediction_accuracy")
|
a13eb056b5fb8da1f8737f7f69abef571c814c1b | c4686487bc5bca473db882c368f32cd6eb66cb60 | /man/step_log_interval.Rd | 7eeacc1fcffb53d1d2986e6307cf110e11f71f8f | [] | no_license | cran/timetk | a76350c9afec47750976a67dd62bce8e05cd571e | fd37c527527d820c4678dd9f7da616c650388d04 | refs/heads/master | 2023-04-14T02:39:01.465221 | 2023-03-30T13:20:05 | 2023-03-30T13:20:05 | 98,353,024 | 0 | 1 | null | null | null | null | UTF-8 | R | false | true | 4,803 | rd | step_log_interval.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/recipes-step_log_interval.R
\name{step_log_interval}
\alias{step_log_interval}
\alias{tidy.step_log_interval}
\title{Log Interval Transformation for Constrained Interval Forecasting}
\usage{
step_log_interval(
recipe,
...,
limit_lower = "auto",
limit_upper = "auto",
offset = 0,
role = NA,
trained = FALSE,
limit_lower_trained = NULL,
limit_upper_trained = NULL,
skip = FALSE,
id = rand_id("log_interval")
)
\method{tidy}{step_log_interval}(x, ...)
}
\arguments{
\item{recipe}{A \code{recipe} object. The step will be added to the sequence of operations for this recipe.}
\item{...}{One or more selector functions to choose which
variables are affected by the step. See \code{\link[=selections]{selections()}}
for more details. For the \code{tidy} method, these are not
currently used.}
\item{limit_lower}{A lower limit. Must be less than the minimum value.
If set to "auto", selects zero.}
\item{limit_upper}{An upper limit. Must be greater than the maximum value.
If set to "auto", selects a value that is 10\% greater than the maximum value.}
\item{offset}{An offset to include in the log transformation.
Useful when the data contains values less than or equal to zero.}
\item{role}{Not used by this step since no new variables are
created.}
\item{trained}{A logical to indicate if the quantities for preprocessing have been estimated.}
\item{limit_lower_trained}{A numeric vector of transformation values. This
is \code{NULL} until computed by \code{prep()}.}
\item{limit_upper_trained}{A numeric vector of transformation values. This
is \code{NULL} until computed by \code{prep()}.}
\item{skip}{A logical. Should the step be skipped when the recipe
is baked by \code{bake.recipe()}? While all operations are baked when \code{prep.recipe()} is run,
some operations may not be able to be conducted on new data (e.g. processing the outcome variable(s)).
Care should be taken when using \code{skip = TRUE} as it may affect the computations for subsequent operations.}
\item{id}{A character string that is unique to this step to identify it.}
\item{x}{A \code{step_log_interval} object.}
}
\value{
An updated version of \code{recipe} with the new step
added to the sequence of existing steps (if any). For the
\code{tidy} method, a tibble with columns \code{terms} (the
selectors or variables selected) and \code{value} (the
lambda estimate).
}
\description{
\code{step_log_interval} creates a \emph{specification} of a recipe
step that will transform data using a Log-Interval
transformation. This function provides a \code{recipes} interface
for the \code{log_interval_vec()} transformation function.
}
\details{
The \code{step_log_interval()} function is designed specifically to handle time series
using methods implemented in the Forecast R Package.
\strong{Positive Data}
If data includes values of zero, use \code{offset} to adjust the series to make the values positive.
\strong{Implementation}
Refer to the \code{\link[=log_interval_vec]{log_interval_vec()}} function for the transformation implementation details.
}
\examples{
library(dplyr)
library(tidyr)
library(recipes)
library(timetk)
FANG_wide <- FANG \%>\%
select(symbol, date, adjusted) \%>\%
pivot_wider(names_from = symbol, values_from = adjusted)
recipe_log_interval <- recipe(~ ., data = FANG_wide) \%>\%
step_log_interval(FB, AMZN, NFLX, GOOG, offset = 1) \%>\%
prep()
recipe_log_interval \%>\%
bake(FANG_wide) \%>\%
pivot_longer(-date) \%>\%
plot_time_series(date, value, name, .smooth = FALSE, .interactive = FALSE)
recipe_log_interval \%>\% tidy(1)
}
\seealso{
Time Series Analysis:
\itemize{
\item Engineered Features: \code{\link[=step_timeseries_signature]{step_timeseries_signature()}}, \code{\link[=step_holiday_signature]{step_holiday_signature()}}, \code{\link[=step_fourier]{step_fourier()}}
\item Diffs & Lags \code{\link[=step_diff]{step_diff()}}, \code{recipes::step_lag()}
\item Smoothing: \code{\link[=step_slidify]{step_slidify()}}, \code{\link[=step_smooth]{step_smooth()}}
\item Variance Reduction: \code{\link[=step_log_interval]{step_log_interval()}}
\item Imputation: \code{\link[=step_ts_impute]{step_ts_impute()}}, \code{\link[=step_ts_clean]{step_ts_clean()}}
\item Padding: \code{\link[=step_ts_pad]{step_ts_pad()}}
}
Transformations to reduce variance:
\itemize{
\item \code{recipes::step_log()} - Log transformation
\item \code{recipes::step_sqrt()} - Square-Root Power Transformation
}
Recipe Setup and Application:
\itemize{
\item \code{recipes::recipe()}
\item \code{recipes::prep()}
\item \code{recipes::bake()}
}
}
|
fc237a3a6d03923287a5741f5145636bb7d46040 | 0266ef330078f17136848b4062c60cb02fadfe54 | /man-roxygen/ssAdvancedParam.R | 79fd8444e7df0044edac8489b60b69e4597f43ad | [] | no_license | lixixibj/smooth | af025910b65e118c168683636979fb3acd398223 | 5deed8e4eeb5feafcca0ce4e591a1d3f524b8e2d | refs/heads/master | 2022-04-23T12:37:23.294574 | 2020-04-17T15:21:43 | 2020-04-17T15:21:43 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 3,402 | r | ssAdvancedParam.R | #' @param loss The type of Loss Function used in optimization. \code{loss} can
#' be: \code{MSE} (Mean Squared Error), \code{MAE} (Mean Absolute Error),
#' \code{HAM} (Half Absolute Moment), \code{TMSE} - Trace Mean Squared Error,
#' \code{GTMSE} - Geometric Trace Mean Squared Error, \code{MSEh} - optimisation
#' using only h-steps ahead error, \code{MSCE} - Mean Squared Cumulative Error.
#' If \code{loss!="MSE"}, then likelihood and model selection is done based
#' on equivalent \code{MSE}. Model selection in this case is then not optimal.
#'
#' There are also available analytical approximations for multistep functions:
#' \code{aMSEh}, \code{aTMSE} and \code{aGTMSE}. These can be useful in cases
#' of small samples.
#'
#' Finally, just for fun the absolute and half analogues of multistep estimators
#' are available: \code{MAEh}, \code{TMAE}, \code{GTMAE}, \code{MACE},
#' \code{HAMh}, \code{THAM}, \code{GTHAM}, \code{CHAM}.
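# Illustration only (assuming the es() function that uses this template;
# not part of the roxygen text):
# es(AirPassengers, loss = "MSEh", h = 12)
# optimises the 12-steps-ahead squared error instead of the one-step MSE.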
#' @param bounds What type of bounds to use in the model estimation. The first
#' letter can be used instead of the whole word.
#' @param occurrence The type of model used in probability estimation. Can be
#' \code{"none"} - none,
#' \code{"fixed"} - constant probability,
#' \code{"general"} - the general Beta model with two parameters,
#' \code{"odds-ratio"} - the Odds-ratio model with b=1 in Beta distribution,
#' \code{"inverse-odds-ratio"} - the model with a=1 in Beta distribution,
#' \code{"direct"} - the TSB-like (Teunter et al., 2011) probability update
#' mechanism a+b=1,
#' \code{"auto"} - the automatically selected type of occurrence model.
#' @param oesmodel The type of ETS model used for the modelling of the time varying
#' probability. Object of the class "oes" can be provided here, and its parameters
#' would be used in iETS model.
#' @param xreg The vector (either numeric or time series) or the matrix (or
#' data.frame) of exogenous variables that should be included in the model. If
#' a matrix is provided, then its columns should contain variables and its rows observations.
#' Note that \code{xreg} should have number of observations equal either to
#' in-sample or to the whole series. If the number of observations in
#' \code{xreg} is equal to in-sample, then values for the holdout sample are
#' produced using \link[smooth]{es} function.
#' @param xregDo The variable defines what to do with the provided xreg:
#' \code{"use"} means that all of the data should be used, while
#' \code{"select"} means that a selection using \code{ic} should be done.
#' \code{"combine"} will be available at some point in future...
#' @param initialX The vector of initial parameters for exogenous variables.
#' Ignored if \code{xreg} is NULL.
#' @param updateX If \code{TRUE}, transition matrix for exogenous variables is
#' estimated, introducing non-linear interactions between parameters.
#' Prerequisite - non-NULL \code{xreg}.
#' @param persistenceX The persistence vector \eqn{g_X}, containing smoothing
#' parameters for exogenous variables. If \code{NULL}, then estimated.
#' Prerequisite - non-NULL \code{xreg}.
#' @param transitionX The transition matrix \eqn{F_x} for exogenous variables. Can
#' be provided as a vector. Matrix will be formed using the default
#' \code{matrix(transition,nc,nc)}, where \code{nc} is the number of components in
#' state vector. If \code{NULL}, then estimated. Prerequisite - non-NULL
#' \code{xreg}.
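# Illustration only (same assumption as above): a regressor matrix can be
# supplied and pruned by information criteria, e.g.
# es(y, xreg = cbind(temperature, promo), xregDo = "select")
# where temperature and promo are hypothetical regressor columns.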
|
a7905f109ac209c75394d737d00f977f60b9995b | bc61b8e912a578aeba89a6364dc58b54c158a8bf | /logreg_trees.R | 46ac8c2784b1ce12a5ea9c81f59379fd1681c4fb | [] | no_license | cfamigli/cs229 | 84944ae089d97d45a1a8d15c2244e25f7c1e8109 | 983130281b20410ca5a7266148a36a4da9b5fb41 | refs/heads/master | 2021-10-08T14:52:41.651635 | 2018-12-13T18:22:27 | 2018-12-13T18:22:27 | 161,676,960 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 3,235 | r | logreg_trees.R | library(dplyr)
alldata = read.csv('C:/Users/nholtzma/Downloads/fire_data_2001_2017.csv')
nodup = distinct(alldata, lat, lon, year, .keep_all=TRUE)
ndyear = nodup$year
nodup = nodup[-c(1,102,103,112)] #all vars
#nodup = nodup[-c(1,102,103,112)][c(c(1:3,12:17,22:39,44:46,79:90),18,100)] #for reflectance only
#nodup = nodup[-c(1,102,103,112)][-c(1:3,12:17,22:39,44:46,79:90)] #for climate only
nodup$LC = as.factor(nodup$LC)
trainset = nodup[ndyear <= 2015,]
valset = nodup[ndyear == 2016,]
testset = nodup[ndyear == 2017,]
mylist = rep(0,43)
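# univariate screening: refit the base model with each candidate column added
# and record the residual deviance (lower is better)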
for (i in (1:43)) {
  print(i)
  mymod = glm(fire ~ LC + GCVI_1w + SWIR2_3m + NDVI_1w + NDWI_1w + NDMI_1w + trainset[,i], data = trainset, family = binomial)
  mylist[i] = mymod$deviance
}
mylist[mylist==0] = NA
names(trainset)[order(mylist)]
plot(mylist)
which.min(mylist)
names(trainset)[43]
acclist = c()
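# two candidate predictor orderings; note the second assignment overwrites the first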
sel10 = strsplit('LC +GCVI_1w + SWIR2_3m +NIR_1w+NBR1_3m+ SWIR1_1w','+',fixed=T)[[1]]
sel10 = strsplit('LC + GCVI_1w + SWIR2_3m+ NDVI_1w+ NDWI_1w + NDMI_1w','+',fixed=T)[[1]]
for (i in 1:6) {
print(i)
myformula = paste("fire ~ ",paste(sel10[1:i], collapse=" + "),sep = "")
mymod= glm(as.formula(myformula), data= trainset, family = binomial)
mypred = predict(mymod, valset)
acclist = c(acclist,mean((mypred > 0) == valset$fire))
}
plot(acclist - mean(valset$fire==0))
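# candidate final models; only the last assignment (the full model) is used below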
finalmod = glm(fire ~ LC +GCVI_1w + SWIR2_3m +NIR_1w+NBR1_3m ,data= trainset, family = binomial)
finalmodsel = glm(fire ~ LC+GCVI_1w+ SWIR2_3m +ET_1w ,data= trainset, family = binomial)
finalmod = glm(fire ~ .,data= trainset, family = binomial)
mypred = predict(finalmod, testset)
myprob = 1/(1+exp(-mypred))
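# sweep classification cutoffs over [0, 1] to trace the ROC curve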
cutoffs = seq(0,1,0.01)
fpr = c()
tpr = c()
for (i in cutoffs) {
fpr = c(fpr, sum(myprob > i & testset$fire==0)/sum(testset$fire==0))
tpr = c(tpr, sum(myprob > i & testset$fire==1)/sum(testset$fire==1))
}
plot(fpr,tpr, t='l',col='orange', xlab='False positive rate', ylab='True positive rate',lwd=2,asp=1)
abline(0,1)
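# AUC approximated by a Riemann sum of the interpolated ROC curve on a 0.01 grid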
myfun = approxfun(fpr,tpr)
myauc = sum(myfun(seq(0,1,0.01)))*0.01
myauc #0.676025
library(xgboost)
mymod = xgboost(data = model.matrix(fire ~ ., trainset), label = trainset$fire, objective='binary:logistic',
max.depth=2, eta=1, nrounds = 100)
#watchlist=xbb.DMatrix(model.matrix(fire ~ ., testset),label=testset$fire))
plot(mymod$evaluation_log)
myacc = rep(0,100)
for (i in 1:100) {
print(i)
xpred = predict(mymod, newdata=model.matrix(fire ~ ., valset), ntreelimit=i)
myacc[i] = mean((xpred>0.5) == valset$fire)
}
plot(myacc - mean(valset$fire==0))
which.max(myacc)
xpred = predict(mymod, newdata=model.matrix(fire ~ ., testset),ntreelimit=57)
qtab = table(testset$fire,xpred > 0.5)
fpr = c()
tpr = c()
for (i in cutoffs) {
fpr = c(fpr, sum(xpred > i & testset$fire==0)/sum(testset$fire==0))
tpr = c(tpr, sum(xpred > i & testset$fire==1)/sum(testset$fire==1))
}
lines(fpr,tpr,col='blue',lwd=2)
legend('bottomright', pch=15, legend=c('Log reg (no selection)','Log reg (feature sel)', 'XGBoost'), col=c('red','green','blue'), inset = c(0.05,0.05))
myfun = approxfun(fpr,tpr)
myauc2 = sum(myfun(seq(0,1,0.01)))*0.01
myauc2 #0.7540388
|
1230330621fb8b90eed8be32976a3377a99ec2ee 130bec3acb8248c96da384b20e41213783f0dbf6 /cachematrix.R 75dbb3bb9710c4afc725d359bc4e110daade8b46 [] no_license jameswang8/ProgrammingAssignment2 16490dc570f145e0586d0386331a3a44937ddc73 f18b7fb3186a385f8bdb0d6b4dba00f97dc294c5 refs/heads/master 2020-12-27T09:28:23.530933 2015-06-21T20:15:36 2015-06-21T20:15:36 37,816,711 0 0 null 2015-06-21T16:45:38 2015-06-21T16:45:38 null UTF-8 R false false 1,221 r cachematrix.R ## These functions cache the inverse of a matrix so that it does not
## have to be recomputed every time it is needed.
## makeCacheMatrix: creates a special "matrix" object that can cache its inverse.
makeCacheMatrix <- function(x = matrix()) {
## start inverse property
inv <- NULL
  ## Method to set the matrix (and invalidate any cached inverse)
  set <- function(y) {
    x <<- y
    inv <<- NULL
  }
  ## Method to get the matrix
  get <- function() {x}
## set the inverse of the matrix
setInverse <- function(inverse) {inv <<- inverse}
## Method to get the inverse of the matrix
getInverse <- function() {inv}
## Returns the list of methods
list(set = set, get = get, setInverse = setInverse, getInverse = getInverse)
}
## cacheSolve: computes (or retrieves from cache) the inverse of the special "matrix" returned by makeCacheMatrix
cacheSolve <- function(x, ...) {
## Return a matrix that is the inverse of 'x'
## getting the martix that in the inverse
inv<- x$getInverse()
  ## return the inverse if it has already been calculated
if(!is.null(inv)) {message("getting cached data")
return(inv)
}
##if the inverse wasn't calculated
## getting the matrix
data <- x$get()
  ## calculating the inverse with solve()
  m <- solve(data, ...)
## storing the inverse to the object
x$setInverse(m)
  ## returning a matrix that is the inverse
m
}
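
## Example usage (a minimal sketch; any invertible square matrix works):
mcm <- makeCacheMatrix(matrix(c(2, 0, 0, 2), nrow = 2))
cacheSolve(mcm)  ## first call computes the inverse and caches it
cacheSolve(mcm)  ## second call prints "getting cached data" and reuses the cache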
|
8f132d64a3642f8ead4b5dd0e5973b643eaca70b | 517feee5084dc68fa17d51d25a6860b10d876d0b | /man/replaceNAs.Rd | 41aa2906a4b6ea201e041f01d4529e11824fbbf1 | [
"MIT"
] | permissive | arorarshi/utilar | c6055a15be345ffa65efd34907bbaf580eacaea7 | 4fce04f5b43891d2d30328f4bd3dd6f877334320 | refs/heads/master | 2021-06-24T11:54:48.758911 | 2021-02-05T21:26:24 | 2021-02-05T21:26:24 | 199,532,239 | 1 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,312 | rd | replaceNAs.Rd | \name{replaceNAs}
\alias{replaceNAs}
%- Also NEED an '\alias' for EACH other topic documented here.
\title{
Replaces missing variables in a vector as \code{<NA>}.
}
\description{
Given a vector (numeric, character, double, factor) with missingness coded as not \code{NA}, replace it as \code{NA}. Some examples are - 9 N/A Unknown, Not Available etc.
}
\usage{
replaceNAs(x, vNA)
}
\arguments{
\item{x}{ A vector of values - numeric, character, double or factor.}
\item{vNA}{ A character vector of values to be replaced that are coded as missing into \code{NA}}
}
\details{
Both arguments should be supplied. If \code{typeof} is not matched to one of the following - \code{character, double, integer} or \code{factor}, a \code{character} vector is returned
}
\value{
\item{x.na}{A vector with misisng values coded as \code{NA}. If \code{typeof} did not match the following - \code{character, double, integer} or \code{factor}, a \code{character} vector is returned }
}
\author{
Arshi Arora
}
\examples{
set.seed(123)
#sample 20 numbers from 1 -10, say we want to replace the 9s to NA
x<-sample(1:10, 20, replace=TRUE)
#x
#[1] 3 8 5 9 10 1 6 9 6 5 10 5 7 6 2 9 3 1 4 10
x.na<-replaceNAs(x,9)
#[1] 3 8 5 NA 10 1 6 NA 6 5 10 5 7 6 2 NA 3 1 4 10
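#character vector example (hypothetical data): several missing-value codes replaced at once
y <- c("apple", "N/A", "pear", "Unknown")
y.na <- replaceNAs(y, c("N/A", "Unknown"))
#[1] "apple" NA      "pear"  NA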
}
|
226ca5fc06a45d0ff05c45817fb15039247af87c | 88ef281798009d121b409053e3adc46fcc93a984 | /scripts/3_runMDSstatisticalTestsRevised.R | 3465416e0b247e6a4bbb407a64b82675811f0a26 | [] | no_license | rbarner/swabVsStool | a23ea69c254ae87676f829dc7763ef0c99176c30 | d64999a0160a8b1c1408b8c705854d4862338848 | refs/heads/master | 2021-01-01T05:22:51.412832 | 2018-01-02T07:55:23 | 2018-01-02T07:55:23 | 56,530,014 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 4,925 | r | 3_runMDSstatisticalTestsRevised.R | library(nlme)
library(matrixStats)
####### Functions used #######
mdsStatisticalModels <- function(dataSet,eigen,tool,level)
{
bacteriaSwab <- split(dataSet,dataSet$Origin)$SWAB
bacteriaStool <- split(dataSet,dataSet$Origin)$STOOL
stoolMeans <- round(colMeans(bacteriaStool[,37:51]),3);
swabMeans <- round(colMeans(bacteriaSwab[,37:51]),3);
stoolSDs <- round(colSds(as.matrix(bacteriaStool[,37:51])),3);
swabSDs <- round(colSds(as.matrix(bacteriaSwab[,37:51])),3);
pValOriginList=numeric(0);
pValIndividualList=numeric(0);
for(i in 1:15)
{
f <- as.formula(paste("MDS",i,"~","Origin","+","visit",sep=""));
simpleMod <- gls(f,method="REML",data=dataSet);
mixedMod <- lme(f,method="REML",random=~1|study_id,data=dataSet);
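	# F-test for the Origin fixed effect from the mixed model; chi-square LRT (1 df)
	# comparing the gls and lme fits tests the participant random effect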
pValOrigin <- pf(anova(mixedMod)$"F-value"[2],anova(mixedMod)$"numDF"[2],anova(mixedMod)$"denDF"[2],lower.tail = FALSE);
pValParticipant <- pchisq(anova(simpleMod,mixedMod)$"L.Ratio"[2],1,lower.tail = FALSE)
pValOriginList[[length(pValOriginList)+1]]<- format(pValOrigin,digits=3);
pValIndividualList[[length(pValIndividualList)+1]]<- format(pValParticipant,digits=3);
}
originAdj <- p.adjust(pValOriginList,method = "BH")
individualAdj <- p.adjust(pValIndividualList,method = "BH")
makeTable=data.frame(eigen[1:15]*100,stoolMeans,stoolSDs,swabMeans,swabSDs,originAdj,individualAdj);
names(makeTable) <- cbind("stool mean","stool sd","swab mean","swab sd","Origin adj p-value","Participant adj p-value");
	write("MDS Axis\t% variation explained\tstoolMeans\tstoolSDs\tswabMeans\tswabSDs\tOrigin adj p-value\tParticipant adj p-value",paste("../statisticalModels/3_mds_",level,"_",tool,"_individual_origin_pVal.txt",sep=""));
write.table(makeTable,paste("../statisticalModels/3_mds_",level,"_",tool,"_individual_origin_pVal.txt",sep=""),quote=FALSE, sep="\t",append=TRUE, col.names=FALSE);
}
############ MDS classifications #######################
sampleData <- read.delim("data/key/mapping_key_16S.txt",header = TRUE, row.names=1);
sampleData$visit <- unlist(strsplit(as.character(sampleData$type),split = "_"))[c(FALSE,TRUE)]
sampleData2 <- read.delim("data/key/mapping_key_WGS.txt",header = TRUE, row.names=1);
names(sampleData2)[1] <- "Origin"
classifierList <- c("krakenWGS","krakenWGSNoTissue","kraken16S","rdpClassifications", "qiime","metaphlan")
classifierList <- c("qiime","krakenWGSNoTissue","kraken16S","rdpClassifications")
for(classifier in classifierList)
{
if(classifier %in% c("qiime"))
{
taxaLevels <- c("phylum","phylumRarefied","class","classRarefied","order","orderRarefied","family","familyRarefied","genus","genusRarefied","otu","otuRarefied")
}else{
taxaLevels <- c("phylum","class","order","family","genus")
}
for(taxa in taxaLevels )
{
setwd("mds")
mdsFile <- paste(classifier,"_mds_", taxa, "_loggedFiltered.RData",sep="");
print(mdsFile)
eigenFile <- paste(classifier,"_eigenValues_", taxa, "_loggedFiltered.RData",sep="");
mds <-readRDS(mdsFile);
if(classifier %in% c("krakenWGS","metaphlan"))
{
mdsMeta <- merge(sampleData2,mds, by = "row.names")
}else
{
mdsMeta <- merge(sampleData,mds, by = "row.names")
}
eigen <-readRDS(eigenFile)
mdsStatisticalModels(dataSet = mdsMeta,eigen = eigen,tool = classifier,level=taxa);
setwd("..")
}
}
setwd("C://Users/Roshonda/swabVsStoolMicrobiome/")
functionList <- c("wgs","picrust")
for(funct in functionList)
{
if(funct %in% c("wgs"))
{
wgsLevels <- c("keggFamilies",
"keggPathwaysLevel3",
"keggPathwaysLevel2",
"keggPathwaysLevel1",
"metabolickeggPathwaysLevel2",
"metabolickeggPathwaysLevel3",
"keggFamiliesNoTissue",
"keggPathwaysLevel3NoTissue",
"keggPathwaysLevel2NoTissue",
"keggPathwaysLevel1NoTissue",
"metabolickeggPathwaysLevel2NoTissue",
"metabolickeggPathwaysLevel3NoTissue")
}else{
wgsLevels <- c("keggFamilies",
"keggPathwaysLevel3",
"keggPathwaysLevel2",
"keggPathwaysLevel1",
"metabolickeggPathwaysLevel2",
"metabolickeggPathwaysLevel3")
}
for(wgs in wgsLevels )
{
setwd("mds")
mdsFile <- paste(funct,"_mds_", wgs, "_loggedFiltered.RData",sep="");
print(mdsFile)
eigenFile <- paste(funct,"_eigenValues_", wgs, "_loggedFiltered.RData",sep="");
mds <-readRDS(mdsFile);
#sampleData <- sampleData[-26,]
if(funct %in% "wgs")
{
mdsMeta <- merge(sampleData2,mds, by = "row.names")
}
else
{
mdsMeta <- merge(sampleData,mds, by = "row.names")
}
eigen <-readRDS(eigenFile);
    mdsStatisticalModels(dataSet = mdsMeta, eigen = eigen, tool = funct, level = wgs);
setwd("..")
}
} |
288a8cc061c9daedd79db3b389451891728f0392 | bffb5bbabb0598c467d66730ad5de742eecdd8e9 | /testerapp.R | 031948a3c4293ff5f2b5ae02519f193bcdc8893b | [] | no_license | daisyduursma/OEHDecisionFramework | eb4506e534aa1705c5fbbc8c9caadc2320f3912c | 45416fcba7ba22bec924ceeb2c27bca49ddd8df1 | refs/heads/master | 2021-09-09T16:43:27.320901 | 2018-03-16T02:05:25 | 2018-03-16T02:05:25 | 110,914,451 | 0 | 1 | null | null | null | null | UTF-8 | R | false | false | 11,352 | r | testerapp.R | #
# This is a Shiny web application. You can run the application by clicking
# the 'Run App' button above.
#
# Find out more about building applications with Shiny here:
#
# http://shiny.rstudio.com/
#
library(shiny)
#get test data
library(rgdal)
require(cluster)
library(leaflet)
library(leaflet.extras)
library(sp)
source("AppFunctions/extractEnviroData.R", local = T)
source("AppFunctions/plotEnviroHists.R", local = T)
source("AppFunctions/ClusterAnalysis.R", local = T)
# Define UI for application that draws a histogram
ui <- fluidPage(
# Application title
titlePanel("Cluster analysis"),
# Sidebar to select inputs
sidebarLayout(
sidebarPanel(
selectInput("tmax", "Avg. annual Tmax", c("yes","no")),
selectInput("rain", "Avg. annual rainfall", c("yes","no")),
selectInput("rainVar", "Avg. annual rainfall variability", c("yes","no")),
selectInput("elev", "Elevation", c("yes","no")),
selectInput("soils", "Soil type", c("yes","no")),
numericInput('clusters', 'Cluster count',2,
min = 2, max = 9)
),
mainPanel(
# Choices for the drop-downs menu, colour the points by selected variable in map, "cluster", "tmax", etc. are the names in the data after the cluster analysis is run
vars <- c(
"Cluster" = "cluster",
"Avg. annual Tmax" = "tmax",
"Avg. annual rainfall" = "rain",
"Avg. annual rainfall variability" = "rainVar",
"Elevation" = "elev",
"Soil type" = "soil"
),
#sets location for base leaflet map and make dropdown menu to select the backgroudn map
leafletOutput('ClusterPlot'),
absolutePanel(top = 45, right = 20, width = 150, draggable = TRUE,
selectInput("bmap", "Select base map",
choices = c("Base map",
"Satellite imagery"),
selected = "Base map"),
selectInput("variable", "Display Variable", vars)
)
)
)
)
# Define server logic required to draw a histogram
server <- function(input, output) {
################## in the real app this aleady exists
#get the data set up
source("AppFunctions/extractEnviroData.R", local = T)
sp<-"Acacia acanthoclada"
spdat<-read.csv("AppEnvData/SpeciesObservations/SOSflora.csv",header=TRUE)
spdat<-subset(spdat,Scientific==sp)
sites<-readOGR("AppEnvData/ManagmentSites/OEHManagmentSites.shp")
spdat$lat <- spdat[, "Latitude_G"]
spdat$long <- spdat[, "Longitude_"]
dat<-EnvExtract(spdat$lat,spdat$long)
#select site data
coords <- dat[,c("long","lat")]
coordinates(coords) <-c("long","lat")
  proj4string(coords) <- proj4string(sites)
managmentSite <- sites[sites$SciName == sp,]
EnvDat<-cbind(dat,over(coords,managmentSite,returnList = FALSE))
################################
#perform cluster analysis
  variablesUSE <- c("soil", "elev", "rain", "tmax", "rainVar") #this needs to be reactive
clusters<-4 #this needs to be reactive
clusDat<- EnvCluserData(EnvDat,variablesUSE,clusters) #make reactive
  # generate two sets of unique location IDs;
  # the unique IDs are needed to colour the locations we select.
  clusDat$locationID <- paste0(as.character(1:nrow(clusDat)), "_ID")
  clusDat$secondLocationID <- paste0(clusDat$locationID, "_selectedLayer")
#######################
#make coordinates from the clusDat, this will be used when selecting points for SOS managment sites
ClusCoordinates <- SpatialPointsDataFrame( clusDat[,c('long', 'lat')] , clusDat)#reactive?
# list to store the selections for tracking
data_of_click <- reactiveValues(clickedMarker = list())
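  # design note: selected points are redrawn as a second circle layer keyed by
  # secondLocationID, so deleting a drawn shape removes only that highlight
  # layer while the base points remain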
#make empty leaflet plot, this has the boundaries of the species data, but no points
output$ClusterPlot <- renderLeaflet({
#get base map name
if(input$bmap== "Base map"){
mapType<-"OpenStreetMap.Mapnik"
}
if(input$bmap== "Satellite imagery"){
mapType<-"Esri.WorldImagery"
}
#main map
leaflet() %>%
addProviderTiles(mapType) %>%
fitBounds(min(clusDat$long), min(clusDat$lat), max(clusDat$long), max(clusDat$lat))
})
#set colouring options for factors and numeric variables
observe({
colorBy <- input$variable
if (colorBy == "tmax" |colorBy =="rain" |colorBy =="elev") {
# Color and palette if the values are continuous.
colorData <- clusDat[[colorBy]]
pal <- colorBin("viridis", colorData, 7, pretty = FALSE)
} else {
colorData <- clusDat[[colorBy]]
pal <- colorFactor("viridis", colorData)
}
#updating points on map based on selected variable and menu to draw polygons
leafletProxy("ClusterPlot", data = clusDat) %>% #adds points to the graph
clearShapes() %>%
addCircles(~long, ~lat,
radius=5000,
fillOpacity=1,
fillColor=pal(colorData),
weight = 2,
stroke = T,
layerId = as.character(clusDat$locationID),
highlightOptions = highlightOptions(color = "deeppink",
fillColor="deeppink",
opacity = 1.0,
weight = 2,
bringToFront = TRUE)) %>%
addLegend("bottomleft", pal=pal, values=colorData,
layerId="colorLegend")%>% #legend for varibales
addDrawToolbar( #toolbar to drawshapes
targetGroup='Selected',
polylineOptions=FALSE,
markerOptions = FALSE,
polygonOptions = drawPolygonOptions(shapeOptions=drawShapeOptions(fillOpacity = 0
,color = 'black'
,weight = 3)),
rectangleOptions = drawRectangleOptions(shapeOptions=drawShapeOptions(fillOpacity = 0
,color = 'black'
,weight = 3)),
circleOptions = drawCircleOptions(shapeOptions = drawShapeOptions(fillOpacity = 0
,color = 'black'
,weight = 3)),
editOptions = editToolbarOptions(edit = FALSE, selectedPathOptions = selectedPathOptions()))
})
  ############ subsetting observations to get those inside the polygons ##################
  # leaflet.extras draw events are namespaced by the map's outputId ("ClusterPlot")
  observeEvent(input$ClusterPlot_draw_new_feature,{# when the user draws a shape, collect the unique locations inside it (matched by location ID)
    # only add new layers for bounded locations
    found_in_bounds <- findLocations(shape = input$ClusterPlot_draw_new_feature
, location_coordinates = ClusCoordinates
, location_id_colname = "locationID")
for(id in found_in_bounds){
if(id %in% data_of_click$clickedMarker){
# don't add id
} else {
# add id
data_of_click$clickedMarker<-append(data_of_click$clickedMarker, id, 0)
}
}
# look up clusDat by ids found
selected <- subset(clusDat, locationID %in% data_of_click$clickedMarker)
proxy <- leafletProxy("ClusterPlot")
proxy %>% addCircles(data = selected,
radius = 6000,
lat = selected$lat,
lng = selected$long,
fillColor = "red",
fillOpacity = 1,
color = "red",
weight = 3,
stroke = T,
layerId = as.character(selected$secondLocationID),
highlightOptions = highlightOptions(color = "purple",
opacity = 1.0,
weight = 2,
bringToFront = TRUE))
})
# ############################################### section four ##################################################
  observeEvent(input$ClusterPlot_draw_deleted_features,{
    # loop through list of one or more deleted features/ polygons
    for(feature in input$ClusterPlot_draw_deleted_features$features){
# get ids for locations within the bounding shape
bounded_layer_ids <- findLocations(shape = feature
, location_coordinates = ClusCoordinates
, location_id_colname = "secondLocationID")
# remove second layer representing selected locations
proxy <- leafletProxy("ClusterPlot")
proxy %>% removeShape(layerId = as.character(bounded_layer_ids))
first_layer_ids <- subset(clusDat, secondLocationID %in% bounded_layer_ids)$locationID
data_of_click$clickedMarker <- data_of_click$clickedMarker[!data_of_click$clickedMarker
%in% first_layer_ids]
}
})
}
findLocations <- function(shape, location_coordinates, location_id_colname){
# derive polygon coordinates and feature_type from shape input
polygon_coordinates <- shape$geometry$coordinates
feature_type <- shape$properties$feature_type
if(feature_type %in% c("rectangle","polygon")) {
# transform into a spatial polygon
drawn_polygon <- Polygon(do.call(rbind,lapply(polygon_coordinates[[1]],function(x){c(x[[1]][1],x[[2]][1])})))
# use 'over' from the sp package to identify selected locations
selected_locs <- sp::over(location_coordinates
, sp::SpatialPolygons(list(sp::Polygons(list(drawn_polygon),"drawn_polygon"))))
# get location ids
x = (location_coordinates[which(!is.na(selected_locs)), location_id_colname])
selected_loc_id = as.character(x[[location_id_colname]])
return(selected_loc_id)
} else if (feature_type == "circle") {
center_coords <- matrix(c(polygon_coordinates[[1]], polygon_coordinates[[2]])
, ncol = 2)
# get distances to center of drawn circle for all locations in location_coordinates
# distance is in kilometers
dist_to_center <- spDistsN1(location_coordinates, center_coords, longlat=TRUE)
# get location ids
# radius is in meters
x <- location_coordinates[dist_to_center < shape$properties$radius/1000, location_id_colname]
selected_loc_id = as.character(x[[location_id_colname]])
return(selected_loc_id)
}
}
# Run the application
shinyApp(ui = ui, server = server)
|
ab404abcc128fb8848344e5621da7cc91c493d53 | f5bc9ede0f17e4a1bbcca3d76cc68c216cdbafd6 | /GC_olives/graph_cut.R | 777b3b098e419cb4673841ae172719f1fc14966e | [] | no_license | thaos/GraphCut | 419b85d357a03b9326285c264518994bc829dda5 | 14412b65f7a5bdd344a6b03f2b64a0917f9b095e | refs/heads/master | 2020-04-27T18:18:37.404257 | 2019-11-14T15:21:40 | 2019-11-14T15:21:40 | 174,563,956 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 6,052 | r | graph_cut.R | library(magick)
library(magrittr)
library(igraph)
library(optrees)
source("graph_cut_algo.R")
img <- image_read("olives.gif")
plot(img)
img %<>%
image_convert(type = 'grayscale') %>%
image_data("gray") %>%
"["(1,,) %>%
as.integer() %>%
matrix(ncol = 128, nrow = 128)
image(img, col = grey.colors(256))
canvas <- canvas_origin <- matrix(NA, ncol = 32, nrow = 32*2 - 8)
canvas_id <- matrix(1:length(canvas), ncol = ncol(canvas), nrow = nrow(canvas))
patch_A <- img[1:32, 1:32]
patch_A_id <- canvas_id[1:32, 1:32]
canvas <- update_canvas(canvas = canvas, patch = patch_A, patch_id = patch_A_id)
canvas_origin <- update_canvas_origin(canvas_origin = canvas_origin, label = "1", patch_id = patch_A_id)
image(seq.int(nrow(canvas)), seq.int(ncol(canvas)), canvas, zlim = c(0, 256), col = grey.colors(256))
new_canvas <- add_newpatch(
xstart = 20, ystart = 15,
xlength = 20, ylength = 10,
canvas = canvas, canvas_origin = canvas_origin, canvas_id = canvas_id,
training_img = img, patch_list = NULL, cutset_global = NULL
)
par(mfrow = c(2, 1))
image(1:nrow(canvas), 1:ncol(canvas), new_canvas$canvas, zlim = c(0, 256), col = grey.colors(256))
lines_seams(cutset_global = new_canvas$cutset_global, canvas = new_canvas$canvas)
image(1:nrow(canvas), 1:ncol(canvas), matrix(as.numeric(new_canvas$canvas_origin), ncol = ncol(canvas)), col = rainbow(nrow(new_canvas$patch_list)))
lines_seams(cutset_global = new_canvas$cutset_global, canvas = new_canvas$canvas)
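# Each block below repeats the same quilting step: place a new patch from the
# training image at (xstart, ystart), let the graph cut choose the
# minimum-error seam, then redraw the stitched canvas and the patch-origin map
# with the seams overlaid.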
new_canvas2 <- add_newpatch(
xstart = 20, ystart = 23,
xlength = 20, ylength = 10,
canvas = new_canvas$canvas, canvas_origin = new_canvas$canvas_origin, canvas_id = new_canvas$canvas_id,
training_img = img, patch_list = new_canvas$patch_list, cutset_global = new_canvas$cutset_global
)
par(mfrow = c(2, 1))
image(1:nrow(canvas), 1:ncol(canvas), new_canvas2$canvas, zlim = c(0, 256), col = grey.colors(256))
lines_seams(cutset_global = new_canvas2$cutset_global, canvas = new_canvas2$canvas)
abline(v = 19.5)
abline(h = 22.5)
image(1:nrow(canvas), 1:ncol(canvas), matrix(as.numeric(new_canvas2$canvas_origin), ncol = ncol(canvas)), col = rainbow(nrow(new_canvas2$patch_list)))
lines_seams(cutset_global = new_canvas2$cutset_global, canvas = new_canvas2$canvas)
new_canvas3 <- add_newpatch(
xstart = 20, ystart = 14,
xlength = 20, ylength = 10,
canvas = new_canvas2$canvas, canvas_origin = new_canvas2$canvas_origin, canvas_id = new_canvas2$canvas_id,
training_img = img, patch_list = new_canvas2$patch_list, cutset_global = new_canvas2$cutset_global
)
par(mfrow = c(2, 1))
image(1:nrow(canvas), 1:ncol(canvas), new_canvas3$canvas, zlim = c(0, 256), col = grey.colors(256))
lines_seams(cutset_global = new_canvas3$cutset_global, canvas = new_canvas3$canvas)
image(1:nrow(canvas), 1:ncol(canvas), matrix(as.numeric(new_canvas3$canvas_origin), ncol = ncol(canvas)), col = rainbow(nrow(new_canvas3$patch_list)))
lines_seams(cutset_global = new_canvas3$cutset_global, canvas = new_canvas3$canvas)
new_canvas4 <- add_newpatch(
xstart = 20, ystart = 4,
xlength = 20, ylength = 10,
canvas = new_canvas3$canvas, canvas_origin = new_canvas3$canvas_origin, canvas_id = new_canvas3$canvas_id,
training_img = img, patch_list = new_canvas3$patch_list, cutset_global = new_canvas3$cutset_global
)
par(mfrow = c(2, 1))
image(1:nrow(canvas), 1:ncol(canvas), new_canvas4$canvas, zlim = c(0, 256), col = grey.colors(256))
lines_seams(cutset_global = new_canvas4$cutset_global, canvas = new_canvas4$canvas)
image(1:nrow(canvas), 1:ncol(canvas), matrix(as.numeric(new_canvas4$canvas_origin), ncol = ncol(canvas)), col = rainbow(nrow(new_canvas4$patch_list)))
lines_seams(cutset_global = new_canvas4$cutset_global, canvas = new_canvas4$canvas)
new_canvas5 <- add_newpatch(
xstart = 20, ystart = 1,
xlength = 20, ylength = 10,
canvas = new_canvas4$canvas, canvas_origin = new_canvas4$canvas_origin, canvas_id = new_canvas4$canvas_id,
training_img = img, patch_list = new_canvas4$patch_list, cutset_global = new_canvas4$cutset_global
)
par(mfrow = c(2, 1))
image(1:nrow(canvas), 1:ncol(canvas), new_canvas5$canvas, zlim = c(0, 256), col = grey.colors(256))
lines_seams(cutset_global = new_canvas5$cutset_global, canvas = new_canvas5$canvas)
image(1:nrow(canvas), 1:ncol(canvas), matrix(as.numeric(new_canvas5$canvas_origin), ncol = ncol(canvas)), col = rainbow(nrow(new_canvas5$patch_list)))
lines_seams(cutset_global = new_canvas5$cutset_global, canvas = new_canvas5$canvas)
new_canvas6 <- add_newpatch(
xstart = 30, ystart = 1,
xlength = 27, ylength = 32,
canvas = new_canvas5$canvas, canvas_origin = new_canvas5$canvas_origin, canvas_id = new_canvas5$canvas_id,
training_img = img, patch_list = new_canvas5$patch_list, cutset_global = new_canvas5$cutset_global
)
par(mfrow = c(2, 1))
image(1:nrow(canvas), 1:ncol(canvas), new_canvas6$canvas, zlim = c(0, 256), col = grey.colors(256))
lines_seams(cutset_global = new_canvas6$cutset_global, canvas = new_canvas6$canvas)
image(1:nrow(canvas), 1:ncol(canvas), matrix(as.numeric(new_canvas6$canvas_origin), ncol = ncol(canvas)), col = rainbow(nrow(new_canvas6$patch_list)))
lines_seams(cutset_global = new_canvas6$cutset_global, canvas = new_canvas6$canvas)
new_canvas7 <- add_newpatch(
xstart = 10, ystart = 5,
xlength = 16, ylength = 24,
canvas = new_canvas6$canvas, canvas_origin = new_canvas6$canvas_origin, canvas_id = new_canvas6$canvas_id,
training_img = img, patch_list = new_canvas6$patch_list, cutset_global = new_canvas6$cutset_global
)
par(mfrow = c(2, 1))
image(1:nrow(canvas), 1:ncol(canvas), new_canvas7$canvas, zlim = c(0, 256), col = grey.colors(256))
lines_seams(cutset_global = new_canvas7$cutset_global, canvas = new_canvas7$canvas)
image(1:nrow(canvas), 1:ncol(canvas), matrix(as.numeric(new_canvas7$canvas_origin), ncol = ncol(canvas)), col = rainbow(nrow(new_canvas7$patch_list)))
lines_seams(cutset_global = new_canvas7$cutset_global, canvas = new_canvas7$canvas)
|
e7eadda0a3db1ec226af568b04394f78c0c3d162 | 1b537061a9e36ab16bb600d63d4589fdcf8a801b | /hackathon.R | 957f1fc0fcb1bb9e9645cfbbe131804599766cb4 | [] | no_license | arshiyaansari/Twitter-Sentiment-Analysis | 75c485b850a093ae47f2bdba89abe6f1251205ce | 6714112905fb43871f4bf256f2efb8f701b3d6b4 | refs/heads/master | 2023-01-06T18:50:41.821591 | 2020-11-01T23:03:45 | 2020-11-01T23:03:45 | 171,375,502 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 2,155 | r | hackathon.R | install.packages("SnowballC")
install.packages("tm")
install.packages("twitteR")
install.packages("syuzhet")
install.packages("wordcloud")
library("SnowballC")
library("tm")
library("twitteR")
library("syuzhet")
library("stringr")
library("wordcloud")
consumer_key = "Zw5cQNtd0rEXVnaQz0qACzD72"
consumer_secret = "Hy0OtEfwa4Mlp5ll4nTwWr8P5juHpA6sccCHdFZs4Km9ZoMIbf"
access_token = "2596885015-zHpPQ6MYow9Q3J39IM4jsWYLRmGFAULwwGutUzl"
access_secret = "HFVhI8AtGpHL1D8KIEp1A5cy2rxeHJmRzD7Fu3dEaiD1f"
# consumer_key = "PJ1v0CNlENGRWij2XqkSVqd6c"
# consumer_secret = "vtjH0vETnmU07r0KLFrS9BfhZQWrWemd8ricIuO5Fwb1KrybWM"
# access_token = "1019291391678124033-K0xOmNP19kwTQSV2lBkpa1HQl99Sz7"
# access_secret = "9weqcmTa60BD3HP6kxrWxefZmmOlbzoVZzNFbpYfODJ55"
setup_twitter_oauth(consumer_key, consumer_secret, access_token, access_secret)
(tweets <- searchTwitter('juul', n = 3200, since = "2018-09-22", until = "2018-09-23"))
tweets
help(searchTwitter)
tweets.df <- twListToDF(tweets)
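# clean the tweet text: strip URLs, hashtags and (re)tweet handles before scoring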
tweets.df2 <- gsub("http.*","",tweets.df$text)
tweets.df2 <- gsub("https.*","",tweets.df2)
tweets.df2 <- gsub("#.*","",tweets.df2)
tweets.df2 <- gsub("(RT )?@\\S*","",tweets.df2)
tweets.df2
word.df <- as.vector(tweets.df2)
word2.df = unlist(word.df)
emotion.df <- get_nrc_sentiment(word2.df)
emotion.df
emotion2.df <- get_sentiment(word2.df)
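# get_nrc_sentiment() returns counts for the eight NRC emotions plus
# positive/negative; get_sentiment() returns one signed score per tweet
# (the "syuzhet" lexicon by default)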
write.csv(emotion2.df, file = "22.csv")
most.positive <- word2.df[emotion2.df == max(emotion2.df)]
most.positive
most.negative <- word2.df[emotion2.df == min(emotion2.df)]
most.negative
(cigs <- searchTwitter("cigarettes", n = 3200, since = "2018-09-20", until = "2018-09-21"))
# tweets1
cigs.df <- twListToDF(cigs)
cigs.df2 <- gsub("http.*","",cigs.df$text)
cigs.df2 <- gsub("https.*","",cigs.df2)
cigs.df2 <- gsub("#.*","",cigs.df2)
cigs.df2 <- gsub("(RT )?@\\S*","",cigs.df2)
w3.df <- as.vector(cigs.df2)
w4.df = unlist(w3.df)
emotion3.df <- get_nrc_sentiment(w4.df)
emotion3.df
emotion4.df <- get_sentiment(w4.df)
emotion4.df
most.positive <- w4.df[emotion4.df == max(emotion4.df)]
most.positive
most.negative <- w4.df[emotion4.df == min(emotion4.df)]
most.negative
write.csv(emotion4.df, file = "20CIGS.csv")
|
a7cea387a67982a0b743d568a0686ab13f6b0c2b | ff6ca8e3a11a1445c44759895e11655d0c178cd2 | /man/gaussSave.Rd | efd37a4bb56d7e670b5b893a1cbfa3f0e6fab963 | [] | no_license | cran/rSFA | a375b3402107ecf9bfeb36a9fdafbeacb7881ab4 | c8faff4caa5007db462de83f9814539174a543fd | refs/heads/master | 2022-05-06T15:11:09.907585 | 2022-03-29T09:00:07 | 2022-03-29T09:00:07 | 17,698,959 | 0 | 0 | null | null | null | null | UTF-8 | R | false | true | 439 | rd | gaussSave.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/sfaFileIO.R
\name{gaussSave}
\alias{gaussSave}
\title{Save a GAUSS object.}
\usage{
gaussSave(gauss, filename)
}
\arguments{
\item{gauss}{A list that contains all information about the handled gauss-structure}
\item{filename}{Save list \code{gauss} to this file}
}
\description{
Save a GAUSS object.
}
\references{
\code{\link{gaussLoad}}
}
\keyword{internal}
|
7a45389261a69a44cee466349719141153fc89d2 | 1f9b135708835e4c542f9c0c743dfb49620f6685 | /man/map_data.Rd | 7901f96b605cf3e45011f4440e4915956a0f67de | [
"MIT"
] | permissive | kafetzakid/morphotype | 71c337e1b237df3d4a39f829f224e0f2f04e7f05 | 2e44431232701ce0186f93793dcd1b3caaa7a731 | refs/heads/main | 2023-08-11T14:54:53.943013 | 2023-07-30T14:06:55 | 2023-07-30T14:06:55 | 485,943,302 | 1 | 0 | MIT | 2023-07-30T14:06:56 | 2022-04-26T20:49:18 | R | UTF-8 | R | false | true | 1,161 | rd | map_data.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/map_data.R
\name{map_data}
\alias{map_data}
\title{Estimate graph from distance matrix}
\usage{
map_data(distM, filter_values = NULL, num_intervals = NULL)
}
\arguments{
\item{distM}{a distance matrix. Computed using compute_dist_0, compute_dist_1 or compute_dist_2.}
\item{filter_values}{stad parameter. Default is NULL.}
\item{num_intervals}{stad parameter. Default is NULL.}
}
\value{
A list with the following items:
\itemize{
\item graph_est - an igraph object. The estimated graph, which is either the graph estimated by the stad algorithm or the minimum spanning tree.
\item df_links - a dataframe. Contains the links of the graph under the columns 'Source' and 'Target', and the edge weight under the name 'Value2'.
\item plot.shepard - a list of four. Shepard diagram data as provided by MASS::Shepard, plus the Pearson correlation value as a quality measure for the map estimation.
}
}
\description{
Estimate graph from distance matrix
}
\examples{
distM = read.csv(system.file("extdata", "distM.csv", package = "morphotype"), row.names = 1)
map_data(distM)
}
\author{
Danai Kafetzaki
}
|
7650d64d8d0ca6512a9a9812d9da54e78e90d616 | 6916839b0ebdfa142464deda61009a53a4ffed11 | /lib/R-4.0.0/DESeq/doc/DESeq.R | 5123c4777bd13500e51982562aa7f041452a8c22 | [] | no_license | soccin/BIC-RNAseq | e433b2a6cbe4e03cb072cc35f5c56742b8931dd0 | daa585abf432d7fdf6c72b0f02ecd8d693292e94 | refs/heads/master | 2022-08-25T04:46:26.490989 | 2022-06-21T18:56:08 | 2022-08-11T17:09:25 | 94,916,981 | 3 | 2 | null | null | null | null | UTF-8 | R | false | false | 18,816 | r | DESeq.R | ### R code from vignette source 'DESeq.Rnw'
###################################################
### code chunk number 1: options
###################################################
options(digits=3, width=100)
###################################################
### code chunk number 2: systemFile
###################################################
datafile = system.file( "extdata/pasilla_gene_counts.tsv", package="pasilla" )
datafile
###################################################
### code chunk number 3: readTable
###################################################
pasillaCountTable = read.table( datafile, header=TRUE, row.names=1 )
head( pasillaCountTable )
###################################################
### code chunk number 4: pasillaDesign
###################################################
pasillaDesign = data.frame(
row.names = colnames( pasillaCountTable ),
condition = c( "untreated", "untreated", "untreated",
"untreated", "treated", "treated", "treated" ),
libType = c( "single-end", "single-end", "paired-end",
"paired-end", "single-end", "paired-end", "paired-end" ) )
pasillaDesign
###################################################
### code chunk number 5: pairedSamples
###################################################
pairedSamples = pasillaDesign$libType == "paired-end"
countTable = pasillaCountTable[ , pairedSamples ]
condition = pasillaDesign$condition[ pairedSamples ]
###################################################
### code chunk number 6: DESeq.Rnw:163-165
###################################################
head(countTable)
condition
###################################################
### code chunk number 7: condition (eval = FALSE)
###################################################
## #not run
## condition = factor( c( "untreated", "untreated", "treated", "treated" ) )
###################################################
### code chunk number 8: conditionCheck
###################################################
stopifnot( identical( condition, factor( c( "untreated", "untreated", "treated", "treated" ) ) ) )
###################################################
### code chunk number 9: instantiate
###################################################
library( "DESeq" )
cds = newCountDataSet( countTable, condition )
###################################################
### code chunk number 10: estimateSizeFactors
###################################################
cds = estimateSizeFactors( cds )
sizeFactors( cds )
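## size factors are per-sample medians of the ratios of counts to a
## geometric-mean pseudo-reference (median-of-ratios normalisation)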
###################################################
### code chunk number 11: headcounts2
###################################################
head( counts( cds, normalized=TRUE ) )
###################################################
### code chunk number 12: estimateDispersions
###################################################
cds = estimateDispersions( cds )
###################################################
### code chunk number 13: str
###################################################
str( fitInfo(cds) )
###################################################
### code chunk number 14: figFit
###################################################
plotDispEsts( cds )
###################################################
### code chunk number 15: DESeq.Rnw:309-310
###################################################
all(table(conditions(cds))==2)
###################################################
### code chunk number 16: head
###################################################
head( fData(cds) )
###################################################
### code chunk number 17: nbt1
###################################################
res = nbinomTest( cds, "untreated", "treated" )
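## per-gene negative-binomial test between the two conditions; the result
## table holds base means, fold changes, and raw and BH-adjusted p-values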
###################################################
### code chunk number 18: nbt2
###################################################
head(res)
###################################################
### code chunk number 19: checkClaims
###################################################
stopifnot(identical(colnames(res), c("id", "baseMean", "baseMeanA", "baseMeanB", "foldChange",
"log2FoldChange", "pval", "padj")))
###################################################
### code chunk number 20: figDE
###################################################
plotMA(res)
###################################################
### code chunk number 21: histp
###################################################
hist(res$pval, breaks=100, col="skyblue", border="slateblue", main="")
###################################################
### code chunk number 22: ressig1
###################################################
resSig = res[ res$padj < 0.1, ]
###################################################
### code chunk number 23: ressig2
###################################################
head( resSig[ order(resSig$pval), ] )
###################################################
### code chunk number 24: ressig3
###################################################
head( resSig[ order( resSig$foldChange, -resSig$baseMean ), ] )
###################################################
### code chunk number 25: ressig4
###################################################
head( resSig[ order( -resSig$foldChange, -resSig$baseMean ), ] )
###################################################
### code chunk number 26: writetable
###################################################
write.csv( res, file="My Pasilla Analysis Result Table.csv" )
###################################################
### code chunk number 27: ncu
###################################################
ncu = counts( cds, normalized=TRUE )[ , conditions(cds)=="untreated" ]
###################################################
### code chunk number 28: MArepl
###################################################
plotMA(data.frame(baseMean = rowMeans(ncu),
log2FoldChange = log2( ncu[,2] / ncu[,1] )),
col = "black")
###################################################
### code chunk number 29: subset
###################################################
cdsUUT = cds[ , 1:3]
pData( cdsUUT )
###################################################
### code chunk number 30: est123
###################################################
cdsUUT = estimateSizeFactors( cdsUUT )
cdsUUT = estimateDispersions( cdsUUT )
resUUT = nbinomTest( cdsUUT, "untreated", "treated" )
###################################################
### code chunk number 31: figDE_Tb
###################################################
plotMA(resUUT)
###################################################
### code chunk number 32: subset2
###################################################
cds2 = cds[ ,c( "untreated3", "treated3" ) ]
###################################################
### code chunk number 33: cds2
###################################################
cds2 = estimateDispersions( cds2, method="blind", sharingMode="fit-only" )
###################################################
### code chunk number 34: res2
###################################################
res2 = nbinomTest( cds2, "untreated", "treated" )
###################################################
### code chunk number 35: figDE2
###################################################
plotMA(res2)
###################################################
### code chunk number 36: addmarg
###################################################
addmargins( table( res_sig = res$padj < .1, res2_sig = res2$padj < .1 ) )
###################################################
### code chunk number 37: reminderFullData
###################################################
head( pasillaCountTable )
pasillaDesign
###################################################
### code chunk number 38: fct
###################################################
cdsFull = newCountDataSet( pasillaCountTable, pasillaDesign )
###################################################
### code chunk number 39: estsfdisp
###################################################
cdsFull = estimateSizeFactors( cdsFull )
cdsFull = estimateDispersions( cdsFull )
###################################################
### code chunk number 40: figFitPooled
###################################################
plotDispEsts( cdsFull )
###################################################
### code chunk number 41: fit1
###################################################
fit1 = fitNbinomGLMs( cdsFull, count ~ libType + condition )
fit0 = fitNbinomGLMs( cdsFull, count ~ libType )
###################################################
### code chunk number 42: fitstr
###################################################
str(fit1)
###################################################
### code chunk number 43: pvalsGLM
###################################################
pvalsGLM = nbinomGLMTest( fit1, fit0 )
padjGLM = p.adjust( pvalsGLM, method="BH" )
###################################################
### code chunk number 44: addmarg2
###################################################
tab1 = table( "paired-end only" = res$padj < .1, "all samples" = padjGLM < .1 )
addmargins( tab1 )
###################################################
### code chunk number 45: tablesignfitInfocdsperGeneDispEsts
###################################################
table(sign(fitInfo(cds)$perGeneDispEsts - fitInfo(cdsFull)$perGeneDispEsts))
###################################################
### code chunk number 46: figDispScatter
###################################################
trsf = function(x) log( (x + sqrt(x*x+1))/2 )
plot( trsf(fitInfo(cds)$perGeneDispEsts),
trsf(fitInfo(cdsFull)$perGeneDispEsts), pch=16, cex=0.45, asp=1)
abline(a=0, b=1, col="red3")
###################################################
### code chunk number 47: lookatfit1
###################################################
head(fit1)
###################################################
### code chunk number 48: fullAnalysisSimple
###################################################
cdsFullB = newCountDataSet( pasillaCountTable, pasillaDesign$condition )
cdsFullB = estimateSizeFactors( cdsFullB )
cdsFullB = estimateDispersions( cdsFullB )
resFullB = nbinomTest( cdsFullB, "untreated", "treated" )
###################################################
### code chunk number 49: table
###################################################
tab2 = table(
`all samples simple` = resFullB$padj < 0.1,
`all samples GLM` = padjGLM < 0.1 )
addmargins(tab2)
###################################################
### code chunk number 50: rs
###################################################
rs = rowSums ( counts ( cdsFull ))
theta = 0.4
use = (rs > quantile(rs, probs=theta))
table(use)
cdsFilt = cdsFull[ use, ]
###################################################
### code chunk number 51: check
###################################################
stopifnot(!any(is.na(use)))
###################################################
### code chunk number 52: fitFilt
###################################################
fitFilt1 = fitNbinomGLMs( cdsFilt, count ~ libType + condition )
fitFilt0 = fitNbinomGLMs( cdsFilt, count ~ libType )
pvalsFilt = nbinomGLMTest( fitFilt1, fitFilt0 )
padjFilt = p.adjust(pvalsFilt, method="BH" )
###################################################
### code chunk number 53: doublecheck
###################################################
stopifnot(all.equal(pvalsFilt, pvalsGLM[use]))
###################################################
### code chunk number 54: tab
###################################################
padjFiltForComparison = rep(+Inf, length(padjGLM))
padjFiltForComparison[use] = padjFilt
tab3 = table( `no filtering` = padjGLM < .1,
`with filtering` = padjFiltForComparison < .1 )
addmargins(tab3)
###################################################
### code chunk number 55: figscatterindepfilt
###################################################
plot(rank(rs)/length(rs), -log10(pvalsGLM), pch=16, cex=0.45)
###################################################
### code chunk number 56: histindepfilt
###################################################
h1 = hist(pvalsGLM[!use], breaks=50, plot=FALSE)
h2 = hist(pvalsGLM[use], breaks=50, plot=FALSE)
colori = c(`do not pass`="khaki", `pass`="powderblue")
###################################################
### code chunk number 57: fighistindepfilt
###################################################
barplot(height = rbind(h1$counts, h2$counts), beside = FALSE, col = colori,
space = 0, main = "", ylab="frequency")
text(x = c(0, length(h1$counts)), y = 0, label = paste(c(0,1)), adj = c(0.5,1.7), xpd=NA)
legend("topright", fill=rev(colori), legend=rev(names(colori)))
###################################################
### code chunk number 58: sortP
###################################################
orderInPlot = order(pvalsFilt)
showInPlot = (pvalsFilt[orderInPlot] <= 0.08)
alpha = 0.1
###################################################
### code chunk number 59: sortedP
###################################################
plot(seq(along=which(showInPlot)), pvalsFilt[orderInPlot][showInPlot],
pch=".", xlab = expression(rank(p[i])), ylab=expression(p[i]))
abline(a=0, b=alpha/length(pvalsFilt), col="red3", lwd=2)
###################################################
### code chunk number 60: doBH
###################################################
whichBH = which(pvalsFilt[orderInPlot] <= alpha*seq(0, 1, length=length(pvalsFilt)))
## Test some assertions:
## - whichBH is a contiguous set of integers from 1 to length(whichBH)
## - the genes selected by this graphical method coincide with those
## from p.adjust (i.e. padjFilt)
stopifnot(length(whichBH)>0,
identical(whichBH, seq(along=whichBH)),
padjFilt[orderInPlot][ whichBH] <= alpha,
padjFilt[orderInPlot][-whichBH] > alpha)
###################################################
### code chunk number 61: SchwSpjot
###################################################
j = round(length(pvalsFilt)*c(1, .66))
px = (1-pvalsFilt[orderInPlot[j]])
py = ((length(pvalsFilt)-1):0)[j]
slope = diff(py)/diff(px)
###################################################
### code chunk number 62: SchwederSpjotvoll
###################################################
plot(1-pvalsFilt[orderInPlot],
(length(pvalsFilt)-1):0, pch=".",
xlab=expression(1-p[i]), ylab=expression(N(p[i])))
abline(a=0, b=slope, col="red3", lwd=2)
###################################################
### code chunk number 63: defvsd
###################################################
cdsBlind = estimateDispersions( cds, method="blind" )
vsd = varianceStabilizingTransformation( cdsBlind )
###################################################
### code chunk number 64: vsd1
###################################################
##par(mai=ifelse(1:4 <= 2, par("mai"), 0))
px = counts(cds)[,1] / sizeFactors(cds)[1]
ord = order(px)
ord = ord[px[ord] < 150]
ord = ord[seq(1, length(ord), length=50)]
last = ord[length(ord)]
vstcol = c("blue", "black")
matplot(px[ord],
cbind(exprs(vsd)[, 1], log2(px))[ord, ],
type="l", lty=1, col=vstcol, xlab="n", ylab="f(n)")
legend("bottomright",
legend = c(
expression("variance stabilizing transformation"),
expression(log[2](n/s[1]))),
fill=vstcol)
###################################################
### code chunk number 65: vsd2
###################################################
library("vsn")
par(mfrow=c(1,2))
notAllZero = (rowSums(counts(cds))>0)
meanSdPlot(log2(counts(cds)[notAllZero, ] + 1))
meanSdPlot(vsd[notAllZero, ])
###################################################
### code chunk number 66: modlr
###################################################
mod_lfc = (rowMeans( exprs(vsd)[, conditions(cds)=="treated", drop=FALSE] ) -
rowMeans( exprs(vsd)[, conditions(cds)=="untreated", drop=FALSE] ))
###################################################
### code chunk number 67: dah
###################################################
lfc = res$log2FoldChange
table(lfc[!is.finite(lfc)], useNA="always")
###################################################
### code chunk number 68: colourramp
###################################################
logdecade = 1 + round( log10( 1+rowMeans(counts(cdsBlind, normalized=TRUE)) ) )
lfccol = colorRampPalette( c( "gray", "blue" ) )(6)[logdecade]
###################################################
### code chunk number 69: figmodlr
###################################################
ymax = 4.5
plot( pmax(-ymax, pmin(ymax, lfc)), mod_lfc,
xlab = "ordinary log-ratio", ylab = "moderated log-ratio",
cex=0.45, asp=1, col = lfccol,
pch = ifelse(lfc<(-ymax), 60, ifelse(lfc>ymax, 62, 16)))
abline( a=0, b=1, col="red3")
###################################################
### code chunk number 70: cdsFullBlind
###################################################
cdsFullBlind = estimateDispersions( cdsFull, method = "blind" )
vsdFull = varianceStabilizingTransformation( cdsFullBlind )
###################################################
### code chunk number 71: heatmap
###################################################
library("RColorBrewer")
library("gplots")
select = order(rowMeans(counts(cdsFull)), decreasing=TRUE)[1:30]
hmcol = colorRampPalette(brewer.pal(9, "GnBu"))(100)
###################################################
### code chunk number 72: figHeatmap2a
###################################################
heatmap.2(exprs(vsdFull)[select,], col = hmcol, trace="none", margin=c(10, 6))
###################################################
### code chunk number 73: figHeatmap2b
###################################################
heatmap.2(counts(cdsFull)[select,], col = hmcol, trace="none", margin=c(10,6))
###################################################
### code chunk number 74: sampleClust
###################################################
dists = dist( t( exprs(vsdFull) ) )
###################################################
### code chunk number 75: figHeatmapSamples
###################################################
mat = as.matrix( dists )
rownames(mat) = colnames(mat) = with(pData(cdsFullBlind), paste(condition, libType, sep=" : "))
heatmap.2(mat, trace="none", col = rev(hmcol), margin=c(13, 13))
###################################################
### code chunk number 76: figPCA
###################################################
print(plotPCA(vsdFull, intgroup=c("condition", "libType")))
###################################################
### code chunk number 77: sessi
###################################################
sessionInfo()
|
45a471703519f222f2c199156882e466ab038d10 | e15f6d2671a5e3c4bbc77cf8c8055f87fe06b0a5 | /R/rmf-create-upw.R | 50ee9d88953fd7d613c1aaf5dd05e754622ac0df | [] | no_license | matejgedeon/RMODFLOW | 32c5a0b5cbfd0b605df8219b48a3ce3ba52dfb9b | df755343a820fff52ac26994eebee2c7c9a07808 | refs/heads/master | 2021-10-27T07:16:30.240311 | 2019-02-24T22:22:27 | 2019-02-24T22:22:27 | 238,649,825 | 0 | 0 | null | 2020-02-06T09:18:24 | 2020-02-06T09:18:23 | null | UTF-8 | R | false | false | 6,898 | r | rmf-create-upw.R | #' Create an \code{RMODFLOW} upw object
#'
#' \code{rmf_create_upw} creates an \code{RMODFLOW} upw object.
#'
#' @param dis RMODFLOW dis object
#' @param iupwcb flag and unit number for writing cell-by-cell flow terms; defaults to 0
#' @param hdry head assigned to cells that are converted to dry cells; defaults to -888
#' @param npupw number of upw parameters; defaults to 0
#' @param iphdry logical; indicating if head will be set to hdry when it's less than 1E-4 above the cell bottom; defaults to TRUE
#' @param laytyp vector of flags for each layer, specifying layer type; defaults to all confined (0) except the first layer (1)
#' @param layavg vector of flags for each layer, specifying interblock transmissivity calculation method; defaults to 0 for each layer
#' @param chani vector of flags or horizontal anisotropies for each layer; defaults to 1 for each layer
#' @param layvka vector of flags for each layer, indicating whether vka is the vertical hydraulic conductivity or the ratio of horizontal to vertical; defaults to 0 for each layer
#' @param parnam vector of parameter names; names should not be more than 10 characters, are not case sensitive, and should be unique
#' @param partyp vector of parameter types; the upw parameter types are HK, HANI, VK, VANI, SS, SY, or VKCB
#' @param parval vector of parameter values
#' @param nclu vector with the number of clusters required for each parameter
#' @param mltarr matrix of multiplier array names, with dis$nlay rows and upw$npupw columns; cells with non-occurring layer-parameter combinations should be NA
#' @param zonarr matrix of zone array names, with dis$nlay rows and upw$npupw columns; cells with non-occurring layer-parameter combinations should be NA
#' @param iz character matrix of zone number combinations separated by spaces, with dis$nlay rows and upw$npupw columns; cells with non-occurring layer-parameter combinations should be NA; if zonarr is "ALL", iz should be ""
#' @param hk 3d array with hydraulic conductivity along rows; defaults to 0.0001. If not read for a specific layer, set all values in that layer to NA.
#' @param hani 3d array with the ratio of hydraulic conductivity along columns to that along rows; defaults to 1. If not read for a specific layer, set all values in that layer to NA.
#' @param vka 3d array with vertical hydraulic conductivity or the ratio of horizontal to vertical; defaults to hk. If not read for a specific layer, set all values in that layer to NA.
#' @param ss 3d array with specific storage; only required when there are transient stress periods; defaults to 1E-5. If not read for a specific layer, set all values in that layer to NA.
#' @param sy 3d array with specific yield; only required when there are transient stress periods; defaults to 0.15. If not read for a specific layer, set all values in that layer to NA.
#' @param vkcb 3d array with vertical hydraulic conductivity of quasi-three-dimensional confining beds; defaults to 0. If not read for a specific layer, set all values in that layer to NA.
#' @return Object of class upw
#' @note upw input structure is nearly identical to lpf but calculations are done differently. Differences include the addition of the iphdry value and the omission of optional keywords. Layer wetting capabilities are also not supported by upw.
#' @note upw must be used with the Newton solver. See also \code{\link{rmf_create_nwt}}.
#' @export
#' @seealso \code{\link{rmf_read_upw}}, \code{\link{rmf_write_upw}} and \url{https://water.usgs.gov/ogw/modflow-nwt/MODFLOW-NWT-Guide/}
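#' @examples
#' \dontrun{
#' # minimal usage sketch: a upw object for the default dis grid
#' upw <- rmf_create_upw(dis = rmf_create_dis())
#' str(upw$hk)
#' }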
rmf_create_upw <- function(dis = rmf_create_dis(),
iupwcb = 0,
hdry = -888,
npupw = 0,
iphdry = TRUE,
laytyp = ifelse(dis$nlay == 1, list(1), list(c(1,rep(0, dis$nlay - 1))))[[1]],
layavg = laytyp * 0,
chani = rep(1, dis$nlay),
layvka = rep(0, dis$nlay),
parnam = NULL,
partyp = NULL,
parval = NULL,
nclu = NULL,
mltarr = NULL,
zonarr = NULL,
iz = NULL,
hk = rmf_create_array(0.0001, dim = c(dis$nrow, dis$ncol, dis$nlay)),
hani = rmf_create_array(1, dim = c(dis$nrow, dis$ncol, dis$nlay)),
vka = hk,
ss = rmf_create_array(1E-5, dim = c(dis$nrow, dis$ncol, dis$nlay)),
sy = rmf_create_array(0.15, dim = c(dis$nrow, dis$ncol, dis$nlay)),
vkcb = rmf_create_array(0, dim = c(dis$nrow, dis$ncol, dis$nlay))) {
upw <- NULL
# data set 0
# to provide comments, use ?comment on the resulting upw object
# data set 1
upw$iupwcb <- iupwcb
upw$hdry <- hdry
upw$npupw <- npupw
upw$iphdry <- iphdry
# data set 2
upw$laytyp <- laytyp
# data set 3
upw$layavg <- layavg
# data set 4
upw$chani <- chani
# data set 5
upw$layvka <- layvka
# data set 6
upw$laywet <- rep(0, dis$nlay)
# data set 7-8
upw$parnam <- parnam
upw$partyp <- partyp
upw$parval <- parval
upw$nclu <- nclu
upw$mltarr <- mltarr
upw$zonarr <- zonarr
upw$iz <- iz
# data set 9-14
if(!("HK" %in% upw$partyp)) upw$hk <- rmf_create_array(hk,
dim = rmfi_ifelse0(length(dim(hk)) > 2, dim(hk), c(dim(hk),1)))
if(!("HANI" %in% upw$partyp) && any(upw$chani <= 0)) upw$hani <- rmf_create_array(hani,
dim = rmfi_ifelse0(length(dim(hani)) > 2, dim(hani), c(dim(hani),1)))
if(!("VK" %in% upw$partyp | "VANI" %in% upw$partyp)) upw$vka <- rmf_create_array(vka,
dim = rmfi_ifelse0(length(dim(vka)) > 2, dim(vka), c(dim(vka),1)))
if(!("SS" %in% upw$partyp) && 'TR' %in% dis$sstr) upw$ss <- rmf_create_array(ss,
dim = rmfi_ifelse0(length(dim(ss)) > 2, dim(ss), c(dim(ss),1)))
if(!("SY" %in% upw$partyp) && 'TR' %in% dis$sstr && any(upw$laytyp != 0)) upw$sy <- rmf_create_array(sy,
dim = rmfi_ifelse0(length(dim(sy)) > 2, dim(sy), c(dim(sy),1)))
if(!("VKCB" %in% upw$partyp) && any(dis$laycbd != 0)) upw$vkcb <- rmf_create_array(vkcb,
dim = rmfi_ifelse0(length(dim(vkcb)) > 2, dim(vkcb), c(dim(vkcb),1)))
class(upw) <- c('upw','rmf_package')
return(upw)
}
|
70d1a668b2f0b8b7146e7db8f7c04ce9a83b6010 | 1b25e84bb182ad28f1c1582c509ae27c396bdb2a | /R/Clusters.R | 301304c8602129a5b4072c172fc5a39d9f8a467d | [] | no_license | katrikorpela/mare | acb9d17d5e42e27919b1a360564607d6c0a2596b | 6e84a2a39757e3cd0745e3f7c0d654c7e0748e47 | refs/heads/master | 2022-07-22T03:33:37.859806 | 2022-07-14T08:47:58 | 2022-07-14T08:47:58 | 56,917,206 | 9 | 9 | null | null | null | null | UTF-8 | R | false | false | 4,600 | r | Clusters.R | Clusters <- function(taxonomic.table, meta, N.taxa = NULL, readcount.cutoff = 0,
minimum.correlation = 0.5, minimum.network = 1,
select.by = NULL, select = NULL, keep.result = F, pdf = F, relative = T){
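# quartz() only exists on macOS; on Linux and Windows fall back to X11() (on Windows, X11() is an alias for the windows() device)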
if(Sys.info()[['sysname']] == "Linux") {
quartz <- function() {X11()}
}
if(Sys.info()[['sysname']] == "Windows") {
quartz <- function() {X11()}
}
cluster.similarity = 1-minimum.correlation
taxatable <- read.delim(taxonomic.table)
metadata <- read.delim(meta)
if(relative) taxatable <- taxatable/metadata$ReadCount
taxatable <- taxatable[metadata$ReadCount > readcount.cutoff, ]
if (length(select.by) != 0) {
metadata$selection <- metadata[, select.by]
taxatable <- taxatable[metadata$selection == select, ]
metadata <- metadata[metadata$selection == select, ]
}
if (length(N.taxa) == 0) N.taxa = ncol(taxatable)
vars <- c(rev(names(colSums(taxatable,na.rm=T)[order(colSums(taxatable,na.rm=T))])))[1:N.taxa]
gs<-taxatable[,vars]
tgs <-data.frame(t(scale(gs)))
g2.cor<-cor(t(tgs),method="spearman",use="pairwise.complete.obs")
g2.cor[is.na(g2.cor)] <- 0
g2.cor2 <- g2.cor
g2.cor2[abs(g2.cor2)<minimum.correlation] <- 0
g2.cor2 <- g2.cor2[rowSums(abs(g2.cor2)>0)>minimum.network,rowSums(abs(g2.cor2)>0)>minimum.network]
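# keep only taxa whose correlation passes the cutoff with more than minimum.network others (the diagonal self-correlation counts toward this total)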
spnames1 <- rownames(g2.cor)
spnames1 <- sapply(spnames1, function(x) gsub("_NA", ".", x))
spnames1 <- sapply(spnames1, function(x) gsub("_1", ".", x))
spnames1 <- sapply(spnames1, function(x) gsub("_2", ".", x))
spnames1 <- sapply(spnames1, function(x) gsub("_3", ".", x))
spnames1 <- sapply(spnames1, function(x) gsub("_4", ".", x))
spnames1 <- sapply(spnames1, function(x) gsub("_5", ".", x))
spnames1 <- sapply(spnames1, function(x) strsplit(x, split = "_",
fixed = T)[[1]][length(strsplit(x, split = "_", fixed = T)[[1]])])
spnames <- rownames(g2.cor2)
classnames <- sapply(spnames, function(x) strsplit(x, split = "_", fixed = T)[[1]][2])
spnames <- sapply(spnames, function(x) gsub("_NA", ".", x))
spnames <- sapply(spnames, function(x) gsub("_1", ".", x))
spnames <- sapply(spnames, function(x) gsub("_2", ".", x))
spnames <- sapply(spnames, function(x) gsub("_3", ".", x))
spnames <- sapply(spnames, function(x) gsub("_4", ".", x))
spnames <- sapply(spnames, function(x) gsub("_5", ".", x))
spnames <- sapply(spnames, function(x) strsplit(x, split = "_",
fixed = T)[[1]][length(strsplit(x, split = "_", fixed = T)[[1]])])
clusters <- hclust(as.dist(1-g2.cor),"average")
clus<-cutree(clusters,h=cluster.similarity)
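# cutting the tree at height 1 - minimum.correlation groups taxa whose average correlation distance is below that height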
if (pdf){
pdf(paste("CorrelatingTaxa_",select.by,select,".pdf",sep=""))
plot(clusters, ylab="",labels=spnames1,xlab="",cex=0.5)
abline(h=cluster.similarity, lty=2, col="gray")
qgraph::qgraph(g2.cor2,vsize=5,rescale=T,repulsion=0.8,
labels=substr(spnames,start=1,stop=4),
layout="spring",diag=F,
legend.cex=0.5,
groups=classnames,
color=c("#E41A1C","#FFA500","#377EB8","#87CEFA","#4DAF4A" ,'#9ACD32',"#984EA3",'#DA70D6', "#999999","gainsboro",
"#008080","#00CED1","#F781BF","thistle1","#8DA0CB","lightsteelblue1","#FFD92F","#FFFFB3",
"#8DD3C7","#FB8072","#80B1D3","#FDB462","#B3DE69","#FCCDE5","#D9D9D9","#BC80BD",
"#CCEBC5","#FFED6F","#C71585","#EE82EE","#66C2A5","#FC8D62","#A65628")[1:length(unique(classnames))],
label.prop=0.99)
mtext(side=3,text="Correlations",line=2)
dev.off()
}
quartz()
plot(clusters, ylab="",labels=spnames1,xlab="",cex=0.5)
abline(h=cluster.similarity, lty=2, col="gray")
quartz()
qgraph::qgraph(g2.cor2,vsize=5,rescale=T,repulsion=0.8,
labels=substr(spnames,start=1,stop=4),
layout="spring",diag=F,
legend.cex=0.5,
groups=classnames,
color=c("#E41A1C","#FFA500","#377EB8","#87CEFA","#4DAF4A" ,'#9ACD32',"#984EA3",'#DA70D6', "#999999","gainsboro",
"#008080","#00CED1","#F781BF","thistle1","#8DA0CB","lightsteelblue1","#FFD92F","#FFFFB3",
"#8DD3C7","#FB8072","#80B1D3","#FDB462","#B3DE69","#FCCDE5","#D9D9D9","#BC80BD",
"#CCEBC5","#FFED6F","#C71585","#EE82EE","#66C2A5","#FC8D62","#A65628")[1:length(unique(classnames))],
label.prop=0.99)
networks <- data.frame(metadata,taxatable)
for(i in names(table(clus)[table(clus)>1])) networks[,paste('cluster',i,sep="")] <- rowSums(networks[, names(clus)[clus==i]],na.rm=T)
for(i in names(table(clus)[table(clus)==1])) networks[,paste('cluster',i,sep="")] <- networks[, names(clus)[clus==i]]
networks <- list(networks, clus)
write.table(networks[[1]], file = "Clusters.txt", quote=F, sep="\t")
if (keep.result) return(networks)
}
|
e2dca710ae87d59a14e48440469fb3964c22124e 77a9b044f9e5122e882cfbdd48641310b3423173 /tests/testthat/test-remove_default_params.R 0bc4806f95f61bd63e86326655a451bcabd5d820 ["MIT"] permissive rnaimehaom/ggannotate 157f07e29d7644579f8bb6eb0ccf1aaf6514b219 926a22f2db904d9dc21636d839061cd552b55044 null 0 0 null null null null UTF-8 R false false 1,271 r test-remove_default_params.R
test_that("remove_default_params() removes params that match geom defaults", {
params_list <- list(
size = 0.5,
angle = 90L,
colour = "black",
curvature = 0.5,
    arrow = arrow(30L, unit(0.1, "inches"), "last", "closed")
)
params_list_nodefaults <- remove_default_params("geom_curve", params_list)
  expect_identical(
    params_list_nodefaults,
    list(arrow = arrow(30L, unit(0.1, "inches"), "last", "closed"))
  )
})
test_that("remove_default_params() leaves params that do not match geom defaults", {
params_list <- list(
size = 10,
angle = 45,
colour = "blue",
curvature = 0.4,
    arrow = arrow(30L, unit(0.1, "inches"), "last", "closed")
)
params_list_nodefaults <- remove_default_params("geom_curve", params_list)
expect_identical(params_list_nodefaults, params_list)
})
test_that("remove_default_params() leaves intact params that aren't in defaults", {
params_list <- list(random_param = "foo")
expect_identical(
remove_default_params("geom_text", params_list),
params_list
)
})
|
8f20ae5772c407d95ad03262c62a319b039fdf20 | 1f3ad322d895f2ab1f42491c7e350da796e5472f | /man/limmaTwoGroups.Rd | 9a6a13d982525f016bb2375e235ed8bfaa33e400 | [] | no_license | cran/nlcv | 1be0ee0e0fa75ced7cb889be0f7cfaf2a93a1ba0 | 6d2e9bddb46f2b506db703fa87c7c8a3048d9862 | refs/heads/master | 2021-09-17T09:25:10.864465 | 2018-06-29T20:49:59 | 2018-06-29T20:49:59 | 107,578,868 | 0 | 0 | null | null | null | null | UTF-8 | R | false | true | 994 | rd | limmaTwoGroups.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/limma.R
\name{limmaTwoGroups}
\alias{limmaTwoGroups}
\title{Wrapper around limma for the comparison of two groups}
\usage{
limmaTwoGroups(object, group)
}
\arguments{
\item{object}{object of class ExpressionSet}
\item{group}{string indicating the variable defining the two groups to be
compared}
}
\value{
\code{topTable} output for the second (i.e. slope) coefficient of
the linear model.
}
\description{
Wrapper around limma for the comparison of two groups
}
\details{
Basically, the wrapper combines the \code{lmFit}, \code{eBayes} and
\code{topTable} steps
}
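\examples{
\dontrun{
## Minimal sketch: `es` stands in for an ExpressionSet with a two-level
## phenoData variable named "type" (both names are illustrative)
tab <- limmaTwoGroups(es, "type")
head(tab)
}
}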
\references{
Smyth, G. K. (2004). Linear models and empirical Bayes methods
for assessing differential expression in microarray experiments.
\emph{Statistical Applications in Genetics and Molecular Biology}, Vol. 3,
No. 1, Article 3.
\url{http://www.bepress.com/sagmb/vol3/iss1/art3}
}
\author{
Tobias Verbeke
}
\keyword{models}
\keyword{regression}
|
dac28177a59e4930824f941342e35f311df1bc2b | 8ad7d2053c3ab6b8a22210690a23d9216e6ada58 | /NormalizeRNAseq.R | bb40509350486c98372136a48394d92414101566 | [] | no_license | johnmous/RNASeq_Analysis | 766e9c952fa6827211ba9b074970927323b108dc | 969e2adb602be48b66d1ae6e2098b8bbf62d7b77 | refs/heads/master | 2016-09-12T09:25:09.823549 | 2016-04-21T12:29:49 | 2016-04-21T12:29:49 | 56,771,852 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 7,276 | r | NormalizeRNAseq.R | # Author: Ioannis Moustakas, i.moustakas@uva.nl
# Title: Normalize Frank Takken RNA-seq data and check the result with the aid of ERCCs
library(ggplot2)
library(reshape2)
library(ggvis)
library(plotly)
library(plyr)    # ddply() is used below to collapse ERCC counts by concentration
interactive()    # TRUE only in an interactive session; ggvis/plotly rendering relies on one
# load the ERCC concentration table
concetrationTable <- read.delim("/zfs/datastore0/group_root/MAD-RBAB/05_Reference-db/external/ERCC/ERCC_Controls_Analysis.txt", header=T)
ERCCsConcTable <- concetrationTable[,c(2,4)]
colnames(ERCCsConcTable)[1] <- "Names"
geneAndERRCCsTable <- read.delim("/zfs/datastore0/group_root/MAD-RBAB/02_Collaborators/MAD1208-Frank_Takken/MAD1208-P001-DTL_Hotel/MAD1208-P001-E001_2014_RNASeq_Tomato_svleeuw1/Results/mappingWithTophatNewData/MAPQTen/combinedERCCsGeneCountTable.txt")
# extract the ERCCs from the geneAndERRCCsTable
ERCCsInSamples <- geneAndERRCCsTable[grepl("ERCC", geneAndERRCCsTable$Names), ]
# Normalize the geneAndERRCCsTable
# remove the trailing rows ((nrows-6):nrows, i.e. the last 7) that are not gene counts but the htseq-count report lines
nrows <- nrow(geneAndERRCCsTable)
countTableGenesOnly <- geneAndERRCCsTable[-c((nrows-6):nrows), ]
# save the first columns as row name and then remove it (gene names)
row.names(countTableGenesOnly) <- countTableGenesOnly[,1]
countTableGenesOnly <- countTableGenesOnly[,-1]
# now normalize the table
# the normalization function: DESeq-style median of per-gene ratios to the geometric mean
sizeFactors.mad <- function (counts, locfunc = median){
loggeomeans <- rowMeans(log(counts))
apply(counts, 2, function(cnts) exp(locfunc((log(cnts) -
loggeomeans)[is.finite(loggeomeans)])))
}
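# Quick sanity check of the median-of-ratios idea on toy data (a sketch, not
# part of the analysis): column 2 is column 1 scaled by 2, so the size factors
# come out around c(0.71, 1.41), i.e. a ratio of 2 between the two samples.
toy <- matrix(c(10, 20, 30, 20, 40, 60), ncol = 2)
sizeFactors.mad(toy)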
sf <- sizeFactors.mad(countTableGenesOnly)
#divide countdata by sizefactors#
CountTable.scaled <- countTableGenesOnly
for(i in 1:ncol(CountTable.scaled)){
CountTable.scaled[,i] <- CountTable.scaled[,i]/sf[i]
}
write.table(CountTable.scaled,"/zfs/datastore0/group_root/MAD-RBAB/02_Collaborators/MAD1208-Frank_Takken/MAD1208-P001-DTL_Hotel/MAD1208-P001-E001_2014_RNASeq_Tomato_svleeuw1/Results/mappingWithTophatNewData/MAPQTen/NormalizedGenesERCCs.txt", sep="\t")
# extract the ERCCs from the CountTable.scaled (Normalized)
ERCCsInNormalizedSamples <- CountTable.scaled[grepl("ERCC", row.names(CountTable.scaled)), ]
ERCCsInNormalizedSamples$Names <- row.names(ERCCsInNormalizedSamples)
ERCCsInNormalizedSamplesConc <- merge(ERCCsInNormalizedSamples, ERCCsConcTable, by="Names")
row.names(ERCCsInNormalizedSamplesConc) <- ERCCsInNormalizedSamplesConc$Names
ERCCsInNormalizedSamplesConc[,1] <- ERCCsInNormalizedSamplesConc[,ncol(ERCCsInNormalizedSamplesConc)]
colnames(ERCCsInNormalizedSamplesConc)[1] <- "Concentration"
ERCCsInNormalizedSamplesConc <- ERCCsInNormalizedSamplesConc[,-ncol(ERCCsInNormalizedSamplesConc)]
ERCCsNormalizedCollapsed <- ddply(ERCCsInNormalizedSamplesConc, "Concentration", numcolwise(sum))
ERCCsNormalizedMelted <- melt(ERCCsNormalizedCollapsed, id="Concentration")
names(ERCCsNormalizedMelted) <- c("Concentration", "Sample", "Count")
# log trans
ERCCsNormalizedMelted$Count <- log2(ERCCsNormalizedMelted$Count+1)
ggplot(ERCCsNormalizedMelted, aes(Concentration, Count)) + ggtitle("Normalized") + scale_x_log10()+ geom_line(aes(colour = Sample))
normPlot <- qplot(Concentration, Count, data=ERCCsNormalizedMelted) + ggtitle("Normalized") + scale_x_log10()+ geom_line(aes(colour = Sample))+theme(legend.position = "right")
set_credentials_file("Ioannis.moustakas1", "ytm8z8n5em")
py <- plotly()
py$ggplotly(normPlot)
# put a column for the concetration of each of the ERCCs
ERCCsAllSamplesConc <- merge(ERCCsInSamples, ERCCsConcTable, by="Names")
# Set the name of the ERCCs as the row name
row.names(ERCCsAllSamplesConc) <- ERCCsAllSamplesConc$Names
ERCCsAllSamplesConc[,1] <- ERCCsAllSamplesConc[,ncol(ERCCsAllSamplesConc)]
colnames(ERCCsAllSamplesConc)[1] <- "Concentration"
ERCCsAllSamplesConc <- ERCCsAllSamplesConc[,-ncol(ERCCsAllSamplesConc)]
ERCCsCollapsed <- ddply(ERCCsAllSamplesConc, "Concentration", numcolwise(sum))
ERCCsMelted <- melt(ERCCsCollapsed, id="Concentration")
names(ERCCsMelted) <- c("Concentration", "Sample", "Count")
ERCCsMelted$Count <- log2(ERCCsMelted$Count+1)
ggplot(ERCCsMelted, aes(Concentration, Count)) + ggtitle("Original") + scale_x_log10()+ geom_line(aes(colour = Sample))
########## Normalize on ERCCs only ##########
# save the first columns as row name and then remove it (gene names)
row.names(ERCCsInSamples) <- ERCCsInSamples[,1]
ERCCsInSamples <- ERCCsInSamples[,-1]
sf <- sizeFactors.mad(ERCCsInSamples)
#divide countdata by sizefactors#
CountTable.scaled <- ERCCsInSamples
for(i in 1:ncol(CountTable.scaled)){
CountTable.scaled[,i] <- CountTable.scaled[,i]/sf[i]
}
write.table(CountTable.scaled,"/zfs/datastore0/group_root/MAD-RBAB/02_Collaborators/MAD1208-Frank_Takken/MAD1208-P001-DTL_Hotel/MAD1208-P001-E001_2014_RNASeq_Tomato_svleeuw1/Results/mappingWithTophatNewData/MAPQTen/NormalizedOnERCCs.txt", sep="\t")
ERCCsInNormalizedSamples <- CountTable.scaled[grepl("ERCC", row.names(CountTable.scaled)), ]
ERCCsInNormalizedSamples$Names <- row.names(ERCCsInNormalizedSamples) # CountTable.scaled holds only ERCCs here, but this is the safe form
ERCCsInNormalizedSamplesConc <- merge(ERCCsInNormalizedSamples, ERCCsConcTable, by="Names")
row.names(ERCCsInNormalizedSamplesConc) <- ERCCsInNormalizedSamplesConc$Names
ERCCsInNormalizedSamplesConc[,1] <- ERCCsInNormalizedSamplesConc[,ncol(ERCCsInNormalizedSamplesConc)]
colnames(ERCCsInNormalizedSamplesConc)[1] <- "Concentration"
ERCCsInNormalizedSamplesConc <- ERCCsInNormalizedSamplesConc[,-ncol(ERCCsInNormalizedSamplesConc)]
ERCCsNormalizedCollapsed <- ddply(ERCCsInNormalizedSamplesConc, "Concentration", numcolwise(sum))
ERCCsNormalizedMelted <- melt(ERCCsNormalizedCollapsed, id="Concentration")
names(ERCCsNormalizedMelted) <- c("Concentration", "Sample", "Count")
# log trans
ERCCsNormalizedMelted$Count <- log2(ERCCsNormalizedMelted$Count+1)
ggplot(ERCCsNormalizedMelted, aes(Concentration, Count)) + ggtitle("NormalizedOnERCCs") + scale_x_log10()+ geom_line(aes(colour = Sample))
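# tooltip formatter meant for ggvis (e.g. via add_tooltip); defined here but not wired up in this script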
all_values <- function(x) {
if(is.null(x)) return(NULL)
paste0(names(x), ": ", format(x), collapse = "<br />")
}
ERCCsNormalizedMelted %>% ggvis(~Concentration, ~Count, size.hover := 200) %>%
scale_numeric("x", trans="log", expand=0) %>%
layer_points(fill=~factor(Sample))
ERCCsMelted %>% ggvis(~Concentration, ~Count) %>%
scale_numeric("x", trans="log", expand=0) %>%
layer_points(fill=~factor(Sample)) %>%
layer_smooth(method = "lm")
# melt the ERCCs
ERCCsAllSamplesConcMelted <- melt(ERCCsAllSamplesConc[,1:5], id="Concentration")
names(ERCCsAllSamplesConcMelted) <- c("Concentration", "Sample", "Count")
# log trans
ERCCsAllSamplesConcMelted$Count <- log2(ERCCsAllSamplesConcMelted$Count+1)
ERCCsAllSamplesConcMelted %>% ggvis(~Concentration, ~Count, size.hover := 200) %>%
scale_numeric("x", trans="log", expand=0) %>% layer_points(fill=~factor(Sample))
# Data.frame with S01 original and normalized
t <- data.frame(Concentration=ERCCsAllSamplesConc$Concentration,
S01=ERCCsAllSamplesConc$S01, S01Norm=ERCCsInNormalizedSamplesConc$S01)
# melt the ERCCs
ERCCsAllSamplesConcMelted <- melt(t, id="Concentration")
names(ERCCsAllSamplesConcMelted) <- c("Concentration", "Sample", "Count") |
a1ee58cdb2d1c8125ad77aa3921dbc5297f20e9e | e9e2da7dcf679ef9a5cb7ae2e520f3b98da87b45 | /lib/gbm/gbm_caret.R | cf9de77f5c5ec8c4a2353ba1dde544dd7c0e2746 | [] | no_license | xingao1994/spr2017-proj3-group8 | 85dbcde53c5b335d80cd49c9f7426bcfbf05abe0 | 041f7b71b067609ad18fe2aaf54c72d2d8e99860 | refs/heads/master | 2021-06-12T13:32:00.137794 | 2017-03-24T21:25:14 | 2017-03-24T21:25:14 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 3,260 | r | gbm_caret.R | # Setup
setwd("../data")
used.packages<-c("gbm","data.table","dplyr","caret","e1071")
library(gbm)
library(data.table)
library(dplyr)
library(caret)
# Load data
sift <- fread("C:/Users/yj2360/Documents/project3/project3/spr2017-proj3-group8/output/sift_features/sift_features.csv", header = TRUE)
sift <- data.frame(t(sift))
label <- read.table("labels.csv",header=T)
label <- c(t(label))
label_train <- label
dat_train <- sift
# NB: gbm_train() is defined below, so source the whole file before calling it;
# see the example workflow after test() at the bottom of this script.
# Train the model and tune parameters
##################################################
# train.R
# tune parameter: n.tree & shrinkage & depth
# ntree = best iter, generated automatically.. no need to be tuned
# so, tune shrinkage & depth
gbm_train <- function(dat_train, label_train, par=NULL){
### Train a Gradient Boosting Model (GBM) using processed features from training images
### tuning is included
### Input:
### - processed features from images
### - class labels for training images
### Output: training model specification
### load libraries
library("gbm")
if(is.null(par)){
depth <- c(1,2,3)
} else {
depth <- par$depth
}
# Find best parameters using cross validation: shrinkage + tree depth
gbmGrid <- expand.grid(interaction.depth = depth,
                       # n.trees is held fixed here: for a given depth and shrinkage, gbm can
                       # evaluate every boosting iteration up to this count, so n.trees need not be gridded.
n.trees=250,
# n.trees=(1:10)*100,
shrinkage = 0.001,
n.minobsinnode = 10)
fitControl <- trainControl( method = "repeatedcv",
number = 10,
repeats = 5)
set.seed(825)
fit_gbm <- train(x=dat_train, y=label_train,
method = "gbm",
trControl = fitControl,
verbose = FALSE,
## Now specify the exact models
## to evaluate:
tuneGrid = gbmGrid)
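  # caveat: caret::train() treats a numeric 0/1 y as regression (RMSE), while gbm.fit() below
  # with distribution = "adaboost" expects numeric 0/1; convert y to a factor here if
  # classification accuracy should drive the tuning instead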
paras<-fit_gbm$bestTune
plot(fit_gbm)
fit <- gbm.fit(x=dat_train, y=label_train,
n.trees=paras$n.trees,
distribution="adaboost",
interaction.depth=paras$interaction.depth,
shrinkage=paras$shrinkage,
bag.fraction = 0.5,
verbose=FALSE)
best_iter <- gbm.perf(fit, method="OOB",plot.it = FALSE)
return(list(fit=fit, iter=best_iter))
}
###############################################
# test.R
test <- function(fit_train, dat_test, saveFile = FALSE) {
  library("gbm")
  # fit_train is the list returned by gbm_train(), so predict on its $fit
  # component, at the best iteration stored in $iter
  pred_gbm <- predict(fit_train$fit, newdata = dat_test,
                      n.trees = fit_train$iter, type = "response")
  result <- as.numeric(pred_gbm > 0.5)
  if (saveFile == TRUE){
    write.csv(result, file = "../output/gbm_predict.csv")
  }
  return(result)
}
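# Example workflow (a sketch; dat_test and label_test are placeholders for a
# hold-out set prepared the same way as dat_train/label_train above):
# fit_train <- gbm_train(dat_train, label_train)
# pred <- test(fit_train, dat_test, saveFile = FALSE)
# mean(pred == label_test)   # simple accuracy check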
###############################################
# feature.R
# On a new set of images and SIFT descriptors,
# each team will have 30 minutes to process them into features chosen.
# Submit the processed features as a folder of feature objects file.
# [https://github.com/TZstatsADS/Fall2016-proj3-grp10/blob/master/lib/SIFTtry.R] |
f4ab27cc91173ae6ddd531f2bc5afd61908d818f | ffdea92d4315e4363dd4ae673a1a6adf82a761b5 | /data/genthat_extracted_code/future.BatchJobs/vignettes/future.BatchJobs.R | 13c6c422e155a853261f9c62592d3ac81cefc3e5 | [] | no_license | surayaaramli/typeRrh | d257ac8905c49123f4ccd4e377ee3dfc84d1636c | 66e6996f31961bc8b9aafe1a6a6098327b66bf71 | refs/heads/master | 2023-05-05T04:05:31.617869 | 2019-04-25T22:10:06 | 2019-04-25T22:10:06 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 267 | r | future.BatchJobs.R | ###########################################################################
### This 'tangle' R script was created from an RSP document.
### RSP source document: './future.BatchJobs.md.rsp'
###########################################################################
|
145a94686fc7c39bd6995fd5b35c67de6e5939b7 | 1513303cd0bbae5998e196a69d8a56017cca1e32 | /data_cleaning/R/clean.item.data.R | 74e1bfbf59476b7135da4d98ce1e6a4ff7bb0d63 | [] | no_license | eryka-nosal/item_analysis_automation | a6321999ce6d5cb7db1b025e49d1bc8433c5ab69 | 9b254538d9a17a3e02dc9f0b061510d2c71586ef | refs/heads/main | 2023-08-05T05:29:55.294766 | 2021-09-15T03:07:42 | 2021-09-15T03:07:42 | 406,593,319 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 43,642 | r | clean.item.data.R |
#' Clean and combine item response data and test & content item level information
#'
#' @param data_path
#' @param results_path
#' @param df
#' @param analysis.name
#' @param section.map list with specific structure
#' @param detect.section.totals
#' @param scores.to.include
#' @param date.columns
#' @param date.formats
#' @param remove.unscored
#' @param remove.incomplete
#' @param remove.tutor
#' @param remove.no.kbsEID
#' @param repeat.treatment
#' @param recode.answers
#' @param seqHist.to.exclude
#' @param precombined.files
#' @param total.minutes.threshold
#' @param mSec.min.threshold
#' @param mSec.max.threshold
#' @param sec.min.threshold
#' @param sec.max.threshold
#' @param ci.cols.to.include
#' @param interaction.type.list
#' @param cidf
#' @param CI.old.keys
#' @param field.test.items
#'
#' @return list of data frames, one per "row" in section.map
#' @export
#'
#' @examples
clean.item.data <- function(data_path,
results_path = data_path,
df = NULL,
analysis.name,
test.map = NULL,
section.map = NULL,
qbank = FALSE,
detect.section.totals = FALSE,
scores.to.include = "overall",
date.columns = c("timestamp_created","timestamp_completed"),
date.formats = c("%B %d %Y %I:%M:%OS %p","%B %d %Y %I:%M:%OS %p"),
remove.unscored = FALSE,
remove.incomplete = TRUE,
remove.tutor = TRUE,
remove.no.kbsEID = TRUE,
repeat.treatment = "omit",
seqHist.to.exclude = NULL,
precombined.files = TRUE,
remove.no.response.scored = TRUE,
remove.over.time.activities = TRUE,
remove.repeat.test.administrations = FALSE,
recode.answers = FALSE,
total.minutes.threshold = NULL,
mSec.min.threshold = NULL,
mSec.max.threshold = NULL,
sec.min.threshold = NULL,
sec.max.threshold = NULL,
min.items.per.seq = NULL,
timing.excl.map = NULL,
ci.cols.to.include = NULL,
interaction.type.list = 1,
cidf = NULL,
seqdf = NULL,
CI.old.keys = NULL,
CI.old.version.dates = NULL,
CI.remove.before.after = "before",
CI.old.version.list = NULL,
field.test.items = NULL,
v = TRUE,
all.or.nothing = FALSE,
section.calc = TRUE,
section.separated = FALSE) {
# inputs
#
# test name
# sections/categories and what column they are located in - this is a small data frame where the first column is the prettified section name, the second is the jasper section name, the third is how many items are expected in that section
# if section, total number administered as denominator for total responses expected (important for GMAT)
# which final calculated scores (+thetas for adaptive tests) are worth including in the cleaned data
# data location/path
# which columns are dates and numbers
# whether to remove/omit repeat questions
# timing exclusion # of mSec
# seq IDs to exclude
# list of FT items if any, or other category not in the source data
# threshold for % of questions answered to allow activity into analysis
# whether to remove unscored items
if (is.null(test.map) & is.null(section.map)) {
stop("No section.map or test.map!")
}
cleaning_info <- paste0("Starting clean item data function at ", Sys.time())
if (is.null(df)) {
stop("No response df")
}
num_seq_current <- length(unique(df$activity_id_hist))
num_users_current <- length(unique(df$student_id))
num_items_current <- length(unique(df$content_item_name))
cleaning_info <- print.if.verbose(paste0("Total activities at start: ", num_seq_current), v, cleaning_info)
cleaning_info <- print.if.verbose(paste0("Total users at start: ", num_users_current), v, cleaning_info)
cleaning_info <- print.if.verbose(paste0("Unique items at start: ", num_items_current), v, cleaning_info)
## activity LEVEL EXCLUSIONS
if(!is.null(section.map)) {
resp_excl <- df %>% ## only sections in the section map
dplyr::filter(sectionName %in% unlist(section.map$jasperSectionName))
removed.record.count(resp_excl, thing.to.say = "activities without responses specified in the section map, removed: ")
} else {
resp_excl <- df
}
initial_columns <- names(resp_excl)
if(remove.no.kbsEID == TRUE) {
resp_excl <- resp_excl %>%
filter(!is.na(kbs_enrollment_id))
removed.record.count(resp_excl, thing.to.say = "activities with no KBS EID removed: ")
}
if(remove.incomplete == TRUE) {
resp_excl <- resp_excl %>%
dplyr::filter(activity_status == 4 | activity_status == "completed" | activity_status == "Complete")
removed.record.count(resp_excl, thing.to.say = "Non Complete activities removed: ")
}
if(remove.tutor == TRUE) {
resp_excl <- resp_excl %>%
dplyr::filter(tutor_mode != "True" | is.na(tutor_mode)) ## accounting for possibility of empty tutor_mode field - empty would mean "False" which means FALSE
removed.record.count(resp_excl, thing.to.say = "Tutor mode activities removed: ")
}
if(!is.null(total.minutes.threshold)) {
resp_excl <- resp_excl %>% ## exclude activities that took longer than specified time to complete
dplyr::mutate(total_time = as.numeric(difftime(timestamp_completed, timestamp_created, units = "mins")))
# print(paste0("Number of activities with more than ",total.minutes.threshold," minutes: ",dim(resp_excl %>% dplyr::filter(total_time > total.minutes.threshold) %>% select(activity_id_hist) %>% distinct())[1]))
resp_excl <- resp_excl %>%
dplyr::filter(total_time <= total.minutes.threshold) ## defaulted to 1440 minutes (24 hours) in parameters
removed.record.count(resp_excl, thing.to.say = paste0("activities taking longer than ",total.minutes.threshold," minutes to complete, removed: "))
}
if (!is.null(section.map)) {
if (qbank == TRUE) {
warning("Why do you have a section map for qbank")
section.map.df <- data.frame(sectionName = unlist(section.map$jasperSectionName),
test_minutes_allowed = section.map$minutes_allowed,
test_response_threshold = section.map$response_threshold)
} else {
section.map.df <- section.map %>% rename(sectionName = jasperSectionName, section_response_threshold = sectionResponseThreshold)
}
resp_excl <- resp_excl %>% ## prep to find activities with bad records in them, or too much time in a section, or multiple items seen in one test, or too many items in a section (repeated positions) for total exclusion
merge(.,section.map.df) %>%
dplyr::group_by(activity_id_hist, sectionName) %>%
dplyr::mutate(actual_num_ques = length(content_item_name)) %>%
dplyr::ungroup()
}
if (!is.null(test.map) & qbank == FALSE) {
resp_excl <- resp_excl %>% ## prep to find activities with bad records in them, or too much time in a section, or multiple items seen in one test, or too many items in a section (repeated positions) for total exclusion
merge(.,data.frame(template_name = test.map$template_name,
test_minutes_allowed = test.map$minutes_allowed,
test_num_ques = test.map$num_ques,
test_response_threshold = test.map$response_threshold,
                         stringsAsFactors = FALSE)) %>%
dplyr::group_by(activity_id_hist, template_name) %>%
dplyr::mutate(actual_num_ques = length(content_item_name)) %>%
dplyr::ungroup()
} else if (!is.null(test.map) & qbank == TRUE) {
temp_record_check <- dim(resp_excl)[1]
resp_excl <- resp_excl %>% ## prep to find activities with bad records in them, or too much time in a section, or multiple items seen in one test, or too many items in a section (repeated positions) for total exclusion
merge(.,data.frame(test_minutes_allowed = test.map$minutes_allowed,
test_num_ques = test.map$num_ques,
test_response_threshold = test.map$response_threshold,
                         stringsAsFactors = FALSE)) %>%
dplyr::group_by(activity_id_hist, template_name) %>%
dplyr::mutate(actual_num_ques = length(content_item_name)) %>%
dplyr::ungroup()
if (temp_record_check != dim(resp_excl)[1]) { stop("Too many things in test.map, probably")}
}
print("Here are the new columns after joining all the test and section maps")
print(names(resp_excl)[!(names(resp_excl) %in% initial_columns)])
if (remove.no.response.scored == TRUE) {
seqHist.to.exclude.calc1 <- resp_excl %>%
dplyr::filter(scored_response == 1 & is.na(raw_response)) %>% ## Excluding activities that have weird response records - scored as correct without a response
dplyr::ungroup() %>% dplyr::select(activity_id_hist) %>% dplyr::distinct(activity_id_hist)
if (length(seqHist.to.exclude.calc1$activity_id_hist) > 0) {
resp_excl <- resp_excl %>%
filter(!(activity_id_hist %in% seqHist.to.exclude.calc1$activity_id_hist))
}
removed.record.count(resp_excl, thing.to.say = "activities with bad records (raw_response = 0 with scored_response = 1), removed: ")
}
seqHist.to.exclude.calc1.5 <- resp_excl %>%
dplyr::filter(milliseconds_used < 0) %>% ## EXCLUDING activities that have a response with negative time
dplyr::ungroup() %>% dplyr::select(activity_id_hist) %>% dplyr::distinct(activity_id_hist)
if (length(seqHist.to.exclude.calc1.5$activity_id_hist) > 0) {
resp_excl <- resp_excl %>%
filter(!(activity_id_hist %in% seqHist.to.exclude.calc1.5$activity_id_hist))
}
removed.record.count(resp_excl, thing.to.say = "activities with bad timing (milliseconds_used < 0), removed: ")
# if (remove.over.time.activities == TRUE) {
# if (!is.null(section.map)) {
# seqHist.to.exclude.calc2 <- resp_excl %>%
# dplyr::group_by(activity_id_hist, sectionName, test_minutes_allowed) %>%
# dplyr::summarise(section_time = sum(milliseconds_used/60000)) %>% ## this gets the time in minutes
# dplyr::filter(section_time > test_minutes_allowed) %>% ## EXCLUDING all activities where a section is over the number of minutes allowed
# dplyr::ungroup() %>% dplyr::select(activity_id_hist) %>% dplyr::distinct(activity_id_hist)
# } else if (!is.null(test.map)) {
# seqHist.to.exclude.calc2 <- resp_excl %>%
# dplyr::group_by(activity_id_hist, test_minutes_allowed) %>%
# dplyr::summarise(test_sum_time = sum(milliseconds_used/60000)) %>% ## this gets the time in minutes
# dplyr::filter(test_sum_time > test_minutes_allowed) %>% ## EXCLUDING activities over the number of minutes allowed
# dplyr::ungroup() %>% dplyr::select(activity_id_hist) %>% dplyr::distinct(activity_id_hist)
# }
# if (length(seqHist.to.exclude.calc2$activity_id_hist) > 0) {
# resp_excl <- resp_excl %>%
# filter(!(activity_id_hist %in% seqHist.to.exclude.calc2$activity_id_hist))
# }
# }
# removed.record.count(resp_excl, thing.to.say = "activities (or sections) over the minutes allowed threshold, removed: ")
if (qbank == FALSE) {
seqHist.to.exclude.calc3 <- resp_excl %>%
dplyr::filter(actual_num_ques > test_num_ques) %>% ## Excluding activities that have more questions than they should (or sections)
dplyr::ungroup() %>% dplyr::select(activity_id_hist) %>% dplyr::distinct(activity_id_hist)
if (length(seqHist.to.exclude.calc3$activity_id_hist) > 0) {
resp_excl <- resp_excl %>%
filter(!(activity_id_hist %in% seqHist.to.exclude.calc3$activity_id_hist))
}
removed.record.count(resp_excl, thing.to.say = "activities with too many questions in a section, removed: ")
}
seqHist.to.exclude.calc4 <- resp_excl %>%
filter(content_item_id != -1) %>%
group_by(activity_id_hist, content_item_name) %>%
summarise(count = length(content_item_name)) %>%
filter(count > 1) %>% ## THIS IS THE REAL FILTER - Excluding activities that have a single content item more than once (after filtering out tutorials/breaks/staged)
dplyr::ungroup() %>% dplyr::select(activity_id_hist) %>% dplyr::distinct(activity_id_hist)
if (length(seqHist.to.exclude.calc4$activity_id_hist) > 0) {
resp_excl <- resp_excl %>%
filter(!(activity_id_hist %in% seqHist.to.exclude.calc4$activity_id_hist))
}
removed.record.count(resp_excl, thing.to.say = "activities with dupe content items within the same exam, removed: ")
if (remove.repeat.test.administrations == TRUE) {
seqHist.to.exclude.calc5 <- resp_excl %>%
group_by(student_id, template_name, activity_id_hist, timestamp_created) %>%
summarise(num_ques = length(content_item_name)) %>%
dplyr::ungroup() %>% dplyr::group_by(student_id, template_name) %>%
mutate(activity_order = dplyr::row_number(timestamp_created)) %>%
filter(activity_order > 1) %>% ## Excluding activities that are not the first of their template administered to the user
dplyr::select(activity_id_hist) %>% dplyr::distinct(activity_id_hist)
if (length(seqHist.to.exclude.calc5$activity_id_hist) > 0) {
resp_excl <- resp_excl %>%
filter(!(activity_id_hist %in% seqHist.to.exclude.calc5$activity_id_hist))
}
removed.record.count(resp_excl, thing.to.say = "activities that were not the first administration for the user, removed: ")
}
  if (recode.answers) {
    if (is.character(resp_excl$raw_response)){
      # keep the original letters in rawest_response, then map A-D to 1-4;
      # anything unmatched becomes NA
      resp_excl$rawest_response <- resp_excl$raw_response
      resp_excl$raw_response <- dplyr::recode(resp_excl$raw_response, "A" = 1, "B" = 2, "C" = 3, "D" = 4, .default = NA_real_)
    }
  }
if (!is.null(seqHist.to.exclude)) {
resp_excl <- resp_excl %>%
filter(!(activity_id_hist %in% seqHist.to.exclude))
num_seq_new <- length(unique(resp_excl$activity_id_hist))
num_users_new <- length(unique(resp_excl$student_id))
cleaning_info <- print.if.verbose(paste0("activities input from list, removed: ", num_seq_current - num_seq_new), v, cleaning_info)
cleaning_info <- print.if.verbose(paste0("Users removed: ", num_users_current - num_users_new), v, cleaning_info)
num_seq_current <- num_seq_new
num_users_current <- num_users_new
}
cleaning_info <- print.if.verbose(paste0("Current number of activities: ", num_seq_current), v, cleaning_info)
cleaning_info <- print.if.verbose(paste0("Current number of users: ", num_users_current), v, cleaning_info)
cleaning_info <- print.if.verbose(paste0("Current number of items: ", num_items_current), v, cleaning_info)
## ITEM LEVEL EXCLUSIONS
cleaning_info <- print.if.verbose(paste0("ITEM LEVEL EXCLUSIONS"), v, cleaning_info)
num_item_responses <- dim(resp_excl)[1]
cleaning_info <- print.if.verbose(paste0("Total item responses: ", num_item_responses), v, cleaning_info)
## create a column for attempted TRUE or FALSE based on responseStatus
resp_excl <- resp_excl %>% dplyr::mutate(attempted = ifelse(is.na(raw_response), FALSE, raw_response != 0))
resp_excl <- resp_excl %>% dplyr::filter(content_item_id != -1) ## these are staged records and do not represent a question that was viewed
num_item_responses_new <- dim(resp_excl)[1]
cleaning_info <- print.if.verbose(paste0("Staged response records removed: ", num_item_responses - num_item_responses_new), v, cleaning_info)
num_item_responses <- num_item_responses_new
if (remove.unscored == TRUE) { ## remove unscored items if requested, otherwise do nothing. defaults to doing nothing.
resp_excl <- resp_excl %>% dplyr::filter(is_scored == 1)
num_item_responses_new <- dim(resp_excl)[1]
cleaning_info <- print.if.verbose(paste0("Unscored item responses removed: ", num_item_responses - num_item_responses_new), v, cleaning_info)
num_item_responses <- num_item_responses_new
}
remove.value <- FALSE
if (!(repeat.treatment %in% c("omit","remove","ignore"))) {
message("Unknown repeat.treatment value. Allowed values include 'omit','remove', and 'ignore'. Repeated questions are recorded as omit by default.")
} else if (repeat.treatment == "omit") {
remove.value <- FALSE
} else if (repeat.treatment == "remove") {
remove.value <- TRUE
} else if (repeat.treatment == "ignore") {
remove.value <- NULL
}
cleaning_info <- print.if.verbose(paste0("remove repeat item responses, instead of recoding as omitted = ",remove.value), v, cleaning_info)
num_items_omitted <- dim(resp_excl[!resp_excl$attempted,])[1]
num_seq_w_omitted <- dim(unique(resp_excl[!resp_excl$attempted,"activity_id_hist"]))[1]
num_users_w_omitted <- dim(unique(resp_excl[!resp_excl$attempted,"student_id"]))[1]
num_items_omitted_new <- 0
num_seq_w_omitted_new <- 0
num_users_w_omitted_new <- 0
cleaning_info <- print.if.verbose(paste0("Original number of responses omitted: ", num_items_omitted), v, cleaning_info)
cleaning_info <- print.if.verbose(paste0("Original number of seq w items omitted: ", num_seq_w_omitted), v, cleaning_info)
cleaning_info <- print.if.verbose(paste0("Original number of users w items omitted: ", num_users_w_omitted), v, cleaning_info)
if (!is.null(CI.old.version.dates)) {
## If items were under an earlier version the response should be recoded as omitted
if (sum(names(CI.old.version.dates) == "content_item_name") > 0) {
for (i in seq_along(CI.old.version.dates$content_item_name)) {
resp_excl <- recode.as.omitted(resp_excl,
omit.condition = (resp_excl$content_item_name == CI.old.version.dates$content_item_name[i] &
if (CI.remove.before.after == "before") {resp_excl$timestamp_created < CI.old.version.dates$cutoff_date[i]} else if (CI.remove.before.after == "after") {resp_excl$timestamp_created > CI.old.version.dates$cutoff_date[i]})
)
}
} else if (sum(names(CI.old.version.dates) == "content_item_id") > 0) {
for (i in seq_along(CI.old.version.dates$content_item_id)) {
resp_excl <- recode.as.omitted(resp_excl,
omit.condition = (resp_excl$content_item_id == CI.old.version.dates$content_item_id[i] &
resp_excl$timestamp_created < CI.old.version.dates$cutoff_date[i])
)
}
}
num_items_omitted_new <- dim(resp_excl[!resp_excl$attempted,])[1]
num_seq_w_omitted_new <- dim(unique(resp_excl[!resp_excl$attempted,"activity_id_hist"]))[1]
num_users_w_omitted_new <- dim(unique(resp_excl[!resp_excl$attempted,"student_id"]))[1]
cleaning_info <- print.if.verbose(paste0("Item responses under previous version marked as omitted: ", num_items_omitted_new - num_items_omitted), v, cleaning_info)
cleaning_info <- print.if.verbose(paste0("Affected activities : ", num_seq_w_omitted_new - num_seq_w_omitted), v, cleaning_info)
cleaning_info <- print.if.verbose(paste0("Affected users : ", num_users_w_omitted_new - num_users_w_omitted), v, cleaning_info)
num_items_omitted <- num_items_omitted_new
num_seq_w_omitted <- num_seq_w_omitted_new
num_users_w_omitted <- num_users_w_omitted_new
}
if (!is.null(CI.old.version.list)) {
## If items were under an earlier version the response should be recoded as omitted
#browser()
for (i in seq_along(CI.old.version.list$content_item_id)) {
resp_excl <- recode.as.omitted(resp_excl,
omit.condition = (resp_excl$content_item_id == CI.old.version.list$content_item_id[i])
)
}
num_items_omitted_new <- dim(resp_excl[!resp_excl$attempted,])[1]
num_seq_w_omitted_new <- dim(unique(resp_excl[!resp_excl$attempted,"activity_id_hist"]))[1]
num_users_w_omitted_new <- dim(unique(resp_excl[!resp_excl$attempted,"student_id"]))[1]
cleaning_info <- print.if.verbose(paste0("Item responses under previous version (from id list) marked as omitted: ", num_items_omitted_new - num_items_omitted), v, cleaning_info)
cleaning_info <- print.if.verbose(paste0("Affected activities : ", num_seq_w_omitted_new - num_seq_w_omitted), v, cleaning_info)
cleaning_info <- print.if.verbose(paste0("Affected users : ", num_users_w_omitted_new - num_users_w_omitted), v, cleaning_info)
num_items_omitted <- num_items_omitted_new
num_seq_w_omitted <- num_seq_w_omitted_new
num_users_w_omitted <- num_users_w_omitted_new
}
if (!is.null(remove.value)) {
resp_excl <- remove.repeat.questions(resp_excl, remove = remove.value, add.col = TRUE)
num_items_omitted_new <- dim(resp_excl[!resp_excl$attempted,])[1]
num_seq_w_omitted_new <- dim(unique(resp_excl[!resp_excl$attempted,"activity_id_hist"]))[1]
num_users_w_omitted_new <- dim(unique(resp_excl[!resp_excl$attempted,"student_id"]))[1]
cleaning_info <- print.if.verbose(paste0("Repeated items marked as omitted: ", num_items_omitted_new - num_items_omitted), v, cleaning_info)
cleaning_info <- print.if.verbose(paste0("Current number of responses omitted: ", num_items_omitted_new), v, cleaning_info)
cleaning_info <- print.if.verbose(paste0("Current number of seq w items omitted: ", num_seq_w_omitted_new), v, cleaning_info)
cleaning_info <- print.if.verbose(paste0("Current number of users w items omitted: ", num_users_w_omitted_new), v, cleaning_info)
# cleaning_info <- print.if.verbose(paste0("Affected activities: ", num_seq_w_omitted_new - num_seq_w_omitted), v, cleaning_info)
# cleaning_info <- print.if.verbose(paste0("Affected users: ", num_users_w_omitted_new - num_users_w_omitted), v, cleaning_info)
num_items_omitted <- num_items_omitted_new
num_seq_w_omitted <- num_seq_w_omitted_new
num_users_w_omitted <- num_users_w_omitted_new
marked_omit_items <- resp_excl %>% filter(raw_response != orig_response)
write.csv(marked_omit_items, file.path(results_path, "Marked omitted repeat items.csv"), row.names = FALSE)
}
resp_excl <- timing.exclusion(resp_excl, mSec.min.threshold = mSec.min.threshold, sec.min.threshold = sec.min.threshold,
mSec.max.threshold = mSec.max.threshold, sec.max.threshold = sec.max.threshold)
## Responses given in less than the threshold allowed will be recoded as omitted
num_items_omitted_new <- dim(resp_excl[!resp_excl$attempted,])[1]
num_seq_w_omitted_new <- dim(unique(resp_excl[!resp_excl$attempted,"activity_id_hist"]))[1]
num_users_w_omitted_new <- dim(unique(resp_excl[!resp_excl$attempted,"student_id"]))[1]
cleaning_info <- print.if.verbose(paste0("Item response time under threshold of ",mSec.min.threshold," mSec or over ",mSec.max.threshold, " mSec, marked as omitted: ", num_items_omitted_new - num_items_omitted), v, cleaning_info)
# cleaning_info <- print.if.verbose(paste0("Affected activities: ", num_seq_w_omitted_new - num_seq_w_omitted), v, cleaning_info)
# cleaning_info <- print.if.verbose(paste0("Affected users: ", num_users_w_omitted_new - num_users_w_omitted), v, cleaning_info)
cleaning_info <- print.if.verbose(paste0("Current number of responses omitted: ", num_items_omitted_new), v, cleaning_info)
cleaning_info <- print.if.verbose(paste0("Current number of seq w items omitted: ", num_seq_w_omitted_new), v, cleaning_info)
cleaning_info <- print.if.verbose(paste0("Current number of users w items omitted: ", num_users_w_omitted_new), v, cleaning_info)
num_items_omitted <- num_items_omitted_new
num_seq_w_omitted <- num_seq_w_omitted_new
num_users_w_omitted <- num_users_w_omitted_new
if (!is.null(CI.old.keys)) {
## If items are scored from an earlier answer key the response should be recoded as omitted
for (i in seq_along(CI.old.keys$content_item_id)) {
resp_excl <- recode.as.omitted(resp_excl,
omit.condition = (resp_excl$content_item_id == CI.old.keys$content_item_id[i] &
resp_excl$correctAnswer == CI.old.keys$correctAnswer[i])
)
}
num_items_omitted_new <- dim(resp_excl[!resp_excl$attempted,])[1]
num_seq_w_omitted_new <- dim(unique(resp_excl[!resp_excl$attempted,"activity_id_hist"]))[1]
num_users_w_omitted_new <- dim(unique(resp_excl[!resp_excl$attempted,"student_id"]))[1]
cleaning_info <- print.if.verbose(paste0("Item responses with previous version of answer key marked as omitted: ", num_items_omitted_new - num_items_omitted), v, cleaning_info)
cleaning_info <- print.if.verbose(paste0("Affected activities : ", num_seq_w_omitted_new - num_seq_w_omitted), v, cleaning_info)
cleaning_info <- print.if.verbose(paste0("Affected users : ", num_users_w_omitted_new - num_users_w_omitted), v, cleaning_info)
num_items_omitted <- num_items_omitted_new
num_seq_w_omitted <- num_seq_w_omitted_new
num_users_w_omitted <- num_users_w_omitted_new
}
removed.record.count(resp_excl, thing.to.say = "Number of activities removed during item exclusions: ")
if (precombined.files == FALSE) {
## ADD CONTENT ITEM INFO
if (!is.null(cidf)) {
if (!is.null(ci.cols.to.include)) {
resp_excl <- combine.CIinfo(data_path, resp_excl,cidf = cidf, ci.cols.to.include = ci.cols.to.include, interaction.type.list = interaction.type.list)
} else resp_excl <- combine.CIinfo(data_path, resp_excl, cidf = cidf, interaction.type.list = interaction.type.list)
} else resp_excl <- combine.CIinfo(data_path, resp_excl, interaction.type.list = interaction.type.list)
removed.record.count(resp_excl, thing.to.say = "Number of activities removed during content item join (should be 0): ")
}
if (!is.null(field.test.items)) {
resp_excl$FT <- resp_excl$content_item_name %in% field_test_items
}
cleaning_info <- print.if.verbose(paste0("Remaining number of activities at this point: ", num_seq_current), v, cleaning_info)
cleaning_info <- print.if.verbose(paste0("Remaining number of users at this point: ", num_users_current), v, cleaning_info)
# Split into separate data frames for each section, or consider together if no section split
if (!is.null(section.map) | section.separated == TRUE) {
if (qbank == TRUE) {
output.df.list <- vector("list", max(seq_along(section.map$jasperSectionName))+1) ## stage the list to have one more element than number of sections, the first element will hold the cleaning info
# activities with less than a predetermined number of valid activity responses in each of the sections (considered separately) will be excluded
resp_excl <- resp_excl %>%
group_by(student_id) %>% ## get calculations across entire pool of questions
mutate(overall_raw_correct = sum(scored_response),
overall_num_attempted = sum(attempted),
overall_pTotal = overall_raw_correct/length(unique(resp_excl$content_item_name)), ## divide by total number of unique questions in this section
overall_pPlus = overall_raw_correct/overall_num_attempted)
resp_excl <- resp_excl %>%
group_by(activity_id_hist) %>% ## get all activity level calculations
mutate(template_raw_correct = sum(scored_response),
template_num_attempted = sum(attempted),
template_pTotal = template_raw_correct/actual_num_ques, ## total questions on a single exam across all sections
template_pPlus = template_raw_correct/template_num_attempted)
resp_excl <- resp_excl %>%
group_by(activity_id_hist, sectionName) %>% ## get all the calculations at the section level
mutate(section_num_omitted = sum(!attempted),
section_num_attempted = sum(attempted),
section_perc_attempted = section_num_attempted/section_num_ques,
section_raw_correct = sum(scored_response),
               section_num_scored = sum(is_scored),
section_pTotal = section_raw_correct/section_num_ques,
section_pPlus = section_raw_correct/section_num_attempted) %>%
ungroup()
if (is.null(min.items.per.seq)) {
cleaning_info <- print.if.verbose("No minimum item threshold provided for this qbank.", v = v, cleaning_info)
} else {
seq.below.resp.threshold <- resp_excl %>%
filter(template_num_attempted < min.items.per.seq)
seq.below.resp.threshold <- seq.below.resp.threshold$activity_id_hist ## overwrite with vector to reduce size
resp_excl <- resp_excl %>%
dplyr::filter(!(activity_id_hist %in% seq.below.resp.threshold))
print("finding the activity order")
seq_order_df <- resp_excl %>% ## calculate overall activity order after all cleaning is complete - only needed here because of qbank
ungroup() %>%
select(student_id, activity_id_hist, timestamp_created) %>%
distinct() %>%
group_by(student_id) %>%
arrange(timestamp_created) %>%
mutate(actual_activity_order = dplyr::row_number(timestamp_created))
resp_excl <- merge(resp_excl, seq_order_df)
removed.record.count(resp_excl, thing.to.say = "activities removed under the threshold of attempted questions: ")
}
for (i in seq_along(section.map$jasperSectionName)) {
output.df.list[[i+1]] <- resp_excl %>%
dplyr::filter(sectionName == section.map$jasperSectionName[i])
names(output.df.list)[i+1] <- section.map$jasperSectionName[i]
}
# names(output.df.list) <- section.map$section
cleaning_info <- print.if.verbose(paste0("Remaining number of responses in final output: ", dim(resp_excl)[1]), v, cleaning_info)
cleaning_info <- print.if.verbose(paste0("Remaining number of activities in final output: ", num_seq_current), v, cleaning_info)
cleaning_info <- print.if.verbose(paste0("Remaining number of users in final output: ", num_users_current), v, cleaning_info)
cleaning_info <- print.if.verbose(paste0("Remaining number of unique items in final output: ", num_items_current), v, cleaning_info)
cleaning_info <- print.if.verbose(paste0("Cleaning function time completed: ", Sys.time()), v, cleaning_info)
output.df.list[[1]] <- cleaning_info
names(output.df.list)[1] <- "cleaning_info"
output.df.list
} else { ## if this is NOT a qbank
if (section.separated & !is.null(section.map)) {
total_qs <- sum(section.map$section_num_ques)
output.df.list <- vector("list", max(seq_along(section.map$jasperSectionName))+1) ## stage the list to have one more element than number of sections, the first element will hold the cleaning info
# activities with less than a predetermined number of valid activity responses in each of the sections (considered separately) will be excluded
} else {
if (!is.null(test.map)) {
        total_qs <- sum(test.map$num_ques)
} else warning("No test.map or section.map")
output.df.list <- vector("list",2)
}
resp_excl <- resp_excl %>%
group_by(student_id) %>% ## get calculations across entire pool of questions
mutate(overall_raw_correct = sum(scored_response),
overall_num_attempted = sum(attempted),
overall_pTotal = overall_raw_correct/(length(unique(activity_id_hist))*total_qs) , ## divide by total number of questions expected across all tests, this only works if tests are all the same
overall_pPlus = overall_raw_correct/overall_num_attempted)
resp_excl <- resp_excl %>%
group_by(activity_id_hist) %>% ## get all activity level calculations
mutate(template_raw_correct = sum(scored_response),
template_num_attempted = sum(attempted),
template_pTotal = template_raw_correct/test_num_ques, ## total questions on a single exam across all sections
template_pPlus = template_raw_correct/template_num_attempted)
resp_excl <- resp_excl %>%
group_by(activity_id_hist, sectionName) %>% ## get all the calculations at the section level
mutate(section_num_omitted = sum(!attempted),
section_num_attempted = sum(attempted),
section_perc_attempted = section_num_attempted/section_num_ques,
section_raw_correct = sum(scored_response),
section_num_scored = sum(is_scored),
section_pTotal = section_raw_correct/section_num_ques,
section_pPlus = section_raw_correct/section_num_attempted) %>%
ungroup()
## response threshold filter - first use section map if any, otherwise use response threshold
# browser()
if (!is.null(section.map$min.items.per.seq)) {
seq.below.resp.threshold <- resp_excl %>%
merge(.,data.frame(sectionName = unlist(section.map$jasperSectionName),
min.items.per.seq = section.map$min.items.per.seq)) %>%
filter(section_num_attempted < min.items.per.seq | template_num_attempted < sum(section.map$min.items.per.seq))
seq.below.resp.threshold <- seq.below.resp.threshold$activity_id_hist ## overwrite with vector to reduce size
resp_excl <- resp_excl %>%
dplyr::filter(!(activity_id_hist %in% seq.below.resp.threshold))
removed.record.count(resp_excl, thing.to.say = "activities removed because one or more sections were under the threshold of attempted questions: ")
} else if(section.calc == TRUE) {
seq.below.resp.threshold <- resp_excl %>%
filter(section_perc_attempted < section_response_threshold)
seq.below.resp.threshold <- seq.below.resp.threshold$activity_id_hist ## overwrite with vector to reduce size
resp_excl <- resp_excl %>%
dplyr::filter(!(activity_id_hist %in% seq.below.resp.threshold))
removed.record.count(resp_excl, thing.to.say = "activities removed because one or more sections were under the threshold of attempted questions: ")
} else if(section.calc == FALSE) {
seq.below.resp.threshold <- resp_excl %>%
filter(section_perc_attempted < test_response_threshold) ## This should come from test.map, might not, idk
seq.below.resp.threshold <- seq.below.resp.threshold$activity_id_hist ## overwrite with vector to reduce size
resp_excl <- resp_excl %>%
dplyr::filter(!(activity_id_hist %in% seq.below.resp.threshold))
removed.record.count(resp_excl, thing.to.say = "activities removed because one or more sections were under the threshold of attempted questions: ")
}
if (!is.null(section.map) & section.separated) {
for (i in seq_along(section.map$jasperSectionName)) {
output.df.list[[i+1]] <- resp_excl %>%
dplyr::filter(sectionName == section.map$jasperSectionName[i])
names(output.df.list)[i+1] <- section.map$jasperSectionName[i]
}
} else {
output.df.list[[2]] <- resp_excl
names(output.df.list)[2] <- "cleaned_data"
}
# names(output.df.list) <- section.map$section
cleaning_info <- print.if.verbose(paste0("Remaining number of responses in final output: ", dim(resp_excl)[1]), v, cleaning_info)
cleaning_info <- print.if.verbose(paste0("Remaining number of activities in final output: ", num_seq_current), v, cleaning_info)
cleaning_info <- print.if.verbose(paste0("Remaining number of users in final output: ", num_users_current), v, cleaning_info)
cleaning_info <- print.if.verbose(paste0("Remaining number of unique items in final output: ", num_items_current), v, cleaning_info)
cleaning_info <- print.if.verbose(paste0("Cleaning function time completed: ", Sys.time()), v, cleaning_info)
output.df.list[[1]] <- cleaning_info
names(output.df.list)[1] <- "cleaning_info"
output.df.list
}
} else if (!is.null(test.map)) {
if(qbank == TRUE) {
output.df.list <- vector("list", 2) ## qbank assumes only one test
# activities with less than a predetermined number of valid activity responses in each of the sections (considered separately) will be excluded
resp_excl <- resp_excl %>%
group_by(activity_id_hist) %>% ## get all activity level calculations
mutate(template_raw_correct = sum(scored_response),
template_num_attempted = sum(attempted))
print("activity level sums complete")
resp_excl <- resp_excl %>%
mutate(template_pTotal = template_raw_correct/actual_num_ques, ## total questions on a single exam across all sections
template_pPlus = template_raw_correct/template_num_attempted)
print("activity level calcs complete")
if (is.null(min.items.per.seq)) {
cleaning_info <- print.if.verbose("No minimum item threshold provided for this qbank.", v = v, cleaning_info)
} else {
seq.below.resp.threshold <- resp_excl %>%
filter(template_num_attempted < min.items.per.seq)
seq.below.resp.threshold <- seq.below.resp.threshold$activity_id_hist ## overwrite with vector to reduce size
resp_excl <- resp_excl %>%
dplyr::filter(!(activity_id_hist %in% seq.below.resp.threshold))
print("finding the activity order")
seq_order_df <- resp_excl %>% ## calculate overall activity order after all cleaning is complete - only needed here because of qbank
ungroup() %>%
select(student_id, activity_id_hist, timestamp_created) %>%
distinct() %>%
group_by(student_id) %>%
arrange(timestamp_created) %>%
mutate(actual_activity_order = dplyr::row_number(timestamp_created))
resp_excl <- merge(resp_excl, seq_order_df)
removed.record.count(resp_excl, thing.to.say = "activities removed under the threshold of attempted items: ")
}
number_of_unique_CIs <- length(unique(resp_excl$content_item_name))
resp_excl <- resp_excl %>%
group_by(student_id) %>% ## get calculations across entire pool of questions
mutate(overall_raw_correct = sum(scored_response),
overall_num_attempted = sum(attempted))
print("Overall level sums complete")
resp_excl <- resp_excl %>%
mutate(overall_pTotal = overall_raw_correct/number_of_unique_CIs, ## divide by total number of unique questions in this section
overall_pPlus = overall_raw_correct/overall_num_attempted)
print("Overall level calcs complete")
print("finding the activity order")
seq_order_df <- resp_excl %>% ## calculate overall activity order after all cleaning is complete - only needed here because of qbank
ungroup() %>%
select(student_id, activity_id_hist, timestamp_created) %>%
distinct() %>%
group_by(student_id) %>%
arrange(timestamp_created) %>%
mutate(actual_activity_order = dplyr::row_number(timestamp_created))
resp_excl <- merge(resp_excl, seq_order_df)
output.df.list[[2]] <- resp_excl
names(output.df.list)[2] <- "cleaned_data"
# names(output.df.list) <- section.map$section
cleaning_info <- print.if.verbose(paste0("Remaining number of responses in final output: ", dim(resp_excl)[1]), v, cleaning_info)
cleaning_info <- print.if.verbose(paste0("Remaining number of activities in final output: ", num_seq_current), v, cleaning_info)
cleaning_info <- print.if.verbose(paste0("Remaining number of users in final output: ", num_users_current), v, cleaning_info)
cleaning_info <- print.if.verbose(paste0("Remaining number of unique items in final output: ", num_items_current), v, cleaning_info)
cleaning_info <- print.if.verbose(paste0("Cleaning function time completed: ", Sys.time()), v, cleaning_info)
output.df.list[[1]] <- cleaning_info
names(output.df.list)[1] <- "cleaning_info"
output.df.list
} else if (qbank == FALSE) {
# activities with less than a predetermined number of valid responses will be excluded
resp_excl <- resp_excl %>%
group_by(activity_id_hist) %>%
mutate(template_num_omitted = sum(!attempted),
template_num_attempted = sum(attempted),
template_perc_attempted = template_num_attempted/test_num_ques,
template_raw_correct = sum(scored_response),
template_pTotal = template_raw_correct/test_num_ques,
template_pPlus = template_raw_correct/template_num_attempted) %>%
filter(template_perc_attempted >= test_response_threshold) %>%
ungroup()
removed.record.count(resp_excl, thing.to.say = "Number of activities below response attempt threshold, removed: ")
# resp_excl <- resp_excl %>%
# group_by(student_id) %>% ## get calculations across entire pool of questions
# mutate(overall_raw_correct = sum(scored_response),
# overall_num_attempted = sum(attempted),
# overall_pTotal = overall_raw_correct/length(unique(resp_excl$content_item_name)), ## divide by total number of unique questions in this section
# overall_pPlus = overall_raw_correct/overall_num_attempted)
# seq_order_df <- resp_excl %>% ## calculate overall activity order after all cleaning is complete - only needed here because of qbank
# ungroup() %>%
# select(student_id, activity_id_hist, timestamp_created) %>%
# distinct() %>%
# group_by(student_id) %>%
# arrange(timestamp_created) %>%
# mutate(actual_activity_order = dplyr::row_number(timestamp_created))
# resp_excl <- merge(resp_excl, seq_order_df)
cleaning_info <- print.if.verbose(paste0("Remaining number of responses in final output: ", dim(resp_excl)[1]), v, cleaning_info)
cleaning_info <- print.if.verbose(paste0("Remaining number of activities in final output: ", num_seq_current), v, cleaning_info)
cleaning_info <- print.if.verbose(paste0("Remaining number of users in final output: ", num_users_current), v, cleaning_info)
cleaning_info <- print.if.verbose(paste0("Remaining number of unique items in final output: ", num_items_current), v, cleaning_info)
cleaning_info <- print.if.verbose(paste0("Cleaning function time completed: ", Sys.time()), v, cleaning_info)
output.df.list <- list(cleaning_info, resp_excl)
names(output.df.list) <- c("cleaning_info",analysis.name)
output.df.list
} else warning("Parameter qbank was not true or false? somehow?")
} else warning("No section.map or test.map!")
}
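## Output shape (both map branches): a named list whose first element is
## "cleaning_info" and whose remaining element(s) hold the cleaned response
## data, split per section when section.separated is TRUE.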
|
eef3ed1d990e5097942bd4a674957d91593ff358 | bb86b99ddc0695884cfa7fbe099165433ddab38f | /tests/testthat.R | 588aecc1935223d8dbc16bb2140ac2848a954be7 | [] | no_license | rpruim/WestMIR | 71a50a6e93b6373bb1766edd8cdd875ba0c5916a | 6980dabd85e76c384769017a4a478863b3bc8855 | refs/heads/master | 2020-03-21T01:37:30.204154 | 2018-06-19T23:47:22 | 2018-06-19T23:47:22 | 137,952,002 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 58 | r | testthat.R | library(testthat)
library(WestMIR)
test_check("WestMIR")
|
c1019a9b34629bbcbc4051194699e2ae35739c7e | fc4cd95087e62913ca6f06f3921028d9520c3993 | /slides/lecture20_oop.R | 25de2ebcfa5d81b663e221528d315f6b8ed25f9e | [] | no_license | couthcommander/Bios6301 | b53a0e16ccae3433afdfc196490ab6795fbf83b1 | 06eb0e5d0da151b74de47ffae24c2a0f4ab04ffb | refs/heads/main | 2022-10-19T10:33:10.069916 | 2022-09-22T15:11:57 | 2022-09-22T15:11:57 | 200,882,515 | 8 | 9 | null | null | null | null | UTF-8 | R | false | false | 3,826 | r | lecture20_oop.R | class(1)
x <- 1:3
y <- x^2
lmout <- lm(y ~ x)
class(lmout)
unclass(lmout)
lmout
methods(print)
print.lm
stats:::print.lm
getAnywhere(print.lm)
methods(class = "lm")
plot(lmout)
lmoutsum <- summary(lmout)
coef(lmout)
coef(lmoutsum)
methods(coef)
getAnywhere(coef.default)
# make a new class
j <- list(name="Joe", salary=55000, union=TRUE)
class(j) <- 'employee'
attributes(j)
print.employee <- function(wrkr) {
cat(sprintf("name: %s\nsalary: %s\nunion member: %s",
wrkr$name, wrkr$salary, wrkr$union), "\n")
}
methods(class='employee')
j
print.default(j)
# class with inheritance
k <- list(name="Kate", salary=NA, union=FALSE, rate=10.50, hrs_this_month=2)
class(k) <- c('hourly_employee', 'employee')
inherits(k, 'employee')
k
# make a new method
pvalue <- function(x) {
UseMethod("pvalue")
}
pvalue.default <- function(x) {
if('p.value' %in% names(x)) return(x$p.value)
stop('no p.value for this object')
}
pvalue.summary.lm <- function(x) {
cv <- coef(x)
cv[,ncol(cv)]
}
pvalue(lmoutsum)
pvalue(t.test(rnorm(100)))
pvalue(1:10)
# complete example with attributes
set.seed(1)
n <- 60
x <- seq(n)/n
y <- sin((3*pi/2)*x) + x^2 + rnorm(n, mean=0, sd=0.5)
# fit polynomial of degree D to these points
polyfit <- function(y, x, maxdeg) {
pwrs <- outer(x, seq(maxdeg), "^")
lmout <- vector('list', maxdeg)
attributes(lmout) <- list(degrees=maxdeg)
class(lmout) <- 'polyreg'
for(i in seq(maxdeg)) {
lmo <- lm(y ~ pwrs[,seq(i)])
lmo$fitted.cvvalues <- leave_one_out(y, pwrs[,seq(i),drop=FALSE])
lmout[[i]] <- lmo
}
lmout$x <- x
lmout$y <- y
lmout
}
leave_one_out <- function(y, xmat) {
n <- length(y)
pred_y <- numeric(n)
for(i in seq(n)) {
lmo <- lm(y[-i] ~ xmat[-i,])
beta_hat <- unname(coef(lmo))
pred_y[i] <- beta_hat %*% c(1, xmat[i,])
}
pred_y
}
print.polyreg <- function(fits) {
maxdeg <- attr(fits, 'degrees')
n <- length(fits$y)
tbl <- matrix(nrow=maxdeg, ncol=1)
# mean squared prediction error
colnames(tbl) <- "MSPE"
for(i in seq(maxdeg)) {
fi <- fits[[i]]
errs <- fits$y - fi$fitted.cvvalues
spe <- crossprod(errs, errs)
tbl[i,1] <- spe/n
}
print(tbl)
}
dg <- 15
lmo <- polyfit(y, x, dg)
lmo
plot.polyreg <- function(fits) {
maxdeg <- attr(fits, 'degrees')
cf <- coef(fits[[maxdeg]])
cf[is.na(cf)] <- 0
f <- function(x) sum(cf*x^seq(0,maxdeg))
x1 <- seq(min(fits$x), max(fits$x), length.out=500)
y1 <- sapply(x1, f)
plot(fits$x, fits$y)
par(new=TRUE)
  plot(x1, y1, type='l', axes=FALSE, xlab='', ylab='') # overlay; par(new=TRUE) above keeps the first plot
}
plot(lmo)
setClass("fun", representation(f="function", x="numeric", y="numeric"))
f <- function(x) sin((3*pi/2)*x) + x^2 + rnorm(length(x), mean=0, sd=0.5)
f1 <- new("fun", f=f, x=seq(0,10,by=0.1))
f1@y <- f1@f(f1@x)
plot(f1@x, f1@y, type='l', xlab='x', ylab='y',
main=sprintf("f(x) = %s", capture.output(body(f1@f))))
setMethod("initialize", "fun",
function(.Object, f=expression, x=numeric(0), y=numeric(0), seed=1) {
.Object@f <- f
if(length(x) == 0) x<-seq(0,10)
.Object@x <- x
set.seed(seed)
.Object@y <- f(x)
.Object
})
f2 <- new("fun", f=f)
fun <- function(...) {
new("fun", ...)
}
f3 <- fun(f=f, x=seq(0,10,by=0.1))
setMethod("plot", signature(x="fun", y="missing"), function(x,...) {
plot(x@x, x@y, type='l', xlab='x', ylab='y',
main=sprintf("f(x) = %s", capture.output(body(f1@f))), ...)
})
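# A quick dispatch check for the S4 plot method defined above
plot(f3)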
Tired <- setRefClass("Tired",
fields = list(speech='character'),
methods = list(show=function() {
zs <- paste(rep('z', sample(10, 1)), collapse='')
print(sprintf("%s... %s", speech, zs))
})
)
t <- Tired$new(speech="We're talking about practice")
|
82e7a1fd0ba4d8f82570c24372ffaccb1332d3c9 | 179fc537e7ef643a976ad5673a4729808807e6ee | /term_01/week05/linmodeld_week05.R | 792fa46332201edd2b286dd45c520944b639c467 | [] | no_license | jenniferp1/Foundations_of_Statistics | 3a1ae155b365a5dacd86eb32c3a2a8da18b8acad | 67e33653792046f88637cc4dc0347a42388e66fc | refs/heads/master | 2020-11-28T02:24:31.690847 | 2019-12-23T17:30:38 | 2019-12-23T17:30:38 | 229,679,944 | 1 | 0 | null | null | null | null | UTF-8 | R | false | false | 627 | r | linmodeld_week05.R |
library(SDSFoundations)
#cor() describes the strength of the relationship between the independent
#variable (x) and the dependent variable (y)
#linFit() expands on the correlation coefficient:
#it shows what the relationship looks like in terms of the
#actual variables involved - the specific input and output
#the model is defined as a function
statepop = c(35,8,13,64,13,87,193,124,11,6)
millionaires = c(86,18,22,141,26,207,368,228,20,11)
plot(statepop,millionaires)
cor(statepop,millionaires)
linFit(statepop,millionaires)
WR <- WorldRecords
View(WR)
mens800 <- WR[WR$Event=='Mens 800m',]
linFit(mens800$Year,mens800$Record)
|
191d34e8ed8ba528ecd85f8c5f89e732b2297e20 | b8b0b016c3ab270450a41d0f78620a5aa612a347 | /IWESEP2016/bubble_chart.r | 892238e79ecb65fc441d11715a22946d7faf18b2 | [
"MIT"
] | permissive | hideshis/scripts_for_research | 5fc232a5ec73811ece9d7d39ad619f52f2fabf94 | f633bdef0f9b959d7b18c8b95f169306eb8bb50d | refs/heads/master | 2020-04-15T22:03:36.481512 | 2016-10-12T06:44:37 | 2016-10-12T06:44:37 | 34,490,154 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 830 | r | bubble_chart.r | hoge <- read.csv("synthesized_info.csv")
dead <- subset(hoge, status == "dead")
arrive <- subset(hoge, status == "arrive")
library(ggplot2)
# Bubble chart: lifetime vs. degree of co-evolution, sized by average bug count
# (the stray xlab("lifetime") argument to ggplot() was ignored and is dropped)
ggplot(data = hoge, aes(x = lifetime, y = co.evolution.rate, colour = status)) +
  geom_point(aes(size = average.bug, alpha = .5)) +
  scale_size_continuous(range = c(2, 10)) +
  labs(size = "size", x = "lifetime", y = "degree of co-evolution",
       alpha = "alpha", colour = "status")
# Same chart with median reference lines added
ggplot(data = hoge, aes(x = lifetime, y = co.evolution.rate, colour = status)) +
  geom_point(aes(size = average.bug, alpha = .5)) +
  scale_size_continuous(range = c(2, 10)) +
  labs(size = "size", x = "lifetime", y = "degree of co-evolution",
       alpha = "alpha", colour = "status") +
  geom_vline(xintercept = median(hoge$lifetime)) +
  geom_hline(yintercept = median(hoge$co.evolution.rate))
|
f59484507857a3010bdcad1be3b76476afe5701d | 13dfdb1b95a769b214c10608459a2f4687d5fba3 | /man/run_monocle.Rd | fce9f8dd95373923220999ef54ffa01600af66d3 | [
"MIT"
] | permissive | kwells4/mtec.10x.pipeline | 79e4c9267f70af6a793e48502070f35cee74c1e5 | 19a7fdcc3a6ba4f3beda7af06cf925a3b6b97369 | refs/heads/master | 2023-01-28T00:57:24.550595 | 2019-04-03T17:22:10 | 2019-04-03T17:22:10 | 179,329,659 | 0 | 0 | null | null | null | null | UTF-8 | R | false | true | 1,118 | rd | run_monocle.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/pseudotime.R
\name{run_monocle}
\alias{run_monocle}
\title{Runs monocle on cells}
\usage{
run_monocle(mtec, quality_plots = FALSE, cores = 1, seed = 0)
}
\arguments{
\item{mtec}{a Seurat object}
\item{quality_plots}{OPTIONAL if quality plots (including density plot of
expression values, PC variance explained, and tSNE coloured by monocle and
seurat clusters) should be plotted. Defaults to FALSE.}
\item{cores}{OPTIONAL the number of cores to use when running
differentialGeneTest. very slow when run on one core. Defaults to 1.}
\item{seed}{OPTIONAL seed for reproducibility. Defaults to 0}
}
\description{
This function allows you to run monocle on your samples. It will also add the
monocle output into the seurat object. While this can be run on a local
machine, I recommend only running it on a cluster with cores set to at least
10. If you only have a local machine, just use the mtec_trace object, as it
already includes the data from monocle.
}
\examples{
\dontrun{
run_monocle(mTEC.10x.data::mtec_trace, cores = 10)
}
}
\keyword{monocle}
|
f4ff3a7d54b38130bbdb6cf0a54838ce1f39cc14 | 6936f25f3f1f5078ebffd995a3799e0537f484da | /EDA/Z_Test.R | 27636bbf7f74eb0bd3955c51a0f9fdbf9336754f | [] | no_license | Raghavaw/Mental_Health | 8f6c47fb66055ec6cb7f999a14ee21bc49c337aa | 66b3e5fbc978c62c52a68c6858745b173b7d3862 | refs/heads/main | 2023-03-28T15:46:43.762551 | 2021-04-04T12:38:42 | 2021-04-04T12:38:42 | 354,531,630 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 651 | r | Z_Test.R |
setwd('/Users/raghavawasthi/Desktop/COVID19/MentalHealth/Analysis/Cleand_Data/')
library(dplyr)
library(tidyr)
df = read.csv('Aprl_May_DataforBN.csv')
########## Gender ############
### Hypothesis: women are more stressed than men
Tbl = data.frame(table(df$GENDER,df$SOC5A))
table(df$GENDER)
res <- prop.test(x = c(5434,5629), n = c(9868 ,7886))
print(res)
################# Age group
Tbl = data.frame(table(df$AGE4,df$SOC5A))
Tbl = dplyr::filter(Tbl,Tbl$Var2 == '(4) 5-7 days')
Tbl$Var2 = NULL
tb= data.frame(table(df$AGE4))
res <- prop.test(x =Tbl$Freq[-5],tb$Freq[-5])
linearTrend = prop.trend.test(x =Tbl$Freq[-5],tb$Freq[-5])
print(res)
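## Interpretation note: prop.test compares the two attempted proportions
## directly, while prop.trend.test checks for a monotone trend in the
## proportion across the ordered age groups.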
|
b138e01135f06953e95423c1fb37af225c3a0596 | b97c41a569011b73f60fa311eba730aee6a633f6 | /man/eikos_y_labels.Rd | a3bda1a0ad39210e0f67a7fb01365ec55ca2cfae | [] | no_license | rwoldford/eikosograms | 10599388af9cd64de2225f4efbfd943e52591613 | 55b2b1b39b6296a747ebc2089e46316fde321dac | refs/heads/master | 2020-03-24T14:51:09.984366 | 2019-07-20T15:30:05 | 2019-07-20T15:30:05 | 142,778,661 | 4 | 0 | null | null | null | null | UTF-8 | R | false | true | 856 | rd | eikos_y_labels.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/eikos_labels.R
\name{eikos_y_labels}
\alias{eikos_y_labels}
\title{eikos helper function. Returns grob with y axis labels.}
\usage{
eikos_y_labels(y, data, margin = unit(2, "points"), yname_size = 12,
yvals_size = 10, lab_rot = 0)
}
\arguments{
\item{y}{response variable}
\item{data}{data frame from eikos_data.}
\item{margin}{unit specifying margin}
\item{yname_size}{font size for y axis variable names (in points)}
\item{yvals_size}{font size of labels for values of y variable (in points)}
\item{lab_rot}{integer indicating the rotation of the label, default is horizontal}
}
\value{
grobFrame with response variable labels and axis text
}
\description{
eikos helper function. Returns grob with y axis labels.
}
|
01c872db1fc5575186a672d379c47da575c3bdb0 | 7f0ad73a929ed6dcc4e7367ada2f02444125ed8e | /rprog-data-ProgAssignment3-data/rankall.R | bd8eb330dcc158500d6f9e35433fd3ed632f2a97 | [] | no_license | Macking/PlantsViewer | 98a5f3b47200bf1f1133f90b8f43927c92812b3a | 9170b32279fa44f5aaca71b2b819f1b923be1b6c | refs/heads/master | 2021-01-17T11:58:59.154605 | 2014-09-18T14:16:41 | 2014-09-18T14:16:41 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,372 | r | rankall.R | rankall <- function(outcome, num = "best") {
## Read outcome data
data <- read.csv("outcome-of-care-measures.csv",colClasses = "character")
## Check that state and outcome are valid
if(!(outcome %in% c("heart attack","heart failure","pneumonia")))
stop("invalid outcome")
if(!(num == "best" | num == "worst" | !is.na(as.numeric(num))))
stop("invalid num")
attackState1 <- subset(data,Hospital.30.Day.Death..Mortality..Rates.from.Heart.Attack!= "Not Available")
attackState2 <- subset(attackState1,Hospital.30.Day.Death..Mortality..Rates.from.Heart.Failure!= "Not Available")
attackState3 <- subset(attackState2,Hospital.30.Day.Death..Mortality..Rates.from.Pneumonia!= "Not Available")
keeps <- c("Hospital.Name","State","Hospital.30.Day.Death..Mortality..Rates.from.Heart.Attack","Hospital.30.Day.Death..Mortality..Rates.from.Heart.Failure","Hospital.30.Day.Death..Mortality..Rates.from.Pneumonia")
attack <- attackState3[keeps]
attack[,3] <- as.numeric(attack[,3])
attack[,4] <- as.numeric(attack[,4])
attack[,5] <- as.numeric(attack[,5])
colnames(attack) <- c("hospital","state","heartattack","heartfailure","pneumonia")
        ## Map the requested outcome to the matching column of 'attack'
        col <- switch(outcome,
                      "heart attack" = "heartattack",
                      "heart failure" = "heartfailure",
                      "pneumonia" = "pneumonia")
        ## For each state, find the hospital of the given rank
        ## (ties in the outcome rate are broken alphabetically by hospital name)
        ranked <- lapply(split(attack, attack$state), function(d) {
                d <- d[order(d[[col]], d$hospital), ]
                i <- if (num == "best") 1 else if (num == "worst") nrow(d) else as.numeric(num)
                hosp <- if (i >= 1 && i <= nrow(d)) d$hospital[i] else NA_character_
                data.frame(hospital = hosp, state = d$state[1], stringsAsFactors = FALSE)
        })
        ## Return a data frame with the hospital names and the
        ## (abbreviated) state name
        do.call(rbind, ranked)
} |
681490e978a879e6f3e6c9150dd0d9c19a07e1d9 | 962bbfcf46ea78970195b1434817c8c9840dc910 | /ps4/benchSubsetting.R | 77703f38b69dab3e9dd305d434ca74f9339a9da2 | [] | no_license | eyedvabny/berkeley-statistical-computing | 4bbd91b516753399f0555dd681e1a0b7a3bca4a9 | 2f73c4c857d315e07a58e642ad6d08b2dd9c8721 | refs/heads/master | 2021-01-22T10:22:20.482273 | 2014-12-14T06:20:15 | 2014-12-14T06:20:15 | 23,902,471 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,130 | r | benchSubsetting.R | ##---- benchSubsetting ----
library(dplyr)
library(microbenchmark)
# The following function benchmarks vector subsetting for numeric and string vectors
# The input parameter is the length of the vectors to be subset
benchSubsetting <- function(length){
# Create a random vector of numbers
test.nums <- runif(length)
# Create a random vector of strings (of 10 characters)
test.strs <- replicate(length,
paste(sample(letters, 10, replace = T),
collapse=''))
# Create an accessing boolean vector (skewed probs favor smaller subsets)
test.bool <- sample(c(T,F), length, replace=T, prob=c(0.3,0.7))
  # Create an integer index corresponding to the same items as above
test.index <- which(test.bool)
# Run the test
test.timing <- microbenchmark(
nums.ints = test.nums[test.index],
nums.bools = test.nums[test.bool],
strs.ints = test.strs[test.index],
strs.bools = test.strs[test.bool]
)
  # Summarize using the median timing per expression
test.timing %>%
group_by(expr) %>%
summarize(median.time = median(time)) %>%
mutate(length = length)
}
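# Usage sketch (lengths are illustrative, not from the assignment):
# results <- dplyr::bind_rows(lapply(c(1e3, 1e4, 1e5), benchSubsetting))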
|
9c1301ac0eaef5b895aaa6a0cbb409f2b560d454 | 70a6150f46cb238dda699167c9af67b5641f4792 | /R/gbm1.R | f4901204458978a953fad4f225618b3e9a17f019 | [] | no_license | SherryFu0315/Click-Fraud | 72507851ec7ac621804865121a69b8f5e1aa54ff | bc2f10e071c1e5f0cc76d9e7abc69a3770acffbd | refs/heads/master | 2020-03-09T00:13:04.617109 | 2019-05-26T21:15:59 | 2019-05-26T21:15:59 | 128,482,409 | 0 | 0 | null | 2018-04-07T00:13:45 | 2018-04-07T00:13:44 | null | UTF-8 | R | false | false | 3,833 | r | gbm1.R | require(gbm)
require(dplyr)
library(RCurl)
require(foreign)
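# CalculateAveragePrecision: an average-precision-style score. Rows are sorted
# by the submitted score (descending) and the running sum of true labels is
# compared, position by position, against the ideal (label-sorted) ordering.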
CalculateAveragePrecision <- function(expectedColumn, submittedColumn) {
df <- data.frame(expectedBySubmitted = expectedColumn, submitted = submittedColumn)
print(df)
df <- df[order(df$submitted, decreasing = T),]
df[, "expectedByExpected"] =sort(expectedColumn, decreasing = T)
totalNumerator = 0.0;
runningNumeratorExpected = 0.0;
runningNumeratorActual = 0.0;
print(df)
for (i in 1:nrow(df)) {
runningNumeratorExpected = runningNumeratorExpected + df$expectedByExpected[i]
runningNumeratorActual = runningNumeratorActual + df$expectedBySubmitted[i]
division = runningNumeratorActual/runningNumeratorExpected;
totalNumerator = totalNumerator + division;
}
result = totalNumerator / nrow(df)
result
}
train=read.arff(file = "final_120_train_w_labels.arff")
validation=read.arff(file ="final_120_validation_w_labels.arff")
test=read.arff(file = "final_120_test_w_labels.arff")
head(train)
summary(train)
train_status=train$status
print(train_status)
train=select(train, -status)
end_train=nrow(train)
validation_status=validation$status
print(validation_status)
validation=select(validation, -status)
end_validation=nrow(validation)
test_status=test$status
print(test_status)
test=select(test, -status)
end_test=nrow(test)
all=rbind(train,validation)
end_all=nrow(all)
head(all)
ntrees=5000
?gbm
train_status=as.numeric(train_status)-1
validation_status=as.numeric(validation_status)-1
test_status=as.numeric(test_status)-1
#print(train_status)
model=gbm.fit(
x=all[1:end_train,]
, y=train_status
, distribution="bernoulli"
, n.trees=ntrees
, shrinkage=0.001
, interaction.depth=5
, n.minobsinnode=5
)
summary(model)
gbm.perf(model)
#pretty.gbm.tree(model)
for(i in 1:length(model$var.names)){
plot(model, i.var=i
       , n.trees=ntrees
, type="response"
)
}
ValidationPredictions=predict(object=model,newdata=all[(end_train+1):end_all,]
, n.trees=ntrees
, type="response")
TestPredictions=predict(object=model,newdata=test[1:end_test,]
, n.trees=ntrees
, type="response")
TrainPredictions=predict(object=model,newdata=all[1:end_train,]
, n.trees=ntrees
, type="response")
#CalculateAveragePrecision(validation_status, ValidationPredictions)
TestPredictions=round(TestPredictions)
TrainPredictions=round(TrainPredictions)
ValidationPredictions=round(ValidationPredictions)
gbm.roc.area(validation_status,ValidationPredictions)
gbm.roc.area(test_status,TestPredictions)
#head(TestPredictions,n=300)
#head(validation_status, n=300)
library(SDMTools)
confusion.matrix(validation_status,ValidationPredictions,0.5)
confusion.matrix(test_status,TestPredictions,0.5)
#print(conf)
library(caret)
conf<-table(ValidationPredictions,validation_status)
confusionMatrix(conf)
conf<-table(TestPredictions,test_status)
confusionMatrix(conf)
library(Metrics)
?apk
ValidationPredictions=as.vector(ValidationPredictions)
TestPredictions=as.vector(TestPredictions)
validation_status=as.vector(validation_status)
test_status=as.vector(test_status)
apk(end_validation,validation_status,ValidationPredictions)
apk(end_test,test_status,TestPredictions)
mapk(end_validation,validation_status,ValidationPredictions)
mapk(end_test,test_status,TestPredictions)
CalculateAveragePrecision(validation_status, ValidationPredictions)
CalculateAveragePrecision(test_status, TestPredictions)
#submission
submission=data.frame(y_test=TestPredictions)
write.csv(submission,file="y_test.csv", row.names=FALSE)
submission=data.frame(y_validation=ValidationPredictions)
write.csv(submission,file="y_validation.csv", row.names=FALSE)
|
77f1f3e15665ac39fdbfe69f3dc734d90d89d2ec | df71fd15183d51e233fd380b5b15689af7575f88 | /cachematrix.R | 8abd79abd3f31628c9fe8e79e7aae44ea0568688 | [] | no_license | JosemiCan/ProgrammingAssignment2 | fea9f1b743796d73e04867e299d5d5e84c4e403c | b6d1d9d357c7aeb5e4b2f3de28db9b311ba4ca38 | refs/heads/master | 2020-03-28T09:35:57.729273 | 2018-09-09T16:47:21 | 2018-09-09T16:47:21 | 148,045,471 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 828 | r | cachematrix.R | ## the functions below cache the inverse of a matrix
## the first function creates a matrix object that caches its inverse
makeCacheMatrix<-function(m=matrix())
{
n<-NULL
set<-function(x)
{ m<<-x
n<<-NULL }
get<-function()m
setinverse<-function(inverse) n <<-inverse
getinverse<-function() n
list(set=set,get=get,setinverse=setinverse,getinverse=getinverse)}
## the second function computes the inverse of the matrix returned by the other function
## If the inverse has already been calculated (and the matrix has not changed), then cacheSolve retrieves the inverse from the cache.
cacheSolve<-function(m,...)
{
n<-m$getinverse()
if(!is.null(n))
{ message("getting cached data")
return(n) }
data<-m$get()
n<-solve(data) # solve(data) alone is the inverse; solve(data)%*%data would give the identity
m$setinverse(n)
n }
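## Example usage (hypothetical 2x2 matrix):
## m <- makeCacheMatrix(matrix(c(2, 0, 0, 2), 2, 2))
## cacheSolve(m)  # computes the inverse and caches it
## cacheSolve(m)  # second call prints "getting cached data"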
|
b822232c98f5eedc6bfa50299f4e113fef5b4207 | fb157ec338c0cf5425ae7ccfa16173f2dfa341ec | /R/mse_acq_rate.R | fef1ce24ab4cac7a990e31101ac395d3abe7cc5e | [
"Apache-2.0"
] | permissive | weinbergerlab/carriage-simulation | 626bfc92ac8872fced16724bb02d656269edbbef | 0190bfe0921e5e9759ad926839e77bd9f6226b75 | refs/heads/master | 2022-06-20T02:51:14.232016 | 2022-05-31T21:17:48 | 2022-05-31T21:17:48 | 152,311,839 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 485 | r | mse_acq_rate.R | mse_acq_rate<-function(ds,set.acq.rate,set.ses.effect){
ds1a<- lapply(ds,function(x) return_transition_prob(x) )
ds2.lo<- lapply(ds1a, function(x) x[x$ses=='Low',1:3])
ds2.lo<- do.call(rbind, ds2.lo)
ds2.hi<- lapply(ds1a, function(x) x[x$ses=='high',1:3])
ds2.hi<- do.call(rbind, ds2.hi)
mse.lo<- mean( (ds2.lo$median - set.acq.rate)^2)
mse.hi<- mean( (ds2.hi$median - set.acq.rate*1/set.ses.effect)^2)
mse.combo<-c(mse.lo,mse.hi)
return(mse.combo)
}
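# Usage sketch (assumed input shape): `ds` is a list of simulation runs that
# return_transition_prob() accepts; the function returns c(mse.low, mse.high),
# the mean squared error of the median acquisition rates against the target
# rate for the low- and high-SES groups.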
|
4291a57f1ff08a0d708250fbbe6f3503aa342d10 | 0471999fce7bfcba220ae361a843b9fc69af53e7 | /R/swagger_args.R | bcb717c56969fdb90de20d70154fd297bfa7a23d | [
"MIT"
] | permissive | karthik/stevedore | e25d0c1fb9073de4979a22c69e92acb54d7ab2d6 | 8d12d3a02a211557ff264780a17a4789604ee40e | refs/heads/master | 2020-03-19T10:06:51.132342 | 2018-06-06T15:03:29 | 2018-06-06T15:03:29 | 136,344,185 | 0 | 0 | null | 2018-06-06T14:54:38 | 2018-06-06T14:54:37 | null | UTF-8 | R | false | false | 10,233 | r | swagger_args.R | ## Convert a specification for an endpoint into an R function that o
swagger_args <- function(method, path, x, handlers, types, spec) {
args <- swagger_args_parse(method, path, x, spec)
help <- swagger_args_help(x, args, handlers)
list(help = help,
handler = swagger_args_handler(args, handlers, types))
}
swagger_args_parse <- function(method, path, x, spec) {
args <- x$parameters
if (is.null(args)) {
return(NULL)
}
args_in <- vcapply(args, "[[", "in")
is_body <- args_in == "body"
if (any(is_body)) {
stopifnot(sum(is_body) == 1L)
i_body <- which(is_body)
body <- args[[i_body]]
body$schema <- resolve_schema_ref(body$schema, spec)
if (body$schema$type == "object") {
description <- tolower1(body$description) %||% "request body"
to_par <- function(x) {
el <- resolve_schema_ref(body$schema$properties[[x]], spec)
el$description <- el$description %||% paste("For", description)
c(list(name = x, "in" = "body"), el)
}
args_body <- lapply(names(body$schema$properties), to_par)
i1 <- seq_len(i_body - 1L)
i2 <- setdiff(seq_along(args), c(i1, i_body))
args <- c(args[i1], args_body, args[i2])
args_in <- c(args_in[i1], rep("body", length(args_body)), args_in[i2])
body_type <- "combine"
} else {
## here, body$schema$type == "string"
body_type <- "single"
p <- args[[i_body]]
args[[i_body]] <- c(p[names(p) != "schema"], p$schema)
}
} else {
body_type <- NULL
}
args_name <- vcapply(args, "[[", "name")
args_name_r <- args_name
args_name_r[args_in == "header"] <-
x_kebab_to_snake(args_name[args_in == "header"])
args_name_r <- pascal_to_snake_cached(args_name_r)
for (i in seq_along(args)) {
args[[i]]$name_r <- args_name_r[[i]]
args[[i]] <- resolve_schema_ref(args[[i]], spec)
}
if (any(duplicated(args_name)) || any(duplicated(args_name_r))) {
stop("fix duplicated names") # nocov [stevedore bug]
}
stopifnot(identical(args_name[args_in == "path"],
swagger_path_parse(path)$args))
i <- match(args_in, c("path", "body", "query", "header"))
stopifnot(all(!is.na(i)))
args_req <- vlapply(args, function(x) isTRUE(x$required))
args <- args[order(!args_req, i)]
attr(args, "body_type") <- body_type
args
}
swagger_args_handler <- function(args, handlers, types) {
## All the stopifnot bits are assertions that have more to do with
  ## making sure that the spec conforms to what we are expecting.
## They'd probably be better done with debugme because I don't think
## they should be run by users.
dest <- quote(dest)
env <- new.env(parent = parent.env(environment()))
if (!is.null(handlers)) {
stopifnot(names(handlers) %in% vcapply(args, "[[", "name_r"))
handler_fns <- lapply(handlers, function(x) types[[x]]$handler)
names(handler_fns) <- handler_name(names(handler_fns))
list2env(handler_fns, env)
handlers[] <- names(handler_fns)
}
body_type <- attr(args, "body_type")
if (is.null(body_type)) {
fbody_body_combine <- NULL
} else {
if (body_type == "combine") {
fbody_body_combine <-
as_call(quote(jsonlite::toJSON), dollar(dest, quote(body)))
} else if (body_type == "single") {
## We'd be better off doing this within the core body function
## probably but that requires a bit of faff.
nm <- as.symbol(args[[which(vcapply(args, "[[", "in") == "body")]]$name)
fbody_body_combine <- dollar(dest, quote(body), nm)
}
fbody_body_combine <- bquote(
.(dollar(dest, quote(body))) <- .(fbody_body_combine))
}
fbody_collect <- lapply(args, swagger_arg_collect, dest, handlers)
fbody <- c(quote(`{`),
bquote(.(dest) <- list()),
fbody_collect,
fbody_body_combine,
dest)
args_optional <- !vlapply(args, function(x) isTRUE(x$required))
args_name_r <- vcapply(args, "[[", "name_r")
a <- rep(alist(. =, . = NULL), c(sum(!args_optional), sum(args_optional)))
names(a) <- args_name_r
as.function(c(a, as.call(fbody)), env)
}
## The actual argument collectors (used only in this file)
swagger_arg_collect <- function(p, dest, handlers) {
switch(p[["in"]],
path = swagger_arg_collect_path(p, dest),
query = swagger_arg_collect_query(p, dest),
body = swagger_arg_collect_body(p, dest, handlers),
header = swagger_arg_collect_header(p, dest),
stop("assertion error"))
}
swagger_arg_collect_path <- function(p, dest) {
if (!isTRUE(p$required)) {
stop("all path parameters assumed required") # nocov [stevedore bug]
}
rhs <- as_call(quote(assert_scalar_character), as.symbol(p$name_r))
lhs <- dollar(dest, quote(path), as.symbol(p$name))
as_call(quote(`<-`), lhs, rhs)
}
## some of the 'query' bits within here must change - we might need to
## construct different validators depending on what sort of input
## we're getting? It might be better to realise that avoiding
## duplication here is just making this function worse, not better!
swagger_arg_collect_query <- function(p, dest) {
type <- p$type
stopifnot(length(type) == 1L)
if (type == "boolean") {
validate <- quote(assert_scalar_logical)
} else if (type == "integer") {
validate <- quote(assert_scalar_integer)
} else if (type == "string") {
if (isTRUE(p$multiple)) {
validate <- quote(assert_nonempty_character)
} else {
validate <- quote(assert_scalar_character)
}
} else if (type == "array") {
stop("Unknown query type") # nocov [stevedore bug]
} else {
stop("Unknown query type") # nocov [stevedore bug]
}
nm <- as.symbol(p$name)
nm_r <- as.symbol(p$name_r)
rhs <- as_call(validate, nm_r)
lhs <- dollar(dest, quote(query), nm)
expr <- as_call(quote(`<-`), lhs, rhs)
if (!isTRUE(p$required)) {
expr <- bquote(if (!is.null(.(nm_r))) .(expr))
}
expr
}
## This is really similar to above but not *that* similar really -
## when combined they're clumsy and hard to reason about.
swagger_arg_collect_body <- function(p, dest, handlers) {
type <- p$type
if (p$name_r %in% names(handlers)) {
is_scalar <- FALSE
validate <- as.name(handlers[[p$name_r]])
} else if (setequal(type, c("array", "string"))) {
is_scalar <- FALSE
validate <- quote(as_body_array_string)
} else if (type == "boolean") {
validate <- quote(assert_scalar_logical)
is_scalar <- TRUE
} else if (type == "integer") {
validate <- quote(assert_scalar_integer)
is_scalar <- TRUE
} else if (type == "string") {
if (identical(p$format, "binary")) {
validate <- quote(assert_raw)
is_scalar <- FALSE
} else {
validate <- quote(assert_scalar_character)
is_scalar <- TRUE
}
} else if (type == "array") {
if (identical(p$items$type, "string")) {
## Env, OnBuild Shell, Cmd, DeviceCgroupRules
validate <- quote(assert_character)
} else {
## TODO: Some of these do have specs so could be done totally
## automatically. But then doing it that way requires the user
## to guess how the mapping has been done. So a simpler way
## might be to have a 'types' element in the main docker_client
## object that can produce appropriate types. Then here we just
## feed things through. Eventually it would be good to validate
## all things that come through here though.
##
## BlkioWeightDevice, BlkioDeviceReadBps, BlkioDeviceWriteBps,
## BlkioDeviceReadIOps, BlkioDeviceWriteIOps (last four are all
## ThrottleDevice types)
##
## Devices, Ulimits
validate <- quote(identity)
}
is_scalar <- FALSE
} else {
if (identical(p$additionalProperties, list(type = "string"))) {
## Labels, Options, DriverOpts
validate <- quote(as_string_map)
} else {
## Processed elsewhere:
##
## ExposedPorts, Volumes
##
## Not yet explicitly handled:
##
## Healthcheck, HostConfig, NetworkingConfig, RestartPolicy,
## IPAM, EndpointConfig,
validate <- quote(identity)
}
is_scalar <- FALSE
}
nm <- as.symbol(p$name)
nm_r <- as.symbol(p$name_r)
rhs <- as_call(validate, nm_r)
if (is_scalar) {
rhs <- as_call(quote(jsonlite::unbox), rhs)
}
lhs <- dollar(dest, quote(body), nm)
expr <- as_call(quote(`<-`), lhs, rhs)
if (!isTRUE(p$required)) {
expr <- bquote(if (!is.null(.(nm_r))) .(expr))
}
expr
}
swagger_arg_collect_header <- function(p, dest) {
stopifnot(p$type == "string")
nm <- p$name_r
sym <- as.name(nm)
is_required <- isTRUE(p$required)
has_default <- !is.null(p$default)
if (is.null(p$enum)) {
expr <- bquote(assert_scalar_character(.(sym)))
} else {
values <- as_call(quote(c), p$enum)
expr <- bquote(match_value(.(sym), .(values)))
}
if (!is_required && has_default) {
expr <- bquote(if (is.null(.(sym))) .(p$default) else .(expr))
}
expr <- bquote(.(dest)$header[[.(p$name)]] <- .(expr))
if (!is_required) {
expr <- bquote(if (!is.null(.(sym))) .(expr))
}
expr
}
swagger_args_help <- function(x, args, handlers) {
if (length(args) == 0L) {
args <- NULL
} else {
args <- set_names(vcapply(args, pick, "description", NA_character_),
vcapply(args, "[[", "name_r"))
}
if (!is.null(handlers)) {
str <- sprintf(" Construct with `$types$%s()`",
vcapply(handlers, identity))
args[names(handlers)] <- paste0(args[names(handlers)], str)
}
list(summary = x$summary, description = x$description, args = args)
}
as_body_array_string <- function(x, name = deparse(substitute(x))) {
assert_character(x, name)
x
}
## For objects in the yaml that follow:
##
## type: "object"
## additionalProperties:
## type: "string"
##
## Used in Labels, Options, DriverOpts
as_string_map <- function(x, name = deparse(substitute(x))) {
if (!is.null(x)) {
what <- "named character vector"
assert_named(x, TRUE, name, what)
assert_character(x, name, what)
lapply(x, jsonlite::unbox)
}
}
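## e.g. as_string_map(c(purpose = "demo")) returns list(purpose = unbox("demo"))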
handler_name <- function(x) {
sprintf(".handle_%s", x)
}
|
3c538fe4193b1635744f4d31482ddfe8e16d1b4a | 06dc6375f13b9094713657a38ae6432d5b8e177b | /Pj_MestradoIPT_Exp3/main_exp3.R | ccd08158411376683160f3704ec2bb0e67ebeef6 | [] | no_license | FernandoLimaVicente/mestrado2018 | 64fbf6c484fb8710c55bd41530e357113f7ae584 | f6c83b98ff68aa1d8fa2cdef150ccdf5092a615f | refs/heads/main | 2023-04-13T04:21:57.209951 | 2021-04-26T17:06:49 | 2021-04-26T17:06:49 | 361,784,791 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 17,161 | r | main_exp3.R | ###########################################################################################
# Instituto de Pesquisas Tecnologicas de São Paulo                                        #
#                                                                                         #
# Student: Fernando de Lima Vicente                                                       #
# Program: Computing Infrastructure                                                       #
#                                                                                         #
# Description: Tests with data from the Intel Lab dataset.                                #
# (Source 1: http://db.csail.mit.edu/labdata/data.txt.gz)                                 #
# (Source 2: http://db.csail.mit.edu/labdata/labdata.html)                                #
#                                                                                         #
# Header used in the dataset (Mica2Dot + Weather Board):                                  #
# date time epoch moteid temperature humidity light voltage                               #
#                                                                                         #
#                                                                                         #
###########################################################################################
###########################################################################################
# Global Variables                                                                        #
###########################################################################################
DATASET_FILE = "data.49.txt"
NODE = 49
QFKN = as.double(0)
RFKN = as.double(0)
# Temperature threshold (the maximum variation the experiment
TL = 5 # allows in a single iteration)
ZI = 2.9 # Zero-influence control (applied to the battery voltage)
GM_LEN = 0150 # GM(1,1) window size
JI = 00000 # Start of the observation window
JF = 50000 # End of the observation window
###########################################################################################
# Functions                                                                               #
###########################################################################################
# Data capture ###############################################
obter_dados_dataset = function (node) {
print ("Reading dataset file...")
dataset = read.csv(DATASET_FILE, header = T, sep = " ", stringsAsFactors = F, dec=".")
print ("Reading Complete....")
print ("Filtering dataset by mote...")
nodeset = dataset[which(dataset$moteid == node),]
print ("Filtering Complete...")
print ("Replacing NA by Zero...")
nodeset$temperature [which(is.na(nodeset$temperature) == T)] = 0
print ("Done...")
return (nodeset)
}
# Classic Kalman filter #####################################
filtro_kalman_classico = function (z, r, q) {
A = 1
B = 0
H = 1
I = 1
x = z[1]
u = 1:length(z)
P = 1
R = r # trust in the mathematical model
Q = 0.00075*q # trust in the sensor reading
xp = numeric(0)
Pp = numeric(0)
K = numeric(0)
print ("Processando Filtro de Kalman Classico: ")
##########################################
# Status #
##########################################
total = length(z)
last = 0
print ("Status: 000%")
for (k in 2:total){
##########################################
# Status #
##########################################
status = as.integer(k/total*100)
if (status %% 10 == 0 && status != last)
{
last = status
print (sprintf("Status: %.3d%%", status))
}
##########################################
# Prediction #
##########################################
xp[k] = A*x[k-1] + B*u[k]
Pp[k] = A*P[k-1]*t(A) + Q
##########################################
# Correction #
##########################################
K[k] = (Pp[k]*t(H)) / (H*Pp[k]*t(H) + R)
x[k] = xp[k] + K[k]*(z[k] - H*xp[k])
P[k] = (I - K[k]*H)*Pp[k]
}
return (x);
}
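# Minimal usage sketch (synthetic signal; r and q are illustrative values):
# z <- sin(seq(0, 10, by = 0.1)) + rnorm(101, sd = 0.3)
# x <- filtro_kalman_classico(z, r = sd(z), q = sd(z))
# plot(z, type = "l"); lines(x, col = "red")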
# Modified Kalman filter ####################################
obter_confianca_node = function (nodeVoltage) {
out = double(0)
for (i in 1:length(nodeVoltage))
{
if( nodeVoltage[i] >= 2.8)
{
out[i] = 0.075
}
else
{
if(nodeVoltage[i] >= 2.7)
{
out[i] = 0.0075
}
else
{
if(nodeVoltage[i] >= 2.6)
{
out[i] = 0.00075
}
else
{
out[i] = 0.000075
}
}
}
}
return (out)
}
obter_confianca_node_old = function (nodeVoltage) {
out = double(0)
for (i in 1:length(nodeVoltage))
{
if( nodeVoltage[i] >= 2.70)
{
out[i] = 0.001
}
else
{
if(nodeVoltage[i] >= 2.65)
{
out[i] = 0.0001
}
else
{
if(nodeVoltage[i] >= 2.60)
{
out[i] = 0.0001
}
else
{
out[i] = 0.0001
}
}
}
}
return (out)
}
filtro_kalman_novo = function (z, c) {
A = 1
B = 0
H = 1
I = 1
x = z[1]
u = 1:length(z)
P = 1
R = RFKN[1] <<- 1 # initial trust in the mathematical model
Q = QFKN[1] <<- obter_confianca_node(c[1]) # initial trust in the sensor reading
xp = numeric(0)
Pp = numeric(0)
K = numeric(0)
print ("Processando Filtro de Kalman Novo: ")
##########################################
# Status #
##########################################
total = length(z)
last = 0
print ("Status: 000%")
for (k in 2:total){
##########################################
# Status #
##########################################
status = as.integer(k/total*100)
if (status %% 10 == 0 && status != last)
{
last = status
print (sprintf("Status: %.3d%%", status))
}
##########################################
# Prediction #
##########################################
xp[k] = A*x[k-1] + B*u[k]
Pp[k] = A*P[k-1]*t(A) + Q
##########################################
# Correction #
##########################################
K[k] = (Pp[k]*t(H)) / (H*Pp[k]*t(H) + R)
x[k] = xp[k] + K[k]*(z[k] - H*xp[k])
P[k] = (I - K[k]*H)*Pp[k]
##########################################
# New Block #
##########################################
R = RFKN[k] <<- 1.3 # trust in the mathematical model
Q = QFKN[k] <<- obter_confianca_node(c[k]) # trust in the sensor reading
}
return (x);
}
filtro_kalman_novo_2 = function (z, c) {
A = 1
B = 0
H = 1
I = 1
x = z[1]
u = 1:length(z)
P = 1
R = RFKN[1] <<- 1 # initial trust in the mathematical model
Q = QFKN[1] <<- obter_confianca_node(c[1]) # initial trust in the sensor reading
xp = numeric(0)
Pp = numeric(0)
K = numeric(0)
print ("Processando Filtro de Kalman Novo: ")
##########################################
# Status #
##########################################
total = length(z)
last = 0
print ("Status: 000%")
for (k in 2:total){
##########################################
# Status #
##########################################
status = as.integer(k/total*100)
if (status %% 10 == 0 && status != last)
{
last = status
print (sprintf("Status: %.3d%%", status))
}
##########################################
# Prediction #
##########################################
xp[k] = A*x[k-1] + B*u[k]
Pp[k] = A*P[k-1]*t(A) + Q
##########################################
# Correction (with outlier control)      #
##########################################
K[k] = (Pp[k]*t(H)) / (H*Pp[k]*t(H) + R)
##########################################
if (abs (z[k]) < (abs (x[k-1]) + TL)) {
x[k] = xp[k] + K[k]*(z[k] - H*xp[k]) # Uses real value and estimation
}
else {
x[k] = xp[k] # Uses estimation only
}
##########################################
P[k] = (I - K[k]*H)*Pp[k]
##########################################
# New Block #
##########################################
R = RFKN[k] <<- 1 # trust in the mathematical model
Q = QFKN[k] <<- obter_confianca_node(c[k]) # trust in the sensor reading
}
return (x);
}
filtro_kalman_novo_3 = function (z, c, r, q) {
A = 1
B = 0
H = 1
I = 1
x = z[1]
u = 1:length(z)
P = 1
R = RFKN[1] <<- r # initial trust in the mathematical model
Q = QFKN[1] <<- q * obter_confianca_node(c[1]) # initial trust in the sensor reading
xp = numeric(0)
Pp = numeric(0)
K = numeric(0)
print ("Processando Filtro de Kalman Novo: ")
##########################################
# Initial status                         #
##########################################
total = length(z)
last = 0
print ("Status: 000%")
for (k in 2:total){
##########################################
# Status #
##########################################
status = as.integer(k/total*100)
if (status %% 10 == 0 && status != last)
{
last = status
print (sprintf("Status: %.3d%%", status))
}
##########################################
# Prediction                             #
##########################################
xp[k] = A*x[k-1] + B*u[k]
Pp[k] = A*P[k-1]*t(A) + Q
##########################################
# Correction with outlier control and a  #
# zero-influence mechanism               #
##########################################
K[k] = (Pp[k]*t(H)) / (H*Pp[k]*t(H) + R)
##########################################
if (c[k] >= ZI)
{
x[k] = z[k] # skip estimation while the battery is still charged
}
else
{
if (abs (z[k]) < (abs (x[k-1]) + TL)) {
x[k] = xp[k] + K[k]*(z[k] - H*xp[k]) # combine the estimate with the reading
}
else {
x[k] = xp[k] # use the estimate only
}
}
##########################################
P[k] = (I - K[k]*H)*Pp[k]
##########################################
# Update the system trust values         #
##########################################
R = RFKN[k] <<- r # trust in the mathematical model
Q = QFKN[k] <<- q * obter_confianca_node(c[k]) # trust in the sensor reading
}
return (x);
}
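# Note on the zero-influence mechanism: with ZI = 2.9 V the filter passes raw
# readings through while the battery is healthy, and only starts smoothing
# (and rejecting jumps larger than TL) once the voltage-based trust drops.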
# Grey model, GM(1,1) #######################################
convert_x0_x1 = function (X0) {
##########################################
# build AGO Sequence #
##########################################
X1 = X0
for (i in 1:(length(X1)-1)) {
X1[i+1] = X1[i] + X1[i+1]
}
return (X1)
}
convert_x1_x0 = function (X1) {
##########################################
# unbuild AGO Sequence #
##########################################
X0 = X1
for (i in 1:(length(X0)-1)) {
X0[i+1] = X1[i+1] - X1[i]
}
return (X0)
}
get_b_matrix = function (X1) {
B = matrix(nrow=length(X1)-1,ncol=2)
for (i in 1:length(X1)-1){
B[i,1]=-0.5*(X1[i]+X1[i+1])
B[i,2]=1
}
#for (i in 1:length(X1)-1){
# B[i,1]=-0.5*(sum(X1[1:(i+1)]))
# B[i,2]=1
#}
return (B)
}
get_y_matrix = function (X0) {
Y = matrix(X0[2:length(X0)], nrow=length(X0)-1, ncol=1)
return (Y)
}
predict_gm_model = function (X0, predictions) {
##########################################
# build AGO Sequence #
##########################################
X1 = convert_x0_x1 (X0)
##########################################
# Solving dif eq model by least squares #
##########################################
B = get_b_matrix (X1)
Y = get_y_matrix (X0)
BtB = t(B) %*% B # t(B) times B
iBtB = solve (BtB) # inverse of t(B) %*% B
BtY = t(B) %*% Y # t(B) times Y
# Parameters from Least Square Estimate
a = (iBtB %*% BtY) [1,1]
u = (iBtB %*% BtY) [2,1]
##########################################
# Perform prediction #
##########################################
calculatedX1 = numeric (length(X1)+predictions)
calculatedX1[1] = X1[1]
for (i in 1:length (calculatedX1)) {
calculatedX1 [i+1] = (X0[1] -1*(u/a))*exp (-1*a*i) + u/a
}
return (convert_x1_x0 (calculatedX1))
}
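# Usage sketch (synthetic series): predictions = -1 yields a same-length
# in-sample fit, since the prediction loop writes one element past the
# preallocated vector.
# X0 <- c(10, 12, 11, 13, 14)
# fit <- predict_gm_model(X0, predictions = -1)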
predict_gm_enhanced = function (X0, win_size) {
win_size = as.integer(win_size)
out = numeric(0)
win_start = 1
win_end = win_size
while ((win_end + win_size) < length(X0)) {
cat ("start=", win_start, " end=", win_end," out_len=", length(out),"/", length(X0), "\n")
out[win_start:win_end] = predict_gm_model (X0[win_start:win_end],-1)
win_start = win_end + 1
win_end = win_end + win_size
}
cat ("startf=", win_start, " end=", win_end," out_len=", length(out), "\n")
out[win_start:length(X0)] = predict_gm_model (X0[win_start:length(X0)],-1)
cat ("startf=", win_start, " end=", win_end," out_len=", length(out), "\n")
return (out)
}
predict_gm_enhanced_2 = function (X0, win_size) {
win_size = as.integer (win_size)
out = NULL
win_start = 1
win_end = win_size
print ("Processando GM(1,1): ")
##########################################
# Initial status                         #
##########################################
total = length(X0)
last = 0
print ("Status: 000%")
if (win_size > 0)
{
while ((win_end + win_size) < length(X0)) {
##########################################
# Status #
##########################################
status = as.integer(win_end/total*100)
if (status %% 10 == 0 && status != last)
{
last = status
print (sprintf("Status: %.3d%%", status))
}
##########################################
# Prediction                             #
##########################################
out[win_start:win_end] = tail(predict_gm_model (c(tail(out,1), X0[win_start:win_end]),-1),win_size)
win_start = win_end + 1
win_end = win_end + win_size
}
}
out[win_start:length(X0)] = predict_gm_model (X0[win_start:length(X0)],-1)
print ("Status: 100%")
return (out)
}
###########################################################################################
# Main Routine                                                                            #
###########################################################################################
#y = obter_dados_dataset(NODE) # uncomment on a fresh session; y is otherwise reused from the workspace
x = seq(0,nrow(y)-1)
#x = x * 0.034
r = q <- sd (y$temperature)
#z = filtro_kalman_novo_3 ( y$temperature, y$voltage, r, q )
#w = filtro_kalman_classico ( y$temperature, r, q )
#u = predict_gm_enhanced_2 ( y$temperature, length(y$temperature)/GM_LEN )
#plot ( x [JI:JF], y$voltage [JI:JF], type="l", col="black" )
plot ( x [JI:JF], y$temperature [JI:JF], type="l", col="black", xlab="Tempo (Ks)",ylab="Temperatura (ºC)",ylim=c(0,40))
#points ( x [JI:JF], w [JI:JF], type="l", col="red" )
#points ( x [JI:JF], z [JI:JF], type="l", col="red" )
#points ( x [JI:JF], u [JI:JF], type="l", col="red" )
###########################################################################################
# End                                                                                     #
###########################################################################################
|
16ff4ccdb8aa322ce6cb50276dad692e294659c4 | 68c52a5b67a20c720c7defec86f39c0cde9130b7 | /R/geom_qq_unif.R | 8cffef8a583106e7e1483d1a558cd1db12067ddd | [
"CC-BY-2.5"
] | permissive | Subhayan18/ggGWAS | 914ffbc07fd54db5c144a9df4ba720849d479166 | 5b4b09bbe9ef4ff3b225fd352937041edceb31fd | refs/heads/master | 2020-04-26T05:26:38.326229 | 2018-12-06T14:10:11 | 2018-12-06T14:10:11 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 5,199 | r | geom_qq_unif.R | #' @title Q-Q plot
#' @description Quantile-quantile plot to compare the p-values of a GWAS to a uniform distribution.
#'
#' @inheritParams ggplot2::geom_point
#' @param observed.thresh same scale as observed (e.g. 0.05), observed <= observed.thresh AFTER computing expected
#' @param geom \code{"point"} by default, \code{"ggrastr:::GeomPointRast"} for a rasterized version.
#'
#' @export
#' @details \code{\link[ggplot2]{stat_qq}} works for all kinds of distributions. But using \code{\link[ggplot2]{stat_qq}} with \eqn{-log10()} transformation does not work neatly.
#' @seealso \code{\link[ggplot2]{stat_qq}}, \code{\link{stat_qq_unif_hex}}
#' @note Plotting several thousand points might take time. If you want to speed things up use \code{geom="ggrastr:::GeomPointRast"} or \code{\link{stat_qq_unif_hex}}.
#' @aliases geom_qq_unif
#'
#' @examples
#' require(ggplot2)
#' n.sample <- 10000
#' df <- data.frame(P = runif(n.sample), GWAS = sample(c("a","b"), n.sample, replace = TRUE))
#'
#' ## default
#' (qp <- ggplot(df, aes(observed = P)) +
#' stat_qq_unif() +
#' geom_abline(intercept = 0, slope = 1))
#'
#' ## Group points
#' (qp <- ggplot(df, aes(observed = P)) + stat_qq_unif(aes(group = GWAS, color = GWAS)))
#'
#' ## show only p-values above a certain threshold
#' ggplot(df, aes(observed = P)) +
#' stat_qq_unif(observed.thresh = 0.05) +
#' geom_abline(intercept = 0, slope = 1)
#'
#' ## plot a line instead
#' ggplot(df, aes(observed = P)) +
#' stat_qq_unif(geom = "line") +
#' geom_abline(intercept = 0, slope = 1)
#'
#' ## plot efficiently
#' ggplot(df, aes(observed = P)) +
#' stat_qq_unif(geom = ggrastr:::GeomPointRast) +
#' geom_abline(intercept = 0, slope = 1)
#'
#' ## adding nice stuff
#' ## identical limits (meaning truly square)
#' qp +
#' theme(aspect.ratio=1) + ## square shaped
#' expand_limits(x = -log10(max(df$P)), y = -log10(max(df$P))) +
#' ggtitle("QQplot") +
#' xlab("Expected -log10(P)") +
#' ylab("Observed -log10(P)")
#'
#' ## color
#' ggplot(df, aes(observed = P, color = GWAS)) +
#' stat_qq_unif() +
#' geom_abline(intercept = 0, slope = 1)
#'
#' ## facet
#' ggplot(df, aes(observed = P)) +
#' facet_wrap(~GWAS) +
#' stat_qq_unif() +
#' geom_abline(intercept = 0, slope = 1)
#'
#'
#' ## group
#' ggplot(df, aes(observed = P, group = GWAS)) +
#' stat_qq_unif() +
#' geom_abline(intercept = 0, slope = 1)
#'
#' ## group
#' library(GWAS.utils) ## devtools::install_github("sinarueeger/GWAS.utils")
#' data("giant")
#' ?giant
#'
#' ## generate two groups
#' giant <- giant %>%
#' dplyr::mutate(gr = dplyr::case_when(BETA <= 0 ~ "Neg effect size", BETA > 0 ~ "Pos effect size"))
#' ggplot(data = giant, aes(observed = P, group = gr, color = gr)) +
#' stat_qq_unif() +
#' geom_abline(intercept = 0, slope = 1)
#'
stat_qq_unif <- function(mapping = NULL,
data = NULL,
geom = "point",
position = "identity",
na.rm = FALSE,
show.legend = NA,
inherit.aes = TRUE,
observed.thresh = NULL,
...) {
layer(
stat = StatQQplot,
data = data,
mapping = mapping,
geom = geom,
position = position,
show.legend = show.legend,
inherit.aes = inherit.aes,
params = list(na.rm = na.rm, observed.thresh = observed.thresh, ...)
)
}
#' @export
#' @rdname stat_qq_unif
geom_qq_unif <- stat_qq_unif
#' @rdname ggplot2-ggproto
#' @format NULL
#' @usage NULL
#' @export
#' @keywords internal
StatQQplot <- ggproto(
"StatQQplot",
Stat,
required_aes = c("observed"),
default_aes = aes(y = stat(`observed_log10`), x = stat(`expected_log10`)),
compute_group = function(data,
scales,
dparams,
na.rm,
observed.thresh) {
observed <-
data$observed#[!is.na(data$x)]
N <- length(observed)
## calculate the expected axis
expected <-
sort(-log10((1:N) / N - 1 / (2 * N)))
observed <-
sort(-log10(observed))
## remove points if observed thresh is set.
if (!is.null(observed.thresh))
{
observed.thresh <- -log10(observed.thresh)
ind <-
which(observed >= observed.thresh)
expected <- expected[ind]
observed <- observed[ind]
}
data.frame(`observed_log10` = observed, `expected_log10` = expected)
}
#,
# draw_panels = function(data, panel_scales, coord) {
# ## Transform the data first
# coords <- coord$transform(data, panel_scales)
#
# ## Let's print out the structure of the 'coords' object
# str(coords)
#
# ## Construct a grid grob
# pointsGrob(
# x = coords$x,
# y = coords$y,
# pch = coords$shape
# )
# },
# draw_labels <- function(data, panel_scales, coord) {
# has something to do with gtable: https://ggplot2.tidyverse.org/reference/ggplot2-ggproto.html
## labels from qqman::qq()
# xlab(expression(Expected ~ ~-log[10](italic(p)))) +
# ylab(expression(Observed ~ ~-log[10](italic(p))))
# }
)
|
1b535541f0feb82d38bbcd9cf47447ab35916cb4 | 9e3969324cab013b3e6d6f3b91d592de58c30196 | /lib/doFuture/tests2/NMF/manual.R | 5a2e24fcab1851545324a6d80ddee2cb7acbaf01 | [] | no_license | MadeleineGastonguay/svenson_hf_DO | 157b04716ebc94d3a8fb8c7da2cba7dad3d1778b | 6308e51558e3aaf4a651806962e66b921e255bc5 | refs/heads/main | 2023-08-22T17:35:05.552279 | 2021-10-18T18:12:45 | 2021-10-18T18:12:45 | 418,558,443 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,070 | r | manual.R | path <- system.file("tests2", "incl", package = "doFuture", mustWork = TRUE)
source(file.path(path, "utils.R"))
install_missing_packages(c("cluster", "lattice", "MASS", "mgcv", "isoband", "testthat", "ggplot2"))
install_missing_packages(c("BiocGenerics", "Biobase"), bioc = TRUE)
pkg <- tests2_step("start", package = "NMF")
mprintf("*** doFuture() - manual %s tests ...", pkg)
## From NMF vignette
## run on all workers using the current parallel backend
data("esGolub", package = "NMF")
res_truth <- nmf(esGolub, rank = 3L, method = "brunet", nrun = 2L, .opt = "p",
seed = 0xBEEF)
for (strategy in test_strategies()) {
mprintf("- plan('%s') ...", strategy)
registerDoFuture()
plan(strategy)
res <- nmf(esGolub, rank = 3L, method = "brunet", nrun = 2L, .opt = "p",
seed = 0xBEEF, .pbackend = NULL)
str(res)
stopifnot(all.equal(res, res_truth, check.attributes = FALSE))
mprintf("- plan('%s') ... DONE", strategy)
} ## for (strategy ...)
mprintf("*** doFuture() - manual %s tests ... DONE", pkg)
tests2_step("stop")
|
910df7f01fc9e32df3fba668029254b202fcfd47 | 74c98a3887ec31986982ef71e7625416dd668e0b | /app.R | 5e520221e81331cef80f2b774fa3f5d868f7acd3 | [
"Apache-2.0"
] | permissive | sjentoft/pendling_viz | 908e0003f4468fb9a112a2d265266cc01109aabc | 89b656f4e0ad42c8a6085c519ecbb9a94e4d62ca | refs/heads/master | 2021-05-18T16:17:36.169897 | 2020-04-24T11:19:14 | 2020-04-24T11:19:14 | 251,313,592 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 17,526 | r | app.R | # This is a Shiny web application. You can run the application by clicking
# the 'Run App' button above.
options(encoding="utf-8")
library(PxWebApiData) # For collecting data from statbank
library(shiny)
library(leaflet)
library(leaflet.extras)
library(dplyr)
library(htmlwidgets)
library(ggplot2) # for plotting
library(grDevices) # for windowsFont function
library(stringr)
library(shinydashboard)
source("Dotmap_Functions.R")
# Preset fixed variables
adjA <- 13000 # Factor for circle size adjustment
adjL <- 100 # Factor for line size
antkom <- 20 # Number of possible connections
years_all <- c("2017", "2018", "2019") # Possible selectable years
circ_size <- list("Liten" = 16000, "Middels" = 24000, "Stor" = 64000)
# set initial kommune values for choices to NULL
geodata <- NULL
# Define UI for application
ui <- dashboardPage(
dashboardHeader(title = "Pendlingsstrømmer", titleWidth = 280),
dashboardSidebar(
width = 280,
selectInput("year",
label = "Velg år",
choices = years_all,
selected = years_all[length(years_all)]),
selectizeInput("kommuneid", "Velg en kommune",
choices = geodata$komm_shape$NAVN,
selected = NULL,
options = list(maxItems = 1, maxOptions = 4,
placeholder = "Skriv inn kommunenavn",
onInitialize = I('function() { this.setValue(""); }')
)
),
sliderInput("n","Vis antall kommuner", 1, antkom, value = 8, step = 1),
selectInput("adjA",
label = "Juster sirkelstørrelse",
choices = circ_size,
selected = 24000),
br(),
actionButton("reset", "Reset"),
br(),
br(),
br(),
HTML('<left><img src="ssb-logo.png" width = "200"></left>')
),
dashboardBody(
tags$head(
tags$link(rel = "stylesheet", type = "text/css", href = "custom.css")
),
tabsetPanel(
id = "display_panel",
tabPanel("Bosted",
column(8,
leafletOutput("map", height = 500)),
# Placement and spec. for the plot on right
column(4, offset = 0, style='padding:0px;',
h1("Hvor arbeider sysselsatte personer i..."),
h2(uiOutput("selected_komm")), # not textOutput
plotOutput("plot")
)
),
tabPanel("Arbeidssted",
column(8,
leafletOutput("map_arb", height = 500)),
# Placement and spec. for the plot on right
column(4, offset = 0, style='padding:0px;',
h1("Hvor bor sysselsatte personer i..."),
h2(uiOutput("selected_komm_arb")), #not textOutput
#tags$head(tags$style("#selected_komm_arb{color: #274247; font-size: 16px;}")), # Open Sans not working font-family: 'Open Sans', regular;})),
plotOutput("plot_arb")
)
)
),
tags$head(
tags$link(rel = "stylesheet", type = "text/css", href = "custom.css")
)
)
)
#### Define server function for running the output ####
server <- function(input, output, session){
# Set up reactive values
data_of_click <- reactiveValues(clickedMarker = NULL) # For clicked kommune - bosted
data_of_click_arb <- reactiveValues(clickedMarker = NULL) # For clicked kommune - arbsted
geodata <- reactiveValues(komm_shape = NULL) # For kommune boundary polygons
geodata <- reactiveValues(komm_punkt = NULL) # For kommune center points (with data attached)
statdata <- reactiveValues() # For the statistical data values
circdata <- reactiveValues() # For the circle sizes (bosted)
circdata_arb <- reactiveValues() # For the circle sizes (arbeidssted)
# Spesifications for base map - bosted
output$map <- renderLeaflet({
leaflet(data = geodata$komm_shape) %>%
addProviderTiles(providers$OpenStreetMap.HOT,
options = providerTileOptions(opacity = 0.4)) %>%
      addMiniMap(tiles = providers$OpenStreetMap.Mapnik,
toggleDisplay = TRUE,
width = 80, height = 100,
zoomLevelFixed = 2) %>%
setView(lng=11.00, lat=59.50, zoom = 9) %>%
addLegend(position=c("topright"), colors=c("#83C1E9","#006CB6", "#F16539"),
labels=c("Befolkningen 15-74 år", "Syssesatte personer i kommunen", "Sysselsattes arbeidssted"),
opacity = 0.6)
})
# Spesifications for base map - arbeidssted
output$map_arb <- renderLeaflet({
leaflet(data = geodata$komm_shape) %>%
addProviderTiles(providers$OpenStreetMap.HOT,
options = providerTileOptions(opacity = 0.4)) %>%
      addMiniMap(tiles = providers$OpenStreetMap.Mapnik,
toggleDisplay = TRUE,
width = 80, height = 100,
zoomLevelFixed = 2) %>%
setView(lng=11.00, lat=59.50, zoom = 9) %>%
addLegend(position=c("topright"), colors=c("#F16539","#006CB6"),
labels=c("Syssesatte personer i kommunen", "Sysselsattes bosted"),
opacity = 0.6)
})
# Reload new kommune boundaries file when year is change, update kommune name choices
observeEvent(input$year, {
# update shape file and save dynamically
geo <- Load_geo_data(input$year, package = FALSE)
geodata$komm_shape <- geo[[2]]
geodata$komm_punkt <- geo[[1]]
# update dropdown/searchable list
updateSelectizeInput(session, "kommuneid", label = NULL, choices = geodata$komm_shape$NAVN,
selected = NULL, options = list(), server = FALSE)
# Update statistical data for year change - same for both bosted og arbsted
ds <- Load_stat_data(input$year, geodata$komm_punkt)
statdata$befolk <- ds[[1]]
statdata$syss <- ds[[2]]
statdata$arbb <- ds[[3]]
statdata$pend <- ds[[4]]
statdata$mat_pop <- ds[[5]]
# remove points
data_of_click$clickedMarker$id <- NULL
data_of_click_arb$clickedMarker$id <- NULL
})
# Replot map with new kommune boundaries for new year - bosted
observeEvent(list(input$display_panel, input$year), {
proxy <- leafletProxy("map") %>%
clearGroup(group = "kommuner") %>%
addPolygons(data = geodata$komm_shape, fillColor = "#ffffff", color="#274247",
weight = 0.5, smoothFactor = 0.5,
opacity = 0.8, fillOpacity = 0.6, label = geodata$komm_shape$NAVN,
highlightOptions = highlightOptions(color = "#000000",
weight = 1, bringToFront = FALSE),
layerId = ~NR,
group = "kommuner")
proxy
})
# Replot map with new boundaries (arbsted) for change in year or tab
observeEvent(list(input$display_panel, input$year), {
proxy_arb <- leafletProxy("map_arb") %>%
clearGroup(group = "kommuner_arb") %>%
addPolygons(data = geodata$komm_shape, fillColor = "#ffffff", color="#4b7272",
weight = 0.5, smoothFactor = 0.5,
opacity = 0.8, fillOpacity = 0.3, label = geodata$komm_shape$NAVN,
highlightOptions = highlightOptions(color = "#274247",
weight = 1, bringToFront = FALSE),
layerId = ~NR,
group = "kommuner_arb")
proxy_arb
})
# Create observed event for clicking on a kommune on the map - bosted
observeEvent(input$map_shape_click, {
data_of_click$clickedMarker <- input$map_shape_click
})
# Create observed event for clicking on a kommune on the map - arbsted
observeEvent(input$map_arb_shape_click, {
data_of_click_arb$clickedMarker <- input$map_arb_shape_click
})
# Create event for text input of kommune name with flyTo
observeEvent(input$kommuneid, {
name <- input$kommuneid
#bosted
data_of_click$clickedMarker$id <- geodata$komm_shape$NR[match(name, geodata$komm_shape$NAVN)]
selectedKomm <- geodata$komm_punkt[geodata$komm_punkt$NR == data_of_click$clickedMarker$id, ]
leafletProxy("map") %>%
flyTo(lng = selectedKomm$lng, lat = selectedKomm$lat, zoom = 9)
#arbsted
data_of_click_arb$clickedMarker$id <- geodata$komm_shape$NR[match(name, geodata$komm_shape$NAVN)]
selectedKomm <- geodata$komm_punkt[geodata$komm_punkt$NR == data_of_click_arb$clickedMarker$id, ]
leafletProxy("map_arb") %>%
flyTo(lng = selectedKomm$lng, lat = selectedKomm$lat, zoom = 9)
})
# Create event for click of reset button
observeEvent(input$reset, {
data_of_click$clickedMarker$id <- NULL
data_of_click_arb$clickedMarker$id <- NULL
leafletProxy("map") %>%
clearGroup(group = "circles") %>%
removeShape(layerId = "line")
leafletProxy("map_arb") %>%
clearGroup(group = "circles_arb") %>%
removeShape(layerId = "line")
})
# Observe whether adjA or strat/geo data has change and update circles sizes - bosted
observe({
circ <- Beregn_sirkel(as.numeric(input$adjA), statdata$befolk, statdata$syss, statdata$arbb, geodata$komm_punkt)
circdata$pop1 <- circ[[1]]
circdata$mat_pop <- circ[[2]]
circdata$pop2 <- circ[[3]]
circdata$pop11 <- circ[[4]]
circdata$Region <- circ[[5]]
circ_arb <- Beregn_sirkel_arbsted(as.numeric(input$adjA), statdata$arbb, geodata$komm_punkt)
circdata_arb$pop_arb <- circ_arb[[1]]
})
# Create dynamic plot title text - bosted
  output$selected_komm <- renderUI({ # don't use renderText here as it doesn't recognise øåæ
kommid <- data_of_click$clickedMarker$id
if (is.null(kommid)) {
paste("Ingen kommune valgt")
} else if (is.na(kommid)) {
paste("Ingen kommune valgt")
} else {
komm_name <- geodata$komm_shape$NAVN[match(kommid, geodata$komm_shape$NR)]
temp_num <- statdata$syss[match(kommid, statdata$syss$Region), "value"]
HTML(paste0(komm_name, " kommune. ", input$year, "<br/>",
"Antall sysselsatte personer med bostedsadresse: ", temp_num, "<br/>",
"Arbeidssted:")
)
}
})
# Create dynamic plot title text - arbsted
  output$selected_komm_arb <- renderUI({ # don't use renderText here as it doesn't recognise øåæ
kommid <- data_of_click_arb$clickedMarker$id
if (is.null(kommid)) {
paste("Ingen kommune valgt")
} else if (is.na(kommid)) {
paste("Ingen kommune valgt")
} else {
kommune_navn <- geodata$komm_shape$NAVN[match(kommid, geodata$komm_shape$NR)]
temp_num <- statdata$arbb[match(kommid, statdata$arbb$Region), "value"]
HTML(paste0(kommune_navn, " kommune. ", input$year, "<br/>",
"Antall sysselsatte personer med arbeidsstedsadresse: ", temp_num, "<br/>",
"Bosted:"))
}
})
# Create dynamic plot - bosted
output$plot <- renderPlot({
kommid <- data_of_click$clickedMarker$id #Numeric
if (length(kommid) == 0){ plot(1, type="n", axes = F, xlab = "", ylab = "")} else {
if (is.na(kommid)) { plot(1, type="n", axes = F, xlab = "", ylab = "")} else {
Make_barplot(kommid, n = as.numeric(input$n), geodata$komm_shape, statdata$pend, antkom = antkom)
}
}
}, bg = "transparent")
# Create dynamic plot - arbsted
output$plot_arb <- renderPlot({
kommid <- data_of_click_arb$clickedMarker$id #Numeric
if (length(kommid) == 0){ plot(1, type="n", axes = F, xlab = "", ylab = "")} else {
if (is.na(kommid)) { plot(1, type="n", axes = F, xlab = "", ylab = "")} else {
Make_barplot_arb(kommid, n = as.numeric(input$n), geodata$komm_shape, statdata$pend, antkom = antkom)
}
}
}, bg = "transparent")
#### Specify action with clicking a kommune - bosted ####
observe({
kommid <- as.character(data_of_click$clickedMarker$id)
if (length(kommid) > 0) { # check if not null
if (!is.na(kommid)) { # check if not missing
outdata <- Filter_data(kommid, n = input$n, adjA = as.numeric(input$adjA), scaleLine = FALSE,
komm_punkt=geodata$komm_punkt, pend=statdata$pend,
pop11=statdata$pop11, befolk=statdata$befolk,
arbb=statdata$arbb)
# outdata <- Filter_data("0101", n = 5, adjA = 24000, scaleLine =FALSE, komm_punkt=komm_punkt2018, pend=pend, pop11=pop11, befolk=befolk, arbb=arbb) #for testing
selectedKomm <- outdata[[2]][1,]
selectedShape <- geodata$komm_shape[geodata$komm_shape$KOMM == kommid, ]
topShape <- geodata$komm_shape[geodata$komm_shape$KOMM %in% outdata[[1]]$KOMM, ]
#labs <- Add_popup(topShape, befolk=statdata$befolk, syss=statdata$syss, pend=statdata$pend)
proxy <- leafletProxy("map") %>%
# remove old circles
clearGroup(group = "circles") %>%
# Add population for chosen circle - dark blue
addCircles(data = selectedKomm, lat = ~lat, lng=~lng,
# radius = circdata$pop1[as.numeric(geodata$komm_punkt$KOMM[statdata$mat_pop]) == as.numeric(kommid)],
radius = circdata$pop1[as.numeric(circdata$Region) == as.numeric(kommid)],
stroke = F, color = "#83C1E9", fillOpacity = 0.5,
group = "circles"
) %>%
# Add employed population in selected kommune
addCircles(data = selectedKomm, lat = ~lat, lng=~lng,
radius = circdata$pop2[as.numeric(circdata$Region) == as.numeric(kommid)],
stroke = F, color = "#006CB6", fillOpacity = 0.5,
group = "circles"
) %>%
# Add circles for employed living and working in selected kommune
addCircles(data = outdata[[2]], lat =~lat, lng = ~lng,
radius = outdata[[5]],
color = "#F16539", stroke = F, fillOpacity = 1,
group = "circles"
) %>%
# Add total employment in other top commute kommune - not working?
addCircles(data = outdata[[1]], lat = ~lat, lng=~lng,
radius = outdata[[7]],
stroke = F, color = "#F16539", fillOpacity = 0.2,
group = "circles"
) %>%
# Add communing lines
addPolylines(data = outdata[[3]], lng = ~lng, lat = ~lat,
group = ~group,
weight = outdata[[6]] * adjL, # width of lines
color = "#F16539", stroke = TRUE, opacity = 0.6,
layerId = ~type
)
proxy
}
}
})
#### Specify action with clicking a kommune - arbsted ####
observe({
kommid <- as.character(data_of_click_arb$clickedMarker$id)
if (length(kommid) > 0) { # check if not null
if (!is.na(kommid)) { # check if not missing
outdata <- Filter_data_arb(kommid, n = input$n,
adjA = as.numeric(input$adjA),
scaleLine = FALSE,
komm_punkt=geodata$komm_punkt,
pend=statdata$pend,
pop_arb=statdata$arbb)
# outdata <- Filter_data_arb("0101", n = 5, adjA = 24000, scaleLine =FALSE, komm_punkt=komm_punkt2018, pend=pend, pop_arb=arbb) #for testing
selectedKomm <- outdata[[2]][1,]
selectedShape <- geodata$komm_shape[geodata$komm_shape$KOMM == kommid, ]
topShape <- geodata$komm_shape[geodata$komm_shape$KOMM %in% outdata[[1]]$KOMM, ]
#labs <- Add_popup(topShape, befolk=statdata$befolk, syss=statdata$syss, pend=statdata$pend)
radius_select <- circdata_arb$pop_arb[as.numeric(statdata$arbb$Region) == as.numeric(kommid)]
proxy <- leafletProxy("map_arb") %>%
# remove old circles
clearGroup(group = "circles_arb") %>%
# Add employed population working in own kommune
addCircles(data = selectedKomm, lat = ~lat, lng=~lng,
radius = radius_select,
stroke = F, color = "#F16539", fillOpacity = 0.8,
group = "circles_arb"
) %>%
#Add circles for employed living and working in selected top kommune
addCircles(data = outdata[[2]], lat =~lat, lng = ~lng,
radius = outdata[[5]],
color = "#006CB6", stroke = F, fillOpacity = 0.8,
group = "circles_arb"
) %>%
# Add communing lines
addPolylines(data = outdata[[3]], lng = ~lng, lat = ~lat,
group = ~group,
weight = outdata[[6]] * adjL, # width of lines
color = "#006CB6", stroke = TRUE, opacity = 0.6,
layerId = ~type
)
proxy
}
}
})
}
#### Run app ####
shinyApp(ui, server)
|
6c6ba64370fe367aced7c7091c888c6f0b5fd291 | 7c741b09d2b950adea6369ef861b3679fca47612 | /Final.R | 09c30aa193ec96e404abf2347164c2c05e1966d8 | [] | no_license | tanmay310/RepData_PeerAssessment1 | bdeb56b3a00d728824c7ce2a12f389da9d3fe5aa | c4a4cbba076ccbd5ef59d31969c854a40e9c5797 | refs/heads/master | 2021-03-17T21:53:23.059214 | 2020-03-14T05:37:25 | 2020-03-14T05:37:25 | 247,020,841 | 0 | 0 | null | 2020-03-13T08:18:51 | 2020-03-13T08:18:50 | null | UTF-8 | R | false | false | 3,794 | r | Final.R | # Reproducible Research: Peer Assessment 1
```{r, echo=FALSE, results='hide', warning=FALSE, message=FALSE}
library(ggplot2)
library(scales)
library(Hmisc)
```
## Loading and preprocessing the data
##### 1. Load the data (i.e. read.csv())
```{r, results='markup', warning=TRUE, message=TRUE}
if(!file.exists('activity.csv')){
unzip('activity.zip')
}
activityData <- read.csv('activity.csv')
```
##### 2. Process/transform the data (if necessary) into a format suitable for your analysis
```{r}
#activityData$interval <- strptime(gsub("([0-9]{1,2})([0-9]{2})", "\\1:\\2", activityData$interval), format='%H:%M')
```
-----
## What is mean total number of steps taken per day?
```{r}
stepsByDay <- tapply(activityData$steps, activityData$date, sum, na.rm=TRUE)
```
##### 1. Make a histogram of the total number of steps taken each day
```{r}
qplot(stepsByDay, xlab='Total steps per day', ylab='Frequency using binwidth 500', binwidth=500)
```
##### 2. Calculate and report the mean and median total number of steps taken per day
```{r}
stepsByDayMean <- mean(stepsByDay)
stepsByDayMedian <- median(stepsByDay)
```
* Mean: `r stepsByDayMean`
* Median: `r stepsByDayMedian`
-----
## What is the average daily activity pattern?
```{r}
averageStepsPerTimeBlock <- aggregate(x=list(meanSteps=activityData$steps), by=list(interval=activityData$interval), FUN=mean, na.rm=TRUE)
```
##### 1. Make a time series plot
```{r}
ggplot(data=averageStepsPerTimeBlock, aes(x=interval, y=meanSteps)) +
geom_line() +
xlab("5-minute interval") +
ylab("average number of steps taken")
```
##### 2. Which 5-minute interval, on average across all the days in the dataset, contains the maximum number of steps?
```{r}
mostSteps <- which.max(averageStepsPerTimeBlock$meanSteps)
timeMostSteps <- gsub("([0-9]{1,2})([0-9]{2})", "\\1:\\2", averageStepsPerTimeBlock[mostSteps,'interval'])
```
* Most Steps at: `r timeMostSteps`
----
## Imputing missing values
##### 1. Calculate and report the total number of missing values in the dataset
```{r}
numMissingValues <- length(which(is.na(activityData$steps)))
```
* Number of missing values: `r numMissingValues`
##### 2. Devise a strategy for filling in all of the missing values in the dataset.
##### 3. Create a new dataset that is equal to the original dataset but with the missing data filled in.
```{r}
activityDataImputed <- activityData
activityDataImputed$steps <- impute(activityData$steps, fun=mean)
```
##### 4. Make a histogram of the total number of steps taken each day
```{r}
stepsByDayImputed <- tapply(activityDataImputed$steps, activityDataImputed$date, sum)
qplot(stepsByDayImputed, xlab='Total steps per day (Imputed)', ylab='Frequency using binwidth 500', binwidth=500)
```
##### ... and Calculate and report the mean and median total number of steps taken per day.
```{r}
stepsByDayMeanImputed <- mean(stepsByDayImputed)
stepsByDayMedianImputed <- median(stepsByDayImputed)
```
* Mean (Imputed): `r stepsByDayMeanImputed`
* Median (Imputed): `r stepsByDayMedianImputed`
----
## Are there differences in activity patterns between weekdays and weekends?
##### 1. Create a new factor variable in the dataset with two levels – “weekday” and “weekend” indicating whether a given date is a weekday or weekend day.
```{r}
activityDataImputed$dateType <- ifelse(as.POSIXlt(activityDataImputed$date)$wday %in% c(0,6), 'weekend', 'weekday')
```
##### 2. Make a panel plot containing a time series plot
```{r}
averagedActivityDataImputed <- aggregate(steps ~ interval + dateType, data=activityDataImputed, mean)
ggplot(averagedActivityDataImputed, aes(interval, steps)) +
geom_line() +
facet_grid(dateType ~ .) +
xlab("5-minute interval") +
ylab("avarage number of steps")
``` |
3fecfec322f51cf4f9329d7b932ee8246c4b1ded | 7804a576283d5bb9a8bab6c6ea8e43d991a6ef36 | /tikz/make.R | 0952dd5710d4eed4a552b29ba77c1e0074989f70 | [] | no_license | washingtonquintero/microeconr | 43d479de25c1b9edb4f94579c1b77dc60236e53c | 3ebc7f7a4aa313700de5abdb820c6ca0354c4f8d | refs/heads/main | 2023-03-19T23:42:38.392466 | 2021-02-19T05:31:49 | 2021-02-19T05:31:49 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 405 | r | make.R | files <- c("OLS2D", "vectors2D", "OLS3D", "frishWaugh")
for (i in files){
Sweave(paste("./Rnw/", i, ".Rnw", sep = "")) ;
system(paste("pdflatex ", i, ".tex", sep = ""))
system(paste("convert -density 600 ", i, ".pdf ", i, ".png", sep = ""))
system(paste("rm ", paste(i, c("aux", "log", "tex"), sep = ".", collapse = " ")))
system(paste("mv ", i, ".pdf ", i, ".png ./fig", sep = ""))
}
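# Note: this loop assumes `pdflatex` and ImageMagick's `convert` are available
# on the system PATH, and that ./Rnw/<name>.Rnw and the ./fig directory exist.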
|
aef393b9de75037c011dbf5a7e38e518c25c72fd | f1cd85f61409e7bb90fbc0123cd70f29e77dd685 | /R/genSimGLMEM.R | 955809464e8dd40f772a3b8d9e1db2691a5386d2 | [] | no_license | cran/riskPredictClustData | b3074ba1b469501c9c2ef7b76e236def020fdfba | f2c67a5c732378e464caf4fdbfe0129371d69a9f | refs/heads/master | 2020-04-08T15:16:30.520055 | 2018-11-28T07:50:03 | 2018-11-28T07:50:03 | 159,472,234 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 3,873 | r | genSimGLMEM.R | # modified on June 13, 2018
# (1) remove row names after create data frame in simulated data
#
# modified on March 7, 2018
# (1) sort by cluster id
# (2) change 'sid' to 'cid'
# (3) change 'uid' to 'subuid'
#
# modified on Dec. 28, 2017
# (1) set the default values: beta0=-6
#
# simulate data from logistic mixed effects model
#
# \log\left(\frac{p_{ij}}{1-p_{ij}}\right) = \beta_{0i} + \beta_1 smkcur_i +
#   \beta_2 lncalorc_i + \beta_3 inieye3_{ij} + \beta_4 inieye4_{ij} +
#   \beta_5 rtotfat_{1i} + \beta_6 rtotfat_{2i} + \beta_7 rtotfat_{3i},
#   i = 1, \ldots, N, \quad j = 1, 2
# \beta_{0i} \sim N(\beta_0, \sigma^2_{\beta})
genSimDataGLMEM=function(nSubj=131, beta0 = -6, sd.beta0i = 1.58,
beta1=1.58, beta2=-3.95, beta3=3.15, beta4=2.06,
beta5=0.51, beta6=1.47, beta7=3.11,
p.smkcur=0.08, p.inieye31=0.44, p.inieye32=0.42,
p.inieye41=0.12, p.inieye42=0.11, sd.lncalorc=0.33)
{
# generate intercept
beta0i=rnorm(nSubj, mean=beta0, sd=sd.beta0i)
# generate current smoking status
smkcuri=sample(c(1,0), size=nSubj, prob=c(p.smkcur, 1-p.smkcur), replace=TRUE)
# generate lncalorc
lncalorc = rnorm(nSubj, mean=0, sd=sd.lncalorc)
# generate inieye3_1 (left eye)
inieye31 = sample(c(1,0), size=nSubj, prob=c(p.inieye31, 1-p.inieye31),
replace = TRUE)
# generate inieye3_2 (right eye)
inieye32 = sample(c(1,0), size=nSubj, prob=c(p.inieye32, 1-p.inieye32),
replace = TRUE)
# generate inieye4_1 (left eye)
inieye41 = sample(c(1,0), size=nSubj, prob=c(p.inieye41, 1-p.inieye41),
replace = TRUE)
# generate inieye4_2 (right eye)
inieye42 = sample(c(1,0), size=nSubj, prob=c(p.inieye42, 1-p.inieye42),
replace = TRUE)
# generate rtotfat quartiles
rtotfat4=sample(c(1,2,3,4), size=nSubj, prob=c(1/4,1/4,1/4,1/4),
replace = TRUE)
rtotfat42=as.numeric(rtotfat4==2)
rtotfat43=as.numeric(rtotfat4==3)
rtotfat44=as.numeric(rtotfat4==4)
# generate outcome for left eye
a1 = beta0i+beta1*smkcuri+beta2*lncalorc+beta3*inieye31+beta4*inieye41+
beta5*rtotfat42+beta6*rtotfat43+beta7*rtotfat44
ea1 = exp(a1)
p1 = ea1/(1+ea1)
y1 = unlist(lapply(1:nSubj, function(i) {
tti=sample(c(1,0), size=1, prob=c(p1[i], 1-p1[i]), replace=TRUE)
return(tti)
}))
a2 = beta0i+beta1*smkcuri+beta2*lncalorc+beta3*inieye32+beta4*inieye42+
beta5*rtotfat42+beta6*rtotfat43+beta7*rtotfat44
ea2 = exp(a2)
p2 = ea2/(1+ea2)
y2 = unlist(lapply(1:nSubj, function(i) {
tti=sample(c(1,0), size=1, prob=c(p2[i], 1-p2[i]), replace=TRUE)
return(tti)
}))
# construct data frame
cid=c(1:nSubj, 1:nSubj)
subuid=c(rep(1, nSubj), rep(2, nSubj))
prog=c(y1, y2)
smkcurVec=c(smkcuri, smkcuri)
lncalorcVec=c(lncalorc, lncalorc)
inieye3Vec=c(inieye31, inieye32)
inieye4Vec=c(inieye41, inieye42)
rtotfatVec=c(rtotfat4, rtotfat4)
datFrame=data.frame(cid=cid, subuid=subuid, prog=prog, smkcur=smkcurVec, lncalorc=lncalorcVec,
inieye3=inieye3Vec, inieye4=inieye4Vec,
rtotfat=rtotfatVec)
datFrame.s=datFrame[order(datFrame$cid, datFrame$subuid),]
rownames(datFrame.s)=NULL
# need to use print(dataFrame.s, row.names=FALSE)
invisible(datFrame.s)
}
# test
#datFrame=genSimDataGLMEM(nSubj=131, beta0 = -6, sd.beta0i = 1.58,
# beta1=1.58, beta2=-3.95, beta3=3.15, beta4=2.06,
# beta5=0.51, beta6=1.47, beta7=3.11,
# p.smkcur=0.08, p.inieye31=0.44, p.inieye32=0.42,
# p.inieye41=0.12, p.inieye42=0.11, sd.lncalorc=0.33)
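# A minimal analysis sketch (commented out like the test above): refit the
# generating logistic mixed model with lme4::glmer. Assumes lme4 is installed;
# the quartile effect is entered as factor(rtotfat) instead of the three
# explicit dummies used inside the simulator.
#datFrame=genSimDataGLMEM()
#library(lme4)
#fit=glmer(prog ~ smkcur + lncalorc + inieye3 + inieye4 + factor(rtotfat) +
#            (1 | cid), data = datFrame, family = binomial)
#summary(fit)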
|
d932b95e6088df75a85201f0081ccfe6e818dd19 | ab476e4f2ae151a9da6c6d422ea5ada747a98d25 | /Source code/test1/output/wine-35_star_Saturated.r | 38ecc8ff9b8c3340c25559f60de22d67a7373463 | [] | no_license | Pushkarfrns/EECS_891_Project | ae10f3c586fe49545516e240e9724d65c1d636f0 | 003f3d41521af85a5f486f3bb053a76cc5c13089 | refs/heads/master | 2020-09-29T08:10:03.606204 | 2019-12-10T02:32:59 | 2019-12-10T02:32:59 | 226,995,246 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 4,375 | r | wine-35_star_Saturated.r |
9, 38, 38
(a3,1.36..2.86) & (a12,1.27..2.51) & (a7,0.34..2.37) & (a6,0.98..2.35) & (a11,0.48..0.88) & (a13,278..780) & (a5,70..116) & (a2,2.31..5.8) & (a4,20..30) -> (d,3)
8, 24, 24
(a3,1.36..2.86) & (a12,1.27..2.51) & (a7,0.34..2.37) & (a9,0.41..1.42) & (a6,0.98..2.35) & (a10,5.43..13) & (a11,0.48..0.88) & (a8,0.13..0.4) -> (d,3)
11, 24, 24
(a8,0.4..0.66) & (a3,1.36..2.86) & (a12,1.27..2.51) & (a7,0.34..2.37) & (a13,278..780) & (a5,70..116) & (a1,11.03..12.85) & (a9,0.41..1.42) & (a6,0.98..2.35) & (a2,2.31..5.8) & (a4,20..30) -> (d,3)
9, 15, 15
(a8,0.4..0.66) & (a3,1.36..2.86) & (a11,0.48..0.88) & (a12,1.27..2.51) & (a7,0.34..2.37) & (a10,5.43..13) & (a9,1.42..3.58) & (a4,20..30) & (a1,12.85..14.83) -> (d,3)
11, 3, 3
(a8,0.4..0.66) & (a12,2.51..4) & (a10,1.28..5.43) & (a13,278..780) & (a5,70..116) & (a3,1.36..2.86) & (a11,0.48..0.88) & (a2,2.31..5.8) & (a7,2.37..5.08) & (a1,12.85..14.83) & (a4,20..30) -> (d,3)
11, 10, 10
(a8,0.4..0.66) & (a12,1.27..2.51) & (a9,0.41..1.42) & (a6,0.98..2.35) & (a7,0.34..2.37) & (a4,10.6..20) & (a10,1.28..5.43) & (a13,278..780) & (a5,70..116) & (a3,1.36..2.86) & (a11,0.48..0.88) -> (d,3)
6, 15, 15
(a8,0.4..0.66) & (a12,1.27..2.51) & (a9,0.41..1.42) & (a4,20..30) & (a10,5.43..13) & (a6,2.35..3.88) -> (d,3)
8, 43, 43
(a3,1.36..2.86) & (a10,1.28..5.43) & (a5,70..116) & (a13,278..780) & (a1,11.03..12.85) & (a7,0.34..2.37) & (a6,0.98..2.35) & (a2,0.74..2.31) -> (d,2)
12, 28, 28
(a3,1.36..2.86) & (a10,1.28..5.43) & (a5,70..116) & (a12,2.51..4) & (a6,2.35..3.88) & (a9,1.42..3.58) & (a7,2.37..5.08) & (a8,0.13..0.4) & (a2,0.74..2.31) & (a11,0.88..1.71) & (a4,10.6..20) & (a1,12.85..14.83) -> (d,2)
6, 49, 49
(a3,1.36..2.86) & (a10,1.28..5.43) & (a13,278..780) & (a5,70..116) & (a1,11.03..12.85) & (a4,20..30) -> (d,2)
4, 70, 70
(a3,1.36..2.86) & (a10,1.28..5.43) & (a8,0.13..0.4) & (a6,0.98..2.35) -> (d,2)
11, 10, 10
(a13,278..780) & (a11,0.88..1.71) & (a4,10.6..20) & (a1,11.03..12.85) & (a9,1.42..3.58) & (a2,0.74..2.31) & (a6,2.35..3.88) & (a8,0.13..0.4) & (a5,70..116) & (a3,1.36..2.86) & (a7,0.34..2.37) -> (d,2)
13, 4, 4
(a8,0.4..0.66) & (a13,278..780) & (a11,0.88..1.71) & (a10,5.43..13) & (a9,1.42..3.58) & (a2,0.74..2.31) & (a12,1.27..2.51) & (a5,70..116) & (a3,1.36..2.86) & (a4,20..30) & (a6,0.98..2.35) & (a7,0.34..2.37) & (a1,12.85..14.83) -> (d,2)
7, 4, 4
(a8,0.4..0.66) & (a1,11.03..12.85) & (a4,10.6..20) & (a5,116..162) & (a9,1.42..3.58) & (a2,0.74..2.31) & (a3,2.86..3.23) -> (d,2)
9, 13, 13
(a8,0.4..0.66) & (a1,11.03..12.85) & (a9,0.41..1.42) & (a4,10.6..20) & (a5,70..116) & (a3,1.36..2.86) & (a2,2.31..5.8) & (a6,0.98..2.35) & (a7,0.34..2.37) -> (d,2)
8, 2, 2
(a5,116..162) & (a8,0.4..0.66) & (a11,0.48..0.88) & (a12,1.27..2.51) & (a1,11.03..12.85) & (a9,0.41..1.42) & (a7,2.37..5.08) & (a6,2.35..3.88) -> (d,2)
7, 52, 52
(a1,12.85..14.83) & (a6,2.35..3.88) & (a3,1.36..2.86) & (a7,2.37..5.08) & (a8,0.13..0.4) & (a9,1.42..3.58) & (a4,10.6..20) -> (d,1)
9, 11, 11
(a1,12.85..14.83) & (a2,0.74..2.31) & (a6,2.35..3.88) & (a3,1.36..2.86) & (a7,2.37..5.08) & (a8,0.13..0.4) & (a5,116..162) & (a4,20..30) & (a9,1.42..3.58) -> (d,1)
9, 14, 14
(a1,12.85..14.83) & (a2,0.74..2.31) & (a6,2.35..3.88) & (a3,1.36..2.86) & (a7,2.37..5.08) & (a5,70..116) & (a4,10.6..20) & (a8,0.4..0.66) & (a9,1.42..3.58) -> (d,1)
9, 11, 11
(a1,12.85..14.83) & (a2,0.74..2.31) & (a6,2.35..3.88) & (a3,1.36..2.86) & (a8,0.13..0.4) & (a9,0.41..1.42) & (a4,10.6..20) & (a7,0.34..2.37) & (a5,70..116) -> (d,1)
9, 20, 20
(a7,2.37..5.08) & (a1,12.85..14.83) & (a2,0.74..2.31) & (a4,20..30) & (a9,1.42..3.58) & (a6,2.35..3.88) & (a3,1.36..2.86) & (a8,0.13..0.4) & (a5,70..116) -> (d,1)
10, 5, 5
(a3,2.86..3.23) & (a5,116..162) & (a7,2.37..5.08) & (a10,5.43..13) & (a1,12.85..14.83) & (a2,0.74..2.31) & (a8,0.4..0.66) & (a4,20..30) & (a9,1.42..3.58) & (a6,2.35..3.88) -> (d,1)
10, 2, 2
(a3,2.86..3.23) & (a5,116..162) & (a12,1.27..2.51) & (a9,0.41..1.42) & (a7,2.37..5.08) & (a10,5.43..13) & (a6,0.98..2.35) & (a4,10.6..20) & (a1,12.85..14.83) & (a2,0.74..2.31) -> (d,1)
|
eaf7d0ac37576f73cb3e6cb6c9f9a4d37e44fb99 | 4223c866bd17f791523a9c11adfa5269df28d5c6 | /man/zoomtoseg.Rd | 9dfe98f66f3402caa3fcd8a6a9e0997c1c69bcc9 | [] | no_license | jsta/riverdist | f394c3dbe4266241b119e7e627c2b2d8e9dd1350 | ea5732ceecd62231264e9a72d481d707a74eb0f5 | refs/heads/master | 2021-09-02T16:31:35.069386 | 2017-12-29T15:27:19 | 2017-12-29T15:27:19 | 105,771,176 | 1 | 0 | null | 2017-10-04T13:23:17 | 2017-10-04T13:23:17 | null | UTF-8 | R | false | true | 721 | rd | zoomtoseg.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/dissolve_zoom.R
\name{zoomtoseg}
\alias{zoomtoseg}
\title{Zoom to segment}
\usage{
zoomtoseg(seg, rivers, ...)
}
\arguments{
\item{seg}{A segment or vector of segments to zoom to}
\item{rivers}{The river network object to use}
\item{...}{Additional plotting arguments (see \link[graphics]{par})}
}
\description{
Calls \link{plot.rivernetwork} and automatically zooms to a specified
segment or vector of segments. Not intended for any real mapping - just
investigating and error checking.
}
\examples{
data(Kenai3)
plot(x=Kenai3)
# checking out a particularly messy region...
zoomtoseg(c(110,63), rivers=Kenai3)
}
\author{
Matt Tyers
}
|
0ab7d0d66fcd9d41cd208df95edfe98c299d7142 | c459dd32d88158cb064c3af2bc2ea8c7ab77c667 | /integration/30_aliquot_integration/findmarkers/findmarkers_pt_vs_others_wilcox.R | 8ea096c2fe95c3b3fdb00c22b5abc77c0f320dd5 | [] | no_license | ding-lab/ccRCC_snRNA_analysis | d06b8af60717779671debe3632cad744467a9668 | ac852b3209d2479a199aa96eed3096db0b5c66f4 | refs/heads/master | 2023-06-21T15:57:54.088257 | 2023-06-09T20:41:56 | 2023-06-09T20:41:56 | 203,657,413 | 6 | 3 | null | null | null | null | UTF-8 | R | false | false | 2,844 | r | findmarkers_pt_vs_others_wilcox.R | #!/usr/bin/env Rscript
## library
packages = c(
"ggplot2",
"Seurat",
"dplyr",
"plyr",
"data.table"
)
for (pkg_name_tmp in packages) {
if (!(pkg_name_tmp %in% installed.packages()[,1])) {
print(paste0("No ", pkg_name_tmp, " Installed!"))
} else {
print(paste0("", pkg_name_tmp, " Installed!"))
}
library(package = pkg_name_tmp, character.only = T, quietly = T)
}
cat("Finish loading libraries!\n")
cat("###########################################\n")
## get the path to the seurat object
args = commandArgs(trailingOnly=TRUE)
## argument: directory to the output
path_output_dir <- args[1]
cat(paste0("Path to the output directory: ", path_output_dir, "\n"))
cat("###########################################\n")
## argument 2: filename for the output file
path_output_filename <- args[2]
cat(paste0("Filename for the output: ", path_output_filename, "\n"))
cat("###########################################\n")
path_output <- paste0(path_output_dir, path_output_filename)
## argument : path to seurat object
path_srat <- args[3]
cat(paste0("Path to the seurat object: ", path_srat, "\n"))
cat("###########################################\n")
## argument : path to the barcode to cell type table
path_barcode2celltype_df <- args[4]
cat(paste0("Path to the barcode to cell type marker table: ", path_barcode2celltype_df, "\n"))
cat("###########################################\n")
## input cell type marker table
barcode2celltype_df <- fread(input = path_barcode2celltype_df, data.table = F)
cat("finish reading the barcode-cell-type table!\n")
cat("###########################################\n")
## input srat
cat(paste0("Start reading the seurat object: ", "\n"))
srat <- readRDS(path_srat)
print("Finish reading the seurat object!\n")
cat("###########################################\n")
## add cell type info into meta data
metadata_tmp <- srat@meta.data
metadata_tmp$integrated_barcode <- rownames(srat@meta.data)
metadata_tmp <- merge(metadata_tmp, barcode2celltype_df, by = c("integrated_barcode"), all.x = T)
rownames(metadata_tmp) <- metadata_tmp$integrated_barcode
srat@meta.data <- metadata_tmp
## change identification for the cells to be cell type group
Idents(srat) <- "Most_Enriched_Cell_Type1"
## run findallmarkers
markers_df <- FindMarkers(object = srat, ident.1 = "Proximal tubule", ident.2 = c("Endothelial cells", "Fibroblasts", "Loop of Henle", "Lymphoid lineage immune cells", "Myeloid lineage immune cells"), test.use = "wilcox", only.pos = T, logfc.threshold = 0)
print("Finish running FindMarkers!\n")
markers_df$gene <- rownames(markers_df)
cat("###########################################\n")
## write output
write.table(markers_df, file = path_output, quote = F, sep = "\t", row.names = F)
cat("Finished saving the output\n")
cat("###########################################\n")
|
42da73be7aa48d5000439ff5721bb4f22772cea4 | bb0e4fe1cca50c8b389b9f0608e19c9ec49362a3 | /Ch6_Regression Methods.R | 367f8c00ff4e667929ac27c0d360ebdaa7f5ba8c | [] | no_license | Soy-code/MachineLearning_with_R | 1228a2ea466cb20340a90f583682f4f956bc5aa8 | 04ffdd5bbb32ab003b9c2e98769011fa1be3abf2 | refs/heads/master | 2022-12-08T07:38:33.788131 | 2020-09-08T11:04:08 | 2020-09-08T11:04:08 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 4,225 | r | Ch6_Regression Methods.R | setwd('E:\\BITAmin\\Machine Learning with R, Second Edition_Code\\Chapter 06')
launch=read.csv('challenger.csv', stringsAsFactors = T)
str(launch)
b=cov(launch$temperature, launch$distress_ct) / var(launch$temperature) ; b
a <- mean(launch$distress_ct) - b * mean(launch$temperature)
a
r <- cov(launch$temperature, launch$distress_ct) / (sd(launch$temperature) * sd(launch$distress_ct))
r
cor(launch$temperature, launch$distress_ct)
r * (sd(launch$distress_ct) / sd(launch$temperature))
model <- lm(distress_ct ~ temperature, data = launch)
model
summary(model)
reg <- function(y, x) {
x <- as.matrix(x)
x <- cbind(Intercept = 1, x)
b <- solve(t(x) %*% x) %*% t(x) %*% y
colnames(b) <- "estimate"
print(b)
}
str(launch)
reg(y = launch$distress_ct, x = launch[2])
reg(y = launch$distress_ct, x = launch[2:4])
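# sanity check: the normal-equation estimates above should match the
# coefficients returned by lm() below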
model <- lm(distress_ct ~ temperature + field_check_pressure + flight_num, data = launch)
model
####
insurance <- read.csv("insurance.csv", stringsAsFactors = TRUE)
str(insurance)
summary(insurance$expenses)
hist(insurance$expenses)
table(insurance$region)
cor(insurance[c("age", "bmi", "children", "expenses")])
pairs(insurance[c("age", "bmi", "children", "expenses")])
library(psych)
pairs.panels(insurance[c("age", "bmi", "children", "expenses")])
ins_model <- lm(expenses ~ age + children + bmi + sex + smoker + region,
data = insurance)
summary(ins_model)
ins_model <- lm(expenses ~ ., data = insurance)
ins_model
summary(ins_model)
insurance$age2 <- insurance$age^2
insurance$bmi30 <- ifelse(insurance$bmi >= 30, 1, 0)
ins_model2 <- lm(expenses ~ age + age2 + children + bmi + sex +
bmi30*smoker + region, data = insurance)
summary(ins_model2)
### update function, e.g. update(ins.reg, . ~ . - sex)
### The lower the AIC value, the better the predictive power.
### step(ins.reg)
###
tee <- c(1, 1, 1, 2, 2, 3, 4, 5, 5, 6, 6, 7, 7, 7, 7)
at1 <- c(1, 1, 1, 2, 2, 3, 4, 5, 5)
at2 <- c(6, 6, 7, 7, 7, 7)
bt1 <- c(1, 1, 1, 2, 2, 3, 4)
bt2 <- c(5, 5, 6, 6, 7, 7, 7, 7)
sdr_a <- sd(tee) - (length(at1) / length(tee) * sd(at1) + length(at2) / length(tee) * sd(at2))
sdr_b <- sd(tee) - (length(bt1) / length(tee) * sd(bt1) + length(bt2) / length(tee) * sd(bt2))
sdr_a
sdr_b
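# the regression tree prefers the candidate split with the larger standard
# deviation reduction (compare sdr_a and sdr_b printed above)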
wine <- read.csv("whitewines.csv")
str(wine)
hist(wine$quality)
summary(wine)
wine_train <- wine[1:3750, ]
wine_test <- wine[3751:4898, ]
library(rpart)
## rpart: a function that builds a regression tree.
m.rpart <- rpart(quality ~ ., data = wine_train)
m.rpart
summary(m.rpart)
## alcohol seems to be the most explanatory variable (?)
library(rpart.plot)
rpart.plot(m.rpart, digits = 3)
rpart.plot(m.rpart, digits = 4, fallen.leaves = TRUE, type = 3, extra = 101)
p.rpart <- predict(m.rpart, wine_test)
summary(p.rpart)
summary(wine_test$quality)
cor(p.rpart, wine_test$quality)
MAE <- function(actual, predicted) {
mean(abs(actual - predicted))
}
MAE(p.rpart, wine_test$quality)
mean(wine_train$quality)
MAE(5.87, wine_test$quality)
library(RWeka)
m.m5p <- M5P(quality ~ ., data = wine_train)
m.m5p
summary(m.m5p)
p.m5p <- predict(m.m5p, wine_test)
summary(p.m5p)
cor(p.m5p, wine_test$quality)
MAE(wine_test$quality, p.m5p)
## correlation increased, mean error decreased -> the model improved.
# ---------------------------------- example -------------------------------------#
football=read.csv("FM2019.csv")
set.seed(123)
N=nrow(football)
str(football)
sampling=sample(N, N*0.7 )
ft_train=football[sampling, ]
ft_test=football[-sampling, ]
install.packages("rpart")
library(rpart)
m.part = rpart(Performance~., data = ft_train)
m.part
rpart.plot(m.part, digits=3)
p.rpart=predict(m.part, ft_test)
summary(p.rpart)
summary(ft_test$Performance)
cor(p.rpart, ft_test$Performance)
MAE=function(actual, predict) { mean(abs(actual-predict))}
MAE(ft_test$Performance, p.rpart)
mean(ft_train$Performance)
MAE(68.13, ft_test$Performance)
library(RWeka)
m.m5p=M5P(Performance~., data=ft_train)
m.m5p ## see rule num5
summary(m.m5p)
p.m5p = predict(m.m5p, ft_test)
summary(p.m5p)
cor(p.m5p, ft_test$Performance)
cor(p.m5p, ft_test$Performance)
cor(p.rpart, ft_test$Performance)
|
1653686f636486667c652f18aa971e1b3683c441 | 682be37f828a663f0c1afb936988cbbc9fa8fe00 | /man/player_profile.Rd | 2ca3dc7a0a215f9a8b7e406a10877698a66a4817 | [] | no_license | cran/bigchess | 33a7929bbb10729a50be4d1b6c0193e244466be4 | a7447339542daaae957262810e3a0276f23fdd3e | refs/heads/master | 2021-07-03T13:28:43.254433 | 2020-08-05T15:10:02 | 2020-08-05T15:10:02 | 133,085,082 | 1 | 0 | null | null | null | null | UTF-8 | R | false | true | 1,755 | rd | player_profile.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/player_profile.R
\name{player_profile}
\alias{player_profile}
\title{Compute player profile}
\usage{
player_profile(df, player)
}
\arguments{
\item{df}{data frame from read.pgn or read.pgn.ff files with stats computed.}
\item{player}{string used in grepl(player,White) and grepl(player,Black)}
}
\value{
Data frame with player (column prefix P_) and opponent (column prefix O_) figure move counts. Column Player_Col indicating pieces colour for player (factor White or Black).
Example column P_Q_moves means number of player Queen moves count.
}
\description{
Computes players profile from data frame obtained from read.pgn() function into data frame
}
\examples{
f <- system.file("extdata", "Kasparov.gz", package = "bigchess")
con <- gzfile(f,encoding = "latin1")
df <- read.pgn(con,quiet = TRUE,ignore.other.games = TRUE)
nrow(df) # 2109
df_pp <- player_profile(df,"Kasparov, Gary")
nrow(df_pp) # 1563
df_pp <- player_profile(df,"Kasparov,G")
nrow(df_pp) # 543
df_pp <- player_profile(df,"Kasparov, G\\\\.")
nrow(df_pp) # 2
df_pp <- player_profile(df,"Kasparov")
nrow(df_pp) # 2109 - correct
boxplot(P_Q_moves/NMoves~Player_Col,df_pp,
main = "Average Queen Moves\\n Kasparov as Black (909 games) vs Kasparov as White (1200 games)",
col = c("black","white"),border = c("black","black"),notch = TRUE)
# Magnus Carlsen data example
f <- system.file("extdata", "Carlsen.gz", package = "bigchess")
con <- gzfile(f,encoding = "latin1")
df <- read.pgn(con,quiet = TRUE,ignore.other.games = TRUE)
nrow(df) # 2410
df_pp <- player_profile(df,"Carlsen")
nrow(df_pp) # 2411 - ??
# One game was played by Carlsen,H
df_pp <- player_profile(df,"Carlsen,M")
nrow(df_pp) # 2410 - correct
}
|
b62cae96ea480527092187f8cc804f675b1dab2d | 80e4457a50f9f27e8f3dbf6b583ed09c37422b9c | /demo/autoreg.R | 9e38136e526af31049df4571d317722e3e06cd62 | [] | no_license | snowdj/midasr | 65afacc06adf5bafd8f8afe34ffa041708f95d41 | 6800e04a0d6205b4b4bfc532c9732232dd3f965c | refs/heads/master | 2021-01-18T21:07:20.848052 | 2014-04-30T08:09:32 | 2014-04-30T08:09:32 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 860 | r | autoreg.R | library(midasr)
theta.h0 <- function(p, dk) {
i <- (1:dk-1)/100
pol <- p[3]*i + p[4]*i^2
(p[1] + p[2]*i)*exp(pol)
}
##Generate coefficients
theta0 <- theta.h0(c(-0.1,10,-10,-10),4*12)
##Generate the predictor variable
xx <- simplearma.sim(list(ar=0.6),3000*12,1,12)
aa <- lapply(c(50,100,200,500,1000,1500,2000), function(n) {
y <- midas.auto.sim(n,theta0,c(0.5),xx,1,n.start=100)
x <- window(xx,start=start(y))
midas_r(y~mls(y,1,1)+fmls(x,4*12-1,12,theta.h0),start=list(x=c(-0.1,10,-10,-10)))
})
sapply(aa,function(x)c(nrow(x$model),coef(x)))
bb <- lapply(c(50,100,200,500,1000,1500,2000), function(n) {
y <- midas.auto.sim(n,theta0,c(0.5,0.1),xx,1,n.start=100)
x <- window(xx,start=start(y))
midas_r(y~mls(y,1:2,1)+fmls(x,4*12-1,12,theta.h0),start=list(x=c(-0.1,10,-10,-10)))
})
sapply(bb,function(x)c(nrow(x$model),coef(x)))
|
24cace937fcf2a770ae52d6c16797588a827d046 | 1c8fb441a707ebb3e9bc504a28db19e11745b05d | /The_R_Graph_Gallery/circlize_demo.R | 220208f73f5a609abbc8152d500a9b31270d4184 | [
"MIT"
] | permissive | HouyuZhang/Learn_R | 48006affa7a12c551ee906281407fe86d9983c4c | c0f6fe9d6dd35686f323662dec77e826aa1c09cd | refs/heads/master | 2023-02-17T21:54:29.261004 | 2021-01-18T08:23:14 | 2021-01-18T08:23:14 | 278,016,759 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 951 | r | circlize_demo.R | chrM <- read.table("hg38.50bp.chrM_cutfreqs.bed", sep = "\t", header = T)
library(circlize)       # circos.* plotting functions, colorRamp2()
library(RColorBrewer)   # brewer.pal()
colnames(chrM) <- gsub("Human_|_ATACseq|_chrM","",colnames(chrM))
scaled_chrM <- cbind(chrM[,1:3], scale(chrM[,4:ncol(chrM)]))
pdf("Human.pdf")
circos.clear()
circos.par(start.degree = 90)
circos.initializeWithIdeogram(species ="hg38", sort.chr = TRUE, chromosome.index="chrM",
plotType = c("labels", "axis"))
gene_bed = read.table("Mouse_chrM.gtf", sep = "\t", header = F)
circos.genomicTrack(gene_bed, ylim = c(0,0.2),
panel.fun = function(region, value, ...) {
circos.genomicRect(region, value, col = "red", border = "white", ...)
}, track.height = 0.05, bg.border = NA)
col_fun = colorRamp2(c(-3, 0, 3), rev(brewer.pal(n = 3, name = "RdBu")))
circos.genomicHeatmap(scaled_chrM, col = col_fun, side = "inside", border = NA, heatmap_height = 0.4,)
circos.clear()
dev.off()
|
bd71b788627908198b29047aeec85702e567d373 | a05a747d3d5167feb74a417d242eed70e950c1ae | /reserver.R | 55b4cb73b000c474984d36fa794fad14b9342005 | [] | no_license | cuibaps1/RTest | d3939e72c596710daa8be0eb62b63b497a84a856 | 66663811f5b81bf7ce73b38ba2d5ae5bc4039ca9 | refs/heads/master | 2020-04-26T11:07:06.064886 | 2019-03-03T12:47:33 | 2019-03-03T12:47:33 | 173,506,020 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 437 | r | reserver.R | #' Reverses a string or number
#' @param toReverse A string or number
#' @return the reverse of the provided string or number
#' @examples colin_reverser("foo")
#' @importFrom magrittr "%>%"
#' @export
colin_reverser <- function(toReverse){
split <- autoSplit(toReverse)
rev(split) %>% paste (collapse = "")
}
# Helper function to make splitting easier
autoSplit <- function(toSplit){
strsplit(as.character(toSplit), "")[[1]]
}
|
272843e46c4c2530a05423717fb96a2a18eae1ae | d1f45559b044ccbf7486099a3c41c98d1741a247 | /man/GoodmanKruskalGamma.Rd | c5c4de7adc8b6b763251fcba2172f7654a908da0 | [] | no_license | MathieuMarauri/DescTools | 74ecb8568696ca6501d11d3a7f5ca9cd83f1bbb2 | c8abac510b9ea126aad2034df6dc11856dc9608d | refs/heads/master | 2020-03-11T08:06:54.418764 | 2018-04-17T10:00:22 | 2018-04-17T10:00:22 | 129,875,184 | 0 | 0 | null | 2018-04-17T08:52:29 | 2018-04-17T08:52:29 | null | UTF-8 | R | false | false | 3,866 | rd | GoodmanKruskalGamma.Rd | \name{GoodmanKruskalGamma}
\alias{GoodmanKruskalGamma}
%- Also NEED an '\alias' for EACH other topic documented here.
\title{Goodman Kruskal's Gamma
%% ~~function to do ... ~~
}
\description{Calculate Goodman Kruskal's Gamma statistic, a measure of
association for ordinal factors in a two-way table.\cr
The function has interfaces for a table (matrix) and for single vectors.}
\usage{
GoodmanKruskalGamma(x, y = NULL, conf.level = NA, ...)
}
%- maybe also 'usage' for other objects documented here.
\arguments{
\item{x}{a numeric vector or a contingency table. A matrix will be treated as a table.
%% ~~Describe \code{x} here~~
}
\item{y}{NULL (default) or a vector with compatible dimensions to \code{x}. If y is provided, \code{table(x, y, \dots)} is calculated.
%% ~~Describe \code{y} here~~
}
\item{conf.level}{confidence level of the interval. If set to \code{NA} (which is the default) no confidence intervals will be calculated.
%% ~~Describe \code{conf.level} here~~
}
\item{\dots}{further arguments are passed to the function \code{\link{table}}, allowing i.e. to set useNA. This refers only to the vector interface.
%% ~~Describe \code{\dots} here~~
}
}
\details{The estimator of \eqn{\gamma}{gamma} is based only on the number of concordant and discordant pairs of observations. It ignores tied pairs (that is, pairs of observations that have equal values of X or equal values of Y). Gamma is appropriate only when both variables lie on an ordinal scale. \cr
It has the range [-1, 1]. If the two variables are independent, then the estimator of gamma tends to be close to zero.
For \eqn{2 \times 2}{2 x 2} tables, gamma is equivalent to Yule's Q (\code{\link{YuleQ}}). \cr
Gamma is estimated by \deqn{ G = \frac{P-Q}{P+Q}}{G = (P-Q) / (P+Q) } where P equals twice the number of concordances and Q twice the number of discordances.
%% ~~ If necessary, more details than the description above ~~
}
\value{
a single numeric value if no confidence intervals are requested,\cr
and otherwise a numeric vector with 3 elements for the estimate, the lower and the upper confidence interval
%% ~Describe the value returned
%% If it is a LIST, use
%% \item{comp1 }{Description of 'comp1'}
%% \item{comp2 }{Description of 'comp2'}
%% ...
}
\references{
Agresti, A. (2002) \emph{Categorical Data Analysis}. John Wiley & Sons,
pp. 57-59.
Goodman, L. A., & Kruskal, W. H. (1954) Measures of
association for cross classifications. \emph{Journal of the
American Statistical Association}, 49, 732-764.
Goodman, L. A., & Kruskal, W. H. (1963) Measures of
association for cross classifications III: Approximate
sampling theory. \emph{Journal of the American Statistical
Association}, 58, 310-364.
%% ~put references to the literature/web site here ~
}
\author{Andri Signorell <andri@signorell.net>
%% ~~who you are~~
}
\seealso{There's another implementation of gamma in \pkg{vcdExtra} \code{\link[vcdExtra]{GKgamma}}\cr
\code{\link{ConDisPairs}} yields concordant and discordant pairs \cr\cr
Other association measures: \cr
\code{\link{KendallTauA}} (tau-a), \code{\link{KendallTauB}} (tau-b), \code{\link{cor}} (method="kendall") for tau-b, \code{\link{StuartTauC}} (tau-c), \code{\link{SomersDelta}}\cr
\code{\link{Lambda}}, \code{\link{GoodmanKruskalTau}} (tau), \code{\link{UncertCoef}}, \code{\link{MutInf}}
%% ~~objects to See Also as \code{\link{help}}, ~~~
}
\examples{
# example in:
# http://support.sas.com/documentation/cdl/en/statugfreq/63124/PDF/default/statugfreq.pdf
# pp. S. 1821
tab <- as.table(rbind(
c(26,26,23,18, 9),
c( 6, 7, 9,14,23))
)
GoodmanKruskalGamma(tab, conf.level=0.95)
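
# relation to concordant/discordant pairs (sketch; assumes ConDisPairs()
# returns the total concordant (C) and discordant (D) pair counts):
cd <- ConDisPairs(tab)
(cd$C - cd$D) / (cd$C + cd$D)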
}
% Add one or more standard keywords, see file 'KEYWORDS' in the
% R documentation directory.
\keyword{ multivar}
\keyword{nonparametric}
|
de65386b9a621ada366fb10beea6f9cdf33f8ee0 | 0ae23d02aa6b7c50ad50ced88b0791e67ab41d01 | /server/modules/analysis/service/rChain/rFile/system/init-utils.R | c13f84a9ea2ba78a388c582bbb2f92a658a2ea92 | [
"MIT"
] | permissive | openforis/arena | 84a493acdae9a36d684e279a99ad2994c0e05966 | 49a51a9cb185751626666a81e257c8c8b9b78fab | refs/heads/master | 2023-08-16T15:47:41.403916 | 2023-08-16T11:02:50 | 2023-08-16T11:02:50 | 139,414,164 | 15 | 5 | MIT | 2023-09-14T09:10:41 | 2018-07-02T08:31:42 | JavaScript | UTF-8 | R | false | false | 484 | r | init-utils.R | arena.dfColumnsAs = function (df, columns, mutateFunction) {
return ( df %>%
dplyr::mutate(across( all_of( columns), mutateFunction))
)
}
arena.dfColumnsAsCharacter = function (df, columns) {
return ( arena.dfColumnsAs(df, columns, as.character) )
}
arena.dfColumnsAsLogical = function (df, columns) {
return ( arena.dfColumnsAs(df, columns, as.logical) )
}
arena.dfColumnsAsNumeric = function (df, columns) {
return ( arena.dfColumnsAs(df, columns, as.numeric) )
}
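# Minimal usage sketch (hypothetical data frame and column names):
# df <- data.frame(a = c("1", "2"), flag = c("TRUE", "FALSE"))
# df <- arena.dfColumnsAsNumeric(df, columns = c("a"))
# df <- arena.dfColumnsAsLogical(df, columns = c("flag"))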
|
92e332825ac369a5f34dae75406f1233b85f63ed | e5b50b052d111753a7beb335911b6a98aca845d7 | /tests/testthat/test-build_gran.R | 2b4ea8462e0f6330dc0280f59cd8ebccc5b2f2d6 | [] | no_license | Sayani07/gravitas | a0d33639aa3329570ba02a67faa7f015a0b0b7e3 | c1430843c5cd9bc547dc0f768cd38b979f4b5f6a | refs/heads/master | 2022-06-28T06:36:04.146695 | 2022-06-14T01:25:28 | 2022-06-14T01:25:28 | 177,514,371 | 15 | 6 | null | 2021-12-03T03:14:16 | 2019-03-25T04:30:24 | R | UTF-8 | R | false | false | 1,331 | r | test-build_gran.R | context("build_gran")
x <- lubridate::ymd_hms("2018-11-04 18:37:04 EST")
# test_that("build_gran inputs", {
# expect_is(x,c("POSIXct", "POSIXt"))
# })
test_that("build_gran output length equals input length of time vector", {
expect_length(build_gran(x, "hour", "week"), length(x))
})
#
# test_that("build_gran error with null input", {
# expect_error(build_gran(x, "hour"), "function requires both gran1 and gran2 to be specified")
# })
test_that("build_gran outputs a numeric value", {
expect_is(build_gran(x, "hour", "week"), "numeric")
})
test_that("build_gran expected output hour_week", {
expect_equal(build_gran(x, "hour", "week"), 18)
})
test_that("build_gran expected output minute_hhour", {
expect_equal(build_gran(x, "minute", "hhour"), 8)
})
test_that("build_gran expected output day_month", {
expect_equal(build_gran(x, "day", "month"), 4)
})
test_that("build_gran expected output month_semester", {
expect_equal(build_gran(x, "month", "semester"), 5)
})
test_that("build_gran expected output week_quarter", {
expect_equal(build_gran(x, "week", "quarter"), 5)
})
test_that("build_gran expected output week_semester", {
expect_equal(build_gran(x, "week", "semester"), 19)
})
test_that("build_gran expected output second_hhour", {
expect_equal(build_gran(x, "second", "hhour"), 424)
})
|
1554f13b48a791374af1a3c1317cf2b6b5f3defd | e1c699092a3f005f762b46ceb0457dab3b9842a4 | /Simulation/kadane simulation.R | b6710dd0a4c1aecce80b1a118be0b5638882bd58 | [] | no_license | ArjanHuizing/kadaneImpute | 8eb9db94159203a3e626722027a6d6e6d9cdb5ba | 881a00e0caaa8eff31188e2df4554d1eb57bcdac | refs/heads/master | 2020-06-27T05:23:28.056728 | 2019-08-11T21:02:43 | 2019-08-11T21:02:43 | 199,855,698 | 0 | 1 | null | null | null | null | UTF-8 | R | false | false | 2,531 | r | kadane simulation.R | # Testing the method
library(MASS)  # mvrnorm()
library(mice)  # mice(), complete(); assumes the custom "kadane" imputation method is registered
# simulation parameters
res <- data.frame(set.r = NA, match = NA, r = NA, bias = NA, ci = NA, width = NA)
nsim <- 5000
match <- c(FALSE, TRUE)
corr <- c(seq(0, 0.9, by = 0.1), 0.99)
set.seed(134589)
pb <- txtProgressBar(min = 0, max = length(match)*length(corr)*nsim, style = 3)
for(j in 1:length(match)){
for(r in 1:length(corr)){
simres <- data.frame(r = NA, bias = NA, ci = NA, width = NA)
for(i in 1:nsim){
setTxtProgressBar(pb, ((j-1)*(length(corr)*nsim)) + ((r-1)*nsim) + i)
data <- mvrnorm(n = 100, mu = c(0, 0.5, 1), Sigma = matrix(c(1, corr[r], 0.5,
corr[r], 1, 0.5,
0.5, 0.5, 1), nrow = 3))
obs <- as.data.frame(data)
obs[1:50, 1] <- NA
obs[51:100, 2] <- NA
# impute
kadaneimp <- mice(obs, method = c("kadane", ""), kadane.match = match[j],
kadane.corr = corr[r], blocks = list(c("V1", "V2"), c("V3")),
maxit = 1, m = 1, printFlag = FALSE)
imp <- complete(kadaneimp, action = "long")
# evaluate - bias, ci, width, realised correlation
simres[i, "r"] <- cor(imp[3:5])[1,2]
biases <- c(imp[1:50, "V1"] - data[1:50, 1], imp[51:100, "V2"] - data[51:100, 2])
simres[i, "bias"] <- mean(biases)
ci <- quantile(biases, probs = c(0.025, 0.975), na.rm = TRUE)
simres[i, "ci"] <- ifelse(ci[1] < 0 & ci[2] > 0, 1, 0)
simres[i, "width"] <- abs(ci[1] - ci[2])
}
store <- ifelse(j == 1, r, r + length(corr))
res[store, "set.r"] <- corr[r]
res[store, "match"] <- match[j]
res[store, "r"] <- mean(simres[, "r"], na.rm = T)
res[store, "bias"] <- mean(simres[, "bias"], na.rm = T)
res[store, "ci"] <- mean(simres[, "ci"], na.rm = T)
res[store, "width"] <- mean(simres[, "width"], na.rm = T)
}
}
close(pb)
# Results
res
# Plot it
library(ggplot2)
library(cowplot)
plotIt <- ggplot(res, aes(x = set.r, colour = match)) + theme_minimal()
plot_grid(plotIt + geom_line(aes(y = r)) + labs(y = "imputed correlation"),
plotIt + geom_line(aes(y = bias)),
plotIt + geom_line(aes(y = ci)) + lims(y = c(0.9, 1)) + labs(y = "coverage rate") +
geom_hline(yintercept = 0.95, linetype = 2),
plotIt + geom_line(aes(y = width)) + labs(y = "average width"),
nrow = 2)
|
fb1bd91c16cc21662091395176489e6bf78847c6 | 673c7afcbec4e4e99c5e6f0e0f300a3b0751ece5 | /man/gen_object_stack.Rd | 2f820a81bf03b048efb70d673b23b1d9402e92ed | [
"MIT"
] | permissive | chenxifreidrich/MicrogliaMorphoClass | be94d678fafa1163855a9800a674a3757706a4ea | 4e29b11b54799e4f0ce38e42d95dd6003abdbb85 | refs/heads/master | 2021-10-01T17:52:01.346663 | 2018-11-27T21:57:29 | 2018-11-27T21:57:29 | 159,403,375 | 0 | 0 | null | null | null | null | UTF-8 | R | false | true | 617 | rd | gen_object_stack.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/gen_object_stack.R
\name{gen_object_stack}
\alias{gen_object_stack}
\title{Generating Object Stack}
\usage{
gen_object_stack(label_image, image)
}
\arguments{
\item{label_image}{The object identified labeled image stack}
\item{image}{The original image that the labeled image was generating from}
}
\value{
A Image class image stack that contains the obejct identified
}
\description{
Generates an object stack. This code wraps the EBImage function stackObjects to make it compatible with processing large numbers of images.
}
|
0c714e24fccb22ee1e8a731c1f3d01ff0cdb4a65 | 9d3ccd4b3908742dac1a1be4cb6f3e1be9287c4d | /code/simulation/IVW/plot_IVW.r | 6bb88e73435507b143a003c7ae41f231ced0c479 | [] | no_license | andrewhaoyu/MR_MA | f8a35d74ea48d1662de26ee234f305606be2742b | 2e7ac792f1c40ddbc0eb6639d68981672584ccef | refs/heads/master | 2022-07-30T21:53:34.668451 | 2022-07-07T14:29:09 | 2022-07-07T14:29:09 | 203,250,618 | 3 | 1 | null | null | null | null | UTF-8 | R | false | false | 6,868 | r | plot_IVW.r | #plot the ratio estimate distribution
n_vec <- c(15000,75000,150000)
alpha_vec <- c(0.00,0.01,0.03,0.05)
beta_vec <- c(0,0.3,0.5,1)
setwd("/Users/zhangh24/GoogleDrive/MR_MA")
times = 100000
#i1 correponding to n
#i2 corresponding to alpha
library(ggplot2)
n.row <- length(alpha_vec)
n.col <- length(n_vec)
ratio_est_list <- list()
ratio_cover_list <- list()
ci_low_ratio_list <- list()
ci_high_ratio_list <- list()
ratio_est_c_list <- list()
ratio_var_c_list <- list()
ratio_cover_c_list <- list()
ci_low_ratio_c_list <- list()
ci_high_ratio_c_list <- list()
ratio_est_AR_list <- list()
ratio_est_AR_low_list <- list()
ratio_est_AR_high_list <- list()
cover_AR_list <- list()
ratio_est_MR_list <- list()
ratio_est_MR_low_list <- list()
ratio_est_MR_high_list <- list()
cover_MR_list <- list()
temp <- 1
load("./result/simulation/IVW/IVW_merged.Rdata")
for(i4 in 1:4){
ratio_est <- matrix(0,n.row,n.col)
ratio_cover <- matrix(0,n.row,n.col)
ci_low_ratio <- matrix(0,n.row,n.col)
ci_high_ratio <- matrix(0,n.row,n.col)
ratio_est_c <- matrix(0,n.row,n.col)
ratio_cover_c <- matrix(0,n.row,n.col)
ci_low_ratio_c <- matrix(0,n.row,n.col)
ci_high_ratio_c <- matrix(0,n.row,n.col)
ci_high_ratio <- matrix(0,n.row,n.col)
ratio_est_AR <- matrix(0,n.row,n.col)
ratio_est_AR_low <- matrix(0,n.row,n.col)
ratio_est_AR_high <- matrix(0,n.row,n.col)
cover_AR <- matrix(0,n.row,n.col)
ratio_est_MR <- matrix(0,n.row,n.col)
ratio_est_MR_low <- matrix(0,n.row,n.col)
ratio_est_MR_high <- matrix(0,n.row,n.col)
cover_MR <- matrix(0,n.row,n.col)
for(i1 in 1:3){
for(i2 in 1:4){
#
temp = 12*(i4-1)+4*(i1-1)+i2
n <- n_vec[i1]
alpha_G = alpha_vec[i2]
beta_M = beta_vec[i4]
result <- result_final[[temp]]
ratio_est[i2,i1] <- mean(result[[5]])
ci_low_ratio[i2,i1] <- mean(result[[8]])
ci_high_ratio[i2,i1] <- mean(result[[9]])
ratio_cover[i2,i1] <- mean(result[[7]])
ratio_est_c[i2,i1] <- mean(result[[10]])
ci_low_ratio_c[i2,i1] <- mean(result[[14]])
ci_high_ratio_c[i2,i1] <- mean(result[[15]])
ratio_cover_c[i2,i1] <- mean(result[[12]])
ratio_est_AR[i2,i1] <- mean(result[[16]])
ratio_est_AR_low[i2,i1] <- mean(result[[17]],na.rm=T)
ratio_est_AR_high[i2,i1] <- mean(result[[18]],na.rm=T)
cover_AR[i2,i1] <- mean(result[[19]])
ratio_est_MR[i2,i1] <- mean(result[[20]])
ratio_est_MR_low[i2,i1] <- mean(result[[21]])
ratio_est_MR_high[i2,i1] <- mean(result[[22]])
cover_MR[i2,i1] <- mean(result[[23]])
temp <- temp+1
}
}
ratio_est_list[[i4]] <- ratio_est
ratio_cover_list[[i4]] <- ratio_cover
ci_low_ratio_list[[i4]] <- ci_low_ratio
ci_high_ratio_list[[i4]] <- ci_high_ratio
ratio_cover_c_list[[i4]] <- ratio_cover_c
ci_low_ratio_c_list[[i4]] <- ci_low_ratio_c
ci_high_ratio_c_list[[i4]] <- ci_high_ratio_c
ratio_est_AR_list[[i4]] <- ratio_est_AR
ratio_est_AR_low_list[[i4]] <- ratio_est_AR_low
ratio_est_AR_high_list[[i4]] <- ratio_est_AR_high
cover_AR_list[[i4]] <- cover_AR
ratio_est_MR_list[[i4]] <- ratio_est_MR
ratio_est_MR_low_list[[i4]] <- ratio_est_MR_low
ratio_est_MR_high_list[[i4]] <- ratio_est_MR_high
cover_MR_list[[i4]] <- cover_MR
}
ratio_cover_table <- round(rbind(ratio_cover_list[[1]],
ratio_cover_list[[2]],
ratio_cover_list[[3]],
ratio_cover_list[[4]]),2)
write.csv(ratio_cover_table,file = "./result/simulation/IVW/cover_cover_table.csv")
ratio_cover_c_table <- round(rbind(ratio_cover_c_list[[1]],
ratio_cover_c_list[[2]],
ratio_cover_c_list[[3]],
ratio_cover_c_list[[4]]),2)
write.csv(ratio_cover_c_table,file = "./result/simulation/IVW/ratio_cover_c_table.csv")
cover_AR_table <- round(rbind(cover_AR_list[[1]],
cover_AR_list[[2]],
cover_AR_list[[3]],
cover_AR_list[[4]]),2)
write.csv(cover_AR_table,file = "./result/simulation/IVW/cover_AR_table.csv")
cover_MR_table <- round(rbind(cover_MR_list[[1]],
cover_MR_list[[2]],
cover_MR_list[[3]],
cover_MR_list[[4]]),2)
write.csv(cover_MR_table,file = "./result/simulation/IVW/cover_MR_table.csv")
library(gridExtra)
png("./result/simulation/ratio_estimate/ratio_sd_plot.png",width = 16,height = 8,
unit = "in",res = 300)
grid.arrange(p[[1]],p[[5]],p[[9]],
p[[2]],p[[6]],p[[10]],
p[[3]],p[[7]],p[[11]],
p[[4]],p[[8]],p[[12]],
ncol=3)
dev.off()
png("./result/simulation/ratio_estimate/ratio_sd_plot_legend.png",width = 8,height = 8,
unit = "in",res = 300)
ggplot(data.m.temp,aes(value,colour=variable))+
geom_density()+
theme_Publication()
dev.off()
png("./result/simulation/ratio_estimate/ratio_plot.png",width = 16,height = 8,
unit = "in",res = 300)
grid.arrange(p_ratio[[1]],p_ratio[[4]],p_ratio[[7]],
p_ratio[[2]],p_ratio[[5]],p_ratio[[8]],
p_ratio[[3]],p_ratio[[6]],p_ratio[[9]],ncol=3)
dev.off()
png("./result/simulation/ratio_estimate/ratio_plot_legend.png",width = 8,height = 8,
unit = "in",res = 300)
temp =1
result <- result_final[[temp]]
Gamma = result[[1]]
var_Gamma = result[[2]]
gamma = result[[3]]
var_gamma = result[[4]]
var_ratio <- result[[6]]
cover_ratio[i2,i1] <- mean(result[[8]])
cover_true[i2,i1] <- mean(result[[9]])
cover_epi[i2,i1] <- mean(result[[10]])
cover_exact[i2,i1] <- mean(result[[11]])
cover_true_exact[i2,i1] <- mean(result[[12]])
ci_low_ratio[i2,i1] <- mean(result[[13]])
ci_high_ratio[i2,i1] <- mean(result[[14]])
ci_ratio[i2,i1] <- paste0(ci_low_ratio[i2,i1],", ",ci_high_ratio[i2,i1])
ci_low_epi[i2,i1] <- mean(result[[15]])
ci_high_epi[i2,i1] <- mean(result[[16]])
ci_epi[i2,i1] <- paste0(ci_low_epi[i2,i1],", ",ci_high_epi[i2,i1])
ci_low_exact[i2,i1] <- mean(result[[17]])
ci_high_exact[i2,i1] <- mean(result[[18]])
ci_exact[i2,i1] <- paste0(ci_low_exact[i2,i1],", ",ci_high_exact[i2,i1])
ratio_est = result[[5]]
ratio_var = result[[6]]
z_est = ratio_est/sqrt(ratio_var)
standard_norm = rnorm(times)
z_Gamma <- rnorm(times)
z_gamma <- rnorm(times,mean = alpha_vec[i2]*sqrt(n_vec[i1]),sd = 1)
true_distribution <- z_Gamma/sqrt(1+z_Gamma^2/z_gamma^2)
data <- data.frame(z_est,standard_norm,true_distribution)
colnames(data) <- c("Proposed method","IVW","Empirical distribution")
library(reshape2)
data.m <- melt(data)
data.m.temp <- data.m
ggplot(data.m,aes(value,colour=variable))+
geom_density()+
theme_Publication()+
theme(legend.position = "bottom")+
theme(legend.text = element_text(face="bold"))+
  scale_colour_discrete(name = "New Legend Title")  # the densities are mapped to colour, not fill
dev.off()
|
8b013ac1cc3d523741fb709edd25f4f6b7c4f487 | 07dce07a38713513603901a3daa5dd23d264a093 | /ui.R | 8819ed8c195351d650a96544dc4d40b72e4c528d | [] | no_license | avonholle/int-and-conf | 614ae4210df031a3559e7e81b28d21d011598029 | 5da68a23c74e589cb21b6fb67f8540f2136b7ea4 | refs/heads/master | 2021-03-12T21:35:04.627508 | 2015-09-20T12:25:24 | 2015-09-20T12:25:24 | 31,473,633 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 3,850 | r | ui.R | # ui.R
library(shiny)
shinyUI(pageWithSidebar(
headerPanel("Demonstration of statistical interaction and confounding"),
sidebarPanel(
h2("Select parameters for simulation"),
p("Regression coefficients for logistic regression:"),
sliderInput("n1","Sample size:", min=50, max=5000, value=500, step=1, format="###", animate=FALSE),
sliderInput("beta0",
withMathJax(
helpText('\\(
\\text{Intercept (log odds for outcome at x=0 and z=0): } (\\beta_0)
\\)')
),
min=-3, max=3, value=0.5, step=0.1, format="#.#", animate=FALSE),
sliderInput("beta1",
withMathJax(
helpText('\\(
\\text{Coefficient for x (exposure): } (\\beta_1)
\\)')
),
min=-3, max=3, value=0.1, step=0.1, format="#.#", animate=FALSE),
sliderInput("beta2",
withMathJax(
helpText('\\(
\\text{Coefficient for z (confounder): } (\\beta_2)
\\)')
),
min=-3, max=3, value=0.0, step=0.1, format="#.#", animate=FALSE),
sliderInput("beta3",
withMathJax(
helpText('\\(
\\text{Coefficient for } x \\times z \\text{ interaction: } (\\beta_3)
\\)')
),
min=-3, max=3, value=1, step=0.1, format="#.#", animate=FALSE),
br(),
p("Click on 'confounding' box and/or 'interaction' box to add confouding and/or interaciton to the model."),
p("Default model is no confounding or interaction."),
checkboxInput(inputId = "conf", label = "Confounding", value=F),
checkboxInput(inputId = "interact", label = "Interaction", value=F)
),
mainPanel(
withMathJax(),
h3("Full model for simulation"),
h3(uiOutput("eqn1")),
h4("Default model for simulation (no interaction)"),
h3(uiOutput("eqn2")),
h4("Model for confounding between x and z"),
h3(uiOutput("textconf")),
h3("Selected parameters"),
textOutput("textn"),
h3(uiOutput("text0")),
h3(uiOutput("text0i")),
h3(uiOutput("text1i")),
h3(uiOutput("text2i")),
h3(uiOutput("text3i")),
textOutput("textc"),
textOutput("texti"),
br(),
h3("DAG"),
imageOutput("myImage"),
h3("Plot of crude and stratified odds ratios"),
plotOutput("oddsplot.2"),
h4("Estimated values"),
h5("Compare crude odds ratio for x to strata estimates by z (to assess confounding)"),
textOutput("compare.odds.crude"),
# htmlOutput("check.odds.crude"),
# htmlOutput("check.odds.crude.2"),
# htmlOutput("check.odds.crude.3"),
textOutput("compare.odds.z0"),
textOutput("compare.odds.z1"),
br(),
h3("Table of odds ratios of y (vs the x=0 and z=0 group) by x and z (to assess interaction)"),
tableOutput("to.1"),
br(),
h3("ICR"),
textOutput("texticr"),
textOutput("texticr.2"),
h3("Sample of simulated data"),
tableOutput("table1"),
br(),
h3("Frequencies of y and x by the z strata"),
tableOutput("table2alt"),
br(),
h3("Summary of regression"),
tableOutput("summary"),
br(),
h3("Plot of log odds by groups"),
plotOutput("oddsplot"),
br()
#htmlOutput("summary.2")
# now need to add plots of param values and a stargazer across different model fits
)
))
|
6004ce4e62d09bd972a45e80f60bc9f3d91c735c | 14b74a9f7aabd4f63212d83984a68cb22e7aab3e | /man/gui_out_grid.Rd | 4128eb02183f9bb4bef1b919b65be0f973bcb38d | [] | no_license | mariasotoruiz/vmsbase | e85eb87276326fb998412c7b2b148cf29f4bbba3 | 7273270cde06a54b83c53abd55180b8edd41d283 | refs/heads/master | 2020-05-24T08:01:56.567016 | 2018-12-03T13:40:03 | 2018-12-03T13:40:03 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | true | 866 | rd | gui_out_grid.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/gui_out_grid.R
\name{gui_out_grid}
\alias{gui_out_grid}
\title{VMS Effort Gridding GUI}
\usage{
gui_out_grid(vms_db_name = "")
}
\arguments{
\item{vms_db_name}{The path of a VMS DataBase}
}
\value{
This function does not return a value.
The result count will be plotted on the submitted grid. The user can save either the
result count vector as an R object (necessary for \code{\link{gui_dcf_ind}})
or the annotated grid shape file.
}
\description{
The \code{gui_out_grid} function implements the graphical user interface
for the VMS Effort Gridding
}
\details{
This function, given a VMS DB and a Grid Sea Area Map shape file, computes the total
fishing effort (in hours) over each cell of the submitted grid, relative to the
selected metier.
}
\seealso{
\code{\link{gui_dcf_ind}}
}
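\examples{
\dontrun{
## hypothetical path: point the GUI at an existing VMS DataBase
## (no sample database ships with the package)
gui_out_grid(vms_db_name = "path/to/vms_database.sqlite")
}
}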
|
e2ecbbe421643b7e1437446599a201f2c0e80ded | 2defb970de80008d3a5f77728bf3f896832fe2e1 | /HutchCOVID/R/calc_model_stats.R | 21098a0985c2fd92b68f36431bed46948feae27a | [] | no_license | FredHutch/COVID_modeling_schools | bd211844ebd00c5977ac6ad0ef8b298aa7d6a8f2 | 98c1a879e1685b78a21427780f0f34941c309034 | refs/heads/master | 2023-08-31T14:37:46.365370 | 2021-10-06T16:36:47 | 2021-10-06T16:36:47 | 413,988,503 | 1 | 0 | null | null | null | null | UTF-8 | R | false | false | 2,696 | r | calc_model_stats.R | #' Runs model and alculates metrics for calibration from the model output
#'
#' @param pars vector of parameters (i.e. those being calibrated)
#' @param pars_names names of pars parameters
#' @param pars_base other fixed parameters
#' @param pars_temporal other temporal parameters
#' @param state initial state
#' @param start_from_first_inf whether to start from very beginning
#' @param start_date if start_from_first_inf==TRUE, start from first_inf_day, if NULL, use min(dates)
#' @param end_date if NULL, use max(dates)
#' @param dates which dates to include
#' @param rescale_factors i.e. the mean of each time series to normalize by
#' @param stats_to_include which of cases, deaths and hosp to include
#'
#' @return vector of statistics
#' @export
calc_model_stats = function(pars, pars_names, pars_base, pars_temporal, state, start_from_first_inf = FALSE,
start_date = NULL, end_date = NULL, dates,
rescale_factors = list( cases = rep(1, 4), deaths = rep(1, 4), hosp = rep(1, 4), negtests = rep(1, 4) ),
stats_to_include = c("cases", "deaths", "hosp"))
{
parameters = get_params(pars, pars_names, pars_base)
parameters_temporal = get_temporal_params(pars, pars_names, pars_temporal)
if (is.null(start_date))
{
start_date = if (start_from_first_inf) { get_date_from_model_day(parameters$first_inf_day, parameters$model_day0_date) } else { min(dates) }
}
if (is.null(end_date))
{
end_date = max(dates)
}
out = run_model_by_date(parameters, parameters_temporal, state, start_date, end_date)
model_res = shape_data_wide(shape_data_long(out, parameters$model_day0_date))
model_res = model_res %>% filter(date %in% dates)
out_cases = model_res %>% dplyr::select(starts_with("diag"))
out_deaths = model_res %>% dplyr::select(starts_with("death"))
out_hosp = model_res %>% dplyr::select(starts_with("hosp"))
# out_negtests = model_res %>% dplyr::select(starts_with("testneg"))
# normalize model output (transpose is because otherwise division is by cols)
out_cases = t(t(out_cases) / rescale_factors$cases)
out_deaths = t(t(out_deaths) / rescale_factors$deaths)
out_hosp = t(t(out_hosp) / rescale_factors$hosp)
# out_negtests = t(t(out_negtests) / rescale_factors$negtests)
# important that these are in the same order as data!
stats = NULL
if ("cases" %in% stats_to_include) { stats = c(stats, out_cases)}
if ("deaths" %in% stats_to_include) { stats = c(stats, out_deaths)}
if ("hosp" %in% stats_to_include) { stats = c(stats, out_hosp)}
# if ("negtests" %in% stats_to_include) { stats = c(stats, out_negtests)}
return(stats)
}
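# Usage sketch (hypothetical object names): `pars` is the calibrated parameter
# vector, `params_base`/`params_temporal` the fixed and temporal parameter sets,
# and `init_state` the initial compartment state, all produced elsewhere in the
# package's calibration workflow:
# stats <- calc_model_stats(pars, names(pars), params_base, params_temporal,
#                           init_state, dates = obs_dates,
#                           stats_to_include = c("cases", "deaths"))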
|
6b976b2c3a44b406817edf0f333968f4580bff3c | 0853134802bde59234f5b0bd49735b9b39042cfb | /Rsite/source/Rd-man-files/mx.symbol.random_uniform.Rd | 004d6e05c3cfbc23a74bf33b09480103c983672d | [] | no_license | mli/new-docs | 2e19847787cc84ced61319d36e9d72ba5e811e8a | 5230b9c951fad5122e8f5219c4187ba18bfaf28f | refs/heads/master | 2020-04-02T03:10:47.474992 | 2019-06-27T00:59:05 | 2019-06-27T00:59:05 | 153,949,703 | 13 | 15 | null | 2019-07-25T21:33:13 | 2018-10-20T21:24:57 | R | UTF-8 | R | false | true | 1,311 | rd | mx.symbol.random_uniform.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/mxnet_generated.R
\name{mx.symbol.random_uniform}
\alias{mx.symbol.random_uniform}
\title{random_uniform: Draw random samples from a uniform distribution.}
\usage{
mx.symbol.random_uniform(...)
}
\arguments{
\item{low}{float, optional, default=0
Lower bound of the distribution.}
\item{high}{float, optional, default=1
Upper bound of the distribution.}
\item{shape}{Shape(tuple), optional, default=[]
Shape of the output.}
\item{ctx}{string, optional, default=''
Context of output, in format [cpu|gpu|cpu_pinned](n). Only used for imperative calls.}
\item{dtype}{{'None', 'float16', 'float32', 'float64'},optional, default='None'
DType of the output in case this can't be inferred. Defaults to float32 if not defined (dtype=None).}
\item{name}{string, optional
Name of the resulting symbol.}
}
\value{
out The result mx.symbol
}
\description{
.. note:: The existing alias ``uniform`` is deprecated.
}
\details{
Samples are uniformly distributed over the half-open interval *[low, high)*
(includes *low*, but excludes *high*).
Example::
uniform(low=0, high=1, shape=(2,2)) = [[ 0.60276335, 0.85794562],
[ 0.54488319, 0.84725171]]
Defined in src/operator/random/sample_op.cc:L95
}
|
b42e24a2d28684fe43bf53014bec7f763835b0b1 | 86c3077f8fb8469f1f02f33be3ce0121846e2027 | /Rscript/tilFrode.R | 8dfca656d70c396190e9b6a4581dfe62b2b4631a | [] | no_license | asmundb/Master-project | 834ab9d4a2775c5e0abeaecb50e87e0f03d21450 | 354dc568acd9df1a1ba26f94d06c69d3f36071f0 | refs/heads/master | 2020-12-24T06:35:28.903067 | 2017-11-02T13:50:17 | 2017-11-02T13:50:17 | 73,470,769 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 10,713 | r | tilFrode.R | require(ncdf4)
require(fields)
source("read_ISBA.R")
mat_cor <- function(x,y){
  # stop if the input arrays do not have identical dimensions
  if (any(dim(x) != dim(y))){
    stop("x and y must have identical dimensions")
  }
corr <- array(NA, dim=dim(x)[1:2])
for (i in 1:dim(x)[2]){
for (j in 1:dim(x)[1]){
      corr[j,i] <- cor(x[j,i,], y[j,i,], use = "na.or.complete")
}
}
return(corr)
}
mat_rmse <- function(x,y){
  # stop if the input arrays do not have identical dimensions
  if (any(dim(x) != dim(y))){
    stop("x and y must have identical dimensions")
  }
rmse <- array(NA, dim=dim(x)[1:2])
for (i in 1:dim(x)[2]){
for (j in 1:dim(x)[1]){
rmse[j,i] <- rmsd(x[j,i,], y[j,i,])
}
}
return(rmse)
}
rmsd <- function(x,y){
  # note: NAs are dropped from the sum, but the denominator stays length(x)
  rmsd <- sqrt(sum((x-y)^2,na.rm=T)/length(x))
return(rmsd)
}
path <- "/lustre/storeA/users/asmundb/surfex/RESULTS/2014/SEKF/obs06_b005/ISBA/"
files1 <- list.files(path,
pattern="ISBA_PROGNOSTIC.OUT.nc",
recursive=T,
full.names=T)
vars1 <- c("WG1","WG2", "TG1","TG2")
prog <- load_isba(files1, vars1)
files2 <- list.files(path,
pattern="ISBA_DIAGNOSTICS.OUT.nc",
recursive=T,
full.names=T)
vars2 <- c("LE_ISBA","H_ISBA","RN_ISBA","T2M_ISBA")
diag <- load_isba(files2, vars2)
path <- "/lustre/storeB/users/asmundb/surfex/RESULTS/2014/SPINUP/ISBA/"
files3 <- list.files(path,
pattern="ISBA_PROGNOSTIC.OUT.nc",
recursive=T,
full.names=T)
files3 <- files3[125:492]
vars3 <- c("WG1","WG2", "TG1","TG2")
prog_ol <- load_isba(files3, vars3)
files4 <- list.files(path,
pattern="ISBA_DIAGNOSTICS.OUT.nc",
recursive=T,
full.names=T)
files4 <- files4[125:492]
vars4 <- c("LE_ISBA","H_ISBA","RN_ISBA","T2M_ISBA")
diag_ol <- load_isba(files4, vars4)
time <- seq(as.POSIXlt("2014-06-01 01:00"), as.POSIXlt("2014-09-01 00:00"), by=3600)
B <- diag$H_ISBA/diag$LE_ISBA
EF <- 1/(1+B)
june <- 1:719
july <- 720:1463
august <- 1464:2207
julaug <- 1057:1799
EF2 <- diag$LE_ISBA/diag$RN_ISBA
source("topo.R")
stop()  # halts the script here when it is sourced in full
# LE WG
col <- two.colors(11, "blue","red", "#EEEEEE")
pdf("figures/2014/SEKF_06_005/LE_WG1_july.pdf")
image.plot( mat_cor(diag$LE_ISBA[,,july], prog$WG1[,,july]),zlim=c(-1,1),col=col, main="cor(LE, WG1) july 2014")
topo()
dev.off()
pdf("figures/2014/SEKF_06_005//LE_WG1_june.pdf")
image.plot( mat_cor(diag$LE_ISBA[,,june], prog$WG1[,,june]),zlim=c(-1,1),col=col, main="cor(LE, WG1) june 2014")
topo()
dev.off()
pdf("figures/2014/SEKF_06_005//LE_WG1_julaug.pdf")
image.plot( mat_cor(diag$LE_ISBA[,,julaug], prog$WG1[,,julaug]),zlim=c(-1,1),col=col, main="cor(LE, WG1) 15.july-14.aug 2014")
topo()
dev.off()
# EF WG
pdf("figures/2014/SEKF_06_005//EF_WG1_july.pdf")
image.plot( mat_cor(EF2[,,july], prog$WG1[,,july]),zlim=c(-0.4,0.4),col=col, main="cor(EF, WG1) july 2014")
topo()
dev.off()
pdf("figures/2014/SEKF_06_005//EF_WG1_june.pdf")
image.plot( mat_cor(EF2[,,june], prog$WG1[,,june]),zlim=c(-0.4,0.4),col=col, main="cor(EF, WG1) june 2014")
topo()
dev.off()
# LE TG
pdf("figures/2014/SEKF_06_005//LE_TG1_july.pdf")
image.plot( mat_cor(diag$LE_ISBA[,,july], prog$TG1[,,july]), zlim=c(-1,1), col=rev(col), main="cor(LE, TG1) july 2014")
topo()
dev.off()
pdf("figures/2014/SEKF_06_005//LE_TG1_julaug.pdf")
image.plot( mat_cor(diag$LE_ISBA[,,julaug], prog$TG1[,,julaug]), zlim=c(-1,1), col=rev(col), main="cor(LE, TG1) 15.july-14.aug 2014")
topo()
dev.off()
pdf("figures/2014/SEKF_06_005//LE_TG1_june.pdf")
image.plot( mat_cor(diag$LE_ISBA[,,june], prog$TG1[,,june]),zlim=c(-1,1), col=rev(col), main="cor(LE, TG1) june 2014")
topo()
dev.off()
pdf("figures/2014/SEKF_06_005//LE_T2M_julaug.pdf")
image.plot( mat_cor(diag$LE_ISBA[,,julaug], diag$T2M_ISBA[,,julaug]), zlim=c(-1,1), col=rev(col), main="cor(LE, T2M) 15.july-14.aug 2014")
topo()
dev.off()
rn <- as.numeric(diag$RN_ISBA)
le <- as.numeric(diag$LE_ISBA)
sm <- as.numeric(prog$WG1)
png("figures/2014/SEKF_06_005/EF.png")
plot(sm,le/rn,main="EF=LE/RN vs soil moisture")
dev.off()
rn <- as.numeric(diag$RN_ISBA[,,june])
le <- as.numeric(diag$LE_ISBA[,,june])
sm <- as.numeric(prog$WG1[,,june])
png("figures/2014/SEKF_06_005/EF_june.png")
plot(sm,le/rn,main="EF=LE/RN vs soil moisture june")
dev.off()
rn <- as.numeric(diag$RN_ISBA[,,july])
le <- as.numeric(diag$LE_ISBA[,,july])
sm <- as.numeric(prog$WG1[,,july])
png("figures/2014/SEKF_06_005/EF_july.png")
plot(sm,le/rn,main="EF=LE/RN vs soil moisture july")
dev.off()
sm <- as.numeric(prog$WG1)
ef <- as.numeric(EF2)
png("figures/2014/SEKF_06_005/EF2.png")
plot(sm, ef,ylim=c(-100,100))
dev.off()
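# NOTE: `ob` (grid index of the station) and `wwilt1` (wilting point field)
# used in the next block are defined further down in this script; when running
# interactively, execute those sections first.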
ef <- diag$LE_ISBA[ob[1],ob[2],]/diag$RN_ISBA[ob[1],ob[2],]
ef1 <- ef[!ef %in% boxplot.stats(ef)$out]
sm <- prog$WG1[ob[1],ob[2],][!ef %in% boxplot.stats(ef)$out]
pdf("figures/2014/SEKF_06_005/ef_sm.pdf")
plot(sm,ef1,main="EF vs SM, outliers removed",ylab="EF=LE/RN")
LM <- lm(ef1~sm)
abline(LM,col="red")
abline(v=wwilt1[ob[1],ob[2]])
dev.off()
###
# domain average t2m
ncid <- nc_open("surfex_files/FORCING.nc")
I <- ncvar_get(ncid, ncid$var$LON)
J <- ncvar_get(ncid, ncid$var$LAT)
nc_close(ncid)
print("done")
print("read soil parameters from prep file...")
filename <- "surfex_files/PREP_SODA.nc"
ncid <- nc_open(filename)
wwilt1 <- ncvar_get(ncid, ncid$var$WWILT1)
wfc1 <- ncvar_get(ncid, ncid$var$WFC1)
nc_close(ncid)
# Blindern
source("ffunctions.R")
aas <- 17850   # blon <- 10.7818 blat <- 59.6605  (station number; needed by getObs() below)
#blindern <- 18700 #blon <- 10.719025 blat <- 59.942484
kise <- 12550
blon <- 10.9583
blat <- 60.7908
dagali <- 29720
blon <- 8.5263
blat <- 60.4188
bij <- fnn_lamb(I,J,blon,blat)$ij_out
m <- matrix(1:length(I),111,111)
ob <- which(m == bij, arr.ind=T)
ob[1] <- 74 # manual correction
plot(diag$T2M_ISBA[ob[1],ob[2],],type="l")
getObs <- function(tab, P, fd, td, stnr){
URL <- sprintf("http://klapp/metnopub/production/metno?re=30&tab=%s&%s&fd=%s&td=%s&split=0&nmt=0&ddel=dot&del=;&ct=text/plain&s=%s", tab, P, fd,td, stnr)
df <- read.table(URL,na.strings=c("-",".","<NA>"), header=TRUE)
colnames(df)[2] <- "TIME"
df[,"TIME"] <- gsub('\\D','\\1',df[,"TIME"])
return(df)
}
ta_obs <- getObs("T_ADATA","p=TA&p=TAX","01.06.2014","01.09.2014",aas)
t2m_obs <- ta_obs[2:2209,3]
t2m_max <- ta_obs[2:2209,4]
time2 <- seq(as.POSIXlt("2014-06-01 01:00"), as.POSIXlt("2014-09-01 00:00"), by=3600)
pdf("figures/2014/scatter_T2M_obs_mod.pdf")
plot(t2m_obs,diag$T2M_ISBA[ob[1],ob[2],]-273.15,main="T2M Ås 2014 may-aug; r= 0.9552136",xlab="obs",ylab="surfex offline")
abline(0,1,col="red")
dev.off()
pdf("figures/2014/timeserie_T2M_obs_mod.pdf")
plot(time2,t2m_obs,type='l',main="T2M Ås",ylab="T2M [C]")
lines(time2,diag$T2M_ISBA[ob[1],ob[2],]-273.15,col="red")
legend("topleft",legend=c("obs","sfx offln"), lty=1,col=c("black","red"))
dev.off()
pdf("figures/2014/timeserie_T2M_obs_mod_diff.pdf")
plot(time2,diag$T2M_ISBA[ob[1],ob[2],]-273.15-t2m_obs,type='l',main="T2M difference sfx-obs Ås",ylab="T2M [C]")
legend("topleft",legend=c("obs","sfx offln"), lty=1,col=c("black","red"))
dev.off()
T2Mdiff <- abs(diag$T2M_ISBA - diag_ol$T2M_ISBA)
T2Mdiff[is.infinite(T2Mdiff)] <- NA
T2MdiffMax <- apply(T2Mdiff, 1:2, max,na.rm=T)
T2MdiffMax[is.infinite(T2MdiffMax)] <- NA
image.plot(T2MdiffMax)
# SM MAPS
smax <- apply(prog$WG1,1:2,max,na.rm=T)
smin <- apply(prog$WG1,1:2,min,na.rm=T)
smax[is.infinite(smax)] <- NA
smin[is.infinite(smin)] <- NA
zlim <- c(min(smin,na.rm=T),max(smax,na.rm=T))
pdf("figures/2014/sm_maps.pdf")
par(mfrow=c(2,2))
image.plot(apply(prog$WG1,1:2,mean,na.rm=T), col=rev(tim.colors()),main="mean SM",zlim=zlim)
image.plot(apply(prog$WG1,1:2,sd,na.rm=T),col=rev(tim.colors()),main="SM sd",zlim=zlim)
image.plot(smax,col=rev(tim.colors()),main="max SM",zlim=zlim)
image.plot(smin,col=rev(tim.colors()),main="min SM",zlim=zlim)
dev.off()
## Timeseries
rms <- function(x,y){
if (length(x) == length(y)){
n <- length(x)
xrms <- sqrt(sum((x-y)^2)/n)
} else {
xrms <- "lengths differ"
}
return(xrms)
}
z <- (prog$WG2[,,1650]-prog_ol$WG2[,,1650])/prog$WG2[,,1650]*100
mx <- max(abs(z),na.rm=T)
zlim <- c(-mx,mx)
image.plot(z,col=two.colors(100,"red","blue","white"),
main="difference WG2 in percent, DA - open loop",
zlim=zlim)
topo()
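# NOTE: `HO07` and `x07` below are assumed to be loaded earlier in the session
# (e.g. from saved SEKF results, like the readRDS calls at the bottom of this
# script); they are not created in this file.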
Kg <- matrix(NA, 7, 368)
for (i in 1:7){
Kg[i,] <- as.numeric(HO07$K[i,ob[1],ob[2],])
}
Kg[which(Kg == 0)] <- NA
pdf("figures/2014/Kbox_07.pdf")
boxplot(t(Kg), xlab="Soil layer", ylab ="Kalman gain", main="Kalman gain at Kise JJA 2014")
dev.off()
kiseObs <- read.table("kise_1.csv",skip=1,sep=",",stringsAsFactors=F,header=T)
tmp <- strsplit(kiseObs$X,"/")
nveTime <- array(dim=length(tmp))
for (i in 1:length(tmp)){
nveTime[i] <- sprintf("%04d-%02d-%02d 12:00",as.numeric(tmp[[i]][3]),as.numeric(tmp[[i]][1]),as.numeric(tmp[[i]][2]))
}
nveTime <- as.POSIXlt(nveTime)
nvewhich <- which(nveTime > as.POSIXlt("2014-06-01 00:00:00") & nveTime < as.POSIXlt("2014-09-01 00:00:00"))
nveTime <- nveTime[nvewhich]
percentKise <- as.numeric(kiseObs$X.10.cm)
smKise <- 0.01*swi2sm(mm2perc(percentKise), 6, 48)
anaTime <- seq(as.POSIXlt("2014-06-01 06:00"), as.POSIXlt("2014-09-01 00:00"), by=3600*6)
sot <- seq(1,368,by=2)
plot(anaTime,x07$xa[ob[1],ob[2],,3],type='l',ylim=c(0.1, 0.5))
lines(nveTime,smKise[nvewhich],col="blue")
lines(anaTime[seq(1,368,by=2)],x07$yo[ob[1],ob[2],seq(1,368,by=2),1],col="red")
plot(x07$xf[ob[1],ob[2],,2], HO07$K[2,ob[1],ob[2],])
plot(x07$xf[ob[1],ob[2],sot,2], HO07$H[2,ob[1],ob[2],sot])
#### SCATTER PLOT
pdf("figures/2014/SMvsH.pdf")
par(mfrow=c(2,2),oma=c(0,0,1.5,0))
for (i in 1:4){
ylab <- sprintf("dWG2/dWG%d", i)
plot(x07$xf[ob[1],ob[2],sot,2], HO07$H[i,ob[1],ob[2],sot],xlab=paste("WG",2,sep=""), ylab=ylab,ylim=c(0,0.85))
}
title(main="Soil moisture vs. Jaobians", outer=T,cex.main=2)
dev.off()
#### MEANS
julaug_meanwg2_DA <- apply(prog$WG2[,,julaug],1:2, mean,na.rm=T)
julaug_meanwg2_OL <- apply(prog_ol$WG2[,,julaug],1:2, mean,na.rm=T)
pdf("figures/2014/julaug_mean_wg2_DA.pdf")
image.plot(julaug_meanwg2_DA, col=two.colors(11,"red","blue","white"), main="mean WG2 July 15 - Aug 14 2014")
topo()
dev.off()
pdf("figures/2014/julaug_mean_diff_wg2.pdf")
image.plot(julaug_meanwg2_DA-julaug_meanwg2_OL,col=two.colors(11,"red","blue","white"),
main="mean difference DA-OL WG2 July 15 - Aug 14 2014", zlim=c(-0.0004657016,0.0004657016))
topo()
dev.off()
##########################################################
smos <- readRDS("RDS_files/SEKF_smos.rds")
smap <- readRDS("RDS_files/SEKF_smap.rds")
smos_obs <- smos$yo[,,,1]
smos_inc <- smos$inc[,,,2]
smos_innov <- smos$innov[,,,1]
smos_inc[which(is.na(smos_obs))] <- NA
smos_innov[which(is.na(smos_obs))] <- NA
|
d1cdf1dd22377d072b999e672fa6e1de8195030c | ab7d15d06ed92cd51cc383dc9e98ae2a8fa41eaa | /R/add_node_clones_ws.R | a0cee175b602db9b64860747cf8a7beef613a0d3 | [
"MIT"
] | permissive | rich-iannone/DiagrammeR | 14c46eb994eb8de90c50166a5d2d7e0668d3f7c5 | 218705d52d445c5d158a04abf8107b425ea40ce1 | refs/heads/main | 2023-08-18T10:32:30.784039 | 2023-05-19T16:33:47 | 2023-05-19T16:33:47 | 28,556,914 | 1,750 | 293 | NOASSERTION | 2023-07-10T20:46:28 | 2014-12-28T08:01:15 | R | UTF-8 | R | false | false | 7,863 | r | add_node_clones_ws.R | #' Add clones of a selection of nodes
#'
#' @description
#'
#' Add new nodes to a graph object of class `dgr_graph` which are clones of
#' nodes in an active selection of nodes. All node attributes are preserved
#' except for the node `label` attribute (to maintain the uniqueness of non-`NA`
#' node label values). A vector of node `label` can be provided to bind new
#' labels to the cloned nodes.
#'
#' This function makes use of an active selection of nodes (and the function
#' ending with `_ws` hints at this).
#'
#' Selections of nodes can be performed using the following node selection
#' (`select_*()`) functions: [select_nodes()], [select_last_nodes_created()],
#' [select_nodes_by_degree()], [select_nodes_by_id()], or
#' [select_nodes_in_neighborhood()].
#'
#' Selections of nodes can also be performed using the following traversal
#' (`trav_*()`) functions: [trav_out()], [trav_in()], [trav_both()],
#' [trav_out_node()], [trav_in_node()], [trav_out_until()], or
#' [trav_in_until()].
#'
#' @inheritParams render_graph
#' @param add_edges An option for whether to add edges from the selected nodes
#' to each of their clones, or, in the opposite direction.
#' @param direction Using `from` will create new edges from existing nodes to
#' the new, cloned nodes. The `to` option will create new edges directed
#' toward the existing nodes.
#' @param label An optional vector of node label values. The vector length
#' should correspond to the number of nodes in the active selection of nodes.
#'
#' @return A graph object of class `dgr_graph`.
#'
#' @examples
#' # Create a graph with a path of
#' # nodes; supply `label`, `type`,
#' # and `value` node attributes,
#' # and select the created nodes
#' graph <-
#' create_graph() %>%
#' add_path(
#' n = 3,
#' label = c("d", "g", "r"),
#' type = c("a", "b", "c")) %>%
#' select_last_nodes_created()
#'
#' # Display the graph's internal
#' # node data frame
#' graph %>% get_node_df()
#'
#' # Create clones of all nodes
#' # in the selection but assign
#' # new node label values
#' # (leaving `label` as NULL
#' # yields NA values)
#' graph <-
#' graph %>%
#' add_node_clones_ws(
#' label = c("a", "b", "v"))
#'
#' # Display the graph's internal
#' # node data frame: nodes `4`,
#' # `5`, and `6` are clones of
#' # `1`, `2`, and `3`
#' graph %>% get_node_df()
#'
#' # Select the last nodes
#' # created (`4`, `5`, and `6`)
#' # and clone those nodes and
#' # their attributes while
#' # creating new edges between
#' # the new and existing nodes
#' graph <-
#' graph %>%
#' select_last_nodes_created() %>%
#' add_node_clones_ws(
#' add_edges = TRUE,
#' direction = "to",
#' label = c("t", "z", "s"))
#'
#' # Display the graph's internal
#' # edge data frame; there are
#' # edges between the selected
#' # nodes and their clones
#' graph %>% get_edge_df()
#'
#' @family Node creation and removal
#'
#' @export
add_node_clones_ws <- function(
graph,
add_edges = FALSE,
direction = NULL,
label = NULL
) {
# Get the time of function start
time_function_start <- Sys.time()
# Get the name of the function
fcn_name <- get_calling_fcn()
# Validation: Graph object is valid
if (graph_object_valid(graph) == FALSE) {
emit_error(
fcn_name = fcn_name,
reasons = "The graph object is not valid")
}
# Validation: Graph contains nodes
if (graph_contains_nodes(graph) == FALSE) {
emit_error(
fcn_name = fcn_name,
reasons = "The graph contains no nodes, so, clones of nodes cannot be added")
}
# Validation: Graph object has valid node selection
if (graph_contains_node_selection(graph) == FALSE) {
emit_error(
fcn_name = fcn_name,
reasons = "There is no selection of nodes available.")
}
# # Stop function if vector provided for label but it
# # is not of length `n`
# if (!is.null(label)) {
# if (length(label) != n) {
# stop(
# "The vector provided for `label` is not the same length as the value of `n`."),
# call. = FALSE
# }
# }
# Get the value for the latest `version_id` for
# graph (in the `graph_log`)
current_graph_log_version_id <-
graph$graph_log$version_id %>%
max()
# Get the number of columns in the graph's
# internal node data frame
n_col_ndf <-
graph %>%
get_node_df() %>%
ncol()
# Get the node ID values for
# the nodes in the active selection
selected_nodes <- suppressMessages(get_selection(graph))
# Clear the graph's selection
graph <-
suppressMessages(
graph %>%
clear_selection())
# Get the number of nodes in the graph
nodes_graph_1 <-
graph %>%
count_nodes()
# Get the number of edges in the graph
edges_graph_1 <-
graph %>%
count_edges()
node_id_value <- graph$last_node
for (i in 1:length(selected_nodes)) {
# Extract all of the node attributes
# (`type` and additional node attrs)
node_attr_vals <-
graph %>%
get_node_df() %>%
dplyr::filter(id %in% selected_nodes[i]) %>%
dplyr::select(-id, -label)
# Create a clone of the selected
# node in the graph
graph <-
graph %>%
add_node(
label = label[i])
# Obtain the node ID value for
# the new node
new_node_id <-
graph$nodes_df[nrow(graph$nodes_df), 1]
# Create a node selection for the
# new nodes in the graph
graph <-
graph %>%
select_nodes_by_id(
nodes = new_node_id)
# Iteratively set node attribute values for
# the new nodes in the graph
for (j in 1:ncol(node_attr_vals)) {
for (k in 1:length(new_node_id)) {
graph$nodes_df[
which(graph$nodes_df[, 1] == new_node_id[k]),
which(colnames(graph$nodes_df) == colnames(node_attr_vals)[j])] <-
node_attr_vals[[j]]
}
}
# Create an edge if `add_edges = TRUE`
if (add_edges) {
if (direction == "from") {
graph <-
graph %>%
add_edge(
from = new_node_id,
to = selected_nodes[i])
} else {
graph <-
graph %>%
add_edge(
from = selected_nodes[i],
to = new_node_id)
}
}
# Increment the node ID value
node_id_value <- node_id_value + 1
# Clear the graph's active selection
graph <-
suppressMessages(
graph %>%
clear_selection())
}
# Remove extra items from the `graph_log`
graph$graph_log <-
graph$graph_log %>%
dplyr::filter(version_id <= current_graph_log_version_id)
# Get the updated number of nodes in the graph
nodes_graph_2 <- graph %>% count_nodes()
# Get the number of nodes added to
# the graph
nodes_added <- nodes_graph_2 - nodes_graph_1
# Get the updated number of edges in the graph
edges_graph_2 <- graph %>% count_edges()
# Get the number of edges added to
# the graph
edges_added <- edges_graph_2 - edges_graph_1
# Update the `last_node` value
graph$last_node <- max(graph$nodes_df$id)
# Update the `last_edge` value
graph$last_edge <- max(graph$edges_df$id)
# Update the `graph_log` df with an action
graph$graph_log <-
add_action_to_log(
graph_log = graph$graph_log,
version_id = nrow(graph$graph_log) + 1,
function_used = fcn_name,
time_modified = time_function_start,
duration = graph_function_duration(time_function_start),
nodes = nrow(graph$nodes_df),
edges = nrow(graph$edges_df),
d_n = nodes_added,
d_e = edges_added)
# Perform graph actions, if any are available
if (nrow(graph$graph_actions) > 0) {
graph <-
graph %>%
trigger_graph_actions()
}
# Write graph backup if the option is set
if (graph$graph_info$write_backups) {
save_graph_as_rds(graph = graph)
}
graph
}
|
96b06cae3d26d2ac66245eeec6c40742d8b25bbc | 72c48a44d1adfa0e6391cbf3f38b449cf64dafd6 | /cachematrix.R | 12bee2dbea019559b3e43b88330bc196241d019c | [] | no_license | marcocaldascruz/ProgrammingAssignment2 | 5fecfbebce296033a94a95b89d0128fb2b4c7c07 | fb34837f7d836b698f7b29a0c556dc37c1262598 | refs/heads/master | 2020-04-06T04:49:42.535746 | 2015-07-26T10:42:00 | 2015-07-26T10:42:00 | 39,719,877 | 0 | 0 | null | 2015-07-26T09:07:10 | 2015-07-26T09:07:10 | null | UTF-8 | R | false | false | 1,739 | r | cachematrix.R |
## This function creates a special "matrix" object that can cache its inverse.
makeCacheMatrix <- function(x = matrix()) {
## Initialize m, which will hold the cached inverse of the matrix.
m <- NULL
## The "set" function stores a new matrix and resets the cached inverse
## to NULL, since it has not yet been computed for the new matrix.
set <- function(y) {
x <<- y
m <<- NULL
}
## The "get" function returns the stored matrix.
get <- function() x
## The "setsolve" function caches the inverse of the matrix.
setsolve <- function(solve) m <<- solve
## The "getsolve" function returns the cached inverse (or NULL).
getsolve <- function() m
## Return the list of accessor functions for this makeCacheMatrix object.
list(set = set, get = get,
setsolve = setsolve,
getsolve = getsolve)
}
## This function returns the inverse of the matrix held by a makeCacheMatrix
## object, computing and caching it on the first call.
cacheSolve <- function(x, ...) {
## Fetch the cached inverse, if any.
m <- x$getsolve()
## If the inverse is already cached, return it without recomputing.
if(!is.null(m)) {
message("getting cached data")
return(m)
}
## Otherwise, retrieve the stored matrix...
data <- x$get()
## ...compute its inverse...
m <- solve(data, ...)
## ...cache the result for future calls...
x$setsolve(m)
## ...and return the inverse.
m
}
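## Example usage:
cm <- makeCacheMatrix(matrix(c(2, 0, 0, 2), nrow = 2, ncol = 2))
cacheSolve(cm)  # computes the inverse and caches it
cacheSolve(cm)  # prints "getting cached data" and returns the cached inverse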
|
7878744a4ad061a47f3d84c828905e6266aa9cb6 | d3e21ebbd77a742fcdd6c5507fa98f22ad04c1c7 | /missForest.R | d3f67207dc7c65d8abec0d548cbee216752cf344 | [] | no_license | sunandha21/Data-Imputation-methods | 0a3f2cb6b154a2ebdfc545b4027b7eda8b3046d1 | 4f69aeccb68b726ade0cf6789ea167f0ffc817b5 | refs/heads/master | 2021-08-14T22:04:59.934964 | 2017-11-16T22:23:40 | 2017-11-16T22:23:40 | 111,028,520 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 796 | r | missForest.R | data("iris")
library(missForest)  # nonparametric imputation via random forests
library(class)       # knn()
library(caret)       # RMSE(), confusionMatrix()
iris.mis <- prodNA(iris, noNA = 0.1)  # introduce 10% missing values at random
iris.mis <- subset(iris.mis, select = -c(Species))
summary(iris.mis)
iris.imp<-missForest(iris.mis)
iris.imp$ximp
summary(iris.imp)
iris.org<-subset(iris, select = -c(Species))
RMSE(as.matrix(iris.imp$ximp), as.matrix(iris.org))  # as.matrix() gives caret::RMSE() numeric input
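# Alternatively (sketch), missForest's own mixError() reports the imputation
# error (NRMSE) directly against the complete data:
# mixError(iris.imp$ximp, iris.mis, iris.org)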
# recover the dropped Species labels with kNN, trained on the original iris measurements
newtrain = data.frame(sepal_length = iris$Sepal.Length,sepal_width = iris$Sepal.Width,petal_length = iris$Petal.Length,petal_width = iris$Petal.Width)
newtest = data.frame(sepal_length = iris.imp$ximp$Sepal.Length,sepal_width = iris.imp$ximp$Sepal.Width,petal_length = iris.imp$ximp$Petal.Length,petal_width = iris.imp$ximp$Petal.Width)
iris.imp_species = knn(train = newtrain, test = newtest,cl=iris$Species,k=3)
iris.imp$ximp$species<-iris.imp_species
cm<-confusionMatrix(iris$Species,iris.imp$ximp$species)
cm
|
e0e34c1d477e71ef37995a4a4e6016070e5b4f68 | c54aba701f4d66469027c63149a6d3bee27d30f9 | /man/is_compressed.Rd | ab809918a465396e8a17d842bdfa463b534276e0 | [
"PDDL-1.0",
"LicenseRef-scancode-public-domain"
] | permissive | ezwelty/dpkg | b5d052dd9d5b0f48f4a8e19e45ac21d266632d53 | c85622136bfb4d3ee68c1e1debdb4caf0ad15a37 | refs/heads/master | 2020-06-27T13:06:05.566736 | 2017-08-24T21:46:25 | 2017-08-24T21:46:25 | 97,056,022 | 2 | 0 | null | null | null | null | UTF-8 | R | false | true | 318 | rd | is_compressed.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/compression.R
\name{is_compressed}
\alias{is_compressed}
\title{Test if path is for a compressed file}
\usage{
is_compressed(file)
}
\arguments{
\item{file}{(character) Path to file.}
}
\description{
Test if path is for a compressed file
}
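\examples{
\dontrun{
## hypothetical paths; which extensions count as compressed depends on the
## implementation
is_compressed("data.csv.gz")
is_compressed("data.csv")
}
}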
|
4591edaddb2be9cdea0095f4e31b8918838888ee | 7bd4dcab4ad9e36e9482ca21b048db0079ac5ffe | /man/RcmdrPlugin.TeachingDemos-internal.Rd | 76bc3b3a35adde13ad6d620706ad54cd86302369 | [] | no_license | cran/RcmdrPlugin.lfstat | ac44c644366515a29a859727ca4937beccb55c23 | ca8531c2e9b9e4b1e41123eefaa33c5e6ffb1ca9 | refs/heads/master | 2020-12-25T17:25:08.222651 | 2018-06-26T09:11:20 | 2018-06-26T09:11:20 | 17,693,121 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 973 | rd | RcmdrPlugin.TeachingDemos-internal.Rd | \name{RcmdrlfstatPlugin-internal}
\title{Internal RcmdrlfstatPlugin objects}
\alias{BFIcalc}
\alias{MAMcalc}
\alias{Q95calc}
\alias{activelf}
\alias{activelfandbf}
\alias{bfplotcalc}
\alias{createlfdatacalc}
\alias{dmcurvecalc}
\alias{fdccalc}
\alias{getlfopt}
\alias{hydrocalc}
\alias{listlfobj}
\alias{loadlfopt}
\alias{meanflowcalc}
\alias{multitablecalc}
\alias{nalfcheckcalc}
\alias{readlfdatasheet}
\alias{recessionanalysis}
\alias{resetlfoptions}
\alias{rfacalc}
\alias{isthereanRFD}
\alias{listrfd}
\alias{rcgquantiles}
\alias{rcgsitequantiles}
\alias{rfaindex}
\alias{rfap}
\alias{savelfopt}
\alias{sbplotcalc}
\alias{seasindexcalc}
\alias{seasratiocalc}
\alias{setunitcalc}
\alias{streamdefcalc}
\alias{streamdefplotcalc}
\alias{tyearscalc}
\alias{updatelfcalc}
\alias{nainterpolation}
\alias{tyearsn}
\description{Internal RcmdrlfstatPlugin objects.}
\details{These are not to be called by the user.}
\keyword{internal}
|
c31062a271ad367cb87f88f74af6eab5cb552620 | 4ade856b9ead7be1e1ce0918e2a74560c4d5d074 | /Utility/omisAPI.R | 4643e8912401f9fcdd53d386161c055cfe26007c | [] | no_license | richardblades/omis-support | c9592c860f8198c38d81ee1a81e804aa4cee1891 | d8ad1313731df7d6529870e698ba536638e70bc5 | refs/heads/master | 2021-01-18T22:12:39.046487 | 2016-08-31T11:45:44 | 2016-08-31T11:45:44 | 63,946,830 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 2,653 | r | omisAPI.R | #'------------------------------------------------------------------------------------------
#'
#' o m i s A P I . R
#'
#'------------------------------------------------------------------------------------------
#-------------------------------------------------------------------------------------------
# Establish environment
#-------------------------------------------------------------------------------------------
library(jsonlite)
library(httr) # Simplifies URL and HTTP interaction
#-------------------------------------------------------------------------------------------
# Declare OpenCPU URL
#-------------------------------------------------------------------------------------------
# url <- "http://localhost:5941/ocpu/library/omis/"
url <- "http://www.omis-scarborough.uk/ocpu/library/omis/"
#-------------------------------------------------------------------------------------------
# Load JSON from URL directly into R data frame and then write out as a CSV file.
#-------------------------------------------------------------------------------------------
df <- fromJSON(paste0(url, "data/nuts1Year/json"))
write.csv(df, file="~/Downloads/nuts1Year.csv", quote=FALSE, row.names=FALSE)
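#-------------------------------------------------------------------------------------------
# Defensive variant (sketch): check the HTTP status before parsing the body.
#-------------------------------------------------------------------------------------------
r <- GET(paste0(url, "data/nuts1Year/json"))
stop_for_status(r)                                  # errors out on 4xx/5xx responses
df <- fromJSON(content(r, "text", encoding = "UTF-8"))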
#-------------------------------------------------------------------------------------------
# Useful omis API commands
#-------------------------------------------------------------------------------------------
r <- GET(paste0(url, "")) # package information
r <- GET(paste0(url, "R/")) # R code directory
r <- GET(paste0(url, "R/nuts4D47Model")) # R code > print
r <- GET(paste0(url, "data/")) # data directory
r <- GET(paste0(url, "data/nuts1Year")) # data object > print
r <- GET(paste0(url, "data/nuts1Year/json")) # data object > json
r <- GET(paste0(url, "data/nuts1Year/md")) # data object > markdown
r <- GET(paste0(url, "data/nuts1Year/csv")) # data object > CSV
r <- GET(paste0(url, "data/nuts1Year/rda")) # data object > R dataset
r <- GET(paste0(url, "data/nuts1Year/tab")) # data object > table
r <- GET(paste0(url, "data/nuts1Year/tab?sep='|'")) # data object > table with sep
#-------------------------------------------------------------------------------------------
# Examine httr output
#-------------------------------------------------------------------------------------------
print(r)
status_code(r)
headers(r)
r$status_code
r$headers
r$args
str(content(r))
http_status(r)
content(r, "text")
content(r, "raw")
content(r, "parsed")
|
6fb45345d22ec79555d0e27244c5184def35d469 | 13dce2c67a4637add28f79831b3f223304d8913d | /Code/3.Statistics and hashtags.R | b38b023d5ff36b0aec16d7e1386537a8007654e4 | [] | no_license | ycui4/Inferring-Twitters-Socio-Demographics-to-Correct-Sampling-Bias-of-Social-Media-Data-for-Augmenting | 31bd9a6c5102cf3bb7ba1dddb98ac591066183c8 | b20212a787abb7a5476d7b7bbf23b00add7ef4da | refs/heads/master | 2020-07-11T08:18:45.924621 | 2019-08-26T14:11:34 | 2019-08-26T14:11:34 | 204,486,073 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,375 | r | 3.Statistics and hashtags.R | library(dplyr)
file1<-read.csv('direction/TF1-1500.csv', header=T)
file1$is_tweet_get<-NA
file1$numberOfTweetGet<-NA
file1$numberOfGeoTweet<-NA
file02<-'direction/TF Timeline csv/'
hashtag<-c()
# Extract the client name from Twitter's HTML "source" field,
# e.g. '<a href="...">Twitter for iPhone</a>' -> "Twitter for iPhone"
f1<-function(x){
a1<-strsplit(x, "<")[[1]][2]
a2<-strsplit(a1,">")[[1]][2]
return(a2)
}
for(i in 1:nrow(file1)){
file2<-paste0(file02,file1$UserID[i],'.csv')
if(file.exists(file2)){
Data<-read.csv(file2, header=T)
if(nrow(Data)>0){
Data$source<-as.character(Data$source)
Data$source<-matrix(unlist(lapply(Data$source, f1)), ncol=1)
Data$hashtag1<-as.character(Data$hashtag1)
Data$hashtag2<-as.character(Data$hashtag2)
hashtag<-unique(c(hashtag, Data$hashtag1))
hashtag<-unique(c(hashtag, Data$hashtag2))
file1$is_tweet_get[i]<-'true'
file1$numberOfTweetGet[i]<-nrow(Data)
file1$numberOfGeoTweet[i]<-length(which(!is.na(Data$lat)))
}else{
file1$is_tweet_get[i]<-'false'
}
}
cat(i, '\n')
}
file3<-filter(file1, is_tweet_get=='true')
hashtag<-hashtag[-1]   # drop the first collected element (presumably an NA/blank placeholder)
write.csv(file1, '/Users/yu/Documents/Study/Project/Twitter Demographic/Work/20180308 Facebook Labeling/TF1-1500.csv', row.names=FALSE)
write.csv(matrix(hashtag,ncol=1), '/Users/yu/Documents/Study/Project/Twitter Demographic/Work/20180308 Facebook Labeling/hashtag.csv', row.names = FALSE)
|
0b66039406a10f027576ab0f393a2e1b8f498434 | 34eeedb61265a9c6cfdbf788ec106d98171a9b9a | /NPSDashboard/server.R | 974c143e0f012a03579b5cc547fd1a8e4f853e41 | [] | no_license | artsclubtheatre/npsdashboard | 61df01c6a92d2965de1db95ae641ba7a60a491e4 | 41c0e955939d036d6895324ee89962a2455c337a | refs/heads/master | 2020-08-17T07:02:53.286493 | 2020-03-16T22:48:53 | 2020-03-16T22:48:53 | 215,629,481 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 6,178 | r | server.R | library(shiny)
library(tidyverse)
library(flexdashboard)
library(plotly)
library(reshape2)
library(ggwordcloud)
library(DT)
load("npsData.RData")
shinyServer(function(input, output, session) {
output$companyScoreGauge <- renderGauge({
score <- round(companyScore$npsScore)
gauge(value = score, min = -100, max = 100, symbol = '', gaugeSectors(
success = c(50, 100),
warning = c(0, 50),
danger = c(-100, 0)
))
})
output$companyScoreOverTime <- renderPlotly({
plot <- ggplot(
nps_company,
aes(
x=create_dt,
y=cumulativeScore,
group=created_by,
text=paste("Score: ", round(cumulativeScore, 1)
)
)
)+
geom_line(size=1.5, col="steelblue")+
ylab("Net Promoter Score")+
xlab("Date")+
theme_minimal()+
theme(axis.text = element_text(size=14))
ggplotly(plot, tooltip=c("text"))
})
output$companyRatingsOverTime <- renderPlot({
smallCompany <- nps_company %>%
select(create_dt, totalPromoters, totalPassives, totalDetractors) %>%
group_by(create_dt) %>%
mutate(total = sum(totalPromoters, totalPassives, totalDetractors),
promoterPercent = totalPromoters / total,
passivePercent = totalPassives / total,
detractorPercent = totalDetractors / total) %>%
select(create_dt, promoterPercent, passivePercent, detractorPercent)
meltedCompany <- melt(list(smallCompany),
id.vars = c("create_dt"))
ggplot(meltedCompany, aes(create_dt, value, group=variable, col=variable))+
geom_line(size=1.5)+
ylab("Total Patrons")+
xlab("Date")+
scale_y_continuous(labels = scales::percent)+
theme_minimal()+
theme(legend.position = "bottom")+
scale_color_manual(name="Patrons",
labels=c("Promoters",
"Passives",
"Detractors"),
values = c("promoterPercent" = "seagreen4",
"passivePercent" = "steelblue",
"detractorPercent" = "firebrick3")
)+
theme(axis.text = element_text(size=14))
})
output$companyWordCloud <- renderPlot({
    text <- companyText %>% top_n(75, n)   # keep the 75 most frequent words
ggplot(text, aes(label=word, size=n, col=word))+
geom_text_wordcloud()+
scale_size_area(max_size = 25)+
theme_minimal()
})
output$companyScoreBySegment <- renderPlot({
companyScoreWithLabels <- companyScoreBySegment %>%
mutate(donor = ifelse(donor, "Donor", "Not Donor"),
nLabel = paste("N=", total))
ggplot(companyScoreWithLabels, aes(segment, score, group=donor, fill=segment))+
geom_bar(stat='identity')+
geom_text(aes(label=score), position = position_nudge(y=-5), size=10, color="white")+
geom_text(aes(label=nLabel), vjust=-1)+
facet_wrap("donor")+
ylab("Score")+
xlab("Patron Segment")+
theme_minimal()
})
output$companyProductionGreater <- renderText({
avgCompany <- mean(allScores$nps_company_score, na.rm = TRUE)
avgProd <- mean(allScores$nps_prod_score, na.rm = TRUE)
if(avgCompany > avgProd){
return(paste(
"On average, patrons rate the company (",
        round(avgCompany, 1),
        " avg ) higher than the production (",
        round(avgProd, 1),
" avg )"
))
} else if (avgCompany < avgProd){
return(paste(
"On average, patrons rate the company (",
round(avgCompany, 1),
" avg ) lower than the production (",
round(avgProd, 1),
" avg )"
))
} else {
return(paste(
"On average, patrons rate the company (",
round(avgCompany, 1),
" avg ) and the production (",
round(avgProd, 1),
" avg ) about the same"
))
}
})
output$companyProductionCorrelation <- renderPlot({
ggplot(allScores, aes(nps_company_score, nps_prod_score))+
geom_count(col="steelblue")+
geom_smooth(method="lm")+
geom_abline(intercept = 0, linetype="dashed")+
scale_size_area(max_size = 20)+
scale_x_continuous(breaks = c(0:10))+
scale_y_continuous(breaks = c(0:10))+
ylab("Production Score")+
xlab("Company Score")+
theme_minimal()+
theme(axis.text = element_text(size=14))
})
output$commentTagging <- renderDataTable({
DT::datatable(
surveyAnswers %>%
select(field.ref, text, patronId,segment, prodTitle),
rownames = FALSE,
filter = 'top',
colnames = c("Field", "Patron Response", "Patron ID", "Segment", "Production")
)
})
output$prodPlots <- renderUI({
    plotOutputList <- lapply(productionScores$prodSeason, function(prod){
plotname <- prod
cloudname <- paste0(prod, "cloud")
title <- productionScores$title[productionScores$prodSeason == plotname]
list(
div(class="col-xs-12 col-md-4 panel panel-default",
h3(title),
p(
strong(productionScores$totalPromoters[productionScores$prodSeason == plotname]), " Promoters, ",
strong(productionScores$totalPassives[productionScores$prodSeason == plotname]), " Passives, and ",
strong(productionScores$totalDetractors[productionScores$prodSeason == plotname]), " Detractors "
),
gaugeOutput(plotname)
)
)
})
    do.call(tagList, plotOutputList)
})
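  # Render one gauge per production. local() is required so each render
  # closure captures its own copy of `prod`; without it every gauge would
  # reference the final value of the loop variable.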
for(prod in factor(productionScores$prodSeason)) {
local({
plotname <- prod
cloudname <- paste0(prod, "cloud")
output[[plotname]] <- renderGauge({
score <- productionScores %>%
filter(prodSeason == plotname) %>%
mutate(npsScore = round(npsScore))
gauge(value = score$npsScore, min = -100, max = 100, symbol = '', gaugeSectors(
success = c(50, 100),
warning = c(0, 50),
danger = c(-100, 0)
))
})
})
}
})
|
192775d8bf3d6b00068333b3d68aa81169e1283a | e681fc551cfcff3f6358c62cf12cedc7cde320e1 | /occ change idea.R | 3989c79fb90fc26cdf8a79ef7e8134e621e1fec3 | [] | no_license | Privlko/ru_mobility | e6d8ae5311e44ed7ff8173ce551744ea47cebc66 | f9dd20d5ae216427391ff2fe6bd4f518293a762c | refs/heads/master | 2021-07-09T14:14:26.057468 | 2020-07-02T15:44:01 | 2020-07-02T15:44:01 | 146,010,528 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 2,693 | r | occ change idea.R |
# package space -----------------------------------------------------------
library(tidyverse)
library(lme4)
library(ggplot2)
library(plm)
library(dplyr)
# load the data -----------------------------------------------------------
load('C:/Users/Ivan/Desktop/dir/papers/ru_mobility/data.Rda')
?plm
# plot labels (defined before the plots that use them) ---------------------
my_title <- 'Job mobility is uncommon among the panel, \nmost respondents do not list a job change in the last 12 months'
my_subtitle <- 'Although the data measures repeat observations, most observations report no job change.'
my_caption <- 'Source: RLMS \nPlot: @privlko'

# quick plot --------------------------------------------------------------
ggplot(q2)+
geom_bar(aes(x=mob,
y=..prop..,
group=1),
position='dodge')+
scale_y_continuous(labels = scales::percent,
breaks = seq(0,1 , by=.1))+
labs(title=my_title,
subtitle = my_subtitle,
caption= my_caption,
y= '',
x='Mobility Type')
# two digit and three digit isco's ----------------------------------------
q2<- q2 %>%
mutate(occ_three_digit = as.integer(occ/10),
occ_two_digit = as.integer(occ_three_digit/10)) %>%
filter(occ < 9999,
occ > 1000)
# declare data as panel ---------------------------------------------------
q2 <- pdata.frame(q2,
index = c("id", "round"),
drop.index = FALSE)
ggplot(q2, aes(occ)) +
geom_histogram()
# code the occupational mobility measures -----------------------------------------------------
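# diff() on a pseries takes first differences within each individual (id),
# so occupation codes are compared across consecutive rounds person by person.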
q2$diff_4digit_occ <- diff(q2$occ, 1)
q2$diff_3digit_occ <- diff(q2$occ_three_digit, 1)
q2$diff_2digit_occ <- diff(q2$occ_two_digit, 1)
q2 <- q2 %>%
mutate(occ.change_4 = case_when(diff_4digit_occ ==0 ~ "No change",
diff_4digit_occ > 0 ~ "Upward change",
diff_4digit_occ < 0 ~ "Downward change")) %>%
filter(!is.na(diff_4digit_occ))
q2 <- q2 %>%
mutate(occ.change_2 = case_when(diff_2digit_occ ==0 ~ "No change",
diff_2digit_occ > 0 ~ "Upward change",
diff_2digit_occ < 0 ~ "Downward change")) %>%
filter(!is.na(diff_2digit_occ))
ggplot(q2, aes(mob)) + geom_bar(aes(fill = occ.change_2), position = "identity")
ggplot(q2, aes(x = mob, fill = occ.change_2)) +
geom_bar()
tab_cnt <- table(q2$occ.change_2, q2$mob)
tab_cnt
prop.table(tab_cnt, 2)
ggplot(q2, aes(x = mob, fill = occ.change_2)) +
geom_bar(position = "fill") +
ylab("proportion")+
facet_wrap(~gender)
ggplot(q2, aes(x = occ.change_2, fill = mob)) +
geom_bar(position = "fill") +
ylab("proportion")
|