blob_id
stringlengths 40
40
| directory_id
stringlengths 40
40
| path
stringlengths 2
327
| content_id
stringlengths 40
40
| detected_licenses
listlengths 0
91
| license_type
stringclasses 2
values | repo_name
stringlengths 5
134
| snapshot_id
stringlengths 40
40
| revision_id
stringlengths 40
40
| branch_name
stringclasses 46
values | visit_date
timestamp[us]date 2016-08-02 22:44:29
2023-09-06 08:39:28
| revision_date
timestamp[us]date 1977-08-08 00:00:00
2023-09-05 12:13:49
| committer_date
timestamp[us]date 1977-08-08 00:00:00
2023-09-05 12:13:49
| github_id
int64 19.4k
671M
⌀ | star_events_count
int64 0
40k
| fork_events_count
int64 0
32.4k
| gha_license_id
stringclasses 14
values | gha_event_created_at
timestamp[us]date 2012-06-21 16:39:19
2023-09-14 21:52:42
⌀ | gha_created_at
timestamp[us]date 2008-05-25 01:21:32
2023-06-28 13:19:12
⌀ | gha_language
stringclasses 60
values | src_encoding
stringclasses 24
values | language
stringclasses 1
value | is_vendor
bool 2
classes | is_generated
bool 2
classes | length_bytes
int64 7
9.18M
| extension
stringclasses 20
values | filename
stringlengths 1
141
| content
stringlengths 7
9.18M
|
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
d8bea0d483ba3bcf5980c2a54ccead68feaa8548
|
55e042f05ee3da0db86ecfb806c0e695382a843d
|
/tests/testthat/test-parse-remotes.R
|
5caef05f2883a5220ee7f9d63b3d9b10e0b3bd60
|
[
"MIT"
] |
permissive
|
r-lib/pkgdepends
|
f507dfe031e34c994311ca9a139dda9a6d7e016a
|
a0f5132320498780c8b87ce8eb4f66e754906376
|
refs/heads/main
| 2023-08-03T15:56:48.339228
| 2023-07-19T09:13:42
| 2023-07-19T09:13:42
| 102,942,545
| 86
| 23
|
NOASSERTION
| 2023-09-11T20:44:59
| 2017-09-09T09:17:38
|
R
|
UTF-8
|
R
| false
| false
| 8,613
|
r
|
test-parse-remotes.R
|
# is_valid_package_name() must accept syntactically valid R package names and
# reject invalid ones with a human-readable "reason" attribute attached.
test_that("package_name_rx", {
  valid <- c("A1", "a1", "Z1", "z1", "foo.bar", "foo.bar.baz", "a1.b2")
  for (nm in valid) {
    expect_true(is_valid_package_name(nm), info = nm)
  }
  # Each entry: the bad name, then a fragment the "reason" must mention.
  invalid <- list(
    c("pkg", "forbidden"),
    c("pak\u00e1ge", "ASCII"),
    c("good-package", "letters, numbers and dot"),
    c("x", "two characters"),
    c("1stpackage", "must start with a letter"),
    c("dots.", "must not end with a dot")
  )
  for (bad in invalid) {
    verdict <- is_valid_package_name(bad[1])
    expect_false(verdict, info = bad[1])
    expect_match(attr(verdict, "reason"), bad[2], info = bad[1])
  }
})
# Refs without an explicit type prefix (or with "standard::") parse to
# "standard" remotes carrying package name and version constraint.
test_that("parse_pkg_refs, standard", {
  # Each case: the ref string, then the fields its parsed form must contain.
  specs <- list(
    list("pkg",
         list(package = "pkg", atleast = "", version = "")),
    list("pkg@0.1-2",
         list(package = "pkg", atleast = "==", version = "0.1-2")),
    list("pkg@>=2.9",
         list(package = "pkg", atleast = ">=", version = "2.9")),
    list("pkg@last",
         list(package = "pkg", atleast = "==", version = "last")),
    list("standard::pkg",
         list(package = "pkg", atleast = "", version = "")),
    list("standard::pkg@0.1-2",
         list(package = "pkg", atleast = "==", version = "0.1-2"))
  )
  expect_equal(
    get_remote_types(vcapply(specs, "[[", 1)),
    rep("standard", length(specs))
  )
  for (spec in specs) {
    ref <- spec[[1]]
    parsed <- parse_pkg_refs(ref)[[1]]
    expect_equal_named_lists(
      parsed,
      c(spec[[2]], list(ref = ref, type = "standard", params = character()))
    )
    expect_s3_class(parsed, c("remote_ref_cran", "remote_ref"))
  }
})
# Refs with an explicit "cran::" prefix parse to "cran" remotes.
test_that("parse_pkg_refs, cran", {
  specs <- list(
    list("cran::pkg",
         list(package = "pkg", atleast = "", version = "")),
    list("cran::pkg@0.1-2",
         list(package = "pkg", atleast = "==", version = "0.1-2"))
  )
  expect_equal(
    get_remote_types(vcapply(specs, "[[", 1)),
    rep("cran", length(specs))
  )
  for (spec in specs) {
    ref <- spec[[1]]
    parsed <- parse_pkg_refs(ref)[[1]]
    expect_equal_named_lists(
      parsed,
      c(spec[[2]], list(ref = ref, type = "cran", params = character()))
    )
    expect_s3_class(parsed, c("remote_ref_cran", "remote_ref"))
  }
})
# The component regexes used to parse GitHub refs. Every case pairs an input
# string with the capture-group value the regex must produce (NA_character_
# when it must not match).
test_that("github regexes", {
  username_cases <- list(
    list("foobar", "foobar"),
    list("", NA_character_),
    list("-bad", NA_character_),
    list("123456789012345678901234567890123456789",
         "123456789012345678901234567890123456789"),
    list("1234567890123456789012345678901234567890", NA_character_)
  )
  anchored_username_rx <- paste0("^", github_username_rx(), "$")
  for (tc in username_cases) {
    expect_equal(re_match(tc[[1]], anchored_username_rx)$username, tc[[2]])
  }
  commitish_cases <- list(
    list("", NA_character_),
    list("x", NA_character_),
    list("foobar", NA_character_),
    list("@foobar", "foobar"),
    list("@*foobar", NA_character_)
  )
  for (tc in commitish_cases) {
    expect_equal(re_match(tc[[1]], github_commitish_rx())$commitish, tc[[2]])
  }
  pull_cases <- list(
    list("", NA_character_),
    list("x", NA_character_),
    list("@foobar", NA_character_),
    list("#", NA_character_),
    list("#123", "123"),
    list("#1", "1"),
    list("#foobar", NA_character_),
    list("#1foo", "1")
  )
  for (tc in pull_cases) {
    expect_equal(re_match(tc[[1]], github_pull_rx())$pull, tc[[2]])
  }
  release_cases <- list(
    list("", NA_character_),
    list("x", NA_character_),
    list("@foobar", NA_character_),
    list("@*foobar", NA_character_),
    list("@*release", "*release")
  )
  for (tc in release_cases) {
    expect_equal(re_match(tc[[1]], github_release_rx())$release, tc[[2]])
  }
  # The detail regex extracts all three pieces at once; expected values are
  # given in the order commitish, pull, release.
  detail_cases <- list(
    list("@foobar", c("foobar", "", "")),
    list("#123", c("", "123", "")),
    list("@*release", c("", "", "*release")),
    list("foobar", c("", "", ""))
  )
  detail_cols <- c("commitish", "pull", "release")
  for (tc in detail_cases) {
    expect_equal(
      unlist(re_match(tc[[1]], github_detail_rx())[, detail_cols]),
      structure(tc[[2]], names = detail_cols)
    )
  }
})
# An unknown remote type must raise an error; the exact message is pinned
# by a snapshot. "good" and "my_package" are valid refs, so the error must
# single out "notgood::pkg".
test_that("parse_pkg_refs error on unknown type", {
expect_snapshot(
error = TRUE,
parse_pkg_refs(c("notgood::pkg", "good", "my_package"))
)
})
# Custom remote types can be registered either via the pkg.remote_types
# option or via the remote_types argument; both routes must dispatch to the
# user-supplied parse function and forward extra ... arguments to it.
test_that("custom remote types", {
# Captured via <<- so the test can inspect what the parser received.
xspecs <- NULL
xargs <- NULL
parse_remote_foo <- function(specs, ...) {
xspecs <<- specs
xargs <<- list(...)
list(list())
}
# Route 1: register the type through the pkg.remote_types option.
res <- withr::with_options(
list(pkg.remote_types = list(foo = list(parse = parse_remote_foo))),
parse_pkg_refs("foo::arbitrary_string/xxx", ex1 = "1", ex2 = "2")
)
# The (empty) parsed remote is decorated with type, params and S3 classes.
expect_identical(
res,
list(structure(list(type = "foo", params = character()),
class = c("remote_ref_foo", "remote_ref", "list")))
)
expect_identical(xspecs, "foo::arbitrary_string/xxx")
expect_identical(xargs, list(ex1 = "1", ex2 = "2"))
# Route 2: pass the type table directly; result must be identical.
res2 <- parse_pkg_refs(
"foo::arbitrary_string/xxx", ex1 = "1", ex2 = "2",
remote_types = list(foo = list(parse = parse_remote_foo)))
expect_identical(res, res2)
})
# type_default_parse() splits refs of the form [package=]type::rest into
# their components without any type-specific handling.
test_that("type_default_parse", {
res <- type_default_parse(c("foo::bar", "package=foo2::bar2"))
expect_identical(res,
list(
list(package = "", type = "foo", rest = "bar", ref = "foo::bar"),
list(package = "package", type = "foo2", rest = "bar2",
ref = "package=foo2::bar2")
)
)
})
# A registered remote type with no parse function falls back to the default
# parser, which still attaches params and the remote_ref_<type> S3 classes.
test_that("default parse function", {
# Route 1: types registered via the pkg.remote_types option.
res <- withr::with_options(
list(pkg.remote_types = list(foo = list(), foo2 = list())),
parse_pkg_refs(c("foo::bar", "package=foo2::bar2"))
)
expect_identical(res,
list(
structure(list(package = "", type = "foo", rest = "bar", ref = "foo::bar",
params = character()),
class = c("remote_ref_foo", "remote_ref", "list")),
structure(list(package = "package", type = "foo2", rest = "bar2",
ref = "package=foo2::bar2", params = character()),
class = c("remote_ref_foo2", "remote_ref", "list"))
)
)
# Route 2: types passed via the remote_types argument; same result.
res2 <- parse_pkg_refs(
c("foo::bar", "package=foo2::bar2"),
remote_types = list(foo = list(), foo2 = list()))
expect_identical(res, res2)
})
# Path-like refs (absolute, relative, home-relative, backslash, bare ".")
# parse as "local" remotes, with or without the explicit "local::" prefix;
# the canonical ref always carries the prefix.
test_that("parse_pkg_refs, local", {
  # Each case: input ref, then the path the parser must extract.
  path_cases <- list(
    list("local::path", "path"),
    list("local::/path", "/path"),
    list("local::~/path", "~/path"),
    list("local::./path", "./path"),
    list("local::\\path", "\\path"),
    list("/path", "/path"),
    list("~/path", "~/path"),
    list("./path", "./path"),
    list(".\\path", ".\\path"),
    list("\\path", "\\path"),
    list(".", ".")
  )
  for (tc in path_cases) {
    expected <- structure(
      list(package = NA_character_, path = tc[[2]],
           ref = paste0("local::", tc[[2]]), type = "local",
           params = character()),
      class = c("remote_ref_local", "remote_ref", "list")
    )
    expect_equal(parse_pkg_ref(tc[[1]]), expected)
  }
})
# parse_query() turns a "?key=value&flag" suffix into a named character
# vector; valueless keys map to "", duplicates are kept, %XX is decoded.
test_that("parse_query", {
  # Named character vector of length zero: the result for empty queries.
  no_params <- setNames(character(), character())
  query_cases <- list(
    list("", no_params),
    list("?", no_params),
    list("?foo", c(foo = "")),
    list("?foo=1", c(foo = "1")),
    list("?foo&bar", c(foo = "", bar = "")),
    list("?foo=1&bar=2&foo", c(foo = "1", bar = "2", foo = "")),
    list("?foo=1%202&bar=x", c(foo = "1 2", bar = "x"))
  )
  for (tc in query_cases) {
    suppressMessages(expect_equal(parse_query(tc[[1]]), tc[[2]]))
  }
})
# A ref with a query-string suffix must parse exactly like the bare ref,
# except that the params field holds the parsed query.
test_that("parameters", {
  # Each case: base ref, query suffix, expected params vector.
  param_cases <- list(
    list("foo", "?bar", c(bar = "")),
    list("foo", "?bar=1&foo&bar=11", c(bar = "1", foo = "", bar = "11")),
    list("user/repo", "?source", c(source = ""))
  )
  for (tc in param_cases) {
    plain <- parse_pkg_ref(tc[[1]])
    suppressMessages(
      with_query <- parse_pkg_ref(paste0(tc[[1]], tc[[2]]))
    )
    plain$params <- tc[[3]]
    expect_equal(with_query, plain)
  }
})
# Refs carrying an explicit "package=" prefix, across github, local,
# bare-path and url remote types; parsed forms are pinned via snapshots.
test_that("explicit package names", {
expect_snapshot({
parse_pkg_ref("package=user/notpackage")
parse_pkg_ref("package=user/notpackage@tag")
parse_pkg_ref("package=github::user/notpackage@tag")
parse_pkg_ref("package=local::/abs/path")
parse_pkg_ref("package=local::rel/path")
parse_pkg_ref("package=local::~/home/path")
parse_pkg_ref("package=/abs/path")
parse_pkg_ref("package=./rel/path")
parse_pkg_ref("package=~/home/path")
parse_pkg_ref("package=url::https://example.com/p1.tar.gz")
})
})
# gitlab:: refs with optional @ref, subdirectory and "pkg=" prefix; parsed
# forms are pinned via snapshots.
test_that("gitlab", {
expect_snapshot({
parse_pkg_ref("gitlab::user/repo")
parse_pkg_ref("gitlab::user/repo@ref")
parse_pkg_ref("gitlab::user/repo/sub/dir")
parse_pkg_ref("gitlab::user/repo/sub/dir@ref")
parse_pkg_ref("pkg=gitlab::user/repo")
parse_pkg_ref("pkg=gitlab::user/repo@ref")
parse_pkg_ref("pkg=gitlab::user/repo/sub/dir")
parse_pkg_ref("pkg=gitlab::user/repo/sub/dir@ref")
})
})
|
1e08cf174881de1cfb1d8aaf3e75ef38883c7af9
|
b374790d0cb13fa5ac55decb7b5316a70a98125e
|
/Part 2/Part 2 - A/A2.R
|
8a6cfac785db310839bddcd630f3f467359e85ed
|
[] |
no_license
|
pathaksid03/Machine-Learning-Project
|
fdeb70d0af4e0bdcd15ca13891b4c9f4fa5787fd
|
d41be7bfaa214e812e889e3908a530cba7ae91eb
|
refs/heads/master
| 2021-01-21T21:32:45.416764
| 2016-07-20T16:33:46
| 2016-07-20T16:33:46
| 55,130,288
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 197
|
r
|
A2.R
|
# code to show most popular name
# NOTE(review): relies on `out` being defined elsewhere and on dplyr's
# summarize()/group_by() being attached -- confirm both at the call site.
df= out[,1:5]
# data frame carrying sums of frequencies, one row per Name
df1 =data.frame(summarize(group_by(df,Name), sum(Fr)))
# row with the largest total; `sum.Fr.` is the column name data.frame()
# derives from the unnamed `sum(Fr)` expression
popu <- df1[which.max(df1$sum.Fr.), ]
print(popu)
|
941a868f723d463c0be1e386087bae47298efb24
|
4c9d2d93b2fa8661cc959320bef1d31384f9e89a
|
/R/kaggle-datasets.R
|
8fd61a403078c9f6974ecedd9631b322614f5e5a
|
[
"MIT"
] |
permissive
|
abdala9512/dareML
|
5a0d602fc28821e7f024543f4291bd172838ef16
|
9473f1eb81e277419e42cd1fdfef72b259c19c08
|
refs/heads/main
| 2023-06-20T08:22:04.492566
| 2021-07-09T02:49:12
| 2021-07-09T02:49:12
| 324,870,579
| 0
| 0
| null | 2020-12-28T00:43:49
| 2020-12-27T23:47:25
| null |
UTF-8
|
R
| false
| false
| 95
|
r
|
kaggle-datasets.R
|
# https://gist.github.com/nonsleepr/146e247f40ffb0ce3281
# https://github.com/mkearney/kaggler
|
32095221e45faa8ed91d59b01a1686a8beaebe44
|
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
|
/data/genthat_extracted_code/imbalance/examples/trainWrapper.Rd.R
|
63270394924871690a857d9d5120ed313ee9b530
|
[] |
no_license
|
surayaaramli/typeRrh
|
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
|
66e6996f31961bc8b9aafe1a6a6098327b66bf71
|
refs/heads/master
| 2023-05-05T04:05:31.617869
| 2019-04-25T22:10:06
| 2019-04-25T22:10:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 288
|
r
|
trainWrapper.Rd.R
|
library(imbalance)
### Name: trainWrapper
### Title: Generic methods to train classifiers
### Aliases: trainWrapper
### ** Examples
# Empty S3 object whose class tags it as a C5.0-based wrapper.
myWrapper <- structure(list(), class="C50Wrapper")
# S3 method for imbalance's trainWrapper generic: fits a C5.0 classifier
# on the training data and class labels (requires the C50 package).
trainWrapper.C50Wrapper <- function(wrapper, train, trainClass){
C50::C5.0(train, trainClass)
}
|
31268614c33d696e026dc11ade0c4a580853494e
|
29585dff702209dd446c0ab52ceea046c58e384e
|
/meta/R/rmSpace.R
|
5456f250f4915e98bfa8138f9fde70c31f4be8b5
|
[] |
no_license
|
ingted/R-Examples
|
825440ce468ce608c4d73e2af4c0a0213b81c0fe
|
d0917dbaf698cb8bc0789db0c3ab07453016eab9
|
refs/heads/master
| 2020-04-14T12:29:22.336088
| 2016-07-21T14:01:14
| 2016-07-21T14:01:14
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 439
|
r
|
rmSpace.R
|
#' Strip repeated occurrences of a single character from one end of strings.
#'
#' @param x   Character vector to clean.
#' @param end If FALSE (default) strip from the start of each string,
#'   otherwise from the end.
#' @param pat Single character to strip (compared literally, not as regex).
#' @return `x` with all leading (or trailing) `pat` characters removed.
#'   `NA` elements are left untouched.
#'
#' Bug fix: the original built a logical index containing NA for missing
#' elements, and `x[sel] <- ...` with NA subscripts is an error in R.
#' Using which() drops the NAs so missing values pass through unchanged.
rmSpace <- function(x, end = FALSE, pat = " ") {
  if (!end) {
    # Repeatedly peel off the first character while it equals `pat`.
    repeat {
      sel <- which(substring(x, 1, 1) == pat)  # which() drops NAs safely
      if (length(sel) == 0) break
      x[sel] <- substring(x[sel], 2)
    }
  } else {
    # Repeatedly peel off the last character while it equals `pat`.
    repeat {
      last <- nchar(x)
      sel <- which(substring(x, last, last) == pat)
      if (length(sel) == 0) break
      x[sel] <- substring(x[sel], 1, last[sel] - 1)
    }
  }
  x
}
|
08c9acca676b0f440df6ed126a924b0f627077d0
|
0dc54e49ef44041a15817fa0a44585edc3416790
|
/Project4/Main.R
|
f65328a8c671f9b18009672f286ba32025be4061
|
[] |
no_license
|
danmandel/MachineLearning
|
b088e99f04013f3722ed2424c504d8a53ff084b3
|
4e1f0d12a0e7d5b6607d103e62240de057727802
|
refs/heads/master
| 2020-04-14T03:05:35.592369
| 2015-04-26T05:35:17
| 2015-04-26T05:35:17
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 162
|
r
|
Main.R
|
# Load modelling and plotting dependencies.
library(caret)
library(ggplot2)  # fixed: was library(ggplot) -- no such package exists
library(e1071)
library(rpart)
library(quantmod) # fixed: provides getSymbols() and dailyReturn(), which
                  # were called below without quantmod ever being loaded
# Download price data for the UUP ETF and compute its daily returns.
getSymbols("UUP")
daily <- dailyReturn(UUP)
daily <- na.omit(daily)   # drop missing return observations
t <- 10                   # NOTE(review): presumably a window length -- confirm
k <- length(daily[,1])    # number of daily observations
|
c8b5d80a37b2555c8fc631d8a1d2c68dce0fadf7
|
faeb705a3e879b5d4341a840e09a7c3e9edfb7ea
|
/ProgrammingAssignment2/cachematrix.R
|
9b5f3f4ecef2df91f2cb2b8312137b653da61da2
|
[] |
no_license
|
Dzirik/Recent_Projects
|
472630b2f252fdfdb064a506595ae5f3e0fc2bf3
|
515de13a5a109e0217fed85a54a42f2c7fd81189
|
refs/heads/master
| 2020-06-05T13:52:16.326821
| 2015-09-20T13:01:21
| 2015-09-20T13:01:21
| 42,812,998
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,758
|
r
|
cachematrix.R
|
## 19.06.2014
## Functions for caching a matrix together with its inverse.
##
## makeCacheMatrix() builds a closure-based object that stores a matrix and
## a cached copy of its inverse. The cache is invalidated whenever a new
## matrix is stored with set().
##
## Usage and test:
##   M <- makeCacheMatrix()                 # create the cache object
##   M$set(matrix(1:4, nrow = 2, ncol = 2)) # insert a matrix
##   M$get()                                # read the matrix back
##   M$setinverse(solve(M$get()))           # compute and store an inverse
##   M$getinverse()                         # read the cached inverse
##   M$getinverse() %*% M$get()             # should give the identity
makeCacheMatrix <- function(A = matrix()) {
  # Cached inverse; NULL means "not computed for the current matrix".
  cached <- NULL
  list(
    set = function(B) {
      A <<- B
      cached <<- NULL   # new matrix: drop the stale inverse
    },
    get = function() A,
    setinverse = function(inv) cached <<- inv,
    getinverse = function() cached
  )
}
## cacheSolve() returns the inverse of the matrix held in an object created
## by makeCacheMatrix(). The first call computes the inverse with solve()
## and stores it in the cache (message "creating inverse"); subsequent
## calls return the cached value (message "getting cached data").
## Extra arguments in ... are forwarded to solve().
##
## Usage and test:
##   M <- makeCacheMatrix()
##   M$set(matrix(1:4, nrow = 2, ncol = 2))
##   cacheSolve(M)                # computes and caches the inverse
##   M$get() %*% M$getinverse()   # should give the identity
cacheSolve <- function(A, ...) {
  cached <- A$getinverse()
  if (!is.null(cached)) {
    message("getting cached data")
    return(cached)
  }
  fresh <- solve(A$get(), ...)
  A$setinverse(fresh)
  message("creating inverse")
  fresh
}
|
12f6f2d981ae4aa0e89b526538acc54a534d5543
|
faf1f580595ad6912c1184858792870d88b965ff
|
/tablas/estadisticas01.R
|
cbca2ed0b5745a35635cb84a2b186ab3d29a226c
|
[] |
no_license
|
EncisoAlvaJC/TESIS
|
732cb07f5488a388ad4b6f2417717a6262817c0d
|
c2bad0f255e7d5795d2ac63c80e65e3d752ea5f8
|
refs/heads/master
| 2021-01-09T05:34:36.738141
| 2018-08-08T20:46:52
| 2018-08-08T20:46:52
| 80,755,604
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 14,045
|
r
|
estadisticas01.R
|
# Read the study variables from standard input, one scan() prompt per
# variable and group (.ctrl = control group, .pdcl = patient group).
# Wrapped in if(TRUE) so the whole input step can be toggled off by hand.
if(TRUE){
edad.ctrl = scan()
edad.pdcl = scan()
escol.ctrl = scan()
escol.pdcl = scan()
neuropsi.ctrl = scan()
neuropsi.pdcl = scan()
mmse.ctrl = scan()
mmse.pdcl = scan()
sast.ctrl = scan()
sast.pdcl = scan()
gds.ctrl = scan()
gds.pdcl = scan()
total.ctrl = scan()
total.pdcl = scan()
mor.ctrl = scan()
mor.pdcl = scan()
porc.ctrl = scan()
porc.pdcl = scan()
}
# Descriptive statistics and group comparisons (Wilcoxon rank-sum and
# t-test) for each variable, control (.ctrl) vs. patient (.pdcl) group.
# Results are displayed with View() and paced with readline(); the whole
# block is disabled via if(FALSE) and must be enabled by hand to run.
if(FALSE){
###
# edad (age)
mean(edad.ctrl)
sd(edad.ctrl)
mean(edad.pdcl)
sd(edad.pdcl)
print('Edad')
C = c(mean(edad.ctrl),sd(edad.ctrl),
NA,
mean(edad.pdcl),sd(edad.pdcl))
View(t(C))
invisible(readline(prompt="Presion [enter] para continuar"))
a = wilcox.test(edad.ctrl,edad.pdcl,paired = F,exact = T,
correct = T,conf.int = T)
tmp = c(a$p.value,a$statistic,a$estimate,a$conf.int)
View(t(tmp))
invisible(readline(prompt="Presion [enter] para continuar"))
b = t.test(edad.ctrl,edad.pdcl,paired=F)
tm2 = c(b$p.value,b$statistic,b$parameter,b$estimate,b$conf.int)
View(t(tm2))
invisible(readline(prompt="Presion [enter] para continuar"))
###
# escolaridad (years of schooling)
mean(escol.ctrl)
sd(escol.ctrl)
mean(escol.pdcl)
sd(escol.pdcl)
print('Escolaridad')
C = c(mean(escol.ctrl),sd(escol.ctrl),
NA,
mean(escol.pdcl),sd(escol.pdcl))
View(t(C))
invisible(readline(prompt="Presion [enter] para continuar"))
a = wilcox.test(escol.ctrl,escol.pdcl,paired = F,exact = T,
correct = T,conf.int = T)
tmp = c(a$p.value,a$statistic,a$estimate,a$conf.int)
View(t(tmp))
invisible(readline(prompt="Presion [enter] para continuar"))
b = t.test(escol.ctrl,escol.pdcl,paired=F)
tm2 = c(b$p.value,b$statistic,b$parameter,b$estimate,b$conf.int)
View(t(tm2))
invisible(readline(prompt="Presion [enter] para continuar"))
###
# neuropsi (Neuropsi cognitive test score)
mean(neuropsi.ctrl)
sd(neuropsi.ctrl)
mean(neuropsi.pdcl)
sd(neuropsi.pdcl)
print('Neuropsi')
C = c(mean(neuropsi.ctrl),sd(neuropsi.ctrl),
NA,
mean(neuropsi.pdcl),sd(neuropsi.pdcl))
View(t(C))
invisible(readline(prompt="Presion [enter] para continuar"))
a = wilcox.test(neuropsi.ctrl,neuropsi.pdcl,paired = F,exact = T,
correct = T,conf.int = T)
tmp = c(a$p.value,a$statistic,a$estimate,a$conf.int)
View(t(tmp))
invisible(readline(prompt="Presion [enter] para continuar"))
b = t.test(neuropsi.ctrl,neuropsi.pdcl,paired=F)
tm2 = c(b$p.value,b$statistic,b$parameter,b$estimate,b$conf.int)
View(t(tm2))
invisible(readline(prompt="Presion [enter] para continuar"))
###
# mmse (Mini-Mental State Examination)
mean(mmse.ctrl)
sd(mmse.ctrl)
mean(mmse.pdcl)
sd(mmse.pdcl)
print('MMSE')
C = c(mean(mmse.ctrl),sd(mmse.ctrl),
NA,
mean(mmse.pdcl),sd(mmse.pdcl))
View(t(C))
invisible(readline(prompt="Presion [enter] para continuar"))
a = wilcox.test(mmse.ctrl,mmse.pdcl,paired = F,exact = T,
correct = T,conf.int = T)
tmp = c(a$p.value,a$statistic,a$estimate,a$conf.int)
View(t(tmp))
invisible(readline(prompt="Presion [enter] para continuar"))
b = t.test(mmse.ctrl,mmse.pdcl,paired=F)
tm2 = c(b$p.value,b$statistic,b$parameter,b$estimate,b$conf.int)
View(t(tm2))
invisible(readline(prompt="Presion [enter] para continuar"))
###
# sast (SAST score; has missing values, hence na.rm below)
mean(sast.ctrl,na.rm = T)
sd(sast.ctrl,na.rm = T)
mean(sast.pdcl,na.rm = T)
sd(sast.pdcl,na.rm = T)
print('SAST')
# NOTE(review): unlike the summaries just above, these calls omit
# na.rm = TRUE, so C will contain NA whenever sast has missing values.
C = c(mean(sast.ctrl),sd(sast.ctrl),
NA,
mean(sast.pdcl),sd(sast.pdcl))
View(t(C))
invisible(readline(prompt="Presion [enter] para continuar"))
a = wilcox.test(sast.ctrl,sast.pdcl,paired = F,exact = T,
correct = T,conf.int = T)
tmp = c(a$p.value,a$statistic,a$estimate,a$conf.int)
View(t(tmp))
invisible(readline(prompt="Presion [enter] para continuar"))
b = t.test(sast.ctrl,sast.pdcl,paired=F)
tm2 = c(b$p.value,b$statistic,b$parameter,b$estimate,b$conf.int)
View(t(tm2))
invisible(readline(prompt="Presion [enter] para continuar"))
###
# gds (Geriatric Depression Scale)
mean(gds.ctrl)
sd(gds.ctrl)
mean(gds.pdcl)
sd(gds.pdcl)
print('GDS')
C = c(mean(gds.ctrl),sd(gds.ctrl),
NA,
mean(gds.pdcl),sd(gds.pdcl))
View(t(C))
invisible(readline(prompt="Presion [enter] para continuar"))
a = wilcox.test(gds.ctrl,gds.pdcl,paired = F,exact = T,
correct = T,conf.int = T)
tmp = c(a$p.value,a$statistic,a$estimate,a$conf.int)
View(t(tmp))
invisible(readline(prompt="Presion [enter] para continuar"))
b = t.test(gds.ctrl,gds.pdcl,paired=F)
tm2 = c(b$p.value,b$statistic,b$parameter,b$estimate,b$conf.int)
View(t(tm2))
invisible(readline(prompt="Presion [enter] para continuar"))
###
# total (total sleep time)
mean(total.ctrl)
sd(total.ctrl)
mean(total.pdcl)
sd(total.pdcl)
print('Sueno total')
C = c(mean(total.ctrl),sd(total.ctrl),
NA,
mean(total.pdcl),sd(total.pdcl))
View(t(C))
invisible(readline(prompt="Presion [enter] para continuar"))
a = wilcox.test(total.ctrl,total.pdcl,paired = F,exact = T,
correct = T,conf.int = T)
tmp = c(a$p.value,a$statistic,a$estimate,a$conf.int)
View(t(tmp))
invisible(readline(prompt="Presion [enter] para continuar"))
b = t.test(total.ctrl,total.pdcl,paired=F)
tm2 = c(b$p.value,b$statistic,b$parameter,b$estimate,b$conf.int)
View(t(tm2))
invisible(readline(prompt="Presion [enter] para continuar"))
###
# mor (REM sleep time; "MOR" is the Spanish acronym for REM)
mean(mor.ctrl)
sd(mor.ctrl)
mean(mor.pdcl)
sd(mor.pdcl)
print('MOR tiempo')
C = c(mean(mor.ctrl),sd(mor.ctrl),
NA,
mean(mor.pdcl),sd(mor.pdcl))
View(t(C))
invisible(readline(prompt="Presion [enter] para continuar"))
a = wilcox.test(mor.ctrl,mor.pdcl,paired = F,exact = T,
correct = T,conf.int = T)
tmp = c(a$p.value,a$statistic,a$estimate,a$conf.int)
View(t(tmp))
invisible(readline(prompt="Presion [enter] para continuar"))
b = t.test(mor.ctrl,mor.pdcl,paired=F)
tm2 = c(b$p.value,b$statistic,b$parameter,b$estimate,b$conf.int)
View(t(tm2))
invisible(readline(prompt="Presion [enter] para continuar"))
###
# porc (REM sleep percentage)
mean(porc.ctrl)
sd(porc.ctrl)
mean(porc.pdcl)
sd(porc.pdcl)
print('MOR porcentaje')
C = c(mean(porc.ctrl),sd(porc.ctrl),
NA,
mean(porc.pdcl),sd(porc.pdcl))
View(t(C))
invisible(readline(prompt="Presion [enter] para continuar"))
a = wilcox.test(porc.ctrl,porc.pdcl,paired = F,exact = T,
correct = T,conf.int = T)
tmp = c(a$p.value,a$statistic,a$estimate,a$conf.int)
View(t(tmp))
invisible(readline(prompt="Presion [enter] para continuar"))
b = t.test(porc.ctrl,porc.pdcl,paired=F)
tm2 = c(b$p.value,b$statistic,b$parameter,b$estimate,b$conf.int)
View(t(tm2))
invisible(readline(prompt="Presion [enter] para continuar"))
}
##############################################################
# Pairwise Spearman correlations between the pooled variables.
# Pool the control (.ctrl) and patient (.pdcl) samples per variable.
edad = c(edad.ctrl,edad.pdcl)
escol = c(escol.ctrl,escol.pdcl)
neuropsi = c(neuropsi.ctrl,neuropsi.pdcl)
mmse = c(mmse.ctrl,mmse.pdcl)
sast = c(sast.ctrl,sast.pdcl)
gds = c(gds.ctrl,gds.pdcl)
total = c(total.ctrl,total.pdcl)
mor = c(mor.ctrl,mor.pdcl)
porc = c(porc.ctrl,porc.pdcl)
# Display the Spearman correlation between x and y (estimate, p-value and
# S statistic) in a viewer window, then wait for [enter] before moving on.
# This replaces the identical 4-line stanza that was repeated ~45 times.
show_spearman <- function(x, y) {
A = cor.test(x, y, method = 'spearman')
B = t(c(A$estimate, A$p.value, A$statistic))
View(B)
invisible(readline(prompt="Presion [enter] para continuar"))
}
# Each section correlates one variable against itself (sanity check,
# rho = 1) and against every variable not yet covered.
print('Edad')
show_spearman(edad, edad)
show_spearman(edad, escol)
show_spearman(edad, neuropsi)
show_spearman(edad, mmse)
show_spearman(edad, sast)
show_spearman(edad, gds)
show_spearman(edad, total)
show_spearman(edad, mor)
show_spearman(edad, porc)
# Fixed label: this section correlates schooling, not age (was 'Edad').
print('Escolaridad')
show_spearman(escol, escol)
show_spearman(escol, neuropsi)
show_spearman(escol, mmse)
show_spearman(escol, sast)
show_spearman(escol, gds)
show_spearman(escol, total)
show_spearman(escol, mor)
show_spearman(escol, porc)
print('Neuropsi')
show_spearman(neuropsi, neuropsi)
show_spearman(neuropsi, mmse)
show_spearman(neuropsi, sast)
show_spearman(neuropsi, gds)
show_spearman(neuropsi, total)
show_spearman(neuropsi, mor)
show_spearman(neuropsi, porc)
print('MMSE')
show_spearman(mmse, mmse)
show_spearman(mmse, sast)
show_spearman(mmse, gds)
show_spearman(mmse, total)
show_spearman(mmse, mor)
show_spearman(mmse, porc)
print('SAST')
show_spearman(sast, sast)
show_spearman(sast, gds)
show_spearman(sast, total)
show_spearman(sast, mor)
show_spearman(sast, porc)
print('GDS')
show_spearman(gds, gds)
show_spearman(gds, total)
show_spearman(gds, mor)
show_spearman(gds, porc)
print('Sueno, total')
show_spearman(total, total)
show_spearman(total, mor)
show_spearman(total, porc)
print('MOR, tiempo')
show_spearman(mor, mor)
show_spearman(mor, porc)
print('MOR, porcentaje')
show_spearman(porc, porc)
|
48c4cecf5e23be3278a2e21d19c656404e100352
|
2bd3e91fc6be90f3e25c4c9f28846cf6e144686e
|
/Sampling/ECO6416_OLS_On_Repeat.R
|
9fa7eb66013e51e0f2088bdf21cea36dadabcdd4
|
[] |
no_license
|
LeeMorinUCF/ECO6416
|
5b809b79ce5ce2d34bad4995fe2b449bf3f9ad7e
|
7b411261bb722fa2044b51b23ab49ba30abb9232
|
refs/heads/master
| 2022-12-10T13:45:08.760651
| 2020-09-12T02:46:56
| 2020-09-12T02:46:56
| 291,760,781
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 8,377
|
r
|
ECO6416_OLS_On_Repeat.R
|
##################################################
#
# ECO 6416.0028 Applied Business Research Tools
#
# OLS Regression Demo
# Simulation with repeated estimation
#
# Lealand Morin, Ph.D.
# Assistant Professor
# Department of Economics
# College of Business Administration
# University of Central Florida
#
# September 9, 2020
#
##################################################
#
# ECO6416_OLS_On_Repeat gives an example of OLS regression
# using simulated data.
# It repeats the estimation several times to get a
# distribution of estimates.
#
##################################################
##################################################
# Preparing the Workspace
##################################################
# Clear workspace.
# NOTE(review): rm(list = ls()) and setwd() are discouraged in scripts;
# kept as-is because this is a self-contained classroom demo.
rm(list=ls(all=TRUE))
# Set working directory.
# wd_path <- '/path/to/your/folder'
wd_path <- 'C:/Users/le279259/Desktop/ECO6416_Demos/Module02'
setwd(wd_path)
# Or do this in one step (using buttons in File panel).
# setwd("C:/Users/le279259/Desktop/ECO6416_Demos/Module02")
# Read function for sampling data (defines housing_sample(), used below).
source('ECO6416_tools.R')
# This is the same as running the ECO6416_tools.R script first.
# It assumes that the script is saved in the same working folder.
# No libraries required.
# Otherwise would have a command like the following.
# library(name_of_R_package)
##################################################
# Setting the Parameters
##################################################
# True coefficients of the data-generating process.
# Dependent Variable: Property values (in Millions)
beta_0 <- 0.10 # Intercept
beta_income <- 5.00 # Slope coefficient for income
beta_cali <- 0.25 # Slope coefficient for California
beta_earthquake <- - 0.50 # Slope coefficient for earthquake
# beta_earthquake <- - 0.00 # Slope coefficient for earthquake
# Distribution of incomes (also in millions).
avg_income <- 0.1
sd_income <- 0.01
# Extra parameter for measurement error in income.
measurement_error_income <- 0.01
# Fraction of dataset in California.
pct_in_cali <- 0.5
# Frequency of earthquakes (only in California).
prob_earthquake <- 0.05
# Additional terms:
sigma_2 <- 0.1 # Variance of error term
num_obs <- 100 # Number of observations in dataset
# Set the number of replications in the simulation.
num_replications <- 1000
##################################################
# Generating the Fixed Data
##################################################
# Call the housing_sample function from ECO6416_Sim_Data.R.
housing_data <- housing_sample(beta_0, beta_income, beta_cali, beta_earthquake,
avg_income, sd_income, pct_in_cali, prob_earthquake,
sigma_2, num_obs)
# Summarize the data.
summary(housing_data)
# Check that earthquakes occurred only in California:
table(housing_data[, 'in_cali'], housing_data[, 'earthquake'])
# Data errors are the largest cause of problems in model-building.
##################################################
# Generating Additional Data
# The extra data that is not in the model
##################################################
#--------------------------------------------------
# Assume that true income is not observed but some variables
# that are correlated with income are available.
#--------------------------------------------------
# Income measure 1: true income plus Gaussian measurement error.
housing_data[, 'income_1'] <- 0
housing_data[, 'income_1'] <- housing_data[, 'income'] +
rnorm(n = num_obs, mean = 0, sd = measurement_error_income)
# Income measure 2: an independent noisy measurement of the same income.
housing_data[, 'income_2'] <- 0
housing_data[, 'income_2'] <- housing_data[, 'income'] +
rnorm(n = num_obs, mean = 0, sd = measurement_error_income)
##################################################
# Running a Simulation
# Estimating Again and Again
##################################################
# Set the list of variables for the estimation.
list_of_variables <- c('income', 'in_cali', 'earthquake')
# list_of_variables <- c('income_1', 'in_cali', 'earthquake')
# Add beta_0 to the beginning for the full list.
full_list_of_variables <- c('intercept', list_of_variables)
# Create an empty data frame to store the results,
# one row per replication, one column per estimated coefficient.
reg_results <- data.frame(reg_num = 1:num_replications)
reg_results[, full_list_of_variables] <- 0
reg_results[, c('income', 'income_1', 'income_2')] <- 0
for (reg_num in 1:num_replications) {
# Print a progress report.
# print(sprintf('Now estimating model number %d.', reg_num))
##################################################
# Generating the Random Data
##################################################
# Repeat again and again, replacing only the epsilon values.
# Generate the error term, which includes everything we do not observe.
housing_data[, 'epsilon'] <- rnorm(n = num_obs, mean = 0, sd = sigma_2)
# Finally, recalculate the simulated value of house prices,
# according to the regression equation.
housing_data[, 'house_price'] <-
beta_0 +
beta_income * housing_data[, 'income'] +
beta_cali * housing_data[, 'in_cali'] +
beta_earthquake * housing_data[, 'earthquake'] +
housing_data[, 'epsilon']
# Each time, this replaces the house_price with a different version
# of the error term.
##################################################
# Estimating the Regression Model
##################################################
# Specify the formula to estimate.
lm_formula <- as.formula(paste('house_price ~ ',
paste(list_of_variables, collapse = ' + ')))
# Estimate a regression model.
lm_full_model <- lm(data = housing_data,
formula = lm_formula)
# Note that the normal format is:
# model_name <- lm(data = name_of_dataset, formula = Y ~ X_1 + x_2 + x_K)
# but the above is a shortcut for a pre-set list_of_variables.
##################################################
# Saving the Results
##################################################
# Save the estimates in the row for this particular estimation.
reg_results[reg_num, full_list_of_variables] <- coef(lm_full_model)
}
##################################################
# Analyzing the Results
##################################################
#--------------------------------------------------
# Display some graphs
# Click the arrows in the bottom right pane to
# switch between previous figures.
#--------------------------------------------------
# Plot a histogram for each estimate.
# Note that some will be empty if they were not included in the estimation.

# Helper: draw the sampling distribution of one estimated coefficient.
#   estimates  - numeric vector of coefficient estimates across replications.
#   coef_label - coefficient name used in the plot title.
plot_coef_hist <- function(estimates, coef_label) {
  hist(estimates,
       main = paste('Distribution of', coef_label),
       xlab = 'Estimated Coefficient',
       ylab = 'Frequency',
       breaks = 20)
}

# Map each stored column of reg_results to the label shown in its title.
# (The income_1 / income_2 plots are blank when those proxies were not
# included in list_of_variables.)
coef_labels <- c(intercept  = 'beta_0',
                 income     = 'beta_income',
                 income_1   = 'beta_income_1',
                 income_2   = 'beta_income_2',
                 in_cali    = 'beta_cali',
                 earthquake = 'beta_earthquake')

# One histogram per coefficient, in the same order as before.
for (col_name in names(coef_labels)) {
  plot_coef_hist(reg_results[, col_name], coef_labels[[col_name]])
}
#--------------------------------------------------
# Output some statistics to screen
#--------------------------------------------------
# Display some statistics for the result.
summary(reg_results[, full_list_of_variables])
# Calculate the average estimates separately.
# (Should be close to the true beta parameters if the estimator is unbiased.)
print('Average value of the coefficients are:')
sapply(reg_results[, full_list_of_variables], mean)
# Calculate the standard deviation of the estimates.
# (A Monte Carlo estimate of each coefficient's standard error.)
print('Standard Deviations of the coefficients are:')
sapply(reg_results[, full_list_of_variables], sd)
##################################################
# End
##################################################
|
fa8f28f8d7be65355ec66c9bd5eca14bd2805d01
|
98d1a4a349a2a916cca89ba8eb3e20b3ee68c84b
|
/R/protect_smooth.R
|
03bcb483bec0b908dd8d81d7892fcbd44a345804
|
[] |
no_license
|
edwindj/sdcSpatial
|
31b23f1c47dd2e90fd40fc822047e4c4d5358069
|
750d8ff7da14499a79ba9465e8e3ce7b92370aff
|
refs/heads/master
| 2023-08-02T19:24:56.369292
| 2023-07-28T13:43:44
| 2023-07-28T13:43:44
| 135,307,240
| 11
| 2
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,840
|
r
|
protect_smooth.R
|
#' Protect a sdc_raster by smoothing
#'
#' `protect_smooth` reduces the sensitivity by applying a Gaussian smoother,
#' making the values less localized.
#'
#' The sensitivity of a raster can be decreased by applying a kernel density smoother as
#' argued by de Jonge et al. (2016) and de Wolf et al. (2018). Smoothing spatially spreads
#' localized values, reducing the risk for location disclosure. Note that
#' smoothing often visually enhances detection of spatial patterns.
#' The kernel applied is a Gaussian kernel with a bandwidth `bw` supplied by the user.
#' The smoother acts upon the `x$value$count` and `x$value$sum`
#' from which a new `x$value$mean` is derived.
#'
#' @inheritParams smooth_raster
#' @example ./example/protect_smooth.R
#' @export
#' @family protection methods
#' @references de Jonge, E., & de Wolf, P. P. (2016, September).
#' Spatial smoothing and statistical disclosure control.
#' In International Conference on Privacy in Statistical Databases
#' (pp. 107-117). Springer, Cham.
#' @references de Wolf, P. P., & de Jonge, E. (2018, September).
#' Safely Plotting Continuous Variables on a Map. In International Conference
#' on Privacy in Statistical Databases (pp. 347-359). Springer, Cham.
# Smooth the count and sum layers of a sdc_raster with a Gaussian kernel of
# bandwidth bw, then re-derive the mean layer from the smoothed components.
protect_smooth <- function(x, bw = raster::res(x$value), ...) {
  assert_sdc_raster(x)
  # NOTE(review): an earlier draft rescaled x$scale by the focal kernel
  # weight (raster::focalWeight); that adjustment is still pending.
  # TODO adjust for keep_resolution
  smoothed <- smooth_raster(x$value, bw = bw, ...)
  # The mean layer is invalid after smoothing count and sum; rebuild it.
  smoothed$mean <- smoothed$sum / smoothed$count
  x$value <- smoothed
  x
}
|
6a695cb2aa779033bf63177a2dde60aa34da2c1d
|
c3f6f9c917eb7c56780c162f164f0b7e5a88d099
|
/cachematrix.R
|
a7ca996adda32517888649867212f611e6963888
|
[] |
no_license
|
matt-d-walker/ProgrammingAssignment2
|
ffee3c5dd3e2e23d2d9b9649d0b0d024748a7180
|
02a765e8da11b9c487b8bf4de79e2374e293ecff
|
refs/heads/master
| 2021-01-21T16:28:22.557804
| 2016-08-22T01:59:29
| 2016-08-22T01:59:29
| 66,121,998
| 0
| 0
| null | 2016-08-20T01:27:19
| 2016-08-20T01:27:18
| null |
UTF-8
|
R
| false
| false
| 1,134
|
r
|
cachematrix.R
|
## makeCacheMatrix retuns a list of functions. The functions set and get matrix values. There is another set of fucntions that get
## and set the inverse of the matrix. cacheSolve uses the list from makeCacheMatrix and checks to see if a inverse of the matrix
## has already be calculated. If not, the matrix inverse is calculated and set.
## This function establishes a set of functions that are returned as a list. The list that is returned is intended to be the input
## for cacheSolve.
# Build a caching wrapper around a matrix. Returns a list of accessors:
# set/get for the matrix itself, setSolve/getSolve for its cached inverse.
# Replacing the matrix via set() invalidates any cached inverse.
makeCacheMatrix <- function(x = matrix()) {
  cached_inverse <- NULL
  list(
    set = function(y) {
      x <<- y
      cached_inverse <<- NULL
    },
    get = function() x,
    setSolve = function(solve) cached_inverse <<- solve,
    getSolve = function() cached_inverse
  )
}
## This functions takes in the list of functions from makeCacheMatrix. It tests to see if an inverse value of the matrix exists
## already. If it does it uses the cached value otherwise it calculates it.
# Return the inverse of the matrix held in a makeCacheMatrix wrapper `x`.
# Serves the cached inverse when one exists; otherwise computes it with
# solve(), stores it in the cache, and returns it. Extra arguments are
# forwarded to solve().
cacheSolve <- function(x, ...) {
  cached <- x$getSolve()
  if (!is.null(cached)) {
    message("getting cached data")
    return(cached)
  }
  inverse <- solve(x$get(), ...)
  x$setSolve(inverse)
  inverse
}
|
16e9215b6e5ad6b29b005fe31d690b337ed93066
|
c12d5e3b1930f53a65931ae1e844382b9e44b163
|
/old_isac/agingstutdy/analysis/methylation/190905_aging_bsseq.R
|
7326f74b4a2f3fcb0af43135fcc766d0273ebcea
|
[
"MIT"
] |
permissive
|
timplab/ambic-epigenome
|
021318542060b15b71849ced9f68bd2cd2734b06
|
bc0abe4e5c075906f42f1208a6992ac8497f9b44
|
refs/heads/master
| 2023-04-19T01:51:04.649296
| 2020-10-22T17:32:16
| 2020-10-22T17:32:16
| 120,519,523
| 1
| 2
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,244
|
r
|
190905_aging_bsseq.R
|
# Aging-study methylation analysis: load pooled bisulfite methylation
# frequencies into a BSseq object, summarize raw methylation over gene
# promoters, and plot per-sample/per-day distributions.
# NOTE(review): tibble/readr/dplyr/tidyr/ggplot2 functions are used below but
# never attached here -- presumably loaded by the sourced ggplot_theme.R; confirm.
rm(list=ls());gc()
source("/home/isac/Code/ilee/plot/ggplot_theme.R")
library(bsseq)
# data info ----
root <- "/home/isac/Data/ambic/pooled_rep/mfreq"
regpath <- "/mithril/Data/NGS/Reference/cho/picr_ensembl/annotation/cho_picr_genes.bed"
days <- factor(c(0,30,60,90))
reps <- factor(c(1,2,3))
samples <- c("Host","StableGln","StableNogln","UnstableGln","UnstableNogln")
# Sample sheet: one row per (sample, day, replicate), with the derived
# library name and the path to its CpG methylation frequency file.
# (tibble() lets later columns reference earlier ones, e.g. fp uses name.)
pd <- tibble(sample = rep(samples,length(days)*length(reps)),
             day = rep(rep(days,each = length(samples)),each = length(reps)),
             replicate = rep(rep(reps,each = length(samples)),length(days)),
             name = paste0("CHOZN",sample,"Day",day,"_",replicate),
             fp = paste0(root,"/",name,".cpg.meth.freq.txt.gz"))
bspath <- file.path(root,"choSigmaAgingStudy_BSseq.Rds")
# read as bsseq object ----
# Cache the parsed BSseq object on disk; re-parse only if the cache is absent.
if ( ! file.exists(bspath)){
  bsobj <- read.bismark(pd$fp,colData = pd)
  saveRDS(bsobj,bspath)
} else {
  bsobj <- readRDS(bspath)
}
# read gene annotations ----
# start + 1: presumably converting 0-based BED starts to 1-based GRanges
# coordinates -- TODO confirm against the annotation source.
regs <- read_tsv(regpath,col_names = c("chrom","start","end","name","score","strand","id")) %>%
  mutate(start = start + 1)
regs.gr <- GRanges(regs)
proms <- promoters(regs.gr)
# data in regions ----
# Raw (unsmoothed) methylation per promoter region, one column per library,
# renamed to the library names from the sample sheet.
meth.regs <- getMeth(bsobj,proms,type = "raw",what = "perRegion")
meth.regs <- as_tibble(meth.regs)
names(meth.regs) <- pData(bsobj)$name
meth.regs$idx <- seq_along(regs.gr)
# gather ----
# Reshape to long form; the rep(..., each = #regions) columns rely on
# gather() emitting columns in order, matching pd's row order -- verify.
meth.gather <- meth.regs %>%
  gather(name,freq,-idx) %>%
  mutate(sample = rep(pd$sample,each = length(regs.gr)),
         day = rep(pd$day,each = length(regs.gr)),
         replicate = rep(pd$replicate,each = length(regs.gr)))
# collapse replicates ----
# Average methylation across replicates for each sample/day/region.
meth.avg <- meth.gather %>%
  group_by(sample,day,idx) %>%
  summarize(freq = mean(freq)) %>%
  na.omit()
# first density plots? ----
ggplot(meth.avg,aes(x = freq, group = day, color = day)) +
  facet_wrap(~sample) +
  geom_density()
ggplot(meth.avg,aes(y = freq, color = day, x = sample)) +
  geom_violin(aes(fill = day),alpha = 0.3,width = 0.7) + geom_boxplot(width = 0.7,alpha = 0) +
  labs( x = "Sample", y = "Methylation", title = "Distributions of promoter methylation")
# insertion is near :
# Find the gene whose promoter is nearest to this region of interest.
ins <- GRanges(tibble(chrom="RAZU01001824.1",start = 199405, end = 200000))
nearest_gene <- proms[nearest(ins,proms)]
|
8b63f7ae44e72abfcd644e0d24ae49901fabcc66
|
d8b76f60b1a9dc4823bdad3d2ff7908a841a2bba
|
/Accuracy_Assessment/server.R
|
19fdd4679f44e39584a6963cc8fdcc08e9d17da5
|
[
"CC-BY-3.0",
"MIT"
] |
permissive
|
DrRoad/TreeFindr
|
fabaa06faeee6e734c27d110b7bc01a7f217dad8
|
02aa5c0f35e19c636fd20f6caff180739eed0f5d
|
refs/heads/master
| 2021-06-12T19:51:27.388479
| 2017-03-26T22:11:05
| 2017-03-26T22:11:05
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,985
|
r
|
server.R
|
#server.R
# Shiny server for the stem accuracy-assessment app: previews an uploaded
# CSV distance matrix, computes summary statistics, and builds the
# column-selection dropdowns from the file's headers.
library(shiny)
#source('Stem_Accuracy_Assessment.R')
# NOTE(review): readDistMat(), statsGen() and usrCols (used in
# output$stemStats below) are not defined in this file; they presumably come
# from Stem_Accuracy_Assessment.R, whose source() call above is commented
# out -- confirm before deploying, otherwise stemStats will error.
shinyServer(function(input, output) {
  # Render the uploaded CSV as-is (raw distance matrix preview).
  output$contents <- renderTable({
    #output raw distance matrix table
    inFile = input$file
    #check file is uploaded
    if (is.null(inFile))
      return(NULL)
    filePath = inFile$datapath
    read.csv(filePath)
  })
  # Summary-statistics table derived from the uploaded distance matrix.
  output$stemStats <- renderTable({
    #generate stem stats table
    inFile = input$file
    #check file is uploaded
    if (is.null(inFile))
      return()
    filePath = inFile$datapath
    newMat = readDistMat(filePath, usrCols, chooseHdr = TRUE)
    statsDisplay = statsGen(newMat)
  })
  # Name of the uploaded file, echoed back for display in the UI.
  output$fileName <- renderText({
    #output file name for display
    inFile = input$file
    txt = inFile$name
  })
  # Dropdown for the "actual stems" column, built from the CSV headers.
  output$selectList <- renderUI({
    #skip if no uploaded table
    if (is.null(input$file)) {
      return()
    }
    #get header names
    datf = input$file
    dat = read.csv(datf$datapath)
    headers = names(dat)
    #create selectable list
    selectInput('selectAct',
                label = 'Actual Stem Column',
                choices = headers,
                selected = 1)
  })
  # Dropdown for the "estimated stems" column (defaults to 2nd header).
  output$selectList2 <- renderUI({
    #skip if no uploaded table
    if (is.null(input$file)) {
      return()
    }
    #get header names
    datf = input$file
    dat = read.csv(datf$datapath)
    headers = names(dat)
    selectInput('selectEst',
                label = 'Estimated Stem Column',
                choices = headers,
                selected = 2)
  })
  # Dropdown for the distance column (defaults to 3rd header).
  output$selectList3 <- renderUI({
    #skip if no uploaded table
    if (is.null(input$file)) {
      return()
    }
    #get header names
    datf = input$file
    dat = read.csv(datf$datapath)
    headers = names(dat)
    selectInput('selectDist',
                label = 'Distance Column',
                choices = headers,
                selected = 3)
  })
})
|
812902f642873b476103b9d98f05dd5ca8bd04fb
|
ad48fb6f81fa7baa0d923292574205bed92a39f9
|
/man/rra.sgrna_summary.Rd
|
f70a3bc8019bfaf2011acaf0afe2e245274f23a4
|
[] |
no_license
|
jiayi-s/MAGeCKFlute
|
1356ec05b5cb00fe1b043599c899646024a6b1ea
|
09305825fb8bb76d41036f91e3ede3fea651f191
|
refs/heads/master
| 2020-05-05T11:20:06.402856
| 2019-03-26T12:30:28
| 2019-03-26T12:30:28
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 489
|
rd
|
rra.sgrna_summary.Rd
|
\name{rra.sgrna_summary}
\alias{rra.sgrna_summary}
\docType{data}
\title{
sgRNA summary data generated by running MAGeCK RRA
}
\description{
The sgRNA summary results generated by running `mageck test` on CRISPR screens.
}
\usage{data("rra.sgrna_summary")}
\format{
A data frame.
}
\references{
\url{https://www.ncbi.nlm.nih.gov/pubmed/25494202}
\url{https://www.ncbi.nlm.nih.gov/pubmed/25476604}
}
\examples{
data(rra.sgrna_summary)
head(rra.sgrna_summary)
}
\keyword{datasets}
|
73829df479f3848e8986d3dfc6412fb432dff042
|
8d64b5877b001fb938bfaba4b60bb92fac91b346
|
/_source/a-correlation-significance-curve/a-correlation-significance-curve.R
|
e3cd7f8c1fa64419552eef8cdfb244f6063e4286
|
[
"MIT"
] |
permissive
|
scholargj/bradleyboehmke.github.io
|
c092f0d2ce350e99efd8071c4e0f05aadb69d7ba
|
fd24375d27d4779d3c7d2e0d978a1e6ca730d0a5
|
refs/heads/master
| 2023-03-03T11:40:00.906704
| 2021-02-11T16:33:22
| 2021-02-11T16:33:22
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 137
|
r
|
a-correlation-significance-curve.R
|
# This R file accompanies the .Rmd blog post
# _source/a-correlation-significance-curve/2016-05-20-a-correlation-significance-curve.Rmd
|
d60516e17653e30e0298e68341580b8339bfdf88
|
fe477bab27aa3bba4b27dd6a7acfd25bb1e14ce4
|
/R/MissingDataGUI-package.r
|
1cd5b9ef2ec9841d2b7fe2a4b332cb09fac8db8f
|
[] |
no_license
|
chxy/MissingDataGUI
|
b4f265b59222989c9fa1d9d9dbdc1a469d84e777
|
87f85ccc65bbe4ed62c2fbdf98a51e6061e30f38
|
refs/heads/master
| 2021-01-21T12:53:00.528901
| 2016-04-29T04:53:40
| 2016-04-29T04:53:40
| 1,214,952
| 4
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 11,193
|
r
|
MissingDataGUI-package.r
|
##' A Graphical User Interface for Exploring Missing Values in Data
##'
##' This package was designed mainly for the exploration of
##' missing values structure, and results of imputation, using static
##' graphics and numerical summaries. A graphical user interface (GUI)
##' makes it accessible to novice users.
##'
##' @name MissingDataGUI-package
##' @docType package
##' @importFrom grDevices dev.off png
##' @importFrom stats na.omit as.formula complete.cases dist hclust median rnorm runif
##' @references Xiaoyue Cheng, Dianne Cook, Heike Hofmann (2015). Visually Exploring Missing Values in Multivariable Data Using a Graphical User Interface. Journal of Statistical Software, 68(6), 1-23. doi:10.18637/jss.v068.i06
##' @examples
##' if (interactive()) {
##' MissingDataGUI()
##' }
##'
NULL
##' West Pacific Tropical Atmosphere Ocean Data, 1993 & 1997.
##'
##' Real-time data from moored ocean buoys for improved detection,
##' understanding and prediction of El Niño and La Niña.
##'
##' The data is collected by the Tropical Atmosphere Ocean project (
##' \url{http://www.pmel.noaa.gov/tao/index.shtml}).
##'
##' Format: a data frame with 736 observations on the following 8
##' variables. \describe{\item{\code{year}}{A factor with levels
##' \code{1993} \code{1997}.} \item{\code{latitude}}{A factor with
##' levels \code{-5} \code{-2} \code{0}.} \item{\code{longitude}}{A
##' factor with levels \code{-110} \code{-95}.}
##' \item{\code{sea.surface.temp}}{Sea surface temperature(degree
##' Celsius), measured by the TAO buoys at one meter below the
##' surface.} \item{\code{air.temp}}{Air temperature(degree Celsius),
##' measured by the TAO buoys three meters above the sea surface.}
##' \item{\code{humidity}}{Relative humidity(\%), measured by the TAO
##' buoys 3 meters above the sea surface.} \item{\code{uwind}}{The
##' East-West wind vector components(M/s). TAO buoys measure the wind
##' speed and direction four meters above the sea surface. If it is
##' positive, the East-West component of the wind is blowing towards
##' the East. If it is negative, this component is blowing towards the
##' West.}\item{\code{vwind}}{The North-South wind vector
##' components(M/s). TAO buoys measure the wind speed and direction
##' four meters above the sea surface. If it is positive, the
##' North-South component of the wind is blowing towards the North.
##' If it is negative, this component is blowing towards the South.}}
##' @name tao
##' @docType data
##' @usage data(tao)
##' @source \url{http://www.pmel.noaa.gov/tao/data_deliv/deliv.html}
##' @keywords datasets
##' @examples
##' if (interactive()) {
##' data(tao)
##' MissingDataGUI(tao)
##' }
##'
NULL
##' The Behavioral Risk Factor Surveillance System (BRFSS) Survey
##' Data, 2009.
##'
##' The data is a subset of the 2009 survey from BRFSS, an ongoing
##' data collection program designed to measure behavioral risk
##' factors for the adult population (18 years of age or older) living
##' in households.
##'
##' Also see the codebook:
##' \url{http://ftp.cdc.gov/pub/data/brfss/codebook_09.rtf}
##'
##' Format: a data frame with 245 observations on the following 34
##' variables. \describe{\item{\code{STATE}}{A factor with 52 levels.
##' The labels and states corresponding to the labels are as follows.
##' 1:Alabama, 2:Alaska, 4:Arizona, 5:Arkansas, 6:California,
##' 8:Colorado, 9:Connecticut, 10:Delaware, 11:District of Columbia,
##' 12:Florida, 13:Georgia, 15:Hawaii, 16:Idaho, 17:Illinois,
##' 18:Indiana, 19:Iowa, 20:Kansas, 21:Kentucky, 22:Louisiana,
##' 23:Maine, 24:Maryland, 25:Massachusetts, 26:Michigan,
##' 27:Minnesota, 28:Mississippi, 29:Missouri, 30:Montana,
##' 31:Nebraska, 32:Nevada, 33:New Hampshire, 34:New Jersey, 35:New
##' Mexico, 36:New York, 37:North Carolina, 38:North Dakota, 39:Ohio,
##' 40:Oklahoma, 41:Oregon, 42:Pennsylvania, 44:Rhode Island, 45:South
##' Carolina, 46:South Dakota, 47:Tennessee, 48:Texas, 49:Utah,
##' 50:Vermont, 51:Virginia, 53:Washington, 54:West Virginia,
##' 55:Wisconsin, 56:Wyoming, 66:Guam, 72:Puerto Rico, 78:Virgin
##' Islands} \item{\code{SEX}}{A factor with levels \code{Male}
##' \code{Female}.} \item{\code{AGE}}{A numeric vector from 7 to 97.}
##' \item{\code{HISPANC2}}{A factor with levels \code{Yes} \code{No}
##' corresponding to the question: are you Hispanic or Latino?}
##' \item{\code{VETERAN2}}{A factor with levels \code{1} \code{2}
##' \code{3} \code{4} \code{5}. The question for this variable is:
##' Have you ever served on active duty in the United States Armed
##' Forces, either in the regular military or in a National Guard or
##' military reserve unit? Active duty does not include training for
##' the Reserves or National Guard, but DOES include activation, for
##' example, for the Persian Gulf War. And the labels are meaning: 1:
##' Yes, now on active duty; 2: Yes, on active duty during the last 12
##' months, but not now; 3: Yes, on active duty in the past, but not
##' during the last 12 months; 4: No, training for Reserves or
##' National Guard only; 5: No, never served in the military.}
##' \item{\code{MARITAL}}{A factor with levels \code{Married}
##' \code{Divorced} \code{Widowed} \code{Separated}
##' \code{NeverMarried} \code{UnmarriedCouple}.}
##' \item{\code{CHILDREN}}{A numeric vector giving the number of
##' children less than 18 years of age in household.}
##' \item{\code{EDUCA}}{A factor with the education levels \code{1}
##' \code{2} \code{3} \code{4} \code{5} \code{6} as 1: Never attended
##' school or only kindergarten; 2: Grades 1 through 8 (Elementary);
##' 3: Grades 9 through 11 (Some high school); 4: Grade 12 or GED
##' (High school graduate); 5: College 1 year to 3 years (Some college
##' or technical school); 6: College 4 years or more (College
##' graduate).} \item{\code{EMPLOY}}{A factor showing the employment
##' status with levels \code{1} \code{2} \code{3} \code{4} \code{5}
##' \code{7} \code{8}. The labels mean -- 1: Employed for wages; 2:
##' Self-employed; 3: Out of work for more than 1 year; 4: Out of
##' work for less that 1 year; 5: A homemaker; 6: A student; 7:
##' Retired; 8: Unable to work.} \item{\code{INCOME2}}{The annual
##' household income from all sources with levels \code{<10k}
##' \code{10-15k} \code{15-20k} \code{20-25k} \code{25-35k}
##' \code{35-50k} \code{50-75k} \code{>75k} \code{Dontknow}
##' \code{Refused}.} \item{\code{WEIGHT2}}{The weight without shoes in
##' pounds.} \item{\code{HEIGHT3}}{The weight without shoes in
##' inches.} \item{\code{PREGNANT}}{Whether pregnant now with two
##' levels \code{Yes} and \code{No}.} \item{\code{GENHLTH}}{The
##' answer to the question "in general your health is" with levels
##' \code{Excellent} \code{VeryGood} \code{Good} \code{Fair}
##' \code{Poor} \code{Refused}.} \item{\code{PHYSHLTH}}{The number of
##' days during the last 30 days that the respondent's physical health
##' was not good. -7 is for "Don't know/Not sure", and -9 is for
##' "Refused".} \item{\code{MENTHLTH}}{The number of days during the
##' last 30 days that the respondent's mental health was not good. -7
##' is for "Don't know/Not sure", and -9 is for "Refused".}
##' \item{\code{POORHLTH}}{The number of days during the last 30 days
##' that poor physical or mental health keep the respondent from doing
##' usual activities, such as self-care, work, or recreation. -7 is
##' for "Don't know/Not sure", and -9 is for "Refused".}
##' \item{\code{HLTHPLAN}}{Whether having any kind of health care
##' coverage, including health insurance, prepaid plans such as HMOs,
##' or government plans such as Medicare. The answer has two levels:
##' \code{Yes} and \code{No}.} \item{\code{CAREGIVE}}{Whether
##' providing any such care or assistance to a friend or family member
##' during the past month, with levels \code{Yes} and \code{No}.}
##' \item{\code{QLACTLM2}}{ Whether being limited in any way in any
##' activities because of physical, mental, or emotional problems,
##' with levels \code{Yes} and \code{No}.}
##' \item{\code{DRNKANY4}}{Whether having had at least one drink of
##' any alcoholic beverage such as beer, wine, a malt beverage or
##' liquor during the past 30 days, with levels \code{Yes} and
##' \code{No}.} \item{\code{ALCDAY4}}{The number of days during the
##' past 30 days that the respondent had at least one drink of any
##' alcoholic beverage. -7 is for "Don't know/Not sure", and -9 is
##' for "Refused".} \item{\code{AVEDRNK2}}{The number of drinks on the
##' average the respondent had on the days when he/she drank, during
##' the past 30 days. -7 is for "Don't know/Not sure", and -9 is for
##' "Refused".} \item{\code{SMOKE100}}{ Whether having smoked at least
##' 100 cigarettes in the entire life, with levels \code{Yes} and
##' \code{No}.} \item{\code{SMOKDAY2}}{ The frequency of days now
##' smoking, with levels \code{Everyday} \code{Somedays} and
##' \code{NotAtAll}(not at all).} \item{\code{STOPSMK2}}{Whether
##' having stopped smoking for one day or longer during the past 12
##' months because the respondent was trying to quit smoking, with
##' levels \code{Yes} and \code{No}.} \item{\code{LASTSMK1}}{A factor
##' with levels \code{3} \code{4} \code{5} \code{6} \code{7} \code{8}
##' corresponding to the question: how long has it been since last
##' smokeing cigarettes regularly? The labels mean: 3: Within the past
##' 6 months (3 months but less than 6 months ago); 4: Within the past
##' year (6 months but less than 1 year ago); 5: Within the past 5
##' years (1 year but less than 5 years ago); 6: Within the past 10
##' years (5 years but less than 10 years ago); 7: 10 years or more;
##' 8: Never smoked regularly.} \item{\code{FRUIT}}{The number of
##' fruit the respondent eat every year, not counting juice. -7 is for
##' "Don't know/Not sure", and -9 is for "Refused".}
##' \item{\code{GREENSAL}}{The number of servings of green salad the
##' respondent eat every year. -7 is for "Don't know/Not sure",
##' and -9 is for "Refused".} \item{\code{POTATOES}}{ The number of
##' servings of potatoes, not including french fries, fried potatoes,
##' or potato chips, that the respondent eat every year. -7 is for
##' "Don't know/Not sure", and -9 is for "Refused".}
##' \item{\code{CARROTS}}{The number of carrots the respondent eat
##' every year. -7 is for "Don't know/Not sure", and -9 is for
##' "Refused".} \item{\code{VEGETABL}}{The number of servings of
##' vegetables the respondent eat every year, not counting carrots,
##' potatoes, or salad. -7 is for "Don't know/Not sure", and -9 is
##' for "Refused".} \item{\code{FRUITJUI}}{The number of fruit juices
##' such as orange, grapefruit, or tomato that the respondent drink
##' every year. -7 is for "Don't know/Not sure", and -9 is for
##' "Refused".} \item{\code{BMI4}}{Body Mass Index (BMI). Computed by
##' WEIGHT in Kilograms/(HEIGHT in Meters * HEIGHT3 in Meters).
##' Missing if any of WEIGHT2 or HEIGHT3 is missing.} }
##' @name brfss
##' @docType data
##' @usage data(brfss)
##' @source \url{http://www.cdc.gov/brfss/data_documentation/index.htm}
##' @keywords datasets
##' @examples
##' if (interactive()) {
##' data(brfss)
##' MissingDataGUI(brfss)
##' }
##'
NULL
|
d8e42f517b8b9ec4bdc7db8ec653fe05c51cd2f1
|
18461dbdf8dd94182fba5c56845fd67baf371d69
|
/man/pack.Rd
|
597b54ee65af6a1366cc40cfd0899235702f94ad
|
[
"Apache-2.0"
] |
permissive
|
nuest/r
|
7399b5b92e4a8f2448222464b7a3630f2eae160a
|
27be28837427b5e58e90c697eba581d9c6e15457
|
refs/heads/master
| 2020-03-16T22:03:19.423599
| 2018-05-11T10:18:41
| 2018-05-11T10:18:41
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 312
|
rd
|
pack.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/value.R
\name{pack}
\alias{pack}
\title{Pack an R value into a package}
\usage{
pack(value)
}
\arguments{
\item{value}{The R value to be packaged}
}
\value{
A package as an R \code{list}
}
\description{
Pack an R value into a package
}
|
40c5d62c573f116273ff3972dac30b1050c31411
|
03fd892a9c0d9b64973af09ff96d6bdb235d2fe5
|
/man/get_model.Rd
|
76cba1069e309804bcef0374ddf1bac3210dbf76
|
[] |
no_license
|
duke00x-junyuangao/GA
|
7d94c4d3baa5a79f62dab6e6452d5faadb28e620
|
46fedd7073278e7526a9ebd81540b3273b9cb7e2
|
refs/heads/master
| 2021-04-27T00:08:22.650218
| 2017-12-14T23:51:18
| 2017-12-14T23:51:18
| 111,476,886
| 0
| 0
| null | 2017-11-21T00:10:12
| 2017-11-21T00:10:12
| null |
UTF-8
|
R
| false
| true
| 554
|
rd
|
get_model.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/utils.R
\name{get_model}
\alias{get_model}
\title{get_model}
\usage{
get_model(candidate, fitness_values, method_text, X)
}
\arguments{
\item{candidate}{(binary vector length c): on or off for each column of X
method: method for fitting}
\item{X}{(matrix n x (c+1)): data (n x c) and the last column is the value of y.}
}
\value{
lm/glm object : the model selected after GA.
}
\description{
This function fits the chosen method on the columns of X selected by the candidate and returns the resulting model.
}
|
525f6a30c007a67b8cf153ecaf53202fea7d5894
|
6add714b854dc1f4a4d58fced9184d8334103b2a
|
/run_analysis.R
|
4267125e3bb080935023927c24cc1f8c7e852ec3
|
[] |
no_license
|
shaharpit/getdata-012
|
87ac026b61bc2161b267d995e46b06e2e095fb8e
|
039f9b5d75f58e612151c8ef211fe63d2bbb66f2
|
refs/heads/master
| 2021-01-21T21:49:00.796714
| 2015-03-22T23:50:26
| 2015-03-22T23:50:26
| 32,700,112
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 4,279
|
r
|
run_analysis.R
|
## INITIALIZE ENVIRONMENT: ensure the "dplyr" package is available, then load it.
## requireNamespace() checks availability directly instead of scanning the
## whole installed.packages() matrix as the original did.
if (!requireNamespace("dplyr", quietly = TRUE)) install.packages("dplyr")
library(dplyr)
## STEP 0: READ IN ALL DATA
## Paths are built with file.path() so the script is portable; the original
## hard-coded Windows-only "\\" separators, which fail on Linux/macOS.
## a) Read in feature names (become the column names of the measurement data).
features <- read.table("features.txt", stringsAsFactors = FALSE,
                       col.names = c("Num", "FeatureName"))
## b) Read in training data sets:
##    - X_train.txt       = training measurements       -> train_data
##    - y_train.txt       = training activity labels    -> train_labels
##    - subject_train.txt = subject performing activity -> train_subj
train_data <- read.table(file.path("train", "X_train.txt"),
                         stringsAsFactors = FALSE,
                         col.names = features$FeatureName)
train_labels <- read.table(file.path("train", "y_train.txt"),
                           stringsAsFactors = FALSE,
                           col.names = c("Activity"))
train_subj <- read.table(file.path("train", "subject_train.txt"),
                         stringsAsFactors = FALSE,
                         col.names = c("SubjectID"))
## c) Read in test data sets (same layout as the training files).
test_data <- read.table(file.path("test", "X_test.txt"),
                        stringsAsFactors = FALSE,
                        col.names = features$FeatureName)
test_labels <- read.table(file.path("test", "y_test.txt"),
                          stringsAsFactors = FALSE,
                          col.names = c("Activity"))
test_subj <- read.table(file.path("test", "subject_test.txt"),
                        stringsAsFactors = FALSE,
                        col.names = c("SubjectID"))
## STEP 1: MERGE TRAINING AND TEST DATA SETS TO CREATE ONE DATA SET
## Subject and activity columns go to the left of the measurements.
train_compl <- cbind(train_subj, train_labels, train_data)
test_compl <- cbind(test_subj, test_labels, test_data)
data_compl <- rbind(train_compl, test_compl)
## STEP 2: EXTRACT ONLY MEAN AND STANDARD-DEVIATION MEASUREMENTS
## ".mean." / ".std." are matched as fixed strings: read.table's make.names()
## turned "-mean()" into ".mean..", so the literal dots are intentional.
## SubjectID and Activity stay as the two leading columns.
data_compl <- cbind(SubjectID = data_compl[, "SubjectID"],
                    Activity = data_compl[, "Activity"],
                    data_compl[, grep(".mean.", names(data_compl), fixed = TRUE)],
                    data_compl[, grep(".std.", names(data_compl), fixed = TRUE)])
## STEP 3: USE DESCRIPTIVE ACTIVITY NAMES TO NAME THE ACTIVITIES
## Recode the numeric codes 1-6 to the names from activity_labels.txt.
data_compl$Activity <- as.factor(data_compl$Activity)
levels(data_compl$Activity)[levels(data_compl$Activity) == "1"] <- "WALKING"
levels(data_compl$Activity)[levels(data_compl$Activity) == "2"] <- "WALKING_UPSTAIRS"
levels(data_compl$Activity)[levels(data_compl$Activity) == "3"] <- "WALKING_DOWNSTAIRS"
levels(data_compl$Activity)[levels(data_compl$Activity) == "4"] <- "SITTING"
levels(data_compl$Activity)[levels(data_compl$Activity) == "5"] <- "STANDING"
levels(data_compl$Activity)[levels(data_compl$Activity) == "6"] <- "LAYING"
## STEP 4: LABEL THE DATA SET WITH DESCRIPTIVE VARIABLE NAMES
## Completed as part of the read process in STEP 0 (col.names arguments).
## STEP 5: CREATE INDEPENDENT TIDY DATA SET WITH THE AVERAGE OF EACH
##         VARIABLE FOR EACH ACTIVITY AND EACH SUBJECT
## NOTE(review): summarise_each()/funs() are deprecated in modern dplyr;
## summarise(across(everything(), mean)) is the current equivalent.
data_compl_grp <- group_by(data_compl, SubjectID, Activity)
data_tidy <- summarise_each(data_compl_grp, funs(mean))
## Write the tidy data set to file.
write.table(data_tidy, file = "getdata-012-proj-tidydata.txt", row.names = FALSE)
|
360feed165121042d1db930d01121c0d8c116769
|
bca194c55442436b19599cb90989c3b9d02083d4
|
/R/generate.adhb.revision.recalculated.one.event.op.patient.dt.R
|
a7ca06800214da36e4feeed0d470f0fc10879621
|
[] |
no_license
|
mattmoo/checkwho_analysis
|
7a9d50ab202e7ac96b417c11227f9db429007c48
|
3474382423aad80b8061bfdf0a32c7632215640a
|
refs/heads/main
| 2023-07-14T22:33:54.320577
| 2021-08-24T23:27:37
| 2021-08-24T23:27:37
| 311,141,531
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,818
|
r
|
generate.adhb.revision.recalculated.one.event.op.patient.dt.R
|
##' Recalculate the first eligible theatre event per patient for the
##' pre and post study periods.
##'
##' Works on a copy of the supplied event table: converts the theatre
##' entry timestamp to a date-time, flags events falling inside the pre
##' and post periods (both bounds inclusive), keeps only flagged events
##' in chronological order, and then retains the earliest event per
##' patient (NHI) within each period. Counts of events that differ from
##' a previously calculated table are printed as a diagnostic before the
##' recalculated table is returned.
##'
##' @title Recalculate first eligible events per patient per period
##' @param adhb.event.op.patient.dt data.table of ADHB theatre events;
##'   must contain `Actual Into Theatre Date Time`, `NHI` and
##'   `Event ID` columns.
##' @param pre.period.start Start of the pre period (coercible to date-time).
##' @param pre.period.end End of the pre period (inclusive).
##' @param post.period.start Start of the post period.
##' @param post.period.end End of the post period (inclusive).
##' @param adhb.revision.one.event.op.patient.dt Previously calculated
##'   event table (with an EventNumber column); used only for the
##'   printed comparison counts.
##' @return data.table of the earliest eligible event(s) per patient per
##'   period (ties on the minimum timestamp are all kept).
generate.adhb.revision.one.recalculated.event.op.patient.dt <-
function(adhb.event.op.patient.dt,
pre.period.start,
pre.period.end,
post.period.start,
post.period.end,
adhb.revision.one.event.op.patient.dt) {
## Copy so the caller's data.table is not modified by the := updates below.
adhb.revision.one.recalculated.event.op.patient.dt = copy(adhb.event.op.patient.dt)
## Normalise the timestamp column to a date-time (lubridate::as_datetime).
adhb.revision.one.recalculated.event.op.patient.dt[,`Actual Into Theatre Date Time` := as_datetime(`Actual Into Theatre Date Time`)]
## Flag events inside each study period; both bounds are inclusive.
adhb.revision.one.recalculated.event.op.patient.dt[,in.pre.period := (`Actual Into Theatre Date Time` >= pre.period.start &
`Actual Into Theatre Date Time` <= pre.period.end)]
adhb.revision.one.recalculated.event.op.patient.dt[,in.post.period := (`Actual Into Theatre Date Time` >= post.period.start &
`Actual Into Theatre Date Time` <= post.period.end)]
## Keep only in-period events, ordered chronologically.
adhb.revision.one.recalculated.event.op.patient.dt = adhb.revision.one.recalculated.event.op.patient.dt[in.pre.period |
in.post.period][order(`Actual Into Theatre Date Time`)]
## Disabled adult-only (>= 16 years) filter, kept for reference:
# adhb.revision.one.recalculated.event.op.patient.dt[,age.years := as.numeric(lubridate::interval(`Date of Birth`,`Actual Into Theatre Date Time`,)/years(1))]
# adhb.revision.one.recalculated.event.op.patient.dt = adhb.revision.one.recalculated.event.op.patient.dt[age.years >= 16]
## NOTE(review): these two flag columns are initialised to FALSE but are
## never set to TRUE anywhere in this function -- they appear vestigial.
## Confirm downstream code does not rely on them before removing.
adhb.revision.one.recalculated.event.op.patient.dt[, in.pre.period.first.eligible := FALSE]
adhb.revision.one.recalculated.event.op.patient.dt[, in.post.period.first.eligible := FALSE]
## For each period, keep the earliest event per patient (NHI); rows that
## tie on the minimum timestamp are all kept.
adhb.revision.one.recalculated.event.op.patient.dt = rbindlist(list(
adhb.revision.one.recalculated.event.op.patient.dt[in.pre.period == TRUE, .SD[`Actual Into Theatre Date Time` == min(`Actual Into Theatre Date Time`)], by = NHI],
adhb.revision.one.recalculated.event.op.patient.dt[in.post.period == TRUE, .SD[`Actual Into Theatre Date Time` == min(`Actual Into Theatre Date Time`)], by = NHI]
))
## Diagnostic comparison with the previously calculated table: events
## present in exactly one of the two tables.
missing.from.new = adhb.revision.one.event.op.patient.dt[!EventNumber %in% adhb.revision.one.recalculated.event.op.patient.dt[,`Event ID`]]
missing.from.old = adhb.revision.one.recalculated.event.op.patient.dt[!`Event ID` %in% adhb.revision.one.event.op.patient.dt[,EventNumber]]
## Side effect: print the mismatch counts to the console.
print(missing.from.new[,.N])
print(missing.from.old[,.N])
return(adhb.revision.one.recalculated.event.op.patient.dt)
}
|
53da59f5aa49d44018b75c670879aaa81d1f4693
|
c7a902cf2e9201641d1dd1b2aea02eec39c70efe
|
/man/calculate_centralities.Rd
|
8506849763fa4910d4dcdc663a22bb6ff9ccaf7a
|
[] |
no_license
|
cran/CINNA
|
cc4cc8e41eae0a93ee49078f960f3ecbcb97954e
|
2323440667c6908ca7aa93cbb84574186c9e8992
|
refs/heads/master
| 2023-08-18T21:07:44.684850
| 2023-08-08T16:40:02
| 2023-08-08T17:30:25
| 97,342,206
| 1
| 1
| null | null | null | null |
UTF-8
|
R
| false
| true
| 13,272
|
rd
|
calculate_centralities.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/CINNA.R
\name{calculate_centralities}
\alias{calculate_centralities}
\title{Centrality measure calculation}
\usage{
calculate_centralities(x, except = NULL, include = NULL, weights = NULL)
}
\arguments{
\item{x}{the component of a network as an igraph object}
\item{except}{A vector containing names of centrality measures which could be omitted from the calculations.}
\item{include}{A vector including names of centrality measures which should be computed.}
\item{weights}{A character scalar specifying the edge attribute to use. (default=NULL)}
}
\value{
A list containing centrality measure values. Each column indicates a centrality measure, and each row corresponds to a vertex.
The structure of the output is as follows:
- The list has named elements, where each element represents a centrality measure.
- The value of each element is a numeric vector, where each element of the vector corresponds to a vertex in the network.
- The order of vertices in the numeric vectors matches the order of vertices in the input igraph object.
- The class of the output is "centrality".
}
\description{
This function computes a multitude of centrality measures of an igraph object.
}
\details{
This function calculates various types of centrality measures which are applicable to the network topology
and returns the results as a list. In the "except" argument, you can specify centrality measures that are
not necessary to calculate.
}
\examples{
data("zachary")
p <- proper_centralities(zachary)
calculate_centralities(zachary, include = "Degree Centrality")
}
\references{
Bonacich, P., & Lloyd, P. (2001). Eigenvector like measures of centrality for asymmetric relations. Social Networks, 23(3), 191–201.
Bonacich, P. (1972). Factoring and weighting approaches to status scores and clique identification. The Journal of Mathematical Sociology, 2(1), 113–120.
Bonacich, P. (1987). Power and Centrality: A Family of Measures. American Journal of Sociology, 92(5), 1170–1182.
Burt, R. S. (2004). Structural Holes and Good Ideas. American Journal of Sociology, 110(2), 349–399.
Batagelj, V., & Zaversnik, M. (2003). An O(m) Algorithm for Cores Decomposition of Networks, 1–9. Retrieved from
Seidman, S. B. (1983). Network structure and minimum degree. Social Networks, 5(3), 269–287.
Kleinberg, J. M. (1999). Authoritative sources in a hyperlinked environment. Journal of the ACM, 46(5), 604–632.
Wasserman, S., & Faust, K. (1994). Social network analysis : methods and applications. American Ethnologist (Vol. 24).
Barrat, A., Barthélemy, M., Pastor Satorras, R., & Vespignani, A. (2004). The architecture of complex weighted networks. Proceedings of the National Academy of Sciences of the United States of America , 101(11), 3747–3752.
Brin, S., & Page, L. (2010). The Anatomy of a Large Scale Hypertextual Web Search Engine The Anatomy of a Search Engine. Search, 30(June 2000), 1–7.
Freeman, L. C. (1978). Centrality in social networks conceptual clarification. Social Networks, 1(3), 215–239.
Brandes, U. (2001). A faster algorithm for betweenness centrality*. The Journal of Mathematical Sociology, 25(2), 163–177.
Estrada E., Rodriguez-Velazquez J. A.: Subgraph centrality in Complex Networks. Physical Review E 71, 056103.
Freeman, L. C., Borgatti, S. P., & White, D. R. (1991). Centrality in valued graphs: A measure of betweenness based on network flow. Social Networks, 13(2), 141–154.
Brandes, U., & Erlebach, T. (Eds.). (2005). Network Analysis (Vol. 3418). Berlin, Heidelberg: Springer Berlin Heidelberg.
Stephenson, K., & Zelen, M. (1989). Rethinking centrality: Methods and examples. Social Networks, 11(1), 1–37.
Wasserman, S., & Faust, K. (1994). Social network analysis : methods and applications. American Ethnologist (Vol. 24).
Brandes, U. (2008). On variants of shortest path betweenness centrality and their generic computation. Social Networks, 30(2), 136–145.
Goh, K.-I., Kahng, B., & Kim, D. (2001). Universal Behavior of Load Distribution in Scale Free Networks. Physical Review Letters, 87(27), 278701.
Shimbel, A. (1953). Structural parameters of communication networks. The Bulletin of Mathematical Biophysics, 15(4), 501–507.
Assenov, Y., Ramrez, F., Schelhorn, S.-E., Lengauer, T., & Albrecht, M. (2008). Computing topological parameters of biological networks. Bioinformatics, 24(2), 282–284.
Diekert, V., & Durand, B. (Eds.). (2005). STACS 2005 (Vol. 3404). Berlin, Heidelberg: Springer Berlin Heidelberg.
Gräßler, J., Koschützki, D., & Schreiber, F. (2012). CentiLib: comprehensive analysis and exploration of network centralities. Bioinformatics (Oxford, England), 28(8), 1178–9.
Latora, V., & Marchiori, M. (2001). Efficient Behavior of Small World Networks. Physical Review Letters, 87(19), 198701.
Opsahl, T., Agneessens, F., & Skvoretz, J. (2010). Node centrality in weighted networks: Generalizing degree and shortest paths. Social Networks, 32(3), 245–251.
Estrada, E., Higham, D. J., & Hatano, N. (2009). Communicability betweenness in complex networks. Physica A: Statistical Mechanics and Its Applications, 388(5), 764–774.
Hagberg, Aric, Pieter Swart, and Daniel S Chult. Exploring network structure, dynamics, and function using NetworkX. No. LA-UR-08-05495; LA-UR-08-5495. Los Alamos National Laboratory (LANL), 2008.
Kalinka, A. T., & Tomancak, P. (2011). linkcomm: an R package for the generation, visualization, and analysis of link communities in networks of arbitrary size and type. Bioinformatics, 27(14), 2011–2012.
Faghani, M. R., & Nguyen, U. T. (2013). A Study of XSS Worm Propagation and Detection Mechanisms in Online Social Networks. IEEE Transactions on Information Forensics and Security, 8(11), 1815–1826.
Brandes, U., & Erlebach, T. (Eds.). (2005). Network Analysis (Vol. 3418). Berlin, Heidelberg: Springer Berlin Heidelberg.
Lin, C.-Y., Chin, C.-H., Wu, H.-H., Chen, S.-H., Ho, C.-W., & Ko, M.-T. (2008). Hubba: hub objects analyzer--a framework of interactome hubs identification for network biology. Nucleic Acids Research, 36(Web Server), W438–W443.
Chin, C., Chen, S., & Wu, H. (2009). cyto Hubba: A Cytoscape Plug in for Hub Object Analysis in Network Biology. Genome Informatics …, 5(Java 5), 2–3.
Qi, X., Fuller, E., Wu, Q., Wu, Y., & Zhang, C.-Q. (2012). Laplacian centrality: A new centrality measure for weighted networks. Information Sciences, 194, 240–253.
Joyce, K. E., Laurienti, P. J., Burdette, J. H., & Hayasaka, S. (2010). A New Measure of Centrality for Brain Networks. PLoS ONE, 5(8), e12200.
Lin, C.-Y., Chin, C.-H., Wu, H.-H., Chen, S.-H., Ho, C.-W., & Ko, M.-T. (2008). Hubba: hub objects analyzer--a framework of interactome hubs identification for network biology. Nucleic Acids Research, 36(Web Server), W438–W443.
Hubbell, C. H. (1965). An Input Output Approach to Clique Identification. Sociometry, 28(4), 377.
Dangalchev, C. (2006). Residual closeness in networks. Physica A: Statistical Mechanics and Its Applications, 365(2), 556–564.
Brandes, U. & Erlebach, T. (2005). Network Analysis: Methodological Foundations, U.S. Government Printing Office.
Korn, A., Schubert, A., & Telcs, A. (2009). Lobby index in networks. Physica A: Statistical Mechanics and Its Applications, 388(11), 2221–2226.
White, S., & Smyth, P. (2003). Algorithms for estimating relative importance in networks. In Proceedings of the ninth ACM SIGKDD international conference on Knowledge discovery and data mining KDD ’03 (p. 266). New York, New York, USA: ACM Press.
Cornish, A. J., & Markowetz, F. (2014). SANTA: Quantifying the Functional Content of Molecular Networks. PLoS Computational Biology, 10(9), e1003808.
Scardoni, G., Petterlini, M., & Laudanna, C. (2009). Analyzing biological network parameters with CentiScaPe. Bioinformatics, 25(21), 2857–2859.
Lin, N. (1976). Foundations of Social Research. Mcgraw Hill.
Borgatti, S. P., & Everett, M. G. (2006). A Graph theoretic perspective on centrality. Social Networks, 28(4), 466–484.
Newman, M. (2010). Networks. Oxford University Press.
Junker, Bjorn H., Dirk Koschutzki, and Falk Schreiber(2006). "Exploration of biological network centralities with CentiBiN." BMC bioinformatics 7.1 : 219.
Pal, S. K., Kundu, S., & Murthy, C. A. (2014). Centrality measures, upper bound, and influence maximization in large scale directed social networks. Fundamenta Informaticae, 130(3), 317–342.
Lin, C.-Y., Chin, C.-H., Wu, H.-H., Chen, S.-H., Ho, C.-W., & Ko, M.-T. (2008). Hubba: hub objects analyzer--a framework of interactome hubs identification for network biology. Nucleic Acids Research, 36(Web Server), W438–W443.
Scardoni, G., Petterlini, M., & Laudanna, C. (2009). Analyzing biological network parameters with CentiScaPe. Bioinformatics, 25(21), 2857–2859.
Freeman, L. C. (1978). Centrality in social networks conceptual clarification. Social Networks, 1(3), 215–239.
Chen, D.-B., Gao, H., Lü, L., & Zhou, T. (2013). Identifying Influential Nodes in Large Scale Directed Networks: The Role of Clustering. PLoS ONE, 8(10), e77455.
Jana Hurajova, S. G. and T. M. (2014). Decay Centrality. In 15th Conference of Kosice Mathematicians. Herlany.
Viswanath, M. (2009). ONTOLOGY BASED AUTOMATIC TEXT SUMMARIZATION. Vishweshwaraiah Institute of Technology.
Przulj, N., Wigle, D. A., & Jurisica, I. (2004). Functional topology in a network of protein interactions. Bioinformatics, 20(3), 340–348.
del Rio, G., Koschützki, D., & Coello, G. (2009). How to identify essential genes from molecular networks? BMC Systems Biology, 3(1), 102.
Scardoni, G. and Carlo Laudanna, C.B.M.C., 2011. Network centralities for Cytoscape. University of Verona.
BOLDI, P. & VIGNA, S. 2014. Axioms for centrality. Internet Mathematics, 00-00.
MARCHIORI, M. & LATORA, V. 2000. Harmony in the small-world. Physica A: Statistical Mechanics and its Applications, 285, 539-546.
OPSAHL, T., AGNEESSENS, F. & SKVORETZ, J. 2010. Node centrality in weighted networks: Generalizing degree and shortest paths. Social Networks, 32, 245-251.
OPSAHL, T. 2010. Closeness centrality in networks with disconnected components (http://toreopsahl.com/2010/03/20/closeness-centrality-in-networks-with-disconnected-components/)
Michalak, T.P., Aadithya, K.V., Szczepanski, P.L., Ravindran, B. and Jennings, N.R., 2013. Efficient computation of the Shapley value for game-theoretic network centrality. Journal of Artificial Intelligence Research, 46, pp.607-650.
Macker, J.P., 2016, November. An improved local bridging centrality model for distributed network analytics. In Military Communications Conference, MILCOM 2016-2016 IEEE (pp. 600-605). IEEE. DOI: 10.1109/MILCOM.2016.7795393
DANGALCHEV, C. 2006. Residual closeness in networks. Physica A: Statistical Mechanics and its Applications, 365, 556-564. DOI: 10.1016/j.physa.2005.12.020
Alain Barrat, Marc Barthelemy, Romualdo Pastor-Satorras, Alessandro Vespignani: The architecture of complex weighted networks, Proc. Natl. Acad. Sci. USA 101, 3747 (2004)
}
\seealso{
\code{\link[igraph]{alpha.centrality}}, \code{\link[igraph]{bonpow}}, \code{\link[igraph]{constraint}},
\code{\link[igraph]{centr_degree}}, \code{\link[igraph]{eccentricity}}, \code{\link[igraph]{eigen_centrality}},
\code{\link[igraph]{coreness}}, \code{\link[igraph]{authority_score}}, \code{\link[igraph]{hub_score}},
\code{\link[igraph]{transitivity}}, \code{\link[igraph]{page_rank}}, \code{\link[igraph]{betweenness}},
\code{\link[igraph]{subgraph.centrality}}, \code{\link[sna]{flowbet}}, \code{\link[sna]{infocent}},
\code{\link[sna]{loadcent}}, \code{\link[sna]{stresscent}}, \code{\link[sna]{graphcent}}, \code{\link[centiserve]{topocoefficient}},
\code{\link[centiserve]{closeness.currentflow}}, \code{\link[centiserve]{closeness.latora}},
\code{\link[centiserve]{communibet}}, \code{\link[centiserve]{communitycent}},
\code{\link[centiserve]{crossclique}}, \code{\link[centiserve]{entropy}},
\code{\link[centiserve]{epc}}, \code{\link[centiserve]{laplacian}}, \code{\link[centiserve]{leverage}},
\code{\link[centiserve]{mnc}}, \code{\link[centiserve]{hubbell}}, \code{\link[centiserve]{semilocal}},
\code{\link[centiserve]{closeness.vitality}},
\code{\link[centiserve]{closeness.residual}}, \code{\link[centiserve]{lobby}},
\code{\link[centiserve]{markovcent}}, \code{\link[centiserve]{radiality}}, \code{\link[centiserve]{lincent}},
\code{\link[centiserve]{geokpath}}, \code{\link[centiserve]{katzcent}}, \code{\link[centiserve]{diffusion.degree}},
\code{\link[centiserve]{dmnc}}, \code{\link[centiserve]{centroid}}, \code{\link[centiserve]{closeness.freeman}},
\code{\link[centiserve]{clusterrank}}, \code{\link[centiserve]{decay}},
\code{\link[centiserve]{barycenter}}, \code{\link[centiserve]{bottleneck}}, \code{\link[centiserve]{averagedis}},
\code{\link[CINNA]{local_bridging_centrality}}, \code{\link[CINNA]{wiener_index_centrality}}, \code{\link[CINNA]{group_centrality}},
\code{\link[CINNA]{dangalchev_closeness_centrality}}, \code{\link[CINNA]{harmonic_centrality}}, \code{\link[igraph]{strength}}
}
\author{
Minoo Ashtiani, Mehdi Mirzaie, Mohieddin Jafari
}
|
25b229848e90756e6b5838f72d65860c07a67a36
|
7a44d412eedf8df51d0d11c721fdf8916d177c4a
|
/HW07_BhavishKumar.R
|
1d55787c9a30b19e12ad73a76ff1d02cfddae46a
|
[] |
no_license
|
bhavish2207/Intro-to-Data-Science-Lab
|
673e6845a86eb67f1c57c57670db41a0d8739a1a
|
98acf2bc18f8580f7db0b8ddf852ba270f46e249
|
refs/heads/master
| 2020-11-24T16:51:24.112852
| 2019-12-15T21:46:39
| 2019-12-15T21:46:39
| 228,257,347
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 13,702
|
r
|
HW07_BhavishKumar.R
|
# IST 687
#
# Student name: BHAVISH KUMAR
# Homework number: 06
# Date due: 7th OCTOBER 2019
#
# Attribution statement: (choose the statements that are true)
# 1. I did this homework by myself, with help from the book and the professor
# Run these three functions to get a clean test of homework code
## Reset the session before running the homework code.
## NOTE(review): dev.off() errors when no graphics device is open, and
## rm(list=ls()) plus a machine-specific setwd() make this script
## non-portable -- kept as-is because the assignment template requires it.
dev.off() # Clear the graph window
cat('\014') # Clear the console
rm(list=ls()) # Clear all user objects from the environment!!!
# Set working directory
# Change to the folder containing your homework data files
setwd("C:\\Users\\LENOVO\\Desktop\\SYR ADS\\Sem 1\\IST 687 Intro to Data Science\\Prep Exercise & Homework")
### PREP EXERCISE 07
## 1. Getting Ready: Load and repair median income data
### A. Download the provided MedianZIP.csv file from Blackboard and read into R-studio into a dataframe called "mydata".
### HINT: Use read_csv() to simplify later steps!
## Read the median income data. Mean is coerced to numeric below; any
## values that fail to parse become NA and are repaired right after.
mydata <- read.csv("MedianZIP.csv", stringsAsFactors = FALSE) ### Reading MedianZIP csv into mydata dataframe
mydata$Mean <- as.numeric(mydata$Mean) ### Converting Mean column to numeric values
### B. Cleaning up the NAs: Find and fix the missing data in the Mean column by substituting the value from the Median column
### in place of the missing mean values. Explain why the median is a reasonable replacement for the mean.
## The which() result is not stored; it is echoed when run interactively.
which(is.na(mydata$Mean)) ### Obtaining row indexes of rows containing NA values in Mean column
mydata$Mean[is.na(mydata$Mean)] <- mydata$Median[is.na(mydata$Mean)] ### Substituting the NAs with corresponding value from Median column
### Median is a reasonable replacement for the mean value because median is a good measure of central tendency and moreover median is not affected by outliers or skewness in the data.
### Hence since Mean is not available the best possible replacement is median.
### C. Examine the data with View( ) and add comments explaining what each column contains.
### Add a comment explaining why the first 2391 zip codes look weird.
View(mydata)
str(mydata)
### The data contains 5 columns which are zip, Median, Mean & Pop that contains the zipcode of the place, the Median income of the location, the Mean income of the location and the population of the location respectively.
## The first 2391 zip codes look weird becuase the zip codes contain values of integer data type and also they contain only 4 digits,
## whereas ideally a zipcode should contain 5 digits as that is the appropriate zip code format.
## 2. Merge the median income data with detailed zipcode data
### A. Code and execute the commands below. Write a paragraph below that explains what this code does.
## NOTE(review): the 'zipcode' package has been archived from CRAN, so
## install.packages("zipcode") may fail on current R installs -- verify
## an archived copy (or a replacement such as zipcodeR) is available.
install.packages("zipcode")
library(zipcode)
## clean.zipcodes() restores leading zeros and returns character zips.
mydata$zip <- clean.zipcodes(mydata$zip)
View(mydata)
## Load the package's built-in 'zipcode' lookup table and join on zip.
data(zipcode)
dfNew <- merge(mydata, zipcode, by="zip")
### The above code installs the zipcode package at first and uses the clean.zipcodes() function to get all the values in the zip column of mydata
### dataframe to the coorect zipcode format of 5 digits and also converts the datatype to character. It restores leading zeroes to all the zipcodes that have only 4 digits.
### The data() function loads in built datasets. The inbuilt zipcode dataset is being used.
### A new dataframe is obtained by merging the mydata dataframe and zipcode dataframe on the zip column
## 3. Merge the new dataset with stateNameDF data
### A. Create a new dataframe with the following code:
## Build a state lookup (abbreviation, lower-case full name, geographic
## center) from R's built-in 'state' datasets, then join it on.
stateNameDF <- data.frame(state=state.abb, stateName=state.name, center=state.center)
stateNameDF$stateName <- tolower(stateNameDF$stateName)
### B. Comment each line of the code to explain what it is doing
### 1. Creating a new stateNameDF using the data.frame function and including the vectors state, stateName & center.
### The state vector gets its values from the abb(abbreviation) column of the inbuilt dataset state. Likewise the vectors stateName and center also get their values from state.name and state.center respectively.
### 2. The values in stateName column are reduced to lowercase using the tolower() function.
### C. Using steps similar to step 2 create a new dataframe that contains our previous information and the information from the stateNameDF.
new_df <- merge(dfNew, stateNameDF, by = "state") ### Merging the dfNew dataset and stateNameDF on state column to obtain all the columns.
## 4. Examine your new df with the View command.
### A. Include a screen shot of the first 10 rows of data and all of your columns.
View(new_df)
### HOMEWORK 07
## STEP 1: Plot points for each zipcode (don't forget to library ggplot2 and ggmap)
### A. Code and execute the following lines of code
## Load the plotting packages. NOTE(review): install.packages() inside a
## script reinstalls on every run; guard with requireNamespace() checks
## if this is ever reused outside the homework context.
library(ggplot2)
install.packages("ggmap")
library(ggmap)
install.packages("maps")
library(maps)
library(tidyverse)
## First (deliberately criticised) visualisation: US state outlines with
## one point per zip code, coloured by that zip's mean income.
us <- map_data("state") ## The map_data function is used to obtain the data from the maps package and load it into a dataframe. A new dataframe called us is created.
dotmap<- ggplot(new_df, aes(map_id = stateName)) ## A new variable called dotmap is created using the ggplot function and the new_df dataframe created in the prep exercise is being used as data source. The aesthetics are defined and stateName is assigned to map_id
dotmap<- dotmap + geom_map(map = us) ## geom_map function is used to obtain a map and us dataframe is passed to the map
dotmap<- dotmap + geom_point(aes(x=longitude,y=latitude,color=Mean)) ## The geom_point function is used to assign longitude & latitude and display the mean for these coordinates as dots and display it.
dotmap
### B. Comment each line of code explaining what it does
## The map_data function is used to obtain the data from the maps package and load it into a dataframe. A new dataframe called us is created.
## A new variable called dotmap is created using the ggplot function and the new_df dataframe created in the prep exercise is being used as data source.
## The aesthetics are defined and stateName is assigned to map_id
## geom_map function is used to obtain a map and us dataframe is passed to the map
## The geom_point function is used to assign longitude & latitude and display the mean for these coordinates as dots.
### C. Add a block comment that criticizes the resulting map. It's not very good (but you need to explain why it is not very good).
### The map is not very good because of the color scale being used, making it difficult to differentiate between the colors.
### Moreover, the map shows very little information as the there is no clear boundary seggregating the various zipcodes and hence it is very hard to tell what is the mean income level for each zipcode.
### Also there are certain points outside the geographical boundary of US in the ocean and we have no information about what these points represent.
## Step 2: Use Tidyverse to create a Data Frame of State-by-State Income
### A. Library the tidyverse() package (if you have not already done so), and then run the following command to create a new data frame:
## Roll the zip-level data up to one row per state: total population and
## population-weighted total income, then a derived per-capita mean.
summaryDF <- new_df %>%
group_by(stateName) %>%
summarize(totalPop=sum(Pop), Income=sum(Mean*Pop))
### B. Add a comment after each line, describing what each line of code does. Make sure to describe how many rows there are in the new dataframe,
### and how the new dataframe was created.
## The above code creates a new dataframe called summaryDF using the state dataframe and then groups by state to obtain Population and total Income for each state.
## The new dataframe will have 50 rows as we are grouping by state and rolling up the data to state level. Hence 1 row for each state.
### C. Create a new variable on this data frame called meanIncome. Divide Income by totalPop to find the average income for each state.
summaryDF$meanIncome <- summaryDF$Income/summaryDF$totalPop ### Dividing total Income column by total Population column to obtain mean Income column.
## Step 3: Create a map of the U.S. showing average income
### A. Create a map visualization, where the color of each state represents the mean income for that state.
#install.packages("mapproj")
library(mapproj)
## Choropleth: one fill colour per state representing its mean income.
statemap<- ggplot(summaryDF, aes(map_id = stateName)) ### Creating a new variable called statemap for ggplot and using summaryDF as data source. Assigning stateName as map_id.
statemap<- statemap + geom_map(map = us,aes(fill=meanIncome)) ### Using geom_map function to create a map with us states data and filling each state with a color to represent the mean income for that state.
statemap<- statemap + expand_limits(x= us$long, y = us$lat) ### We then expand the limits for the map based on longitude and latitude for US states.
statemap<- statemap + coord_map() ### coord_map() ensures that the map has the right proportions and it is not distorted or stretched
statemap<- statemap + ggtitle("state map") ### We add a title to the visualization
statemap
### B. If you need help creating the map, review Chapter 13, and how Figure 13.2 was created.
### C. You will notice some states are grey. In a block comment explain why they are grey
## None of the states are in grey here because there are no missing values for income at state level, i.e. all the NAs have been treated in the prep exercise by replacing the NAs with the median at zipcode level.
## This NA treated dataset has been used for grouping by at state level and hence all the states have their corresponding meanIncome value present and there are no missing values. Hence there are no grey states in the map visualization.
### D. Fix this issue so that all states have an appropriate shade of blue - i.e.,
### generate the map visualization, where the color of each state represents the mean income for that state without any state being grey
### (hint: look if there are NAs).
## The NAs have been treated and the required visualization with all the states having an appropriate shade of blue has been generated in Step A itself.
## Step 4: Show the population of each state on the map
### A. Use the stateNameDF (that was created and used in the prework) and merge that dataframe with the summaryDF dataframe, so that the center.x and center.y information is merged with the summaryDF dataframe information in your new dataframe.
## Join the state centers onto the state summary so each state's total
## population can be drawn as a sized point at its geographic center.
state_summary <- merge(stateNameDF, summaryDF, by = "stateName") ## Merging the 2 dataframes on stateName column to create a resultant dataframe containing statename, income, pop & xy coordinates of center
### B. Create a new map visualization, which is the same visualization created in step 3, but now add a new layer,
### with a point, at the center of each state, where the point size represents the total population of that state.
statemap_new<- ggplot(state_summary, aes(map_id = stateName)) ### Creating a new variable called statemap_new for ggplot and using state_summary as data source. Assigning stateName as map_id.
statemap_new<- statemap_new + geom_map(map = us,aes(fill=meanIncome)) + geom_point(aes(x=center.x,y=center.y,size=totalPop)) ### Using geom_map function to create a map with us states data and filling each state with a color to represent the mean income for that state. Also using geom_point to add a point at the center of each state where the size of the point represents the state population.
statemap_new<- statemap_new + expand_limits(x= us$long, y = us$lat) ### We then expand the limits for the map based on longitude and latitude for US states.
statemap_new<- statemap_new + coord_map()+ggtitle("state map") ### We add a title to the visualization. coord_map() ensures that the map has the right proportions and it is not distorted or stretched
statemap_new
### C. If you need a hint on how to do this visualization, you need to have a center.x and center.y in your summaryDF
### (i.e., you need to create a new summaryDF with center.x and center.y).
## Used the dataframe that was created in step A by merging the stateNameDF & summaryDF
## NOTE(review): the lines below are leftover scratch work. They
## reference objects ("df", and "x" on the subset line) that are never
## defined in this script, and createDataPartition() from the caret
## package, which is never loaded -- sourcing them stops the script with
## an error. They are commented out so the script runs end to end;
## delete once confirmed unneeded.
# dftree<-createDataPartition(df, p=.5,list=FALSE)
# y<-c(1,5,4,8,9,3)
# x<-c('A','b','c','a','b','c')
# qhub = data.frame(y,x)
# lm(y~x,data = qhub)
# subset(df,!duplicated(x))
# t = as.data.frame(df)
# df.qty
|
0f735331bf80fd7977e05b100bb6910df21ead71
|
8af5c25d759ee81073ad5f01b1dfcc6b18f401cb
|
/Assingment1.R
|
3078f1495f2a8887236c03f6b96facf19ffb2ea0
|
[] |
no_license
|
Phienho/RLearning
|
c0c2c212071bc571ddc77e748159fa29d9bbdf9e
|
1601d06d3386e4e6fadece36dd311176b73820f3
|
refs/heads/master
| 2020-04-27T04:35:54.974865
| 2015-10-04T07:05:09
| 2015-10-04T07:05:09
| 35,818,264
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,119
|
r
|
Assingment1.R
|
pollutantmean <- function(directory, pollutant, id = NULL) {
  ## 'directory': path to a folder of CSV monitor files.
  ## 'pollutant': column name to average, e.g. "sulfate" or "nitrate".
  ## 'id': integer vector of monitor indices (positions in the sorted
  ##   file listing). Defaults to all files when NULL; a NULL default
  ##   replaces the fragile nargs() < 3 test of the original and stays
  ##   backward compatible with positional callers.
  ## Returns the unrounded mean of the pollutant across the selected
  ## monitors, ignoring NA values (NaN when nothing is observed).
  files <- list.files(directory, full.names = TRUE)
  if (is.null(id)) {
    ## seq_along() handles an empty directory safely (1:length() would
    ## produce c(1, 0) and fail).
    id <- seq_along(files)
  }
  ## Collect the non-missing pollutant values from each requested file.
  ## lapply + unlist replaces the O(n^2) c() growth of the original loop.
  values <- unlist(lapply(id, function(i) {
    data <- read.csv(files[i])
    column <- data[[pollutant]]
    column[!is.na(column)]
  }), use.names = FALSE)
  if (is.null(values)) {
    values <- numeric(0)  # preserve mean() -> NaN when nothing observed
  }
  mean(values)
}
########################
########################
complete <- function(directory, id_dat) {
  ## 'directory': character scalar, folder containing the monitor CSV files.
  ## 'id_dat': integer vector of monitor numbers (indices into the sorted
  ##           file list).
  ## Returns a data frame with one row per requested monitor:
  ##   id   - the monitor number, in the order requested
  ##   nobs - number of complete rows (no NA in any column) in its file
  files <- list.files(directory, full.names = TRUE)
  ## vapply over the requested ids avoids the 1:L trap (1:0 iterates twice)
  ## when id_dat is empty, and guarantees a numeric result of the right
  ## length; complete.cases() replaces the hand-rolled rowSums(is.na(...)).
  count_obs <- vapply(id_dat, function(i) {
    data <- read.csv(files[i])
    sum(stats::complete.cases(data))
  }, numeric(1))
  data.frame(id = id_dat, nobs = count_obs)
}
#####
corr <- function(directory, threshold = 0) {
  ## 'directory': character scalar, folder containing the monitor CSV files.
  ## 'threshold': minimum number of completely-observed rows a monitor must
  ##              exceed before its correlation is computed. A real default
  ##              argument replaces the original fragile nargs() < 2 check.
  ## Returns a numeric vector (possibly length 0) with the sulfate/nitrate
  ## correlation of every qualifying monitor. NOTE: not rounded.
  files <- list.files(directory, full.names = TRUE)
  cors <- lapply(files, function(f) {
    data <- read.csv(f)
    cc <- data[stats::complete.cases(data), ]
    if (nrow(cc) > threshold) {
      ## cor() on the two vectors yields a scalar directly (the original
      ## produced a 1x1 matrix). The leftover debugging print() was removed.
      stats::cor(cc$sulfate, cc$nitrate)
    } else {
      NULL   # dropped by unlist() below
    }
  })
  ## as.numeric() turns the all-NULL case into numeric(0), matching the
  ## original's empty-result behaviour.
  as.numeric(unlist(cors))
}
################
# Driver: run the assignment functions against the "specdata" directory.
# NOTE(review): hard-coded absolute path; only valid on the author's machine.
setwd("/home/phho/WORK/CODE/COURSERA/RLearning")
getwd()
directory<-"specdata"
id_dat<-1:100
pollutant<-"nitrate"
# Only two arguments are supplied here, so pollutantmean() falls back to
# averaging over every monitor file found in the directory.
m<-pollutantmean(directory, pollutant)
m
# source("pollutantmean.R")
# pollutantmean("specdata", "sulfate", 1:10)
# part2<-complete(directory, id_dat)
# part2
# corvalue<-corr(directory)
# corvalue
|
18ec3d4c149046c9cef169f8693c5c8c286ff45b
|
4eee83253767b218d0348898633f2e3680d25ffb
|
/code/figures/figure_1.R
|
74dae6493fb238e971df36c868350c2478e270d0
|
[
"MIT"
] |
permissive
|
Faculty-Job-Market-Collab/COVID_Job_Market_19-20
|
9a2a4c4fc904d7284d869a6990db526625428ccd
|
3f74f607245b9ae348d95f6221c0e36b117230b8
|
refs/heads/master
| 2023-07-08T07:18:20.031093
| 2021-08-11T05:35:33
| 2021-08-11T05:35:33
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 7,629
|
r
|
figure_1.R
|
#Figure 1. The Pandemic begins, mid-interview portion of the Faculty Job Search
# A/B. Offers Rescinded by field and gender----
# requires get_offers_data
# Panel A: % of offers rescinded by reported gender, bars annotated with the
# raw rescinded count ("r=..."). get_plot_summary() and my_theme_horiz are
# project helpers defined elsewhere; presumably the summary carries
# percent_res and r columns -- TODO confirm against get_plot_summary().
fig1A <- get_plot_summary(res_demo_data, "gender", "covid_offers_rescinded") %>%
ggplot(aes(x = gender, y=percent_res))+
geom_col()+
geom_text(aes(label = r), position = position_dodge(width = 0.9), vjust = -0.25)+
labs(x = "\nReported Gender", y = "\nPercent of Offers Rescinded")+
scale_y_continuous(limits = c(0,20), expand = c(0,0))+
my_theme_horiz
# Panel B: same metric by research category, flipped horizontal so the long
# category labels remain readable.
fig1B <- get_plot_summary(data = res_demo_data,
x = "research_category", y = "covid_offers_rescinded") %>%
ggplot(aes(x = research_category, y = percent_res))+
geom_col()+
geom_text(aes(label = r), position = position_dodge(width = 0.9), hjust = -0.25)+
coord_flip()+
labs(y = "\nPercent of Offers Rescinded", x = "\nReported Research Category")+
scale_y_continuous(limits = c(0,40), expand = c(0,0))+
my_theme
# C. Offers rescinded by race/ethnicity vs visa status----
#race_res_data <- race_data %>%
# group_by(spons_req, race_ethnicity, covid_offers_rescinded) %>%
# summarise(n = n()) %>%
# as_tibble() %>%
# spread(key = covid_offers_rescinded, value = n) %>%
# mutate(total = true + false,
# total = if_else(is.na(total), "0", as.character(total)),
# percent = get_percent(true, total),
# race_ethnicity = paste0(race_ethnicity, "\n(n=", total, ")"),) %>%
# filter(total != 0)
# Panel C: % rescinded by race/ethnicity, bars ordered by descending percent.
fig1C <- get_plot_summary(data = race_data, x = "race_ethnicity", y = "covid_offers_rescinded") %>%
ggplot(aes(x = fct_reorder(race_ethnicity, desc(percent_res)), y = percent_res))+
geom_col()+
geom_text(aes(label = r), position = position_dodge(width = 0.9), hjust = -0.25)+
coord_flip(ylim = c(0,50))+
#facet_wrap(~legal_status, ncol = 1, scales = "free_y")+
labs(y = "\nPercent of Offers Rescinded", x = "\nReported Race/Ethnicity")+
scale_y_continuous(expand = c(0,0))+
my_theme_horiz
# D, E, & F. Comparing applications submitted vs offers received & rescinded----
# Grand totals of R1/PUI applications submitted; NA counts are treated as 0.
submitted <- app_outcomes %>%
select(id, R1_apps_submitted, PUI_apps_submitted) %>%
mutate(R1_apps_submitted = if_else(is.na(R1_apps_submitted), "0", R1_apps_submitted),
PUI_apps_submitted = if_else(is.na(PUI_apps_submitted), "0", PUI_apps_submitted),
total_apps_submitted = as.numeric(R1_apps_submitted) + as.numeric(PUI_apps_submitted)) %>%
summarise(sum_PUI = sum(as.numeric(PUI_apps_submitted)), sum_RI = sum(as.numeric(R1_apps_submitted)), sum_total = sum(total_apps_submitted))
# One row per offer-granting institution, with Carnegie classification and
# region columns joined in from carn_joined_inst.
offers_df <- app_outcomes %>%
select(id, faculty_offers, offer_institutions, covid_offers_rescinded) %>%
filter(faculty_offers >= 1) %>%
left_join(., carn_joined_inst, by = "id") %>%
filter(inst_type == "offer_institutions") %>%
select(id, faculty_offers, inst_type, covid_offers_rescinded, NAME, PUI_RI, US_region, world_region)
# Offers that were later rescinded (capped at the number of offers made),
# deduplicated across join fan-out.
rescinded_df <- offers_df %>%
filter(covid_offers_rescinded >= 1) %>%
filter(covid_offers_rescinded <= faculty_offers) %>%
select(id, faculty_offers, covid_offers_rescinded, PUI_RI, US_region, world_region) %>%
distinct()
#get_rescinded_inst <- function(x, df){
#
# #df <- rescinded_df
#
# id_x <- as.numeric(x)
#
# ea_id <- df %>% filter(id == id_x)
#
# one_offer <- ea_id %>%
# head(n = 1)
#
# return(one_offer)
#}
#D. Compare the % of R1 vs PUI offers made vs offers rescinded----
# Number of offers made per institution type (PUI vs RI).
offers_inst_type <- offers_df %>%
filter(!is.na(PUI_RI)) %>%
select(-inst_type, -covid_offers_rescinded) %>%
group_by(PUI_RI) %>%
summarise(n_offers = n())
# Rescissions per institution type.
# NOTE(review): "total+4" is a hard-coded manual adjustment to the RI count
# -- verify its provenance; also as.tibble() is deprecated (use as_tibble()).
rescinded_inst_type <- rescinded_df %>%
filter(!is.na(PUI_RI)) %>%
group_by(PUI_RI, covid_offers_rescinded) %>% summarise(n = n()) %>%
group_by(PUI_RI) %>% summarise(total = sum(n)) %>% as.tibble() %>%
mutate(n_rescinded = ifelse(PUI_RI == "RI", total+4, total)) %>%
select(-total)
# Join offers and rescissions, compute percent rescinded, and bake the offer
# count into the axis label.
per_PUI_RI_rescinded <- full_join(offers_inst_type, rescinded_inst_type, by = "PUI_RI") %>%
mutate(percent_res = get_percent(n_rescinded, n_offers),
PUI_RI = paste0(PUI_RI, "\n(n=", n_offers, ")"),
r = paste0("r=", n_rescinded))
# Panel D: % of offers rescinded by institution type.
fig1D <- per_PUI_RI_rescinded %>%
ggplot(aes(x = PUI_RI, y = percent_res))+
geom_col()+
geom_text(aes(label = r), position = position_dodge(width = 0.9), vjust = -0.25)+
#coord_flip(ylim = c(0,50))+
#facet_wrap(~legal_status, ncol = 1, scales = "free_y")+
labs(y = "\nPercent of Offers Rescinded\n", x = "\nInstitution Type")+
scale_y_continuous(limits = c(0,20), expand = c(0,0))+
my_theme_horiz
#map_df(.x = id_list,
# .f = get_rescinded_inst, rescinded_df)
# E. Compare world region of institutions applied to and the number of offers rescinded----
#offers_world_region <- offers_df %>%
# filter(!is.na(world_region)) %>%
#select(-inst_type, -covid_offers_rescinded) %>%
# group_by(world_region) %>%
# summarise(n_offers = n())
#
#rescinded_world_region <- rescinded_df %>%
# filter(!is.na(world_region)) %>%
# group_by(world_region, covid_offers_rescinded) %>% summarise(n = n()) %>%
# group_by(world_region) %>% summarise(n_rescinded = sum(n)) %>% as.tibble()
#
#per_world_region_rescinded <- full_join(offers_world_region, rescinded_world_region, by = "world_region") %>%
# mutate(n_rescinded = replace(n_rescinded, is.na(n_rescinded), 0),
# percent_res = get_percent(n_rescinded, n_offers),
# world_region = paste0(world_region, "\n(n=", n_offers, ")"))
#
#fig1E <- per_world_region_rescinded %>%
# ggplot()+
# geom_col(aes(x = fct_reorder(world_region, desc(percent_res)), y = percent_res))+
# coord_flip()+
# labs(y = "\nPercent of Offers Rescinded", x = "\nWorld Region\n",
# caption = "(n = total number of offers made)")+
# scale_y_continuous(expand = c(0,0))+
# my_theme_horiz
#F. Compare US region of institutions applied to and the number of offers rescinded----
# Number of offers made per US region.
offers_US_region <- offers_df %>%
filter(!is.na(US_region)) %>%
select(-inst_type, -covid_offers_rescinded) %>%
group_by(US_region) %>%
summarise(n_offers = n())
# Rescissions per US region. NOTE(review): as.tibble() is deprecated.
rescinded_US_region <- rescinded_df %>%
filter(!is.na(US_region)) %>%
group_by(US_region, covid_offers_rescinded) %>% summarise(n = n()) %>%
group_by(US_region) %>% summarise(n_rescinded = sum(n)) %>% as.tibble()
# Join; regions with no rescissions count as 0 rather than NA.
per_US_region_rescinded <- full_join(offers_US_region, rescinded_US_region, by = "US_region") %>%
mutate(n_rescinded = replace(n_rescinded, is.na(n_rescinded), 0),
percent_res = get_percent(n_rescinded, n_offers),
US_region = paste0(US_region, "\n(n=", n_offers, ")"),
r = paste0("r=", n_rescinded))
# Final panel (object named fig1F, placed as label "E" below): % rescinded
# by US region, ordered by descending percent.
fig1F <- per_US_region_rescinded %>%
ggplot(aes(x = fct_reorder(US_region, desc(percent_res)), y = percent_res))+
geom_col()+
geom_text(aes(label = r), position = position_dodge(width = 0.9), hjust = -0.25)+
coord_flip()+
labs(y = "\nPercent of Offers Rescinded", x = "\nUS Region")+
scale_y_continuous(limits = c(0,20), expand = c(0,0))+
my_theme_horiz
#build figure 1----
# Assemble the panels into a 3-row grid (cowplot::plot_grid, presumably --
# loaded elsewhere) and save to figures/Figure_1.png.
fig1AB <- plot_grid(fig1A, fig1B, labels = c('A', 'B'),
label_size = 18,
nrow = 1, rel_widths = c(.5, 1))
fig1DE <- plot_grid(fig1D, fig1F, labels = c('D', 'E'),
label_size = 18,
nrow = 1, rel_widths = c(.5, 1))
plot_grid(fig1AB, fig1C, fig1DE,
labels = c('', 'C', ''),
label_size = 18, rel_heights = c(1, 1.25, 1),
nrow = 3)
ggsave("Figure_1.png", device = 'png', units = "in", scale = 1.75,
path = 'figures', width = 7.5, height = 6.8)
|
def3d0fc912dbf759abafee816132b73e16fdae7
|
e286830b40628a7b920c9977f1995a340e48ea46
|
/R/remove_articulation_duplicates.R
|
5a355182f108799a8ce99c57084ad7863a7f4d43
|
[] |
no_license
|
cran/rsetse
|
48871acf46ef37e53bf5741dcd73bc467fefe7b3
|
7e412f53e2e2ca38871ec476424a32aafa0075b7
|
refs/heads/master
| 2023-05-25T16:50:57.098677
| 2021-06-11T09:00:02
| 2021-06-11T09:00:02
| 315,994,456
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,516
|
r
|
remove_articulation_duplicates.R
|
#' Remove articulation duplicates
#'
#' Helper used to aggregate the rows belonging to articulation nodes after
#' embedding, so that the returned data frame has exactly one row per
#' articulation node.
#'
#' Works directly on vectors/data frames to stay fast and memory-light.
#'
#' @param node_embeddings A dataframe, produced after running fix_z_to_origin;
#'   must contain the columns node, Iter, force, elevation, net_tension and
#'   velocity.
#' @param ArticulationVect Vector of articulation node identifiers.
#'
#' @return A data frame with one row per articulation node: force, net_tension
#'   and velocity are summed across the node's duplicate rows, while Iter and
#'   elevation are taken from the node's first row. Articulation_node is TRUE
#'   for every row.
#' @noRd
remove_articulation_duplicates <- function(node_embeddings, ArticulationVect) {
  # Restrict once, up front, to the rows that belong to articulation nodes.
  embeds <- node_embeddings[(node_embeddings$node %in% ArticulationVect), ]
  n <- length(ArticulationVect)
  iter_vect <- force_vect <- elevation_vect <- net_tension_vect <- velocity_vect <- rep(NA, n)
  # seq_len() (rather than 1:n) is safe when ArticulationVect is empty.
  for (i in seq_len(n)) {
    # Compute this node's row subset once instead of five times per loop turn.
    rows <- embeds[embeds$node == ArticulationVect[i], ]
    iter_vect[i] <- rows$Iter[1]
    force_vect[i] <- sum(rows$force)
    elevation_vect[i] <- rows$elevation[1]
    net_tension_vect[i] <- sum(rows$net_tension)
    velocity_vect[i] <- sum(rows$velocity)
  }
  data.frame(
    node = ArticulationVect,
    Iter = iter_vect,
    force = force_vect,
    elevation = elevation_vect,
    net_tension = net_tension_vect,
    velocity = velocity_vect,
    # rep() keeps the zero-node case a valid 0-row data frame.
    Articulation_node = rep(TRUE, n))
}
|
61e232fc7fdd645f81194d63563d7eea72378eaf
|
6be759453ef1cf8a1d74752f4b04652c5c19281e
|
/src/get_interesting_users.R
|
8e1bcc4729f7f33070b06d2b3d8112872db4be2c
|
[] |
no_license
|
zakroiteduet/vk_music_preference_analysis
|
3f480537a4e05ade9d5944168398d301b9624842
|
9192a7398e782c65f35ad843cd49f5170ca9d150
|
refs/heads/master
| 2021-01-10T01:40:55.902633
| 2016-01-12T20:10:19
| 2016-01-12T20:10:19
| 49,527,047
| 0
| 0
| null | null | null | null |
WINDOWS-1251
|
R
| false
| false
| 4,160
|
r
|
get_interesting_users.R
|
# Reference set of "marker" artists: a user is considered interesting when
# their playlist contains at least one of these names, matched after the
# lower-casing / whitespace normalisation applied further down the script.
special_artists <- c("coil",
"popol vuh",
"oneohtrix point never",
"colin stetson",
"robert wyatt",
"soft machine",
"psychic tv",
"embryo",
"glenn branca",
"fuck buttons",
"dean blunt",
"александр башлачев",
"tim hecker",
"laurel halo",
"nicolas jaar",
"hype williams",
"inga copeland",
"mulatu astatke",
"john coltrane",
"boredoms",
"alice coltrane",
"king crimson",
"eno",
"brian eno",
"23 skidoo",
"deepchord presents echospace",
"ariel pink",
"gas",
"аукцыон",
"звуки му",
"owen pallett",
"pharoah sanders",
"darkside",
"this heat",
"the field",
"autechre",
"tuxedomoon",
"boards of canada",
"mahavishnu orchestra",
"shpongle",
"tindersticks")
# Open the SQLite database mapping user ids to university/demographic data.
uid_con <- dbConnect(SQLite(), dbname="data/uids.db")
df.interesting <- data.frame()
# One pass per selected university id; try() keeps a broken or missing
# per-university database from aborting the loop (errors are discarded).
for (uni_id in c(297, 269, 250, 128, 304, 241, 240)) {
try({
print(uni_id)
query <- sprintf("SELECT * FROM uid_db WHERE uni_id = %d", uni_id)
dt.uids <- data.table(
dbGetQuery(uid_con, query))
# NOTE(review): hard-coded Windows path to the per-university databases.
dbname <- paste("C:/Databases/VK_data/",uni_id, ".db", sep = "")
pllst_con <- dbConnect(SQLite(), dbname=dbname)
table_names <- dbListTables(pllst_con)
# The playlist table name differs between database versions.
if ("pllst_db" %in% table_names) {
df.pllst <- dbGetQuery(pllst_con, "SELECT * FROM pllst_db")
}else{
df.pllst <- dbGetQuery(pllst_con, "SELECT * FROM pllst_counts_db")
}
# Normalise artist names: re-encode UTF-8 -> WINDOWS-1251 (the script file's
# own encoding), lower-case, collapse/trim whitespace, strip trailing dashes.
df.pllst$artist <- tolower(iconv(df.pllst$artist,"UTF-8","WINDOWS-1251"))
df.pllst$artist <- str_replace(df.pllst$artist, pattern = "\\s{2,}", " ")
df.pllst$artist <- str_replace(df.pllst$artist, pattern = "^\\s+", "")
df.pllst$artist <- str_replace(df.pllst$artist, pattern = "-+\\s*$", "")
df.pllst$artist <- str_replace(df.pllst$artist, pattern = "\\s+$", "")
# NOTE(review): str_replace only replaces the FIRST match per string;
# presumably str_replace_all was intended for this "ё" -> "е" substitution.
df.pllst$artist <- str_replace(df.pllst$artist, pattern = "ё", "е")
df.pllst <- df.pllst[,c("uid", "artist")]
dt.pllst <- data.table(df.pllst)
# Keep one row per (uid, artist) pair, then attach user demographics.
dt.pllst <- dt.pllst[,list(artist=unique(artist)),
by=list(uid)]
dt.pllst <- merge(dt.pllst, dt.uids, by="uid", all.x=T, all.y=F)
# Users listening to at least one marker artist are kept with ALL their rows.
uids <- dt.pllst$uid[dt.pllst$artist %in% special_artists]
dt.interesting_part <- dt.pllst[dt.pllst$uid%in%uids,]
df.interesting_part <- as.data.frame(dt.interesting_part)
df.interesting <- rbind(df.interesting, df.interesting_part)
dbDisconnect(pllst_con)
})
}
# Filter the pooled users: sex == 1 (coding not documented here -- TODO
# confirm), age >= 20, known artist, then attach university data and keep
# only city_id == 1 (city coding also undocumented; verify).
df.interesting <- df.interesting[df.interesting$sex==1,]
df.interesting <- df.interesting[df.interesting$age>=20,]
df.interesting <- df.interesting[!is.na(df.interesting$artist),]
df.interesting <- merge(df.interesting, uni.data, by="uni_id",all.x=T,all.y=F)
df.interesting <- df.interesting[!is.na(df.interesting$city_id),]
df.interesting <- df.interesting[df.interesting$city_id == 1,]
#=============================
# Score each user by how many marker artists they listen to, rank by score
# then university mean score, and keep users aged <= 24 in city 1 whose
# university mean_score >= 69. (Variable names misspell "interesting".)
dt.intersting <- data.table(df.interesting)
dt.intersting.stat <- dt.intersting[,list(score=length(artist[artist%in%special_artists])),
by=list(uid, city, city_id, uni, mean_score, uni_profile,age)]
dt.intersting.stat <- dt.intersting.stat[order(dt.intersting.stat$score,dt.intersting.stat$mean_score,decreasing = T)]
dt.intersting.stat <- dt.intersting.stat[dt.intersting.stat$age<=24 & dt.intersting.stat$city_id==1 & dt.intersting.stat$mean_score>=69,]
#=============================
|
a353e834fbc96646f7c403836b70b223f1459023
|
c1a4c35179dd8862fbc40b09eea92e3f491a81e3
|
/R/functions.R
|
eab65f294966439212ffb8322ed1474ae6e0347b
|
[
"MIT"
] |
permissive
|
sdhutchins/documenteR
|
3a5f16dbcbdb2b0369572d459e40fa28abb5e74d
|
461d25295ec2bb58a4e7ad95311d4b53a07c670d
|
refs/heads/master
| 2020-03-27T17:11:12.483900
| 2019-05-07T19:02:09
| 2019-05-07T19:02:09
| 146,833,831
| 2
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,387
|
r
|
functions.R
|
#' @title Document an R object
#'
#' @description This function takes the name of an object (either an R function or an R
#' data.frame) and replaces it with skeleton roxygen2 documentation. It is used in the \code{documenter_addin()} function which is the installed R addin.
#'
#' For \strong{functions}, an empty \code{@param} is generated for each of the funciton's arguments.
#' For \strong{dataframes}, a full \code{\\description} block is generated from column names
#'
#' @note The addin will automatically source the file that the function or data is in.
#'
#' @param objname A character string naming an R function or data.frame.
#'
#' @examples
#' documenter("lm")
#' #' @title FUNCTION_TITLE
#' #'
#' #' @description FUNCTION_DESCRIPTION
#' #'
#' #' @param formula DESCRIPTION.
#' #' @param data DESCRIPTION.
#' #' @param subset DESCRIPTION.
#' #' @param weights DESCRIPTION.
#' #' @param na.action DESCRIPTION.
#' #' @param method DESCRIPTION.
#' #' @param model DESCRIPTION.
#' #' @param x DESCRIPTION.
#' #' @param y DESCRIPTION.
#' #' @param qr DESCRIPTION.
#' #' @param singular.ok DESCRIPTION.
#' #' @param contrasts DESCRIPTION.
#' #' @param offset DESCRIPTION.
#' #' @param ... DESCRIPTION.
#' #'
#' #' @return RETURN DESCRIPTION
#' #' @export
#'
#' documenter("iris")
#' #' DATASET_TITLE
#' #'
#' #' DATASET_DESCRIPTION
#' #'
#' #' @format A data frame with 150 rows and 5 variables:
#' #' \describe{
#' #' \item{\code{Sepal.Length}}{double. DESCRIPTION.}
#' #' \item{\code{Sepal.Width}}{double. DESCRIPTION.}
#' #' \item{\code{Petal.Length}}{double. DESCRIPTION.}
#' #' \item{\code{Petal.Width}}{double. DESCRIPTION.}
#' #' \item{\code{Species}}{integer. DESCRIPTION.}
#' #' }
#'
#' @export
documenter <- function(objname) {
  # Resolve the named object, then dispatch on its type with guard clauses.
  target <- get(objname)
  if (is.function(target)) {
    return(document_function(target, label = objname))
  }
  if (is.data.frame(target)) {
    return(document_data(obj = target, label = objname))
  }
  # Anything else is unsupported: report the offending class to the user.
  stop(objname, " is a ", class(target), ". documenter_addin currently supports only functions and data.frames.")
}
#' @rdname documenter
#'
#' @export
documenter_addin <- function() {
  # Grab the editor state from RStudio, then source the active file so the
  # selected object name resolves in this session.
  doc_context <- rstudioapi::getActiveDocumentContext()
  source(doc_context$path)
  # The selection may be quoted in the editor; strip the quotes first.
  object_name <- strpquotes(doc_context$selection[[1]]$text)
  rstudioapi::insertText(text = documenter(object_name))
}
strpquotes <- function(t) {
  # Strip every double or single quote character from the input string(s).
  quote_chars <- "[\"']"
  gsub(quote_chars, "", t)
}
document_data <- function(obj, label) {
  # Map each column of the data frame to its storage type, keeping names.
  col_types <- vapply(obj, typeof, FUN.VALUE = character(1))
  # One roxygen \item line per column, joined with newlines.
  item_lines <- paste0("#\' \\item{\\code{", names(col_types), "}}{", col_types, ". DESCRIPTION.}", collapse = "\n")
  # Assemble the full dataset-documentation skeleton around the item lines.
  paste0("
#\' DATASET_TITLE
#\'
#\' DATASET_DESCRIPTION
#\'
#\' @format A data frame with ", nrow(obj), " rows and ", length(col_types), " variables:
#\' \\describe{
", item_lines, "
#\' }
\"", label, "\"")
}
document_function <- function(obj, label) {
  # Each formal argument name drives one @param line.
  arg_names <- names(formals(obj))
  param_lines <- paste0("#\' @param ", arg_names, " DESCRIPTION.", collapse = "\n")
  # Assemble the full function-documentation skeleton around the params.
  paste0("
#\' @title FUNCTION_TITLE
#\'
#\' @description FUNCTION_DESCRIPTION
#\'
", param_lines, "
#\'
#\' @return RETURN_DESCRIPTION
#\' @export
", label)
}
|
70a5f79d2d486df3a0c02595e0b496634ed30b4a
|
cca9eadf050f17e2e9775cdf3e7c00dbc0a5ecd4
|
/plot3.R
|
5c219ff6f61c7fcb81c3e7bf2478cf9f1193b07f
|
[] |
no_license
|
venkyp8/ExData_Plotting1
|
873f8d83fe2bab2822a25dea0fafa900b1df72a4
|
260a7bd2362c58af67b7dd99b0edfaa0f32cbe85
|
refs/heads/master
| 2021-05-23T15:58:51.095411
| 2020-04-05T01:27:28
| 2020-04-05T01:27:28
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 871
|
r
|
plot3.R
|
# Exploratory Data Analysis course, plot 3: energy sub-metering over two days.
# NOTE(review): hard-coded working directory; the plot is drawn on the default
# device (no png()/dev.off() pair visible here).
setwd("~/Desktop/Exercises/Course4")
# "?" marks missing values in the source file; Date/Time are read as text.
Data<-read.table("household_power_consumption.txt",
header=TRUE, sep=";",
na.strings = "?",
colClasses = c('character','character','numeric','numeric','numeric','numeric','numeric','numeric','numeric'))
# Keep only the two target days (dates are in d/m/yyyy format).
Data<-Data[Data$Date %in% c("1/2/2007", "2/2/2007"),]
# Combine the separate Date and Time columns into one timestamp column.
DateTime<-strptime(paste(Data$Date, Data$Time, sep=" "), format = "%d/%m/%Y %H:%M:%S")
Data<-cbind(DateTime, Data)
#plot 3
x<-Data$DateTime
y1<-Data$Sub_metering_1
y2<-Data$Sub_metering_2
y3<-Data$Sub_metering_3
collines<-c("black", "red", "blue")
labels<-c("Sub_metering_1", "Sub_metering_2", "Sub_metering_3")
# Base line plot of meter 1, with meters 2 and 3 overlaid in red and blue.
plot(y1~x, type="l", col=collines[1], ylab = "Energy sub metering")
lines(y2~x, type="l", col=collines[2])
lines(y3~x, type="l", col=collines[3])
legend("topright", legend = labels, col = collines, lty="solid")
|
951986ee8b1d612997e6e3cc34725540e1147187
|
bdd21aaffe29f6e9790f98e1b90207bc1b0776f3
|
/PCA.R
|
f4177e60edd2c2e58d2a355378b4fd753bdd1797
|
[] |
no_license
|
Green-Guo/World-of-2050
|
4f946a8c303c7b72c6bdfa766aa2b8d75d94f542
|
0f3272f1cb1ea42405246e810a2aaa2ab1abe761
|
refs/heads/master
| 2020-06-10T07:59:43.230789
| 2016-12-08T23:43:20
| 2016-12-08T23:43:20
| 75,984,310
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 12,122
|
r
|
PCA.R
|
# NOTE(review): hard-coded Windows paths; this script only runs on the
# original author's machine.
ys<-read.csv("C:/Users/Sugar/Documents/MSBA/ADV/Modeling Project/Y.csv")
xs<-read.csv('C:/Users/Sugar/Documents/MSBA/ADV/Modeling Project/result.csv')
library(xtable)
# NOTE(review): `list` shadows base::list() and (below) `var` shadows
# stats::var for the remainder of the script -- risky name choices.
list<-list()
rmse_list<-list()
library(fpp)
# One iteration per country: transpose that country's indicator rows into a
# year-by-indicator table, run PCA with varimax rotation, then (further down)
# fit linear models on the selected features.
for (i in unique(xs$Country.Name)){
features<-list()
print (i)
xx<-subset.data.frame(xs, Country.Name==i)
# Transpose so rows become years and columns become indicators.
X<-as.data.frame(t(xx))
X<-X[,1:ncol(X)]
new_header<-X[1,]
X<-X[2:nrow(X),]
#colnames(X, new_header)
# Save y series in memory for later modeling
income<-subset.data.frame(ys, Country==i)[3]
age<-subset.data.frame(ys, Country==i)[4]
# PCA
# Drop zero-variance columns first; rows 2:22 are presumably the training
# years -- TODO confirm against the source CSV layout.
d<-X[,apply(X[2:22,], 2, var, na.rm=TRUE) != 0]
d<-as.data.frame(d[2:22,])
dd=apply(d, 2, as.numeric)
pca<-prcomp(dd, scale=T, center=T)
# NOTE(review): this reassigns `var`; any later apply(..., var, ...) call in
# this script will pick up this varimax object instead of stats::var.
var<-varimax(pca$rotation)
plot( cumsum((pca$sdev)^2) / sum(pca$sdev^2), main=paste(i,'Scree Plot'), xlab='Components')
# Get 3 factors from PC1, 2 from PC2, 1 from PC3-5
list<-append(list, paste(i, 'Extracted Features:', xs[2][names(d)[which.max(abs(table$PC1))],] ,'|', xs[2][names(d)[match(tail(sort(abs(table$PC1)),3), abs(table$PC1))[2]],] ,'|', xs[2][names(d)[match(tail(sort(abs(table$PC1)),3), abs(table$PC1))[1]],], '|', xs[2][names(d)[match(tail(sort(abs(table$PC2)),2+length(intersect(x = match(tail(sort(abs(table$PC2)),2), abs(table$PC2)), y = match(tail(sort(abs(table$PC1)),3), abs(table$PC1))))), abs(table$PC2))[match(tail(sort(abs(table$PC2)),2+length(intersect(x = match(tail(sort(abs(table$PC2)),2), abs(table$PC2)), y = match(tail(sort(abs(table$PC1)),3), abs(table$PC1))))), abs(table$PC2))!=ifelse(length(intersect(x = match(tail(sort(abs(table$PC2)),2), abs(table$PC2)), y = match(tail(sort(abs(table$PC1)),3), abs(table$PC1))))==0, 0, intersect(x = match(tail(sort(abs(table$PC2)),2), abs(table$PC2)), y = match(tail(sort(abs(table$PC1)),3), abs(table$PC1))))][1]],] , '|', xs[2][names(d)[match(tail(sort(abs(table$PC2)),2+length(intersect(x = match(tail(sort(abs(table$PC2)),2), abs(table$PC2)), y = match(tail(sort(abs(table$PC1)),3), abs(table$PC1))))), abs(table$PC2))[match(tail(sort(abs(table$PC2)),2+length(intersect(x = match(tail(sort(abs(table$PC2)),2), abs(table$PC2)), y = match(tail(sort(abs(table$PC1)),3), abs(table$PC1))))), abs(table$PC2))!=ifelse(length(intersect(x = match(tail(sort(abs(table$PC2)),2), abs(table$PC2)), y = match(tail(sort(abs(table$PC1)),3), abs(table$PC1))))==0, 0, intersect(x = match(tail(sort(abs(table$PC2)),2), abs(table$PC2)), y = match(tail(sort(abs(table$PC1)),3), abs(table$PC1))))][2]],], '|', xs[2][names(d)[match(tail(sort(abs(table$PC3)),1+length(Reduce(intersect, list(match(tail(sort(abs(table$PC1)),3), abs(table$PC1)),match(tail(sort(abs(table$PC3)),1), abs(table$PC3)),match(tail(sort(abs(table$PC2)),2), abs(table$PC2)))))), abs(table$PC3))[match(tail(sort(abs(table$PC3)),1+length(Reduce(intersect, list(match(tail(sort(abs(table$PC1)),3), 
abs(table$PC1)),match(tail(sort(abs(table$PC3)),1), abs(table$PC3)),match(tail(sort(abs(table$PC2)),2), abs(table$PC2)))))), abs(table$PC3))!=ifelse(length(Reduce(intersect, list(match(tail(sort(abs(table$PC1)),3), abs(table$PC1)),match(tail(sort(abs(table$PC3)),1), abs(table$PC3)),match(tail(sort(abs(table$PC2)),2), abs(table$PC2)))))==0, 0, Reduce(intersect, list(match(tail(sort(abs(table$PC1)),3), abs(table$PC1)),match(tail(sort(abs(table$PC3)),1), abs(table$PC3)),match(tail(sort(abs(table$PC2)),2), abs(table$PC2)))))][1]],], '|', xs[2][names(d)[match(tail(sort(abs(table$PC4)),1+length(Reduce(intersect, list(match(tail(sort(abs(table$PC1)),3), abs(table$PC1)),match(tail(sort(abs(table$PC3)),1), abs(table$PC3)),match(tail(sort(abs(table$PC4)),1), abs(table$PC4)),match(tail(sort(abs(table$PC2)),2), abs(table$PC2)))))), abs(table$PC4))[match(tail(sort(abs(table$PC4)),1+length(Reduce(intersect, list(match(tail(sort(abs(table$PC1)),3), abs(table$PC1)),match(tail(sort(abs(table$PC3)),1), abs(table$PC3)),match(tail(sort(abs(table$PC4)),1), abs(table$PC4)),match(tail(sort(abs(table$PC2)),2), abs(table$PC2)))))), abs(table$PC4))!=ifelse(length(Reduce(intersect, list(match(tail(sort(abs(table$PC1)),3), abs(table$PC1)),match(tail(sort(abs(table$PC3)),1), abs(table$PC3)),match(tail(sort(abs(table$PC4)),1), abs(table$PC4)),match(tail(sort(abs(table$PC2)),2), abs(table$PC2)))))==0, 0, Reduce(intersect, list(match(tail(sort(abs(table$PC1)),3), abs(table$PC1)),match(tail(sort(abs(table$PC3)),1), abs(table$PC3)),match(tail(sort(abs(table$PC4)),1), abs(table$PC4)),match(tail(sort(abs(table$PC2)),2), abs(table$PC2)))))][1]],], '|', xs[2][names(d)[match(tail(sort(abs(table$PC5)),1+length(Reduce(intersect, list(match(tail(sort(abs(table$PC1)),3), abs(table$PC1)),match(tail(sort(abs(table$PC3)),1), abs(table$PC3)),match(tail(sort(abs(table$PC4)),1), abs(table$PC4)), match(tail(sort(abs(table$PC5)),1), abs(table$PC5)), match(tail(sort(abs(table$PC2)),2), abs(table$PC2)))))), 
abs(table$PC5))[match(tail(sort(abs(table$PC5)),1+length(Reduce(intersect, list(match(tail(sort(abs(table$PC1)),3), abs(table$PC1)),match(tail(sort(abs(table$PC3)),1), abs(table$PC3)),match(tail(sort(abs(table$PC4)),1), abs(table$PC4)), match(tail(sort(abs(table$PC5)),1), abs(table$PC5)), match(tail(sort(abs(table$PC2)),2), abs(table$PC2)))))), abs(table$PC5))!=ifelse(length(Reduce(intersect, list(match(tail(sort(abs(table$PC1)),3), abs(table$PC1)),match(tail(sort(abs(table$PC3)),1), abs(table$PC3)),match(tail(sort(abs(table$PC4)),1), abs(table$PC4)), match(tail(sort(abs(table$PC5)),1), abs(table$PC5)), match(tail(sort(abs(table$PC2)),2), abs(table$PC2)))))==0, 0, Reduce(intersect, list(match(tail(sort(abs(table$PC1)),3), abs(table$PC1)),match(tail(sort(abs(table$PC3)),1), abs(table$PC3)),match(tail(sort(abs(table$PC4)),1), abs(table$PC4)), match(tail(sort(abs(table$PC5)),1), abs(table$PC5)), match(tail(sort(abs(table$PC2)),2), abs(table$PC2)))))][1]],]))
features<-paste(which.max(abs(table$PC1)) , match(tail(sort(abs(table$PC1)),3), abs(table$PC1))[2] , match(tail(sort(abs(table$PC1)),3), abs(table$PC1))[1], match(tail(sort(abs(table$PC2)),2+length(intersect(x = match(tail(sort(abs(table$PC2)),2), abs(table$PC2)), y = match(tail(sort(abs(table$PC1)),3), abs(table$PC1))))), abs(table$PC2))[match(tail(sort(abs(table$PC2)),2+length(intersect(x = match(tail(sort(abs(table$PC2)),2), abs(table$PC2)), y = match(tail(sort(abs(table$PC1)),3), abs(table$PC1))))), abs(table$PC2))!=ifelse(length(intersect(x = match(tail(sort(abs(table$PC2)),2), abs(table$PC2)), y = match(tail(sort(abs(table$PC1)),3), abs(table$PC1))))==0, 0, intersect(x = match(tail(sort(abs(table$PC2)),2), abs(table$PC2)), y = match(tail(sort(abs(table$PC1)),3), abs(table$PC1))))][1], match(tail(sort(abs(table$PC2)),2+length(intersect(x = match(tail(sort(abs(table$PC2)),2), abs(table$PC2)), y = match(tail(sort(abs(table$PC1)),3), abs(table$PC1))))), abs(table$PC2))[match(tail(sort(abs(table$PC2)),2+length(intersect(x = match(tail(sort(abs(table$PC2)),2), abs(table$PC2)), y = match(tail(sort(abs(table$PC1)),3), abs(table$PC1))))), abs(table$PC2))!=ifelse(length(intersect(x = match(tail(sort(abs(table$PC2)),2), abs(table$PC2)), y = match(tail(sort(abs(table$PC1)),3), abs(table$PC1))))==0, 0, intersect(x = match(tail(sort(abs(table$PC2)),2), abs(table$PC2)), y = match(tail(sort(abs(table$PC1)),3), abs(table$PC1))))][2], match(tail(sort(abs(table$PC3)),1+length(Reduce(intersect, list(match(tail(sort(abs(table$PC1)),3), abs(table$PC1)),match(tail(sort(abs(table$PC3)),1), abs(table$PC3)),match(tail(sort(abs(table$PC2)),2), abs(table$PC2)))))), abs(table$PC3))[match(tail(sort(abs(table$PC3)),1+length(Reduce(intersect, list(match(tail(sort(abs(table$PC1)),3), abs(table$PC1)),match(tail(sort(abs(table$PC3)),1), abs(table$PC3)),match(tail(sort(abs(table$PC2)),2), abs(table$PC2)))))), abs(table$PC3))!=ifelse(length(Reduce(intersect, 
list(match(tail(sort(abs(table$PC1)),3), abs(table$PC1)),match(tail(sort(abs(table$PC3)),1), abs(table$PC3)),match(tail(sort(abs(table$PC2)),2), abs(table$PC2)))))==0, 0, Reduce(intersect, list(match(tail(sort(abs(table$PC1)),3), abs(table$PC1)),match(tail(sort(abs(table$PC3)),1), abs(table$PC3)),match(tail(sort(abs(table$PC2)),2), abs(table$PC2)))))][1],
match(tail(sort(abs(table$PC4)),1+length(Reduce(intersect, list(match(tail(sort(abs(table$PC1)),3), abs(table$PC1)),match(tail(sort(abs(table$PC3)),1), abs(table$PC3)),match(tail(sort(abs(table$PC4)),1), abs(table$PC4)),match(tail(sort(abs(table$PC2)),2), abs(table$PC2)))))), abs(table$PC4))[match(tail(sort(abs(table$PC4)),1+length(Reduce(intersect, list(match(tail(sort(abs(table$PC1)),3), abs(table$PC1)),match(tail(sort(abs(table$PC3)),1), abs(table$PC3)),match(tail(sort(abs(table$PC4)),1), abs(table$PC4)),match(tail(sort(abs(table$PC2)),2), abs(table$PC2)))))), abs(table$PC4))!=ifelse(length(Reduce(intersect, list(match(tail(sort(abs(table$PC1)),3), abs(table$PC1)),match(tail(sort(abs(table$PC3)),1), abs(table$PC3)),match(tail(sort(abs(table$PC4)),1), abs(table$PC4)),match(tail(sort(abs(table$PC2)),2), abs(table$PC2)))))==0, 0, Reduce(intersect, list(match(tail(sort(abs(table$PC1)),3), abs(table$PC1)),match(tail(sort(abs(table$PC3)),1), abs(table$PC3)),match(tail(sort(abs(table$PC4)),1), abs(table$PC4)),match(tail(sort(abs(table$PC2)),2), abs(table$PC2)))))][1],
match(tail(sort(abs(table$PC5)),1+length(Reduce(intersect, list(match(tail(sort(abs(table$PC1)),3), abs(table$PC1)),match(tail(sort(abs(table$PC3)),1), abs(table$PC3)),match(tail(sort(abs(table$PC4)),1), abs(table$PC4)), match(tail(sort(abs(table$PC5)),1), abs(table$PC5)), match(tail(sort(abs(table$PC2)),2), abs(table$PC2)))))), abs(table$PC5))[match(tail(sort(abs(table$PC5)),1+length(Reduce(intersect, list(match(tail(sort(abs(table$PC1)),3), abs(table$PC1)),match(tail(sort(abs(table$PC3)),1), abs(table$PC3)),match(tail(sort(abs(table$PC4)),1), abs(table$PC4)), match(tail(sort(abs(table$PC5)),1), abs(table$PC5)), match(tail(sort(abs(table$PC2)),2), abs(table$PC2)))))), abs(table$PC5))!=ifelse(length(Reduce(intersect, list(match(tail(sort(abs(table$PC1)),3), abs(table$PC1)),match(tail(sort(abs(table$PC3)),1), abs(table$PC3)),match(tail(sort(abs(table$PC4)),1), abs(table$PC4)), match(tail(sort(abs(table$PC5)),1), abs(table$PC5)), match(tail(sort(abs(table$PC2)),2), abs(table$PC2)))))==0, 0, Reduce(intersect, list(match(tail(sort(abs(table$PC1)),3), abs(table$PC1)),match(tail(sort(abs(table$PC3)),1), abs(table$PC3)),match(tail(sort(abs(table$PC4)),1), abs(table$PC4)), match(tail(sort(abs(table$PC5)),1), abs(table$PC5)), match(tail(sort(abs(table$PC2)),2), abs(table$PC2)))))][1])
# Rebuild the selected raw features (train rows 2:22, test rows 23:62) from
# the PCA-chosen column indices accumulated in `features` above.
green<-NULL
green_test<-NULL
nom<-NULL
# NOTE(review): `var` here is no longer stats::var -- it was overwritten by
# the varimax() result above, so this apply() call is almost certainly a
# latent bug; verify before relying on d_test.
d_test<-X[,apply(X[2:22,], 2, var, na.rm=TRUE) != 0]
d_test<-as.data.frame(d_test[23:62,])
dd_test=apply(d_test, 2, as.numeric)
# NOTE(review): the inner loop index `i` shadows the country loop index `i`.
for (i in unique(as.numeric(unlist(strsplit(features, "\\s+"))))){
# Grow the train/test matrices one selected feature (as a row) at a time,
# labelling rows with the indicator names.
green <- rbind(green, dd[,i])
nom <- cbind(nom, as.character(xs[2][names(d)[i],]))
row.names(green) <- nom
green_test <- rbind(green_test, dd_test[,i])
row.names(green_test) <- nom
}
# Transpose back so observations are rows and features are columns.
green <- t(green)
green_test <- t(green_test)
# MODELING
## Always convert model inputs to dataframes!!!!!!!!!!!!!!!!!!!!!!!
x_train<-as.data.frame(green)
x_test<-as.data.frame(green_test)
y_train<-income[1:21,]
y_test<-income[22:nrow(income),]
xxx<-cbind(y_train, x_train)
y_train_age<-age[1:21,]
xxxx<-cbind(y_train_age, x_train)
y_test_age<-age[22:nrow(age),]
#arima_income <- auto.arima(y = y_train, xreg = x_train)
#arima_age <- auto.arima(y = y_train_age, xreg = x_train)
# Despite the "arima" names these are ordinary linear models (lm).
arima_income<-lm(formula = y_train~.,data = xxx)
arima_age<-lm(formula = y_train_age~.,data = xxxx)
pred_income<-predict(arima_income, newdata = x_test)
pred_age<-predict(arima_age, newdata = x_test)
# Collect per-country RMSE: income in column 1, age in column 2.
rmses<-matrix(0,1,2)
rmse_income <- sqrt( mean( (pred_income[1:5] - y_test)^2 , na.rm = TRUE ) )
rmse_age <- sqrt( mean( (pred_age[1:5] - y_test_age)^2 , na.rm = TRUE ) )
rmses[1,1]<-rmse_income
rmses[1,2]<-rmse_age
rmse_list <- rbind(rmse_list, rmses)
}
|
aea519a3f9d10189bdff5f7620334f89f024476f
|
9aafde089eb3d8bba05aec912e61fbd9fb84bd49
|
/codeml_files/newick_trees_processed/8484_0/rinput.R
|
a1c55854f0aaed55d9d932221d6ac58e845b8b7a
|
[] |
no_license
|
DaniBoo/cyanobacteria_project
|
6a816bb0ccf285842b61bfd3612c176f5877a1fb
|
be08ff723284b0c38f9c758d3e250c664bbfbf3b
|
refs/heads/master
| 2021-01-25T05:28:00.686474
| 2013-03-23T15:09:39
| 2013-03-23T15:09:39
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 135
|
r
|
rinput.R
|
# Unroot the phylogenetic tree stored in 8484_0.txt and write the result out.
library(ape)
phylo_tree <- read.tree("8484_0.txt")
write.tree(unroot(phylo_tree), file = "8484_0_unrooted.txt")
|
7a86c25b91b28cf375cc5753caf5e184882ff700
|
02b79365ee1bd0b04a866f07ca387c2454f4031f
|
/sb/global.R
|
cd2853821d6bccb900069fdee46a429c89764806
|
[] |
no_license
|
MikeMorris89/sbdr
|
5e8b66d2b2bd613256a66c8533d68941e1f979e4
|
06a5aa4c39c714a35f1b5539f71b0986d824abe3
|
refs/heads/master
| 2020-07-03T05:15:17.558898
| 2016-11-30T07:01:58
| 2016-11-30T07:01:58
| 74,194,794
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 933
|
r
|
global.R
|
# Copyright (c) 2014 Clear Channel Broadcasting, Inc.
# https://github.com/iheartradio/ShinyBuilder
# Licensed under the MIT License (MIT)
# One-off install/cleanup commands kept commented out for reference:
#install.packages('shinybootstrap2')
#install.packages('googleVis')
#remove.packages('googleVis')
#remove.packages('rJava')
# Show full stack traces while developing the Shiny app.
options(shiny.fullstacktrace = TRUE)
#---------
#Libraries
#---------
library(stringr)
#library(googleVis)
#library(RJDBC)
library(RJSONIO)
library(RSQLite)
library(shinyAce)
library(shinyMCE)
library(shinyGridster)
#library(ShinyBuilder)
#source(system.file('googleChart.R', package = 'ShinyBuilder'))
source('googleChart.R')
source('ShinyBuilder.R')
# ShinyBuilder directory.
# NOTE(review): the system.file() result is immediately overwritten by the
# working-directory path on the next line -- confirm which one is intended.
sb_dir <- system.file('', package = 'ShinyBuilder')
sb_dir<- paste(getwd(),'/',sep='')
# DB list (dbListInit is presumably defined in the sourced ShinyBuilder code -- TODO confirm)
db_list <- dbListInit(sb_dir)
# Available dashboards: .RData files under <sb_dir>/dashboards, extension stripped.
available_dashboards <- str_replace(list.files(path = str_c(sb_dir,'dashboards')), '.RData', '')
|
b7627080794ebe83d2cea1db19796c6a84494d67
|
e5e29b60f2111cf6998b46a80cb5e034962b2cc5
|
/rstuff/rmongodb/rmongodb/man/mongo.bson.buffer.append.object.Rd
|
109929473542c384e8e543f06bbd13d9e0269f74
|
[
"Apache-2.0"
] |
permissive
|
BigBlueBox/GoodAndBad
|
24e326862a5456b673b1928ffbb14bc1c47a4d4b
|
4d2b8d3de523c3595fc21aa062eddf739f6c3a20
|
refs/heads/master
| 2021-01-01T20:35:55.494474
| 2014-08-24T17:56:29
| 2014-08-24T17:56:29
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,800
|
rd
|
mongo.bson.buffer.append.object.Rd
|
% File rmongodb/man/mongo.bson.buffer.append.object.Rd
\name{mongo.bson.buffer.append.object}
\alias{mongo.bson.buffer.append.object}
\title{Append an R object onto a mongo.bson.buffer}
\description{
Append an R object onto a \link{mongo.bson.buffer}.
This function allows you to store higher level R objects in the database without
losing their attribute information. It will correctly handle data frames, matrices and arrays
for instance; although, empty objects, such as a data frame with no rows, are not permitted.
Note that the names attribute will not be preserved if the object is multidimensional (although
dimnames will be).
The object's value will look like this in the buffer:
\preformatted{
{
...
name : {
R_OBJ : true,
value : xxx,
attr : {
attr1 : yyy,
attr2 : zzz
}
}
...
}
}
\code{name} will be substituted with the value of the \code{name} parameter.\cr
\code{xxx} will be substituted with the low level value of the object (as would be appended by
\code{\link{mongo.bson.buffer.append}()}).\cr
\code{attr1} and \code{attr2} will be substituted with the names of attributes.\cr
\code{yyy} and \code{zzz} will be substituted with the values of those attributes.\cr
Note that it is ill-advised to construct this wrapper manually, as \code{\link{mongo.bson.value}()} and
\code{\link{mongo.bson.iterator.value}()} bypass the special checking and handling that is done by R code that
sets attributes.
}
\usage{
mongo.bson.buffer.append.object(buf, name, value)
}
\arguments{
\item{buf}{(\link{mongo.bson.buffer}) The buffer object to which to append.}
\item{name}{(string) The name (key) of the field appended to the buffer. }
\item{value}{(object) The object to append to the buffer as a subobject. }
}
\value{
TRUE if successful; otherwise, FALSE if an error occurred appending the data.
}
\examples{
age <- c(5, 8)
height <- c(35, 47)
d <- data.frame(age=age, height=height)
buf <- mongo.bson.buffer.create()
mongo.bson.buffer.append.object(buf, "table", d)
b <- mongo.bson.from.buffer(buf)
# this produces a BSON object of the form:
# { "table" : { "R_OBJ" : true,
# "value" : {
# "age" : [ 5, 8 ],
# "height" : [35, 47 ]
# },
# "attr" : {
# "row.names" : [ -2147483648, -2 ],
# "class" : "data.frame"
# }
# }
# }
# row.names is stored in the compact form used for integer row names.
}
\seealso{
\link{mongo.bson},\cr
\link{mongo.bson.buffer},\cr
\code{\link{mongo.bson.buffer.append}},\cr
\code{\link{mongo.bson.value}},\cr
\code{\link{mongo.bson.iterator.value}}
}
|
209cc59d8658ac5115e480dae566b5cca745b002
|
6c302a2c19baeb2c08994cd79ebdac75344395f4
|
/xkcd-plots-setup.R
|
342c1fa138bd4d6e1ac6653a0f411f8da719dd1a
|
[
"MIT"
] |
permissive
|
damonzon/xkcd-plots
|
940519e6c66fd2726cb4f32704be1d6182012abf
|
b61775c762c1ebbbe7f151c1a85e279fcd0f2212
|
refs/heads/master
| 2021-01-11T10:48:56.959074
| 2015-01-08T03:32:38
| 2015-01-08T03:32:38
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 998
|
r
|
xkcd-plots-setup.R
|
# Setup instructions follow http://cran.r-project.org/web/packages/xkcd/vignettes/xkcd-intro.pdf
# Torres-Manzanera, Emilio. "xkcd: An R Package for Plotting XKCD Graphs."
# Libraries ####
library(ggplot2)
library(extrafont)
library(xkcd)
# xkcd font install ####
# Checking if xkcd fonts is installed
# From page 3 of xkcd vignette
# If the font is registered, build a demo scatterplot with it; otherwise warn
# and fall back to the default theme.
if( 'xkcd' %in% fonts()) {
  p <- ggplot() + geom_point(aes(x=mpg, y=wt), data=mtcars) +
    theme(text = element_text(size = 16, family = "xkcd"))
} else {
  warning("xkcd fonts not installed!")
  p <- ggplot() + geom_point(aes(x=mpg, y=wt), data=mtcars)
}
p
# Font was downloaded from https://github.com/ipython/xkcd-font
# Converted to .ttf format for use with extrafont package
# NOTE(review): font_import() scans ./Fonts on every run, even when the font
# was already found above -- confirm this re-import is intended.
font_import(paths="./Fonts", prompt=FALSE)
# Checking if it worked
fonts()
fonttable()
# Reloading fonts
# On Windows the fonts must also be registered for the bitmap graphics device.
if(.Platform$OS.type != "unix") {
  ## Register fonts for Windows bitmap output
  loadfonts(device="win")
} else {
  loadfonts()
}
|
46c2cc0595743668175f396d1f1dd8b515ea1bbe
|
98bce48ca65adc0f08ad399d107b82a29cccff4f
|
/scripts/simulation_sensM.R
|
1724bb487a97868f185a1632de1348ff307bc81d
|
[] |
no_license
|
skdeshpande91/VCBART
|
9c0aa7aa03e6d63d9ac883868407d7ed109e22a0
|
7c9b95aa157882ef9cc66370e03f87db733e74f1
|
refs/heads/master
| 2023-08-24T11:18:45.482808
| 2021-06-28T21:38:41
| 2021-06-28T21:38:41
| 246,921,783
| 15
| 2
| null | 2021-04-26T14:10:50
| 2020-03-12T20:08:13
|
C++
|
UTF-8
|
R
| false
| false
| 2,045
|
r
|
simulation_sensM.R
|
# Simulation study to assess sensitivity of VC-BART to the number of trees (M).
load("data/p5R20_data.RData")
source("scripts/vcbart_wrapper.R")
source("scripts/assess_support_recovery.R")
# Command-line arguments: number of trees and simulation replicate index.
args <- commandArgs(TRUE)
M <- as.numeric(args[1])
sim_number <- as.numeric(args[2])
load(paste0("data/sim_p5R20/data_p5R20_", sim_number, "_sigma1.RData"))
fit <- vcbart_wrapper(Y_train, X_train, Z_train, n_train,
                      X_test, Z_test, n_test, cutpoints, M = M, error_structure = "ind",
                      split_probs_type = "adaptive", burn = 500, nd = 1000,
                      verbose = TRUE, print_every = 250)
# Support recovery: compare estimated vs. true active modifiers.
est_support <- fit$beta_support[["support"]][[1]]
confusion_mat <- get_confusion_matrix(true_support, est_support, R)
# Mean squared error of the posterior-mean varying coefficients.
beta_mse_train <- colMeans( (beta_train - fit[["train"]][["beta"]][,"MEAN",])^2 )
beta_mse_test <- colMeans( (beta_test - fit[["test"]][["beta"]][,"MEAN",])^2 )
# Empirical coverage of the 95% credible intervals for the coefficients.
beta_cov_train <- apply((beta_train >= fit[["train"]][["beta"]][,"L95",]) & (beta_train <= fit[["train"]][["beta"]][,"U95",]),
                        FUN = mean, MARGIN = 2, na.rm = TRUE)
# BUG FIX: this originally recomputed the *train* coverage (copy-paste of the
# line above); it now evaluates the test-set coefficients against the
# test-set intervals.
beta_cov_test <- apply((beta_test >= fit[["test"]][["beta"]][,"L95",]) & (beta_test <= fit[["test"]][["beta"]][,"U95",]),
                       FUN = mean, MARGIN = 2, na.rm = TRUE)
# Out-of-sample predictive accuracy and interval coverage for y.
ystar_mse_train <- mean( (Y_train - fit[["train"]][["ystar"]][,"MEAN"])^2 )
ystar_mse_test <- mean( (Y_test - fit[["test"]][["ystar"]][,"MEAN"])^2 )
ystar_cov_train <- mean( (Y_train >= fit[["train"]][["ystar"]][,"L95"]) & (Y_train <= fit[["train"]][["ystar"]][,"U95"]))
ystar_cov_test <- mean( (Y_test >= fit[["test"]][["ystar"]][,"L95"]) & (Y_test <= fit[["test"]][["ystar"]][,"U95"]))
time <- fit$time
vcbart_time <- fit$vcbart_time
# Persist all summaries for this (M, sim_number) combination.
save_list <- c("beta_mse_train", "beta_mse_test", "beta_cov_train", "beta_cov_test",
               "ystar_mse_train", "ystar_mse_test", "ystar_cov_train", "ystar_cov_test",
               "time", "vcbart_time", "confusion_mat")
save(list = save_list, file = paste0("results/sensM/results_sensM", M, "_sim", sim_number, ".RData"))
|
6c8f16a2206d8c0ee00b7bd91e1dcb6b7721abb8
|
01fba87f693c47a73f4b2330556f898cd3d3689a
|
/CalculoEntropia.R
|
d500b563014f8e28519b0bbf9f003f6b06ba1dd1
|
[] |
no_license
|
otluiz/R-twitters
|
0b793a60c13ff97a9f057a91428481d2062512c0
|
e54c12f2203372c6b4b6b267dff0b00ec5ee8308
|
refs/heads/master
| 2021-01-22T20:25:23.671448
| 2017-05-18T00:23:02
| 2017-05-18T00:23:02
| 85,318,170
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 4,936
|
r
|
CalculoEntropia.R
|
# Entropy computations for the PRF (federal highway police) accident dataset.
setwd("~/workspace/R/")  # NOTE(review): hard-coded path kept from the original
# free memory
rm(list = ls())
gc()
library("entropy")
# Sanity check of the entropy estimators on a toy count vector.
y <- c(4, 2, 3, 0, 2, 4, 0, 0, 2, 1, 1)
entropy(y, method = "ML")
entropy(y, method = "MM")
entropy(y, method = "Jeffreys")
entropy(y, method = "Laplace")
entropy(y, method = "minimax")
entropy(y, method = "CS")
#-------[ WHOLE dataset, to be split into training and test sets ]------------------------
dfN <- read.csv("../data/prfPeriodoTx.csv")  # load the data frame
## Recode severity for the decision tree: 0 -> "N", 1 -> "S" ------------------
## (vectorized; replaces the original row-by-row loop)
dfN$Gravidade[dfN$Gravidade == 0] <- "N"
dfN$Gravidade[dfN$Gravidade == 1] <- "S"
## Prefix the highway number with "BR", e.g. 101 -> "BR101" --------------------------------
br_codes <- c(101, 104, 110, 116, 232, 316, 407, 408, 423, 424, 428)
is_br <- dfN$BR %in% br_codes
dfN$BR[is_br] <- paste0("BR", dfN$BR[is_br])
write.csv(dfN, "../data/prfBRTratada.csv", row.names = FALSE)
# entropy.ChaoShen(z, unit=c("log", "log2", "log10"))
# NOTE(review): this path ("./data") differs from the first read ("../data");
# confirm which is intended.
dfN <- read.csv("./data/prfPeriodoTx.csv")  # load the data frame
attach(dfN)  # NOTE(review): attach() is discouraged; the bare column names below rely on it
str(dfN)
###### Entropy of Gravidade (severity) @@@@@@@@@@@@@@@@@@@@@@@@@@@
cont <- c(Gravidade)
freq <- table(cont)/length(cont)
-sum(freq * log2(freq))  # Shannon entropy in bits; observed 0.9997114
# NOTE(review): entropy.ChaoShen() documents a vector of *counts* as input;
# relative frequencies are passed throughout this script -- verify.
entropy.ChaoShen(freq, unit=c("log", "log2", "log10"))
###### Entropy of RestrVisibili (visibility restriction) @@@@@@@@@@@@@@@@@@@@@@@
contTxRest <- c(RestrVisibili)
freqTxRest <- table(contTxRest)/length(contTxRest)
-sum(freqTxRest * log2(freqTxRest))  # entropy = 0.5537316
entropy.ChaoShen(freqTxRest, unit=c("log", "log2", "log10"))
###### Entropy of TipoAcident (accident type) @@@@@@@@@@@@@@@@@@@@@@@
contTxAcident <- c(TipoAcident)
freqTxAcident <- table(contTxAcident)/length(contTxAcident)
-sum(freqTxAcident * log2(freqTxAcident))  # entropy = 3.068937
entropy.ChaoShen(freqTxAcident, unit=c("log", "log2", "log10"))
###### Entropy of CausaAcident (accident cause) @@@@@@@@@@@@@@@@@@@@@@@
contCausaAcident <- c(CausaAcident)
freqCausaAcident <- table(contCausaAcident)/length(contCausaAcident)
-sum(freqCausaAcident * log2(freqCausaAcident))  # entropy = 2.692134
# BUG FIX: the original passed freqTxAcident (the previous section's table) here.
entropy.ChaoShen(freqCausaAcident, unit=c("log", "log2", "log10"))
###### Entropy of BR (highway) @@@@@@@@@@@@@@@@@@@@@@@
contBR <- c(BR)
freqBR <- table(contBR)/length(contBR)
-sum(freqBR * log2(freqBR))  # entropy = 2.412813
entropy.ChaoShen(freqBR, unit=c("log", "log2", "log10"))
###### Entropy of TracadoVia (road layout) @@@@@@@@@@@@@@@@@@@@@@@
contTrçVia <- c(TracadoVia)
freqTrçVia <- table(contTrçVia)/length(contTrçVia)
-sum(freqTrçVia * log2(freqTrçVia))  # entropy = 0.8309614
entropy.ChaoShen(freqTrçVia, unit=c("log", "log2", "log10"))
###### Entropy of TipoAuto (vehicle type) @@@@@@@@@@@@@@@@@@@@@@@
contTipoAuto <- c(TipoAuto)
freqTipoAuto <- table(contTipoAuto)/length(contTipoAuto)
-sum(freqTipoAuto * log2(freqTipoAuto))  # entropy = 3.171005
entropy.ChaoShen(freqTipoAuto, unit=c("log", "log2", "log10"))
###### Entropy of DiaDaSemana (day of the week) @@@@@@@@@@@@@@@@@@@@@@@
contSemana <- c(DiaDaSemana)
freqSemana <- table(contSemana)/length(contSemana)
-sum(freqSemana * log2(freqSemana))  # entropy = 2.804424
entropy.ChaoShen(freqSemana, unit=c("log", "log2", "log10"))
###### Entropy of Hour @@@@@@@@@@@@@@@@@@@@@@@
# (section header fixed: the original mislabelled this and the next three
# sections as "Dia da Semana")
contHour <- c(Hour)
freqHour <- table(contHour)/length(contHour)
-sum(freqHour * log2(freqHour))  # entropy = 4.38932
entropy.ChaoShen(freqHour, unit=c("log", "log2", "log10"))
###### Entropy of Periodo (period of day) @@@@@@@@@@@@@@@@@@@@@@@
contPeriodo <- c(Periodo)
freqPeriodo <- table(contPeriodo)/length(contPeriodo)
-sum(freqPeriodo * log2(freqPeriodo))  # entropy = 1.807386
entropy.ChaoShen(freqPeriodo, unit=c("log", "log2", "log10"))
###### Entropy of KMArredondado (rounded km marker) @@@@@@@@@@@@@@@@@@@@@@@
contKM <- c(KMArredondado)
freqKM <- table(contKM)/length(contKM)
-sum(freqKM * log2(freqKM))  # entropy = 7.564134
entropy.ChaoShen(freqKM, unit=c("log", "log2", "log10"))
###### Entropy of Delegacia (police station) @@@@@@@@@@@@@@@@@@@@@@@
contDelegacia <- c(Delegacia)
freqDelegacia <- table(contDelegacia)/length(contDelegacia)
-sum(freqDelegacia * log2(freqDelegacia))  # entropy = 1.800944
entropy.ChaoShen(freqDelegacia, unit=c("log", "log2", "log10"))
# Contingency table of accident type vs. severity.
# NOTE(review): the name `t` shadows base::t(); kept for compatibility.
t=table(TipoAcident, Gravidade)
|
cd895149c520179dbe8d4cf1600a795523d3105b
|
28fdf64a287393ed4f550f8fc7b8634dc5c28e4d
|
/R/nctools-transform_internal.R
|
541cf983952c2c59c1a7e5711309781d9974db28
|
[] |
no_license
|
roliveros-ramos/nctools
|
73aad10b6bf85dfc13e5c345cb66072579446dec
|
65fd79270f855ac064386708d226ab1ba2c055a9
|
refs/heads/master
| 2022-08-18T17:05:25.460725
| 2022-08-07T19:22:54
| 2022-08-07T19:22:54
| 98,751,700
| 0
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,437
|
r
|
nctools-transform_internal.R
|
#' Locate, by linear interpolation, the coordinate at which a variable
#' crosses the value `loc` along one dimension
#'
#' For every position along the remaining dimensions, the function finds the
#' first sign change of `var - loc` along dimension `MARGIN` and linearly
#' interpolates the corresponding coordinate value (e.g. the depth at which a
#' profile reaches `loc`). The interpolated coordinate is written as a new
#' variable to a new NetCDF file.
#'
#' @param filename Path to the input NetCDF file.
#' @param varid Variable to analyse (validated via .checkVarid()).
#' @param MARGIN The dimension (index or name) along which to interpolate;
#'   must have length one.
#' @param loc Target value of the variable.
#' @param output Path of the NetCDF file to create (mandatory).
#' @param name,longname,units Optional name/long name/units for the output
#'   variable (units currently comes from the MARGIN dimension).
#' @param compression Optional compression level for the output variable.
#' @param ignore.case If TRUE, dimension-name matching is case-insensitive.
#' @param drop,newdim,verbose,force_v4 Accepted but not used in this body --
#'   NOTE(review): confirm whether these are placeholders.
#' @return Invisibly, the path of the output file.
#' @export
nc_loc = function(filename, varid, MARGIN, loc, output=NULL, drop=TRUE,
                  newdim = NULL, name=NULL, longname=NULL, units=NULL,
                  compression=NA, verbose=FALSE, force_v4=TRUE,
                  ignore.case=FALSE) {
  if(is.null(output)) stop("You must specify an 'output'file.")
  # Refuse to continue if an existing output file cannot be removed.
  if(file.exists(output)) {
    oTest = file.remove(output)
    if(!oTest) stop(sprintf("Cannot write on %s.", output))
  }
  nc = nc_open(filename)
  on.exit(nc_close(nc))
  varid = .checkVarid(varid=varid, nc=nc)
  # Dimension values of varid, and their (possibly lower-cased) names.
  dn = ncvar_dim(nc, varid, value=TRUE)
  dnn = if(isTRUE(ignore.case)) tolower(names(dn)) else names(dn)
  if(length(MARGIN)!=1) stop("MARGIN must be of length one.")
  # Translate a dimension name into its index within varid's dimensions.
  if (is.character(MARGIN)) {
    if(isTRUE(ignore.case)) MARGIN = tolower(MARGIN)
    MARGIN = match(MARGIN, dnn)
    if(anyNA(MARGIN))
      stop("not all elements of 'MARGIN' are names of dimensions")
  }
  # Coordinate values along the interpolation dimension (e.g. depth levels).
  depth = ncvar_get(nc, varid=dnn[MARGIN])
  dims = nc$var[[varid]]$size
  # One fewer level along MARGIN: sign changes are tested between
  # consecutive levels.
  dims[MARGIN] = dims[MARGIN]-1
  # ind0: the full field shifted by loc, plus per-dimension index vectors used
  # with do.call("[", ...) to build the two staggered copies below.
  ind0 = c(list(X=ncvar_get(nc, varid, collapse_degen = FALSE) - loc, drop=FALSE),
           lapply(dims, seq_len))
  x0 = do.call("[", ind0)
  # Shift the MARGIN index by one to get the "next level" copy.
  ind0[[MARGIN+2]] = ind0[[MARGIN+2]]+1L
  x1 = do.call("[", ind0)
  # Negative product of signs marks a crossing of loc between two levels.
  x0 = sign(x0)*sign(x1)
  # First crossing index for each profile along the other dimensions.
  ind = apply(x0, -MARGIN, FUN=function(x) which(x<1)[1])
  # Bracketing coordinate values around the crossing.
  D1 = depth[ind]
  D2 = depth[ind+1]
  # Build an index matrix to pull the bracketing field values (x1, x2) for
  # every profile at once.
  iList = append(lapply(dims[-MARGIN], seq_len), MARGIN-1, values=NA_integer_)
  index = as.matrix(do.call(expand.grid, iList))
  index[, MARGIN] = ind
  x1 = ind0$X[index]
  index[, MARGIN] = ind + 1
  x2 = ind0$X[index]
  # Linear interpolation of the coordinate at which the field equals loc
  # (x1 and x2 are already offsets relative to loc, so the zero of the line
  # through (D1, x1)-(D2, x2) is the answer).
  Dx = (-x1*(D2-D1))/(x2-x1) + D1
  dim(Dx) = dims[-MARGIN]
  # Define the output variable on all dimensions except MARGIN.
  oldVar = nc$var[[varid]]
  newDim = nc$dim[(oldVar$dimids + 1)[-MARGIN]]
  thisDim = nc$dim[(oldVar$dimids + 1)[MARGIN]][[1]]
  if(!is.na(compression)) oldVar$compression = compression
  xlongname = sprintf("%s of %s=%s %s", dnn[MARGIN], oldVar$longname, loc, oldVar$units)
  varLongname = if(is.null(longname)) xlongname else longname
  varName = if(is.null(name)) dnn[MARGIN] else name
  # Units are inherited from the MARGIN dimension (the `units` argument is
  # currently ignored -- NOTE(review)).
  varUnits = thisDim$units
  newVar = ncvar_def(name=varName, units = varUnits,
                     missval = oldVar$missval, dim = newDim,
                     longname = varLongname, prec = oldVar$prec,
                     compression = oldVar$compression)
  ncNew = nc_create(filename=output, vars=newVar)
  ncvar_put(ncNew, varName, Dx)
  nc_close(ncNew)
  return(invisible(output))
}
|
696fb5598a762f8c3ab4992b7663c3637ac410cd
|
836809ad92a6e14e7387ee82c64ff2b58c7c1ccf
|
/Q1-Q4 Combined.R
|
4eeae14ffee359f437d2c377e18d50d509cae9b1
|
[] |
no_license
|
kdhst905/R-Final-Project
|
bdd0db292ee93440bd33525eb1dc747e0a9b8df6
|
f67507ed724d617741d552734fcb608093e237ef
|
refs/heads/main
| 2023-01-27T22:03:58.655367
| 2020-12-07T05:20:32
| 2020-12-07T05:20:32
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 4,170
|
r
|
Q1-Q4 Combined.R
|
# Load libraries
library(readr)
library(dplyr)
# Column specification: keep the analysis columns, skip the internal ID columns.
my_col_types <- cols(
  ProductName= col_character(),
  CompanyName= col_character(),
  BrandName= col_character(),
  PrimaryCategory= col_factor(),
  SubCategory= col_character(),
  CasId = col_double(),
  CasNumber = col_character(),
  ChemicalName=col_character(),
  ChemicalCount = col_double(),
  CDPHId = col_skip(),
  CSFId = col_skip(),
  CSF= col_skip(),
  CompanyId = col_skip(),
  PrimaryCategoryId = col_skip(),
  SubCategoryId = col_skip(),
  ChemicalId = col_skip()
)
# Read the data as df; empty strings become NA via na=c("").
# FIX: spell out `col_types` rather than relying on partial matching (`col_type=`).
df <- read_csv("cscpopendata.csv", col_types = my_col_types, na = c(""))
df <- df %>%
  # Drop rows with a missing CasNumber or BrandName.
  # (With na=c("") the empties are NA, and filter() drops NA conditions too.)
  filter(CasNumber != "" & BrandName != "")
# Parse the date columns (kept as stand-alone vectors, as in the original;
# NOTE(review): "%m/%d/%y" expects two-digit years -- confirm the file format).
InitialDateReported <- as.Date(df$InitialDateReported, format = "%m/%d/%y")
MostRecentDateReported <- as.Date(df$MostRecentDateReported, format = "%m/%d/%y")
DiscontinuedDate <- as.Date(df$DiscontinuedDate, format = "%m/%d/%y")
ChemicalDateRemoved <- as.Date(df$ChemicalDateRemoved, format = "%m/%d/%y")
# BUG FIX: the next two originally parsed ChemicalDateRemoved (copy-paste).
ChemicalCreatedAt <- as.Date(df$ChemicalCreatedAt, format = "%m/%d/%y")
ChemicalUpdatedAt <- as.Date(df$ChemicalUpdatedAt, format = "%m/%d/%y")
##########################################################################################################################################################################
# Q1. What are the top 10 most reported types of chemical in cosmetics?
library(reshape2)
top10chemical <- df %>%
  select(ChemicalName, ChemicalCount) %>%
  group_by(ChemicalName) %>%
  summarise(sumChemcnt = sum(ChemicalCount)) %>%
  arrange(desc(sumChemcnt)) %>%
  # BUG FIX: rank by the summed count, not alphabetically by chemical name.
  top_n(10, sumChemcnt)
# Alternative computation via a pivot of report counts (overwrites the above).
# FIX: keep `df` ungrouped instead of reassigning a grouped copy of it.
df_by_chem <- group_by(df, ChemicalName)
summ <- summarize(df_by_chem, num_types = n())
pivot <- dcast(summ, ChemicalName ~ ., value.var = "num_types")
pivot <- arrange(pivot, desc(.))
top10chemical <- head(pivot, n = 10)
# Q2. What is the toxicity of commonly reported chemicals?
# install.packages("readxl")
library(readxl)
# NOTE(review): `pdf` shadows grDevices::pdf(); kept to avoid touching callers.
pdf <- read_excel("pdf.xlsx")
pdf[, c(1)] <- NULL  # drop the first column...
pdf[, c(1)] <- NULL  # ...twice, removing the two leading index columns
pdf <- pdf %>%
  select(Chemical, Cancer, Developmental, 'Female Reproductive', 'Male Reproductive')
# Per-chemical toxicity lookups.
TiO2 <- pdf[grep("Titanium dioxide", x = pdf$Chemical), ]
ButylHy <- pdf[grep('Butylated hydroxyanisole', x = pdf$Chemical), ]
CarbonB <- pdf[grep('Carbon black', x = pdf$Chemical), ]
Talc <- pdf[grep('Talc', x = pdf$Chemical), ]
Retinol <- pdf[grep('Retinol', x = pdf$Chemical), ]
Cocamide <- pdf[grep('cocamide', x = pdf$Chemical), ]
Silica <- pdf[grep('Silica', x = pdf$Chemical), ]
Mica <- pdf[grep('mica', x = pdf$Chemical), ]  # lung scarring which leads to symptoms such as coughing, shortness of breath, weakness, and weight loss.
Vitamin <- pdf[grep('vitamin', x = pdf$Chemical), ]  # Too much intake-altered bone metabolism and altered metabolism of other fat-soluble vitamins
Retinylpalmitate <- pdf[grep('palmitate', x = pdf$Chemical), ]  # (Combination of pure vitamin A and fatty acid palmitic acid)-Generally safe
# Q3. Which primary category of cosmetics contains the most chemical content reports?
library(dplyr)
library(reshape2)
# Average chemical count per primary category.
Primary <- df %>%
  select(PrimaryCategory, ChemicalCount) %>%
  group_by(PrimaryCategory) %>%
  summarise(Avgchemcnt = mean(ChemicalCount)) %>%
  arrange(desc(Avgchemcnt))
# Average chemical count per subcategory.
Sub <- df %>%
  select(SubCategory, ChemicalCount) %>%
  group_by(SubCategory) %>%
  summarise(Avgchemcnt = mean(ChemicalCount)) %>%
  arrange(desc(Avgchemcnt))
# Q4. Which companies' cosmetics contain most chemical reports and have not yet fixed, removed, or discontinued the product? Which companies are responsive to public health concern?
# Companies responsive to health concerns: products whose chemical record was
# updated after creation, ranked by total chemical count.
ethics <- df %>%
  select(CompanyName, ProductName, ChemicalCount, DiscontinuedDate, ChemicalCreatedAt, ChemicalUpdatedAt, ChemicalDateRemoved) %>%
  group_by(CompanyName, ProductName, ChemicalCreatedAt, ChemicalUpdatedAt, DiscontinuedDate, ChemicalDateRemoved) %>%
  summarise(chemcnt = sum(ChemicalCount)) %>%
  filter(ChemicalCreatedAt != ChemicalUpdatedAt) %>%
  arrange(desc(chemcnt))
# Keep only fully populated rows.
ethics <- ethics[complete.cases(ethics), ]
|
7d207427808b4bcb0b292d26117a3149403fcd66
|
55e51b89b134522678d1f58d3006a5e37d1e7462
|
/man/information.Rd
|
7e8d8d60a8196eea2237dbf6a1440c3fab3afadf
|
[] |
no_license
|
cran/IRTpp
|
49da8ebb3f62625634aee44e3b0d4568b9d250a0
|
3cdd14c81e00802d2f1bcd022eebcc403c636798
|
refs/heads/master
| 2021-01-15T15:25:21.955389
| 2016-07-05T14:02:36
| 2016-07-05T14:02:36
| 54,411,939
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 1,394
|
rd
|
information.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/exploratory.R
\name{information}
\alias{information}
\title{Test or Item information}
\usage{
information(object, range, items = NULL)
}
\arguments{
\item{object}{a matrix (with the estimated coefficients)}
\item{range}{an interval for which the test information should be computed.}
\item{items}{the items for which the information should be computed.}
}
\value{
TotalInfo the total amount of information.
RangeInfo the amount of information in the specified interval.
RangeProp the proportion of information in the specified interval.
Range the value of range argument
items the value of items argument
}
\description{
Computes the amount of test or item information.
}
\details{
The amount of information is computed as the area under the Item or Test
Information Curve.
The function was adapted from ltm_1.0 package.
}
\examples{
#data <- simulateTest(model = "2PL", items = 20, individuals = 800)
#fit <- irtpp(dataset = data$test,model = "2PL")
#fit <- parameter.matrix(fit$z)
#information(fit, c(-2, 0))
#information(fit, c(0, 2), items = c(3, 5))
}
\references{
Reckase, M. (2009). Multidimensional item response theory. New York: Springer.
Baker, F. B., & Kim, S. H. (Eds.). (2004). Item response theory: Parameter estimation techniques. CRC Press.
}
|
73eb76ef442cb6a3579a086557ac551848dd25b2
|
3e293bd4ce6ad2e248f5ce618b152445bf739d04
|
/CFTP_open_ver2.R
|
6dc908269a309591c3aaa44a4cb93382202e9104
|
[] |
no_license
|
kazuya65/CFTP_ozawa_open
|
cce0f7480956d6893cfbb01504fd8e984f20f995
|
aa7345bf59ed45807408bb12d78bb99fefbd8e4a
|
refs/heads/master
| 2021-04-26T22:54:43.901868
| 2018-03-05T09:22:43
| 2018-03-05T09:22:43
| 123,894,602
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,414
|
r
|
CFTP_open_ver2.R
|
# Randomly draw the next event of the queueing network.
# Relies on globals:
#   q -- event probabilities (arrivals at servers 1-3 plus departure events)
#        -- TODO(review): confirm length/meaning against the setup code.
#   P -- routing matrix for departures (row = originating server).
# Returns c(l, m): l = event index (1-3 arrival at server l; >3 departure
# from server l-3), m = destination server (4 means leaving the network).
choose_function <- function(){ # events 4+ are "external" (departures); 1-3 are "internal" (arrivals)
  l <- which(rmultinom(1,1,q)==1)
  if(l > 3){ # l > 3: a departure from server l-3; draw its destination from P
    m <- which(rmultinom(1,1,P[l-3,])==1)
  }else{ # l <= 3: an arrival at server l
    m <- l
  }
  return(c(l,m))
}
# Apply one event a = c(l, m) (see choose_function) to the state vector x
# (queue length at each of the 3 servers) and return the new state.
# `L` is accepted but unused -- NOTE(review): possibly a leftover parameter.
# Relies on the global `buffersize` (per-server capacity); customers moving
# to a full server are dropped, and destination 4 means leaving the system.
update_function <- function(x, L, a){
  # cat("l=", a[[1]], "m=", a[[2]], "\n")
  if (a[1] > 3) {
    # Departure: a[1]-3 is the source server, a[2] the destination.
    fserver <- a[1]-3
    tserver <- a[2]
    if (x[fserver] == 0) return(x)  # nothing to move from an empty queue
    x[fserver] <- x[fserver] - 1
    if (tserver == 4) return(x)  # destination 4 = exit the network
    if (x[tserver] < buffersize) {
      # Transfer only if the destination buffer has room.
      x[tserver] <- x[tserver] + 1
    }
  }
  else {
    # Arrival at server a[1]; dropped when the buffer is full.
    tserver <- a[1]
    if (x[tserver] < buffersize) {
      x[tserver] <- x[tserver] + 1
    }
  }
  return(x)
}
# Coupling From The Past (CFTP): run an upper chain (from xu0) and a lower
# chain (from xl0) with the SAME event stream, extending the stream further
# into the past (T decreases) until the two chains coalesce by time 0.
# The common state at time 0 is returned together with the starting time T.
# Relies on choose_function()/update_function() and their globals.
cftp <- function(xu0, xl0, L, buffersize){
  # Event stream; new (earlier) events are prepended as columns.
  servers <- cbind(choose_function())
  break_frag <- 0
  # T = (negative) starting time of the replay.
  # NOTE(review): the name T shadows the TRUE alias within this function.
  T <- -1
  while(1){
  # for(i in 1:30){ # for testing
    xu <- xu0
    xl <- xl0
    # Replay all -T stored events from time T up to time 0.
    for(i in 1:-T){
      cat("xu=", xu, "xl=", xl, "\n")
      xu <- update_function(xu, L, servers[,i])
      xl <- update_function(xl, L, servers[,i])
      if(all(xu == xl)){
        break_frag <- 1  # the chains have coalesced
      }
      # Sanity check: the upper chain must always dominate the lower one.
      if (sum(xu) < sum(xl))
        stop("error")
    }
    cat("xu=", xu, "xl=", xl, "\n")
    cat("\n")
    if(break_frag==1){
      break
    }
    # Not coalesced: prepend one more event further in the past and retry.
    servers <- cbind(choose_function(), servers)
    T <- T-1
  }
  return(list(xu, T))
}
cftp(xu, xl, L, buffersize)
|
d311e3093f097299940ea70ce356b16dc4363cef
|
9a79c6d33fc2776d08c72e71c8ad91aa73df2e10
|
/man/sum.Dataset.Rd
|
8b051172c8b1a7d15c92798888aa19fd482db400
|
[] |
no_license
|
giupo/rdataset
|
8e6d1d645e1806bed616f2c9d34fdeac3af65129
|
7786febfadb60bf37343976a0d0d2a0286cca259
|
refs/heads/master
| 2021-06-07T19:17:06.802211
| 2021-05-14T15:29:23
| 2021-05-14T15:29:23
| 67,530,543
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 390
|
rd
|
sum.Dataset.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/sum.r
\name{sum.Dataset}
\alias{sum.Dataset}
\title{Esegue il `sum` sul Dataset}
\usage{
\method{sum}{Dataset}(x, ..., na.rm = FALSE)
}
\arguments{
\item{x}{dataset da sommare}
\item{...}{other objects to add}
\item{na.rm}{Not used, just to be compliant with sum}
}
\description{
Esegue il `sum` sul Dataset
}
|
ff215e65c9d031cf5d8cb00ee4a55dfb7f25eee1
|
db2b24f57af0e645ce9a38027e3b0d08ef71105b
|
/tracks.R
|
4193431f163a20f5894664ee19ac866002bb6341
|
[] |
no_license
|
ThinkR-open/datasets
|
3ccfeb3b9c8428006bb02d92f40b58f627719fa6
|
00d39f024c13f8877466018508fc9978508c3bb6
|
refs/heads/master
| 2021-04-27T08:39:54.175182
| 2020-12-09T09:19:17
| 2020-12-09T09:19:17
| 122,495,096
| 6
| 6
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,769
|
r
|
tracks.R
|
library(httr)
# Spotify OAuth credentials (secrets redacted in this copy).
app_id <- "app_spotify"
client_id <- "***"
client_secret <- "***"
spoti_ep <- httr::oauth_endpoint(
  authorize = "https://accounts.spotify.com/authorize",
  access = "https://accounts.spotify.com/api/token")
spoti_app <- httr::oauth_app(app_id, client_id, client_secret)
# Token with read access to the saved library and recent plays.
access_token <- httr::oauth2.0_token(spoti_ep, spoti_app, scope = "user-library-read user-read-recently-played")
library(tidyverse)
library(jsonlite)
# Fetch one page (50 items) of the user's saved tracks from the Spotify API,
# starting at `offset`, and flatten it into a one-row-per-track tibble.
# Relies on the global `access_token` created above.
get_tracks <- function(offset){
  print(offset)  # progress indicator
  api_res <- GET("https://api.spotify.com/v1/me/tracks", config(token = access_token), query = list(limit = 50, offset = offset))
  # Decode the raw JSON payload into nested data frames.
  played <- api_res$content %>%
    rawToChar() %>%
    fromJSON(flatten = TRUE)
  # `%||%` supplies NA when a field is absent from the response.
  tibble(track = played$items$track.name %||% NA,
         # NOTE(review): `trackid` is filled with the *album* id
         # (track.album.id), identical to album_id below -- likely meant to
         # be the track id; confirm before relying on it.
         trackid = played$items$track.album.id %||% NA,
         track_uri = gsub("spotify:track:", "", played$items$track.uri) %||% NA,
         album_name = played$items$track.album.name %||% NA,
         album_id = played$items$track.album.id %||% NA,
         artist = map_chr(played$items$track.artists, ~.x$name[1] %||% NA),
         duration = played$items$track.duration_ms %||% NA,
         explicit = played$items$track.explicit %||% NA,
         popularity = played$items$track.popularity %||% NA)
}
# First request is used only to learn the total number of saved tracks.
tracks <- GET("https://api.spotify.com/v1/me/tracks", config(token = access_token), query = list(limit = 50))
content(tracks)$total
# Page through the whole library 50 tracks at a time.
colin_tracks <- map_df(seq(0,content(tracks)$total, 50), get_tracks)
dim(colin_tracks)
library(glue)
# Fetch the Spotify audio features for a single track id and return them as
# a one-row tibble (NA for any field missing from the response).
# Relies on the global `access_token`.
track_features <- function(id){
  print(id)  # progress indicator
  # Avoid getting rate-limited by the Spotify API.
  Sys.sleep(0.1)
  api_res <- GET(glue("https://api.spotify.com/v1/audio-features/{id}"), config(token = access_token))
  res <- api_res$content %>%
    rawToChar() %>%
    fromJSON(flatten = TRUE)
  tibble(danceability = res$danceability %||% NA,
         energy = res$energy %||% NA,
         key = res$key %||% NA,
         loudness = res$loudness %||% NA,
         mode = res$mode %||% NA,
         speechiness = res$speechiness %||% NA,
         acousticness = res$acousticness %||% NA,
         instrumentalness = res$instrumentalness %||% NA,
         liveness = res$liveness %||% NA,
         valence = res$valence %||% NA,
         tempo = res$tempo %||% NA,
         type = res$type %||% NA,
         id = res$id %||% NA,
         uri = res$uri %||% NA,
         track_href = res$track_href %||% NA,
         analysis_url = res$analysis_url %||% NA,
         duration_ms = res$duration_ms %||% NA,
         time_signature = res$time_signature %||% NA
  )
}
# Audio features for every saved track, joined back onto the track metadata
# and exported to CSV.
tracks_features <- map_df(colin_tracks$track_uri, track_features)
full_tracks <- full_join(colin_tracks, tracks_features, by = c("track_uri" = "id"))
write_csv(full_tracks, "tracks.csv")
|
564540b94db6cff3db014995bc34bff3fbfb6368
|
166092ed16671797a6728866c3cfd0c7e5a47aef
|
/survival_prediction.R
|
43804e4725ebc5b60ea7e8955d819825e00e47e9
|
[] |
no_license
|
unchowdhury/CRC_Survival
|
00ecf8892b49a2bbb37fb34fae32d6e297cbf6a0
|
d5aeb11ac4a8547586bd88ed401a0b94c7d5b5e6
|
refs/heads/main
| 2023-04-25T13:42:53.463751
| 2021-05-07T17:24:18
| 2021-05-07T17:24:18
| 365,009,368
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,761
|
r
|
survival_prediction.R
|
#Script one
#Survival function prediction for the common DEGs
# 1. Set your work folder
setwd(".../workfolder")  # NOTE(review): placeholder path -- edit before running
# 2. Load libraries for script one
# NOTE(review): plyr is loaded after dplyr, so plyr functions (e.g.
# summarise) mask the dplyr versions -- confirm this order is intended.
library(dplyr)
library(plyr)
library(survival)
#3. Working with clinical data
dato<-read.csv("data_clinical_patient_CRC.csv",header = TRUE,stringsAsFactors = FALSE)
View(dato)  # interactive inspection only
# Keep the clinical columns used downstream.
datos<-select(dato,Patient_ID,AGE,SEX,Cancer_STAGE,ETHNICITY,RACE,Censor_Status,Cancer_type,Tumor_site,OS_DAYS)
View(datos)
# Rename columns to analysis-friendly names (new_name = old_name).
datos <- rename(datos, Rectime = OS_DAYS, Cancer_stage = Cancer_STAGE, Race = RACE,
                Censor.Status = Censor_Status, Ethnicity = ETHNICITY)
# Normalize empty strings in the text columns.
# BUG FIX: the original referenced lowercase column names (race, tumour_site,
# cancer_stage) that do not exist after the rename above, and two columns
# (anatomic_site, histologic_grade) that were never selected; those two
# lines were removed because they would error at run time.
datos$Race <- as.character(lapply(datos$Race, function(x){gsub("^$", "Others", x)}))
datos$Tumor_site <- as.character(lapply(datos$Tumor_site, function(x){gsub("^$", NA, x)}))
datos$Cancer_stage <- as.character(lapply(datos$Cancer_stage, function(x){gsub("^$", NA, x)}))
# 4. View the distribution of the clinical features.
count(datos, 'Cancer_stage')
count(datos, 'Race')
count(datos, 'SEX')
count(datos, 'Tumor_site')
count(datos, 'Ethnicity')
count(datos, 'Cancer_type')
#5. Working with the mRNA data
# Per-gene expression z-scores, one column per gene.
expr_mr<-read.csv("Genes_Zscore_CRC.csv",header=TRUE,stringsAsFactors = FALSE)
View(expr_mr)
#Replace missing values (NA) with 0
expr_mr[is.na(expr_mr)] <- 0
#function for labelling each expression value
# Label a single expression value: character inputs pass through unchanged,
# numeric values become "Altered" when |x| >= 1.5 and "Normal" otherwise.
altered_test <- function(x){
  if (typeof(x) == "character") {
    return(x)
  }
  if (abs(x) >= 1.5) "Altered" else "Normal"
}
# Apply `f` to every element of every column of `df` and return the results
# as a data.frame with the original column names.
# BUG FIX: the original read from the global `expr_mr` instead of the `df`
# argument, so the data parameter was silently ignored.
applyfunc <- function(df, f){
  ds <- matrix(0, nrow = nrow(df), ncol = ncol(df))
  colnames(ds) <- colnames(df)
  for (i in seq_len(ncol(df))){
    # Character results (e.g. from altered_test) coerce the matrix as needed.
    ds[, i] <- sapply(df[, i], f)
  }
  as.data.frame(ds)
}
# Label every expression value as "Altered"/"Normal".
gene_status<-applyfunc(expr_mr,altered_test)
#remove the 01 from patient iD
# Strip the trailing 3-character sample-type suffix (e.g. "-01") from a
# patient ID. Vectorized via substr(); the original split each string into
# single characters and pasted it back. Assumes nchar(x) > 3.
remove_01 <- function(x){
  substr(x, 1, nchar(x) - 3)
}
# Normalize the patient IDs so they match the clinical table.
gene_status$Patient_ID<-as.character(gene_status$Patient_ID)
gene_status$Patient_ID=unlist(lapply(gene_status$Patient_ID,remove_01))
# 6. Merge the tables
gene_status$Patient_ID=as.character(gene_status$Patient_ID)
# Join on the shared column(s) -- presumably Patient_ID; confirm.
combined<-datos%>%inner_join(gene_status)
#relevel the genes as normal as reference factor
# Convert every gene-status column (columns 11 onward) into a factor whose
# reference level is "Normal", so model coefficients are relative to the
# unaltered state. Returns the modified data frame.
applyrevel <- function(combined) {
  gene_cols <- seq(11, ncol(combined))
  for (j in gene_cols) {
    combined[[j]] <- relevel(as.factor(combined[[j]]), ref = "Normal")
  }
  combined
}
#7. Univariate analysis
# Survival response: follow-up time plus censoring indicator, shared by all
# models below.
kmsurvo<-Surv(combined$Rectime,combined$Censor.Status)
# Fit one univariate Cox model per gene-status column (11..ncol) and collect
# coef, hazard ratio (exp(coef)) and p-value for each gene.
# NOTE(review): relies on the global `kmsurvo` rather than rebuilding the
# response from its `combined` argument -- works here, fragile if reused.
applycox<-function(combined){
  models<-list()
  col_names<-colnames(combined)[11:ncol(combined)]
  for(i in col_names){
    fit<-coxph(kmsurvo~factor(combined[,i]),data=combined)
    tss<-summary(fit)
    # columns 1, 2, 5 of summary()$coefficients: coef, exp(coef), p-value
    coefs<-c(tss$coefficients[c(1,2,5)])
    models[[i]]=coefs
  }
  final_mode<-as.data.frame(models)
  # transpose so genes are rows and the three statistics are columns
  final_model=t(final_mode)
  colnames(final_model)<-c("coef","exp.coef","p")
  as.data.frame(final_model)
}
fs<-applycox(combined)
View(fs)
fs<-fs%>%mutate(gene=rownames(.))
write.csv(fs, file="R_unique_univariate_CRC.csv")
#8. Multivariate Analysis
# Single Cox model containing all gene-status columns simultaneously.
fitt<-coxph(kmsurvo~.,data=combined[,11:ncol(combined)])
fitt
#9. Combined analysis for Clinical and rna Expression survival analysis
# Reorder columns so the clinical covariates precede the gene columns.
combined_reor<-combined[,c(1,7,10,2:6,8,9,11:ncol(combined))]
View(combined_reor)
combined_reor$race=factor(combined_reor$race)
# `-1` drops the intercept from the joint clinical + expression model.
fitt_grand<-coxph(kmsurvo~.-1,data=combined_reor[,4:ncol(combined_reor)])
fitt_grand
|
a2ae14193719ed5928e220274c8f38c9aee54747
|
344d1c620452d68805cabc7042dda37418346f55
|
/analysis.R
|
add1b92c86bc6747897ab254fd9626ed8def3719
|
[] |
no_license
|
energyandcleanair/202009_uk_cities
|
c13a4438d8c6a44809e89d22c8fa3b68464c3bd8
|
62a3e8ac57d5182a030359a72258109b25e423a4
|
refs/heads/master
| 2023-01-29T10:28:00.295639
| 2020-12-10T01:37:08
| 2020-12-10T01:37:08
| 294,978,467
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 5,654
|
r
|
analysis.R
|
# --- Setup: helper scripts, analysis window, pollutants -------------------
source('./config.R')
source('./plots.R')
source('./utils.R')
source('./tables.R')
# Update measurements
# utils.upload.measurements()
# UK COVID lockdown window and the air-quality data source.
date_from <- "2020-03-23"
date_to <- "2020-05-12"
source <- "defra"
polls <- c("no2","pm25","o3","pm10")
# Get stations
# City-level station metadata; collapse the detailed station types into
# Background / Industrial / Traffic / Unknown.
l <- rcrea::locations(country=country, city=city, source=source, with_meta = T)
l <- l %>% mutate(type=recode(type,
                              "Background Urban"="Background",
                              "Background Suburban"="Background",
                              "Industrial Urban"="Industrial",
                              "Industrial Suburban"="Industrial",
                              "Traffic Urban"="Traffic",
                              .default = "Unknown"),
                  type=tidyr::replace_na(type, "Unknown"))
write.csv(l %>% dplyr::select(id, name, city, type, source),
          file.path(dir_results_data, "stations.csv"), row.names = F)
# Country-wide station metadata (all cities), with the same harmonised
# type categories as the city-level table above.
l.all <- rcrea::locations(country=country, source=source, with_meta = T)
l.all <- l.all %>% mutate(type=recode(type,
                              "Background Urban"="Background",
                              "Background Suburban"="Background",
                              "Industrial Urban"="Industrial",
                              # Fix (consistency): this mapping existed for `l`
                              # but was missing here, so these stations fell
                              # into "Unknown".
                              "Industrial Suburban"="Industrial",
                              "Traffic Urban"="Traffic",
                              .default = "Unknown"),
                  type=tidyr::replace_na(type, "Unknown"))
# Fix: export the country-wide table `l.all`; the original wrote the
# city-level table `l` a second time into stations_all.csv.
write.csv(l.all %>% dplyr::select(id, name, city, type, source),
          file.path(dir_results_data, "stations_all.csv"), row.names = F)
# Get measurements
# City-wide (mc) and per-station (ms) measurement series, cached locally.
mc <- utils.get.city.measurements(city, polls, use_local=T)
ms <- utils.get.station.measurements(city, polls, l, use_local=T)
# Export csvs
export_measurements(mc)
# Plots
# One standard figure set per pollutant.
for(poll in polls){
  plot_poll(poll, ms=ms, mc=mc, date_from=date_from)
}
# Transportation ----------------------------------------------------------
# Traffic indices from three independent providers, compared against
# pollution with 14- and 30-day smoothing windows.
tc.tomtom <- utils.transport.tomtom(city)
tc.apple <- utils.transport.apple(city)
tc.mapbox <- utils.transport.mapbox(date0=date_from, city=city)
plot_traffic_poll_tomtom(mc, tc.tomtom, n_day=14)
plot_traffic_poll_tomtom(mc, tc.tomtom, n_day=30)
plot_traffic_poll_apple(mc, tc.apple, n_day=14)
plot_traffic_poll_apple(mc, tc.apple, n_day=30)
plot_traffic_poll_mapbox(mc, tc.mapbox, n_day=14)
plot_traffic_poll_mapbox(mc, tc.mapbox, n_day=30)
plot_traffic_poll_tomtom_apple(mc, tc.tomtom, tc.apple, n_day=30)
# plot_traffic_poll_apple_tomtom(mc, tc.apple, tc.tomtom, n_day=30)
# plot_corr_traffic_poll_tomtom(mc, tc.tomtom, tc.apple, tc.mapbox, date_from=date_from, date_to=date_to)
# plot_corr_traffic_poll_apple(mc, tc.tomtom, tc.apple, tc.mapbox, date_from=date_from, date_to=date_to)
# plot_corr_traffic_poll_mapbox(mc, tc.tomtom, tc.apple, tc.mapbox, date_from=date_from, date_to=date_to)
# Other charts ------------------------------------------------------------
for(poll in polls){
  plot_predicted_vs_observed(mc, poll=poll, date_from=date_from, date_to=date_to)
  plot_predicted_vs_observed_ts(mc, poll=poll, date_from=date_from, date_to=date_to)
}
plot_anomaly_average(mc %>% filter(tolower(region_id)!="others"), process_anomaly = "anomaly_gbm_lag1_city_mad", date_from=date_from, date_to=date_to, filename=paste0("plot_anomaly_lockdown.jpg"))
# Other tables ------------------------------------------------------------
table_impact(mc, tc.tomtom, tc.apple, tc.mapbox, date_from = date_from, date_to=date_to)
# table_impact(mc, tc.tomtom, tc.apple, tc.mapbox, n_day=7, date_from = date_from, date_to=date_to)
# table_impact(mc, tc.tomtom, tc.apple, tc.mapbox, n_day=14, date_from = date_from, date_to=date_to)
# table_impact(mc, tc.tomtom, tc.apple, tc.mapbox, n_day=30, date_from = date_from, date_to=date_to)
# Questions ---------------------------------------------------------------
# - How many cities recovered their pre-covid levels
# 30-day rolling anomaly: compare the level at lockdown start with the
# level at end of September; positive delta_rebound = pollution rebounded.
mc %>%
  filter(process_id=="anomaly_gbm_lag1_city_mad",
         lubridate::date(date)>=lubridate::date("2020-03-23") - lubridate::days(30),
         lubridate::date(date)<=lubridate::date("2020-09-30")) %>%
  rcrea::utils.running_average(30) %>%
  filter(lubridate::date(date)>=lubridate::date("2020-03-23")) %>%
  group_by(poll, unit, process_id, region_name) %>%
  arrange(date) %>%
  summarise(
    anomaly_lockdown=first(value),
    anomaly_last=last(value)) %>%
  mutate(delta_rebound=anomaly_last-anomaly_lockdown) %>%
  arrange(poll, delta_rebound) %>%
  write.csv(file.path("results","data","rebound_end_september.csv"), row.names = F)
# Same rebound computation, additionally expressed relative to the
# offsetted (counterfactual) level at lockdown start.
mc %>%
  filter(process_id %in% c("anomaly_offsetted_gbm_lag1_city_mad","anomaly_gbm_lag1_city_mad"),
         lubridate::date(date)>=lubridate::date("2020-03-23") - lubridate::days(30),
         lubridate::date(date)<=lubridate::date("2020-09-30")) %>%
  select(-c(unit)) %>%
  rcrea::utils.running_average(30) %>%
  filter(lubridate::date(date)>=lubridate::date("2020-03-23")) %>%
  tidyr::pivot_wider(names_from="process_id", values_from="value") %>%
  rename(anomaly=anomaly_gbm_lag1_city_mad, offsetted=anomaly_offsetted_gbm_lag1_city_mad) %>%
  group_by(poll, region_name) %>%
  arrange(date) %>%
  summarise(
    anomaly_lockdown=first(anomaly),
    offsetted_lockdown=first(offsetted),
    anomaly_last=last(anomaly),
    offsetted_last=last(offsetted)) %>%
  mutate(delta_rebound=anomaly_last-anomaly_lockdown,
         delta_rebound_relative=(anomaly_last-anomaly_lockdown)/offsetted_lockdown) %>%
  arrange(poll, desc(delta_rebound)) %>%
  write.csv(file.path("results","data","rebound_end_september_with_relative.csv"), row.names = F)
# - How many cities went below 10 for PM2.5
|
416940b3ec39f1f510e94f286fdda4a981a8d3e2
|
ffaf8c0014b439ea501f57d03c2cd3ef3186360b
|
/R/findTrains.R
|
d2fa7a5b04c18e8dd55c768b74ce6b129f9f2a40
|
[] |
no_license
|
meredithcmiles/twitchtools
|
67afe2813b3542c5c7950213115d7e0147a5987f
|
a41e7e3459117986c2ec23c477102f10e334f5ca
|
refs/heads/master
| 2020-05-14T02:40:07.667702
| 2019-10-10T14:39:12
| 2019-10-10T14:39:12
| 181,691,083
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,710
|
r
|
findTrains.R
|
# Extract individual stimulus trains (twitch + stimulus traces) from a
# two-channel Wave recording, or from a list of such recordings:
#   left channel  = twitch (force) signal
#   right channel = stimulus signal
# Arguments:
#   wave  - a Wave object (tuneR/seewave style: @left, @right, @samp.rate)
#           or a list of them.
#   names - optional character vector of labels, consumed in order across
#           all detected trains.
#   len   - length of each extracted train window, in samples.
# Returns a list with one element per detected train:
#   list(twitch = ..., stim = ...[, name = ...]).
# NOTE(review): depends on env() (seewave) and findpeaks() (pracma);
# the list branch and the single-Wave branch duplicate the same logic.
findTrains<-function(wave, names=NULL, len=13000){
  if (is.list(wave)==TRUE){
    # --- list input: process each Wave, concatenating results -----------
    waves<-wave
    for(i in 1:length(wave)){
      wave<-waves[[i]]
      twitch=wave@left
      samp.rate<-wave@samp.rate
      stim=wave@right
      # Smoothed amplitude envelope of the stimulus, binarised at 0.5.
      stim.smooth<-as.vector(env(stim, f = samp.rate, envt = "abs", msmooth=c(125, 0.8), norm = TRUE, plot=FALSE))
      stim.smooth[stim.smooth>.5]<-1
      stim.smooth[stim.smooth<.5]<-0
      t.end<-length(stim)/samp.rate
      # Map the (downsampled) envelope back onto original sample indices.
      time<-seq(0, t.end, length.out=length(stim.smooth))
      samples<-round(seq(1, length(twitch), length.out=length(stim.smooth)))
      smooth.out<-matrix(c(samples, time, stim.smooth), nrow=length(stim.smooth), ncol=3, dimnames=list(NULL, c("samples", "sec", "stim")))
      # Wide minimum peak spacing -> train onsets; narrow -> single stimuli.
      trainpeaks<-findpeaks(smooth.out[,3], minpeakheight=0.5, minpeakdistance=100)
      stimpeaks<-findpeaks(smooth.out[,3], minpeakheight=0.5, minpeakdistance=10)
      t.next<-c(stimpeaks[2:(nrow(stimpeaks)),2], NA)
      stim.int<-t.next-stimpeaks[,2]  # inter-stimulus intervals (computed but unused)
      ntrains<-nrow(trainpeaks)
      if(is.null(names)==FALSE){
        names.out<-names[1:ntrains]
      }
      # Window each train starting 1500 samples before onset, clamped to
      # the recording bounds.
      train.start<-smooth.out[trainpeaks[,3],1]-1500
      train.end<-train.start+len
      train.start[train.start<0]<-0
      train.end[train.end>length(twitch)]<-length(twitch)
      trains<-vector(mode="list", length=ntrains)
      for (j in 1:ntrains){
        start<-train.start[j]
        end<-train.end[j]
        twitch.out<-twitch[start:end]
        stim.out<-stim[start:end]
        if(is.null(names)==FALSE){
          name<-names.out[j]
          trains[[j]]<-list("twitch"=twitch.out, "stim"=stim.out, "name"=name)
        } else {
          trains[[j]]<-list("twitch"=twitch.out, "stim"=stim.out)
        }
      }
      if(i==1){
        trains.out<-trains
      } else {
        trains.out<-append(trains.out, trains)
      }
      # Drop the labels already consumed by this Wave's trains.
      names<-names[-(1:ntrains)]
    } ## END MULTI-TRAIN SECTION
  } else {
    # --- single Wave input: same detection logic as the loop body above --
    twitch=wave@left
    samp.rate<-wave@samp.rate
    stim=wave@right
    # smooth out stim
    stim.smooth<-as.vector(env(stim, f = samp.rate, envt = "abs", msmooth=c(125, 0.8), norm = TRUE, plot=FALSE))
    stim.smooth[stim.smooth>.5]<-1
    stim.smooth[stim.smooth<.5]<-0
    t.end<-length(stim)/samp.rate
    # but now we need to keep the timescales tractable
    time<-seq(0, t.end, length.out=length(stim.smooth))
    samples<-round(seq(1, length(twitch), length.out=length(stim.smooth)))
    smooth.out<-matrix(c(samples, time, stim.smooth), nrow=length(stim.smooth), ncol=3, dimnames=list(NULL, c("samples", "sec", "stim")))
    trainpeaks<-findpeaks(smooth.out[,3], minpeakheight=0.5, minpeakdistance=100)
    stimpeaks<-findpeaks(smooth.out[,3], minpeakheight=0.5, minpeakdistance=10)
    t.next<-c(stimpeaks[2:(nrow(stimpeaks)),2], NA)
    stim.int<-t.next-stimpeaks[,2]
    ntrains<-nrow(trainpeaks)
    train.start<-smooth.out[trainpeaks[,3],1]-1500 # get start point for trains in original sample units
    train.end<-train.start+len # get endpoint for trains in original sample units
    train.start[train.start<0]<-0
    train.end[train.end>length(twitch)]<-length(twitch)
    trains.out<-vector(mode="list", length=ntrains)
    for (i in 1:ntrains){
      start<-train.start[i]
      end<-train.end[i]
      twitch.out<-twitch[start:end]
      stim.out<-stim[start:end]
      if(is.null(names)==TRUE){
        trains.out[[i]]<-list("twitch"=twitch.out, "stim"=stim.out)
      } else {
        name<-names[i]
        trains.out[[i]]<-list("twitch"=twitch.out, "stim"=stim.out, "name"=name)
      }
    }
  }
  return(trains.out)
}
|
67d75013243e69d46b1cc03e53ac39c0d36a54a8
|
4cbf6b22c0a043643afd35375645cd54fd4e0df9
|
/Quiz2Question4.R
|
50da9db8d2e2e2bf301f0430229ba09d666d92d4
|
[] |
no_license
|
srakas/gettingandcleaningdatarepo
|
34c8e6681d7e96bfd09b25a2d396626f7a01cf3b
|
268d49ba5ea6b5a717152d9868f9abc155cdf5a9
|
refs/heads/master
| 2021-01-18T17:17:35.803689
| 2015-01-13T10:45:27
| 2015-01-13T10:45:27
| 29,185,328
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 175
|
r
|
Quiz2Question4.R
|
# Read the first 101 lines of the page and report the number of characters
# on lines 10, 20, 30 and 100 (Getting & Cleaning Data, quiz 2 question 4).
page_con <- url("http://biostat.jhsph.edu/~jleek/contact.html")
html_lines <- readLines(page_con, 101)
close(page_con)
chars_per_line <- nchar(html_lines)
chars_per_line[c(10, 20, 30, 100)]
|
695ff0f19d703682b330ed60820078fb04d3bb7b
|
29585dff702209dd446c0ab52ceea046c58e384e
|
/pla/R/invalidLatinSquare.R
|
dbb713399a947e09c4fca77e4fd0719bf65b3d08
|
[] |
no_license
|
ingted/R-Examples
|
825440ce468ce608c4d73e2af4c0a0213b81c0fe
|
d0917dbaf698cb8bc0789db0c3ab07453016eab9
|
refs/heads/master
| 2020-04-14T12:29:22.336088
| 2016-07-21T14:01:14
| 2016-07-21T14:01:14
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 4,013
|
r
|
invalidLatinSquare.R
|
# Validate a Latin-square assay layout: every (row, column),
# (row, sample:step) and (column, sample:step) pair must occur exactly once.
# Returns NULL when the design is valid; otherwise a list of count tables,
# duplicated rows, and the missing combinations by row / column / sample-step.
.invalidLatinSquare <- function(data,
                                response = "Response",
                                row = "Row",
                                column = "Column",
                                sample = "Sample",
                                step = "Dilution",
                                sampleStep = "SampleStep"
                                ) {
    ## mc <- function(sample, step)
    ##     paste0("", sample, "\\textsubscript{", step, "}")
    # Derive the combined "Sample:Step" label column if it is absent.
    if (!any(dimnames(data)[[2]] == sampleStep)) {
        SampleStep <- paste0(as.character(unlist(data[sample])), ":",
                             as.character(unlist(data[step])))
        ## SampleStep <- mc(as.character(unlist(data[sample])),
        ##                  as.character(unlist(data[step])))
        names(SampleStep) <- "SampleStep"
        data <- cbind(data, SampleStep = SampleStep)
    }
    # Duplicated design points for each pair of blocking factors.
    dupletsRC <- data[duplicated(data[c(row, column)]),]
    dupletsRS <- data[duplicated(data[c(row, sampleStep)]),]
    dupletsCS <- data[duplicated(data[c(column, sampleStep)]),]
    Error <- FALSE
    if (dim(dupletsRC)[1] > 0)
        Error <- TRUE
    if (dim(dupletsRS)[1] > 0)
        Error <- TRUE
    if (dim(dupletsCS)[1] > 0)
        Error <- TRUE
    # Cross-tabulations; every cell must be exactly 1 in a Latin square.
    tRC <- table(data[c(row, column)])
    tRS <- table(data[c(row, sampleStep)])
    tCS <- table(data[c(column, sampleStep)])
    Xrow <- NULL
    Xcolumn <- NULL
    XsampleStep <- NULL
    # Any cell count != 1 means the layout is invalid; work out exactly
    # which combinations are missing in each direction.
    if (length(which(tRC != 1)) > 0 |
        length(which(tRS != 1)) > 0 |
        length(which(tCS != 1)) > 0) {
        Error <- TRUE
        Rows <- unique(sort(unlist(data[row])))
        Columns <- unique(sort(unlist(data[column])))
        SampleSteps <- unique(sort(unlist(data[sampleStep])))
        byRow <- split(data[, c(sampleStep, column)], data[, row])
        byColumn <- split(data[, c(sampleStep, row)], data[, column])
        bySample <- split(data[, c(row, column)], data[, sampleStep])
        # Helpers: the factor levels absent from a given sub-table.
        funRow <- function(item)
            Rows[is.na(match(Rows, unlist(item[row])))]
        funCol <- function(item)
            Columns[is.na(match(Columns, unlist(item[column])))]
        funSample <- function(item)
            SampleSteps[is.na(match(SampleSteps,
                                    unlist(item[sampleStep])))]
        for(i in 1:length(byRow)) {
            item <- byRow[[i]]
            missing <- data.frame(column = funCol(item),
                                  sampleStep = funSample(item))
            if (dim(missing)[[1]] > 0)
                Xrow <- rbind(Xrow,
                              cbind(row = rep(i, dim(missing)[1]), missing))
        }
        for(i in 1:length(byColumn)) {
            item <- byColumn[[i]]
            missing <- data.frame(row = funRow(item),
                                  sampleStep = funSample(item))
            if (dim(missing)[[1]] > 0)
                Xcolumn <- rbind(Xcolumn,
                                 cbind(column = rep(i,
                                           dim(missing)[1]), missing))
        }
        for(i in 1:length(bySample)) {
            item <- bySample[[i]]
            missing <- data.frame(row = funRow(item), column = funCol(item))
            if (dim(missing)[[1]] > 0)
                XsampleStep <- rbind(XsampleStep,
                                     cbind(sampleStep = rep(SampleSteps[i],
                                               dim(missing)[1]), missing))
        }
    }
    # NULL = valid design; otherwise return full diagnostics.
    Result <- NULL
    if (Error)
        Result <- list(
            countsRowColumn = tRC,
            countsRowSample = tRS,
            countsColumnSample = tCS,
            dupletsRowColumn = dupletsRC,
            dupletsRowSample = dupletsRS,
            dupletsColumnSample = dupletsCS,
            missingByRow = Xrow,
            missingByColumn = Xcolumn,
            missingBySampleStep = XsampleStep)
    return(Result)
}
|
5ed586058e69b9741b26176ecd6ce75f517d5ea9
|
10f047c7631b3aad90c7410c567c588993bfa647
|
/EcuRCode/WeightVsNestSize/PaperCode/CurrentPaperCode_9Aug/FunctionOverdisperseTest.R
|
7f8b1bfd8555f7d74362f158f882081a59d2e62e
|
[] |
no_license
|
ruthubc/ruthubc
|
ee5bc4aa2b3509986e8471f049b320e1b93ce1d5
|
efa8a29fcff863a2419319b3d156b293a398c3a9
|
refs/heads/master
| 2021-01-24T08:05:40.590243
| 2017-08-30T01:37:56
| 2017-08-30T01:37:56
| 34,295,740
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,493
|
r
|
FunctionOverdisperseTest.R
|
# TODO: Add comment
#
# Author: Ruth
###############################################################################
# from http://ase.tufts.edu/gsc/gradresources/guidetomixedmodelsinr/mixed%20model%20guide.html
overdisp_fun <- function(model) {
  # Overdispersion test for a mixed model: compares the sum of squared
  # Pearson residuals against the residual degrees of freedom.
  # Returns c(chisq, ratio, rdf, p); p < 0.05 suggests overdispersion.
  vpars <- function(m) {
    # Free (co)variance parameters in an n-by-n variance-covariance matrix.
    nrow(m) * (nrow(m) + 1) / 2
  }
  # Residual df = observations minus (variance parameters + fixed effects).
  n_params <- sum(sapply(VarCorr(model), vpars)) + length(fixef(model))
  rdf <- nrow(model.frame(model)) - n_params
  # Pearson chi-square from the model residuals.
  pearson_resid <- residuals(model, type = "pearson")
  chisq_stat <- sum(pearson_resid^2)
  ratio <- chisq_stat / rdf
  pval <- pchisq(chisq_stat, df = rdf, lower.tail = FALSE)
  c(chisq = chisq_stat, ratio = ratio, rdf = rdf, p = pval)
}
# --- Exploratory scratch work (interactive session dump) ------------------
# NOTE(review): the lines below reference objects (PQLMod, model, condVar)
# that must already exist in the workspace, and several calls are broken
# experiments; kept for reference, with the parse error fixed.
library(MASS)
summary(model)
overdisp_fun(PQLMod)
length(fixef(model))
ranef(model)
model.frame(model)
model <- PQLMod
Anova(model)
dim(condVar)
a_output <- VarCorr(model)
getVarCov(model)
an_model <- vcov(model) # fixed effect variance covariance matrix
# NOTE(review): vpars is only defined inside overdisp_fun(), so this line
# fails at top level unless vpars is redefined in the workspace.
model.df <- sum(sapply(VarCorr(model), vpars)) + length(fixef(model))
a_output2 <- lapply(VarCorr(model), vpars)
sum(a_output)
sum
# Fix: the original line had an unmatched "(" which made the whole file
# unparseable.
an_output <- unlist(lapply(a_output, function(x) sum(x)))
mapply(a_output) # NOTE(review): mapply() needs FUN plus arguments -- likely a typo
sum(an_output, na.rm = TRUE)
typeof(an_output)
a_output[,2]
|
eaec14d9d0f8808e1dcf5beadb0b2eb1289494f2
|
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
|
/data/genthat_extracted_code/sjmisc/examples/is_crossed.Rd.R
|
35cb328a5cc4978bcc15276d3ecc971da269dc93
|
[] |
no_license
|
surayaaramli/typeRrh
|
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
|
66e6996f31961bc8b9aafe1a6a6098327b66bf71
|
refs/heads/master
| 2023-05-05T04:05:31.617869
| 2019-04-25T22:10:06
| 2019-04-25T22:10:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 961
|
r
|
is_crossed.Rd.R
|
# Extracted example code for sjmisc::is_crossed / is_nested /
# is_cross_classified (from the package's Rd examples).
library(sjmisc)
### Name: is_crossed
### Title: Check whether two factors are crossed or nested
### Aliases: is_crossed is_nested is_cross_classified
### ** Examples
# crossed factors, each category of
# x appears in each category of y
x <- c(1,4,3,2,3,2,1,4)
y <- c(1,1,1,2,2,1,2,2)
# show distribution
table(x, y)
# check if crossed
is_crossed(x, y)
# not crossed factors
x <- c(1,4,3,2,3,2,1,4)
y <- c(1,1,1,2,1,1,2,2)
# show distribution
table(x, y)
# check if crossed
is_crossed(x, y)
# nested factors, each category of
# x appears in one category of y
x <- c(1,2,3,4,5,6,7,8,9)
y <- c(1,1,1,2,2,2,3,3,3)
# show distribution
table(x, y)
# check if nested
is_nested(x, y)
is_nested(y, x)
# not nested factors
x <- c(1,2,3,4,5,6,7,8,9,1,2)
y <- c(1,1,1,2,2,2,3,3,3,2,3)
# show distribution
table(x, y)
# check if nested
is_nested(x, y)
is_nested(y, x)
# also not fully crossed
is_crossed(x, y)
# but partially crossed
is_cross_classified(x, y)
|
a4963b99d1ef1f7812f61c02efe0065537f49276
|
5201716c9c6ce47e90bda56c61ed7a17504b05cd
|
/run_analysis.R
|
8fb91a057d4debcdd6491c5759f2bb3ef47bd3a9
|
[] |
no_license
|
Arpi15/Getting_and_Cleaning_Data_Peer_Assessment
|
5734966a70a5785faf0902deb7b2e839dc49e03c
|
907dc182ded5fde8e14375ed9d51b9b93857ab25
|
refs/heads/master
| 2022-07-28T23:01:17.532932
| 2020-05-21T14:46:40
| 2020-05-21T14:46:40
| 265,827,504
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,758
|
r
|
run_analysis.R
|
# Getting & Cleaning Data course project: build a tidy summary of the UCI
# Human Activity Recognition (HAR) smartphone dataset.
# reading training data
x_train <- read.table("X_train.txt")
y_train <- read.table("y_train.txt")
subject_train <- read.table("subject_train.txt")
head(x_train)
names(x_train)
#reading test data
x_test <- read.table("X_test.txt")
y_test <- read.table("y_test.txt")
subject_test <- read.table("subject_test.txt")
#reading activity and features data
activity <- read.table("activity_labels.txt")
head(x_test)
names(x_test)
features <- read.table("features.txt")
#naming training set columns
# Feature names come from the second column of features.txt.
colnames(x_train) <- features[, 2]
colnames(y_train) <- "activity"
colnames(subject_train) <- "subject_id"
colnames(activity) <- c("activity_id", "activity_name")
#naming test set columns
colnames(x_test) <- features[, 2]
colnames(y_test) <- "activity"
colnames(subject_test) <- "subject_id"
#merging training and test dataset
training_dataset <- cbind(x_train, subject_train, y_train)
dim(training_dataset)
test_datset <- cbind(x_test, subject_test, y_test)
dim(test_datset)
new_cmbnd_data <- rbind(training_dataset, test_datset)
dim(new_cmbnd_data)
#Extracts only the measurements on the mean and standard deviation for each measurement
str(new_cmbnd_data)
names(new_cmbnd_data)
sum(is.na(new_cmbnd_data))
library(dplyr)
noduplicate_data <- new_cmbnd_data[ , !duplicated(colnames(new_cmbnd_data))] #removing duplicate column names
dim(noduplicate_data)
col_names <- names(noduplicate_data)
# Keep the id/activity columns plus only the mean() and std() features.
sub_data <- grepl("subject_id", col_names) | grepl("activity", col_names) | grepl("mean\\(\\)", col_names) | grepl("std\\(\\)", col_names)
noduplicate_data <- subset(noduplicate_data, select = sub_data)
str(noduplicate_data)
#Uses descriptive activity names to name the activities in the data set
noduplicate_data$activity <- factor(noduplicate_data$activity)
levels(noduplicate_data$activity) <- activity$activity_name
head(noduplicate_data$activity, 30)
#Appropriately labels the data set with descriptive variable names
names(noduplicate_data) <- gsub("BodyBody", replacement = "Body", names(noduplicate_data))
names(noduplicate_data) <- gsub("^t", replacement = "time", names(noduplicate_data))
names(noduplicate_data) <- gsub("^f", replacement = "frequency", names(noduplicate_data))
#creates a second, independent tidy data set with the average of each variable for each activity and each subject.
tidy_data <- noduplicate_data %>% group_by(subject_id, activity) %>% summarise_all(list(mean)) %>% ungroup()
tidy_dataset <- as.data.frame(tidy_data)
#writing tidy dataset to working directory as a text file
write.table(tidy_dataset, file = "tidy_dataset.txt", row.names = FALSE)
#codebook
# NOTE(review): install.packages() inside a script runs on every execution;
# consider guarding with requireNamespace("dataMaid", quietly = TRUE).
install.packages("dataMaid")
library(dataMaid)
description(tidy_dataset)
makeCodebook(tidy_dataset, file = "tidy_dataset_codebook.Rmd")
|
8743686de0e32b05fe5ffc24f69eaf1d7b757a4a
|
58fbb09a48bcfbc878322f54d59b1c5e18d0ce47
|
/R/function-mcpf.R
|
7c8e1dcb7b11e4cb51846d7d437554d231736aad
|
[] |
no_license
|
WangJJ-xrk/MCPF
|
c596f89221bf43322f6c0f8b322b3b35d4005fd3
|
2c8105e44a17592c2235b5ede26f52f87d6acaf0
|
refs/heads/master
| 2022-08-22T11:31:57.626740
| 2020-05-29T05:59:52
| 2020-05-29T05:59:52
| 266,784,022
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 4,901
|
r
|
function-mcpf.R
|
#' Calculation of Pseudo F Statistic Matrix.
#'
#' \code{calct1t2} computes, for every candidate dividing point, the pseudo
#' F statistics of the two similarity matrices obtained by splitting the
#' columns of \code{Y} at that point.
#'
#' @param Y the projection of pseudo outcome data.
#' @param point.seq a vector containing the dividing points.
#' @param H1 the difference between two projection matrixes.
#' @param H2 the difference between the identity matirx and a projection
#' matirx.
#'
#' @return a length(point.seq)*2 matrix of pseudo F statistics.
calct1t2 <- function(Y, point.seq, H1, H2){
  n_col <- dim(Y)[2]
  # For each cut point, build the Gram matrices of the left and right
  # column blocks and form the ratio sum(H1*G) / sum(H2*G) for each.
  stat_pairs <- vapply(point.seq, function(cut) {
    left <- as.matrix(Y[, 1:cut])
    right <- as.matrix(Y[, (cut + 1):n_col])
    G_left <- left %*% t(left)
    G_right <- right %*% t(right)
    c(sum(H1 * G_left) / sum(H2 * G_left),
      sum(H1 * G_right) / sum(H2 * G_right))
  }, numeric(2))
  # vapply gives a 2 x kcut matrix; transpose to kcut x 2 as documented.
  t(stat_pairs)
}
#' Generate Fibonacci Sequence to be Used as Dividing Points
#'
#' \code{Fibonacci} builds the Fibonacci numbers strictly below \code{n},
#' dropping the leading 1 of the classical sequence so the result can be
#' used directly as dividing points in MCPF.
#'
#' @param n a number indicating the maximum of the generated sequence is
#' less than n.
#'
#' @return a vector.
Fibonacci <- function(n){
  # Seed with the first two Fibonacci numbers.
  fib <- c(1, 1)
  if (n == 1 || n == 2) {
    return(fib[1])
  }
  # Extend until the newest value reaches n; the final appended element is
  # the first one >= n and is trimmed off below, along with the leading 1.
  while (fib[length(fib)] < n) {
    fib <- c(fib, fib[length(fib)] + fib[length(fib) - 1])
  }
  fib[2:(length(fib) - 1)]
}
#' Significance Calculation of MCPF in Distance-based Regression Model
#'
#' \code{mcpf} calculates the p value of MCPF in distance regression model.
#'
#' @param simi.mat a similarity matrix.
#' @param dis.mat a distance matrix. Only one of these two matrixes needs
#' to be given. If dis.mat is given, it is then transformed to the
#' similarity matrix.
#' @param null.space an index vector containing the index of ${X}_1$,
#' the covariate matrix to be adjusted for.
#' @param x.mat a predictor matrix which contains the covariates and
#' the predictors of interest.
#' @param n.monterCarlo the number of permutation replicates.
#' @param rMethod the method used to generate the vector of dividing point.
#' It should be one of "interpolation" and "fibonacci", where
#' "interpolation" indicates that the vector is generated via interpolation
#' strategy, while "fibonacci" indicates that it is generated through
#' Fibonacci sequence.
#'
#' @return the p value of MCPF.
#'
#' @importFrom MASS ginv
#'
#' @export
#'
#' @examples
#' library(MASS)
#' n = 50
#' p = 100
#' k = 15
#' sigmax = diag(rep(0.5,k)) + matrix(0.5,k,k)
#' sigmay = diag(rep(1,p))
#' for(i in 1:p){
#' for(j in 1:p){
#' sigmay[i,j] = 0.5^abs(i-j)
#' }
#' }
#' r1 = 0.05
#' beta0 = r1*matrix(rbinom(k*p,1,0.9), k, p)
#' x = mvrnorm(n, rep(0,k), sigmax)
#' y = x%*%beta0 + mvrnorm(n, rep(0,p), sigmay)
#' Ky = calcKerMat_Cen(y)
#' mcpf(Ky,null.space = 1:5,x.mat = x,rMethod = "interpolation")
mcpf <- function(simi.mat, dis.mat = NULL, null.space,
                 x.mat, n.monterCarlo = 1000,
                 rMethod = c("interpolation","fibonacci")){
  # If a distance matrix is supplied, convert it to a similarity matrix.
  if(is.null(dis.mat) == FALSE){
    simi.mat = -0.5*dis.mat^2
  }
  # X1: the covariates to adjust for (the null space).
  x1 = x.mat[,null.space]
  if(length(null.space)==1){
    x1 = matrix(x1, ncol=1)
  }
  # Hat (projection) matrices for the full and null designs.
  Hx = x.mat %*% ginv(t(x.mat)%*%x.mat) %*% t(x.mat)
  Hx1 = x1 %*% ginv(t(x1)%*%x1) %*% t(x1)
  n = nrow(simi.mat)
  In = diag(rep(1,n))
  H1 = Hx - Hx1  # projection onto the predictors of interest
  H2 = In - Hx   # residual projection, full model
  H3 = In - Hx1  # residual projection, null model
  # Eigen-decompose the similarity matrix; keep components with positive
  # eigenvalues (> 0.001) to form the pseudo outcome data GY.
  eigG = eigen(simi.mat)
  Gval = eigG$values
  Gvec = eigG$vectors
  k = sum(Gval>0.001)
  GY = Gvec[,1:k] %*% diag(sqrt(Gval[1:k]))
  QY = H3 %*% GY  # project out the null-space covariates
  # Candidate dividing points for splitting the eigen-components.
  # NOTE(review): the default rMethod has length 2, and `rMethod == ...`
  # inside if() assumes a scalar -- consider match.arg().
  if(rMethod == "interpolation"){
    # Cut where the eigenvalue decay stops accelerating.
    kcut = k-1
    difGval = diff(Gval)
    for(vv in 1:(k-2)){
      if(difGval[vv]>difGval[vv+1]){
        kcut = vv
        break
      }
    }
    seq0 = seq(1:kcut)
  }else if(rMethod == "fibonacci"){
    seq0 = Fibonacci(k)
    kcut = length(seq0)
  }else{
    return(message("'arg' should be one of \"interpolation\" and \"fibonacci\"."))
  }
  # Observed statistics plus their permutation null distribution.
  B2 = n.monterCarlo
  T0 = calct1t2(QY, seq0, H1, H2)
  mat1 = matrix(NA, kcut, B2)
  mat2 = matrix(NA, kcut, B2)
  U = 1:n
  for(j in 1:B2){
    # Permute rows of QY to break any association with the design.
    newQY = QY[sample(U, replace = FALSE),]
    newT = calct1t2(newQY,seq0,H1,H2)
    mat1[,j] = newT[,1]
    mat2[,j] = newT[,2]
  }
  # Fisher-combine the two per-cut p-values, and build the matching
  # permutation distribution of the combined statistic.
  rank1 = matrix(NA, kcut, B2)
  rank2 = matrix(NA, kcut, B2)
  combvec = rep(NA, kcut)
  combmat = matrix(NA, kcut, B2)
  for(l in 1:kcut){
    p1 = (sum(mat1[l,] > T0[l,1])+1)/(B2+1)
    p2 = (sum(mat2[l,] > T0[l,2])+1)/(B2+1)
    combvec[l] = -2*log(p1) - 2*log(p2)
    rank1[l,] = (B2 -rank(mat1[l,]) + 1)/B2
    rank2[l,] = (B2 -rank(mat2[l,]) + 1)/B2
    for(w in 1:B2){
      combmat[l,w] = -2*log(rank1[l,w]) - 2*log(rank2[l,w])
    }
  }
  # MCPF statistic: the maximum combined statistic over all cuts; p-value
  # is read off the permutation distribution of the maxima.
  mcpf = max(combvec)
  maxvec = apply(combmat,2,max)
  pvalue = sum(maxvec > mcpf)/B2
  return(pvalue)
}
|
83ca2bc42471c5cc46a63397b70251e6a76a2814
|
00d0db486665c80ab821cb77a1f41ed7874c591c
|
/Bioconductor for Genomic Analysis /lectures/R_S4 (1).R
|
157be5518dfe69304d86addca9f8b418f939ec93
|
[] |
no_license
|
Mohnish7869/DATA_SCIENCE_IN_BIOLOGY
|
37a6b2d3e05b0d62162f919fa06d22af620faada
|
113165e0c1e9aee0c9b68ae93b1681ac068b6ae6
|
refs/heads/master
| 2023-01-02T01:56:29.475343
| 2020-09-30T12:37:05
| 2020-09-30T12:37:05
| 299,911,494
| 0
| 0
| null | 2020-09-30T12:27:57
| 2020-09-30T12:27:56
| null |
UTF-8
|
R
| false
| false
| 3,137
|
r
|
R_S4 (1).R
|
# Extracted R code from an Rmd lecture on S3/S4 classes and methods
# (Bioconductor); chunk headers are kept as "## ----name----" comments.
## ----dependencies, warning=FALSE, message=FALSE--------------------------
library(ALL)
library(GenomicRanges)
## ----biocLite, eval=FALSE------------------------------------------------
## source("http://www.bioconductor.org/biocLite.R")
## biocLite(c("ALL", "GenomicRanges"))
## ----lm------------------------------------------------------------------
# S3 example: an lm fit is just a classed list.
df <- data.frame(y = rnorm(10), x = rnorm(10))
lm.object <- lm(y ~ x, data = df)
lm.object
names(lm.object)
class(lm.object)
## ----lm2-----------------------------------------------------------------
# S3 classes are not validated: any list can claim class "lm".
xx <- list(a = letters[1:3], b = rnorm(3))
xx
class(xx) <- "lm"
xx
## ----ALL-----------------------------------------------------------------
library(ALL)
data(ALL)
ALL
class(ALL)
isS4(ALL)
## ----help, eval=FALSE----------------------------------------------------
## ?"ExpressionSet-class"
## class?ExpressionSet
## ----list----------------------------------------------------------------
xx <- list(a = 1:3)
## ----ExpressionSet-------------------------------------------------------
ExpressionSet()
## ----help2,eval=FALSE----------------------------------------------------
## ?ExpressionSet
## ----newExpressionSet----------------------------------------------------
new("ExpressionSet")
## ----getClass------------------------------------------------------------
getClass("ExpressionSet")
## ----slots---------------------------------------------------------------
# Slot access: @ / slot() work, but accessor functions are preferred.
ALL@annotation
slot(ALL, "annotation")
## ----accessor------------------------------------------------------------
annotation(ALL)
## ----updateObject, eval=FALSE--------------------------------------------
## new_object <- updateObject(old_object)
## ----updateObject2, eval=FALSE-------------------------------------------
## object <- updateObject(object)
## ----validity------------------------------------------------------------
validObject(ALL)
## ----mimicMethod---------------------------------------------------------
# Manual type dispatch -- what S4 generics/methods automate.
mimicMethod <- function(x) {
    if (is(x, "matrix"))
        method1(x)
    if (is(x, "data.frame"))
        method2(x)
    if (is(x, "IRanges"))
        method3(x)
}
## ----as.data.frame-------------------------------------------------------
as.data.frame
## ----showMethods---------------------------------------------------------
showMethods("as.data.frame")
## ----getMethod-----------------------------------------------------------
getMethod("as.data.frame", "DataFrame")
## ----base_as.data.frame--------------------------------------------------
base::as.data.frame
## ----helpMethod,eval=FALSE-----------------------------------------------
## method?as.data.frame,DataFrame
## ?"as.data.frame-method,DataFrame"
## ----findOverlaps--------------------------------------------------------
showMethods("findOverlaps")
## ----ignore.strand-------------------------------------------------------
getMethod("findOverlaps", signature(query = "Ranges", subject = "Ranges"))
getMethod("findOverlaps", signature(query = "GenomicRanges", subject = "GenomicRanges"))
## ----sessionInfo, echo=FALSE---------------------------------------------
sessionInfo()
|
b2fb9946ced73c6d72b3a3a1c1fe0a5ef7466130
|
be15fb577e5d4f39e29fd647d7ffc9e44dfe44ac
|
/Model_Versions/Uncertainty_SLR_GEV/Sensitivity_Analysis/OAT/Scripts/exceedance_prob_OAT.R
|
eb4df3af6f9ab88702af37ecca25a6e92860f8ee
|
[] |
no_license
|
scrim-network/VanDantzig
|
d2bfe66d90f6bc9aa8458443be9433d34ab520b6
|
9f1b876e3c8506262d6238d078b0f4d33efd702e
|
refs/heads/master
| 2021-04-30T12:13:13.759818
| 2018-03-20T12:33:07
| 2018-03-20T12:33:07
| 121,269,677
| 0
| 1
| null | 2018-02-12T16:14:25
| 2018-02-12T16:14:25
| null |
UTF-8
|
R
| false
| false
| 1,404
|
r
|
exceedance_prob_OAT.R
|
###################################
# file: exceedance_prob_OAT.R
###################################
# Author and copyright: Perry Oddo
# Pennsylvania State University
# poddo@psu.edu
###################################
# Generates survival function curves
# for exceedance probability estimations
# for use in OAT analysis
####################################
library(fExtremes)
# Determine exceedance probabilities by generating survival curves for each GEV parameter set
# Quantile grid; the two endpoints are dropped below to avoid the infinite
# 0 and 1 quantiles of the GEV.
q = seq(0,1, length.out = 10^4+2)
length_param = length(Parameters[,1])
# One GEV quantile curve (column) per parameter set in `Parameters`.
gev <- sapply(1:length_param, function(x){
  qgev(q, Parameters$xi[x], Parameters$mu[x], Parameters$sigma[x])
})
gev <- gev[2:(length(gev[,1])-1),]
q <- seq(0,1, length.out = length(gev[,1]))
# Per-curve support bounds; /100 converts cm to m -- TODO confirm units.
min_es <- sapply(1:length_param, function(x){
  min(gev[,x]/100)
})
max_es <- sapply(1:length_param, function(x){
  max(gev[,x]/100)
})
p_exceed <- mat.or.vec(length_param, 1)
p_index <- mat.or.vec(length_param, 1)  # NOTE(review): unused in this script
# Function for Initial Exceedance Frequency
# For a height X, return one exceedance probability per GEV curve:
# 1 below the curve's support, 0 above it, otherwise 1 - F(X) read off
# the nearest tabulated quantile.
exceedance_prob <- function(X){
  p_exceed <- mat.or.vec(length_param, 1)
  p_exceed <- sapply(1:length_param, function(i) {
    if(X <= min_es[i]) {
      return(1)
    }
    if(X >= max_es[i]) {
      return(0)
    }
    if(X > min_es[i] && X < max_es[i]) {
      return(1-q[which.min(abs(gev[,i]/100 - X))])
    }
  })
  return(p_exceed)
}
|
af111c2fe9099374489bf6ae116b9c2eaa31ce2b
|
a25ac37ac0d9090020f0c7f32d9604cde793bdc6
|
/01-Regression.R
|
0a2bcd95ac91b1518ffa41a82a64c97efdeb4b9f
|
[] |
no_license
|
faridskyman/RCourse
|
cafeed8ce722e7d4833eeb71145f6190a9aee972
|
967ed1502471c4965d4fdcc63a9090bec9fdd32e
|
refs/heads/master
| 2021-01-20T18:44:52.062716
| 2016-07-02T02:27:06
| 2016-07-02T02:27:06
| 62,296,051
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,263
|
r
|
01-Regression.R
|
# tutorials from http://www.r-bloggers.com/simple-linear-regression/
"Suppose you want to obtain a linear relationship between weight (kg) and height (cm) of 10 subjects.
Height: 175, 168, 170, 171, 169, 165, 165, 160, 180, 186
Weight: 80, 68, 72, 75, 70, 65, 62, 60, 85, 90
The first problem is to decide what is the dependent variable Y and what is the independent variable X.
In our case:
* (x) - 'weight' is independent variable, not affected by an error (as its an inputed var)
* (y) - 'height' is dependent variable, is affected by error, as its a predicted/ calculated var
problem is to find a linear relationship (formula) that allows us to calculate the height,
known as the weight of an individual.
The simplest formula is that of a broad line of type Y = a + bX.
The simple regression line in R is calculated as follows:"
# Input data
height = c(175, 168, 170, 171, 169, 165, 165, 160, 180, 186)
weight = c(80, 68, 72, 75, 70, 65, 62, 60, 85, 90)
# you declare first the dependent variable, and after the independent variable (or variables).
fit <- lm(formula = height ~ weight, x=TRUE, y=TRUE)
summary(fit)
fit
# flots scatter based on the data
plot(weight, height)
# draws the regresison line
abline(fit)
|
1c0d264eadfa52401b6ec274c107154e0115bffc
|
11de9e10b0b9fa137bbf8607bfd321e9d5fede6e
|
/mPowerEI/man/recordMap.Rd
|
334641b4ed31c288909ec086c43cddb5071bdf8e
|
[
"MIT"
] |
permissive
|
MonteShaffer/mPowerEI
|
dbc906db491716f15295899f7e10efac6601531b
|
d587c4fda58a377b9bbfb3327411249a11049aa2
|
refs/heads/master
| 2021-05-16T08:48:51.456692
| 2017-10-05T11:03:30
| 2017-10-05T11:03:30
| 104,287,224
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 503
|
rd
|
recordMap.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/loadSetup.R
\name{recordMap}
\alias{recordMap}
\title{Map table data to Records (rv)}
\usage{
recordMap(mPower)
}
\arguments{
\item{mPower}{list of data objects}
}
\value{
a list 'recs' of data objects, mapped via recordStringToVariable(RecordId)
}
\description{
Map table data to Records (rv)
}
\examples{
mPower = loadSynapseData(); # loads summary info of data tables
recs = recordMap(mPower); # organizes data based on rv
}
|
601ac01284e4a5064c31e5cc283d50613ded8959
|
162eb695c97820b7a85c458087553758fe73f7c5
|
/Figure1_spectrumChart.R
|
7732025631e2df9602fc92135cb156a077ab8c95
|
[] |
no_license
|
edwardHujber/ENUanalysis
|
d5742366d714a7a8fc95269c9a55b7f4acacb2db
|
109279761db723ba732e6e57250f1bd0ee52491f
|
refs/heads/master
| 2021-01-21T19:02:04.131878
| 2017-05-22T23:50:15
| 2017-05-22T23:50:15
| 92,109,014
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 4,356
|
r
|
Figure1_spectrumChart.R
|
library("abind")
#### 'dat' should already have been defined by one of the import scripts.
# Standard error of the mean: sd(x) / sqrt(n).
# If na.rm is TRUE, NA values are dropped before the calculation;
# otherwise any NA propagates into the result.
SEM <- function(x, na.rm = FALSE) {
  if (na.rm) {
    x <- x[!is.na(x)]
  }
  sd(x) / sqrt(length(x))
}
# Draw a bar plot of row means with +/- 1 SEM error bars.
# `table`: matrix-like input (rows = groups, columns = replicates), or a
# dimensionless vector, which is treated as the means themselves (SEM = 0).
# Prints the means and SEMs as a side effect before plotting.
barPlotwSEM <- function(table, ylab = NULL, xlab = NULL) {
  if (length(dim(table)) == 0) {
    meanCount <- table
    SEMCount <- 0
  } else {
    meanCount <- rowMeans(table)
    SEMCount <- apply(table, 1, SEM)
  }
  print(c("means:", meanCount))
  print(c("SEMs:", SEMCount))
  # Leave 20% headroom above the tallest bar for the error bars.
  bars <- barplot(meanCount,
                  ylim = c(0, max(meanCount, na.rm = TRUE) * 1.2),
                  xlab = xlab, ylab = ylab)
  # Flat-capped vertical segments spanning mean +/- SEM on each bar.
  arrows(x0 = bars,
         y0 = meanCount - SEMCount, y1 = meanCount + SEMCount,
         angle = 90, code = 3, length = 0)
}
# Apply FUN to the subset of DAT rows belonging to each strain (DAT$ID),
# in order of first appearance, and bind the per-strain results along a
# new trailing dimension with abind().
# Prints the strain ids, each subset's row count and each result as it goes.
repFunctionOverStrains <- function(DAT, FUN) {
  strainIds <- unique(DAT$ID)
  print(strainIds)
  accum <- NULL
  for (s in strainIds) {
    subDat <- DAT[which(DAT$ID == s), ]
    print(nrow(subDat))
    res <- FUN(subDat)
    print(res)
    # along = rank(res) + 1 stacks results in a fresh last dimension.
    accum <- abind(accum, res, along = (1 + length(dim(res))))
  }
  accum
}
## count variants, transitions, transversion
# Script body: tallies single-nucleotide variant types per strain and plots
# how the mutation spectrum shifts as the altCount cutoff increases.
# Depends on globals defined elsewhere in the file: dat, nt, ntnum,
# purines, pyrimidines, mutRows, GCbias, VARS2KEEP — TODO confirm.
codingOnly <- FALSE
oneLinePerVar <- TRUE
useThisDat<-dat
# Optionally keep only variants with a codon change annotation
if(codingOnly){useThisDat <- useThisDat[which(nchar(as.character(useThisDat$Old_codon.New_codon))!=0),]}
# Optionally de-duplicate: keep only the first row for each CPRC id
if(oneLinePerVar){
CPRCs <- levels(factor(useThisDat$CPRC))
for(i in 1:length(CPRCs)){
if(length(which(useThisDat$CPRC==CPRCs[i]))>1) { useThisDat<- useThisDat[-which(useThisDat$CPRC==CPRCs[i])[-1],] }
}
}
# Sweep the minimum altCount cutoff from 1 to 14; filtering is cumulative
# across iterations (each pass re-filters the already-filtered data).
paramCut <- 1
while(paramCut<15) {
useThisDat <- useThisDat[which(useThisDat$altCount >= paramCut),]
# Per strain: build a Ref->Var count table and collapse it to the six
# strand-symmetric mutation types, with and without GC-bias normalisation.
GAper<-repFunctionOverStrains(useThisDat,function(x){
useThisDat <- x
varTypes <- data.frame("Ref"=NA,"Var"=NA,"Count"=NA)
# Count every ordered Ref != Var nucleotide pair
for(i in ntnum){
for(j in ntnum[-i]){
cnt<-length(which(useThisDat$wt_dna==nt[i] & useThisDat$mut_dna==nt[j]))
varTypes <- rbind(varTypes,c(nt[i],nt[j],cnt))
}
}
# Drop the NA seed row used to initialise the data frame
varTypes <- varTypes[-1,]
varTypes$Percent <- apply(varTypes,1,function(x) as.numeric(x[3])/sum(as.numeric(varTypes$Count)) )
# Transitions: purine->purine or pyrimidine->pyrimidine
transitions<- varTypes[which((is.na(match(varTypes$Ref,purines))==FALSE &
is.na(match(varTypes$Var,purines))==FALSE) |
(is.na(match(varTypes$Ref,pyrimidines))==FALSE &
is.na(match(varTypes$Var,pyrimidines))==FALSE)),]
# Transversions: purine<->pyrimidine
transversions <- varTypes[which((is.na(match(varTypes$Ref,purines))==FALSE &
is.na(match(varTypes$Var,pyrimidines))==FALSE) |
(is.na(match(varTypes$Ref,pyrimidines))==FALSE &
is.na(match(varTypes$Var,purines))==FALSE)),]
## Move varTypes to the cleaner mutTypes
mutType <- data.frame("mutType"=mutRows[c(6,2,4,5,1,3)])
# Sum the two complementary Ref/Var pairs encoded in each mutType label
# (characters 1/7 and 3/9 of strings like "A/T > C/G")
mutType$Count <- apply(mutType,1,function(x)
sum(as.numeric(varTypes$Count[which( (varTypes$Ref==substring(x,1,1)&varTypes$Var==substring(x,7,7)) | (varTypes$Ref==substring(x,3,3)&varTypes$Var==substring(x,9,9)) )]))
)
# Normalise counts by genomic base composition (GCbias = GC fraction)
mutType$countGCnormal <- apply(mutType,1,function(x){
if(grepl("A/T >",x['mutType'])){return(as.numeric(x['Count'])/(1-GCbias))}
if(grepl("C/G >",x['mutType'])){return(as.numeric(x['Count'])/(GCbias))}
})
mutType$Percent <- apply(mutType,1,function(x) as.numeric(x['Count'])/sum(as.numeric(mutType$Count)) )
mutType$PercentGCnormal <- apply(mutType,1,function(x) as.numeric(x['countGCnormal'])/sum(as.numeric(mutType$countGCnormal)) )
return(mutType)
})
# Mean of column 5 (PercentGCnormal) across strains for this cutoff
enu2rowmeans <- rowMeans(matrix(nrow=dim(GAper[,5,])[1],ncol=dim(GAper[,5,])[2],as.numeric(GAper[,5,])))
# barPlotwSEM(GAper)
# rowMeans(tab)
# # matx[1,altCountCutoff+1]<-round(mutType$PercentGCnormal[which(mutType$mutType=="C/G > T/A")],3)*100
# print(altCountCutoff)
# }
# plot(x=0:maxAlt,y=matx)
# Accumulate one row block per cutoff for the stacked-area plot below
if(paramCut == 1){
RMs <- data.frame("cutoff"=paramCut,"mut"=GAper[,1,1], "Percent"=enu2rowmeans)
}else{
RMs <- rbind(RMs, data.frame("cutoff"=paramCut,"mut"=GAper[,1,1], "Percent"=enu2rowmeans))
}
paramCut<- paramCut+1
}
# Stacked area chart of mutation-type share vs. altCount cutoff
ggplot(RMs, aes(x=cutoff,y=Percent,group=mut,fill=mut)) + geom_area(position="stack")
# Clear the workspace except mutType and the variables listed in VARS2KEEP
rm(list=setdiff(ls(), c("mutType",VARS2KEEP)))
gc()
## Figure 1 prototype ##################
# barplot(as.matrix(mutType$PercentGCnormal))#####
########################################
|
2d9d05c6f883a37756ad58fbe97019450b3ddef8
|
1dfec10ac8e51f63947f35ae42bbfead6ed8ce4c
|
/Walmart_Trip_Type_Classification/crossvalidation.R
|
50120a42a71a7ba17a69fb22bdda6136f28d1b8f
|
[] |
no_license
|
jothiprakash/Kaggle_Challenges
|
6f84109e5523d2afbe7eca678565be80985fb8e1
|
e5d5f815b0b52ecd5b3157596387d2184613dd8f
|
refs/heads/master
| 2021-08-23T02:22:21.383158
| 2017-12-02T13:40:39
| 2017-12-02T13:40:39
| 110,996,753
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,525
|
r
|
crossvalidation.R
|
# Fine-tuning script: grid search over xgboost tree depth and boosting
# rounds for the Walmart trip-type classifier, scored by multi-class
# log-loss on a hold-out portion of the training data. Writes the best
# (depth, rounds, eta, logloss) found so far to result.csv.
test = read.csv("testdataset_v8c.csv",header=TRUE, colClasses = "numeric")
train = read.csv("traindataset_v8c.csv",header=TRUE, colClasses = "numeric")
# Drop the first two columns (id and label) from the predictor set
predictors = names(train)[c(-1,-2)]
train$TripType = as.numeric(as.character(train$TripType))
train = train[1:60000,]
# Seeds kept for reproducibility; x and y are not used by the search below
set.seed(123)
x = sample(1:40000, 20000)
set.seed(1234)
y = sample(1:40000, 20000)
err = 100000   # best (lowest) log-loss seen so far
eta = 0.15     # learning rate used by every fit (and recorded in result.csv)
n.train = 40000                 # rows used for fitting
n.valid = nrow(train) - n.train # rows used for validation
# Fix: the original indexed validation rows 40001:95674 although train was
# truncated to 60000 rows, producing NA rows; indices are now derived from
# nrow(train).
for (depth in seq(1,20,1)) {
  for (rounds in seq(1,100,1)) {
    # Fit on the first n.train rows.
    # Fix: the original hard-coded max.depth=4, nround=1000 and eta=0.15,
    # so the grid loops had no effect and the printed eta (0.3) did not
    # match the fitted model; the loop variables are now passed through.
    bst <- xgboost(data = as.matrix(train[1:n.train,predictors]),
                   label = train$TripType[1:n.train],
                   max.depth=depth, nround=rounds, eta=eta,
                   objective = "multi:softprob",
                   num_class = 38, eval_metric = "mlogloss", subsample=1,
                   colsample_bytree = 0.1, min_child_weight = 1, gamma = 0.6, max_delta_step =10)
    gc()
    # Predict class probabilities on the hold-out rows
    predictions <- predict(bst, as.matrix(train[(n.train+1):nrow(train),predictors]))
    predictions = matrix(predictions, 38, n.valid, byrow=FALSE)
    predictions = t(predictions)
    predictions = data.frame(cbind(train[(n.train+1):nrow(train),"TripType"], predictions))
    # Multi-class log-loss: -sum(log p(true class)); the probability column
    # is label + 2 because column 1 holds the true (0-based) TripType label
    err2=-sum(log(apply(predictions,1, function(x){
      y=x[1] +2
      return(x[y])
    })))
    gc()
    if (err2 < err) {
      err = err2
      print(paste(depth,rounds,eta,err))
      answer = c(depth,rounds,eta,err)
      write.csv(answer,"result.csv", row.names = FALSE)
    }
  }
}
|
ef649b07db133850f7daddefd9537ab51e055aa1
|
0844fb0e1f115bdacb56389b82f3346bd6903099
|
/resourceefficiency_old.r
|
56b80f4362e761bc540dad715acdadb1b841f6ed
|
[] |
no_license
|
aleip/nutrientlca
|
3bcde9d1109cf876c5e2899ff75ba5a8e7b06e88
|
f72312b0c7fb8fa614fa4d95d869bfa94de9ca35
|
refs/heads/master
| 2021-01-11T23:01:53.707429
| 2017-01-29T18:43:40
| 2017-01-29T18:43:40
| 78,537,689
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 15,572
|
r
|
resourceefficiency_old.r
|
# Build the combined process matrix P from a system description.
#
# E: list with technology matrix A (products x processes) and
#    intervention matrix B (resources+losses x processes).
# S: list with character vectors `products`, `processes`, `resources`
#    and `losses` naming the rows and columns of A and B.
# Returns a list with the labelled data frames A, B and P = rbind(A, B).
processmatrix <- function(E, S) {
  # Recover system definition: label the technology matrix
  A <- as.data.frame(E$A)
  rownames(A) <- S$products
  colnames(A) <- S$processes
  # Label the intervention matrix (resources stacked above losses)
  B <- as.data.frame(E$B)
  rownames(B) <- c(S$resources, S$losses)
  colnames(B) <- S$processes
  # Combine Technology and Intervention matrices into the Process matrix
  P <- rbind(A, B)
  colnames(P) <- S$processes
  # Fix: the original returned B = A, silently dropping the intervention
  # matrix from the result.
  list(A = A, B = B, P = P)
}
# Fate of recycled process inputs, derived from the allocation matrix.
# lambda rows are per-flow allocation shares; flows$chainflows marks flows
# that belong to the main supply chain. Returns `recfate`, a process x
# process matrix: rows = process receiving the input, columns = process the
# input is ultimately distributed to.
# NOTE(review): the `flowfin` parameter is only referenced by the
# commented-out alternative implementation below — confirm it is still
# needed in the signature.
f_recfatelambda<-function(E,S,flows,lambda,flowfin){
P<-E$P
chainflows<-flows$chainflows
nproc<-S$nproc
nprod<-S$nprod
# Keep only non-chain (recycling/export) allocations, column-normalised
lam3<-lambda*(!(chainflows))
lam3<-t(t(lam3)/colSums(lam3))
#Calculate the destination of an input to process 1
#that is not recycled
# Create a matrix that 'chain fractions', ie. the
# share of process j (column)
# that is arriving at process i (rows)
chainfractions<-matrix(0,ncol=nproc,nrow=nproc)
# Product of the column sums of lam3 along the chain from i to j-1;
# identity on the diagonal, zero below it
chainfractions[,]<-sapply(1:nproc,function(j) sapply(1:nproc,function(i)
if(i==j){
1
}else if(j<i){
0
}else{
Reduce("*",colSums(lam3[1:nproc,1:nproc])[i:(j-1)])
}
))
# chainfractions<-matrix(0,ncol=nproc,nrow=nproc)
# for(i in 1:1){
#     for(j in 1:nproc){
#         chainfractions[i,j]<-if(j>i){flowfin[i,j]}else{1}*flowfin[j,j]
#         for(jstar in 1:(i-1)){
#             chainfractions[i,j]<-Reduce("*",)
#         }
#     }
# }
# Share per process which is exported
lamexport<-colSums(lam3[flows$exportflows[1:nprod],])
# Fate of recycled input to processes
# The rows give the process into which the input occurs
# The columns give the process to which the input is distributed
recfate<-t(t(chainfractions)*lamexport)
return(recfate=recfate)
}
# Re-derive nutrient flows after re-allocating losses from the flow-based
# allocation to the current allocation matrix `lambda`.
# NOTE(review): this function appears to be broken legacy code ("_old" file):
# - `sel` is used in the for-loop below BEFORE it is assigned two lines
#   later; the loop only works if a stale `sel` exists in the calling
#   environment — the two statements look swapped. TODO confirm.
# - `l2`, `r` and (in commented code) `Aall` are not defined anywhere in
#   this file.
# - The last statement is an assignment, so the function returns
#   `eintensity` invisibly.
f_newnutflow<-function(E,S,flows,lambda){
P<-E$P
nproc<-S$nproc
nprod<-S$nprod
origin<-flows$origin
target<-flows$target
goods<-S$goods
losses<-S$losses
prows<-S$prows
sumlosses<-flows$sumlosses
# Fill in input flows
#sel<-which(origin!=target&target!=0)
#A<-P[1:nprod,]*lambda
#for(i in 1:length(sel)){A[sel[i],target[sel[i]]]<--A[sel[i],origin[sel[i]]]}
A<-P[1:nprod,]
#Add to A the losses according to flow-allocation
lbyflow<-allocationbyflow(E,S,flows)
ladd<-t(t(lbyflow)*sumlosses)
#Substract now the losses according to current allocation
lsub<-t(t(lambda)*sumlosses)
A<-A+ladd-lsub
#Adjust recycling flows
# NOTE(review): `sel` is not yet defined at this point — see header note.
for(i in 1:length(sel)){A[sel[i],target[sel[i]]]<--A[sel[i],origin[sel[i]]]}
sel<-which(rownames(l2)%in%goods)
#print(apply(as.matrix(Aall[sel,][origin[sel]==3,]),2,sum))
#print(P[sel,][origin[sel]==3,,drop=FALSE])
V<-matrix(rep(0,nproc**2),ncol=nproc,nrow=nproc)
# Per-origin column sums of the goods rows of l2 (undefined here)
V<-t(sapply(1:nproc,function(x) apply(l2[sel,][origin[sel]==x,,drop=FALSE],2,sum)))
e<-as.vector(as.matrix(-P[prows%in%losses,]))
# Intensities via generalised inverse (ginv is from MASS — confirm loaded)
rintensity<-t(r)%*%ginv(V)
eintensity<-t(e)%*%ginv(V)
}
# Aggregate per-flow allocations into a process-to-process flow matrix.
# Entry [i, j] with i != j sums the lambda rows whose flow runs from
# process i to process j; the diagonal entry [i, i] sums flows leaving the
# system (target == 0) from process i.
# `nrpod` (sic) and `goods` are accepted for call compatibility but unused.
f_flowmatrix <- function(nproc, nrpod, lambda, origin, target, goods) {
  sapply(seq_len(nproc), function(j) {
    sapply(seq_len(nproc), function(i) {
      # Diagonal cells collect system exports instead of i -> i flows.
      dest <- if (i == j) 0 else j
      sum(lambda[origin == i & target == dest, ])
    })
  })
}
# Forward ("final") part of the flow matrix.
# NOTE(review): upperTriangle() is not defined in this file — presumably
# gdata::upperTriangle, which returns the upper-triangular ELEMENTS as a
# vector, so the elementwise product below relies on recycling; confirm
# the intended masking behaviour.
f_flowfin<-function(flowmatrix){
flowfin<-flowmatrix*upperTriangle(flowmatrix)
#flowfin<-(flowfin/rowSums(flowfin))
return(flowfin)
}
# Recycling ("backward") part of the flow matrix: the complement of the
# upper-triangular forward share.
# NOTE(review): relies on the same external upperTriangle() as f_flowfin —
# see the caveat there about its return shape.
f_flowrec<-function(flowmatrix){
#share recycled are not recycled
flowrec<-flowmatrix*(1-upperTriangle(flowmatrix))
#for(i in 1:ncol(flowrec)) flowrec[i,i]=1-sum(flowrec[i,])
return(flowrec)
}
# Distribute burden fractions along the supply chain, ignoring recycling.
# Result[i, j] is the fraction of process i's burden that ends up at
# process j; each row is normalised to sum to 1. `flowfin` is the forward
# flow-share matrix produced by f_flowfin — confirm upstream shape.
f_chainfraction<-function(nproc,flowfin){
#nproc<-S$nproc
chainfractions<-matrix(0,ncol=nproc,nrow=nproc)
# Approach: separate 'direct' chain-flow ignoring recycling flows and
# then calculate recycling flows.
# First calculate the distribution of burden that are transported
# along the supply chain.
# This loop calculates 'chainfractions' of burden that are 'caused' by
# a certain process i (rows) over all processes j (columns)
# The burden is attributed to the sum of products that leave each respective
# process.
# The j (target) loop runs backwards so that chainfractions[ii, j] for
# ii > i is already filled in when the inner accumulation needs it.
for(j in nproc:1){
# Loop over all last-but-one origins for the target
for(i in j:1){
# The fraction arriving at target is the direct flow ...
chainfractions[i,j]<-flowfin[i,j]*flowfin[j,j]
if(j==i)chainfractions[i,j]<-flowfin[j,j]
if(i<j-1){
for(ii in (i+1):(j-1)){
#chainfractions[ii,i]<-chainfractions[ii,i]+flowfin[ii,i]*if(i<j){chainfractions[i,j]}else{1}
chainfractions[i,j]<-chainfractions[i,j] +
# ... plus the fraction from origin-to-midorigin*fraction from
# mid-origin(already calculated) to target
flowfin[i,ii]*chainfractions[ii,j]
}
}
}
}
# Row-normalise so each process's fractions sum to one
chainfractions<-chainfractions/rowSums(chainfractions)
return(chainfractions)
}
# Combine recycling shares (flowrec) with chain fractions to obtain the
# final fate of each process's burden.
# NOTE(review): `finfractions` is computed below but the function currently
# returns `chainfractions` unchanged (the finfractions return is commented
# out) — this looks like a leftover debugging state; confirm intent.
f_recfate<-function(S,flowrec,chainfractions){
nproc<-S$nproc
#nproc<-S$nproc
finfractions<-matrix(0,ncol=nproc,nrow=nproc)
for(i in 1:nproc){
# NOTE(review): for i == 1 this still iterates once with ii == 1
# because of max(1, i-1) — confirm that is intended.
for(ii in 1:max(1,(i-1))){
finfractions[i,]<-finfractions[i,]+
# The recycled part enters in a previous step
flowrec[i,ii]*finfractions[ii,]
}
finfractions[i,]<-finfractions[i,]+
# The non-recycled part is distributed as in defined in
# chainfractions along the chain
(1-sum(flowrec[i,]))*chainfractions[i,]
}
#print(flowrec)
#print(chainfractions)
#print(finfractions)
#return(recfate=finfractions)
return(recfate=chainfractions)
}
# Loss intensity for a hard-coded three-process example system.
# NOTE(review): the dirburden indices (rows 1, 2, 8, 9) and diag(3) are
# wired to one specific example; `temp` is computed but never used; ginv()
# requires MASS to be loaded. The last expression, lossintensity %*% E,
# is the (implicit) return value.
calcmatrix<-function(V,lambda,dirburden){
#Retrieve the sum of net goods per process
sumnetgoods<-colSums(V*diag(3))
temp<-t(t(lambda)*sumnetgoods)
#Construct matrix
E<-V
E[,]<-0
E[1,2]<--dirburden[1,1]
E[2,1]<--dirburden[8,2]
E[3,1]<--dirburden[9,3]
E[2,3]<--dirburden[2,2]
E<-E-diag(3)*colSums(dirburden)
# Intensity via generalised inverse; result maps back through E
lossintensity<-t(sumnetgoods)%*%ginv(E)
lossintensity%*%E
}
# Allocate each process's direct burden (total losses) over its products,
# propagate the burden embedded in recycling flows (via recfate), and derive
# per-product resource demand, nutrient-use efficiency and loss factors.
# Returns a list of all intermediate and final matrices.
calcburden<-function(E,S,flows,lambda,recfate){
P<-E$P
nproc<-S$nproc
nprod<-S$nprod
chainflows<-flows$chainflows
target<-flows$target
origin<-flows$origin
exportflows<-flows$exportflows
resources<-S$resources
losses<-S$losses
# Burden must be total losses!
# Distribute the direct burden (total losses) of each process
# over the products of the process including those that are recycled
interventions<-c(resources,losses)
# Row indices of losses / resources within P (offset past the products)
rburd<-which(interventions%in%losses)+nprod
rreso<-which(interventions%in%resources)+nprod
# Zeroed template with the product-rows shape of P
temp<-P[1:nprod,]
temp[,]<-0
temp2<-colSums(temp)
# NOTE(review): temp2 and rreso are computed but never used below.
dirburden<-temp
# dirburden[i, j] = allocation share of product i in process j times the
# total losses of process j
dirburden[,]<-sapply(1:nproc,function(j)
sapply(1:(nprod),function(i)lambda[i,j]*as.vector(P[rburd,j])))
# Resources must be calculated as N in product + N in losses
# Calculate the resources each product requires to match
# the equation: Resources = Products + Losses
# Note - in the example there is no stock changes so far,
# equations would need to be adapted as stock changes
# are resources but count as goods without burden
A<-P[1:nprod,]
dirresour<-dirburden
lcells<-dirresour<0
dirresour[lcells]<-A[lcells]-dirburden[lcells]
# Calculate the embedded burden in recycling flows
# This formula calculates how much burden of a chainflow needs
# to be added to the target process
recburden<-colSums(sapply(1:nproc,function(x) rowSums(dirburden*chainflows*(target==x))))
# Add burden from original chainflows
#recburden<-recburden+c(0,colSums(dirburden[1:(nproc-1),])[1:(nproc-1)])
recburden<-recburden+colSums(dirburden*(!chainflows))
# The embedded burden is distributed over the processes...
embburden<-recburden*recfate
embburdenprc<-colSums(embburden)
# Distribute over exported products
exproducts<-A*exportflows
#print(t(embburdenprc*t(exproducts)))
embburdenprd<-t(embburdenprc*t(exproducts))
embburdenprd<-t(t(embburdenprd)/colSums(exproducts))
# ... and added to the direct burden
# This yields the burden for each process
# Through recycling the burden of processes that receive recycling flows
# is increased and those recycling is decreased
#burdenproducts<-dirburden*exportflows+embburdenprd
burdenproducts<-embburdenprd
# Resources must be calculated as N in product + N in losses
finproducts<-A*exportflows
resourcesproducts<-finproducts+burdenproducts
# Count and locate the exported ("final") products
nfinprod<-sum(finproducts>0)
cfinprod<-which(finproducts>0,arr.ind=TRUE)
# Nutrient Use Efficiency
nueproducts<-finproducts*0
lcell<-finproducts!=0
nueproducts[lcell]<-finproducts[lcell]/resourcesproducts[lcell]
# Footprints
lossfactors<-finproducts*0
lossfactors[lcell]<--burdenproducts[lcell]/finproducts[lcell]
return(list(dirburden=dirburden,
dirresour=dirresour,
recburden=recburden,
embburden=embburden,
embburdenprd=embburdenprd,
burdenproducts=burdenproducts,
resourcesproducts=resourcesproducts,
finproducts=finproducts,
nueproducts=nueproducts,
lossfactors=lossfactors,
nfinprod=nfinprod,
cfinprod=cfinprod))
}
# Run one resource-efficiency scenario per element of supplyexe, using
# either the MFA approach (dolambda == NA) or an LCA allocation rule.
# Depends on helpers not visible in this file: supplychainsimple,
# supplyvalues, scaling, fillrecycling, flowanalysis, nutflowanalysi,
# allocationbyflow, allocationbyvalue1/2, systemseparation.
# NOTE(review): with a single scenario the function returns `curex` itself
# rather than a one-element list; for >= 2 scenarios a flat list is built.
# Callers such as comparison() index the result as a list, so the
# single-scenario case looks suspect — confirm.
f_reffanalysis<-function(
supplydef="default",
supplyexe="aimable",
supplyall="NA"
){
# Function to generate a Lists (examp) of Lists (burden, nutflow, ...)
# with the results.
# The number of scenarios run is given by the length of the vector of
# 'examples' with flow rates (supplyexe).
# The other two vectors supplydef and supplyall are indicating the
# system definitions and allocation rules to be used. Those vectors can
# be shorter (or just contain one value) in which case the last value is
# used for all further scenarios
#
# Allocations:
# NA for applying MFA aprroach
# 'byflow','byvalue1','byvalue2' etc for LCA appraoch
nexamples<-length(supplyexe)
for(i in 1:nexamples){
# Recycle the last element of supplydef/supplyall when they are shorter
stype<-if(length(supplydef)<i){supplydef[length(supplydef)]}else{supplydef[i]}
etype<-supplyexe[i]
dolambda<-if(length(supplyall)<i){supplyall[length(supplyall)]}else{
supplyall[i]}
# The string "NA" is treated as a true NA (MFA approach)
if(!is.na(dolambda)) {if(dolambda=="NA") {dolambda<-NA}}
cat(i,stype,etype,dolambda,"\n")
# A. Build and scale the system for this scenario
S<-supplychainsimple(stype)
E<-supplyvalues(etype,S)
s<-scaling(E$A,E$B,E$f)
E$P<-s$P
E$P<-fillrecycling(E,S)
flows<-flowanalysis(E,S)
# B. Calculation of material flow analysis
if(is.na(dolambda)){
nutflow<-nutflowanalysi(E,S,flows)
curex<-list(stype=stype,etype=etype,dolambda=dolambda,
S=S,E=E,flows=flows,nutflow=nutflow)
}else{
# C. Calculation of efficiency acc to allocation
if(dolambda=="byflow"){
lambda<-allocationbyflow(E,S,flows)
}else if(dolambda=="byvalue1"){
lambda<-allocationbyvalue1(E$P,S$nprod,S$nproc)
}else if(dolambda=="byvalue2"){
lambda<-allocationbyvalue2(E$P,S$nprod,S$nproc)
}
#QQ Is flows really needed to be done or can it be omitted?
E$P<-systemseparation(E,S,flows,lambda)
testnut<-nutflowanalysi(E,S,flows)
#print(lambda)
flowmatrix<-f_flowmatrix(S$nproc,S$nprod,lambda,flows$origin,flows$target)
#print(flowmatrix)
flowfin<-f_flowfin(flowmatrix)
print("flowfin")
print(flowfin)
flowrec<-f_flowrec(flowmatrix)
#print('flowrec')
#print(flowrec)
chainfractions<-f_chainfraction(S$nproc,flowfin)
#print("chainfractions")
#print(chainfractions)
recfateold<-f_recfate(S,flowrec,chainfractions)
recfate<-f_recfatelambda(E,S,flows,lambda,flowfin)
print(recfate)
burden<-calcburden(E,S,flows,lambda,recfate)
curex<-list(stype=stype,etype=etype,dolambda=dolambda,
S=S,E=E,flows=flows,burden=burden,
lambda=lambda,flowmatrix=flowmatrix,flowfin=flowfin,
flowrec=flowrec,chainfractions=chainfractions,recfate=recfate)
}
# Accumulate scenario results (see header note about the n == 1 case)
if(i==1){examp<-curex}else if(i==2){examp<-list(examp,curex)}else{
examp[[i]]<-curex
}
}
return(examp)
}
#Matrix of flows which are 'too many' (not part of the main food chain)
# ... from Heijungs and Suh, but here probably not needed
# Calculation requires a square Technology matrix
# Fill missing columns on the basis of existing data
# Pad a rectangular technology matrix A (m rows x n cols, m > n) with
# m - n extra columns so the result is square. Each added column carries,
# on its own diagonal row, minus the row sum of the corresponding extra
# row of A; all other added entries are zero.
Afull <- function(A) {
  n <- ncol(A)
  m <- nrow(A)
  grows2add <- m - n
  # Fix: the original built these zero blocks with rep(0, ncol = ...),
  # but rep() has no `ncol`/`nrow` arguments; matrix(0, ...) is the
  # correct (and intended) construction.
  g <- matrix(0, nrow = grows2add, ncol = grows2add)
  gtemp <- matrix(0, nrow = n, ncol = grows2add)
  # Row sums of the rows that have no corresponding column yet
  fsum <- apply(A, 1, sum)[(n + 1):m]
  for (i in 1:length(fsum)) {
    g[i, i] <- -fsum[i]
  }
  # Stack the zero block above the diagonal block, then append to A
  g <- rbind(gtemp, g)
  g <- cbind(A, g)
  return(g)
}
# Extend the demand vector f with trailing zeros so its length matches
# nrow(A) (assumes A has at least as many rows as columns).
fplus <- function(A, f) {
  padding <- rep(0, nrow(A) - ncol(A))
  c(f, padding)
}
# NOTE(review): this looks like exploratory scratch code rather than a
# working function:
# - parameter V and locals i, retur2/3, leave2/3, F1..Ft, o are unused;
# - `lambda`, `target` and `nproc` are referenced but not defined here or
#   anywhere visible in this file, so the exitprod/cmult lines will error;
# - every value in the loop body is recomputed from constants, so `j`
#   never changes and the break condition is effectively fixed.
# Confirm before using; left byte-identical below.
temploop<-function(V){
i=1
emb<-0
for(x in 1:1000){
# Fixed flow shares of the worked example
a=50/90
a2=40/90
b=10/35
b2=5/35
c=20/35
d=2/14.5
e=12.5/14.5
# The recycled input is returned through recycled outputs
retur=(a*b+a*c*d)
# All must be leaving the system in the final products
leave=(a2+a*b2+a*c*e)
retur2<-retur*retur
leave2<-leave+retur*leave
#
retur3<-retur**3
leave3<-leave+retur*leave+retur**2*leave
F1<-a2/leave
F2<-a*b2/leave
F3<-a*c*e/leave
#Ft must give 1
Ft<-F1+F2+F3
exitprod<-lambda*(target==0)
cmult<-diag(nproc)
cmult[2:nproc,2:nproc]<-lambda[1:(nproc-1),1:(nproc-1)]
cmult<-colSums(cmult)
j=a*b+a*c*d
o=a*b+a*c*e
emb<-emb+j
if(j<0.00001) break
}
}
# Pretty-print a comparison across scenario results produced by
# f_reffanalysis.
# a:      list of scenario result lists
# what2:  "P" to print each scenario's process matrix, "nue" to print
#         nutrient-use-efficiency summaries (MFA or LCA depending on the
#         scenario's dolambda)
# which2: indices of scenarios to show; defaults to all of them
# Output goes to the console via cat()/print(); nothing is returned.
comparison<-function(a,what2,which2=NULL){
if(is.null(which2))which2<-1:length(a)
if(what2%in%"P"){
for(i in which2){
b<-a[[i]]
cat("\nProcess matrix",i,"\n")
print(b$E$P)
}
}
if(what2%in%"nue"){
cat("\nNutrient Use Efficiency - system definition - example\n")
for(i in which2){
b<-a[[i]]
if(is.na(b$dolambda)){
# MFA scenarios report the reciprocal resource intensity
approach<-"MFA"
cat(i,approach,round(as.vector(1/b$nutflow$rintensity),2),
"-",b$stype,"-",b$etype,"\n")
}else{
# LCA scenarios report per-process mean NUE over the products;
# zeros are masked as NA (modifies only the local copy b)
approach<-"LCA"
b$burden$nueproducts[b$burden$nueproducts==0]<-NA
cat(i,approach,round(apply(b$burden$nueproducts,2,mean,na.rm=TRUE),5),
"-",b$stype,"-",b$etype,"-",b$dolambda,"\n")
}
}
}
}
|
d4701cc7af515e66c519602d410d750c5f86670a
|
22168141237225ccbcccbe4fe10fefc1c687ab96
|
/inst.R
|
7be5e985553cc2c5976311cf111493ea02f6aaf5
|
[] |
no_license
|
mamueller/R6Unit
|
2c6932591c109e1dea6c892899c82e084820980b
|
823092bc9d64b92e47197f67126b6f4df843c672
|
refs/heads/master
| 2020-01-23T21:34:17.461604
| 2019-10-07T16:49:49
| 2019-10-07T16:49:49
| 74,699,014
| 0
| 0
| null | 2016-11-24T20:00:49
| 2016-11-24T19:09:06
| null |
UTF-8
|
R
| false
| false
| 376
|
r
|
inst.R
|
#!/usr/bin/Rscript
# Bootstrap script: install missing build dependencies, then install the
# local package (pkg/) plus the debugHelpers helper package from GitHub.
# Fixes vs. original: stringr/getopt were reinstalled unconditionally and
# without a repos (which can fail non-interactively); require() was used
# for loading, which only warns on failure — library() errors instead.
for (dep in c("devtools", "stringr", "getopt")) {
  # Only install what is missing (same check the original used for devtools).
  if (!is.element(dep, installed.packages())) {
    install.packages(dep, repos = "https://cran.uni-muenster.de")
  }
}
library(getopt)
library(stringr)
library(devtools)
devtools::install_github("mamueller/debugHelpers", subdir = "pkg")
devtools::install("pkg", build = FALSE)
# Report which script file is being executed (getopt helper)
print(get_Rscript_filename())
|
414856bbe2a59e775094b1b9dd7122c7c28cc259
|
320ddefc84d992475db8b83befde46e2b780314f
|
/man/see.ancova.tck.rd
|
49ea4dc59ad3dab3db85df1f31a44c9c8f6f29a0
|
[] |
no_license
|
cran/asbio
|
bca146e402058cd67ff5fc42423cb0c0544f942b
|
3cb01b7cb1a8dec60a5f809f91bc460a6566954d
|
refs/heads/master
| 2023-08-31T23:43:08.304864
| 2023-08-20T02:22:36
| 2023-08-20T04:30:48
| 17,694,492
| 3
| 2
| null | null | null | null |
UTF-8
|
R
| false
| false
| 365
|
rd
|
see.ancova.tck.rd
|
\name{see.ancova.tck}
\alias{see.ancova.tck}
\title{
Visualize ANCOVA mechanics
}
\description{
An interactive GUI to view ANCOVA mechanics. Exp. power tries to simulate explanatory power in the concomitant variable. It simply results in (1 - Exp. power) \eqn{\times} Residual SE.
}
\usage{
see.ancova.tck()
}
\author{
Ken Aho
}
\keyword{graphs}
|
4cc6de2fde7ed5248cd15d93fb3a839538f976cc
|
a9fb5a228b2316e5b43f58e4b8d6c858cb7784f7
|
/man/getMotifFootprints-DsATAC-method.Rd
|
fbceb6d5d24dfebf5bbcebcf650e30a2c9c1af12
|
[] |
no_license
|
GreenleafLab/ChrAccR
|
f94232d5ac15caff2c5b2c364090bfb30b63e61a
|
43d010896dc95cedac3a8ea69aae3f67b2ced910
|
refs/heads/master
| 2023-06-24T05:29:29.804920
| 2023-03-17T13:01:49
| 2023-03-17T13:01:49
| 239,655,070
| 17
| 7
| null | 2023-05-05T09:51:23
| 2020-02-11T02:01:37
|
R
|
UTF-8
|
R
| false
| true
| 1,978
|
rd
|
getMotifFootprints-DsATAC-method.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/DsATAC-class.R
\docType{methods}
\name{getMotifFootprints,DsATAC-method}
\alias{getMotifFootprints,DsATAC-method}
\alias{getMotifFootprints}
\title{getMotifFootprints-methods}
\usage{
\S4method{getMotifFootprints}{DsATAC}(
.object,
motifNames,
samples = getSamples(.object),
motifFlank = 250L,
type = ".genome",
motifDb = "jaspar"
)
}
\arguments{
\item{.object}{\code{\linkS4class{DsATAC}} object}
\item{motifNames}{character vector of motif names}
\item{samples}{sample identifiers}
\item{motifFlank}{number of base pairs flanking the motif on each side}
\item{type}{(PLACEHOLDER ARGUMENT: NOT IMPLEMENTED YET) character string specifying the region type or \code{".genome"} (default) for genome-wide profiling}
\item{motifDb}{either a character string (currently only "jaspar" and sets contained in \code{chromVARmotifs} ("homer", "encode", "cisbp") are supported) or an object containing PWMs
that can be used by \code{motifmatchr::matchMotifs} (such as an \code{PFMatrixList} or \code{PWMatrixList} object) OR a list of \code{GRanges} objects specifying motif occurrences}
}
\value{
a \code{list} of footprinting results with one element for each motif. Each motif's results contain summary data frames with aggregated counts
across all motif occurrences and a \code{ggplot} object for plotting footprints
}
\description{
Perform enrichment analysis for (TF) motif footprinting
}
\examples{
\dontrun{
dsa <- ChrAccRex::loadExample("dsAtac_ia_example")
motifNames <- c("MA1419.1_IRF4", "MA0139.1_CTCF", "MA0037.3_GATA3")
# motifNames <- grep("(IRF4|CTCF|GATA3)$", names(prepareMotifmatchr("hg38", "jaspar")$motifs), value=TRUE, ignore.case=TRUE) # alternative by searching
samples <- c("TeffNaive_U_1001", "TeffNaive_U_1002", "TeffMem_U_1001", "TeffMem_U_1002")
fps <- getMotifFootprints(dsa, motifNames, samples)
fps[["MA1419.1_IRF4"]]$plot
}
}
\author{
Fabian Mueller
}
|
8e9bcec14196a4aa3f8cafac255f0cb84b30c65b
|
da38b99eb42630fe1bd3bc115ab40dbf00a018d1
|
/plot4.R
|
7422d1a2409ffa9f4792e58a3c76aa7482e58365
|
[] |
no_license
|
wnichola/ExData_Plotting1
|
7656c80c1eaad61f304b871281bfeb20f515df7b
|
12c3e21274b63d6f1883d0998a34c8887195fd72
|
refs/heads/master
| 2020-12-24T15:59:16.798129
| 2015-03-17T05:22:34
| 2015-03-17T05:22:34
| 31,635,213
| 0
| 0
| null | 2015-03-04T03:08:05
| 2015-03-04T03:08:04
| null |
UTF-8
|
R
| false
| false
| 2,668
|
r
|
plot4.R
|
## Use the data.table library as it is very efficient at reading large files
library("data.table")
# Define the names (and locations) of the input data file and output plot
srcfile <- "./data/household_power_consumption.txt"
plotfile <- "./figure/plot4.png"
# Use fread to read the file into a data.table, treating "?" as missing
DT <- fread(srcfile, header = TRUE, stringsAsFactors = FALSE, na.string = "?", colClasses = "character")
# Subset the data.table for records with Dates "1/2/2007" and "2/2/2007"
DT2 <- DT[Date == "1/2/2007" | Date == "2/2/2007", ]
# Create a combined Datetime field (Date + Time) for the x-axis of all plots
DT2[, Datetime := as.POSIXct(strptime(paste(DT2$Date, DT2$Time, sep = " "), "%d/%m/%Y %H:%M:%S"))]
# Convert the measured variables to numeric so they can be plotted.
# (Fix: the original converted Voltage twice; the duplicate is removed.)
DT2[, Global_active_power := as.numeric(Global_active_power)]
DT2[, Voltage := as.numeric(Voltage)]
DT2[, Sub_metering_1 := as.numeric(Sub_metering_1)]
DT2[, Sub_metering_2 := as.numeric(Sub_metering_2)]
DT2[, Sub_metering_3 := as.numeric(Sub_metering_3)]
DT2[, Global_reactive_power := as.numeric(Global_reactive_power)]
# Lay out 4 plots in a 2x2 grid, filled across columns first
par(mfrow = c(2, 2))
# Plot 1: Datetime by Global Active Power
plot(DT2$Datetime, DT2$Global_active_power, type = "l", xlab = "", ylab = "Global active power")
# Plot 2: Datetime by Voltage
plot(DT2$Datetime, DT2$Voltage, type = "l", xlab = "", ylab = "Voltage")
# Plot 3: the three sub-metering series on a common y range
y_range <- range(0, DT2$Sub_metering_1, DT2$Sub_metering_2, DT2$Sub_metering_3)
plot(DT2$Datetime, DT2$Sub_metering_1, type = "l",
     xlab = "", ylab = "Energy sub metering", ylim = y_range)  # Sub_metering_1 first
lines(DT2$Datetime, DT2$Sub_metering_2, type = "l", col = "red", lwd = 2)   # Sub_metering_2
lines(DT2$Datetime, DT2$Sub_metering_3, type = "l", col = "blue", lwd = 2)  # Sub_metering_3
# Add the legend at top right
legend("topright", c("Sub_metering_1", "Sub_metering_2", "Sub_metering_3"),
       col = c("black", "red", "blue"), lwd = 2, cex = 0.3, bty = "n")
# Plot 4: Datetime by Global Reactive Power
plot(DT2$Datetime, DT2$Global_reactive_power, type = "l", xlab = "", ylab = "Global reactive power")
# Copy the 2x2 figure into the PNG file at 480x480 pixels
dev.copy(png, file = plotfile, width = 480, height = 480)
# Complete the plot and close the png device
dev.off()
|
cf2bef8008a7de3fb2b248f5886d071917f02d9d
|
1c8651c1d055acaada9e8db9d6fb6244e407f903
|
/man/read_files.Rd
|
e0f4827ebd2f8f32dd3620c4cedd49c2f938985c
|
[
"MIT"
] |
permissive
|
datalorax/sundry
|
b164a510e3bfd02b1b091753f0791ee970c39b1b
|
8c180d16e47270759d93b0d440f42a8b4751d36a
|
refs/heads/master
| 2023-04-03T06:11:34.194216
| 2021-04-06T23:13:37
| 2021-04-06T23:13:37
| 52,026,171
| 6
| 4
|
NOASSERTION
| 2021-04-06T23:12:38
| 2016-02-18T17:43:07
|
R
|
UTF-8
|
R
| false
| true
| 707
|
rd
|
read_files.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/read_files.R
\name{read_files}
\alias{read_files}
\title{Read a batch of files}
\usage{
read_files(dir = ".", pat = "*.csv|*.sav|*.xls|*.xlsx|*.txt", df = TRUE, ...)
}
\arguments{
\item{dir}{Directory where files are stored. Defaults to the current
working directory}
\item{pat}{Optional string pattern of files in the directory}
\item{df}{Logical. Should the function try to read the files in as a single
data frame? Defaults to \code{TRUE}. A list of data frames are returned
if \code{FALSE}.}
\item{...}{Additional arguments passed to \code{\link[rio]{import}} (e.g.,
\code{delim}).}
}
\description{
Read a batch of files
}
|
b7e64654a45a16af2594505befb84f3aa6d61d59
|
9f9c1c69adb6bc2ac097ae368c8ba2f293d01b64
|
/R/combine_sources.R
|
21ea28e2aa6011d811a385e12359a63e74be2794
|
[] |
no_license
|
izabelabujak/MixSIAR
|
a8be0e62a1299b6cd365ab0fa9ff315ad8c306f9
|
2b1b2545638fcfaacbcaaa695e2bec436a91806f
|
refs/heads/master
| 2020-08-30T03:45:42.453711
| 2019-08-02T22:02:57
| 2019-08-02T22:02:57
| 218,252,966
| 1
| 0
| null | 2019-10-29T09:43:47
| 2019-10-29T09:43:47
| null |
UTF-8
|
R
| false
| false
| 11,704
|
r
|
combine_sources.R
|
#' Combine sources from a finished MixSIAR model (\emph{a posteriori})
#'
#' \code{combine_sources} aggregates the proportions from multiple sources.
#' Proportions are summed across posterior draws, since the source proportions
#' are correlated.
#'
#' \emph{Note: Aggregating sources after running the mixing model (a posteriori)
#' effectively changes the prior weighting on the sources.} Aggregating
#' uneven numbers of sources will turn an 'uninformative'/generalist
#' prior into an informative one. Because of this, \code{combine_sources}
#' automatically generates a message describing this effect and a figure
#' showing the original prior, the effective/aggregated prior, and what the
#' 'uninformative'/generalist prior would be if sources were instead grouped
#' before running the mixing model (a priori).
#'
#' @param jags.1 \code{rjags} model object, output from \code{\link{run_model}}
#' @param mix list, output from \code{\link{load_mix_data}}
#' @param source list, output from \code{\link{load_source_data}}
#' @param alpha.prior vector with length = n.sources, Dirichlet prior on p.global (default = 1, uninformative)
#' @param groups list, which sources to combine, and what names to give the new combined sources. See example.
#'
#' @return \code{combined}, a list including:
#' \itemize{
#' \item \code{combined$post}: matrix, posterior draws with new source groupings
#' \item \code{combined$source.new}: list, original \code{source} list with modified entries for \code{n.sources} and \code{source_names}
#' \item \code{combined$groups}: (input) list, shows original and combined sources
#' \item \code{combined$jags.1}: (input) \code{rjags} model object
#' \item \code{combined$source.old}: (input) list of original source data
#' \item \code{combined$mix}: (input) list of original mix data
#' \item \code{combined$prior.old}: (input) prior vector on original sources
#' \item \code{combined$prior.new}: (output) prior vector on combined sources
#' }
#'
#' @seealso \code{\link{summary_stat}} and \code{\link{plot_intervals}}
#' @examples
#' \dontrun{
#' # first run mantis shrimp example
#' # combine 6 sources into 2 groups of interest (hard-shelled vs. soft-bodied)
#' # 'hard' = 'clam' + 'crab' + 'snail' # group 1 = hard-shelled prey
#' # 'soft' = 'alphworm' + 'brittlestar' + 'fish' # group 2 = soft-bodied prey
#' combined <- combine_sources(jags.1, mix, source, alpha.prior=alpha,
#' groups=list(hard=c("clam","crab","snail"), soft=c("alphworm","brittlestar","fish")))
#'
#' # get posterior medians for new source groupings
#' apply(combined$post, 2, median)
#' summary_stat(combined, meanSD=FALSE, quantiles=c(.025,.5,.975), savetxt=FALSE)
#' }
#'
combine_sources <- function(jags.1, mix, source, alpha.prior=1, groups){
  # Aggregate sources of a fitted MixSIAR model a posteriori.
  #
  # jags.1:      rjags model object (output of run_model); its posterior
  #              draws are read from jags.1$BUGSoutput
  # mix:         mix data list (load_mix_data output)
  # source:      source data list (load_source_data output)
  # alpha.prior: Dirichlet prior used on the ORIGINAL sources
  # groups:      named list mapping each new (combined) source to the vector
  #              of original source names it aggregates
  #
  # Returns a list with the re-aggregated posterior matrix ('post'), the
  # modified source list ('source.new'), the old/new priors and all inputs.
  # Side effects: opens a new graphics device with a prior-comparison figure
  # and prints a warning about the implied change of prior.

  # Every original source must appear (exactly once) in 'groups'
  old.source.names <- sort(unlist(groups, use.names=FALSE))
  if(!identical(old.source.names, source$source_names)){
    stop(paste("Source names in 'groups' list do not match those in
  'source$source_names'. All previous sources must appear in 'groups'.",sep=""))
  }

  # New source object (only names and # of sources changed, not data)
  n.sources <- source$n.sources
  source.new <- source
  source.new$n.sources <- length(groups)
  source.new$source_names <- names(groups)

  # Get matrix of posterior draws
  # post <- jags.1$BUGSoutput$sims.array
  # names(dimnames(post)) <- c("iterations", "chains","parameters")
  post.mat <- jags.1$BUGSoutput$sims.matrix
  # keep every monitored parameter that is NOT a proportion untouched
  old.other <- post.mat[,-c(grep("p.global",colnames(post.mat)), grep("p.fac1",colnames(post.mat)), grep("p.fac2",colnames(post.mat)))]
  new.fac1 <- new.fac2 <- new.both <- NULL

  # Combine posterior draws into new source groupings:
  # the proportion of a combined source is the SUM of its members' draws
  old.global <- post.mat[,grep("p.global",colnames(post.mat))]
  n.draws <- dim(old.global)[1]
  new.global <- matrix(NA, nrow=n.draws, ncol=source.new$n.sources)
  colnames(new.global) <- paste0("p.global[",1:source.new$n.sources,"]")
  for(i in 1:source.new$n.sources){
    old <- groups[[i]]
    old.levels <- match(old, source$source_names)
    new.global[,i] <- apply(as.matrix(old.global[,old.levels]), 1, sum)
  }

  if(!mix$fere){
    # combine factor 1
    if(mix$n.effects > 0){
      for(f1 in 1:mix$FAC[[1]]$levels){
        new.fac1.tmp <- matrix(NA, nrow=n.draws, ncol=source.new$n.sources)
        colnames(new.fac1.tmp) <- paste0("p.fac1[",f1,",",1:source.new$n.sources,"]")
        # grab all p.fac1[f1, .] columns for this factor level
        old.fac1.tmp <- post.mat[,grep(paste0("p.fac1\\[",f1), colnames(post.mat))]
        for(i in 1:source.new$n.sources){
          old <- groups[[i]]
          old.levels <- match(old, source$source_names)
          new.fac1.tmp[,i] <- apply(as.matrix(old.fac1.tmp[,old.levels]), 1, sum)
        }
        new.fac1 <- cbind(new.fac1, new.fac1.tmp)
      }
    }
    # combine factor 2
    if(mix$n.effects > 1){
      for(f2 in 1:mix$FAC[[2]]$levels){
        new.fac2.tmp <- matrix(NA, nrow=n.draws, ncol=source.new$n.sources)
        colnames(new.fac2.tmp) <- paste0("p.fac2[",f2,",",1:source.new$n.sources,"]")
        old.fac2.tmp <- post.mat[,grep(paste0("p.fac2\\[",f2), colnames(post.mat))]
        for(i in 1:source.new$n.sources){
          old <- groups[[i]]
          old.levels <- match(old, source$source_names)
          new.fac2.tmp[,i] <- apply(as.matrix(old.fac2.tmp[,old.levels]), 1, sum)
        }
        new.fac2 <- cbind(new.fac2, new.fac2.tmp)
      }
    }
  }

  # Post-processing for 2 FE or 1FE + 1RE, calculate p.both = ilr.global + ilr.fac1 + ilr.fac2
  if(mix$fere){
    # factor-2 levels are nested within factor-1 levels, hence the lookup
    fac2_lookup <- list()
    for(f1 in 1:mix$FAC[[1]]$levels){
      fac2_lookup[[f1]] <- unique(mix$FAC[[2]]$values[which(mix$FAC[[1]]$values==f1)])
    }
    ilr.both <- array(NA,dim=c(n.draws,mix$FAC[[1]]$levels, mix$FAC[[2]]$levels, n.sources-1))
    p.both <- array(NA,dim=c(n.draws,mix$FAC[[1]]$levels, mix$FAC[[2]]$levels, n.sources))
    cross.both <- array(data=NA,dim=c(n.draws,mix$FAC[[1]]$levels, mix$FAC[[2]]$levels,n.sources,n.sources-1))
    # e = ilr basis matrix used to back-transform ilr coordinates to proportions
    e <- matrix(rep(0,n.sources*(n.sources-1)),nrow=n.sources,ncol=(n.sources-1))
    for(i in 1:(n.sources-1)){
      e[,i] <- exp(c(rep(sqrt(1/(i*(i+1))),i),-sqrt(i/(i+1)),rep(0,n.sources-i-1)))
      e[,i] <- e[,i]/sum(e[,i])
    }
    for(i in 1:n.draws){
      for(f1 in 1:mix$FAC[[1]]$levels) {
        for(f2 in fac2_lookup[[f1]]){
          for(src in 1:(n.sources-1)) {
            ilr.both[i,f1,f2,src] <- jags.1$BUGSoutput$sims.list$ilr.global[i,src] + jags.1$BUGSoutput$sims.list$ilr.fac1[i,f1,src] + jags.1$BUGSoutput$sims.list$ilr.fac2[i,f2,src];
            cross.both[i,f1,f2,,src] <- (e[,src]^ilr.both[i,f1,f2,src])/sum(e[,src]^ilr.both[i,f1,f2,src]);
            # ilr.both[,f1,f2,src] <- ilr.global[,src] + ilr.fac1[,f1,src] + ilr.fac2[,f2,src];
          }
          for(src in 1:n.sources) {
            p.both[i,f1,f2,src] <- prod(cross.both[i,f1,f2,src,]);
          }
          p.both[i,f1,f2,] <- p.both[i,f1,f2,]/sum(p.both[i,f1,f2,]);
        } # f2
      } # f1
    }

    # Now combine sources for p.both
    # NOTE(review): f2 loops over ALL factor-2 levels here, including f1/f2
    # combinations never observed (p.both is NA there); presumably intentional
    # so the column layout stays rectangular -- confirm.
    for(f1 in 1:mix$FAC[[1]]$levels){
      for(f2 in 1:mix$FAC[[2]]$levels){
        new.both.tmp <- matrix(NA, nrow=n.draws, ncol=source.new$n.sources)
        colnames(new.both.tmp) <- paste0("p.both[",f1,",",f2,",",1:source.new$n.sources,"]")
        old.both.tmp <- p.both[,f1,f2,]
        for(i in 1:source.new$n.sources){
          old <- groups[[i]]
          old.levels <- match(old, source$source_names)
          new.both.tmp[,i] <- apply(as.matrix(old.both.tmp[,old.levels]), 1, sum)
        }
        new.both <- cbind(new.both, new.both.tmp)
      }
    }
  } # end fere

  # Combine posterior matrices
  post.new <- cbind(old.other, new.global, new.fac1, new.fac2, new.both)

  # Error check on prior input
  if(length(which(alpha.prior==0))!=0){
    stop(paste("*** Error: You cannot set any alpha = 0.
  Instead, set = 0.01.***",sep=""))
  }
  if(is.numeric(alpha.prior)==F) alpha.prior = 1 # Error checking for user inputted string/ NA
  if(length(alpha.prior)==1) alpha = rep(alpha.prior,n.sources) # All sources have same value
  if(length(alpha.prior) > 1 & length(alpha.prior) != n.sources) alpha = rep(1,n.sources) # Error checking for user inputted string/ NA
  if(length(alpha.prior) > 1 & length(alpha.prior) == n.sources) alpha = alpha.prior # All sources have different value inputted by user

  # Simulate old prior (Dirichlet draws used only for the comparison figure)
  N = 20000
  p.old = MCMCpack::rdirichlet(N, alpha)
  alpha.unif <- rep(1,n.sources)
  p.unif.old = MCMCpack::rdirichlet(N, alpha.unif)
  alpha_lab <- paste0("(",paste0(round(alpha,2),collapse=","),")",sep="")
  alpha.unif_lab <- paste0("(",paste0(round(alpha.unif,2),collapse=","),")",sep="")

  # Calculate new prior with aggregated sources:
  # the effective Dirichlet alpha of a combined source is the SUM of its
  # members' alphas (this is the implied-prior change the warning is about)
  prior.new <- rep(NA, source.new$n.sources)
  p.new <- matrix(NA, nrow=N, ncol=source.new$n.sources)
  for(i in 1:source.new$n.sources){
    old <- groups[[i]]
    old.levels <- match(old, source$source_names)
    prior.new[i] <- sum(alpha[old.levels])
    if(length(old.levels) > 1) p.new[,i] <- apply(p.old[,old.levels], 1, sum)
    if(length(old.levels) == 1) p.new[,i] <- p.old[,old.levels]
  }
  alpha.unif.new <- rep(1,source.new$n.sources)
  p.unif.new = MCMCpack::rdirichlet(N, alpha.unif.new)
  alpha.lab.new <- paste0("(",paste0(round(prior.new,2),collapse=","),")",sep="")
  alpha.unif.lab.new <- paste0("(",paste0(round(alpha.unif.new,2),collapse=","),")",sep="")

  # Figure comparing original prior / aggregated prior / uninformative prior.
  # NOTE(review): opens a new device and changes par() without restoring.
  dev.new(width=9, height=7)
  layout(cbind(matrix(c(seq(1:(2*n.sources)),(2*n.sources)+1,(2*n.sources)+1), ncol=2, byrow=TRUE),
               matrix(c(seq((2*n.sources)+2, 2*n.sources+2*source.new$n.sources+1), rep(2*n.sources+2*source.new$n.sources+2, 2), rep(2*n.sources+2*source.new$n.sources+3, 2*(n.sources-source.new$n.sources))), ncol=2, byrow=TRUE)),
         heights=c(rep(3,n.sources),2))
  par(mai=rep(0.3,4))
  for(i in 1:n.sources){
    hist(p.old[,i], breaks = seq(0,1,length.out=40),col="darkblue", main = source$source_names[i], xlab=expression(p[i]),xlim=c(0,1))
    hist(p.unif.old[,i], breaks = seq(0,1,length.out=40),col="darkgrey", main = source$source_names[i], xlab=expression(p[i]),xlim=c(0,1))
  }
  par(mai=c(0,0,0,0))
  plot.new()
  legend(x="center", ncol=2,legend=c(paste0("Original prior\n",alpha_lab),paste0("\"Uninformative\" prior\n",alpha.unif_lab)),
         fill=c("darkblue","darkgrey"),bty = "n",cex=1.5)
  par(mai=rep(0.3,4))
  for(i in 1:source.new$n.sources){
    hist(p.new[,i], breaks = seq(0,1,length.out=40),col="red", main = source.new$source_names[i], xlab=expression(p[i]),xlim=c(0,1))
    hist(p.unif.new[,i], breaks = seq(0,1,length.out=40),col="darkgrey", main = source.new$source_names[i], xlab=expression(p[i]),xlim=c(0,1))
  }
  par(mai=c(0,0,0,0))
  plot.new()
  legend(x="center", ncol=2,legend=c(paste0("New prior\n",alpha.lab.new),paste0("\"Uninformative\" prior\n",alpha.unif.lab.new)),
         fill=c("red","darkgrey"),bty = "n",cex=1.5)

  cat("
-------------------------------------------------------------------------
*** WARNING ***
Aggregating sources after running the mixing model (a posteriori)
effectively changes the prior weighting on the sources. Aggregating
uneven numbers of sources will turn an 'uninformative'/generalist
prior into an informative one. Please check your new, aggregated
prior (red), and note the difference between it and the original prior
(blue). The right (grey) column shows what the 'uninformative'/generalist
prior would be if you aggregate sources before running the mixing model
(a priori).
-------------------------------------------------------------------------
",sep="\n")
  return(list(post=post.new, source.new=source.new, groups=groups, jags.1=jags.1, source.old=source, mix=mix, prior.old=alpha, prior.new=prior.new))
}
|
8b79049a12028762038b98d7e26f95672e04a15c
|
75a981dbcb03a471ed72df9eee09604d84a3956a
|
/R/move.HMM.ACF.R
|
331b85d86aa303fce70558783a21b081ccb43972
|
[] |
no_license
|
benaug/move.HMM
|
7c1e897aa33a2aaa0564844d9e8ee20fbcf0d96a
|
1095eedcc3c47976ea0d966131e5e1855eaaa9ef
|
refs/heads/master
| 2022-02-11T01:31:16.309023
| 2022-01-22T19:41:44
| 2022-01-22T19:41:44
| 13,788,227
| 3
| 2
| null | 2014-10-01T17:46:47
| 2013-10-22T22:58:15
|
R
|
UTF-8
|
R
| false
| false
| 2,344
|
r
|
move.HMM.ACF.R
|
#'ACF plot
#'
#'This function compares the empirical ACFs to those simulated from the fitted HMM. Experimental.
#'@param move.HMM A move.HMM object containing a fitted HMM model.
#'@param simlength The number of observations to simulate. The ACF from the simulated data will
#'converge to the theoretical ACF as simlength goes to infinity
#'@param transforms A list of length ndist that contains functions for transforming the data for each distribution.
#'Default is NULL, so data are not transformed.
#'@param lag.max Maximum lag at which to calculate the acf. Default is 10.
#'@param ylim a ndist x 2 matrix with the lower and upper bounds for plotting each ACF. Defaults to (-0.3,0.3).
#'@param tol numeric value indicating the distance between the empirical and simulated ACFs plots at each lag length.
#'Defaults to 0.1.
#'@return A vector of shifted negative binomial pdf values
#'@include move.HMM.simulate
#'@export
move.HMM.ACF=function(move.HMM,simlength=10000,transforms=NULL,lag.max=10,ylim=NULL,tol=0.1){
  # Compare the empirical ACF of each observed data stream to the ACF of
  # data simulated from the fitted HMM (the simulated ACF converges to the
  # theoretical ACF as simlength grows).
  #
  # move.HMM:   fitted move.HMM object (uses $dists, $params, $obs)
  # simlength:  number of observations to simulate
  # transforms: optional list of per-distribution transforms applied to the
  #             data before computing the ACF
  # lag.max:    maximum lag at which to compute the ACFs
  # ylim:       optional ndist x 2 matrix of y-axis limits; default (-0.3, 0.3),
  #             widened when an ACF exceeds it
  # tol:        horizontal offset between the observed and simulated bars
  dists=move.HMM$dists
  params=move.HMM$params
  obs=move.HMM$obs
  sim=move.HMM.simulate(dists,params,simlength)$obs
  ndist=length(dists)
  plotat=seq_len(lag.max)
  fixy=is.null(ylim)
  if(fixy)ylim=matrix(NA,nrow=ndist,ncol=2)
  # prompt before each plot; restore the setting on exit (also on error)
  oask=par(ask=TRUE)
  on.exit(par(oask),add=TRUE)
  for(i in 1:ndist){
    if(!is.null(transforms)){
      acf1=acf(transforms[[i]](obs[,i]),plot=FALSE,lag.max=lag.max,na.action=na.pass)
      # FIX: lag.max must also be passed here, otherwise acf() falls back to
      # its default (10*log10(N)) and acf2$acf[2:(lag.max+1)] can index
      # beyond the computed lags, producing NAs
      acf2=acf(transforms[[i]](sim[,i]),plot=FALSE,lag.max=lag.max)
    }else{
      acf1=acf(obs[,i],plot=FALSE,lag.max=lag.max,na.action=na.pass)
      acf2=acf(sim[,i],plot=FALSE,lag.max=lag.max)
    }
    if(fixy){
      # default limits, widened if either ACF sticks out
      ylim[i,]=c(-0.3,0.3)
      maximum=max(c(acf1$acf[2:(lag.max+1)]),acf2$acf[2:(lag.max+1)])
      minimum=min(c(acf1$acf[2:(lag.max+1)]),acf2$acf[2:(lag.max+1)])
      if(maximum>0.3)ylim[i,2]=min(maximum*1.2,1)
      if(minimum<=-0.3)ylim[i,1]=max(minimum*1.2,-1)
    }
    plot(NA,xlim=c(1,lag.max),ylim=ylim[i,],xlab="Lag",ylab="ACF",main=paste(dists[i],"Observed vs. Simulated ACFs"))
    abline(c(0,0))
    # observed (black) and simulated (red) ACF bars, offset by +/- tol.
    # FIX: the inner loop variable no longer shadows the outer 'i'
    for(lag in seq_along(plotat)){
      lines(x=c(plotat[lag]-tol,plotat[lag]-tol),y=c(0,acf1$acf[lag+1]))
    }
    for(lag in seq_along(plotat)){
      lines(x=c(plotat[lag]+tol,plotat[lag]+tol),y=c(0,acf2$acf[lag+1]),col="red")
    }
    legend(x="bottomright",lwd=c(1,1),col=c("black","red"),cex=0.7,legend=c("Observed","Simulated"))
  }
}
|
583ca47b982415dba3171ac63527b9392c8b9f55
|
d226838e64a1d55fdaf797893f7468651b725183
|
/man/systemHTSeqCount.Rd
|
6f12c1e7bf061d13f9e8c2e229a0803a93f71c10
|
[] |
no_license
|
HenrikBengtsson/aroma.seq
|
5fd673cc449d9c3b89daf1125e8cc95556d0641d
|
6464f1e5e929c423978cf7dcb11ac7018d179a6d
|
refs/heads/master
| 2021-06-21T13:53:21.618898
| 2021-02-10T02:57:15
| 2021-02-10T02:57:15
| 20,848,327
| 0
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,211
|
rd
|
systemHTSeqCount.Rd
|
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% Do not modify this file since it was automatically generated from:
%
% systemHTSeqCount.R
%
% by the Rdoc compiler part of the R.oo package.
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\name{systemHTSeqCount}
\alias{systemHTSeqCount.default}
\alias{systemHTSeqCount}
\title{Wrapper for htseq-count}
\description{
Wrapper for htseq-count.
}
\usage{
\method{systemHTSeqCount}{default}(..., args=NULL, stdout=TRUE, stderr=FALSE, command="htseq-count",
.fake=FALSE, verbose=FALSE)
}
\arguments{
 \item{command}{A \code{\link[base]{character}} string specifying the htseq-count command.}
 \item{...}{Additional arguments specifying htseq-count command line switches.}
 \item{args, stdout, stderr}{Arguments passed to the internal \code{\link[base]{system2}} call.}
 \item{.fake}{If \code{\link[base:logical]{TRUE}}, the executable is not called.}
 \item{verbose}{See \code{\link[R.utils]{Verbose}}.}
}
\author{Taku Tokuyasu}
\references{
[1] HTSeq: Analysing high-throughput sequencing data with Python,
June 2013.
\url{http://www-huber.embl.de/users/anders/HTSeq/}
}
\keyword{internal}
|
c30aedf7aac6f52b44be838cd5fe7035bcd44938
|
25e2c290ed0810bc5dc817d5391de246c9915660
|
/cachematrix.R
|
ae0f93126fecb344eac9fac2aff55ea4c4be4551
|
[] |
no_license
|
agsalt/ProgrammingAssignment2
|
0c305b3efe2aa153b7c6d54a6e5f832a7261e537
|
92ad04593e6f9d876dfcfc749ec87d7030b3d6a6
|
refs/heads/master
| 2021-01-15T12:27:45.370971
| 2014-05-25T07:39:59
| 2014-05-25T07:39:59
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,864
|
r
|
cachematrix.R
|
## The following two functions assist with the potentially time consuming
## computation of finding the inverse of a matrix with the solve function.
## Instead of recalculating the inverse for the same matrix submitted more than once
## the original result is recalled from cache rather than recalculated.
## This first function creates a special matrix which is a list containing a function to
## 1. set the value of the matrix
## 2. get the value of the matrix
## 3. set the value of the matrix inverse
## 4. get the value of the matrix inverse
## Build a caching wrapper around a matrix: a list of four accessor
## functions that hold the matrix together with a cached inverse, so the
## inverse only ever needs to be computed once (see cacheSolve()).
makeCacheMatrix <- function(x = matrix()) {
  cached_inverse <- NULL
  list(
    # replace the stored matrix and invalidate any stale cached inverse
    set = function(y) {
      x <<- y
      cached_inverse <<- NULL
    },
    # return the stored matrix
    get = function() x,
    # store a freshly computed inverse in the cache
    setinverse = function(solve) cached_inverse <<- solve,
    # return the cached inverse, or NULL if it has not been computed yet
    getinverse = function() cached_inverse
  )
}
## The second function calculates the inverse of the special matrix above but
## first checks to see if it has already been calculated and if so gets the inverse
## from the cache and skips the computation. Otherwise it calculates the inverse of the
## data and sets the value of the inverse in the cache via the setinverse function.
## If the previously cahced version is returned the user is notified with
## the message "getting cached data" otherwise no message is displayed.
## Return the inverse of the special "matrix" created by makeCacheMatrix().
## The inverse is computed with solve() at most once: later calls return
## the cached value and announce it with a "getting cached data" message.
cacheSolve <- function(x, ...) {
  ## Return a matrix that is the inverse of 'x'
  cached <- x$getinverse()
  if (!is.null(cached)) {
    message("getting cached data")
    return(cached)
  }
  inverse <- solve(x$get(), ...)
  x$setinverse(inverse)
  inverse
}
|
10f4e693cfe00082cd14b5440096261606e5e74d
|
5ac57449f8a0cfbc0e9c8f716ab0a578d8606806
|
/man/fitplot.Rd
|
c7b372dcc34473a369a2d9409e9a90813193a9b6
|
[] |
no_license
|
hugaped/MBNMAtime
|
bfb6913e25cacd148ed82de5456eb9c5d4f93eab
|
04de8baa16bf1be4ad7010787a1feb9c7f1b84fd
|
refs/heads/master
| 2023-06-09T01:23:14.240105
| 2023-06-01T12:51:48
| 2023-06-01T12:51:48
| 213,945,629
| 5
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 2,097
|
rd
|
fitplot.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/plot.functions.R
\name{fitplot}
\alias{fitplot}
\title{Plot fitted values from MBNMA model}
\usage{
fitplot(
mbnma,
treat.labs = NULL,
disp.obs = TRUE,
n.iter = round(mbnma$BUGSoutput$n.iter/4),
n.thin = mbnma$BUGSoutput$n.thin,
...
)
}
\arguments{
\item{mbnma}{An S3 object of class \code{"mbnma"} generated by running
a time-course MBNMA model}
\item{treat.labs}{A character vector of treatment labels with which to name graph panels.
Can use \code{mb.network()[["treatments"]]} with original dataset if in doubt.}
\item{disp.obs}{A boolean object to indicate whether raw data responses should be
plotted as points on the graph}
\item{n.iter}{number of total iterations per chain (including burn in;
default: a quarter of the iterations used to estimate \code{mbnma})}

\item{n.thin}{thinning rate. Must be a positive integer. Set
\code{n.thin} > 1 to save memory and computation time if
\code{n.iter} is large. Default is the thinning rate used to
estimate \code{mbnma}.}
\item{...}{Arguments to be sent to \code{ggplot2::ggplot()}}
}
\value{
Generates a plot of fitted values from the MBNMA model and returns a list containing
the plot (as an object of class \code{c("gg", "ggplot")}), and a data.frame of posterior mean
fitted values for each observation.
}
\description{
Plot fitted values from MBNMA model
}
\details{
Fitted values should only be plotted for models that have converged successfully.
If fitted values (\code{theta}) have not been monitored in \code{mbnma$parameters.to.save}
then additional iterations will have to be run to get results for these.
}
\examples{
\donttest{
# Make network
painnet <- mb.network(osteopain)
# Run MBNMA
mbnma <- mb.run(painnet,
fun=temax(pool.emax="rel", method.emax="common",
pool.et50="abs", method.et50="random"))
# Plot fitted values from the model
# Monitor fitted values for 500 additional iterations
fitplot(mbnma, n.iter=500)
}
}
|
46f3225db6eeea8bdbf42d1baedf17df36beef04
|
7f9f945c8a02dfd5f38d30abfcbbfa20d24a4391
|
/R/fixest_multi.R
|
6185fb62dd2c41b7fbaa0abcd73a1b26cb79b7c3
|
[] |
no_license
|
lrberge/fixest
|
96428663b68c3701f1063f0fb76a87b68333b7d4
|
6b852fa277b947cea0bad8630986225ddb2d6f1b
|
refs/heads/master
| 2023-08-19T22:36:19.299625
| 2023-04-24T08:25:17
| 2023-04-24T08:25:17
| 200,205,405
| 309
| 64
| null | 2023-09-13T09:51:03
| 2019-08-02T09:19:18
|
R
|
UTF-8
|
R
| false
| false
| 42,851
|
r
|
fixest_multi.R
|
#----------------------------------------------#
# Author: Laurent Berge
# Date creation: Sat Nov 07 09:05:26 2020
# ~: fixest_multi
#----------------------------------------------#
setup_multi = function(data, values, var = NULL, tree = NULL){
    # Assemble a fixest_multi object from a list of estimations, attaching
    # the "tree" that records, for every model, the value taken by each
    # dimension (sample, lhs, rhs, etc.), plus the index attributes used
    # for subsetting and printing.
    #
    # the incoming data is ALWAYS strongly structured
    # => they all have the same number of elements
    # data:
    #    either a list of fixest objects
    #    either a list of fixest_multi objects
    #
    # values: must be strongly and properly formatted
    #  its length is the nber of objects (length(data)), with the appropriate names
    # var: to keep the information on the variable (sample, subset)
    # We also add the $model_info variable in each model

    # To remove after development
    check_arg(data, "list")
    check_arg(values, "named list")
    check_arg(var, "NULL character vector no na")
    check_arg(tree, "NULL data.frame")

    n_models = length(data)

    IS_VAR = !is.null(var)
    IS_TREE = !is.null(tree)

    if(!IS_TREE){
        stopifnot(identical(class(data), "list"))
    }

    var_label = NULL
    if(IS_VAR){
        # 'var' mode: a single dimension whose value carries both a variable
        # name and a value (e.g. sample splits); recycle a scalar 'var'
        stopifnot(length(values) == 1)
        var_label = names(values)[1]
        if(length(var) == 1){
            var = rep(var, n_models)
        }
    }

    IS_NESTED = inherits(data[[1]], "fixest_multi")

    if(IS_TREE){
        # This is an internal call from [.fixest_multi
        # data = the final data
        # values = the new tree
        res = data

        tree$id = NULL # we re-create it later

        if(IS_NESTED){
            # We allow non balanced data lists
            # => flatten the nested fixest_multi objects and expand the tree
            # accordingly (left part: this call's dims, right part: nested dims)
            res = vector("list", sum(lengths(data)))
            tree_left = list()
            tree_right = list()
            index = 1
            for(i in 1:n_models){
                data_i = data[[i]]

                # updating the tree
                tree_nested = attr(data_i, "tree")[, -1, drop = FALSE]
                n_nested = nrow(tree_nested)
                tree_left[[i]] = rep_df(tree[i, , drop = FALSE], each = n_nested)
                tree_right[[i]] = tree_nested

                for(j in 1:n_nested){
                    res[[index]] = data_i[[j]]
                    index = index + 1
                }
            }

            tree_left = do.call(rbind, tree_left)
            tree_right = do.call(rbind, tree_right)

            tree = cbind(tree_left, tree_right)
        }

    } else {

        v_names = names(values)
        tree = as.data.frame(values)

        if(IS_NESTED){
            # bookkeeping needed: note that we ensure beforehand that each element is STRONGLY consistent
            tree_left = list()
            tree_right = list()
            for(i in 1:n_models){

                # updating the tree
                tree_nested = attr(data[[i]], "tree")[, -1, drop = FALSE]
                n_nested = nrow(tree_nested)
                tree_left[[i]] = rep_df(tree[i, , drop = FALSE], each = n_nested)
                tree_right[[i]] = tree_nested
            }

            tree_left = do.call(rbind, tree_left)
            tree_right = do.call(rbind, tree_right)

            tree = cbind(tree_left, tree_right)

        } else if(!inherits(data[[1]], "fixest")){
            stop("Internal error: the current data type is not supportded by setup_multi.")
        }

        # res: a plain list containing all the models
        res = vector("list", nrow(tree))

        index = 1
        for(i in 1:n_models){
            data_i = data[[i]]

            # new model information, stored in each model's $model_info so a
            # single model still knows where it came from
            new_info = list()
            for(v in v_names){
                if(IS_VAR){
                    new_info[[v]] = list(var = var[i], value = values[[v]][i])
                } else {
                    new_info[[v]] = values[[v]][i]
                }
            }

            n_j = if(IS_NESTED) length(data_i) else 1

            for(j in 1:n_j){
                if(IS_NESTED){
                    mod = data_i[[j]]
                } else {
                    mod = data_i
                }

                # updating the model information
                model_info = mod$model_info
                for(v in names(new_info)){
                    model_info[[v]] = new_info[[v]]
                }

                mod$model_info = model_info
                res[[index]] = mod
                index = index + 1
            }
        }
    }

    if(IS_VAR){
        # keep the variable name alongside its values in a ".var" column
        tree = cbind(var, tree)
        names(tree)[1] = paste0(var_label, ".var")
    }

    # model names: "dim1: value1; dim2: value2; ..."
    tree_names = mapply(function(x, y) paste0(x, ": ", y), names(tree), tree)
    if(is.vector(tree_names)){
        model_names = paste(tree_names, collapse = "; ")
    } else {
        tree_names = as.data.frame(tree_names)
        model_names = apply(tree_names, 1, paste0, collapse = "; ")
    }

    # indexes
    info = index_from_tree(tree)
    index_names = info$index_names
    tree_index = info$tree_index

    tree = cbind(id = 1:nrow(tree), tree)

    # Shouldn't I remove tree_index and index_names since they can be built from the tree?
    # It seems it can be useful if they're directly computed... We'll see.

    names(res) = model_names
    class(res) = "fixest_multi"

    attr(res, "tree") = tree
    attr(res, "tree_index") = tree_index
    attr(res, "index_names") = index_names

    res
}
index_from_tree = function(tree){
    # From a models tree, build for each dimension (all columns except the
    # "id" column and the ".var" helper columns):
    #  - index_names: the unique level names, in order of first appearance
    #  - tree_index: the integer position of each row within those levels
    dims = names(tree)[!grepl("\\.var$|^id$", names(tree))]

    index_names = list()
    tree_index = list()
    for(v in dims){
        # factor() with levels = unique() keeps the original model ordering
        fct = factor(tree[[v]], levels = unique(tree[[v]]))
        index_names[[v]] = levels(fct)
        tree_index[[v]] = as.integer(unclass(fct))
    }

    list(index_names = index_names, tree_index = as.data.frame(tree_index))
}
reshape_multi = function(x, obs, colorder = NULL){
    # Rebuild a fixest_multi object keeping only the models indexed by 'obs'
    # (row indexes into the tree, in the requested order), optionally
    # regrouping the tree columns.
    #
    # x: fixest_multi object
    # obs: indexes to keep
    # colorder: optional character vector of dimension names; tree columns
    #   are regrouped in that order, the "id" column always coming last

    tree = attr(x, "tree")
    new_tree = tree[obs, , drop = FALSE]

    if(!is.null(colorder)){
        new_tree_list = list()
        for(i in seq_along(colorder)){
            # colorder[i] is used as a regular expression so that the
            # companion ".var" columns (e.g. "sample.var") are caught too
            qui = grepl(colorder[i], names(tree))
            new_tree_list[[i]] = new_tree[, qui, drop = FALSE]
        }
        # FIX: don't rely on the for-loop variable leaking out of the loop;
        # with a zero-length 'colorder' the original `i + 1` was an error
        new_tree_list[[length(colorder) + 1]] = new_tree["id"]
        new_tree = do.call(cbind, new_tree_list)
    }

    n_models = nrow(new_tree)
    new_data = vector("list", n_models)
    for(i in seq_len(n_models)){
        new_data[[i]] = x[[new_tree$id[i]]]
    }

    setup_multi(new_data, tree = new_tree)
}
set_index_multi = function(x, index_names){
    # Function specific to [.fixest_multi => global assignments!!!
    #
    # Turns a user-provided index for ONE dimension of a fixest_multi object
    # into a vector of integer positions. The ARGUMENT NAME matters: it is
    # deparsed to look up the dimension in 'index_names'.
    #
    # x: the user index -- logical (FALSE = "this dim was explicitly
    #    dropped"), character (set of regular expressions), or integers
    # index_names: named list of level names per dimension
    #
    # Side effect: when x is FALSE, the dimension name is appended to the
    # 'last' vector IN THE CALLING FRAME.

    arg = deparse(substitute(x))
    NAMES = index_names[[arg]]
    vmax = length(NAMES)

    if(is.logical(x)){
        if(isFALSE(x)){
            # record in the caller that this dimension was set to FALSE
            last = get("last", parent.frame())
            last[length(last) + 1] = arg
            assign("last", last, parent.frame())
        }
        # logical index => keep all levels
        res = 1:vmax
    } else if(is.character(x)){
        # character => regexes selecting (keep_apply) then reordering
        # (order_apply) the level names; mapped back to integer positions
        dict = 1:vmax
        names(dict) = NAMES
        vars = keep_apply(NAMES, x)
        vars = order_apply(vars, x)

        res = as.integer(dict[vars])

        if(length(res) == 0){
            stop_up("The set of regular expressions (equal to: ", x, ") in '", arg, "' didn't match any choice.")
        }

    } else if(any(abs(x) > vmax)){
        # integer index out of range (abs() also covers negative exclusion)
        stop_up("The index '", arg, "' cannot be greater than ", vmax, ". Currently ", x[which.max(abs(x))], " is not valid.")
    } else {
        res = x
    }

    res
}
rep_df = function(x, times = 1, each = 1, ...){
    # Row-wise rep() for a data.frame: repeats the rows of 'x' the way
    # rep() repeats vector elements. The trivial case returns 'x' itself,
    # so its attributes (row names, extra classes) are left untouched.
    no_rep = identical(times, 1) && identical(each, 1)
    if(no_rep) return(x)

    # rep() each column, then reassemble (row names are regenerated)
    cols = lapply(x, rep, times = times, each = each)
    as.data.frame(cols)
}
####
#### USER LEVEL ####
####
#' Extracts the models tree from a `fixest_multi` object
#'
#' Extracts the meta information on all the models contained in a `fixest_multi` estimation.
#'
#' @inheritParams print.fixest_multi
#' @param simplify Logical, default is `FALSE`. The default behavior is to display all the meta information, even if they are identical across models. By using `simplify = TRUE`, only the information with some variation is kept.
#'
#' @return
#' It returns a `data.frame` whose first column (named `id`) is the index of the models and the other columns contain the information specific to each model (e.g. which sample, which RHS, which dependent variable, etc).
#'
#' @examples
#'
#' # a multiple estimation
#' base = setNames(iris, c("y", "x1", "x2", "x3", "species"))
#' est = feols(y ~ csw(x.[, 1:3]), base, fsplit = ~species)
#'
#' # All the meta information
#' models(est)
#'
#' # Illustration: Why use simplify
#' est_sub = est[sample = 2]
#' models(est_sub)
#' models(est_sub, simplify = TRUE)
#'
#'
#'
models = function(x, simplify = FALSE){
    check_arg(x, "class(fixest_multi)")

    # The "tree" attribute stores one row per model with its meta
    # information (id + one column per dimension)
    res = attr(x, "tree")

    if(simplify){
        # keep only the columns that actually vary across models
        who_keep = sapply(res, function(x) length(unique(x)) != 1)
        if(!all(who_keep)){
            # we need to handle the behavior with the .var thing
            # (a kept column must drag along its ".var" companion, hence the
            # regex built from the kept names via the dsb() string DSL)
            names_keep = names(res)[who_keep]
            pattern = dsb("^(.['|'C?names_keep])")
            res = res[, grepl(pattern, names(res)), drop = FALSE]
        }
    }

    res
}
####
#### METHODS ####
####
#' Summary for fixest_multi objects
#'
#' Summary information for fixest_multi objects. In particular, this is used to specify the type of standard-errors to be computed.
#'
#' @method summary fixest_multi
#'
#' @inheritParams summary.fixest
#'
#' @inherit print.fixest_multi seealso
#'
#' @param object A `fixest_multi` object, obtained from a `fixest` estimation leading to multiple results.
#' @param type A character either equal to `"short"`, `"long"`, `"compact"`, `"se_compact"` or `"se_long"`. If `short`, only the table of coefficients is displayed for each estimation. If `long`, then the full results are displayed for each estimation. If `compact`, a `data.frame` is returned with one line per model and the formatted coefficients + standard-errors in the columns. If `se_compact`, a `data.frame` is returned with one line per model, one numeric column for each coefficient and one numeric column for each standard-error. If `"se_long"`, same as `"se_compact"` but the data is in a long format instead of wide.
#' @param ... Not currently used.
#'
#' @return
#' It returns either an object of class `fixest_multi` (if `type` equals `short` or `long`), either a `data.frame` (if type equals `compact` or `se_compact`).
#'
#' @examples
#'
#' base = iris
#' names(base) = c("y", "x1", "x2", "x3", "species")
#'
#' # Multiple estimation
#' res = feols(y ~ csw(x1, x2, x3), base, split = ~species)
#'
#' # By default, the type is "short"
#' # You can still use the arguments from summary.fixest
#' summary(res, se = "hetero")
#'
#' summary(res, type = "long")
#'
#' summary(res, type = "compact")
#'
#' summary(res, type = "se_compact")
#'
#' summary(res, type = "se_long")
#'
#'
summary.fixest_multi = function(object, type = "short", vcov = NULL, se = NULL, cluster = NULL, ssc = NULL,
                                .vcov = NULL, stage = 2, lean = FALSE, n = 1000, ...){
    # Applies summary() (i.e. the VCOV computation) to every model of the
    # multiple-estimations object; for type "compact"/"se_compact"/"se_long"
    # the result is reshaped into a data.frame (one row per model).

    dots = list(...)

    check_arg_plus(type, "match(short, long, compact, se_compact, se_long)")

    # remember the requested display type so print.fixest_multi can use it
    if(!missing(type) || is.null(attr(object, "print_request"))){
        attr(object, "print_request") = type
    }

    if(is_user_level_call()){
        validate_dots(suggest_args = c("type", "vcov"),
                      valid_args = c("agg", "forceCovariance", "keepBounded", "nthreads"))
    }

    est_1 = object[[1]]
    # recompute the VCOVs unless they are already there and this is an
    # internal call from print() (signalled via the 'fromPrint' dot)
    if(is.null(est_1$cov.scaled) || !isTRUE(dots$fromPrint)){
        for(i in 1:length(object)){
            object[[i]] = summary(object[[i]], vcov = vcov, se = se, cluster = cluster, ssc = ssc,
                                  .vcov = .vcov, stage = stage, lean = lean, n = n, ...)
        }

        # In IV: multiple estimations can be returned
        if("fixest_multi" %in% class(object[[1]])){
            tree = attr(object, "tree")
            object = setup_multi(object, tree = tree)
        }
    }

    if(type %in% c("compact", "se_compact", "se_long")){
        # one row per model: first the meta columns (sample, lhs, ...),
        # then one column per coefficient
        tree = attr(object, "tree")
        tree_index = attr(object, "tree_index")

        res = data.frame(i = tree$id)
        if(!"lhs" %in% names(tree_index)){
            # the dep. var. is always reported, even if it does not vary
            res$lhs = sapply(object, function(x) as.character(x$fml[[2]]))
        }

        for(my_dim in names(tree_index)){
            # sfill() right-pads so the columns line up when printed
            res[[my_dim]] = sfill(tree[[my_dim]], right = TRUE)
        }
        res$i = NULL

        if(type == "se_long"){
            # long format: one "coef" row and one "se" row per model
            res$type = "coef"
        }

        n_start = ncol(res)

        signifCode = c("***"=0.001, "**"=0.01, "*"=0.05, "."=0.1)

        # collect, for each model, a named vector of formatted values
        ct_all = list()
        for(i in seq_along(object)){
            ct = object[[i]]$coeftable

            vname = row.names(ct)

            if(type == "compact"){
                # "estimate*** (se)" strings
                stars = cut(ct[, 4], breaks = c(-1, signifCode, 100), labels = c(names(signifCode), ""))
                stars[is.na(stars)] = ""

                value = paste0(format_number(ct[, 1], 3), stars, " (", format_number(ct[, 2], 3), ")")
                names(value) = vname
            } else if(type %in% c("se_compact", "se_long")){
                # numeric pairs: coef then its "__se" companion, interleaved
                n = length(vname)
                vname_tmp = character(2 * n)
                qui_coef = seq(1, by = 2, length.out = n)
                qui_se = seq(2, by = 2, length.out = n)
                vname_tmp[qui_coef] = vname
                vname_tmp[qui_se] = paste0(vname, "__se")
                vname = vname_tmp

                value = numeric(2 * n)
                value[qui_coef] = ct[, 1]
                value[qui_se] = ct[, 2]
                names(value) = vname
            }

            ct_all[[i]] = value
        }

        # union of all coefficient names, aligned across models (NA when a
        # coefficient is absent from a model)
        vname_all = unique(unlist(lapply(ct_all, names)))

        tmp = lapply(ct_all, function(x) x[vname_all])
        my_ct = do.call("rbind", tmp)
        colnames(my_ct) = vname_all

        if(type == "compact"){
            my_ct[is.na(my_ct)] = ""
        }

        for(i in seq_along(vname_all)){
            if(type == "compact"){
                # align on the opening parenthesis of the SE
                res[[vname_all[i]]] = sfill(my_ct[, i], anchor = "(", right = TRUE)
            } else {
                res[[vname_all[i]]] = my_ct[, i]
            }
        }

        if(type == "se_long"){
            # clumsy... but works
            # split the wide coef/se columns into stacked "coef"/"se" rows,
            # interleaved per model
            who_se = which(grepl("__se", names(res)))
            se_all = res[, c(1:n_start, who_se)]
            se_all$type = "se"
            names(se_all) = gsub("__se$", "", names(se_all))

            coef_all = res[, -who_se]

            quoi = rbind(coef_all, se_all)
            n = nrow(coef_all)
            res = quoi[rep(1:n, each = 2) + rep(c(0, n), n), ]
            row.names(res) = NULL
        }

        return(res)
    }

    return(object)
}
#' Print method for fixest_multi objects
#'
#' Displays summary information on fixest_multi objects in the R console.
#'
#' @method print fixest_multi
#'
#' @param x A `fixest_multi` object, obtained from a `fixest` estimation leading to multiple results.
#' @param ... Other arguments to be passed to [`summary.fixest_multi`].
#'
#' @seealso
#' The main fixest estimation functions: [`feols`], [`fepois`][fixest::feglm], [`fenegbin`][fixest::femlm], [`feglm`], [`feNmlm`]. Tools for mutliple fixest estimations: [`summary.fixest_multi`], [`print.fixest_multi`], [`as.list.fixest_multi`], \code{\link[fixest]{sub-sub-.fixest_multi}}, \code{\link[fixest]{sub-.fixest_multi}}.
#'
#' @examples
#'
#' base = iris
#' names(base) = c("y", "x1", "x2", "x3", "species")
#'
#' # Multiple estimation
#' res = feols(y ~ csw(x1, x2, x3), base, split = ~species)
#'
#' # Let's print all that
#' res
#'
print.fixest_multi = function(x, ...){
    # Displays all the models of a fixest_multi object, with headers for
    # each dimension along which the models vary (nested banners: the
    # outermost dimension gets the most prominent one).

    if(is_user_level_call()){
        validate_dots(valid_args = dsb("/type, vcov, se, cluster, ssc, stage, lean, agg, forceCovariance, keepBounded, n, nthreads"))
    }

    # summary() computes the VCOVs and handles the 'type' argument;
    # 'fromPrint' avoids recomputing VCOVs that are already there
    x = summary(x, fromPrint = TRUE, ...)

    # Type = compact
    if(is.data.frame(x)){
        return(x)
    }

    is_short = identical(attr(x, "print_request"), "short")

    tree = attr(x, "tree")
    tree_index = attr(x, "tree_index")

    # if(nrow(tree)){
    #     # Only one estimation
    #     print(x[[1]])
    #     return(invisible(NULL))
    # }

    # Finding out the type of SEs
    if(is_short){
        all_se = unique(unlist(sapply(x, function(x) attr(x$cov.scaled, "type"))))
        if(length(all_se) > 1){
            cat("Standard-errors: mixed (use summary() with arg. 'vcov' to harmonize them) \n")
        } else if(length(all_se) == 1){
            cat("Standard-errors:", all_se, "\n")
        }
    }

    dict_title = c("sample" = "Sample", "lhs" = "Dep. var.", "rhs" = "Expl. vars.",
                   "iv" = "IV", "fixef" = "Fixed-effects", sample.var = "Sample var.")

    # dimensions taking a single value are printed once, up front, then
    # dropped from the per-model headers
    qui_drop = apply(tree_index, 2, max) == 1
    if(any(qui_drop) && !all(qui_drop)){
        var2drop = names(tree_index)[qui_drop]
        for(d in var2drop){
            cat(dict_title[d], ": ", tree[[d]][1], "\n", sep = "")
        }

        tree = tree[, !names(tree) %in% var2drop, drop = FALSE]
        tree_index = tree_index[, !qui_drop, drop = FALSE]
    }

    depth = ncol(tree_index)

    # header renderers by prominence (1 = innermost, 5 = outermost)
    headers = list()
    headers[[1]] = function(d, i) cat(dict_title[d], ": ", tree[[d]][i], "\n", sep = "")
    headers[[2]] = function(d, i) cat("\n### ", dict_title[d], ": ", tree[[d]][i], "\n\n", sep = "")
    headers[[3]] = function(d, i) cat("\n\n# ", toupper(dict_title[d]), ": ", tree[[d]][i], "\n\n", sep = "")
    headers[[4]] = function(d, i) cat("\n\n#\n# ", toupper(dict_title[d]), ": ", tree[[d]][i], "\n#\n\n", sep = "")
    headers[[5]] = function(d, i) cat("\n\n#===\n# ", toupper(dict_title[d]), ": ", tree[[d]][i], "\n#===\n\n", sep = "")

    for(i in 1:nrow(tree)){
        for(j in 1:depth){
            d = names(tree_index)[j]
            # print a header each time the dimension's value changes
            if(i == 1 || tree_index[i - 1, j] != tree_index[i, j]){
                headers[[depth - j + 1]](d, i)
            }
        }

        if(is_short){
            if(isTRUE(x[[i]]$onlyFixef)){
                cat("No variable (only the fixed-effects).\n")
            } else {
                print_coeftable(coeftable = coeftable(x[[i]]), show_signif = FALSE)
            }

            if(tree_index[i, depth] != max(tree_index[, depth])) cat("---\n")
        } else {
            print(x[[i]])
            if(tree_index[i, depth] != max(tree_index[, depth])) cat("\n")
        }
    }
}
#' Extracts one element from a `fixest_multi` object
#'
#' Extracts single elements from multiple `fixest` estimations.
#'
#' @inherit print.fixest_multi seealso
#' @inheritParams print.fixest_multi
#'
#' @param i An integer scalar. The identifier of the estimation to extract.
#'
#' @return
#' A `fixest` object is returned.
#'
#' @examples
#'
#' base = iris
#' names(base) = c("y", "x1", "x2", "x3", "species")
#'
#' # Multiple estimation
#' res = feols(y ~ csw(x1, x2, x3), base, split = ~species)
#'
#' # The first estimation
#' res[[1]]
#'
#' # The second one, etc
#' res[[2]]
#'
"[[.fixest_multi" = function(x, i){
n = length(x)
check_arg_plus(i, "evalset integer scalar mbt", .data = list(.N = n))
if(i < 0 || i > length(x)){
stop("Index 'i' must lie within [1; ", n, "]. Problem: it is equal to ", i, ".")
}
`[[.data.frame`(x, i)
}
#' Subsets a fixest_multi object
#'
#' Subsets a fixest_multi object using different keys.
#'
#'
#' @inherit print.fixest_multi seealso
#' @inheritParams print.fixest_multi
#'
#' @param sample An integer vector, a logical scalar, or a character vector. It represents the `sample` identifiers for which the results should be extracted. Only valid when the `fixest` estimation was a split sample. You can use `.N` to refer to the last element. If logical, all elements are selected in both cases, but `FALSE` leads `sample` to become the rightmost key (just try it out).
#' @param lhs An integer vector, a logical scalar, or a character vector. It represents the left-hand-sides identifiers for which the results should be extracted. Only valid when the `fixest` estimation contained multiple left-hand-sides. You can use `.N` to refer to the last element. If logical, all elements are selected in both cases, but `FALSE` leads `lhs` to become the rightmost key (just try it out).
#' @param rhs An integer vector or a logical scalar. It represents the right-hand-sides identifiers for which the results should be extracted. Only valid when the `fixest` estimation contained multiple right-hand-sides. You can use `.N` to refer to the last element. If logical, all elements are selected in both cases, but `FALSE` leads `rhs` to become the rightmost key (just try it out).
#' @param fixef An integer vector or a logical scalar. It represents the fixed-effects identifiers for which the results should be extracted. Only valid when the `fixest` estimation contained fixed-effects in a stepwise fashion. You can use `.N` to refer to the last element. If logical, all elements are selected in both cases, but `FALSE` leads `fixef` to become the rightmost key (just try it out).
#' @param iv An integer vector or a logical scalar. It represent the stages of the IV. Note that the length can be greater than 2 when there are multiple endogenous regressors (the first stage corresponding to multiple estimations). Note that the order of the stages depends on the `stage` argument from [`summary.fixest`]. If logical, all elements are selected in both cases, but `FALSE` leads `iv` to become the rightmost key (just try it out).
#' @param i An integer vector. Represents the estimations to extract.
#' @param I An integer vector. Represents the root element to extract.
#' @param reorder Logical, default is `TRUE`. Indicates whether reordering of the results should be performed depending on the user input.
#' @param drop Logical, default is `FALSE`. If the result contains only one estimation, then if `drop = TRUE` it will be transformed into a `fixest` object (instead of `fixest_multi`).
#'
#' @details
#' The order in which we use the keys matters. Every time a key `sample`, `lhs`, `rhs`, `fixef` or `iv` is used, a reordering is performed to consider the leftmost-side key to be the new root.
#'
#' Use logical keys to easily reorder. For example, say the object `res` contains a multiple estimation with multiple left-hand-sides, right-hand-sides and fixed-effects. By default the results are ordered as follows: `lhs`, `fixef`, `rhs`. If you use `res[lhs = FALSE]`, then the new order is: `fixef`, `rhs`, `lhs`. With `res[rhs = TRUE, lhs = FALSE]` it becomes: `rhs`, `fixef`, `lhs`. In both cases you keep all estimations.
#'
#' @return
#' It returns a `fixest_multi` object. If there is only one estimation left in the object, then the result is simplified into a `fixest` object.
#'
#' @examples
#'
#' # Estimation with multiple samples/LHS/RHS
#' aq = airquality[airquality$Month %in% 5:6, ]
#' est_split = feols(c(Ozone, Solar.R) ~ sw(poly(Wind, 2), poly(Temp, 2)),
#' aq, split = ~ Month)
#'
#' # By default: sample is the root
#' etable(est_split)
#'
#' # Let's reorder, by considering lhs the root
#' etable(est_split[lhs = 1:.N])
#'
#' # Selecting only one LHS and RHS
#' etable(est_split[lhs = "Ozone", rhs = 1])
#'
#' # Taking the first root (here sample = 5)
#' etable(est_split[I = 1])
#'
#' # The first and last estimations
#' etable(est_split[i = c(1, .N)])
#'
"[.fixest_multi" = function(x, i, sample, lhs, rhs, fixef, iv, I, reorder = TRUE, drop = FALSE){
core_args = c("sample", "lhs", "rhs", "fixef", "iv")
check_arg(reorder, drop, "logical scalar")
extra_args = c("reorder", "drop")
mc = match.call()
if(!any(c(core_args, "i", "I") %in% names(mc))){
return(x)
}
use_i = "i" %in% names(mc)
if(use_i && any(c(core_args, "I") %in% names(mc))){
stop("The index 'i' cannot be used with any other index (", enumerate_items(c(core_args, "I"), "quote.or"), ").")
}
use_I = "I" %in% names(mc)
if(use_I && any(core_args %in% names(mc))){
stop("The index 'I' cannot be used with any other index (", enumerate_items(c("i", core_args), "quote.or"), ").")
}
# We get the meta information
tree = attr(x, "tree")
tree_index = attr(x, "tree_index")
index_names = attr(x, "index_names")
index_n = lapply(index_names, length)
# tree_index does not contain extra info like id or .var
args = c(names(tree_index), extra_args)
nc = ncol(tree)
n = nrow(tree)
if(!use_i && !use_I){
pblm = setdiff(names(mc)[-(1:2)], args)
if(length(pblm) > 0){
stop("The ", ifsingle(pblm, "index", "indices"), " ", enumerate_items(pblm, "is"),
" not valid for this list of results (the valid one", plural_len(index_n, "s.is"), " ", enumerate_items(names(index_n)), ").")
}
}
if(use_i){
check_arg_plus(i, "evalset integer vector l0 no na", .data = list(.N = n))
if(length(i) == 0) return(list())
if(any(abs(i) > n)){
stop("The index 'i' cannot have values greater than ", n, ". Currently ", i[which.max(abs(i))], " is not valid.")
}
obs = (1:n)[i]
res = reshape_multi(x, obs)
return(res)
}
if(use_I){
I_max = index_n[[1]]
check_arg_plus(I, "evalset integer vector no na", .data = list(.N = I_max))
if(any(abs(I) > I_max)){
stop("The index 'I' refers to root elements (here ", names(index_n)[1], "), and thus cannot be greater than ", I_max, ". Currently ", I[which.max(abs(I))], " is not valid.")
}
obs = (1:I_max)[I]
tree_index$obs = 1:nrow(tree_index)
new_tree = list()
for(i in seq_along(obs)){
new_tree[[i]] = tree_index[tree_index[[1]] == obs[i], ]
}
tree_index = do.call(base::rbind, new_tree)
res = reshape_multi(x, tree_index$obs)
return(res)
}
# Here:
# We take care of reordering properly
is_sample = !missing(sample)
is_lhs = !missing(lhs)
is_rhs = !missing(rhs)
is_fixef = !missing(fixef)
is_iv = !missing(iv)
selection = list()
last = c()
s_max = index_n[["sample"]]
if(is_sample){
check_arg_plus(sample, "evalset logical scalar | vector(character, integer) no na", .data = list(.N = s_max))
sample = set_index_multi(sample, index_names)
selection$sample = (1:s_max)[sample]
} else if("sample" %in% names(index_n)){
selection$sample = 1:s_max
}
lhs_max = index_n[["lhs"]]
if(is_lhs){
check_arg_plus(lhs, "evalset logical scalar | vector(character, integer) no na", .data = list(.N = lhs_max))
lhs = set_index_multi(lhs, index_names)
selection$lhs = (1:lhs_max)[lhs]
} else if("lhs" %in% names(index_n)){
selection$lhs = 1:lhs_max
}
rhs_max = index_n[["rhs"]]
if(is_rhs){
check_arg_plus(rhs, "evalset logical scalar | vector(character, integer) no na", .data = list(.N = rhs_max))
rhs = set_index_multi(rhs, index_names)
selection$rhs = (1:rhs_max)[rhs]
} else if("rhs" %in% names(index_n)){
selection$rhs = 1:rhs_max
}
fixef_max = index_n[["fixef"]]
if(is_fixef){
check_arg_plus(fixef, "evalset logical scalar | vector(character, integer) no na", .data = list(.N = fixef_max))
fixef = set_index_multi(fixef, index_names)
selection$fixef = (1:fixef_max)[fixef]
} else if("fixef" %in% names(index_n)){
selection$fixef = 1:fixef_max
}
iv_max = index_n[["iv"]]
if(is_iv){
check_arg_plus(iv, "evalset logical scalar | vector(character, integer) no na", .data = list(.N = iv_max))
iv = set_index_multi(iv, index_names)
selection$iv = (1:iv_max)[iv]
} else if("iv" %in% names(index_n)){
selection$iv = 1:iv_max
}
# We keep the order of the user!!!!!
sc = sys.call()
user_order = setdiff(names(sc)[-(1:2)], extra_args)
if(reorder == FALSE){
user_order = names(index_n)
} else {
user_order = c(user_order, setdiff(names(index_n), user_order))
if(length(last) > 0){
user_order = c(setdiff(user_order, last), last)
}
}
tree_index$obs = 1:nrow(tree_index)
for(my_dim in rev(user_order)){
# 1) we prune the tree
obs_keep = tree_index[[my_dim]] %in% selection[[my_dim]]
if(!any(obs_keep)){
stop("No models ended up selected: revise selection?")
}
tree_index = tree_index[obs_keep, , drop = FALSE]
# 2) we reorder it according to the order of the user
new_tree = list()
dim_order = selection[[my_dim]]
for(i in seq_along(dim_order)){
new_tree[[i]] = tree_index[tree_index[[my_dim]] == dim_order[i], , drop = FALSE]
}
tree_index = do.call(base::rbind, new_tree)
}
n_models = nrow(tree_index)
if(n_models == 1 && drop){
return(x[[tree_index$obs]])
}
# Reshaping a fixest_multi object properly
res = reshape_multi(x, tree_index$obs, user_order)
return(res)
}
#' Transforms a fixest_multi object into a list
#'
#' Extracts the results from a `fixest_multi` object and place them into a list.
#'
#' @inheritParams print.fixest_multi
#' @inherit print.fixest_multi seealso
#'
#' @method as.list fixest_multi
#'
#' @param ... Not currently used.
#'
#' @return
#' Returns a list containing all the results of the multiple estimations.
#'
#' @examples
#'
#' base = iris
#' names(base) = c("y", "x1", "x2", "x3", "species")
#'
#' # Multiple estimation
#' res = feols(y ~ csw(x1, x2, x3), base, split = ~species)
#'
#' # All the results at once
#' as.list(res)
#'
#'
as.list.fixest_multi = function(x, ...){
nm = names(x)
attributes(x) = NULL
names(x) = nm
x
}
#' Extracts the coefficients of fixest_multi objects
#'
#' Utility to extract the coefficients of multiple estimations and rearrange them into a matrix.
#'
#' @inheritParams etable
#' @inheritParams coef.fixest
#'
#' @param object A `fixest_multi` object. Obtained from a multiple estimation.
#' @param long Logical, default is `FALSE`. Whether the results should be displayed in a long format.
#' @param na.rm Logical, default is `TRUE`. Only applies when `long = TRUE`: whether to remove the coefficients with `NA` values.
#' @param ... Not currently used.
#'
#'
#' @examples
#'
#' base = iris
#' names(base) = c("y", "x1", "x2", "x3", "species")
#'
#' # A multiple estimation
#' est = feols(y ~ x1 + csw0(x2, x3), base)
#'
#' # Getting all the coefficients at once,
#' # each row is a model
#' coef(est)
#'
#' # Example of keep/drop/order
#' coef(est, keep = "Int|x1", order = "x1")
#'
#'
#' # To change the order of the model, use fixest_multi
#' # extraction tools:
#' coef(est[rhs = .N:1])
#'
#' # collin + long + na.rm
#' base$x1_bis = base$x1 # => collinear
#' est = feols(y ~ x1_bis + csw0(x1, x2, x3), base, split = ~species)
#'
#' # does not display x1 since it is always collinear
#' coef(est)
#' # now it does
#' coef(est, collin = TRUE)
#'
#' # long
#' coef(est, long = TRUE)
#'
#' # long but balanced (with NAs then)
#' coef(est, long = TRUE, na.rm = FALSE)
#'
#'
coef.fixest_multi = function(object, keep, drop, order, collin = FALSE,
long = FALSE, na.rm = TRUE, ...){
# row: model
# col: coefficient
check_arg(keep, drop, order, "NULL character vector no na")
check_arg(collin, "logical scalar")
res_list = list()
for(i in seq_along(object)){
res_list[[i]] = coef(object[[i]], collin = collin)
}
all_names = unique(unlist(lapply(res_list, names)))
if(!missnull(keep)) all_names = keep_apply(all_names, keep)
if(!missnull(drop)) all_names = drop_apply(all_names, drop)
if(!missnull(order)) all_names = order_apply(all_names, order)
if(length(all_names) == 0) return(NULL)
nr = length(res_list)
nc = length(all_names)
res_list = lapply(res_list, function(x) x[all_names])
res = do.call(rbind, res_list)
colnames(res) = all_names
# model information
mod = models(object)
if(long){
res_long = c(t(res), recursive = TRUE)
tmp = data.frame(coefficient = res_long)
mod_long = rep_df(mod, each = ncol(res))
res = cbind(mod_long, coefficient = rep(all_names, NROW(res)),
estimate = res_long)
if(na.rm && anyNA(res$estimate)){
res = res[!is.na(res$estimate), , drop = FALSE]
}
} else {
res = cbind(mod, res)
}
res
}
#' @rdname coef.fixest_multi
coefficients.fixest_multi <- coef.fixest_multi
# Alias: coefficients() and coef() dispatch to the same implementation.
#' Extracts the coefficients tables from `fixest_multi` estimations
#'
#' Series of methods to extract the coefficients table or its sub-components from a `fixest_multi` objects (i.e. the outcome of multiple estimations).
#'
#' @inheritParams etable
#'
#' @param object A `fixest_multi` object, coming from a `fixest` multiple estimation.
#' @param wide A logical scalar, default is `FALSE`. If `TRUE`, then a list is returned: the elements of the list are coef/se/tstat/pvalue. Each element of the list is a wide table with a column per coefficient.
#' @param long Logical scalar, default is `FALSE`. If `TRUE`, then all the information is stacked, with two columns containing the information: `"param"` and `"value"`. The column `param` contains the values `coef`/`se`/`tstat`/`pvalue`.
#' @param ... Other arguments to be passed to [`summary.fixest`].
#'
#' @return
#' It returns a `data.frame` containing the coefficients tables (or just the se/pvalue/tstat) along with the information on which model was estimated.
#'
#' If `wide = TRUE`, then a list is returned. The elements of the list are coef/se/tstat/pvalue. Each element of the list is a wide table with a column per coefficient.
#'
#' If `long = TRUE`, then all the information is stacked. This removes the 4 columns containing the coefficient estimates to the p-values, and replace them with two new columns: `"param"` and `"value"`. The column `param` contains the values `coef`/`se`/`tstat`/`pvalue`, and the column `values` the associated numerical information.
#'
#' @examples
#'
#' base = setNames(iris, c("y", "x1", "x2", "x3", "species"))
#' est_multi = feols(y~ csw(x.[,1:3]), base, split = ~species)
#'
#' # we get all the coefficient tables at once
#' coeftable(est_multi)
#'
#' # Now just the standard-errors
#' se(est_multi)
#'
#' # wide = TRUE => leads to a list of wide tables
#' coeftable(est_multi, wide = TRUE)
#'
#' # long = TRUE, all the information is stacked
#' coeftable(est_multi, long = TRUE)
#'
#'
#'
coeftable.fixest_multi = function(object, vcov = NULL, keep = NULL, drop = NULL, order = NULL,
long = FALSE, wide = FALSE, ...){
check_arg(keep, drop, order, "NULL character vector no na")
check_arg(wide, "logical scalar | charin(se, pvalue, tstat)")
check_arg(long, "logical scalar")
if(long && !isFALSE(wide)){
stop("You cannot have 'wide = TRUE' with 'long = TRUE', please choose.")
}
mod = models(object)
res_list = list()
for(i in seq_along(object)){
ct = coeftable(object[[i]], vcov = vcov, keep = keep, drop = drop, order = order, ...)
if(is.null(ct)){
next
}
ct = cbind(coefficient = row.names(ct), as.data.frame(ct))
mod_current = rep_df(mod[i, , drop = FALSE], each = nrow(ct))
res_list[[length(res_list) + 1]] = cbind(mod_current, ct)
}
if(length(res_list) == 0){
stop("Not any variable was selected: revise you 'keep'/'drop' arguments?")
}
res = do.call(rbind, res_list)
row.names(res) = NULL
if(!isFALSE(wide)){
# we return a list of wide tables
res_list = list()
roots = c("coef", "se", "tstat", "pvalue")
if(isTRUE(wide)) wide = roots
i_coef = which(names(res) == "coefficient")
for(i_select in which(roots %in% wide)){
all_coef = unique(res$coefficient)
all_id = unique(res$id)
key = paste0(res$id, "; ", res$coefficient)
value = res[, i_coef + i_select]
names(value) = key
df_xpd = expand.grid(coef = all_coef, id = all_id)
new_key = paste0(df_xpd$id, "; ", df_xpd$coef)
res_wide = matrix(value[new_key], nrow = length(all_id), ncol = length(all_coef),
dimnames = list(NULL, all_coef), byrow = TRUE)
item = cbind(mod[all_id, , drop = FALSE], as.data.frame(res_wide))
if(length(wide) == 1){
# internal call
return(item)
}
res_list[[roots[i_select]]] = item
}
res = res_list
}
if(long){
i_coef = which(names(res) == "coefficient")
values = res[, i_coef + 1:4]
values_all = c(t(values), recursive = TRUE)
params = data.frame(param = rep(c("coef", "se", "tstat", "pvalue"), nrow(res)))
info = rep_df(res[, 1:i_coef, drop = FALSE], each = 4)
res = cbind(info, params, value = values_all)
}
res
}
#' @describeIn coeftable.fixest_multi Extracts the standard-errors from `fixest_multi` estimations
se.fixest_multi = function(object, vcov = NULL, keep = NULL, drop = NULL, order = NULL,
                           long = FALSE, ...){
    # Standard-errors of multiple estimations; wide format by default (same
    # layout as coef). Delegates the work to coeftable.fixest_multi.
    check_arg(keep, drop, order, "NULL character vector no na")
    check_arg(long, "logical scalar")
    # 'wide' is controlled internally here => forbid it explicitly
    user_call = match.call()
    if("wide" %in% names(user_call)){
        stop("The argument 'wide' is not a valid argument.")
    }
    full_table = coeftable(object, vcov = vcov, keep = keep, drop = drop, order = order,
                           wide = if(long) FALSE else "se", ...)
    if(!long) return(full_table)
    # Long format: keep model info up to `coefficient`, plus the SE column
    # (2nd statistic column after `coefficient`)
    id_col = which(names(full_table) == "coefficient")
    full_table[, c(1:id_col, id_col + 2)]
}
#' @describeIn coeftable.fixest_multi Extracts the t-stats from `fixest_multi` estimations
tstat.fixest_multi = function(object, vcov = NULL, keep = NULL, drop = NULL, order = NULL,
                              long = FALSE, ...){
    # t-statistics of multiple estimations; wide format by default (same
    # layout as coef). Delegates the work to coeftable.fixest_multi.
    check_arg(keep, drop, order, "NULL character vector no na")
    check_arg(long, "logical scalar")
    # 'wide' is controlled internally here => forbid it explicitly
    user_call = match.call()
    if("wide" %in% names(user_call)){
        stop("The argument 'wide' is not a valid argument.")
    }
    full_table = coeftable(object, vcov = vcov, keep = keep, drop = drop, order = order,
                           wide = if(long) FALSE else "tstat", ...)
    if(!long) return(full_table)
    # Long format: keep model info up to `coefficient`, plus the t-stat column
    # (3rd statistic column after `coefficient`)
    id_col = which(names(full_table) == "coefficient")
    full_table[, c(1:id_col, id_col + 3)]
}
#' @describeIn coeftable.fixest_multi Extracts the p-values from `fixest_multi` estimations
pvalue.fixest_multi = function(object, vcov = NULL, keep = NULL, drop = NULL, order = NULL,
                               long = FALSE, ...){
    # p-values of multiple estimations; wide format by default (same layout
    # as coef). Delegates the work to coeftable.fixest_multi.
    check_arg(keep, drop, order, "NULL character vector no na")
    check_arg(long, "logical scalar")
    # 'wide' is controlled internally here => forbid it explicitly
    user_call = match.call()
    if("wide" %in% names(user_call)){
        stop("The argument 'wide' is not a valid argument.")
    }
    full_table = coeftable(object, vcov = vcov, keep = keep, drop = drop, order = order,
                           wide = if(long) FALSE else "pvalue", ...)
    if(!long) return(full_table)
    # Long format: keep model info up to `coefficient`, plus the p-value
    # column (4th statistic column after `coefficient`)
    id_col = which(names(full_table) == "coefficient")
    full_table[, c(1:id_col, id_col + 4)]
}
#' Extracts the residuals from a `fixest_multi` object
#'
#' Utility to extract the residuals from multiple `fixest` estimations. If possible, all the residuals are coerced into a matrix.
#'
#' @inheritParams resid.fixest
#'
#' @param object A `fixes_multi` object.
#' @param na.rm Logical, default is `FALSE`. Should the NAs be kept? If `TRUE`, they are removed.
#'
#' @return
#' If all the models return residuals of the same length, a matrix is returned. Otherwise, a `list` is returned.
#'
#' @examples
#'
#' base = iris
#' names(base) = c("y", "x1", "x2", "x3", "species")
#'
#' # A multiple estimation
#' est = feols(y ~ x1 + csw0(x2, x3), base)
#'
#' # We can get all the residuals at once,
#' # each column is a model
#' head(resid(est))
#'
#' # We can select/order the model using fixest_multi extraction
#' head(resid(est[rhs = .N:1]))
#'
resid.fixest_multi = function(object, type = c("response", "deviance", "pearson", "working"), na.rm = FALSE, ...){
# Je fais un prototype pour le moment, je l'ameliorerai apres (07-04-2021)
check_arg_plus(type, "match")
check_arg_plus(na.rm, "logical scalar")
res_list = list()
for(i in seq_along(object)){
res_list[[i]] = resid(object[[i]], type = type, na.rm = na.rm)
}
n_all = sapply(res_list, length)
if(all(n_all == n_all[1])){
res = do.call(cbind, res_list)
} else {
res = res_list
}
res
}
#' @rdname resid.fixest_multi
residuals.fixest_multi <- resid.fixest_multi
# Alias: residuals() and resid() dispatch to the same implementation.
#' Confidence intervals for `fixest_multi` objects
#'
#' Computes the confidence intervals of parameter estimates for `fixest`'s multiple estimation objects (aka `fixest_multi`).
#'
#' @inheritParams confint.fixest
#'
#' @param object A `fixest_multi` object obtained from a multiple estimation in `fixest`.
#'
#' @return
#' It returns a data frame whose first columns indicate which model has been estimated. The last three columns indicate the coefficient name, and the lower and upper confidence intervals.
#'
#' @examples
#'
#' base = setNames(iris, c("y", "x1", "x2", "x3", "species"))
#' est = feols(y ~ csw(x.[,1:3]) | sw0(species), base, vcov = "iid")
#'
#' confint(est)
#'
#' # focusing only on the coefficient 'x3'
#' confint(est, "x3")
#'
#' # the 'id' provides the index of the estimation
#' est[c(3, 6)]
#'
confint.fixest_multi = function(object, parm, level = 0.95, vcov = NULL, se = NULL,
cluster = NULL, ssc = NULL, ...){
n = length(object)
confint_all = vector("list", n)
for(i in 1:n){
confint_all[[i]] = confint(object[[i]], parm = parm, level = level,
vcov = vcov, se = se, cluster = cluster,
ssc = ssc, coef.col = TRUE, internal = TRUE, ...)
}
n_all = sapply(confint_all, NROW)
if(max(n_all) == 0){
stop("No coefficient could be selected. Revise the argument `parm`?")
}
mod = models(object)
# Formatting
mod_all = rep_df(mod, times = n_all)
res = do.call(base::rbind, confint_all)
res = cbind(mod_all, res)
attr(res, "type") = attr(confint_all[n_all > 0][[1]], "type")
res
}
|
f3fc50d4d78043669724a1ad14e2e8e310ec61b1
|
324eca83aa70857741464da7aadfe7e688e3f513
|
/man/logfile_-.logger.Rd
|
d9bf5873f88d55571a3b6fda6d7f3c27c52c9e77
|
[] |
no_license
|
jcborras/log4r
|
62aa2b07bda0307ec9a714f6b77f1e76b9917e8c
|
7a1d0b8232f3bbb49723b4dbd4c919edb4efdb1b
|
refs/heads/master
| 2020-12-25T02:59:52.966156
| 2011-05-23T08:19:04
| 2011-05-23T08:19:04
| 1,759,788
| 1
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 501
|
rd
|
logfile_-.logger.Rd
|
\name{logfile<-.logger}
\alias{logfile<-.logger}
\alias{logfile<-}
\title{
Set the logfile for a logger object.
}
\description{
Set the logfile for a logger object. Must be a valid path in an already existing directory.
}
\usage{
logfile(x) <- value
}
\arguments{
\item{x}{
An object of class logger.
}
\item{value}{
The path name of a file to be used for logging.
}
}
\examples{
library('log4r')
logger <- create.logger()
logfile(logger) <- 'debug.log'
debug(logger, 'A Debugging Message')}
|
374cd661b939d8df6e1d74032028a2de928690f7
|
d07f0e82f7ec59f000ec2a9c78bbcbad27912d5c
|
/man/pairwiseCI.package.Rd
|
8b9f669d1cd1d37501447c016ebd191fee8532a0
|
[] |
no_license
|
cran/pairwiseCI
|
e4d4bbfe27e3714ccc8118b70af5dced9b8040d4
|
600fb0d96cff9b1820a529bb605a822d3d6e1ef4
|
refs/heads/master
| 2021-01-20T11:59:53.231348
| 2019-03-11T10:20:03
| 2019-03-11T10:20:03
| 17,698,181
| 0
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,408
|
rd
|
pairwiseCI.package.Rd
|
\name{pairwiseCI-package}
\alias{pairwiseCI-package}
\docType{package}
\title{
Wrapper functions for two-sample confidence intervals and tests.
}
\description{
A collection of wrapper functions for simple evaluation of factorial trials.
The function pairwiseCI allows to calculate 2-sample confidence intervals for all-pairs and many-to-one comparisons
between levels of a factor. Intervals are NOT adjusted for multiple hypothesis testing per default.
The function pairwiseTest allows to calculate p-values of two-sample tests for all-pairs and many-to-one comparisons
between levels of a factor. P-values are NOT adjusted for multiple hypothesis testing per default.
Both functions allow splitting of the data according to additional factors.
Intervals can be plotted, summary.pairwiseTest allows to use the p-value adjustments as implemented in p.adjust(stats).
Different test and interval methods (parametric, nonparametric, bootstrap for robust estimators of location, binomial proportions)
are implemented in a unified user level function.
}
\author{
Frank Schaarschmidt and Daniel Gerhard, for the Institute of Biostatistics, Leibniz Universitaet Hannover
Maintainer: Frank Schaarschmidt <schaarschmidt@biostat.uni-hannover.de>
}
\keyword{ package }
\seealso{
Multiple comparisons for the differences of means:\pkg{multcomp}
\code{pairwise.t.test(stats)}
\code{pairwise.prop.test(stats)}
\code{p.adjust(stats)}
}
\examples{
# some examples:
# In many cases the shown examples might not make sense,
# but display how the functions can be used.
data(Oats)
Oats
# # all pairwise comparisons,
# separately for each level of nitro:
apc <- pairwiseCI(yield ~ Variety, data=Oats,
by="nitro", method="Param.diff")
apc
# Intervals can be plotted:
plot(apc)
# See ?pairwiseCI or ?pairwiseCImethodsCont
# for further options for intervals of 2 samples
# of continuous data.
# Or a test
apcTest <- pairwiseTest(yield ~ Variety, data=Oats,
by="nitro", method="t.test")
# with holm-adjusted p-values:
summary(apcTest, p.adjust.method="holm")
# # If only comparisons to one control would be of interest:
# many to one comparisons, with variety Marvellous as control,
# for each level of nitro separately:
m21 <- pairwiseCI(yield ~ Variety, data=Oats,
by="nitro", method="Param.diff", control="Marvellous")
##############################################
# # Proportions: another structure of the data is needed.
# Currently the data.frame data must contain two columns,
# specifying the number of successes and failures on each
# unit.
# The rooting example:
# Calculate confidence intervals for the
# difference of proportions between the 3 doses of IBA,
# separately for 4 combinations of "Age" and "Position".
# Note: we pool over Rep in that way. Whether this makes
# sense or not, is decision of the user.
data(rooting)
rooting
# Confidence intervals for the risk difference
aprootsRD<-pairwiseCI(cbind(root, noroot) ~ IBA,
data=rooting, by=c("Age", "Position"), method="Prop.diff")
# See ?pairwiseCIProp for further options to compare proportions
# Or a test:
aprootsTest<-pairwiseTest(cbind(root, noroot) ~ IBA,
data=rooting, by=c("Age", "Position"), method="Prop.test")
aprootsTest
summary(aprootsTest, p.adjust.method="holm")
}
\keyword{ htest }
|
a1c5c1f75dbbb895a006b83ea6319d813aba79c8
|
10861c9bd3ee5bcc928edf42437b23e1cc29f01a
|
/man/qc_metrics.Rd
|
c592425e5c973359b21a91484470cffe53d7abc6
|
[
"MIT"
] |
permissive
|
huipan1973/ezscrnaseq
|
545e3a305b62e119a672c273bd5d0204a1ab60a5
|
6e4a655b4b601ba0fc106f0856086a947f7392f9
|
refs/heads/master
| 2022-07-21T19:49:26.933389
| 2022-07-12T20:03:24
| 2022-07-12T20:03:24
| 243,549,643
| 0
| 0
|
MIT
| 2020-04-05T22:26:55
| 2020-02-27T15:27:07
|
R
|
UTF-8
|
R
| false
| true
| 1,300
|
rd
|
qc_metrics.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/qc_metrics.r
\name{qc_metrics}
\alias{qc_metrics}
\title{Quality control on the cells}
\usage{
qc_metrics(
sce,
sym_col = "symbol",
by_nmads = TRUE,
thresholds = c(3, 3, 3),
ncores = 1,
prefix = NULL,
plot = TRUE,
write = TRUE,
verbose = TRUE
)
}
\arguments{
\item{sce}{A SingleCellExperiment object containing expression values, usually counts.}
\item{sym_col}{The column name for the gene symbols in \code{rowData(sce)}.}
\item{by_nmads}{TRUE/FALSE for using number of median absolute deviation as thresholds.}
\item{thresholds}{Numbers of median absolute deviation if \code{by_nmads} is TRUE, otherwise the actual counts or
percentages.}
\item{ncores}{Number of cores.}
\item{prefix}{Prefix for file name for the QC metrics histograms.}
\item{plot}{TRUE/FALSE for whether plot the QC metrics histograms.}
\item{write}{TRUE/FALSE for whether write the table of filtered cells.}
\item{verbose}{TRUE/FALSE for specifying whether diagnostics should be printed to screen.}
}
\value{
A SingleCellExperiment object.
}
\description{
Quality control on the cells i.e. filter cells by library sizes, number of expressed genes and mitochondrial gene
proportion, visualize the QC metrics by histograms.
}
|
fcaca09ded27a6b1214e5ab7e960c18733a30286
|
0df9cb1098e017d62bb97639d89a874714d3d29e
|
/AddSubDiv Matrices.R
|
56babe482e5a9a0d356f20bf71456e94989add10
|
[] |
no_license
|
AkshataKumar/Projects
|
2d693bf07709cc0b730036b0b4e472f27d2c8443
|
fba06397669501d38f46715c1079634ab4a106aa
|
refs/heads/master
| 2022-10-11T17:34:21.478973
| 2020-06-10T11:07:12
| 2020-06-10T11:07:12
| 271,250,586
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 370
|
r
|
AddSubDiv Matrices.R
|
# NOTE(review): this script assumes the matrices `Games`, `FieldGoals`,
# `MinutesPlayed` and `Points` (all with identical dimensions, presumably
# players x seasons) are already loaded in the workspace -- confirm against
# the data-loading script.
Games
rownames(Games)
colnames(Games)
# Element access: by numeric position, then by column name
Games[3,8]
Games[3,"2012"]
FieldGoals #divide one matrix by the other # needs to be of the same size though
FieldGoals / Games   # element-wise division => field goals per game
round(FieldGoals / Games,1) #rounding to 1 dec
MinutesPlayed / Games   # minutes per game
round(MinutesPlayed / Games) #rounding to whole numbers
MinutesPlayed
Points / Games   # points per game
round(Points / Games)
|
e9ae081d10346951b4e690fa6ad3ccfecd7fa255
|
718677799c142034c141537b79182546f302acd2
|
/inst/gridmap/ui.R
|
37f3984acadcb7086ab50879f7c63ed574eb129b
|
[] |
no_license
|
lecy/cartogram
|
d8d09e4a379ab67cf2a44bf88288896e38d9e3d6
|
265c24cad77d88f9024904bf2aefefaea6ce98d6
|
refs/heads/master
| 2021-01-25T04:34:38.583616
| 2016-06-01T14:46:22
| 2016-06-01T14:46:22
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 806
|
r
|
ui.R
|
library(shiny)
# Shiny UI for a Dorling-cartogram explorer: two sliders control the
# algorithm, and the main panel shows the cartogram plus the original map.
shinyUI(pageWithSidebar(
  headerPanel("Dorling Cartogram"),
  sidebarPanel(
    # Number of cartogram iterations run per click of "Update View"
    sliderInput("iteration",
                "The number of iterations running with one click :",
                min = 1,
                max = 201,
                value = 11,
                step = 10),
    # Grid resolution: number of bins by row and by column
    sliderInput("resolution",
                "The number of bins by row and column:",
                min = 10,
                max = 200,
                value = 50,
                step = 10),
    # Inputs are only sent to the server when this button is pressed
    submitButton(text = "Update View")
  ),
  mainPanel(
    # Cartogram output (top) and original map (bottom); ids must match the
    # server's render functions
    plotOutput("distPlot", width="100%", height="430px"),
    plotOutput("origPlot", width="100%", height="525px")
  )
))
|
2cf549ed3338af71b3deab09e932a76a53743d97
|
a3386aa4f794d2b8327e3d167f0ffd0d784ffc7e
|
/R/dodds_sentiment.R
|
b2f4a1a9b8899845b7d9e319bfb82a2687d2d94f
|
[] |
no_license
|
systats/lexicon
|
2ef43af84016968aff7fea63e0c332bcc734f5e6
|
f42a10e9f0c3f830d28f0e50911acbad439ec682
|
refs/heads/master
| 2021-04-28T20:26:46.470503
| 2018-02-15T04:31:44
| 2018-02-15T04:31:44
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,224
|
r
|
dodds_sentiment.R
|
#' Language Assessment by Mechanical Turk Sentiment Words
#'
#' A dataset containing words, average happiness score (polarity), standard
#' deviations, and rankings.
#'
#' @details
#' \itemize{
#' \item word. The word.
#' \item happiness_rank. Happiness ranking of words based on average happiness
#' scores.
#' \item happiness_average. Average happiness score.
#' \item happiness_standard_deviation. Standard deviations of the happiness
#' scores.
#' \item twitter_rank. Twitter ranking of the word.
#' \item google_rank. Google ranking of the word.
#' \item nyt_rank. New York Times ranking of the word.
#' \item lyrics_rank. lyrics ranking of the word.
#' }
#'
#' @docType data
#' @keywords datasets
#' @name dodds_sentiment
#' @usage data(dodds_sentiment)
#' @format A data frame with 10222 rows and 8 variables
#' @references
#' Dodds, P.S., Harris, K.D., Kloumann, I.M., Bliss, C.A., & Danforth, C.M. (2011)
#' Temporal patterns of happiness and information in a global social network:
#' Hedonometrics and twitter. PLoS ONE 6(12): e26752.
#' doi:10.1371/journal.pone.0026752
#'
#' http://www.plosone.org/article/fetchSingleRepresentation.action?uri=info:doi/10.1371/journal.pone.0026752.s001
NULL
|
9c6ae5548037b01b990ab8b6d65d89de78d97900
|
fc7476839b870e69aae46279615684dd1f0e5e85
|
/man/belong.Rd
|
52567fdc5b509bb0d86562f6b6f2863e31dc70de
|
[] |
no_license
|
vishkey/arqas
|
194f472cc08fa21bdccde31ddf2639105eef995e
|
85ec37540fd008229adb662e1a1b5adf4d48f212
|
refs/heads/master
| 2020-03-31T08:35:35.711433
| 2015-09-16T15:54:08
| 2015-09-16T15:54:08
| 14,345,407
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 459
|
rd
|
belong.Rd
|
% Generated by roxygen2 (4.1.1): do not edit by hand
% Please edit documentation in R/5_SimulateModels.R
\name{belong}
\alias{belong}
Checks if an object belongs to a determinate package
\usage{
belong(obj, packagename)
}
\arguments{
\item{obj}{Object to check}
\item{packagename}{Name of the package}
}
\value{
TRUE if obj belongs to the package, FALSE if not
}
\description{
Checks if an object belongs to a determinate package
}
\keyword{internal}
|
ced17047812ab81e1ae15115fce7cfac849927bf
|
1e867077be74f6385330f8032807ec2028823600
|
/heatmap.R
|
7820506e47937c440e77c947809a1ea41708539a
|
[] |
no_license
|
jorgecruzn/Tareas_Curso_BioinfinvRepro
|
70bdbdf33a863a3d3735c9777ecb7dbd637a3e82
|
8f87ad6482b348a0114475f6e4405e18633878bf
|
refs/heads/master
| 2021-04-29T16:53:15.246253
| 2018-05-28T20:29:55
| 2018-05-28T20:29:55
| 121,658,267
| 0
| 0
| null | 2018-04-02T14:31:54
| 2018-02-15T17:22:05
| null |
UTF-8
|
R
| false
| false
| 406
|
r
|
heatmap.R
|
# This script generates one heatmap from the oyamel (sacred fir) data.
# Load the libraries:
# source("http://bioconductor.org/biocLite.R")
# biocLite("Heatplus")  # only needed the first time
library("Heatplus")
# Read the data file
oyamel <- read.csv("../meta/3918_mybstr.csv")
# Drop the non-numeric first column, otherwise the matrix conversion fails
oyamel2 <- oyamel[, -1]
# BUG FIX: Rowv/Colv were previously passed to as.matrix() -- where they are
# silently swallowed by `...` -- so heatmap() still clustered and reordered
# the rows/columns. They belong to heatmap() itself; NA disables the
# dendrograms and keeps the original ordering.
heatmap(as.matrix(oyamel2), Rowv = NA, Colv = NA)
|
820cfacd76988867df7ffdf8db4516ad2ff867d1
|
557c317f924f94019e60b40cc2a0a6b713c80543
|
/Set_ind.R
|
ad1da6ff3cc0756f57e6c8af921aa542a58fc132
|
[] |
no_license
|
bblakely/SapFlux
|
23416d66479dc80cf420b1dbeabb6443976bac58
|
122973ab38ae07c20e7d80087ea67a5ab8694234
|
refs/heads/master
| 2021-01-10T13:25:32.786118
| 2020-05-06T19:31:37
| 2020-05-06T19:31:37
| 55,317,548
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 392
|
r
|
Set_ind.R
|
# Define all shared index vectors; these same definitions also appear in
# 'refine tower data'.
# Assumes `twrdat` (with HOUR and DOY columns) is already loaded - defined elsewhere.
gs <- 150:250      # growing-season days of year
dayhr <- 8:20      # daytime hours
midday <- 12:15    # midday hours

# Row indices into twrdat for each time window.
dayind <- which(twrdat$HOUR %in% dayhr)    # daytime rows
daymid <- which(twrdat$HOUR %in% midday)   # midday rows
gsind <- which(twrdat$DOY %in% gs)         # growing-season rows
# Combined windows: growing-season daytime and growing-season midday.
daygs <- which(twrdat$DOY %in% gs & twrdat$HOUR %in% dayhr)
midgs <- which(twrdat$DOY %in% gs & twrdat$HOUR %in% midday)
|
55c4a87ac599973a5d7ffe14d210504715ce0dc9
|
98da3b7bf9459f9b9704d2bfaa3eee6d705a389d
|
/03-packages-and-help-pages/scatterplot.R
|
852212d86fcd082c9601ecc8643000a7415189c9
|
[
"BSD-3-Clause"
] |
permissive
|
jrhrmsll/hands-on-programming-with-R
|
acf2b84dc895b03f22222407fec0bd458abf9a3f
|
1fd89d9877a9fae41b8cce00de039bc90c2324ad
|
refs/heads/master
| 2023-02-04T11:53:18.491838
| 2020-12-20T20:21:33
| 2020-12-20T20:21:33
| 297,152,309
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 92
|
r
|
scatterplot.R
|
# Demonstrate a basic scatterplot with ggplot2's qplot().
library("ggplot2")

# Plot each integer 1..25 against its square.
values <- 1:25
value_squares <- values ^ 2
qplot(x = values, y = value_squares)
|
4513a1c5f1e902ee3868c104d0da8587533e31b7
|
b7d94733c7b74ecad085e212fcc42fcfdb48932c
|
/fips_clean.R
|
7f14597a0e76b47fa8229e726c1639ed8c4a9014
|
[] |
no_license
|
scworland/usgs-water-use
|
1e42558899c75a900e7a927120ab36508c5a7ac6
|
90b1c62dd91d439b616f77c8078dd2e6de170fe3
|
refs/heads/master
| 2021-06-04T10:52:24.819626
| 2016-10-28T17:02:58
| 2016-10-28T17:02:58
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 674
|
r
|
fips_clean.R
|
# Clean a county-level data frame whose FIRST column holds county FIPS codes:
#   * drop counties whose FIPS codes changed over time (not comparable across years)
#   * relabel 12025 (Dade County, FL) with its replacement code 12086
#
# Args:
#   d: a data frame; county FIPS must be the first column.
# Returns:
#   the cleaned data frame.
#
# BUG FIXES vs the original:
#   * the relabel step indexed `d[...]` with a single subscript, which selects
#     *columns* of a data frame rather than cells; it now targets column 1 only.
#   * the original had no final `d`, so whenever a 12025 row was present the
#     function returned the assignment's value ("12086") instead of the data.
fips.clean <- function(d){
  ## counties to be removed due to changing FIPS
  remove <- c("08014","08001","08013","08059",
              "08123","51560","51005","51780",
              "51083","30113","30031","30067",
              "24031","24031","24003","51015",
              "51790","51143","51590")
  drop <- d[, 1] %in% remove
  if (any(drop)) {
    ## drop=FALSE keeps a data frame even if d has a single column
    d <- d[!drop, , drop = FALSE]
  }
  ## counties to be renamed due to changing FIPS (%in% is NA-safe, unlike ==)
  relabel <- d[, 1] %in% "12025"
  d[relabel, 1] <- "12086"
  d
}
# example
# d2 <- fips.clean(d)
|
626f4bfc719c3489d8a4c21a2682ee87d1cf277b
|
6a80156ec20fd85828571eb48b4fff9febc8e25c
|
/lab10/lab10_code.R
|
e9f865f060fad49b355a8127f73c2a898433d0d4
|
[] |
no_license
|
mmaruchin98/cbs
|
51f43ad71bfd43838dd8881f5c3b1b31aded4761
|
7d224b10ec8488a31c61e56d6974c221b1317c9b
|
refs/heads/main
| 2023-05-11T16:42:26.629856
| 2021-06-07T03:17:20
| 2021-06-07T03:17:20
| 354,831,663
| 0
| 0
| null | null | null | null |
WINDOWS-1258
|
R
| false
| false
| 4,426
|
r
|
lab10_code.R
|
library(Rtsne)
library(data.table)
library(ggplot2)
library(dplyr)
library(Seurat)
library(patchwork)
# Load the 10x pbmc68k count matrix.
# NOTE(review): the path contains a mojibake character ("Micha³"; the file is
# WINDOWS-1258 encoded) - confirm it matches the real directory name.
pbmc.data <- Read10X(data.dir = "/Users/Micha³/Desktop/filtered_matrices_mex/hg19/")
# Standard Seurat preprocessing: filter cells/genes, normalize, select 2000
# variable genes, scale, then PCA on the variable genes.
pbmc <- CreateSeuratObject(counts = pbmc.data, project = "pbmc68k", min.cells = 3, min.features = 200)
pbmc <- NormalizeData(pbmc)
pbmc <- FindVariableFeatures(pbmc, selection.method = "vst", nfeatures = 2000)
pbmc <- ScaleData(pbmc)
pbmc <- RunPCA(pbmc, features = VariableFeatures(object = pbmc))
# Cell embedding restricted to the first 20 principal components.
pbmc.embedet <- Embeddings(object = pbmc, reduction = "pca")[,1:20]
# Top-level clustering: k-means with 10 clusters on the 20-PC embedding.
# NOTE(review): kmeans runs BEFORE set.seed(1) below, so the top-level
# clustering is not reproducible across runs.
k.mean <- kmeans(pbmc.embedet, 10)
k.mean.clusters <- k.mean$cluster
set.seed(1)
# Global t-SNE of all cells, coloured by top-level k-means cluster.
tsne_out <- Rtsne(pbmc.embedet,pca=F,perplexity=30)
tsne_out_pos = data.table(tsne_out$Y)
tsne_out_pos$cluster <- k.mean$cluster
ggplot(tsne_out_pos) + geom_point(aes(x=V1, y=V2, col = as.factor(cluster))) + labs(color = "cluster")
# For each of the 10 top-level clusters: sub-cluster its cells (k-means, k = 5),
# embed them with t-SNE, and save a scatterplot coloured by sub-cluster to
# lab10_c<k>.png.
#
# BUG FIX: this loop replaces ten copy-pasted blocks; the block for cluster 8
# referenced undefined `cluster.8` / `cluster.8.kmean` variables and errored
# at runtime. Call order (kmeans then Rtsne, per cluster in order 1..10) is
# preserved from the original blocks.
for (k in 1:10) {
  # Cells assigned to top-level cluster k (rows of the 20-PC embedding).
  cluster.cells <- pbmc.embedet[k.mean.clusters == k, ]

  png(file = paste0("lab10_c", k, ".png"))

  # Sub-cluster within this top-level cluster.
  sub.kmean <- kmeans(cluster.cells, 5)

  # 2-D t-SNE embedding of the same cells for visualisation.
  sub.tsne <- Rtsne(cluster.cells, pca = F, perplexity = 30)
  sub.dt <- data.table(sub.tsne$Y)
  sub.dt$cluster <- sub.kmean$cluster

  # print() is required: ggplot objects do not auto-draw inside a for loop,
  # so without it the png files would be empty.
  print(ggplot(sub.dt) + geom_point(aes(x = V1, y = V2, col = as.factor(cluster))))

  dev.off()
}
|
351f546468f7da7774c6761d0bdc9244eeff0bab
|
788d1c20affd0902af1d84a0b94279610486fd0d
|
/man/regex.Rd
|
8902796433de22536b948a2e7a9685ff70e9d449
|
[] |
no_license
|
cran/rebus.base
|
c366d615642d019142b24c6096d7bc5572007eef
|
e7f2fd852ca5db0cb8a21fb6799b6e10738f0ab7
|
refs/heads/master
| 2021-01-10T13:12:30.812306
| 2017-04-25T20:45:26
| 2017-04-25T20:45:26
| 48,132,018
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 487
|
rd
|
regex.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/regex-methods.R
\name{regex}
\alias{regex}
\title{Create a regex}
\usage{
regex(...)
}
\arguments{
\item{...}{Passed to \code{paste0}.}
}
\value{
An object of class \code{regex}.
}
\description{
Creates a regex object.
}
\note{
This works like \code{paste0}, but the returns value has class
\code{c("regex", "character")}.
}
\seealso{
\code{\link[base]{paste0}}
}
\examples{
as.regex(month.abb)
regex(letters[1:5], "?")
}
|
5cbcae80b34f38893f8c0bf5c99c0c0e97aa138a
|
62f1a9d2cf95915dd670f65ba4c550fa3da5561a
|
/run_analysis.R
|
7667cdfd92b1bfd91a5ab1afbc605f41b4932f86
|
[] |
no_license
|
robertbenparkinson/getting-and-cleaning-data
|
1b21cd8fd06e1ed31e0cb601f598fe337063410c
|
60321723867836d211fab89f195f6bf70f3f35d1
|
refs/heads/master
| 2021-01-10T05:12:49.849374
| 2015-12-26T17:44:00
| 2015-12-26T17:44:00
| 48,617,905
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 8,072
|
r
|
run_analysis.R
|
##run_analysis.r
##Getting and Cleaning Data Assignment
##Robert Ben Parkinson
##23-Dec-2015
##Builds a combined TRAIN+TEST table from the UCI HAR files, keeping only the
##mean/std feature columns and readable activity labels.
##NOTE(review): paths are relative ("coursera/UCI/...") - assumes the working
##directory is set accordingly.
##various possible NA strings
na_strings <- c("NA", "<NA>", "Nan", " ")
##import the six primary data frames in the TRAIN and TEST files
t1 <- read.table("coursera/UCI/train/subject_train.txt", na.strings = na_strings)
t2 <- read.table("coursera/UCI/train/X_train.txt", na.strings = na_strings)
t3 <- read.table("coursera/UCI/train/y_train.txt", na.strings = na_strings)
t4 <- read.table("coursera/UCI/test/subject_test.txt", na.strings = na_strings)
t5 <- read.table("coursera/UCI/test/X_test.txt", na.strings = na_strings)
t6 <- read.table("coursera/UCI/test/y_test.txt", na.strings = na_strings)
##imports in the column names from features.txt file
features <- read.table("coursera/UCI/features.txt")
##opens the dplyr library. This assumes that you have already installed package
library(dplyr)
##NOTE(review): the result of tbl_df() is discarded, so this line does not
##change `features`; it only prints in interactive use.
tbl_df(features)
##starting with the TRAIN file
##the subject_train file saved as t1 is saved as bigset_train
bigset_train <- t1
##the first column is renamed "users"
colnames(bigset_train)[1] <- "users"
##a column named "testtrain" is added with the value "TRAIN"
##in case we need to split the set in the future
bigset_train <- mutate(bigset_train, testtrain = "TRAIN")
##readable names are added to X_train files
colnames(t2) <- features$V2
##hard-coded positions of the mean()/std() feature columns in features.txt
stand_mean <- c(1:6, 41:46, 81:86, 121:126, 161:166, 201:202, 214:215,
                227:228, 240:241, 253:254, 266:271, 345:350, 424:429,
                503:504, 516:517, 529:530, 542,543)
##keep only the standard deviation and mean columns
t2 <- t2[stand_mean]
##the number references are replaced in the y_train file with readable names
##(after the first replacement the column is character; later comparisons such
##as t3==2 rely on R coercing 2 to "2" - intentional here)
t3[t3==1] <- "WALKING"
t3[t3==2] <- "UPSTAIRS" ##walking upstairs
t3[t3==3] <- "DOWNSTAIRS" ##walking downstairs
t3[t3==4] <- "SITTING"
t3[t3==5] <- "STANDING"
t3[t3==6] <- "LAYING"
##the y_train column is renamed "activities"
colnames(t3)[1] <- "activities"
##the bigset_train, t2, and t3 data sets are combined into bigset_train
bigset_train <- cbind(bigset_train, t2, t3)
##starting with the Test file
##the subject_test file saved as t4 is saved as bigset_test
bigset_test <- t4
##the first column is renamed "users"
colnames(bigset_test)[1] <- "users"
##a column named "testtrain" is added with the value "TEST"
##in case we need to split the set in the future
bigset_test <- mutate(bigset_test, testtrain = "TEST")
##readable names are added to X_test files
colnames(t5) <- features$V2
##the number references are replaced in the y_test file with readable names
t6[t6==1] <- "WALKING"
t6[t6==2] <- "UPSTAIRS" ##walking upstairs
t6[t6==3] <- "DOWNSTAIRS" ##walking downstairs
t6[t6==4] <- "SITTING"
t6[t6==5] <- "STANDING"
t6[t6==6] <- "LAYING"
##the y_test column is renamed "activities"
colnames(t6)[1] <- "activities"
##keep only the standard deviation and mean columns
t5 <- t5[stand_mean]
##the bigset_test, t5, and t6 data sets are combined into bigset_test
bigset_test <- cbind(bigset_test, t5, t6)
##using rbind() bigset_train and bigset_test are merged together to form bigset
bigset <- rbind(bigset_train, bigset_test)
##bigset is reordered to move the activities column to the front
bigset <- select(bigset, 1, 69, 2:68)
##any "na" rows are removed
bigset <- na.omit(bigset)
##the vector t7 contains the six activities found in the dataset
##(iteration order for the summary loop below)
t7 <- c("WALKING", "UPSTAIRS", "DOWNSTAIRS", "SITTING", "STANDING", "LAYING")
##short column names for the final tidy output: 3 info columns + 66 features.
##NOTE(review): abbreviations appear to be t/f = time/frequency domain,
##A = Acc, G = Gyro, J = Jerk, M = Magnitude - confirm against the codebook.
clean_names <- c("users", "activities", "testtrain", "tB-A-mean-X", "tB-A-mean-Y", "tB-A-mean-Z",
                 "tB-A-std-X", "tB-A-std-Y", "tB-A-std-Z", "tG-A-mean-X", "tG-A-mean-Y", "tG-A-mean-Z",
                 "tG-A-std-X", "tG-A-std-Y", "tG-A-std-Z", "tB-AJ-mean-X", "tB-AJ-mean-Y", "tB-AJ-mean-Z",
                 "tB-AJ-std-X", "tB-AJ-std-Y", "tB-AJ-std-Z", "tB-G-mean-X", "tB-G-mean-Y", "tB-G-mean-Z",
                 "tB-G-std-X", "tB-G-std-Y", "tB-G-std-Z", "tB-GJ-mean-X", "tB-GJ-mean-Y", "tB-GJ-mean-Z",
                 "tB-GJ-std-X", "tB-GJ-std-Y", "tB-GJ-std-Z", "tB-AM-mean", "tB-AM-std", "tG-AM-mean",
                 "tG-AM-std", "tB-AJM-mean", "tB-AJM-std", "tB-GM-mean", "tB-GM-std", "tB-GJM-mean",
                 "tB-GJM-std", "fB-A-mean-X", "fB-A-mean-Y", "fB-A-mean-Z", "fB-A-std-X", "fB-A-std-Y",
                 "fB-A-std-Z", "fB-AJ-mean-X", "fB-AJ-mean-Y", "fB-AJ-mean-Z", "fB-AJ-std-X", "fB-AJ-std-Y",
                 "fB-AJ-std-Z", "fB-G-mean-X", "fB-G-mean-Y", "fB-G-mean-Z", "fB-G-std-X", "fB-G-std-Y",
                 "fB-G-std-Z", "fB-AM-mean", "fB-AM-std", "fBB_AJM-mean", "fBB_AJM-std", "fBB_GM-mean",
                 "fBB_GM-std", "fBB_GJM-mean", "fBB_GJM-std")
##Build the tidy summary: for each (user, activity) pair, keep the first row of
##the three info columns (users, activities, testtrain) and the mean of each of
##the 66 numeric feature columns, rounded to 4 decimal places.
##
##IMPROVEMENTS vs the original:
##  * the zzz switch and its if/else are gone - both branches were identical
##    except for seeding the result, so the duplicate code is removed;
##  * rows are collected in a preallocated list and combined once with
##    do.call(rbind, ...) instead of growing a data frame with rbind() inside
##    the loop (which is O(n^2) in copies). Row order is unchanged.
summary_rows <- vector("list", 30 * length(t7))
k <- 0
##i represents the users in the study
for(i in 1:30){
  ##z represents the six activities found in the study
  for(z in t7){
    ##rows of bigset for this user and activity
    xtidy <- filter(bigset, users == i & activities == z)
    ##first row of the three information columns
    xtidyA <- select(xtidy, 1:3)[1, ]
    ##mean of each of the 66 numeric columns, kept to 4 decimal places
    xtidyB <- round(t(apply(select(xtidy, 4:69), 2, mean)), 4)
    k <- k + 1
    summary_rows[[k]] <- cbind(xtidyA, xtidyB)
  }
}
##combine all (user, activity) summary rows into the final tidy dataset
tidy <- do.call(rbind, summary_rows)
colnames(tidy) <- clean_names
write.table(tidy, file ="tidy_dataset.txt", sep = "\t", row.names = FALSE, eol = "\n\r")
##FIN
|
fafd6d6cf26a148933f3026c27d991f2b30d794b
|
1e404990c848a549f2ad7f67c5d409be9e8b3b7b
|
/man/RGB_to_HSV.Rd
|
dd6d44f5756e7f50fd32c918f8af8e99e252f3b2
|
[] |
no_license
|
mlampros/OpenImageR
|
4c13473eb5dad6701117344224b3517dbd11d577
|
59b3c72be79fcb38e8ce5cdd4678194fe8452ad6
|
refs/heads/master
| 2023-07-19T17:30:14.915296
| 2023-07-08T07:38:03
| 2023-07-08T07:38:03
| 62,885,650
| 61
| 8
| null | 2023-03-08T18:34:43
| 2016-07-08T12:01:42
|
R
|
UTF-8
|
R
| false
| true
| 602
|
rd
|
RGB_to_HSV.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/superpixels.R
\name{RGB_to_HSV}
\alias{RGB_to_HSV}
\title{Conversion of RGB to HSV colour type}
\usage{
RGB_to_HSV(input_data)
}
\arguments{
\item{input_data}{a 3-dimensional array (RGB image) where the third dimension is equal to 3}
}
\description{
Conversion of RGB to HSV colour type
}
\details{
Meaning: RGB (Red-Green-Blue) to HSV (Hue, Saturation, Value) colour conversion
}
\examples{
library(OpenImageR)
set.seed(1)
array_3d = array(sample(1:255, 675, replace = TRUE), c(15, 15, 3))
res = RGB_to_HSV(array_3d)
}
|
d9fb4c70b6462ccd8395b20f92553930e8ffa615
|
2a7e77565c33e6b5d92ce6702b4a5fd96f80d7d0
|
/fuzzedpackages/TestCor/man/BootRWCor.Rd
|
4e7e25df92fcaba343b0e4f9b17f4be6b5be4ce6
|
[] |
no_license
|
akhikolla/testpackages
|
62ccaeed866e2194652b65e7360987b3b20df7e7
|
01259c3543febc89955ea5b79f3a08d3afe57e95
|
refs/heads/master
| 2023-02-18T03:50:28.288006
| 2021-01-18T13:23:32
| 2021-01-18T13:23:32
| 329,981,898
| 7
| 1
| null | null | null | null |
UTF-8
|
R
| false
| true
| 2,570
|
rd
|
BootRWCor.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/FwerMethods.R
\name{BootRWCor}
\alias{BootRWCor}
\title{Bootstrap multiple testing method of Romano & Wolf (2005) for correlations.}
\usage{
BootRWCor(
data,
alpha = 0.05,
stat_test = "empirical",
Nboot = 1000,
vect = FALSE,
logical = FALSE,
arr.ind = FALSE
)
}
\arguments{
\item{data}{matrix of observations}
\item{alpha}{level of multiple testing (used if logical=TRUE)}
\item{stat_test}{\describe{
\item{'empirical'}{\eqn{\sqrt{n}*abs(corr)}}
\item{'fisher'}{ \eqn{\sqrt{n-3}*1/2*\log( (1+corr)/(1-corr) )}}
\item{'student'}{ \eqn{\sqrt{n-2}*abs(corr)/\sqrt(1-corr^2)}}
\item{'2nd.order'}{ \eqn{\sqrt{n}*mean(Y)/sd(Y)} with \eqn{Y=(X_i-mean(X_i))(X_j-mean(X_j))}}
}}
\item{Nboot}{number of iterations for Monte-Carlo quantile evaluation}
\item{vect}{if TRUE returns a vector of adjusted p-values, corresponding to \code{vectorize(cor(data))};
if FALSE, returns an array containing the adjusted p-values for each entry of the correlation matrix}
\item{logical}{if TRUE, returns either a vector or a matrix where each element is equal to TRUE if the corresponding null hypothesis is rejected, and to FALSE if it is not rejected}
\item{arr.ind}{if TRUE, returns the indexes of the significant correlations, with respect to level alpha}
}
\value{
Returns \itemize{\item{the adjusted p-values, as a vector or a matrix depending of the value of \code{vect},} \item{an array containing indexes \eqn{\lbrace(i,j),\,i<j\rbrace} for which correlation between variables \eqn{i} and \eqn{j} is significant, if \code{arr.ind=TRUE}.}}
}
\description{
Multiple testing method based on the evaluation of quantile by bootstrap
in the initial dataset (Romano & Wolf (2005)).
}
\examples{
n <- 100
p <- 10
corr_theo <- diag(1,p)
corr_theo[1,3] <- 0.5
corr_theo[3,1] <- 0.5
data <- MASS::mvrnorm(n,rep(0,p),corr_theo)
# adjusted p-values
res <- BootRWCor(data,stat_test='empirical',Nboot=1000)
round(res,2)
# significant correlations with level alpha:
alpha <- 0.05
whichCor(res<alpha)
# directly
BootRWCor(data,alpha,stat_test='empirical',Nboot=1000,arr.ind=TRUE)
}
\references{
Romano, J. P., & Wolf, M. (2005). Exact and approximate stepdown methods for multiple hypothesis testing. Journal of the American Statistical Association, 100(469), 94-108.
Roux, M. (2018). Graph inference by multiple testing with application to Neuroimaging, Ph.D., Université Grenoble Alpes, France, https://tel.archives-ouvertes.fr/tel-01971574v1.
}
\seealso{
ApplyFwerCor, BootRWCor_SD
}
|
4a22fd7e9f76010c1dfa7cbb6cefc1eaf36e486f
|
f742e300d0d886a2093acc43a37dc0d65cf6e877
|
/R/end_round.R
|
0df957b8705d3e6e237ef53a0c387394796813f2
|
[] |
no_license
|
cran/whitechapelR
|
347608622b3828dcade330f4cf25f7c3fe4cab9e
|
35986d29898717d2cc5f7343d02584af9c2a1725
|
refs/heads/master
| 2020-03-27T04:02:00.429566
| 2018-10-02T16:40:03
| 2018-10-02T16:40:03
| 145,906,933
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,311
|
r
|
end_round.R
|
#' @export
end_round <- function(paths, hideouts = NULL) {
  #' @title Manage list of possible hideouts
  #'
  #' @description Create or update the list of possible hideouts. Candidates
  #' are the final positions of every possible path traveled, optionally
  #' intersected with the candidate hideouts carried over from earlier rounds.
  #'
  #' @param paths list of all possible paths already traveled
  #' @param hideouts optional vector of possible hideouts from previous rounds. Not used in round 1, only rounds 2 and 3
  #'
  #' @return list of all possible hideouts
  #' @examples
  #' possibilities = start_round(64)
  #' possibilities = take_a_step(possibilities,roads)
  #' possibilities = take_a_step(possibilities,roads,blocked=list(c(63,82),c(63,65)))
  #' possibilities = inspect_space(possibilities,space = c(29,30), clue = FALSE)
  #' possibilities = inspect_space(possibilities,space = 49, clue = TRUE)
  #' hideouts = end_round(possibilities,hideouts=NULL)
  #' possibilities = start_round(67)
  #' possibilities = take_a_step(possibilities,roads)
  #' hideouts = end_round(possibilities,hideouts=hideouts)

  # The last space of each candidate path is a candidate hideout.
  endpoints <- unique(unlist(lapply(paths, function(p) rev(p)[1])))
  # Round 1: every endpoint is possible.
  if (is.null(hideouts)) {
    return(sort(endpoints))
  }
  # Later rounds: keep only previously possible hideouts that are still endpoints.
  sort(intersect(hideouts, endpoints))
}
|
d8d9ff9dcbf96efa10c1db41ac6173d6cd20975c
|
fe90a921df931bf1ee5def42bb1726b811beba6c
|
/R/verb.R
|
fa694e27055c16e2e06fcb20079a0b317a9943e4
|
[] |
no_license
|
josephwb/malign
|
ea2caf982cc6910d7fc6259a3e180cce91f81f34
|
afd4517e19ff70f17cee4b97e4f415013ece5cd6
|
refs/heads/master
| 2023-02-26T01:01:42.282458
| 2021-02-01T17:11:20
| 2021-02-01T17:11:20
| 321,757,725
| 3
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,701
|
r
|
verb.R
|
# Past-tense verb phrases describing how something was (badly) put together.
# NOTE(review): presumably sampled at random elsewhere in the package to build
# humorous "malign" descriptions - confirm against the package's other files.
created <- c(
  "abused",
  "assembled (without plan)",
  "created (with poor knowledge)",
  "befouled",
  "botched",
  "brewed",
  "bungled",
  "built (without cause)",
  "composed (without skills)",
  "constructed (for no readily apparent purpose)",
  "designed (without style)",
  "destroyed",
  "devastated",
  "devised (without coherence)",
  "disordered",
  "disorganized",
  "fluffed",
  "forged",
  "formed (with neglect)",
  "fucked up",
  "fumbled",
  "hacked",
  "harmed",
  "hurt",
  "ignored",
  "initiated (without forethought)",
  "made (for who knows what reason)",
  "mangled",
  "marred",
  "misdirected",
  "misgoverned",
  "mishandled",
  "mismanaged",
  "misused",
  "mucked up",
  "planned (without care)",
  "prepared (without rigour)",
  "ruined",
  "screwed up",
  "set up (with negligence)",
  "spoiled",
  "thrown together",
  "wrecked"
);
# Present-participle counterparts of `created`: verb phrases describing the act
# of (badly) putting something together.
# NOTE(review): presumably sampled alongside `created` elsewhere in the
# package - confirm against the package's other files.
creating <- c(
  "abusing",
  "assembling",
  "befouling",
  "botching",
  "brewing (without forethought)",
  "building (without care)",
  "bungling",
  "composing (without due care)",
  "constructing (without foresight)",
  "designing (with neglect)",
  "destroying",
  "devastating",
  "devising (without precaution)",
  "disordering",
  "disorganizing",
  "forging",
  "forming (sans intelligence)",
  "fucking up",
  "fumbling",
  "hacking",
  "harming",
  "hurting",
  "ignoring",
  "initiating (without cause)",
  "making (without care)",
  "mangling",
  "marring",
  "misdirecting",
  "misgoverning",
  "mishandling",
  "mismanaging",
  "misusing",
  "mucking up",
  "planning (without skill)",
  "preparing (without circumspection)",
  "ruining",
  "screwing up",
  "setting up (for failure)",
  "spoiling",
  "wrecking"
);
|
e5f21a3e906cc25dedf8df4b0c5f36aec64ebcb3
|
38214fe58616ef227e533967a0b4155b2edd64d6
|
/RScriptsAsPerTask/1)C + 2) PREDICTION WITHOUT NEURAL NETWORKS.R
|
7d59d5776ec921f5dd1947e1f156e957fa3e6803
|
[] |
no_license
|
SandeepBethi/Energy-Forecasting-for-Jeremiah-Burke-school
|
ea1ed3eda4d5c94fa1bfe803bb31734cb27343c3
|
3d3687dc9ee1ecb4fc386ed638055b4b30b4b76c
|
refs/heads/master
| 2021-05-04T08:11:23.610904
| 2016-09-24T23:21:30
| 2016-09-24T23:21:30
| 69,132,437
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 8,910
|
r
|
1)C + 2) PREDICTION WITHOUT NEURAL NETWORKS.R
|
#NOTE: uncomment the lines from 6 to 12 if the packages are not installed and loaded
# in your system. Installing and loading the libraries everytime you run the
# program makes the execution time longer. Hence commented those parts.
#installing and importing the library
#NOTE(review): `rawdata` is used below but never read in this file as stored -
#presumably the read/library calls lived in the commented-out lines 6-12
#referenced above; confirm before running.
#removing the unnecessary columns
rawdata$Channel=NULL
#removing the blank spaces (replace spaces in Units with ".")
rawdata$Units=str_replace_all(rawdata$Units, " ", ".")
#keep only electricity (kWh) records
rawdata <- subset(rawdata, Units=="kWh")
#reshaping the dataframe wide to long: one row per (meter row, hour)
rawdata$Units=NULL
rawdata <- reshape(rawdata,
                   varying = c(colnames(rawdata[,-1:-2])),
                   v.names = "kWh",
                   timevar = "hour",
                   times = sort(c(rep(seq(from=0,to=23,by=1),12))),
                   direction = "long")
# Keep the account id before aggregation drops it
Account=rawdata[,1]
#Sorting the data frame by Date
rawdata=rawdata[order(rawdata$Date),]
#To Visualize the outliers for kwh using Box Plot
#boxplot(rawdata$kWh,horizontal = TRUE)
#boxplot.stats(rawdata$kWh)
#Replacing outliers with NA by Box Plot
#outliers = boxplot(rawdata$kWh, plot=FALSE)$out
#outliers
#rawdata[rawdata$kWh %in% outliers,3]=NA
#summary(rawdata)
#Replace each NA kWh with the mean of itself and the next two observations
#(na.rm=TRUE makes this effectively the mean of the next non-missing values)
for(i in 1:length(rawdata$kWh))
{
  if(is.na(rawdata$kWh[i])==TRUE)
  {
    rawdata$kWh[i]=mean(rawdata$kWh[i:(i+2)],na.rm=TRUE)
  }
}
summary(rawdata)
#aggregate by Date and hour to find hourly kWh
rawdata=aggregate(rawdata$kWh,
                  list(Date = rawdata$Date, hour = rawdata$hour),
                  sum)
#renaming the columns
colnames(rawdata)=c("Date","hour","kWh")
#Adding the column Account
#NOTE(review): length(nrow(rawdata)) is always 1, so this assigns a length-1
#value that R recycles across all rows - works, but nrow(rawdata) was likely intended.
rawdata$Account=c(rep(Account[1],length(nrow(rawdata))))
#Formatting Date column and Adding day,month, year, Dayofweek, weekday and Peakhour Columns
rawdata$Date <- as.Date(rawdata$Date, format="%m/%d/%Y")
rawdata$month=format(rawdata$Date, format = "%m")
rawdata$day=format(rawdata$Date, format = "%d")
rawdata$year= format(rawdata$Date, format = "%Y")
rawdata$DayofWeek = weekdays(rawdata$Date, abbreviate = TRUE)
weekdays1 <- c('Monday', 'Tuesday', 'Wednesday', 'Thursday', 'Friday')
#Weekday: factor 1 for Mon-Fri, 0 for weekend days
rawdata$Weekday = factor((weekdays(rawdata$Date) %in% weekdays1), levels=c(FALSE, TRUE), labels=c(0, 1))
#Peakhour: 1 for hours 7..18, else 0
rawdata$Peakhour = with(rawdata, ifelse(hour > 6 & hour < 19, 1, 0))
#Sorting the data frame by Date and hour column
rawdata=rawdata[order(rawdata$Date,rawdata$hour),]
#Encode DayofWeek numerically: Sun=0, Mon=1, ..., Sat=6 (7 = unmatched value)
rawdata$DayofWeek <- with(rawdata, ifelse(DayofWeek=="Mon", 1,ifelse(DayofWeek=="Tue", 2,ifelse(DayofWeek=="Wed", 3, ifelse(DayofWeek=="Thu", 4, ifelse(DayofWeek=="Fri", 5, ifelse(DayofWeek=="Sat", 6, ifelse(DayofWeek=="Sun", 0,7))))))))
#rearranging the order of columns
rawdata=rawdata[,c(4,1,3,5,6,7,2,8,9,10)]
summary(rawdata)
#Second NA-fill pass on kWh (same rule as above)
for(i in 1:length(rawdata$kWh))
{
  if(is.na(rawdata$kWh[i])==TRUE)
  {
    rawdata$kWh[i]=mean(rawdata$kWh[i:(i+2)],na.rm=TRUE)
  }
}
summary(rawdata)
pb=rawdata
#replacing zeros with NA (zero readings are treated as missing)
for(i in 1:length(rawdata$kWh))
{
  if(rawdata$kWh[i] == 0)
  {
    rawdata$kWh[i]=NA
  }
}
summary(rawdata)
#Using Zoo package: fill remaining NA kWh by extending/interpolating
#neighbouring values (na.fill "extend")
#NOTE(review): this leaves rawdata$kWh as a zoo object inside the data frame.
rawdata$kWh <- zoo(rawdata$kWh)
rawdata$kWh=na.fill(rawdata$kWh, "extend")
summary(rawdata)
#write.csv(rawdata,"New Data without Zeros.csv",row.names = FALSE)
#bon=file.choose(new = FALSE)
#weather<-read.csv(bon, header = TRUE)
#Fetch detailed Boston (KBOS) weather for the full kWh date range
#(requires the weatherData package and network access)
weather <- getWeatherForDate("KBOS", rawdata$Date[1], end_date=rawdata$Date[length(rawdata$Date)], opt_detailed = TRUE, opt_all_columns = TRUE)
#keep only the timestamp and temperature columns
weather=weather[,c(1,3)]
#renaming the columns and Dealing with Date Time format
colnames(weather)=c("Date","Temperature")
weather$Date=as.POSIXct(weather$Date, tz="America/New_York")
weather$hour=format(weather$Date,format="%H")
weather$hour=as.numeric(weather$hour)
#reduce the timestamp to a calendar date (the hour is kept separately above)
weather$Date=as.Date(as.character(as.POSIXct(weather$Date, tz="America/New_York")))
#Transforming the temperature data
summary(weather)
weather=weather[,c(1,3,2)]
summary(weather)
weather=weather[order(weather$Date,weather$hour),]
#To Visualize the outliers for temperature using Box Plot
boxplot(weather$Temperature,horizontal = TRUE)
boxplot.stats(weather$Temperature)
summary(weather)
#Replacing outliers with NA by Box Plot
outliers = boxplot(weather$Temperature, plot=FALSE)$out
outliers
weather[weather$Temperature %in% outliers,3]=NA
summary(weather)
#Replace each NA temperature with the mean of itself and the next two observations
for(i in 1:length(weather$Temperature))
{
  if(is.na(weather$Temperature[i])==TRUE)
  {
    weather$Temperature[i]=mean(weather$Temperature[i:(i+2)],na.rm=TRUE)
  }
}
summary(weather)
#aggregating temperature by Date and hour (mean over sub-hourly readings)
weather=weather[order(weather$Date,weather$hour),]
weather=aggregate(weather$Temperature,
                  list(Date = weather$Date, hour = weather$hour),
                  mean)
summary(weather)
#renaming the columns after aggregation
colnames(weather)=c("Date","hour","Temperature")
colnames(weather)
# rounding the decimal points in Temperature
weather$Temperature <- round(weather$Temperature,digits=0)
#merging the two data frames by left outer join (keep every kWh row)
sampleformat=merge(rawdata, weather,by=c("Date","hour"),all.x=TRUE)
summary(sampleformat)
#rearranging the order of columns for the desired output
sampleformat=sampleformat[,c(3,1,4,5,6,7,2,8,9,10,11)]
#Sorting the merged data by Date and hour
sampleformat=sampleformat[order(sampleformat$Date,sampleformat$hour),]
#To Visualize the outliers for merged data using Box Plot
boxplot(sampleformat$Temperature,horizontal = TRUE)
boxplot.stats(sampleformat$Temperature)
summary(sampleformat)
#checking for outliers in merged data and replacing them with NA
#(column 11 is Temperature after the merge/column reorder above)
outliers = boxplot(sampleformat$Temperature, plot=FALSE)$out
outliers
sampleformat[sampleformat$Temperature %in% outliers,11]=NA
summary(sampleformat)
#Helper replacing the three copy-pasted NA-fill loops in the original:
#replace each NA with the mean of itself and the next two observations
#(na.rm=TRUE, so effectively the mean of the next non-missing values; fills
#are visible to later iterations, exactly like the original in-place loops).
fill.na.forward <- function(v)
{
  for(i in seq_along(v))
  {
    if(is.na(v[i]))
    {
      v[i]=mean(v[i:(i+2)],na.rm=TRUE)
    }
  }
  v
}
#Temperature gets two passes (as in the original: the second pass catches NaN
#produced when an all-NA window was averaged), kWh gets one pass.
sampleformat$Temperature = fill.na.forward(fill.na.forward(sampleformat$Temperature))
sampleformat$kWh = fill.na.forward(sampleformat$kWh)
summary(sampleformat)
#rounding the decimal values in Temperature
sampleformat$Temperature <- round(sampleformat$Temperature,digits=0)
#convert the calendar text columns to numeric predictors
sampleformat$month <- as.numeric(sampleformat$month)
sampleformat$day <- as.numeric(sampleformat$day)
sampleformat$year <- as.numeric(sampleformat$year)
#sampleformat$Weekday <- as.numeric(sampleformat$Weekday)
summary(sampleformat)
write.csv(sampleformat, "Hourly_filled_data.csv",row.names = FALSE)
#working copies of the cleaned table (kept for the later modelling sections)
Hourly_filled_data=sampleformat
Hourly_filled_data1=sampleformat
Hourly_filled_data2=sampleformat
Hourly_filled_data3=sampleformat
####REGRESSION###
#Start Regression: linear model predicting hourly kWh from calendar/weather features
#install.packages('forecast')
#lm.fit=lm(kWh~., data=sampleformat)
#singularities for account and year so remove them.
sampleformat$kWh <- as.numeric(sampleformat$kWh)
library(MASS)
library(ISLR)
#80/20 train/test split (seeded for reproducibility)
smp_size <- floor(0.80*nrow(sampleformat))
set.seed(123)
train_ind <- sample(seq_len(nrow(sampleformat)),size=smp_size)
train <- sampleformat[train_ind, ]
test <- sampleformat[-train_ind, ]
#linear model on all predictors except Account and year (both constant here)
lm.fit1= lm(kWh~.-Account -year, data = train)
summary(lm.fit1)
library(forecast)
pred = predict(lm.fit1, test)
#Exporting RegressionOutputs and PerformanceMetrics
a = accuracy(pred,test$kWh)
a
write.csv(a, "PerformanceMatrics filled_data_set_(1c)_approach.csv")
summary(sampleformat)
####2.PREDICTION with a regression tree
#install.packages('tree')
library (tree)
library (MASS)
library (ISLR)
set.seed (1)
#NOTE(review): `tree` is fit on the FULL data set; it is later pruned and
#evaluated on held-out rows, which leaks test data into that comparison.
#Only tree.sf (fit with subset=train) gives an honest test error.
tree = tree(kWh ~ .-Account -year, sampleformat)
summary(tree)
#NOTE(review): `train` is reassigned here from a data frame to an index vector.
train = sample (1:nrow(sampleformat), nrow(sampleformat)/2)
sf.test=sampleformat [-train,"kWh"]
tree.sf = tree(kWh~.-Account -year,sampleformat,subset=train)
summary (tree.sf)
plot (tree.sf)
text (tree.sf, pretty = 0)
#cost-complexity cross-validation curve (computed on the full-data tree)
cv.sf = cv.tree (tree, FUN = prune.tree)
plot (cv.sf$size, cv.sf$dev, type='b')
prune.sf =prune.tree(tree, best = 9)
#regression tree model output
plot(prune.sf)
text(prune.sf, pretty = 0)
#test-set MSE for the train-only tree...
yhat1=predict (tree.sf, newdata =sampleformat [-train,])
plot(yhat1,sf.test)
abline (0,1)
mean((yhat1 -sf.test)^2)
#...and for the pruned full-data tree (optimistic - see the leakage note above)
yhat2=predict (prune.sf,newdata =sampleformat [-train,] )
plot(yhat2,sf.test)
abline (0,1)
mean((yhat2 -sf.test)^2)
regtree=accuracy(yhat1,sf.test)
regtree
write.csv(regtree,"Performance Metrics Regression Tree.csv",row.names = FALSE)
|
572e6522008fc4210f7a25dd0a10f6e92e14681d
|
2a7e77565c33e6b5d92ce6702b4a5fd96f80d7d0
|
/fuzzedpackages/stepR/man/contMC.Rd
|
32afd9516c32539a203d11eea25c9468faeaea85
|
[] |
no_license
|
akhikolla/testpackages
|
62ccaeed866e2194652b65e7360987b3b20df7e7
|
01259c3543febc89955ea5b79f3a08d3afe57e95
|
refs/heads/master
| 2023-02-18T03:50:28.288006
| 2021-01-18T13:23:32
| 2021-01-18T13:23:32
| 329,981,898
| 7
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,752
|
rd
|
contMC.Rd
|
\name{contMC}
\alias{contMC}
\title{Continuous time Markov chain}
\description{
Simulate a continuous time Markov chain.
\bold{Deprecation warning:} This function is mainly used for patch clamp recordings and may be transferred to a specialised package.
}
\usage{
contMC(n, values, rates, start = 1, sampling = 1, family = c("gauss", "gaussKern"),
param = NULL)
}
\arguments{
\item{n}{number of data points to simulate}
\item{values}{a \code{\link{numeric}} vector specifying signal amplitudes for different states}
\item{rates}{a square \code{\link{matrix}} matching the dimension of \code{values} each with \code{rates[i,j]} specifying the transition rate from state \code{i} to state \code{j}; the diagonal entries are ignored}
\item{start}{the state in which the Markov chain is started}
\item{sampling}{the sampling rate}
\item{family}{whether Gaussian white (\code{"gauss"}) or coloured (\code{"gaussKern"}), i.e. filtered, noise should be added; cf. \link{family}}
\item{param}{for \code{family="gauss"}, a single non-negative \code{\link{numeric}} specifying the standard deviation of the noise; for \code{family="gaussKern"}, \code{param} must be a list with entry \code{df} giving the \code{\link{dfilter}} object used for filtering, an \code{\link{integer}} entry \code{over} which specifies the oversampling factor of the filter, i.e. \code{param$df} has to be created for a sampling rate of \code{sampling} times \code{over}, and an additional non-negative \code{\link{numeric}} entry \code{sd} specifying the noise's standard deviation \emph{after} filtering; cf. \link{family}}
}
\value{
A \code{\link{list}} with components
\item{\code{cont}}{an object of class \code{\link{stepblock}} containing the simulated true values in continuous time, with an additional column \code{state} specifying the corresponding state}
\item{\code{discr}}{an object of class \code{\link{stepblock}} containing the simulated true values reduced to discrete time, i.e. containing only the observable blocks}
\item{\code{data}}{a \code{\link{data.frame}} with columns \code{x} and \code{y} containing the times and values of the simulated observations, respectively}
}
\note{
This follows the description for simulating ion channels given by VanDongen (1996).
}
\references{
VanDongen, A. M. J. (1996) A new algorithm for idealizing single ion channel data containing multiple unknown conductance levels. \emph{Biophysical Journal} \bold{70}(3), 1303--1315.
}
\seealso{\code{\link{stepblock}}, \code{\link{jsmurf}}, \code{\link{stepbound}}, \code{\link{steppath}}, \code{\link{family}}, \code{\link{dfilter}}}
\examples{
# Simulate filtered ion channel recording with two states
set.seed(9)
# sampling rate 10 kHz
sampling <- 1e4
# tenfold oversampling
over <- 10
# 1 kHz 4-pole Bessel-filter, adjusted for oversampling
cutoff <- 1e3
df <- dfilter("bessel", list(pole=4, cutoff=cutoff / sampling / over))
# two states, leaving state 1 at 1 Hz, state 2 at 10 Hz
rates <- rbind(c(0, 1e0), c(1e1, 0))
# simulate 5 s, level 0 corresponds to state 1, level 1 to state 2
# noise level is 0.1 after filtering
sim <- contMC(5 * sampling, 0:1, rates, sampling=sampling, family="gaussKern",
param = list(df=df, over=over, sd=0.1))
sim$cont
plot(sim$data, pch = ".")
lines(sim$discr, col = "red")
# noise level after filtering, estimated from first block
sd(sim$data$y[1:sim$discr$rightIndex[1]])
# show autocovariance in first block
acf(ts(sim$data$y[1:sim$discr$rightIndex[1]], freq=sampling), type = "cov")
# power spectrum in first block
s <- spec.pgram(ts(sim$data$y[1:sim$discr$rightIndex[1]], freq=sampling), spans=c(200,90))
# cutoff frequency is where power spectrum is halved
abline(v=cutoff, h=s$spec[1] / 2, lty = 2)
}
\keyword{nonparametric}
|
8f87caf22efab9f71d6035c99ffa698db88c467f
|
2416c46f6af4733c9141d65d93692401d649b065
|
/praca_domowa_R_1/wojciech_aRtichowicz_homewoRk.R
|
fef72a2ff5801eb3cb3379aeb6f55d2b251303e0
|
[] |
no_license
|
infoshareacademy/jdsz1-sqluci
|
28e76dfd3ddedae6b1546cb46c4d1fcaadaefd08
|
350936615de02fa3d4c206d872eca446135c6a4b
|
refs/heads/master
| 2021-03-27T14:59:33.820037
| 2018-07-27T06:12:56
| 2018-07-27T06:12:56
| 119,201,335
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 8,438
|
r
|
wojciech_aRtichowicz_homewoRk.R
|
# devel history:
# https://github.com/WojciechArtichowicz/jdsz1_wojciech_artichowicz/blob/master/R/snippets/stuff/homework.R
rm(list=ls())
# 1) load packages devtools, openxlsx, RPostgreSQL, dplyr
{
install.packages("devtools")
library(devtools)
install.packages("openxlsx")
library(openxlsx)
install.packages("RPostgreSQL")
library(RPostgreSQL)
install.packages("dplyr")
library(dplyr)
}
#2 read and build fnction active_packages, which will read all packages from prvious point
# what does it mean to read a function?
{
# Attach every package this homework script depends on.
active_packages <- function() {
  for (pkg in c("devtools", "openxlsx", "RPostgreSQL", "dplyr")) {
    library(pkg, character.only = TRUE)
  }
}
active_packages()
print(active_packages) # == read?
}
# 3) Run active_packages() in the console and check that the packages attach.
# Fix: call() expects a character string naming a function; wrapping an
# already-evaluated result in call() is an error. Just invoke the function.
active_packages()
# 4) load all data from szczegoly_rekompensat table into data frame called df_compensations
{
library(dplyr)
library(RPostgreSQL)
drv<-dbDriver("PostgreSQL")
#con<-dbConnect(drv,dbname="postgres",host="localhost",port=5432,user="postgres",password="postgres")
con<-dbConnect(drv,dbname="wnioskiDB",host="localhost",port=5432,user="postgres",password="postgres")
if (dbExistsTable(con, "szczegoly_rekompensat")){
df_compensations<- dbGetQuery(con, "SELECT * from szczegoly_rekompensat")
print(df_compensations)
} else {
df_compensations = {}
}
#dbDisconnect(con) #remove in order to execute #5
}
# 5) check if table tab_1 exists in a connection defined in previous point
dbExistsTable(con, "tab_1")
# 6) print df_compensations data frame summary
print(summary(df_compensations))
# 7) create vector sample_vector which contains numbers 1,21,41 (don't use seq function)
sample_vector <- c(1,21,41)
# 8) create vector sample_vector_seq which contains numbers 1,21,41 (use seq function)
sample_vector_seq <- seq(1,41,20)
# 9) Combine two vectors (sample_vector, sample_vector_seq) into new one: v_combined
v_combined <- c(sample_vector,sample_vector_seq)
# 10) Sort data descending in vector v_combined
v_combined <- sort(v_combined,decreasing = TRUE)
# 11) Create vector v_accounts created from df_compensations data frame, which will store data from 'konto' column
v_accounts <- df_compensations$konto
# 12)Check v_accounts vector lengt
length(v_accounts)
# 13) Because previously created vector containst duplicated values, we need a new vector (v_accounts_unique), with unique values. Print vector and check its length
{
v_accounts_unique <- unique(v_accounts)
length(v_accounts_unique)
}
# 14) Create sample matrix called sample_matrix, 2 columns, 2 rows. Data: first row (998, 0), second row (1,1)
sample_matrix <- rbind(c(998,0),c(1,1))
# 15) Assign row and column names to sample_matrix. Rows: ("no cancer", "cancer"), Columns: ("no cancer", "cancer")
{
colnames(sample_matrix) <- c("no cancer","cancer")
rownames(sample_matrix) <- c("no cancer","cancer")
}
# 16) Create 4 variables: precision, recall, acuracy, fscore and calculate their result based on data from sample_matrix
{
precision <- sample_matrix["cancer","cancer"] / sum(sample_matrix[,"cancer"])
recall <- sample_matrix["cancer","cancer"] / sum(sample_matrix["cancer",])
accuracy <- sum(diag(sample_matrix)) / sum(sample_matrix)
fscore <- 2.* precision*recall /(precision + recall)
}
# 17) Create matrix gen_matrix with random data: 10 columns, 100 rows,
#     random integers from 1 to 50 inside.
# Fix: the previous second assignment produced a plain vector (not a matrix),
# and the first generated non-integer values; sample() inside matrix()
# satisfies the task directly.
gen_matrix <- matrix(sample(1:50, 100 * 10, replace = TRUE), nrow = 100, ncol = 10)
# 18) Create list l_persons with 3 members from our course. Each person has: name, surname, test_results (vector), homework_results (vector)
l_persons <- list(
person1 = list(name = "Monika",surname = "Serkowska",test_results = c(1,2,3),homework_results = c(3,2,1)),
person2 = list(name = "Wojtek",surname = "Artichowicz",test_results = c(1,2,3),homework_results = c(3,2,1)),
person3 = list(name = "Magdalena",surname = "Kortas",test_results = c(1,2,3),homework_results = c(3,2,1))
)
# 19) Print first element from l_persons list (don't use $ sign)
print(l_persons[1])
# 20) Print first element from l_persons list (use $ sign)n)
print(l_persons$person1)
# 21) Create list l_accounts_unique with unique values of 'konto' column from df_compensations data frame. Check l_accounts_unique type
l_accounts_unique <- list(unique(df_compensations$konto))
# 22) Create data frame df_comp_small with 4 columns from df_compensations data frame (id_agenta, data_otrzymania, kwota, konto)
df_comp_small <- df_compensations[,c("id_agenta","data_otrzymania","kwota","konto")]
# 23) Create new data frame with aggregated data from df_comp_small (how many rows we have per each account, and what's the total value of recompensations in each account)
df_comp_small %>%
group_by(konto) %>%
summarise(rows_per_account = n(), recomp_sum = sum(kwota)) -> new_data_frame
# 24) Which agent recorded most recompensations (amount)? Is this the same who recorded most action?
{
df_comp_small %>%
group_by(id_agenta) %>%
summarise(actions = n(), recomp_sum = sum(kwota)) -> tmp
id_agenta_most_recompensations <- tmp[order(tmp$recomp_sum,decreasing = TRUE),]$id_agenta[1]
print(paste("Which agent recorded most recompensations (amount) " , toString( id_agenta_most_recompensations)))
id_agenta_most_actions <- tmp[order(tmp$actions,decreasing = TRUE),]$id_agenta[1]
print(paste("Is this the same who recorded most action? " , toString( {function (x) if (x == TRUE) "yes" else "no"}(id_agenta_most_actions == id_agenta_most_recompensations) )))
}
# 25) Create loop (for) which will print random 100 values
for (r in rnorm(100,0,1)){
print(r)
}
# 26) Create loop (while) which will print random values (between 1 and 50) until 20 wont' appear
{
r=-1
while (r != 20) {
r = sample(1:50,1)
print(r)
}
}
# 27) Add extra column into df_comp_small data frame called amount_category.
df_comp_small$amount_category <- NA
# 28) Store data from df_comp_small into new table in DB
{
drv<-dbDriver("PostgreSQL")
con<-dbConnect(drv,dbname="wnioskiDB",host="localhost",port=5432,user="postgres",password="postgres")
dbWriteTable(con, "comp_small", df_comp_small)
if (dbExistsTable(con, "comp_small")){
print("Creating table succeeded.")
}else
print("Creating table failed.")
dbDisconnect(con)
}
# 29) Fill values in amount_category: amounts below the average -> 'small',
#     amounts above the average -> 'high'.
# Fix: the original classified freshly generated random numbers instead of the
# kwota column, and the comparison was inverted (values above the mean were
# labelled "small"). A vectorised ifelse() on kwota does what the task asks.
{
  avg_kwota <- mean(df_comp_small$kwota, na.rm = TRUE)
  df_comp_small$amount_category <- ifelse(df_comp_small$kwota < avg_kwota, "small", "high")
}
# 30) Create function f_agent_stats which for given agent_id, will return total number of actions in all tables (analiza_wniosku, analiza_operatora etc)
{
library(dplyr)
library(RPostgreSQL)
# Count an agent's actions across every table in the database: each column
# whose name contains 'agent' is matched against id_agenta, and the per-table
# counts are summed into a single total.
# Takes the agent id and an open DBI connection; assumes proper data - no sanity checks.
f_agent_stats <- function (id_agenta, con)
{
actions = 0
for (t in dbListTables(con)) #iterate through all tables
{
for (f in dbListFields(con,t)) #iterate all the fields
if (grepl('agent', f) ) #if field contains 'agent' then proceed
{
# NOTE(review): the query is built by pasting values into SQL; this is safe
# only while id_agenta is numeric -- prefer a parameterised query otherwise.
query = paste("SELECT COUNT(",f,") FROM", t,"WHERE",f,"=",toString(id_agenta)) # SQL query counting the agents actions
actions = actions + dbGetQuery(con,query)[[1]] #sum up the SQL query result
}
}
return(actions)
}
drv<-dbDriver("PostgreSQL")
con<-dbConnect(drv,dbname="wnioskiDB",host="localhost",port=5432,user="postgres",password="postgres")
id_agenta = 2
agent_actions <- f_agent_stats(id_agenta, con)
print(agent_actions)
dbDisconnect(con)
}
|
78eb6f822994bd02d3c316d2e4e7b0917a38e8ca
|
27f53c5a9aa2d0962b5cd74efd373d5e9d9e0a99
|
/R/setHyperPars.R
|
98ac5f2257f39b0aa34c069b01755bc1fd1b8282
|
[] |
no_license
|
dickoa/mlr
|
aaa2c27e20ae9fd95a0b63fc5215ee373fa88420
|
4e3db7eb3f60c15ce2dfa43098abc0ed84767b2d
|
refs/heads/master
| 2020-12-24T13:44:59.269011
| 2015-04-18T19:57:42
| 2015-04-18T19:57:42
| 31,710,800
| 2
| 0
| null | 2015-04-18T19:57:43
| 2015-03-05T11:29:18
|
R
|
UTF-8
|
R
| false
| false
| 2,675
|
r
|
setHyperPars.R
|
#' Set the hyperparameters of a learner object.
#'
#' @template arg_learner
#' @param ... [any]\cr
#' Named (hyper)parameters with new setting. Alternatively these can be passed
#' using the \code{par.vals} argument.
#' @param par.vals [\code{list}]\cr
#' Optional list of named (hyper)parameter settings. The arguments in
#' \code{...} take precedence over values in this list.
#' @template ret_learner
#' @export
#' @family learner
#' @examples
#' cl1 = makeLearner("classif.ksvm", sigma = 1)
#' cl2 = setHyperPars(cl1, sigma = 10, par.vals = list(C = 2))
#' print(cl1)
#' # note the now set and altered hyperparameters:
#' print(cl2)
setHyperPars = function(learner, ..., par.vals = list()) {
  # Settings passed through ... take precedence over par.vals entries.
  dot.vals = list(...)
  assertClass(learner, classes = "Learner")
  assertList(dot.vals, names = "named", .var.name = "parameter settings")
  assertList(par.vals, names = "named", .var.name = "parameter settings")
  # Merge the two lists (... wins on duplicate names) and delegate.
  setHyperPars2(learner, insert(par.vals, dot.vals))
}
#' Only exported for internal use.
#'
#' Workhorse behind \code{\link{setHyperPars}}: dispatches on the learner
#' class to apply a complete list of (hyper)parameter settings.
#' @param learner [\code{\link{Learner}}]\cr
#' The learner.
#' @param par.vals [\code{list}]\cr
#' List of named (hyper)parameter settings.
#' @return [\code{\link{Learner}}] with the settings applied.
#' @export
setHyperPars2 = function(learner, par.vals) {
UseMethod("setHyperPars2")
}
#' @export
# S3 method for Learner: walks par.vals, validating each setting against the
# learner's parameter set before storing it in learner$par.vals.
setHyperPars2.Learner = function(learner, par.vals) {
ns = names(par.vals)
pars = learner$par.set$pars
# Resolve behaviour flags, preferring learner-level config over global options.
on.par.without.desc = coalesce(learner$config$on.par.without.desc, getMlrOptions()$on.par.without.desc)
on.par.out.of.bounds = coalesce(learner$config$on.par.out.of.bounds, getMlrOptions()$on.par.out.of.bounds)
for (i in seq_along(par.vals)) {
n = ns[i]
p = par.vals[[i]]
pd = pars[[n]]
if (is.null(pd)) {
# no description: stop, warn or stay quiet per on.par.without.desc
msg = sprintf("%s: Setting parameter %s without available description object!\nYou can switch off this check by using configureMlr!", learner$id, n)
if (on.par.without.desc == "stop") {
stop(msg)
} else if (on.par.without.desc == "warn") {
warning(msg)
}
# Register an untyped placeholder param so the value can still be stored.
learner$par.set$pars[[n]] = makeUntypedLearnerParam(id = n)
learner$par.vals[[n]] = p
} else {
# Known parameter: optionally enforce feasibility against its description.
if (on.par.out.of.bounds != "quiet" && !isFeasible(pd, p)) {
msg = sprintf("%s is not feasible for parameter '%s'!", convertToShortString(p), pd$id)
if (on.par.out.of.bounds == "stop") {
stop(msg)
} else {
warning(msg)
}
}
## if valname of discrete par was used, transform it to real value
#if (pd$type == "discrete" && is.character(p) && length(p) == 1 && p %in% names(pd$values))
# p = pd$values[[p]]
learner$par.vals[[n]] = p
}
}
return(learner)
}
|
ece216884aa7f0475712c4b468aac49281655bac
|
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
|
/data/genthat_extracted_code/genotypeR/examples/sort_sequenom_df.Rd.R
|
de7b143138362d96cb49ca8d1bea98f0a90a798e
|
[] |
no_license
|
surayaaramli/typeRrh
|
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
|
66e6996f31961bc8b9aafe1a6a6098327b66bf71
|
refs/heads/master
| 2023-05-05T04:05:31.617869
| 2019-04-25T22:10:06
| 2019-04-25T22:10:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 219
|
r
|
sort_sequenom_df.Rd.R
|
# Extracted example code for genotypeR::sort_sequenom_df; sorts the bundled
# genotypes_data Sequenom data frame.
library(genotypeR)
### Name: sort_sequenom_df
### Title: Sequenom Data frame Sort
### Aliases: sort_sequenom_df
### Keywords: sequenom sort
### ** Examples
data(genotypes_data)
sort_sequenom_df(genotypes_data)
|
0821ca3cec37273fe290cb5a018e6334024294b3
|
8cb0c44a74f7a61f06d41e18ff8c222cc5f28826
|
/R/DocumentationLink.r
|
8d3d453d1d710cfc2d54b6f61ee50d8ece6e0e14
|
[] |
no_license
|
OpenSILEX/opensilexClientToolsR
|
cb33ddbb69c7596d944dcf1585a840b2018ee66c
|
856a6a1d5be49437997a41587d0c87594b0c6a36
|
refs/heads/master
| 2023-05-31T14:26:19.983758
| 2022-01-26T17:51:51
| 2022-01-26T17:51:51
| 360,246,589
| 0
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,324
|
r
|
DocumentationLink.r
|
# OpenSilex API
#
# No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen)
#
# OpenAPI spec version: 1.0.0-rc+2
#
# Generated by: https://github.com/swagger-api/swagger-codegen.git
#' DocumentationLink Class
#'
#' @field type
#' @field url
#'
#' @importFrom R6 R6Class
#' @importFrom jsonlite fromJSON toJSON
#' @export
# R6 model class for a Swagger "DocumentationLink" object holding two string
# fields, `type` and `url`, plus JSON (de)serialisation helpers.
DocumentationLink <- R6::R6Class(
'DocumentationLink',
public = list(
`type` = NULL,
`url` = NULL,
# Validate and store the optional type/url fields (each a length-1 character).
initialize = function(`type`, `url`){
if (!missing(`type`)) {
stopifnot(is.character(`type`), length(`type`) == 1)
self$`type` <- `type`
}
if (!missing(`url`)) {
stopifnot(is.character(`url`), length(`url`) == 1)
self$`url` <- `url`
}
},
# Return a named list of the non-NULL fields (ready for jsonlite::toJSON).
toJSON = function() {
DocumentationLinkObject <- list()
if (!is.null(self$`type`)) {
DocumentationLinkObject[['type']] <- self$`type`
}
if (!is.null(self$`url`)) {
DocumentationLinkObject[['url']] <- self$`url`
}
DocumentationLinkObject
},
# Populate the fields from a JSON string; missing keys leave fields untouched.
fromJSON = function(DocumentationLinkJson) {
DocumentationLinkObject <- jsonlite::fromJSON(DocumentationLinkJson)
if (!is.null(DocumentationLinkObject$`type`)) {
self$`type` <- DocumentationLinkObject$`type`
}
if (!is.null(DocumentationLinkObject$`url`)) {
self$`url` <- DocumentationLinkObject$`url`
}
},
# Populate the fields from an already-parsed list.
fromJSONObject = function(DocumentationLinkObject) {
if (!is.null(DocumentationLinkObject$`type`)) {
self$`type` <- DocumentationLinkObject$`type`
}
if (!is.null(DocumentationLinkObject$`url`)) {
self$`url` <- DocumentationLinkObject$`url`
}
},
# Serialise directly to a JSON string; NULL fields are emitted as null.
toJSONString = function() {
sprintf(
'{
"type": %s,
"url": %s
}',
ifelse(is.null(self$`type`), "null",jsonlite::toJSON(self$`type`,auto_unbox=TRUE, null = "null")),
ifelse(is.null(self$`url`), "null",jsonlite::toJSON(self$`url`,auto_unbox=TRUE, null = "null"))
)
},
# Populate both fields from a JSON string (no presence checks).
fromJSONString = function(DocumentationLinkJson) {
DocumentationLinkObject <- jsonlite::fromJSON(DocumentationLinkJson)
self$`type` <- DocumentationLinkObject$`type`
self$`url` <- DocumentationLinkObject$`url`
}
)
)
|
e8373d8d90141ad6c4a14cc19088fa0481e52707
|
8a9d70289006cc10c59917f5447b4c51650e0b8a
|
/03 historic.R
|
7ff143739fb627b9c603b22b3c931335dc9fdb02
|
[] |
no_license
|
fabiolexcastro/aclimatar_v1
|
47a34c6c344f2b6f6b7bd716e171d0cdbb026ca5
|
23cfe58d81ab3e629a0a5256cd1cfa7bc1f46874
|
refs/heads/master
| 2023-06-15T09:30:04.653236
| 2021-07-07T01:45:53
| 2021-07-07T01:45:53
| 383,642,907
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,917
|
r
|
03 historic.R
|
# Load libraries -----------------------------------------------------------
require(pacman)
pacman::p_load(raster, rgdal, rgeos, terra, stringr, glue, sf, tidyverse, gtools, foreach, doSNOW)
g <- gc(reset = TRUE)
rm(list = ls())
# Functions to use --------------------------------------------------------
# Crop/mask one NetCDF layer stack (variable vr, year yr) to the country
# boundary, write 12 monthly GeoTIFFs, and export the pixel values as a CSV.
# NOTE(review): relies on the globals `fls` (candidate NetCDF paths) and
# `cnt` (GADM country polygon with a GID_0 column) -- confirm before reuse.
extMskNC <- function(vr, yr){
  cat('Start\n')
  fle <- grep(vr, fls, value = TRUE) %>% grep(yr, ., value = TRUE)
  bck <- raster::brick(fle)
  trr <- terra::rast(bck)
  trr <- trr[[1:12]]                 # keep the 12 monthly layers only
  lim <- vect(cnt)
  trr <- terra::crop(trr, lim)
  trr <- terra::mask(trr, lim)
  cnt <- unique(cnt$GID_0)           # locally shadow the polygon with its ISO code
  out <- glue('../rst/clima/hs/{yr}/{cnt}_{vr}_{1:12}.tif')
  # Fix: ifelse() is a vectorised value selector, not a control-flow construct;
  # use a plain if for the side effect of creating the output directory.
  if (!dir.exists(unique(dirname(out)))) {
    dir.create(unique(dirname(out)), recursive = TRUE)
  } else {
    print('Dir already exists')
  }
  Map('writeRaster', x = trr, filename = out, overwrite = TRUE)
  cat('To convert to a table ', vr, ' ', yr, '\n')
  bck <- stack(trr)
  bck <- rasterToPoints(bck) %>% as_tibble()
  names(bck)[1:2] <- c('Lon', 'Lat')
  bck$gid <- seq_len(nrow(bck))
  csv <- glue('../tbl/clima/hs/{cnt}_{vr}_{yr}.csv')
  write.csv(bck, csv, row.names = FALSE)
  cat('Done\n')
}
# Load data ---------------------------------------------------------------
# NOTE(review): pth points at a network share; `fls` and `cnt` are read as
# globals by extMskNC() above, so they must be populated before the map calls.
pth <- '//dapadfs/workspace_cluster_9/Coffee_Cocoa2/_guatemala/_data/_nc/_world'
fls <- list.files(pth, full.names = TRUE, pattern = '.nc$')
fls <- grep(paste0(1980:2017, collapse = '|'), fls, value = TRUE)
# Country boundary: Dominican Republic (GADM level 0).
cnt <- 'DOM'
cnt <- raster::getData('GADM', country = cnt, level = 0)
vrs <- c('pet', 'ppt', 'tmax', 'tmin')
yrs <- basename(fls) %>% readr::parse_number() %>% unique()
ext <- extent(cnt)
# Process every available year for each climate variable.
# Precipitation
map(1:length(yrs), function(z)extMskNC(vr = 'ppt', yr = yrs[z]))
# Maximum temperature
map(1:length(yrs), function(z)extMskNC(vr = 'tmax', yr = yrs[z]))
# Minimum temperature
map(1:length(yrs), function(z)extMskNC(vr = 'tmin', yr = yrs[z]))
|
8b9984cbaa50a4f755ce6b22e91c848f2ed94478
|
319c8effd49600b5796cd1759063b0b8f10aeac1
|
/workspace/CRISPR/wntscreen/gfp+_seq/pval_distribution.r.2018090617
|
7f9582c67175f1365004081a18b10476a9fb2e3f
|
[] |
no_license
|
ijayden-lung/hpc
|
94ff6b8e30049b1246b1381638a39f4f46df655c
|
6e8efdebc6a070f761547b0af888780bdd7a761d
|
refs/heads/master
| 2021-06-16T14:58:51.056045
| 2021-01-27T02:51:12
| 2021-01-27T02:51:12
| 132,264,399
| 1
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 369
|
2018090617
|
pval_distribution.r.2018090617
|
#!/usr/bin/env Rscript
# Plot -log10(p-value) against p-value rank, highlighting non-"other" hits.
# Usage: pval_distribution.r <input.tsv> <output.pdf>
library(ggplot2)
args <- commandArgs(trailingOnly = TRUE)  # fix: T is a reassignable alias for TRUE
data = read.table(args[1], header = TRUE, sep = "\t")
pdf(args[2])
subdata = subset(data, data$tag != "other")
nrow(subdata)
# Explicit print() so the plot renders even when sourced (autoprint off).
print(
  ggplot(data = data, aes(x = pval_rank, y = -log10(pval))) + geom_line() +
    geom_point(data = subdata, aes(color = tag)) +
    #scale_color_manual(values=c(others=""))+
    theme_bw() +
    theme(legend.position = "none")
)
dev.off()  # fix: close the PDF device so the file is flushed to disk
|
be1476c7ea19e16f2d9dab3670dae645d1a16993
|
740d61a6181e12753c10a83bf7c6ee13541744ac
|
/Сбор данных в интернете/12/04-gapminder_xlsx.R
|
935292ebe7b5a07875d99dd5948b17cee8c7eb4e
|
[] |
no_license
|
kn7072/R
|
d566efd2410421b4333b8666bfc99ce90c2cbec7
|
142f3d51d2beff767896c43307881be6f9a64fee
|
refs/heads/master
| 2021-01-11T11:07:02.972657
| 2017-02-05T16:59:29
| 2017-02-05T16:59:29
| 72,877,223
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 667
|
r
|
04-gapminder_xlsx.R
|
# Download HIV prevalence data from the Gapminder repository
# (https://www.gapminder.org/data/) and store it in an R data frame.
suppressPackageStartupMessages(library('XLConnect'))
url_base <- "http://spreadsheets.google.com/pub"
key <- "pyj6tScZqmEfbZyl0qjbiRQ"
url <- paste0(url_base,"?key=",key,"&output=xls")
# Save the data to the file hiv.xlsx (binary mode for Excel content).
download.file(url, "hiv.xlsx", mode="wb")
# Load this workbook.
wb = loadWorkbook("hiv.xlsx")
# Store the contents of the "data" sheet in the data frame df.
df = readWorksheet(wb, sheet = "data")
|
3720df0806e67fa2b4e76f4002e00e4d0a4b94ca
|
b8e2e581169e27b538cc1fa5e7cfb5830316c693
|
/man/testPlotMap.CSV.Rd
|
c07aa48af535683661ede2afafdc85145183a387
|
[
"MIT"
] |
permissive
|
Libardo1/wtsGMT
|
051aaf67fb89856176abafd18ec09bf634ed3506
|
232bb70963ac83d09f6ced10c946031c37d3542d
|
refs/heads/master
| 2021-01-15T20:04:14.296141
| 2014-07-16T18:49:47
| 2014-07-16T18:49:47
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 112
|
rd
|
testPlotMap.CSV.Rd
|
\name{testPlotMap.CSV}
\alias{testPlotMap.CSV}
\title{Run tests.}
\usage{
testPlotMap.CSV()
}
\description{
Runs the test suite for \code{plotMap.CSV}.
}
|
9a85c44e78d1a3a30080f505135569413e5cc179
|
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
|
/data/genthat_extracted_code/sboost/examples/validate.Rd.R
|
d01ddb697e3f16c3dc69fb4c9a303bc3ca8e57af
|
[] |
no_license
|
surayaaramli/typeRrh
|
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
|
66e6996f31961bc8b9aafe1a6a6098327b66bf71
|
refs/heads/master
| 2023-05-05T04:05:31.617869
| 2019-04-25T22:10:06
| 2019-04-25T22:10:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 299
|
r
|
validate.Rd.R
|
# Extracted example code for sboost::validate (k-fold cross-validation of an
# sboost classifier); runs the shipped examples on the bundled datasets.
library(sboost)
### Name: validate
### Title: sboost Validation Function
### Aliases: validate
### ** Examples
# malware
validate(malware[-1], malware[1], iterations = 5, k_fold = 3, positive = 1)
# mushrooms
validate(mushrooms[-1], mushrooms[1], iterations = 5, k_fold = 3, positive = "p")
|
9903920d850add54b3a0774be1b4c573f4edbe72
|
25755fbf197933d6e0112f55dcb989959e3b29d5
|
/201105.R
|
6905547eaa356e40145ac37c2220263511e7bda5
|
[] |
no_license
|
BGJeong/R_test
|
c2c989bae0f7a8c32dd3715bcf27a3db5d89da31
|
55ea421519b499caee2e5261693c6ab48a3c8023
|
refs/heads/main
| 2023-01-08T15:31:47.923637
| 2020-11-06T00:14:15
| 2020-11-06T00:14:15
| 307,956,115
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 582
|
r
|
201105.R
|
# Install dependency packages (binary builds).
install.packages(c('stringr', 'hash', 'tau', 'Sejong', 'RSQLite', 'devtools'), type = "binary")
# Install the GitHub version of KoNLP.
install.packages("remotes")
# Works on 64-bit R only.
remotes::install_github('haven-jeon/KoNLP', upgrade = "never", INSTALL_opts=c("--no-multiarch"))
library("rJava")
library("memoise")
library("dplyr")
library("KoNLP")
# Load the NIA dictionary used by KoNLP's noun extraction.
useNIADic()
# NOTE(review): `text` is not defined anywhere in this script -- these two
# calls will error unless it already exists in the workspace. TODO confirm.
extractNoun(text)
extractNoun(text)
# Read the lyric text files and echo their contents.
txt <- readLines("data/hiphop2.txt")
txt
txt2 <-readLines("data/hiphop.txt")
txt2
ttt <-readLines("data/haha.txt")
ttt
|
f1df5da315213fdf7d710d97ffef3594fdbd60c7
|
4d0c5b2ffe0a23e40c65ddb240a89bc9e465a06c
|
/WebContent/WEB-INF/scriptR.R
|
bb61c50c3038005be769215e0a6fb358887259ba
|
[] |
no_license
|
Pitchounette/Back2Back-proj
|
79bafca8a2a206d7a04e524c78cfdb246531ec2f
|
a3af13cbfd160903028fd152133f2747c320c27d
|
refs/heads/master
| 2021-05-13T16:27:30.720246
| 2018-03-29T09:52:03
| 2018-03-29T09:52:03
| 116,792,768
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,859
|
r
|
scriptR.R
|
library(rpart)
library(randomForest)
# Train a CART classification tree on repertoireTrain, evaluate on
# repertoireTest, and return the test-set accuracy.
#
# repertoireTrain / repertoireTest: paths to CSV files with identical columns.
# indY:      column index of the response variable.
# minbucket: minimum number of observations per terminal node (rpart control).
# transform: if TRUE, coerce a non-factor response to a factor first.
arbre_classification = function(repertoireTrain, repertoireTest, indY, minbucket, transform){
  train.fichier = read.csv(repertoireTrain, header = TRUE)
  test.fichier = read.csv(repertoireTest, header = TRUE)
  if (!is.factor(train.fichier[, indY]) && transform){
    train.fichier[, indY] = as.factor(train.fichier[, indY])
    test.fichier[, indY] = as.factor(test.fichier[, indY])
  }
  nameY = names(train.fichier)[indY]
  set.seed(runif(1, 0, 1000))
  # Fix: build the model formula with reformulate() instead of eval(parse(...)),
  # which is fragile (breaks on non-syntactic names) and hard to audit.
  fichier.rpart = rpart(reformulate(".", response = nameY), data = train.fichier,
                        method = "class", minbucket = minbucket, na.action = na.roughfix)
  predict.fichier = predict(fichier.rpart, newdata = test.fichier, type = "class")
  # Fix: index the response column with [[ ]] rather than eval(parse(...)).
  cmat_Cart = table(test.fichier[[nameY]], predict.fichier)
  # Accuracy = share of observations on the confusion-matrix diagonal.
  accuracy = sum(diag(cmat_Cart)) / sum(cmat_Cart)
  return(accuracy)
}
# Train a random forest on repertoireTrain, evaluate on repertoireTest,
# and return the test-set accuracy.
#
# ntree: number of trees; mtry: number of variables tried at each split.
# Other arguments behave as in arbre_classification().
foret_aleatoire = function(repertoireTrain,repertoireTest, indY, ntree, mtry, transform){
  train.fichier = read.csv(repertoireTrain, header = TRUE)
  test.fichier = read.csv(repertoireTest, header = TRUE)
  if (!is.factor(train.fichier[, indY]) && transform){
    train.fichier[, indY] = as.factor(train.fichier[, indY])
    test.fichier[, indY] = as.factor(test.fichier[, indY])
  }
  nameY = names(train.fichier)[indY]
  set.seed(runif(1, 0, 1000))
  # Fix: reformulate() replaces the fragile eval(parse(...)) formula hack.
  fichier.rpart = randomForest(reformulate(".", response = nameY), data = train.fichier,
                               ntree = ntree, mtry = mtry, na.action = na.roughfix)
  predict.fichier = predict(fichier.rpart, newdata = test.fichier)
  # Fix: index the response column with [[ ]] rather than eval(parse(...)).
  cmat_Cart = table(test.fichier[[nameY]], predict.fichier)
  # Accuracy = share of observations on the confusion-matrix diagonal.
  accuracy = sum(diag(cmat_Cart)) / sum(cmat_Cart)
  return(accuracy)
}
|
9d1298b596523266e499638cbd4b99b6051899a7
|
b79af28f8a3a769a52a9c2d6bdc2ee543a7ea82c
|
/R/forecast.R
|
f8b83bbfae145a5281dff90d14637b561f91872b
|
[] |
no_license
|
cewm/Habitat_for_humanity
|
8e2fe10ecf209ea38f932495d2c4b9ac37128dd6
|
f6dfb4d5011203fd8930f5cd8a1cb14781af18f5
|
refs/heads/master
| 2020-05-19T04:25:45.751647
| 2019-08-08T20:27:50
| 2019-08-08T20:27:50
| 184,825,733
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,320
|
r
|
forecast.R
|
# NOTE(review): rm(list=ls()) and setwd() in a script are discouraged; kept
# as-is since this file appears to be run interactively.
rm(list=ls())
setwd('C:/Users/white/Documents/Practicum/R/ZIP')
####load data
data=read.csv("30363.csv",header=T)
head(data)
#create time series
# Annual series starting in 2007, built from the assessment values in the CSV.
temp = ts(data, start = 2007)
#plot time series
ts.plot(temp, ylab = 'Total Assessment', main = '30363')
#####plot acf and pacf
acf(temp)
pacf(temp)
#Order selection -- AIC
# Grid-search ARIMA(p,1,q) for p,q in {0,1}, scoring with corrected AIC (AICc).
n = length(temp)
norder = 2
p = c(1:norder)-1; q = c(1:norder)-1
aic = matrix(0,norder,norder)
for(i in 1:norder){
for(j in 1:norder){
modij = arima(temp, order = c(p[i],1,q[j]), method='ML')
# AICc = AIC - 2k + 2k*n/(n-k-1), with k = p+q+1 estimated parameters.
aic[i,j] = modij$aic-2*(p[i]+q[j]+1)+2*(p[i]+q[j]+1)*n/(n-p[i]-q[j]-2)
}
}
aicv = as.vector(aic)
plot(aicv,ylab="AIC values")
# Recover the (p, q) pair with minimal AICc from the flattened matrix.
indexp = rep(c(1:norder),norder)
indexq = rep(c(1:norder),each=norder)
indexaic = which(aicv == min(aicv))
porder = indexp[indexaic]-1
qorder = indexq[indexaic]-1
porder
qorder
#####model creation
# NOTE(review): the order is hard-coded to (1,1,1) rather than using the
# porder/qorder selected above -- confirm this is intentional.
final_model = arima(temp, order = c(1,1,1), method = "ML")
## GOF: residual analysis
plot(resid(final_model), ylab='Residuals',type='o',main="Residual Plot")
abline(h=0)
acf(resid(final_model),main="ACF: Residuals")
pacf(resid(final_model),main="PACF: Residuals")
hist(resid(final_model),xlab='Residuals',main='Histogram: Residuals')
qqnorm(resid(final_model),ylab="Sample Q",xlab="Theoretical Q")
qqline(resid(final_model))
# Portmanteau tests for remaining residual autocorrelation.
Box.test(final_model$resid, lag = (porder+qorder+1), type = "Box-Pierce", fitdf = (porder+qorder))
Box.test(final_model$resid, lag = (porder+qorder+1), type = "Ljung-Box", fitdf = (porder+qorder))
summary(final_model)
##### Forecasting: 5 steps ahead, with a held-out tail for accuracy checks.
n = length(temp)
# Fix: hold out the last 5 observations. With the original nfit = n the test
# window (nfit+1):n was empty, so every accuracy measure evaluated to NaN.
nfit = n - 5
outsales = arima(temp[1:nfit], order = c(1,1,1), method = 'ML')
outpred = predict(outsales, n.ahead = 5)
temp
outpred$pred[1:5]
# Observed series followed by the 5 forecast values.
hola <- c(as.vector(temp), as.vector(outpred$pred[1:5]))
hola
# Fix: `out` was never defined (print(out) errored and left the sink open);
# write the combined observed+forecast vector instead.
sink("output.txt")
print(hola)
sink()
#accuracy measures on the held-out observations
obssales = temp[(nfit+1):n]
# Fix: the predictions were squared before comparison (copy-paste bug) and the
# measure labels were shifted one line; both corrected below.
predsales = outpred$pred
mean((predsales-obssales)^2)            ### Mean Squared Prediction Error (MSPE)
mean(abs(predsales-obssales))           ### Mean Absolute Prediction Error (MAE)
mean(abs(predsales-obssales)/obssales)  ### Mean Absolute Percentage Error (MAPE)
sum((predsales-obssales)^2)/sum((obssales-mean(obssales))^2)  ### Precision Measure (PM)
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.