blob_id
stringlengths 40
40
| directory_id
stringlengths 40
40
| path
stringlengths 2
327
| content_id
stringlengths 40
40
| detected_licenses
listlengths 0
91
| license_type
stringclasses 2
values | repo_name
stringlengths 5
134
| snapshot_id
stringlengths 40
40
| revision_id
stringlengths 40
40
| branch_name
stringclasses 46
values | visit_date
timestamp[us]date 2016-08-02 22:44:29
2023-09-06 08:39:28
| revision_date
timestamp[us]date 1977-08-08 00:00:00
2023-09-05 12:13:49
| committer_date
timestamp[us]date 1977-08-08 00:00:00
2023-09-05 12:13:49
| github_id
int64 19.4k
671M
⌀ | star_events_count
int64 0
40k
| fork_events_count
int64 0
32.4k
| gha_license_id
stringclasses 14
values | gha_event_created_at
timestamp[us]date 2012-06-21 16:39:19
2023-09-14 21:52:42
⌀ | gha_created_at
timestamp[us]date 2008-05-25 01:21:32
2023-06-28 13:19:12
⌀ | gha_language
stringclasses 60
values | src_encoding
stringclasses 24
values | language
stringclasses 1
value | is_vendor
bool 2
classes | is_generated
bool 2
classes | length_bytes
int64 7
9.18M
| extension
stringclasses 20
values | filename
stringlengths 1
141
| content
stringlengths 7
9.18M
|
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
035b88eef33ea2f54994dac5897af94fef3cdef0
|
427328c281f6e119b0742a2638bb6ea9c84db223
|
/TempCode/neuroim_brains_in_out.R
|
86cb4644609a4bcd52ab22dc638885cc48018008
|
[] |
no_license
|
LJWilliams/MARINeR
|
f099c1e8f0d1821e2ae0bd9c41a22361487d989f
|
737ed2f1a4222bffa44fd4424874ff941226a97d
|
refs/heads/master
| 2022-11-25T13:43:52.410735
| 2018-08-06T23:34:36
| 2018-08-06T23:34:36
| 279,640,526
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,709
|
r
|
neuroim_brains_in_out.R
|
#### Workspace for testing out functions
#######################################################
###### Script for building a combined voxels matrix for multiple
###### participants ("peeps") from masked fMRI runs.
##
## Inputs : per-subject NIfTI runs under `subjs.dir` and amygdala masks
##          under `masks.dir` (paths are machine-specific).
## Outputs: `peepsOut`   - masked voxel data from all subjects, column-bound
##          `peepDesign` - subject index for each column of `peepsOut`
##          `runDesign`  - run labels taken from the last subject processed
##                         (valid only if all subjects share run structure).
source('../MARINeR/R/volsToMatrix.R')
library(neuroim)

subjs.dir <- '/Volumes/JOHNNYFIVE/Professional/Baycrest/S17/BrainHack/BrainHack_TO_2017/ExampleData/Data'
subjs <- dir(subjs.dir)
masks.dir <- '/Volumes/JOHNNYFIVE/Professional/Baycrest/S17/BrainHack/BrainHack_TO_2017/ExampleData/Masks'
mask1 <- 'L_amygdala2example_func.nii'
mask2 <- 'R_amygdala2example_func.nii'
run1 <- 'filtered_func_data_fMRI1.nii'
run2 <- 'filtered_func_data_fMRI2.nii'

### data should just be any .nii in the given directory.
### Worked example of the expected per-subject file layout (not used below).
S01 <- list(
  subj.data = c('/Volumes/JOHNNYFIVE/Professional/Baycrest/S17/BrainHack/BrainHack_TO_2017/ExampleData/Data/S01/filtered_func_data_fMRI1.nii',
                '/Volumes/JOHNNYFIVE/Professional/Baycrest/S17/BrainHack/BrainHack_TO_2017/ExampleData/Data/S01/filtered_func_data_fMRI2.nii'),
  masks = c('/Volumes/JOHNNYFIVE/Professional/Baycrest/S17/BrainHack/BrainHack_TO_2017/ExampleData/Masks/S01/L_amygdala2example_func.nii',
            '/Volumes/JOHNNYFIVE/Professional/Baycrest/S17/BrainHack/BrainHack_TO_2017/ExampleData/Masks/S01/R_amygdala2example_func.nii')
)

peepDesign <- c()
peepsOut <- c()
## seq_along() is safe when `subjs` is empty; 1:length(subjs) would
## incorrectly iterate over c(1, 0).
for (s in seq_along(subjs)) { ## assumes everyone has the same number of scans
  maskVol <- c(paste(masks.dir, subjs[s], mask1, sep = '/'),
               paste(masks.dir, subjs[s], mask2, sep = '/'))
  dataVols <- c(paste(subjs.dir, subjs[s], run1, sep = '/'),
                paste(subjs.dir, subjs[s], run2, sep = '/'))
  peepMat <- volsToMatrix(dataVols, maskVol)
  ## NOTE(review): growing via cbind() copies on every iteration; fine for a
  ## handful of subjects, preallocate if this scales up.
  peepsOut <- cbind(peepsOut, peepMat$dataMatrix)
  peepDesign <- cbind(peepDesign, matrix(s, 1, dim(peepMat$dataMatrix)[2]))
}
## `peepMat` here is the object left over from the final loop iteration;
## correct only under the same-run-structure assumption above.
runDesign <- peepMat$runDesign
|
3f9502ddd3f088962daf2891a2217864bb9a461a
|
e04fdb3830feea19fc2d7f0dc937b21039af996b
|
/Software/RBrownie/man/write.brownie.matrix.Rd
|
e32bedb9363507a305e020e0537f1776672ece12
|
[] |
no_license
|
bomeara/omearatenure
|
c53b2bb41f313e84cd551c59317918657a3fbef1
|
dd0fac37c26712e71bfdf0ad39e36b9b29c8f616
|
refs/heads/master
| 2020-04-08T08:33:03.815826
| 2014-11-03T07:26:40
| 2014-11-03T07:26:40
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,352
|
rd
|
write.brownie.matrix.Rd
|
\name{write.brownie.matrix}
\alias{write.brownie.matrix}
%- Also NEED an '\alias' for EACH other topic documented here.
\title{
Return a matrix formatted for the brownie core
}
\description{
This function can assist users in creating matrices which are compatible with the brownie core. The inputted matrix is essentially flattened, making it compatible with certain brownie commands, namely 'discrete'.
}
\usage{
write.brownie.matrix(mat)
}
%- maybe also 'usage' for other objects documented here.
\arguments{
\item{mat}{
%% ~~Describe \code{mat} here~~
}
}
\details{
%% ~~ If necessary, more details than the description above ~~
}
\value{
%% ~Describe the value returned
%% If it is a LIST, use
%% \item{comp1 }{Description of 'comp1'}
%% \item{comp2 }{Description of 'comp2'}
%% ...
}
\references{
%% ~put references to the literature/web site here ~
}
\author{
J. Conrad Stack
}
\note{
%% ~~further notes~~
}
%% ~Make other sections like Warning with \section{Warning }{....} ~
\seealso{
\code{\link{addDiscrete}}
}
\examples{
testmat = matrix(c("","a",
"b",""), byrow=TRUE,nrow=2)
bmat = write.brownie.matrix(testmat)
data(geospiza_ext)
geotmp = addDiscrete(geospiza_ext,model="USER",ratemat=bmat)
commands(geotmp)
}
% Add one or more standard keywords, see file 'KEYWORDS' in the
% R documentation directory.
\keyword{ methods }
|
e95b91aaafede31296f24b63582950d3dccf7442
|
4753f45f9a029aae3635ffacf46e4c4411c4dc69
|
/death-by-R graphic/death-by-R-withoutgrid.R
|
fa962b8b1cb3146222f37b41f15d88024e42598a
|
[] |
no_license
|
michelledaviest/life-after-death-by-R
|
0b543083e015d07cadaa9b6da03643d1e80f6efa
|
7e76d64ede8e72a4360da04a3a8a6b941ea4fb4c
|
refs/heads/master
| 2021-04-21T16:22:33.111898
| 2020-04-02T09:50:29
| 2020-04-02T09:50:29
| 249,795,837
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,441
|
r
|
death-by-R-withoutgrid.R
|
library(grid)
#png(filename="deathR.png")
# Draw a single grid cell as a rectangle inside the supplied viewport.
# The cell is anchored at its lower-left corner (hjust/vjust = 0), sized
# ht x wt in viewport units, outlined in black, and filled with `colour`.
draw_grid <- function(dat, ht = 1/10, wt = 1/10, vp, colour = "grey") {
  cell_style <- gpar(col = 1, fill = colour)
  grid.rect(x = dat$x, y = dat$y,
            height = ht, width = wt,
            hjust = 0, vjust = 0,
            vp = vp, gp = cell_style)
}
## Draws the "death by R" graphic: a grey letter shape built from rectangles
## in a centered viewport, with a red bar and a large rotated "R" on top.
## All coordinates below are in normalized (0..1) units of viewport vp1.
## NOTE(review): draw_grid() defined above is never called in this section.
grid.newpage()
#create viewport for the image we have to draw
vp1 <- viewport(x = 0.1, y = 0.1, w = 0.8, h = 0.8,
just = c("left", "bottom"),
name = "vp1")
#vertical column x-y values
grid.rect(x = 0.1, y = 0.5,
width = 0.1, height = 1,
draw = TRUE, vp = vp1, gp = gpar(col="grey", fill="grey"))
#bottom horizontal x-y values
grid.rect(x = 0.45, y = 0.045,
width = 0.6, height = 0.09,
draw = TRUE, vp = vp1,
gp = gpar(col="grey",fill="grey"))
#top horizontal x-y values
grid.rect(x = 0.35, y = 0.955,
width = 0.4, height = 0.09,
draw = TRUE, vp = vp1,
gp = gpar(col="grey",fill="grey"))
#vertical red line
grid.rect(x = 0.525, y = 0.76,
width = 0.05, height = 0.3,
draw = TRUE, vp = vp1,
gp = gpar(col="red",fill="red"))
#draw the rotated R
#grid.text doesn't have a rot parameter so used textGrob
# cex=10 scales the glyph up; rot=340 tilts it 20 degrees clockwise.
gxa = textGrob("R", x = 0.53, y = 0.51, rot = 340, check.overlap = TRUE,
gp = gpar(cex=10, fontfamily="mono",lwd = 20), vp = vp1)
grid.draw(gxa)
# Uncomment together with the png() call at the top to write to file.
#dev.off()
|
e751c4c89f1b7deafc506e7b12ed2398802fd6b3
|
1881a8cd65d9c7dddcb05c9e4e0465dbfc18b008
|
/cachematrix.R
|
379fe331ad7f4e0cde3618dc706c02be177238b4
|
[] |
no_license
|
Chicag0data/ProgrammingAssignment2
|
5ab860e0f9e128eb937608dc0d9d97f091ba8430
|
b466fc7e026646aa142e8e320b9faec41fcf85fc
|
refs/heads/master
| 2021-01-19T07:27:49.819634
| 2014-11-23T05:57:20
| 2014-11-23T05:57:20
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 981
|
r
|
cachematrix.R
|
## These functions will cache a special matrix inverse
## and solve the matrix if it has yet to be solved.
## This function creates a special matrix object that can cache its inverse.
## Create a "cache matrix": a list of closures sharing state for a matrix
## `x` and a cached value of its inverse (`sol`).
##
## Returned list:
##   get()       - return the stored matrix
##   setsolve(s) - store a computed inverse in the cache
##   getsolve()  - return the cached inverse (NULL if not yet computed)
##   set(y)      - replace the stored matrix AND invalidate the cache
##
## `set` is appended after the original three entries, so existing callers
## using either names or positions are unaffected. Without it there was no
## way to replace the matrix without leaving a stale cached inverse behind.
makeCacheMatrix <- function(x = matrix()) {
  sol <- NULL
  get <- function() {
    x
  }
  setsolve <- function(solve) {
    sol <<- solve
  }
  getsolve <- function() {
    sol
  }
  # Replacing the matrix must drop any previously cached inverse.
  set <- function(y) {
    x <<- y
    sol <<- NULL
  }
  list(get = get, setsolve = setsolve, getsolve = getsolve, set = set)
}
## This function computes the inverse of the special matrix returned by the makeCacheMatrix above
## If the inverse has already been calculated(and the matrix has not changed)
## then the cachesolve should retrieve the inverse from the cache.
## Return the inverse of the special "cache matrix" `x` produced by
## makeCacheMatrix. If an inverse is already cached it is returned directly
## (with a message); otherwise it is computed via solve(), stored back into
## the cache, and returned.
cacheSolve <- function(x, ...) {
  ## Return a matrix that is the inverse of 'x'
  cached <- x$getsolve()
  if (is.null(cached)) {
    # Cache miss: compute, store, and return the fresh inverse.
    inv <- solve(x$get())
    x$setsolve(inv)
    return(inv)
  }
  message("getting cached data")
  cached
}
|
38d9c1ca839ee309647e71fd25c2064201a01e8a
|
bc84c72d82acd94c8f8bce7bf92354af3c73ad52
|
/Section_2-2/Figure_1.R
|
8b6a6098ef772ba9a91129d603cb9e2c81cc697a
|
[] |
no_license
|
YaohuiZeng/biglasso_reproduce
|
da5eef59859369fa8222ca882d9891cdfd905c72
|
5a64327df10850b1642387d2ea1f40068b5e0fea
|
refs/heads/master
| 2021-03-24T10:13:08.702955
| 2018-12-21T04:39:39
| 2018-12-21T04:39:39
| 122,927,482
| 0
| 6
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,817
|
r
|
Figure_1.R
|
## Figure 1
## Reproduces Figure 1 of the biglasso paper: the fraction of features
## discarded by three screening rules (SSR, SEDPP, BEDPP) along the lasso
## path, on the bcTCGA breast-cancer gene-expression data.
## NOTE(review): library() is preferable to require() for hard dependencies;
## require() returns FALSE instead of erroring on a missing package.
rm(list = ls())
require(biglasso)
require(ncvreg)
require(glmnet)
require(ggplot2)
require(picasso)
# Assume current working directory set to be folder "biglasso_reproduce/"
# setwd("~/GitHub/biglasso_experiment/biglasso_reproduce/")
# Loads the design matrix X and response y -- assumed names; confirm
# against the contents of bcTCGA.RData.
load("./Section_2-2/bcTCGA.RData")
p <- ncol(X)
n <- nrow(X)
# biglasso requires a big.matrix backing object rather than a plain matrix.
x.bm <- as.big.matrix(X, type = 'double')
## replicate figure 1
# ---------------------------------------------------------------------
eps <- 1e-8
lam.min <- 0.1
# Fit the same gaussian lasso path three times, varying only the screening
# rule so rejection counts are directly comparable.
fit.hsr.bedpp <- biglasso(x.bm, y, family = 'gaussian',
screen = "SSR-BEDPP",
safe.thresh = 0,
lambda.min = lam.min,
output.time = F, eps = eps,
ncores = 1, lambda.log.scale = F)
fit.edpp <- biglasso(x.bm, y, family = 'gaussian', screen = 'SEDPP',
lambda.min = lam.min,
eps = eps,
lambda.log.scale = F, output.time = F)
fit.hsr <- biglasso(x.bm, y, family = 'gaussian', screen = 'SSR',
lambda.min = lam.min,
eps = eps,
lambda.log.scale = F, output.time = F)
# Normalize lambda by the largest value so the x-axis is lambda/lambda_max.
lamb.ratio <- fit.hsr.bedpp$lambda / fit.hsr.bedpp$lambda[1]
# Stack rejection counts for the three rules into one long vector.
rejections <- c(fit.hsr$rejections, fit.edpp$rejections, fit.hsr.bedpp$safe_rejections)
lam.ratio <- lamb.ratio
rej.mean.df <- as.data.frame(matrix(rejections / p, ncol = 1))
names(rej.mean.df) <- 'Reject_percent'
rej.mean.df$lam.ratio <- rep(lam.ratio, 3)
# NOTE(review): `each = 100` assumes a 100-value lambda path; confirm this
# matches length(fit.hsr.bedpp$lambda).
rej.mean.df$Rule <- rep(c("SSR", 'SEDPP', 'BEDPP'), each = 100)
rej.mean.df$Rule <- factor(rej.mean.df$Rule, c("SSR", 'SEDPP', 'BEDPP'))
# Line plot of discard percentage vs lambda ratio, reversed x-axis so the
# path runs from lambda_max (left) toward lambda_min (right).
gp <- ggplot(rej.mean.df, aes(x = lam.ratio, y = Reject_percent, color = Rule)) +
# geom_ribbon(aes(ymin = Reject_percent, ymax = 1, linetype = NA,
# fill = Rule),
# alpha = 0.4) +
geom_line(size = 1) +
xlab(expression(lambda/lambda[max])) +
ylab("Percent of discarded features") +
# scale_x_continuous(limits = c(0.1, 1),
# breaks = seq(0.1, 1, by = 0.1)) +
scale_x_reverse(limits = c(1, 0.09), breaks = seq(0, 1, by = 0.2)) +
scale_y_continuous(limits = c(0, 1),
breaks = seq(0, 1, by = 0.2)) +
geom_hline(aes(yintercept=1), linetype = 5, color = 'black') +
# geom_hline(aes(yintercept=0.92), linetype = 5, color = 'red') +
theme_bw() +
theme(legend.position = c(.78, .5),
# axis.text = element_text(size = 16, face = "bold"),
# axis.title = element_text(size = 16, face = "bold"),
# legend.title = element_text(size = 16, face = "bold"),
# legend.text = element_text(size = 16)
legend.background = element_rect(colour = "darkgray")
)
# Write the final figure to PDF in the working directory.
pdf(file = paste0("Fig_1_three_rules_breast.pdf"), width = 5, height = 4)
print(gp)
dev.off()
|
122134548472aca9a9c2d0207d9fc2e2acb32fd2
|
609ca5bd45edb5d1055b4a38efbcdfe2adfe7d63
|
/R/qLearnS1Est.R
|
1876896d2733e602e0db9457239d66b9ed89a5e7
|
[] |
no_license
|
kalinn/iqLearn
|
e2c7939aa7eb058c7e48dd98e9d6464d01b65a19
|
97bfba5f7dbe0a9f6d8ed333c5aac6422af28b5d
|
refs/heads/master
| 2022-08-09T11:48:39.147582
| 2022-08-01T17:27:09
| 2022-08-01T17:27:09
| 30,946,670
| 2
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,365
|
r
|
qLearnS1Est.R
|
## Stage-1 regression estimator for Q-learning (iqLearn package).
##
## Arguments:
##   object - fitted downstream object; only its `Ytilde` component (the
##            pseudo-outcome carried back from the later stage) is used.
##   H1q    - stage-1 history variables (coerced to a data frame).
##   A1     - stage-1 treatment; the sign() calls below imply a -1/1 coding
##            (assumption -- confirm with the package documentation).
##   s1ints - indices of H1q columns to interact with A1 (may be empty).
##   ...    - passed through to lm().
##
## Returns a list with the main-effect coefficients (betaHat10), the
## treatment-interaction coefficients (betaHat11), the estimated optimal
## stage-1 treatments (optA1), the lm fit (s1Fit), and s1ints.
##
## NOTE(review): the inline "second-stage regression" comments below look
## like copy-paste from the stage-2 estimator; the surrounding names all say
## stage 1. Confirm against the package source.
qLearnS1Est <-
function (object, H1q, A1, s1ints, ...){
s1vars = as.data.frame (H1q);
s1names = names (s1vars);
s1ints = as.vector (s1ints);
Ytilde = object$Ytilde;
# Branch: with interactions, the design is [1 | H1q | A1 | A1*H1q[,s1ints]].
if (length (s1ints) > 0){
s1. = as.matrix (cbind (1, s1vars, A1, A1*s1vars[,s1ints]));
colnames (s1.) = c ("intercept", s1names, "A1",
paste(s1names[s1ints], "A1", sep=":"));
p10 = ncol (s1vars);
# NOTE(review): p11 is computed but never used; also s1vars[,s1ints] drops
# to a vector when length(s1ints) == 1, making ncol() return NULL here.
p11 = ncol (s1vars[,s1ints]);
p1 = ncol (s1.);
# Split the design into main-effect columns (H10) and interaction columns.
H10 = s1.[, 1:(p10+1)];
# Multiplying the A1-interaction columns by A1 recovers the raw history
# columns when A1 is coded -1/1 (assumption noted above).
H11 = s1.[, (p10+2):p1]*A1;
## second-stage regression
s1Fit = lm (Ytilde ~ s1. - 1, ...);
betaHat1 = s1Fit$coefficients;
betaHat10 = betaHat1[1:(p10+1)];
betaHat11 = betaHat1[(p10+2):p1];
## vector of optimal second-stage txts
# Optimal treatment maximizes the interaction term: sign of H11 %*% beta11.
optA1 = sign (H11 %*% betaHat11);
}
else{
# No interactions: design is [1 | H1q | A1]; only A1's sign matters.
s1. = as.matrix (cbind (1, s1vars, A1));
colnames (s1.) = c ("intercept", s1names, "A1");
p10 = ncol (s1vars);
p1 = ncol (s1.);
H10 = s1.[, 1:(p10+1)];
H11 = 1;
## second-stage regression
s1Fit = lm (Ytilde ~ s1. - 1, ...);
betaHat1 = s1Fit$coefficients;
betaHat10 = betaHat1[1:(p10+1)];
betaHat11 = betaHat1[p1];
## vector of optimal second-stage txts
# Scalar A1 coefficient: the same optimal treatment for everyone.
optA1 = sign (betaHat11);
}
list ("betaHat10"=betaHat10, "betaHat11"=betaHat11, "optA1"=optA1,
"s1Fit"=s1Fit, "s1ints"=s1ints);
}
|
6856fae96d11b82070b09bbe2f3de68a2febe0eb
|
e9c5c064650015211a8c85be5e21a6c143ff4959
|
/man/SigmaPsi.Rd
|
e8bdd4ce535b346b15bea3fd77afcab8aa5b89fb
|
[] |
no_license
|
livioivil/SigmaPsi
|
74c4e774aa8e285a16fe18e0a5fc3f3f762c3c86
|
db353dbb6023c5082c955a3af75613495cd8e687
|
refs/heads/master
| 2022-11-12T00:53:25.623818
| 2022-11-05T18:55:11
| 2022-11-05T18:55:11
| 50,662,806
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 300
|
rd
|
SigmaPsi.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/SigmaPsi.R
\docType{package}
\name{SigmaPsi}
\alias{SigmaPsi}
\alias{SigmaPsi-package}
\title{SigmaPsi}
\description{
an R package for Psychometrics
}
\author{
Author: Livio Finos, Gianmarco Altoè, Massimiliano Pastore
}
|
6ba6f7a3a9e3ba6e56346ccbb6a731509d0b8400
|
c032e0a19a40e6fd784c806637662eb6bbe06a21
|
/tests/testthat/test_parser.R
|
8691c17dc44552781b9aa369c322d0b655b5aa64
|
[] |
no_license
|
minghao2016/retrocombinator
|
d454bde6d5066c497a15941d14552d5ee869fd1a
|
0df3b74d1cb9973b3d7440ad37f5f3667581e1df
|
refs/heads/master
| 2023-08-26T05:54:19.655566
| 2021-11-11T06:29:49
| 2021-11-11T06:29:49
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 666
|
r
|
test_parser.R
|
context("Reading output of C++ simulation")

test_that("Parsing the C++ simulation output works", {
  # Helper to locate files shipped in the package's testdata directory.
  testdata_path <- function(fname) {
    system.file("testdata", fname, package="retrocombinator")
  }
  run <- parseSimulationOutput(testdata_path("test_simulation.test"))
  expected <- readRDS(testdata_path("test_parser_expected.rds"))
  # Compare every parsed component against the saved reference output.
  for (component in c("params", "sequences", "pairwise",
                      "familyRepresentatives", "familyPairwise")) {
    expect_equal(run[[component]], expected[[component]])
  }
})
|
6362a8e22f474e748bf5bb310dc28706a88ab953
|
81729b118b878a726d45480efbb8ed9250e8c2bf
|
/map-ikeda.R
|
d713a5bb17c879f4f0fa4d4fd3057fa326dc3104
|
[] |
no_license
|
sebdalgarno/ikeda-map
|
8d851d558e6c436bf39e19e3f89ea8ad38d19ecc
|
804fee65c4f4e18eb79a602ce0d86d9166cf76e6
|
refs/heads/master
| 2021-05-06T22:57:48.765695
| 2017-12-18T00:37:12
| 2017-12-18T00:37:12
| 112,889,591
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 5,038
|
r
|
map-ikeda.R
|
## Build the Ikeda Bay map (main panel) plus a townsite inset, then write
## both to PNG and PDF under output/plots/maps/.
## Depends on header.R for the libraries used below (sf, ggplot2, ggrepel,
## ggsn, grid, dplyr) and on load_datas() for the spatial layers.
source('header.R')
set_sub("tidy")
load_datas()

## Plot limits: padded bounding boxes for the bay and the townsite.
lims_bay <- st_bbox(bounds_bay) %>%
  ps_pad_bbox(c(-1610, -1200, 200, 200)) %>%
  as.vector
lims_town <- st_bbox(bounds_town) %>%
  # ps_pad_bbox(c(50, 200, 0, 0)) %>%
  as.vector

## Main bay map.
map <- ggplot() +
  geom_sf(data = ikeda_island, fill = "white", alpha = 0.9) +
  ## FIX: the colour argument was left empty (`color = `); use the same
  ## creek blue ("#0087be") as the inset map below.
  geom_sf(data = creek, color = "#0087be") +
  geom_point(data = village, aes(x = X, y = Y, color = legend), size = 3, show.legend = TRUE) +
  scale_color_manual(values = c("black", "black"), name = "", guide = "legend") +
  geom_label_repel(data = slice(village, 1), aes(x = X, y = Y, label = Label),
                   size = 3.5, nudge_x = -350, nudge_y = 500, fontface = "bold") +
  geom_label_repel(data = slice(village, 2), aes(x = X, y = Y, label = Label),
                   size = 3.5, nudge_x = 700, nudge_y = 300, fontface = "bold") +
  geom_text_repel(data = bay, aes(x = X, y = Y, label = Label),
                  size = 3.5, nudge_x = 900, nudge_y = -150, fontface = "bold", point.padding = unit(2, "lines")) +
  geom_text(data = awaya, aes(x = X, y = Y, label = Label),
            size = 3.5, nudge_x = 520, nudge_y = -100, fontface = "bold") +
  geom_text_repel(data = ikeda, aes(x = X, y = Y, label = Label),
                  size = 3.5, nudge_x = 260, nudge_y = 200, fontface = "bold") +
  geom_text(data = jedway, aes(x = X, y = Y, label = Name),
            size = 3.5, nudge_x = 200, nudge_y = 100, fontface = "bold") +
  geom_text(data = NULL, aes(x = bay$X, y = bay$Y, label = "Inlet Big Creek"),
            size = 3.5, nudge_x = -1900, nudge_y = -300, fontface = "bold", color = "#0087be") +
  ## Rectangle showing the inset (townsite) extent on the main map.
  geom_sf(data = bounds_town, fill = "transparent", size = 1.5, color = "black") +
  coord_sf(xlim = c(lims_bay[1], lims_bay[3]), ylim = c(lims_bay[2], lims_bay[4])) +
  theme(panel.background = element_rect(fill = "light blue"),
        legend.position = c(0.11, 0.09),
        legend.background = element_rect(fill = "transparent"),
        legend.key = element_blank()) +
  labs(x = "Longitude", y = "Latitude") +
  ggsn::scalebar(data = NULL, location = "bottomleft", dist = 0.5,
                 height = 0.007, st.size = 2.3, st.dist = 0.015,
                 x.min = lims_bay[1], x.max = lims_bay[3], y.min = lims_bay[2], y.max = lims_bay[4])

## Townsite inset map.
inset <- ggplot() +
  geom_sf(data = ikeda_island, fill = "white", alpha = 0.9) +
  geom_sf(data = creek, color = "#0087be") +
  geom_sf(data = buildings, fill = "black") +
  geom_sf(data = train, aes(linetype = Name), show.legend = "line") +
  # geom_point(data = sites_town, aes(x = X, y = Y, color = legend), size = 4) +
  # scale_color_manual(values = "black", name = "", guide = "legend") +
  # geom_sf(data = train, color = "grey30") +
  geom_text_repel(data = sites_town, aes(x = X, y = Y, label = Name), size = 2) +
  # geom_text_repel(data = filter(sites_town, Name == "Blacksmith Shop"), aes(x = X, y = Y, label = Name),
  #                 size = 6, nudge_x = -40, nudge_y = -20) +
  # geom_text_repel(data = filter(sites_town, Name == "Assay Office"), aes(x = X, y = Y, label = Name),
  #                 size = 6, nudge_x = -10, nudge_y = 20) +
  # geom_text_repel(data = filter(sites_town, Name == "Barn"), aes(x = X, y = Y, label = Name),
  #                 size = 6, nudge_x = -25, nudge_y = -10) +
  # geom_text_repel(data = filter(sites_town, Name == "Residence"), aes(x = X, y = Y, label = Name),
  #                 size = 6, nudge_x = -5, nudge_y = 20) +
  # geom_text_repel(data = filter(sites_town, Name == "Wharf"), aes(x = X, y = Y, label = Name),
  #                 size = 6, nudge_x = -5, nudge_y = 10) +
  # geom_text_repel(data = filter(sites_town, Name == "Ore Bunkers"), aes(x = X, y = Y, label = Name),
  #                 size = 6, nudge_x = -5, nudge_y = 20) +
  coord_sf(xlim = c(lims_town[1], lims_town[3]), ylim = c(lims_town[2], lims_town[4])) +
  # ggmap::theme_inset() +
  theme(panel.background = element_rect(fill = "light blue")) +
  labs(x = "Longitude", y = "Latitude") +
  ggsn::scalebar(data = NULL, location = "topleft", dist = 0.25,
                 height = 0.015, st.size = 2, st.dist = 0.03,
                 x.min = lims_town[1]-100, x.max = lims_town[3], y.min = lims_town[2], y.max = lims_town[4]) +
  theme(legend.position = c(0,0),
        legend.background = element_rect(fill = "transparent"),
        legend.key = element_blank(),
        panel.border = element_rect(fill = "transparent", color = "black", size = 2))

## Render both plots on the active device for a quick visual check.
inset
map

## PNG output: main map on top (75% height), inset below (25% height).
dir.create("output/plots/maps", recursive = TRUE)
png(filename = "output/plots/maps/ikeda_map.png", width = 3200, height = 2400, res = 300)
vpm <- viewport(width = 1, height = 0.75, x = 0.5, y = 0.6)
vpi <- viewport(width = 1, height = 0.25, x = 0.5, y = 0.2)
print(map, vp = vpm)
print(inset, vp = vpi)
dev.off()

## PDF output: inset overlaid in the top-right corner of the main map.
pdf(file = "output/plots/maps/ikeda_map.pdf", width = 12, height = 9)
vpm <- viewport(width = 1, height = 1, x = 0.5, y = 0.5)
vpi <- viewport(width = 0.45, height = 0.45, x = 0.5, y = 0.922, just = c("right", "top"))
print(map, vp = vpm)
print(inset, vp = vpi)
dev.off()
|
1bc9f58529e2e6549ad8920f4799912baf4054d9
|
3d5627f28dce2b2aa295c124e96c4f9ce9796b3a
|
/Lasso_and_Ridge.R
|
d32092ed90058b053f258f4e214d9fb0ac6841f1
|
[] |
no_license
|
Akhil-Koppera/L1-and-L2-regularization
|
8b2dba93153593ed7ece3ffc4050783a50946175
|
2959b34acf3f1ea9669621a102799b3f640e11ba
|
refs/heads/master
| 2021-01-05T00:49:27.766601
| 2020-02-16T02:42:55
| 2020-02-16T02:42:55
| 240,819,787
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 4,035
|
r
|
Lasso_and_Ridge.R
|
###########################################################################################################
## This code will work on the insurance company benchmark data set and
## compute the OLS estimates and compare them with those obtained from a few variable-selection algorithms.
## Name: Akhil Koppera
###########################################################################################################
# NOTE(review): rm(list = ls()) and setwd() assume this runs as a standalone
# script in a fixed location; kept as in the original workflow.
rm(list = ls())
setwd("F:/Statistical Data Mining")
#install.packages("leaps")
library(leaps)
## FIX: glmnet provides cv.glmnet()/glmnet() used in the ridge/lasso
## sections below but was never loaded, so those calls would fail.
library(glmnet)
## TIC 2000 insurance benchmark: V86 is the binary response (policy bought).
train_data <-read.table('ticdata2000.txt')
test_data <-read.table('ticeval2000.txt')
y_test<-read.table('tictgts2000.txt')
#View(train_data)
#View(test_data)
#train_data1 = sample(1:nrow(train_data), nrow(train_data)*0.80)
#test_data1 = -train_data1
#new_train_data = train_data[train_data1, ] # New Train Data
#new_test_data = train_data[test_data1, ] # New Test Data
## Interactive help lookups commented out so the script runs non-interactively.
# ?lm
# ?predict
## OLS baseline: regress V86 on all predictors, threshold predictions at 0.5.
lm_model<- lm(V86~., data =train_data)
lm_pred <- predict(lm_model,test_data)
lm_pred<-ifelse(lm_pred > 0.5,1,0)
which(lm_pred!=y_test)
lm_error<-mean((as.matrix(y_test) - lm_pred)^2)
lm_error
lm_reponse<-which(lm_pred==1)
#Forward and backward selection
# ?regsubsets
forward_selection<- regsubsets(V86~., data=train_data, method = "forward",nvmax = 85)
for_sum <- summary(forward_selection)
par(mfrow = c(2,2))
x11()
plot(for_sum$cp, xlab = "Number of Variables", ylab = "Cp", type = "l",main="forward_selection")
## Refit OLS using only the variables from the Cp-optimal forward model.
i<-which(for_sum$cp== min(for_sum$cp))
fwd_coef = coef(forward_selection, id = i)
fwd_temp_train <- train_data[names(fwd_coef)[2:(i+1)]]
fwd_train_lm <- lm(train_data$V86~., data = fwd_temp_train)
fwd_test_pred <- round(predict(fwd_train_lm, newdata=test_data),0)
fwd_test_errors = mean((as.matrix(y_test) - fwd_test_pred)^2)
fwd_test_errors
backward_selection<- regsubsets(V86~., data=train_data, method = "backward",nvmax = 85)
back_sum <- summary(backward_selection)
par(mfrow = c(2,2))
x11()
plot(back_sum$cp, xlab = "Number of Variables", ylab = "Cp", type = "l",main="backward_selection")
## Same refit for the Cp-optimal backward model.
i<-which(back_sum$cp== min(back_sum$cp))
back_coef = coef(backward_selection, id = i)
back_temp_train <- train_data[names(back_coef)[2:(i+1)]]
back_train_lm <- lm(train_data$V86~., data = back_temp_train)
back_test_pred <- round(predict(back_train_lm, newdata=test_data),0)
back_test_errors = mean((as.matrix(y_test) - back_test_pred)^2)
back_test_errors
##########################################################
######## Ridge and lasso models##########################
#########################################################
set.seed(555)
X<- as.matrix(train_data[,-86])
Y <- train_data$V86
x_test<-as.matrix(test_data)
###################################
# Model Selection
###################################
## Ridge (alpha = 0): choose lambda by cross-validation, refit, predict.
ridge_cv.out <- cv.glmnet(X,Y, alpha = 0)
plot(ridge_cv.out)
summary(ridge_cv.out)
names(ridge_cv.out)
plot(ridge_cv.out, main="Ridge")
ridge_bestlam <- ridge_cv.out$lambda.min
ridge_bestlam
ridge.mod<-glmnet(X,Y,alpha=0,lambda=ridge_bestlam)
ridge.pred<-round(predict(ridge.mod,s=ridge_bestlam ,newx=x_test),0)
#ridge_test_error<-classError(ridge.pred,test_data$Apps)$errorRate
#ridge_test_error
#ridge.pred <- predict(ridge.mod, s= bestlam, type = "coefficients")
#ridge.pred2 <- predict(ridge.mod, s = bestlam, newx = X[-train,], type = "response")
y_hat <- ridge.pred
ridge_test_error <- mean((y_hat - as.matrix(y_test))^2) #test_error
ridge_test_error
ridge_response<-which(ridge.pred==1)
######################################
# The LASSO
######################################
## Lasso (alpha = 1): same procedure as ridge.
lasso_cv.out <- cv.glmnet(X,Y, alpha = 1)
summary(lasso_cv.out)
plot(lasso_cv.out,main="Lasso")
names(lasso_cv.out)
lasso_bestlam <- lasso_cv.out$lambda.min
lasso_bestlam
lasso.mod<-glmnet(X,Y,alpha=1,lambda=lasso_bestlam)
lasso.pred<-round(predict(lasso.mod,s=lasso_bestlam ,newx=x_test),0)
lasso_test_error <- mean((lasso.pred - as.matrix(y_test))^2) #test_error
lasso_test_error
response<-which(lasso.pred==1)
actual_response<- which(y_test==1)
|
b107eb00e806ebff23f7223631545f69036b914d
|
4415b6ac85b893186495abbe286884403a56729c
|
/question2.R
|
6da0abf395b7764939d1d17e0dbee70e77492f01
|
[] |
no_license
|
yup111/stats_project
|
698b4f0634a97bf1f3849df25b7db7417ac6b3d6
|
fca5fb4e547cb8f6fc849a2e8631270451cfdb12
|
refs/heads/master
| 2020-03-12T12:06:31.536734
| 2018-04-23T01:12:54
| 2018-04-23T01:12:54
| 130,610,763
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 364
|
r
|
question2.R
|
## One-way ANOVA: do mean sorption rates differ between solvent types?
## Group sizes: solvent A = 9, C = 8, E = 15 observations (n = 32 total).
q2data <- data.frame(
  sorption_rates = c(1.06, 0.79, 0.82, 0.89, 1.05, 0.95, 0.65, 1.15, 1.12,
                     1.58, 1.12, 1.45, 0.91, 0.57, 0.83, 1.16,
                     0.43, 0.29, 0.43, 0.06, 0.51, 0.44, 0.10, 0.55, 0.53,
                     0.61, 0.34, 0.06, 0.09, 0.17, 0.17, 0.60),
  solvents = factor(rep(c("A", "C", "E"), times = c(9, 8, 15)))
)
model1 <- with(q2data, aov(sorption_rates ~ solvents))
summary(model1)
|
6932636b18898e942444aa0c64de59a33cbb6c0f
|
1e1939479e8014f48e7362a27be9dfc68719c6e8
|
/night/Rscript/nightlyRun_makeKnitrReport.R
|
7d92ea0a068754ae07e4eaa9e86eb7348ee1815f
|
[
"MIT"
] |
permissive
|
wotuzu17/tronador
|
bffec07586340bc5320d3baf092ba6388a6ee98c
|
8d55d26ab1accd0499e6264674408304a70d5e1b
|
refs/heads/master
| 2021-01-10T00:59:45.599781
| 2015-04-25T07:50:22
| 2015-04-25T07:50:22
| 32,209,141
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,892
|
r
|
nightlyRun_makeKnitrReport.R
|
#!/usr/bin/Rscript --vanilla
# This script creates an easily readable HTML file containing stock-symbol
# buy/sell recommendations for each analyzed version.
# input: matches list from /home/voellenk/tronador/dailyreport/Nuggets-YYYY-MM-DD.Rdata
# output: html file in Dropbox/constellation/dailyReport/Nuggets-YYYY-MM-DD.html
runtime <- Sys.time()
# load required packages
suppressPackageStartupMessages(library(optparse))
suppressPackageStartupMessages(library(knitr))
# global parameters follow
inputDir <- "/home/voellenk/tronador/dailyReport"
outputDir <- "/home/voellenk/Dropbox/constellation/dailyReport"
knitrfileDir <- "/home/voellenk/tronador_workdir/tronador/knitrfiles"
option_list <- list(
make_option(c("-v", "--verbose"), action="store_true", default=FALSE,
help="Print extra output [default]")
)
opt <- parse_args(OptionParser(option_list=option_list))
args <- commandArgs(trailingOnly=TRUE)
setwd(knitrfileDir)
# load matches list from current nugget file
# Loads `matches`: a per-version list of signal data frames (assumed --
# confirm against the producer of the Nuggets .Rdata file).
load(paste0(inputDir, "/Nuggets-", format(runtime, format="%Y-%m-%d"), ".Rdata"))
versions <- names(matches)
# create sorted table of best signals for each version
# NOTE(review): 1:length(matches) misbehaves if `matches` is empty;
# seq_along() would be safer.
tbllist <- list()
for(i in 1:length(matches)) {
this.version <- names(matches)[i]
if (nrow(matches[[this.version]]) > 0) {
this.match <- matches[[this.version]]
this.match$sym <- as.character(this.match$sym) # transform factor to character
this.match$ID <- as.character(this.match$ID) # transform factor to character
# One row per unique signal; nX columns hold the mean buy-minus-sell
# spread over an X-day period, filled in below.
dun <- unique(this.match[,c("sym", "date", "Close", "ATR20", "signal", "ID")])
dun <- cbind(dun, n3=NA, n5=NA, n10=NA, n20=NA)
for (n in c(3,5,10,20)) {
for(r in 1:nrow(dun)) {
# Locate the single row of this.match for this signal and period.
line <- this.match[this.match$sym == dun[r,"sym"] &
this.match$date == dun[r,"date"] &
this.match$signal == dun[r,"signal"] &
this.match$ID == dun[r,"ID"] &
this.match$period == n ,]
if (nrow(line) == 1) {
dun[r,c(paste0("n", n))] <- line[1,"mean.BS"] - line[1,"mean.SS"]
#dun <- dun[with(dun, order(-n10, sym, date)), ] # order findings
} else if (nrow(line) > 1) {
stop ("Error: Filter condition lead to more than one line. This must not happen.")
}
}
}
tbllist[[this.version]] <- dun
} else { # no obs found for this version
tbllist[[this.version]] <- data.frame()
}
}
if (opt$verbose == TRUE) {
print ("The tbllist is:")
# Bare `tbllist` is the value of this top-level if block, so it auto-prints.
tbllist
}
clean.tbllist <- list()
# tbllist still may contain multiple IDs for distinct symbols. needs to get filtered out.
# Keep, per symbol, only the row with the largest n10 spread.
filtered.tbllist <- list()
for (i in 1:length(names(tbllist))) {
dun <- tbllist[[names(tbllist)[i]]]
if (nrow(dun) > 0) { # omitting versions with no observations
filtered.tbllist[[names(tbllist)[i]]] <-
do.call(rbind,lapply(split(dun, dun$sym), function(chunk) chunk[which.max(chunk$n10),]))
}
}
if (opt$verbose == TRUE) {
print ("The filtered.tbllist is:")
filtered.tbllist
}
if (length(filtered.tbllist) > 0) {
# round numeric columns to 2 decimals
# (rounding below actually uses digits=3; comment kept from original.)
omit <- c("sym", "date", "signal", "ID")
for (i in 1:length(names(filtered.tbllist))) {
dun <- filtered.tbllist[[names(filtered.tbllist)[i]]]
leave <- match(omit, names(dun))
out <- dun
out[-leave] <- round(dun[-leave], digits=3)
out <- out[with(out, order(-n10, sym, date)), ] # order findings
clean.tbllist[[names(filtered.tbllist)[i]]] <- out
}
if (opt$verbose == TRUE) {
print ("The clean.tbllist is:")
clean.tbllist
}
} else {
print ("found no nuggets for today!")
}
# Render the report; dailyReport.Rmd reads clean.tbllist from this session.
knit2html("dailyReport.Rmd", output="temp.html")
# move finished html file to the dated Nuggets file in the Dropbox folder
file.copy("temp.html", paste0(outputDir, "/Nuggets-", format(runtime, format="%Y-%m-%d"), ".html"), overwrite=TRUE)
file.remove("temp.html")
|
9b4dc2e45d272e744205e02b5742d2875c9872f8
|
edffb272203ffb6afdeea34e8b6ed6920dba82a9
|
/compile.R
|
bae63d34408e8cb3ddd2209d19afc3249e4495fb
|
[] |
no_license
|
iSEE/screenshots
|
0cc5017437b5e13bfd061d5e16febe1b2ac4d9dc
|
5bd6d409e27fa88bf83aa6770b6ec2003a70b673
|
refs/heads/master
| 2022-12-07T05:34:37.195412
| 2020-04-09T05:15:11
| 2020-04-09T05:15:11
| 248,394,953
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 810
|
r
|
compile.R
|
# This script will consider all Rmarkdown files in the 'vignettes' subdirectory
# (typically as part of an R package structure) and execute them, taking
# appshots along the way whenever it sees a SCREENSHOT command.
#
# Usage: Rscript compile.R [file1.Rmd file2.Rmd ...]
# With no arguments, every .Rmd under 'vignettes/' is processed.
args <- commandArgs(trailingOnly=TRUE)
if (length(args) > 0) {
    all.assets <- args
} else {
    src.dir <- "vignettes"
    # FIX: escape the dot so the pattern matches a literal ".Rmd" suffix
    # (the unescaped "." matched any character before "Rmd").
    all.assets <- list.files(src.dir, full.names=TRUE, pattern="\\.Rmd$")
}

library(callr)

# Each document is rendered in a fresh R session (callr::r) so loaded
# packages and app state do not leak between vignettes.
for (fn in all.assets) {
    r(fun=function(fname) {
        # Rendered documents call SCREENSHOT(path) to capture the running app.
        SCREENSHOT <- function(x, delay=10) {
            # FIX: spell out 'showWarnings' (was partially matched as
            # 'showWarning', which relies on fragile partial arg matching).
            dir.create(dirname(x), recursive=TRUE, showWarnings=FALSE)
            webshot2::appshot(app, delay=delay, file=x) # bound to global 'app'.
        }
        rmarkdown::render(fname, run_pandoc=FALSE) # avoid need for the bib file.
    }, args=list(fname=fn), show=TRUE)
}
|
4cb578ec969fbba859658b4b81ec9a3a75befa23
|
640acf9187ab7bdc3ad71e2e622dd6e813cf03f9
|
/chapters/R/04-05-igf.R
|
ec3ca706a6b94090e605dfc484d140f19278d1f5
|
[] |
no_license
|
haziqj/phd-thesis
|
97a91cc335035ea136db7203045619ac9f978cb4
|
7e0cf4632c5efb83f43fa3e5cf881d334621599e
|
refs/heads/master
| 2021-09-23T10:33:21.065654
| 2021-09-09T15:09:47
| 2021-09-09T15:09:47
| 119,505,724
| 1
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,344
|
r
|
04-05-igf.R
|
# Chapter 4
# Random effects model (IGF data set)
# Compares an I-prior random-effects fit (iprior) against a classical linear
# mixed model (lme4::lmer) on the nlme IGF data, then plots per-Lot
# regression coefficients and variance parameters side by side.
# The "## ---- label ----" lines are knitr chunk markers referenced from the
# thesis Rmd; do not rename them.
source("00-prelim.R")
## ---- IGF.data ----
data(IGF, package = "nlme")
head(IGF)
## ---- IGF.mod.iprior ----
# I-prior model with age-by-Lot interaction, fitted via EM.
mod.iprior <- iprior(conc ~ age * Lot, IGF, method = "em")
summary(mod.iprior)
## ---- IGF.mod.iprior.plot ----
plot_fitted_multilevel(mod.iprior, facet = 1, cred.bands = FALSE,
extrapolate = TRUE, show.legend = FALSE)
## ---- IGF.mod.iprior.const ----
# Constrained model (both lambdas fixed at 0) for a likelihood-ratio test.
mod.iprior.const <- iprior(conc ~ age * Lot, IGF, method = "em",
est.lambda = FALSE, lambda = c(0, 0))
logLik(mod.iprior.const)
(D <- -2 * (logLik(mod.iprior.const) - logLik(mod.iprior)))
# NOTE(review): pchisq(D, df = 2) is the lower-tail probability P(X <= D);
# a likelihood-ratio p-value would be pchisq(D, df = 2, lower.tail = FALSE).
# Confirm which quantity is quoted in the text.
pchisq(D, df = 2)
## ---- IGF.mod.lmer ----
# Classical random intercept + random slope model for comparison.
(mod.lmer <- lmer(conc ~ age + (age | Lot), IGF))
round(coef(summary(mod.lmer)), 4)
## ---- IGF.mod.lmer.eigen ----
eigen(VarCorr(mod.lmer)$Lot)
## ---- IGf.prep.plot ----
# (chunk label has a lowercase 'f' -- kept as-is since the Rmd references it)
# Recover per-Lot intercepts/slopes from the I-prior fitted values by
# regressing fitted conc on age within each Lot.
grp <- unique(as.numeric(IGF$Lot))
beta.iprior <- matrix(NA, nrow = length(grp), ncol = 2)
tmp.df <- data.frame(x = IGF$age, y = fitted(mod.iprior)$y,
grp = as.numeric(IGF$Lot))
for (i in seq_along(grp)) {
beta.iprior[i, ] <- coef(lm(y ~ x, tmp.df[tmp.df$grp == grp[i], ]))
}
# lmer per-Lot coefficients and fixed effects for both models.
beta.lmer <- coef(mod.lmer)$Lot
beta.fixed.iprior <- coef(lm(y ~ x, tmp.df))
beta.fixed.lmer <- mod.lmer@beta
beta.fixed.df <- data.frame(
beta = c(beta.fixed.iprior, beta.fixed.lmer),
type = rep(c("Intercept", "Slope")),
model = rep(c("iprior", "lmer"), each = 2)
)
# Empirical random-effect SDs/correlation for the I-prior fit, and the
# corresponding estimated quantities from lmer's VarCorr.
Sigma.iprior <- cov(beta.iprior)
sigma0.iprior <- sqrt(Sigma.iprior[1, 1])
sigma1.iprior <- sqrt(Sigma.iprior[2, 2])
corr.iprior <- cor(beta.iprior)[1, 2] # Sigma.iprior[1, 2] / (sigma0.iprior * sigma1.iprior)
Sigma.lmer <- VarCorr(mod.lmer)[[1]]
sigma0.lmer <- attr(Sigma.lmer, "stddev")[1]
sigma1.lmer <- attr(Sigma.lmer, "stddev")[2]
corr.lmer <- attr(Sigma.lmer, "correlation")[1, 2]
# Long-format data frames for ggplot: per-Lot coefficients ...
plot.df.beta <- data.frame(
beta = as.numeric(beta.iprior),
Lot = unique(IGF$Lot),
type = rep(c("Intercept", "Slope"), each = 10),
model = "iprior"
)
plot.df.beta <- rbind(plot.df.beta, data.frame(
beta = unlist(beta.lmer),
Lot = unique(IGF$Lot),
type = rep(c("Intercept", "Slope"), each = 10),
model = "lmer"
))
# ... and variance parameters (plotmath names for axis labels).
plot.df.param <- data.frame(
param = c(sigma0.iprior, sigma1.iprior, corr.iprior,
sigma0.lmer, sigma1.lmer, corr.lmer),
name = rep(c("sigma[0]", "sigma[1]", "sigma[01]"), 2),
model = rep(c("iprior", "lmer"), each = 3)
)
plot.df.param$name <- factor(plot.df.param$name,
levels = c("sigma[01]", "sigma[1]", "sigma[0]"))
## ---- IGF.plot.beta ----
# Invisible (alpha = 0) "fake" points at hand-picked values widen the panel
# limits so the real points and labels are not clipped.
plot.df.fake <- plot.df.beta[c(19, 39, 9, 29), ]
plot.df.fake[1, 1] <- 0.02 # slopes
plot.df.fake[2, 1] <- -0.02
plot.df.fake[3, 1] <- 5.45 # intercepts
plot.df.fake[4, 1] <- 5.25
ggplot(plot.df.beta) +
geom_point(aes(beta, Lot, col = model)) +
geom_point(data = plot.df.fake, aes(beta, Lot, col = model), alpha = 0) +
geom_vline(data = beta.fixed.df, aes(xintercept = beta, col = model),
linetype = "dashed") +
facet_grid(. ~ type, scales = "free") +
theme_bw() +
theme(legend.position = "top") +
labs(colour = "Model", x = expression(beta))
## ---- IGF.plot.param ----
ggplot(plot.df.param) +
geom_point(aes(name, param, col = model),
position = position_dodge(width = 0.2)) +
coord_flip() +
labs(x = NULL, y = NULL) +
theme_bw()
|
c2f3bac5f2d6fa5cfea574ddffd7ddd91392917b
|
3632465c101324fc2ee5ad8dec22f45b30130c0c
|
/R/data.R
|
ac9623f0a40d9cbcb433fbcefef10ad5e028a6f2
|
[
"LicenseRef-scancode-warranty-disclaimer",
"CC0-1.0",
"LicenseRef-scancode-public-domain"
] |
permissive
|
ddalthorp/GenEst
|
3921d5b4b48936dbe41667d5221b36a7d620735b
|
7dfc443913da2fb7d66d7a3553ac69714468422c
|
refs/heads/master
| 2023-08-16T23:17:34.073169
| 2023-05-29T01:23:46
| 2023-05-29T01:23:46
| 97,149,192
| 8
| 9
|
NOASSERTION
| 2023-05-25T17:45:11
| 2017-07-13T17:33:07
|
R
|
UTF-8
|
R
| false
| false
| 46,963
|
r
|
data.R
|
# Mock ------------------------------
#' A mock example data set
#'
#' A template dataset used for testing purposes. Dataset containing SE, CP, SS,
#' DWP, and CO data. Data are mostly random without patterns.
#'
#' @format A list with 5 items:
#' \describe{
#' \item{SE}{Searcher efficiency trial data}
#' \item{CP}{Carcass persistence trial data}
#' \item{SS}{Search schedule data}
#' \item{DWP}{Density weighted proportion of area searched data}
#' \item{CO}{Carcass observations}
#' }
#' @source \code{mock}
"mock"
# Cleared ------------------------------
#' Wind cleared plot (60m) Search Example
#'
#' A complete example data set for estimating fatalities from 60 m cleared plots
#' at 23 out of 100 searches at a wind power facility. Data on carcass
#' observations (CO) from a search of all terrain out to 60m from each of 100
#' turbines at a theoretical site, field trials for estimating carcass
#' persistence (CP) and searcher efficiency (SE), search schedule (SS)
#' parameters (for example, which turbines were searched on which days), and
#' density weighted proportion (DWP) of area searched at each turbine (which is
#' an area adjustment factor to account for incomplete search coverage).
#'
#' @format \code{wind_cleared} is a list with 5 elements:
#' \describe{
#' \item{\code{SE}}{Searcher efficiency trial data}
#' \item{\code{CP}}{Carcass persistence trial data}
#' \item{\code{SS}}{Search schedule parameters}
#' \item{\code{DWP}}{Density weighted proportion of area searched}
#' \item{\code{CO}}{Carcass observations}
#' }
#'
#' @section Searcher Efficiency (\code{SE}):
#' \code{$SE} is a data frame with each row representing the fate of a single
#' carcass in the searcher efficiency trials. There are columns for:
#' \describe{
#' \item{\code{pkID}}{unique ID for each carcass}
#' \item{\code{Size}}{\code{"bat"}; or \code{"lrg"}, \code{"med"}, or
#' \code{"sml"} bird}
#' \item{\code{Season}}{\code{"spring"}, \code{"summer"}, or \code{"fall"}}
#' \item{\code{Visibility}}{indicator for visibility class of the ground, with
#' \code{"RP"} for carcasses placed on a road or turbine pad, \code{"M"}
#' for moderate visibility (e.g., plowed field; short, sparse vegetation),
#' or \code{"D"} for difficult visibility}
#' \item{\code{"s1",...,"s5"}}{fate of carcass on the 1st, 2nd, 3rd, 4th, and
#' 5th search after placement. A value of 1 implies that a carcass was
#' discovered by searchers, 0 implies the carcass was present but not
#' discovered, and any other value is interpreted as "no search" or
#' "carcass not present" and ignored in the model. In this data set,
#' \code{NA} indicates that a carcass had been previously discovered and
#' removed from the field. A user may use a variety of values to
#' differentiate different reasons no search was conducted or the carcass
#' was not present. For example, "SN" could be used to indicate that the
#' turbine was not searched because of snow, or "NS" to indicate the search
#' was not scheduled in that location at that time, or "SC" to indicate the
#' carcass had been removed by scavengers prior to the search.}
#' }
#'
#' @section Carcass Persistence (\code{CP}):
#' \code{$CP} is a data frame with each row representing the fate of a single
#' carcass in the carcass persistence trials. There are columns for:
#' \describe{
#' \item{\code{cpID}}{unique ID for each carcass}
#' \item{\code{Size}}{\code{"bat"}; or \code{"lrg"}, \code{"med"}, or
#' \code{"sml"} bird}
#' \item{\code{Season}}{\code{"spring"}, \code{"summer"}, or \code{"fall"}}
#' \item{\code{Visibility}}{indicator for visibility class of the ground, with
#' \code{"RP"} for carcasses placed on a road or turbine pad, \code{"M"} for
#' moderate visibility (e.g., plowed field; short, sparse vegetation), or
#' \code{"D"} for difficult visibility.} \item{\code{LastPresent},
#' \code{FirstAbsent}}{endpoints of the interval bracketing the time the carcass
#' was scavenged or otherwise removed from the field. For example,
#' \code{LastPresent = 2.04}, \code{FirstAbsent = 3.21} indicates that the carcass was
#' last observed 2.04 days after being placed in the field and was noted
#' missing 3.21 days after being placed. If the precise time of carcass
#' removal is known (e.g., recorded by camera), then \code{LastPresent} and
#' \code{FirstAbsent} should be set equal to each other. If a carcass persists
#' beyond the last day of the field trial, \code{LastPresent} is the last time it
#' was observed and \code{FirstAbsent} is entered as \code{Inf} or \code{NA}.}
#' }
#'
#' @section Search Schedule (\code{SS}):
#' \code{$SS} is a data frame with a row for each date a turbine at the site
#' was searched, a column of \code{SearchDate}s, and a column for each turbine.
#' In addition, there is a column to indicate the \code{Season}. A column with
#' search dates and columns for each turbine searched are required. Other
#' columns are optional.
#' \describe{
#' \item{\code{SearchDate}}{columns of dates on which at least one turbine was
#' searched. Format in this data is \code{"\%Y-\%m-\%d CDT"}, but time zone
#' (\code{CDT}) is optional. A time stamp may be included if desired (e.g.,
#' \code{2018-03-20 02:15:41}). Alternatively, \code{\\} can be used in place
#' of \code{-}.}
#' \item{\code{Season}}{\code{"spring"}, \code{"summer"}, or \code{"fall"} to
#' indicate which season the search was conducted in. \code{Season} is
#' optional but may be used as a temporal covariate for fatality estimates.}
#' \item{\code{t1}, etc.}{unique ID for all turbines that were searched on at
#' least one search date. Values are either 1 or 0, indicating whether the
#' given turbine (column) was searched or not on the given date (row).}
#' }
#'
#' @section Density Weighted Proportion (\code{DWP}):
#' \code{$DWP} is a data frame with a row for each turbine and columns for
#' each carcass size class. Values represent the density-weighted proportion
#' of the searched area for each size (or the fraction of carcasses that fall
#' in the searched area).
#' \describe{
#' \item{\code{Turbine}}{unique ID for each turbine. IDs match those used in
#' the \code{$CO} data frame and the column names in the \code{$SS} data.}
#' \item{\code{Size}}{\code{bat}, \code{sml}, \code{med}, \code{lrg}}
#' \item{\code{Season}}{\code{"spring"}, \code{"summer"}, or \code{"fall"} to
#' indicate which season the search was conducted in. \code{Season} is
#' optional but may be used as a temporal covariate for fatality estimates.}}
#'
#' @section Carcass Observations (\code{CO}):
#' \code{$CO} is a data frame with a row for carcass observed in the carcass
#' searches and a number of columns giving information about the given carcass
#' (date found, size, species, etc.)
#' \describe{
#' \item{\code{carcID}}{unique identifier for each carcass: \code{"x30"},
#' \code{"x46"}, etc.}
#' \item{\code{Turbine}}{identifier for which turbine the given carcass was
#' found at: \code{"t19"}, \code{"t65"}, \code{"t49"}, etc.}
#' \item{\code{TurbineType}}{the type of turbine: \code{"X"}, \code{"Y"} or
#' \code{"Z"}. }
#' \item{\code{DateFound}}{dates entered in the same format as in
#' \code{$SS$SearchDate}. Every date entered here is (and must be) included
#' in the search schedule (\code{$SS$SearchDate})}
#' \item{\code{Visibility}}{visibility class: \code{"RP"}, \code{"M"}, or
#' \code{"D"}, as described in \code{$CP} and \code{$SE}}
#' \item{\code{Species}}{species of the carcass: \code{"BA"}, \code{"BB"},
#' \code{"BC"}, \code{"BD"}, \code{"BE"}, \code{"LA"}, \code{"LB"},
#' \code{"LD"}, \code{"LE"}, \code{"MA"}, \code{"MB"}, \code{"SA"},
#' \code{"SB"}, \code{"SC"}, \code{"SD"}, \code{"SE"}, \code{"SF"},
#' \code{"SG"}}
#' \item{\code{SpeciesGroup}}{species group: \code{"bat0"}, \code{"bat1"},
#' \code{"brd1"}, \code{"brd2"}, \code{"brd3"}}
#' \item{\code{Size}}{size: \code{"bat"}, \code{"lrg"}, \code{"med"},
#' \code{"sml"}}
#' \item{\code{Distance}}{distance from the turbine}
#' }
#'
#' @source \code{wind_cleared}
"wind_cleared"
# RP ------------------------------
#' Wind Road and Pad (120m) Example
#'
#' This example dataset is based on 120 m radius road and pad searches of all
#' 100 turbines at a theoretical site. The simulated site consists of 100
#' turbines, searched on roads and pads only, out to 120 meters. Search
#' schedule differs by turbine and season, with more frequent searches in the
#' fall, and a subset of twenty turbines searched at every scheduled search.
#'
#' Data on carcass observations (CO) from searches, field trials for estimating
#' carcass persistence (CP) and searcher efficiency (SE), search schedule (SS)
#' parameters (for example, which turbines were searched on which days), and
#' density weighted proportion (DWP) of area searched at each turbine (which is
#' an area adjustment factor to account for incomplete search coverage).
#'
#' @format \code{wind_RP} is a list with 5 elements:
#' \describe{
#' \item{\code{SE}}{Searcher efficiency trial data}
#' \item{\code{CP}}{Carcass persistence trial data}
#' \item{\code{SS}}{Search schedule parameters}
#' \item{\code{DWP}}{Density weighted proportion of area searched}
#' \item{\code{CO}}{Carcass observations}
#' }
#'
#' @section Searcher Efficiency (\code{SE}):
#' \code{$SE} is a data frame with each row representing the fate of a single
#' carcass in the searcher efficiency trials. There are columns for:
#' \describe{
#' \item{\code{pkID}}{unique ID for each carcass}
#' \item{\code{Size}}{\code{"bat"}; or \code{"lrg"}, \code{"med"}, or
#' \code{"sml"} bird}
#' \item{\code{Season}}{\code{"spring"}, \code{"summer"}, or \code{"fall"}}
#' \item{\code{"s1",...,"s5"}}{fate of carcass on the 1st, 2nd, 3rd, 4th, and
#' 5th search after placement. A value of 1 implies that a carcass was
#' discovered by searchers, 0 implies the carcass was present but not
#' discovered, and any other value is interpreted as "no search" or
#' "carcass not present" and ignored in the model. In this data set,
#' \code{NA} indicates that a carcass had been previously discovered and
#' removed from the field. A user may use a variety of values to
#' differentiate different reasons no search was conducted or the carcass
#' was not present. For example, "SN" could be used to indicate that the
#' turbine was not searched because of snow, or "NS" to indicate the search
#' was not scheduled in that location at that time, or "SC" to indicate the
#' carcass had been removed by scavengers prior to the search.}
#' }
#' @section Carcass Persistence (\code{CP}):
#' \code{$CP} is a data frame with each row representing the fate of a single
#' carcass in the carcass persistence trials. There are columns for:
#' \describe{
#' \item{\code{cpID}}{unique ID for each carcass}
#' \item{\code{Size}}{\code{"bat"}; or \code{"lrg"}, \code{"med"}, or
#' \code{"sml"} bird.}
#' \item{\code{Season}}{\code{"spring"}, \code{"summer"}, or \code{"fall"}}
#' \item{\code{LastPresent}, \code{FirstAbsent}}{endpoints of the interval bracketing the
#' time the carcass was scavenged or otherwise removed from the field. For
#' example, \code{LastPresent = 2.04}, \code{FirstAbsent = 3.21} indicates that the carcass
#' was last observed 2.04 days after being placed in the field and was noted
#' missing 3.21 days after being placed. If the precise time of carcass
#' removal is known (e.g., recorded by camera), then \code{LastPresent} and
#' \code{FirstAbsent} should be set equal to each other. If a carcass persists
#' beyond the last day of the field trial, \code{LastPresent} is the last time it
#' was observed and \code{FirstAbsent} is entered as \code{Inf} or \code{NA}.}
#' }
#'
#' @section Search Schedule (\code{SS}):
#' \code{$SS} is a data frame with a row for each date a turbine at the site
#' was searched, a column of \code{SearchDate}s, and a column for each turbine.
#' In addition, there is a column to indicate the \code{Season}. A column with
#' search dates and columns for each turbine searched are required. Other
#' columns are optional.
#' \describe{
#' \item{\code{SearchDate}}{columns of dates on which at least one turbine was
#' searched. Format in this data is \code{"\%Y-\%m-\%d CDT"}, but time zone
#' (\code{CDT}) is optional. A time stamp may be included if desired
#' (e.g., \code{2018-03-20 02:15:41}). Alternatively, \code{\\} can be used
#' in place of \code{-}.}
#' \item{\code{Season}}{\code{"spring"}, \code{"summer"}, or \code{"fall"} to
#' indicate which season the search was conducted in. \code{Season} is
#' optional but may be used as a temporal covariate for fatality estimates.}
#' \item{\code{t1}, etc.}{unique ID for all turbines that were searched on at
#' least one search date. Values are either 1 or 0, indicating whether the
#' given turbine (column) was searched or not on the given date (row).}
#' }
#'
#' @section Density Weighted Proportion (\code{DWP}):
#' \code{$DWP} is a data frame with a row for each turbine and columns for
#' each carcass size class. Values represent the density-weighted proportion
#' of the searched area for each size (or the fraction of carcasses that fall
#' in the searched area).
#' \describe{
#' \item{\code{Turbine}}{unique ID for each turbine. IDs match those used in
#' the \code{$CO} data frame and the column names in the \code{$SS} data.}
#' \item{\code{bat}}{DWP associated with size class Bat.}
#' \item{\code{sml}}{DWP associated with size class Small.}
#' \item{\code{med}}{DWP associated with size class Medium.}
#' \item{\code{lrg}}{DWP associated with size class Large.}
#' }
#'
#' @section Carcass Observations (\code{CO}):
#' \code{$CO} is a data frame with a row for carcass observed in the carcass
#' searches and a number of columns giving information about the given
#' carcass (date found, size, species, etc.)
#' \describe{
#' \item{\code{carcID}}{unique identifier for each carcass: \code{"x30"},
#' \code{"x46"}, etc.}
#' \item{\code{Turbine}}{identifier for which turbine the given carcass was
#' found at: \code{"t19"}, \code{"t65"}, \code{"t49"}, etc.}
#' \item{\code{TurbineType}}{the type of turbine: \code{"X"}, \code{"Y"} or
#' \code{"Z"}. }
#' \item{\code{DateFound}}{dates entered in the same format as in
#' \code{$SS$SearchDate}. Every date entered here is (and must be) included
#'     in the search schedule (\code{$SS$SearchDate})}
#' \item{\code{Species}}{species of the carcass: \code{"BA"}, \code{"BB"},
#' \code{"BC"}, \code{"BD"}, \code{"BE"}, \code{"LA"}, \code{"LB"},
#' \code{"LD"}, \code{"LE"}, \code{"MA"}, \code{"MB"}, \code{"SA"},
#' \code{"SB"}, \code{"SC"}, \code{"SD"}, \code{"SE"}, \code{"SF"},
#' \code{"SG"}}
#' \item{\code{SpeciesGroup}}{species group: \code{"bat0"}, \code{"bat1"},
#' \code{"brd1"}, \code{"brd2"}, \code{"brd3"}}
#' \item{\code{Size}}{size: \code{"bat"}, \code{"lrg"}, \code{"med"},
#' \code{"sml"}}
#' \item{\code{Distance}}{distance from the turbine}
#' }
#'
#' @source \code{wind_RP}
"wind_RP"
# RPbat ------------------------------
#' Wind Bat-Only Road and Pad (120m) Example
#'
#' This example dataset considers only bats found on 120 m radius road and pad
#' searches of all 100 turbines at a theoretical site. The simulated site
#' consists of 100 turbines, searched on roads and pads only, out to 120
#' meters. Search schedule differs by turbine and season, with more frequent
#' searches in the fall, and a subset of twenty turbines searched at every
#' scheduled search.
#'
#' Data on carcass observations (CO) from searches, field trials for estimating
#' carcass persistence (CP) and searcher efficiency (SE), search schedule (SS)
#' parameters (for example, which turbines were searched on which days), and
#' density weighted proportion (DWP) of area searched at each turbine (which is
#' an area adjustment factor to account for incomplete search coverage).
#'
#' @format \code{wind_RPbat} is a list with 5 elements:
#' \describe{
#' \item{\code{SE}}{Searcher efficiency trial data}
#' \item{\code{CP}}{Carcass persistence trial data}
#' \item{\code{SS}}{Search schedule parameters}
#' \item{\code{DWP}}{Density weighted proportion of area searched}
#' \item{\code{CO}}{Carcass observations}
#' }
#'
#' @section Searcher Efficiency (\code{SE}):
#' \code{$SE} is a data frame with each row representing the fate of a single
#' carcass in the searcher efficiency trials. There are columns for:
#' \describe{
#' \item{\code{pkID}}{unique ID for each carcass}
#' \item{\code{Season}}{\code{"spring"}, \code{"summer"}, or \code{"fall"}}
#' \item{\code{"s1",...,"s5"}}{fate of carcass on the 1st, 2nd, 3rd, 4th, and
#' 5th search after placement. A value of 1 implies that a carcass was
#' discovered by searchers, 0 implies the carcass was present but not
#' discovered, and any other value is interpreted as "no search" or
#' "carcass not present" and ignored in the model. In this data set,
#' \code{NA} indicates that a carcass had been previously discovered and
#' removed from the field. A user may use a variety of values to
#' differentiate different reasons no search was conducted or the carcass
#' was not present. For example, "SN" could be used to indicate that the
#' turbine was not searched because of snow, or "NS" to indicate the search
#' was not scheduled in that location at that time, or "SC" to indicate the
#' carcass had been removed by scavengers prior to the search.}
#' }
#' @section Carcass Persistence (\code{CP}):
#' \code{$CP} is a data frame with each row representing the fate of a single
#' carcass in the carcass persistence trials. There are columns for:
#' \describe{
#' \item{\code{cpID}}{unique ID for each carcass}
#' \item{\code{Season}}{\code{"spring"}, \code{"summer"}, or \code{"fall"}}
#' \item{\code{LastPresent}, \code{FirstAbsent}}{endpoints of the interval bracketing
#' the time the carcass was scavenged or otherwise removed from the field.
#' For example, \code{LastPresent = 2.04}, \code{FirstAbsent = 3.21} indicates that the
#' carcass was last observed 2.04 days after being placed in the field and
#' was noted missing 3.21 days after being placed. If the precise time of
#' carcass removal is known (e.g., recorded by camera), then \code{LastPresent} and
#' \code{FirstAbsent} should be set equal to each other. If a carcass persists
#' beyond the last day of the field trial, \code{LastPresent} is the last time it
#' was observed and \code{FirstAbsent} is entered as \code{Inf} or \code{NA}.}
#' }
#' @section Search Schedule (\code{SS}):
#' \code{$SS} is a data frame with a row for each date a turbine at the site
#' was searched, a column of \code{SearchDate}s, and a column for each turbine.
#' In addition, there is a column to indicate the \code{Season}. A column with
#' search dates and columns for each turbine searched are required. Other
#' columns are optional.
#' \describe{
#' \item{\code{SearchDate}}{columns of dates on which at least one turbine was
#' searched. Format in this data is \code{"\%Y-\%m-\%d CDT"}, but time zone
#' (\code{CDT}) is optional. A time stamp may be included if desired
#' (e.g., \code{2018-03-20 02:15:41}). Alternatively, \code{\\} can be used
#' in place of \code{-}.}
#' \item{\code{Season}}{\code{"spring"}, \code{"summer"}, or \code{"fall"} to
#' indicate which season the search was conducted in. \code{Season} is
#' optional but may be used as a temporal covariate for fatality estimates.}
#' \item{\code{t1}, etc.}{unique ID for all turbines that were searched on at
#' least one search date. Values are either 1 or 0, indicating whether the
#' given turbine (column) was searched or not on the given date (row).}
#' }
#' @section Density Weighted Proportion (\code{DWP}):
#' \code{$DWP} is a data frame with a row for each turbine and columns for
#' each carcass size class. Values represent the density-weighted proportion
#' of the searched area for each size (or the fraction of carcasses that fall
#' in the searched area).
#' \describe{
#' \item{\code{Turbine}}{unique ID for each turbine. IDs match those used in
#' the \code{$CO} data frame and the column names in the \code{$SS} data.}
#' \item{\code{bat}}{Contains the DWP for each turbine, with respect to size
#'     class (in this case, bats only).}
#' }
#'
#' @section Carcass Observations (\code{CO}):
#' \code{$CO} is a data frame with a row for carcass observed in the carcass
#' searches and a number of columns giving information about the given
#' carcass (date found, size, species, etc.)
#' \describe{
#' \item{\code{carcID}}{unique identifier for each carcass: \code{"x30"},
#' \code{"x46"}, etc.}
#' \item{\code{Turbine}}{identifier for which turbine the given carcass was
#' found at: \code{"t19"}, \code{"t65"}, \code{"t49"}, etc.}
#' \item{\code{TurbineType}}{the type of turbine: \code{"X"}, \code{"Y"} or
#' \code{"Z"}. }
#' \item{\code{DateFound}}{dates entered in the same format as in
#' \code{$SS$SearchDate}. Every date entered here is (and must be) included
#'     in the search schedule (\code{$SS$SearchDate})}
#' \item{\code{Species}}{species of the carcass: \code{"BA"}, \code{"BB"},
#' \code{"BC"}, \code{"BD"}, \code{"BE"}, \code{"LA"}, \code{"LB"},
#' \code{"LD"}, \code{"LE"}, \code{"MA"}, \code{"MB"}, \code{"SA"},
#' \code{"SB"}, \code{"SC"}, \code{"SD"}, \code{"SE"}, \code{"SF"},
#' \code{"SG"}}
#' \item{\code{SpeciesGroup}}{species group: \code{"bat0"}, \code{"bat1"},
#' \code{"brd1"}, \code{"brd2"}, \code{"brd3"}}
#' \item{\code{Distance}}{Distance from the turbine.}
#' }
#' @source \code{wind_RPbat}
"wind_RPbat"
# Trough ------------------------------
#' Trough-based solar thermal power simulated example
#'
#' An example data set for estimating fatalities from a trough-based solar
#' thermal electric power generation facility. The simulated site is inspected
#' daily along ten 2000 meter long transects, which run north-south. Observers
#' look up to 150 meters away down the rows created by troughs (east-west).
#' One sided distance sampling will be used, with observers looking consistently
#' in one cardinal direction as they travel through the facility. A sitewide
#' clearout search is implemented before the first scheduled winter search.
#'
#' The dataset consists of five parts: Data on carcass observations (CO) from
#' daily searches, field trials for estimating carcass persistence (CP) and
#' searcher efficiency (SE), search schedule (SS), and density weighted
#' proportion (DWP) of area searched for the rows within each transect (which is
#' an area adjustment factor to account for incomplete search coverage).
#'
#' @format \code{solar_trough} is a list with 5 elements:
#' \describe{
#' \item{\code{SE}}{Searcher efficiency trial data}
#' \item{\code{CP}}{Carcass persistence trial data}
#' \item{\code{SS}}{Search schedule parameters}
#' \item{\code{DWP}}{Density weighted proportion of area searched}
#' \item{\code{CO}}{Carcass observations}
#' }
#' @section Searcher Efficiency (\code{SE}):
#' \code{$SE} is a data frame with each row representing the fate of a single
#' carcass in the searcher efficiency trials. There are columns for:
#' \describe{
#' \item{\code{Season}}{\code{"winter"}, \code{"spring"}, \code{"summer"}, or
#' \code{"fall"}}
#' \item{\code{Size}}{\code{"bat"}; or \code{"lrg"}, \code{"med"}, or
#' \code{"sml"} bird}
#' \item{\code{"Search1",...,"Search5"}}{fate of carcass on the 1st, 2nd, 3rd,
#' 4th, and 5th search after placement. A value of 1 implies that a carcass
#' was discovered by searchers, 0 implies the carcass was present but not
#' discovered, and any other value is interpreted as "no search" or
#' "carcass not present" and ignored in the model. In this data set,
#' \code{NA} indicates that a carcass had been previously discovered and
#' removed from the field. A user may use a variety of values to
#' differentiate different reasons no search was conducted or the carcass
#' was not present. For example, "NS" to indicate the search
#' was not scheduled in that location at that time, or "SC" to indicate the
#' carcass had been removed by scavengers prior to the search.}
#' \item{\code{Distance}}{the distance a carcass was placed from the
#' observer's transect.}
#' }
#' @section Carcass Persistence (\code{CP}):
#' \code{$CP} is a data frame with each row representing the fate of a single
#' carcass in the carcass persistence trials. There are columns for:
#' \describe{
#' \item{\code{Index}}{unique ID for each carcass}
#' \item{\code{Season}}{\code{"winter"}, \code{"spring"}, \code{"summer"}, or
#' \code{"fall"}}
#' \item{\code{Size}}{\code{"bat"}; or \code{"lrg"}, \code{"med"}, or
#' \code{"sml"} bird}
#' \item{\code{LastPresent}, \code{FirstAbsent}}{endpoints of the interval bracketing the
#' time the carcass was scavenged or otherwise removed from the field. For
#' example, \code{LastPresent = 2.04}, \code{FirstAbsent = 3.21} indicates that the carcass
#' was last observed 2.04 days after being placed in the field and was noted
#' missing 3.21 days after being placed. If the precise time of carcass
#' removal is known (e.g., recorded by camera), then \code{LastPresent} and
#' \code{FirstAbsent} should be set equal to each other. If a carcass persists
#' beyond the last day of the field trial, \code{LastPresent} is the last time it was
#' observed and \code{FirstAbsent} is entered as \code{Inf} or \code{NA}.}
#' }
#'
#' @section Search Schedule (\code{SS}):
#' \code{$SS} is a data frame with a row for each date a transect at the site
#' was searched, a column of \code{SearchDate}s, and a column for each
#' transect. In addition, there is an optional column to indicate the
#' \code{Season}. The columns for distinct area (array) and the date column
#' are required, and the names of the columns for search areas must match the
#' names of areas used in the DWP and CO files.
#' \describe{
#' \item{\code{SearchDate}}{columns of dates when a transect was searched.
#' Format in this data is \code{"\%Y-\%m-\%d CDT"}, but time zone (\code{CDT})
#' is optional. A time stamp may be included if desired (e.g.,
#' \code{2018-03-20 02:15:41}). Alternatively, \code{\\} can be used in
#' place of \code{-}.}
#' \item{\code{Season}}{\code{"winter"}, \code{"spring"}, \code{"summer"}, or
#' \code{"fall"} to indicate which season the search was conducted in.
#' \code{Season} is optional but may be used as a temporal covariate for
#' fatality estimates.}
#' }
#'
#' @section Density Weighted Proportion (\code{DWP}):
#' \code{$DWP} is a data frame with a row for each transect and columns for
#' each carcass size class (labels must match those of the class factors in the
#' carcass observation file). Values represent the density-weighted proportion
#'  of the searched area for each size (or the fraction of carcasses that fall
#' in the searched area). Since the whole site was searched, DWP is uniformly
#' set equal to 1.
#' \describe{
#' \item{\code{Unit}}{unique ID for each transect. IDs match those used in
#' the \code{$CO} data frame and the column names in the \code{$SS} data.}
#' \item{\code{bat}}{DWP associated with size class Bat}
#' \item{\code{sml}}{DWP associated with size class Small}
#' \item{\code{med}}{DWP associated with size class Medium}
#' \item{\code{lrg}}{DWP associated with size class Large}
#' }
#'
#' @section Carcass Observations (\code{CO}):
#' \code{$CO} is a data frame with a row for carcass observed in the carcass
#' searches and a number of columns giving information about the given carcass
#' (date found, size, species, etc.)
#' \describe{
#' \item{\code{Index}}{unique identifier for each carcass.}
#' \item{\code{Unit}}{identifier for which transect the given carcass was
#' found at. Values must match with DWP Transect values Search Schedule
#' column names.}
#' \item{\code{Species}}{species of the carcass: \code{"BA"}, \code{"BB"},
#' \code{"BC"}, \code{"BD"}, \code{"BE"}, \code{"LA"}, \code{"LB"},
#' \code{"LD"}, \code{"LE"}, \code{"MA"}, \code{"MB"}, \code{"SA"},
#' \code{"SB"}, \code{"SC"}, \code{"SD"}, \code{"SE"}, \code{"SF"},
#' \code{"SG"}}
#' \item{\code{Size}}{size: \code{"bat"}, \code{"lrg"}, \code{"med"},
#' \code{"sml"}}
#' \item{\code{Row}}{Optional indicator of which row within an array a carcass
#' was found at.}
#' \item{\code{Distance}}{The perpendicular distance from the searcher's
#' transect at which the carcass was discovered at.}
#' \item{\code{DateFound}}{dates entered in the same format as in
#' \code{$SS$SearchDate}.
#' Every date entered here is (and must be) included in the search schedule
#' (\code{$SS$SearchDate})}
#' \item{\code{X}}{UTM Easting of carcass.}
#' \item{\code{Y}}{UTM Northing of carcass.}
#' }
#'
#' @source \code{solar_trough}
"solar_trough"
# PV ------------------------------
#' Photovoltaic Example Dataset
#'
#' An example data set for estimating fatalities from a large photovoltaic solar
#' generation facility.
#'
#' The simulated site is organized into 300 arrays of panels. As observers walk
#' north-south along paths between arrays, they look east or west down rows
#' between solar panels 150 meters long, with 38 searchable rows per array.
#' Observers consistently look for animals down one cardinal direction, making
#' this a one-sided distance sample. Searches are scheduled on a seven day
#' rotation, with 60 arrays searched per weekday. A sitewide clearout search
#' is implemented before the first scheduled winter search.
#'
#' The dataset consists of five parts: Data on carcass observations (CO) from
#' array searches, field trials for estimating carcass persistence (CP) and
#' searcher efficiency (SE), search schedule (SS), and density weighted
#' proportion (DWP) of area searched at each array (which is an area adjustment
#' factor to account for incomplete search coverage).
#'
#' @format \code{solar_PV} is a list with 5 elements:
#' \describe{
#' \item{\code{SE}}{Searcher efficiency trial data}
#' \item{\code{CP}}{Carcass persistence trial data}
#' \item{\code{SS}}{Search schedule parameters}
#' \item{\code{DWP}}{Density weighted proportion of area searched}
#' \item{\code{CO}}{Carcass observations}
#' }
#' @section Searcher Efficiency (\code{SE}):
#' \code{$SE} is a data frame with each row representing the fate of a single
#' carcass in the searcher efficiency trials. There are columns for:
#' \describe{
#' \item{\code{Season}}{\code{"winter"}, \code{"spring"}, \code{"summer"}, or
#' \code{"fall"}}
#' \item{\code{Size}}{\code{"bat"}; or \code{"lrg"}, \code{"med"}, or
#' \code{"sml"} bird}
#' \item{\code{"Search1",...,"Search5"}}{fate of carcass on the 1st, 2nd, 3rd,
#' 4th, and 5th search after placement. A value of 1 implies that a carcass
#' was discovered by searchers, 0 implies the carcass was present but not
#' discovered, and any other value is interpreted as "no search" or
#' "carcass not present" and ignored in the model. In this data set,
#' \code{NA} indicates that a carcass had been previously discovered and
#' removed from the field. A user may use a variety of values to
#' differentiate different reasons no search was conducted or the carcass
#' was not present. For example, "NS" to indicate the search
#' was not scheduled in that location at that time, or "SC" to indicate the
#' carcass had been removed by scavengers prior to the search.}
#' \item{\code{Distance}}{the distance a carcass was placed from the
#' observer's transect. Used in determining probability to detect with
#' distance sampling.}
#' }
#' @section Carcass Persistence (\code{CP}):
#' \code{$CP} is a data frame with each row representing the fate of a single
#' carcass in the carcass persistence trials. There are columns for:
#' \describe{
#' \item{\code{Index}}{unique ID for each carcass}
#' \item{\code{Season}}{\code{"winter"}, \code{"spring"}, \code{"summer"}, or
#' \code{"fall"}}
#' \item{\code{Size}}{\code{"bat"}; or \code{"lrg"}, \code{"med"}, or
#' \code{"sml"} bird}
#' \item{\code{LastPresent}, \code{FirstAbsent}}{endpoints of the interval bracketing
#' the time the carcass was scavenged or otherwise removed from the field.
#' For example, \code{LastPresent = 2.04}, \code{FirstAbsent = 3.21} indicates that the
#' carcass was last observed 2.04 days after being placed in the field and
#' was noted missing 3.21 days after being placed. If the precise time of
#' carcass removal is known (e.g., recorded by camera), then \code{LastPresent} and
#' \code{FirstAbsent} should be set equal to each other. If a carcass persists
#' beyond the last day of the field trial, \code{LastPresent} is the last time it
#' was observed and \code{FirstAbsent} is entered as \code{Inf} or \code{NA}.}
#' }
#'
#'
#' @section Search Schedule (\code{SS}):
#' \code{$SS} is a data frame with a row for each date an array at the site was
#' searched, a column of \code{SearchDate}s, and a column for each array. In
#' addition, there is an optional column to indicate the \code{Season}. The
#' columns for distinct area (array) and the date column are required, and the
#' names of the columns for search areas must match the names of areas used in
#' the DWP and CO files.
#' \describe{
#' \item{\code{SearchDate}}{columns of dates when arrays were searched. Format
#' in this data is \code{"\%Y-\%m-\%d CDT"}, but time zone (\code{CDT}) is
#' optional. A time stamp may be included if desired (e.g.,
#' \code{2018-03-20 02:15:41}). Alternatively, \code{\\} can be used in
#' place of \code{-}.}
#' \item{\code{Season}}{\code{"winter"}, \code{"spring"}, \code{"summer"},
#' or \code{"fall"} to indicate which season the search was conducted in.
#' \code{Season} is optional but may be used as a temporal covariate for
#' fatality estimates.}
#' }
#'
#' @section Density Weighted Proportion (\code{DWP}):
#' \code{$DWP} is a data frame with a row for each array and columns for each
#' carcass size class (labels must match those of the class factors in the
#' carcass observation file). Values represent the density-weighted proportion
#' of the searched area for each size (or the fraction of carcasses that fall
#' in the searched area). In this example, observers walk along transects
#' separated by 150 meters, and search coverage is assumed to be 100\%, i.e.,
#' DWP = 1 for each unit. This requires that carcasses be placed at random
#' locations in the field, even at distances from the transects that would make
#' it unlikely to observe small carcasses.
#' \describe{
#' \item{\code{Unit}}{unique ID for each array. IDs match those used in the
#' \code{$CO} data frame and the column names in the \code{$SS} data.}
#' \item{\code{bat}}{DWP associated with size class Bat}
#' \item{\code{sml}}{DWP associated with size class Small}
#' \item{\code{med}}{DWP associated with size class Medium}
#' \item{\code{lrg}}{DWP associated with size class Large}
#' }
#'
#' @section Carcass Observations (\code{CO}):
#' \code{$CO} is a data frame with a row for carcass observed in the carcass
#' searches and a number of columns giving information about the given
#' carcass (date found, size, species, etc.)
#' \describe{
#' \item{\code{Index}}{unique identifier for each carcass.}
#' \item{\code{Unit}}{identifier for which unit the given carcass was found
#'    at. Values match the array names used as column names in \code{$SS}
#'    and in the \code{Unit} column of \code{$DWP}.}
#' \item{\code{Species}}{species of the carcass: \code{"BA"}, \code{"BB"},
#' \code{"BC"}, \code{"BD"}, \code{"BE"}, \code{"LA"}, \code{"LB"},
#' \code{"LD"}, \code{"LE"}, \code{"MA"}, \code{"MB"}, \code{"SA"},
#' \code{"SB"}, \code{"SC"}, \code{"SD"}, \code{"SE"}, \code{"SF"},
#' \code{"SG"}}
#' \item{\code{Size}}{size: \code{"bat"}, \code{"lrg"}, \code{"med"},
#' \code{"sml"}}
#' \item{\code{Row}}{Optional indicator of which row within an array a carcass
#' was found at.}
#'   \item{\code{Distance}}{The perpendicular distance from the searcher's
#'    transect at which the carcass was discovered.}
#' \item{\code{DateFound}}{dates entered in the same format as in
#' \code{$SS$SearchDate}. Every date entered here is (and must be) included in
#' the search schedule
#'    (\code{$SS$SearchDate})}
#' \item{\code{X}}{UTM Easting of carcass.}
#' \item{\code{Y}}{UTM Northing of carcass.}
#' }
#'
#' @source \code{solar_PV}
"solar_PV"
# Power Tower ------------------------------
#' Power Tower Example Dataset
#'
#' An example data set for estimating fatalities from a concentrating
#' solar-thermal (power tower) generation facility.
#'
#' The simulated site consists of a single tower generating approximately 130
#' MW. The tower is surrounded by a 250 meter radius circular inner field of
#' heliostats, searched on a weekly schedule. From the inner circle, 18
#' concentric rings of heliostats 50 meters deep extend to the boundaries of the
#' simulated site. Rings are subdivided into 8 arcs each, with arcs 1-8
#' immediately adjacent to the central circle. Arcs are search using distance
#' sampling techniques on a weekly schedule, with 29 arcs searched per weekday.
#'
#' There are two sources of mortality simulated: flux and non-flux (collision or
#' unknown cause).Flux carcasses are generated (weibull) about the tower, with
#' 5\% to be found in the outer field. Non-flux mortality is assumed uniform
#' across the site.
#'
#' The dataset consists of five parts: Data on carcass observations (CO) from
#' inner and outer heliostat searches, field trials for estimating carcass
#' persistence (CP) and searcher efficiency (SE), search schedule (SS), and
#' density weighted proportion (DWP) of area searched at each turbine (which is
#' an area adjustment factor to account for incomplete search coverage).
#'
#' @format \code{solar_powerTower} is a list with 5 elements:
#' \describe{
#' \item{\code{SE}}{Searcher efficiency trial data}
#' \item{\code{CP}}{Carcass persistence trial data}
#' \item{\code{SS}}{Search schedule parameters}
#' \item{\code{DWP}}{Density weighted proportion of area searched}
#' \item{\code{CO}}{Carcass observations}
#' }
#' @section Searcher Efficiency (\code{SE}):
#' \code{$SE} is a data frame with each row representing the fate of a single
#' carcass in the searcher efficiency trials. There are columns for:
#' \describe{
#' \item{\code{Season}}{\code{"winter"}, \code{"spring"}, \code{"summer"}, or
#' \code{"fall"}}
#' \item{\code{Size}}{\code{"bat"}; or \code{"lrg"}, \code{"med"}, or
#' \code{"sml"} bird}
#'    \item{\code{Field}}{indicates carcass placed in inner or outer heliostat
#'    field, with levels \code{"inner"} or \code{"outer"}.}
#' \item{\code{"Search1",...,"Search5"}}{fate of carcass on the 1st, 2nd, 3rd,
#' 4th, and 5th search after placement. A value of 1 implies that a carcass
#' was discovered by searchers, 0 implies the carcass was present but not
#' discovered, and any other value is interpreted as "no search" or
#' "carcass not present" and ignored in the model. In this data set,
#' \code{NA} indicates that a carcass had been previously discovered and
#' removed from the field. A user may use a variety of values to
#' differentiate different reasons no search was conducted or the carcass
#' was not present. For example, "NS" to indicate the search
#' was not scheduled in that location at that time, or "SC" to indicate the
#' carcass had been removed by scavengers prior to the search.}
#' }
#' @section Carcass Persistence (\code{CP}):
#' \code{$CP} is a data frame with each row representing the fate of a single
#' carcass in the carcass persistence trials. There are columns for:
#' \describe{
#' \item{\code{cpID}}{unique ID for each carcass}
#' \item{\code{Season}}{\code{"winter"}, \code{"spring"}, \code{"summer"}, or
#' \code{"fall"}}
#' \item{\code{Size}}{\code{"bat"}; or \code{"lrg"}, \code{"med"}, or
#' \code{"sml"} bird}
#' \item{\code{LastPresent}, \code{FirstAbsent}}{endpoints of the interval bracketing
#' the time the carcass was scavenged or otherwise removed from the field.
#' For example, \code{LastPresent = 2.04}, \code{FirstAbsent = 3.21} indicates that the
#' carcass was last observed 2.04 days after being placed in the field and
#' was noted missing 3.21 days after being placed. If the precise time of
#' carcass removal is known (e.g., recorded by camera), then \code{LastPresent} and
#' \code{FirstAbsent} should be set equal to each other. If a carcass persists
#' beyond the last day of the field trial, \code{LastPresent} is the last time it
#' was observed and \code{FirstAbsent} is entered as \code{Inf} or \code{NA}.}
#' }
#'
#' @section Search Schedule (\code{SS}):
#' \code{$SS} is a data frame with a row for each date an arc at the site
#' was searched, a column of \code{SearchDate}s, and a column for each arc, and
#' one column at the end for the inner heliostat field, labeled \code{center}.
#' In addition, there is a column to indicate the \code{Season}. A column with
#' search dates and columns for each distinct area (arcs and center) searched
#' are required. Other columns are optional.
#' \describe{
#' \item{\code{SearchDate}}{columns of dates on which an arc was searched.
#' Format in this data is \code{"\%Y-\%m-\%d CDT"}, but time zone (\code{CDT})
#' is optional. A time stamp may be included if desired (e.g.,
#' \code{2018-03-20 02:15:41}). Alternatively, \code{\\} can be used in place
#' of \code{-}.}
#' \item{\code{Season}}{\code{"winter"}, \code{"spring"}, \code{"summer"}, or
#' \code{"fall"} to indicate which season the search was conducted in.
#' \code{Season} is optional but may be used as a temporal covariate for
#' fatality estimates.}
#' }
#'
#' @section Density Weighted Proportion (\code{DWP}):
#' \code{$DWP} is a data frame with a row for each arc and columns for each
#' carcass size class (labels must match those of the class factors in the
#' carcass observation file). Values represent the density-weighted proportion
#' of the searched area for each size (or the fraction of carcasses that fall
#' in the searched area). In this example, within the inner field (center)
#' observers are unobstructed in ability to discover carcasses, for a DWP of 1.
#' In the outer heliostat field observers walk along transects separated by 50
#' meters, but the entire area is surveyed, so DWP = 1.
#' \describe{
#' \item{\code{Unit}}{unique ID for each arc, plus one labeled \code{center}
#' for the inner heliostat field. IDs match those used in the \code{$CO} data
#' frame and the column names in the \code{$SS} data.}
#' \item{\code{bat}}{DWP associated with size class Bat}
#' \item{\code{sml}}{DWP associated with size class Small}
#' \item{\code{med}}{DWP associated with size class Medium}
#' \item{\code{lrg}}{DWP associated with size class Large}
#' }
#'
#' @section Carcass Observations (\code{CO}):
#' \code{$CO} is a data frame with a row for carcass observed in the carcass
#' searches and a number of columns giving information about the given
#' carcass (date found, size, species, etc.)
#' \describe{
#' \item{\code{carcID}}{unique identifier for each carcass.}
#' \item{\code{Unit}}{identifier for which unit the given carcass was found
#' at: \code{"arc19"}, \code{"arc65"}, etc, for arcs in the outer heliostat
#' field, or \code{"center"}, indicating the inner heliostat field.}
#' \item{\code{Species}}{species of the carcass: \code{"BA"}, \code{"BB"},
#' \code{"BC"}, \code{"BD"}, \code{"BE"}, \code{"LA"}, \code{"LB"},
#' \code{"LD"}, \code{"LE"}, \code{"MA"}, \code{"MB"}, \code{"SA"},
#' \code{"SB"}, \code{"SC"}, \code{"SD"}, \code{"SE"}, \code{"SF"},
#' \code{"SG"}}
#' \item{\code{Size}}{size: \code{"bat"}, \code{"lrg"}, \code{"med"},
#' \code{"sml"}}
#' \item{\code{Season}}{\code{"winter"}, \code{"spring"}, \code{"summer"}, or
#' \code{"fall"}}
#'   \item{\code{Flux}}{An optional field indicating whether there was evidence
#'   the animal was killed by flux: \code{"TRUE"} or \code{"FALSE"}.}
#'   \item{\code{Field}}{Optional indicator of whether the animal was found in
#'   the \code{"inner"} or \code{"outer"} heliostat field.}
#' \item{\code{Ring}}{Optional note animals found in the outer heliostat field
#' indicating which concentric ring the carcass was found in.}
#' \item{\code{Distance}}{Optional note animals found in the outer heliostat
#' field representing the perpendicular distance from the searcher the carcass
#' was discovered at.}
#' \item{\code{DateFound}}{dates entered in the same format as in
#' \code{$SS$SearchDate}. Every date entered here is (and must be) included
#'   in the search schedule (\code{$SS$SearchDate})}
#' \item{\code{X}}{Distance in meters from the Western edge of the facility.}
#' \item{\code{Y}}{Distance in meters from the Southern edge of the facility.}
#' }
#'
#' @source \code{solar_powerTower}
"solar_powerTower"
|
558bc8de40d59be0ad6847a734c985017a913c97
|
b55450d14a00c79884dbac71dc58c7ed54feb3f5
|
/visuals/LINE_karyoplot.R
|
ce800350aa5ec37380897a7c25791609b28f84df
|
[] |
no_license
|
eagomezc/Farfalloni
|
ce0af0fd84071ee404fbb20695bd4ad4e430afa5
|
e8fa3e53569565356220442d2fbde98b1fde444a
|
refs/heads/master
| 2022-04-13T00:50:45.008038
| 2020-04-01T09:46:52
| 2020-04-01T09:46:52
| 117,115,027
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 7,408
|
r
|
LINE_karyoplot.R
|
# kpPlotDensity
#load LINE dataset
data1 <-read.csv("/Users/vithusaaselva1/Desktop/Line1.csv")
head(data1)
#chromosome column number
chromo <- data1[data1$genoName == "chr", ]
#genoName
Gname<-(data1[1] == "chr1")
#genoStart
gstart <- data1$genoStart
#genoEnd
gend <- data1$genoEnd
#************
# FUNCTIONS
#************
# @details
# call function and store information for each chromosome
# start and end coordinates
# genoStart
function1 <- function(chr){
Gname<-(data1[1] == chr)
gstart <- data1$genoStart[Gname]
return(gstart)
}
# genoEnd
function2 <- function(chr){
Gname<-(data1[1] == chr)
gend <- data1$genoEnd[Gname]
return(gend)
}
#call function for each chromosome start and end
chr1start <- function1("chr1")
chr1end <- function2("chr1")
chr2start <- function1("chr2")
chr2end <- function2("chr2")
chr3start <- function1("chr3")
chr3end <- function2("chr3")
chr4start <- function1("chr4")
chr4end <- function2("chr4")
chr5start <- function1("chr5")
chr5end <- function2("chr5")
chr6start <- function1("chr6")
chr6end <- function2("chr6")
chr7start <- function1("chr7")
chr7end <- function2("chr7")
chr8start <- function1("chr8")
chr8end <- function2("chr8")
chr9start <- function1("chr9")
chr9end <- function2("chr9")
chr10start <- function1("chr10")
chr10end <- function2("chr10")
chr11start <- function1("chr11")
chr11end <- function2("chr11")
chr12start <- function1("chr12")
chr12end <- function2("chr12")
chr13start <- function1("chr13")
chr13end <- function2("chr13")
chr14start <- function1("chr14")
chr14end <- function2("chr14")
chr15start <- function1("chr15")
chr15end <- function2("chr15")
chr16start <- function1("chr16")
chr16end <- function2("chr16")
chr17start <- function1("chr17")
chr17end <- function2("chr17")
chr18start <- function1("chr18")
chr18end <- function2("chr18")
chr19start <- function1("chr19")
chr19end <- function2("chr19")
chr20start <- function1("chr20")
chr20end <- function2("chr20")
chr21start <- function1("chr21")
chr21end <- function2("chr21")
chr22start <- function1("chr22")
chr22end <- function2("chr22")
chrXstart <- function1("chrX")
chrXend <- function2("chrX")
chrYstart <- function1("chrY")
chrYend <- function2("chrY")
source("https://bioconductor.org/biocLite.R")
biocLite("karyoploteR")
library(karyoploteR)
# store chromosome start and end functions in GRanges object
#***************
#KpPlotDensity
#***************
# @details
# \code{kpPlotDensity} Density of features (coordinates) are plotted along the genome represented by
# \code {GRanges} object through the length of the genome. It computes the number of features per chromosome
# and ensures no overlapping flooring along the genome.
# \code {chr=c(x), start= c(y),end= c(z)}, firstly need to specify chromosome,
# the start coordinate, and then end coordinate
# Returns the regions where the chromosome coordinates should be located on the genome.
# @parameters
# \code{kp} the initial argument for the data plotting functions of karyoplotR. Returned
# by calling \code{plotKaryotype}
# \code {data= chr1data} this is a GRanges object, from which density is computed
# \code {col= "#DBBDED"} specifies colour of the density plotted, in this case purple
# \code {r= r0=0, r1=0.5} specifies vertical range of data panel to sketch the plot
chr1data <- toGRanges(
data.frame(
chr=c("chr1"), start= c(chr1start),end= c(chr1end)))
chr2data <- toGRanges(
data.frame(
chr=c("chr2"), start= c(chr2start),end= c(chr2end)))
chr3data <- toGRanges(
data.frame(
chr=c("chr3"), start= c(chr3start),end= c(chr3end)))
chr4data <- toGRanges(
data.frame(
chr=c("chr4"), start= c(chr4start),end= c(chr4end)))
chr5data <- toGRanges(
data.frame(
chr=c("chr5"), start= c(chr5start),end= c(chr5end)))
chr6data <- toGRanges(
data.frame(
chr=c("chr6"), start= c(chr6start),end= c(chr6end)))
chr7data <- toGRanges(
data.frame(
chr=c("chr7"), start= c(chr7start),end= c(chr7end)))
chr8data <- toGRanges(
data.frame(
chr=c("chr8"), start= c(chr8start),end= c(chr8end)))
chr9data <- toGRanges(
data.frame(
chr=c("chr9"), start= c(chr9start),end= c(chr9end)))
chr10data <- toGRanges(
data.frame(
chr=c("chr10"), start= c(chr10start),end= c(chr10end)))
chr11data <- toGRanges(
data.frame(
chr=c("chr11"), start= c(chr11start),end= c(chr11end)))
chr12data <- toGRanges(
data.frame(
chr=c("chr12"), start= c(chr12start),end= c(chr12end)))
chr13data <- toGRanges(
data.frame(
chr=c("chr13"), start= c(chr13start),end= c(chr13end)))
chr14data <- toGRanges(
data.frame(
chr=c("chr14"), start= c(chr14start),end= c(chr14end)))
chr15data <- toGRanges(
data.frame(
chr=c("chr15"), start= c(chr15start),end= c(chr15end)))
chr16data <- toGRanges(
data.frame(
chr=c("chr16"), start= c(chr16start),end= c(chr16end)))
chr17data <- toGRanges(
data.frame(
chr=c("chr17"), start= c(chr17start),end= c(chr17end)))
chr18data <- toGRanges(
data.frame(
chr=c("chr18"), start= c(chr18start),end= c(chr18end)))
chr19data <- toGRanges(
data.frame(
chr=c("chr19"), start= c(chr19start),end= c(chr19end)))
chr20data <- toGRanges(
data.frame(
chr=c("chr20"), start= c(chr20start),end= c(chr20end)))
chr21data <- toGRanges(
data.frame(
chr=c("chr21"), start= c(chr21start),end= c(chr21end)))
chr22data <- toGRanges(
data.frame(
chr=c("chr22"), start= c(chr22start),end= c(chr22end)))
chrXdata <- toGRanges(
data.frame(
chr=c("chrX"), start= c(chrXstart),end= c(chrXend)))
chrYdata <- toGRanges(
data.frame(
chr=c("chrY"), start= c(chrYstart),end= c(chrYend)))
kp <- plotKaryotype(genome="hg38", plot.type=1, main="Density plot for LINE retrotransposons", cex=0.6)
kpAddBaseNumbers(kp)
kpPlotDensity(kp, data= chr1data, col="#DBBDED", r0=0, r1=0.5)
kpPlotDensity(kp, data= chr2data, col="#DBBDED", r0=0, r1=0.5)
kpPlotDensity(kp, data= chr3data, col="#DBBDED", r0=0, r1=0.5)
kpPlotDensity(kp, data= chr4data, col="#DBBDED", r0=0, r1=0.5)
kpPlotDensity(kp, data= chr5data, col="#DBBDED", r0=0, r1=0.5)
kpPlotDensity(kp, data= chr6data, col="#DBBDED", r0=0, r1=0.5)
kpPlotDensity(kp, data= chr7data, col="#DBBDED", r0=0, r1=0.5)
kpPlotDensity(kp, data= chr8data, col="#DBBDED", r0=0, r1=0.5)
kpPlotDensity(kp, data= chr9data, col="#DBBDED", r0=0, r1=0.5)
kpPlotDensity(kp, data= chr10data, col="#DBBDED", r0=0, r1=0.5)
kpPlotDensity(kp, data= chr11data, col="#DBBDED", r0=0, r1=0.5)
kpPlotDensity(kp, data= chr12data, col="#DBBDED", r0=0, r1=0.5)
kpPlotDensity(kp, data= chr13data, col="#DBBDED", r0=0, r1=0.5)
kpPlotDensity(kp, data= chr14data, col="#DBBDED", r0=0, r1=0.5)
kpPlotDensity(kp, data= chr15data, col="#DBBDED", r0=0, r1=0.5)
kpPlotDensity(kp, data= chr16data, col="#DBBDED", r0=0, r1=0.5)
kpPlotDensity(kp, data= chr17data, col="#DBBDED", r0=0, r1=0.5)
kpPlotDensity(kp, data= chr18data, col="#DBBDED", r0=0, r1=0.5)
kpPlotDensity(kp, data= chr19data, col="#DBBDED", r0=0, r1=0.5)
kpPlotDensity(kp, data= chr20data, col="#DBBDED", r0=0, r1=0.5)
kpPlotDensity(kp, data= chr21data, col="#DBBDED", r0=0, r1=0.5)
kpPlotDensity(kp, data= chr22data, col="#DBBDED", r0=0, r1=0.5)
kpPlotDensity(kp, data= chrXdata, col="#DBBDED", r0=0, r1=0.5)
kpPlotDensity(kp, data= chrYdata, col="#DBBDED", r0=0, r1=0.5)
kpPlotRegions(kp, data= chrYdata, col="#DBBDED", r0=0, r1=0.5)
|
dc9e4161667d4136140f003a1e2c52a0f3fab70f
|
89d524f2a6e278787a16ad3a7db1f672917ffea0
|
/man/print.plfm.Rd
|
20dc9685cbda5736a6b923b1f42ea19f6d7c1816
|
[] |
no_license
|
cran/plfm
|
4def3d16d72f3fb7b160cdccafd19119bc4f9150
|
8ebe52e8347e3ce2b027a17509a5ceb4591ebcee
|
refs/heads/master
| 2022-04-29T16:41:02.292885
| 2022-03-30T13:50:02
| 2022-03-30T13:50:02
| 17,698,560
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 869
|
rd
|
print.plfm.Rd
|
\name{print.plfm}
\alias{print.plfm}
\title{Printing plfm objects}
\description{Printing method for probabilistic latent feature analysis objects.}
\usage{
\method{print}{plfm}(x,\dots)
}
\arguments{
\item{x}{Probabilistic latent feature analysis object returned by \code{\link{plfm}}.}
\item{\dots}{Further arguments are ignored.}
}
\details{The printing method for probabilistic latent feature analysis objects displays
(1) the parameters used to call the \code{\link{plfm}} function,
(2) information on the descriptive fit of the model (i.e. correlation between observed and expected frequencies,
and proportion of the variance in the observed frequencies accounted for by the model),
and (3) the estimated object- and attribute parameters.
}
%%\seealso{\code{\link{print.summary.plfm}}}
\examples{
## example print.plfm(plfm(...))
}
|
32808c194a93f4ac9003f2b8ce3e1f2551e3bb51
|
5ababdaa8c02b04015cf171bc5dc1710bad5ee4c
|
/Bootbinary.R
|
ba7a9b2c0dbed56f909749bddac9bdf5193890e4
|
[] |
no_license
|
chhsueh/Research
|
8bf6e493289376fe2100e42bb3ae0140f47b6e50
|
7f7a7c589286f9ee4234c4919683c8605f0c1164
|
refs/heads/master
| 2020-12-24T18:13:35.525629
| 2016-04-27T17:48:59
| 2016-04-27T17:48:59
| 57,233,474
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,103
|
r
|
Bootbinary.R
|
source("EnergyOptim.r")
library(compiler)
## this is a function for bootstrapping an adjancency matrix
## with constraint of its row and column sums.
## return: (1) the energy of the bootstrapping adj matrix (2) the new Adj matrix
pij = function(da,db,dahat,dbhat,m,alpha){
p = (dahat*dbhat)^alpha*(1-((da*db)/(4*m)))
return(p)
}
Bootbinary = function(Adj){
Adj = as.matrix(Adj)
m = mean(Adj)
if(m==0 || m==1){
Bootmatrix = Adj
}else{
Bootmatrix = matrix(0,nrow(Adj),ncol(Adj))
ii=1
while(sum(abs(rowSums(Bootmatrix)-rowSums(Adj)))>0 &
sum(abs(colSums(Bootmatrix)-colSums(Adj)))>0){
######
#print(ii)
if(ii==1000){Bootmatrix = Adj; break}
da = rowSums(Adj)
db = colSums(Adj)
Na = nrow(Adj)
Nb = ncol(Adj)
m = (sum(da)+sum(db))/2
E = c()
dahat = da
dbhat = db
P = 1
C = expand.grid(1:Na,1:Nb)
#alpha = 1
for (i in 1:(Na*Nb)){
diff = abs(dahat[C[,1]]-dbhat[C[,2]])
alpha = rep(1,nrow(C)) #initial of alpha
alpha[which(diff>quantile(diff,0.7))] = 2
#print(table(alpha))
# calculate probability pij
Pr = pij(da[C[,1]],db[C[,2]],dahat[C[,1]],dbhat[C[,2]],m,alpha)
## weird...
if(sum(Pr)==0) break
Pr = Pr/sum(Pr)
idx = which(rmultinom(1,1,Pr)==1)
P = P*Pr[idx]
dahat[C[idx,1]] = dahat[C[idx,1]]-1
dbhat[C[idx,2]] = dbhat[C[idx,2]]-1
E = rbind(E,C[idx,])
C = C[-idx,]
# break the for loop when there is no edges and add in
if((sum(dahat)+sum(dbhat))==0) break
}
Bootmatrix = matrix(0,nrow(Adj),ncol(Adj))
for(j in 1:nrow(E)){
Bootmatrix[E[j,1],E[j,2]] = 1
}
ii = ii+1
}# end of while loop
} # End of if
#print(ii)
Energy = GetBipEnergy(Bootmatrix)
return(list(Energy=Energy, Matrix=Bootmatrix))
}
Bootbinary = cmpfun(Bootbinary)
|
2f3af4aed80871d6450ee29a40acb28385f266de
|
78c6fb7a90dfd4ce5240032408811660358dfbd7
|
/test/path_index.r
|
8ef100365e2af41b18bd7fcbbf4588b9b65772d0
|
[] |
no_license
|
haleyjeppson/tourr
|
b335178bcb9c155be194afbe4d20ee662581578c
|
272a2f88d9981b4bc7d5c86e2f8cde24183037e5
|
refs/heads/master
| 2021-12-07T17:54:33.348101
| 2021-05-30T00:39:47
| 2021-05-30T00:39:47
| 167,255,934
| 0
| 0
| null | 2019-01-23T21:20:03
| 2019-01-23T21:20:03
| null |
UTF-8
|
R
| false
| false
| 530
|
r
|
path_index.r
|
### Name: path_index
### Title: Compute index values for a tour history.
### Aliases: path_index
### Keywords: hplot
### ** Examples
fl_holes <- save_history(flea[, 1:6], guided_tour(holes()), sphere = TRUE)
path_index(fl_holes, holes())
path_index(fl_holes, cmass())
plot(path_index(fl_holes, holes()), type = "l")
plot(path_index(fl_holes, cmass()), type = "l")
# Use interpolate to show all intermediate bases as well
## Not run:
##D hi <- path_index(interpolate(fl_holes), holes())
##D hi
##D plot(hi)
## End(Not run)
|
9f7c88841b1cbe3061d431252cbdb733465aaf9a
|
2758eef8d85ae79b6df52a0787b9e0ffdd245bf3
|
/R/RunScenario.r
|
ccd957eda4c03848fdf6c012cc3f3522b3aefa62
|
[] |
no_license
|
pkuhnert/HSTMM
|
1b2950630c7acb27292740714f07bd4476b5d70e
|
4e38b24562def34c993241e84b2922c81bccbfb4
|
refs/heads/master
| 2020-03-18T23:22:51.170605
| 2018-12-13T04:55:34
| 2018-12-13T04:55:34
| 135,398,980
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,221
|
r
|
RunScenario.r
|
#' RunScenario
#'
#' @description Just a simple scenario to run Hooten Model
#'
#' @param niter Number of iterations
#' @param nT Number of time points
#' @param nburn Number of burn-ins
#'
#' @import ggplot2
#' @import mvtnorm
#' @import RColorBrewer
#' @import gridExtra
#'
#'
#' @export
RunScenario <- function(niter = 30000, nburn = 10000, nT = 10){
if(nburn > niter) stop("No. of burn-ins needs to be less than number of iterations.\n")
# create dummy locations
longitude <- seq(40, 50, length = 5)
latitude <- seq(20, 30, length = 5)
coords <- expand.grid(longitude = longitude, latitude = latitude)
m <- nrow(coords)
coords_farm <- coords
coords_farm$farm <- as.factor(c(rep(1, 10), rep(2, 15)))
p_farm <- ggplot(coords_farm, aes_string("longitude", "latitude", col = "farm")) +
geom_point(shape = 15, size = 5) +
theme(axis.text=element_blank(),
axis.ticks=element_blank(),
axis.title=element_blank()) + coord_fixed() +
geom_hline(yintercept = 23.75, size = 1, color = "black")
lambda_m <- c(rep(log(15), 10), rep(log(1.5), 15))
# Surveillance
sample_n <- matrix(0, nrow = m, ncol = nT)
Hout <- ExampleInvasion(m = m, nT = nT, niter = niter, coords = coords,
prior_lambda = list(lambda_m = lambda_m, lambda_sig = log(1.01)),
prior_tau = list(tau_m = log(1.5), tau_sig = log(1.01)),
prior_K = list(alpha = 3, beta = 12),
prior_r = list(mu = 0.5, sig2 = 0.25),
prior_theta = list(alpha_theta = 3, beta_theta = 12),
initialV = list(K = 150, r = 0.3),
tune = list(h_lambda = 0.05, h_tau = 0.2, h_K = 10, h_r = 0.05,
h_n = 1),
sample_n = sample_n)
Hres <- lapply(Hout$AR, mean)
# iteration range to examine
startMC <- niter-nburn+1
stopMC <- niter
#------------------- lambda ---------------------------#
sumlambda <- Hout$lambda[[startMC]]
count <- 0
for(i in (startMC+1):stopMC){
check <- (i/2) - trunc(i/2)
if(check == 0){
sumlambda <- sumlambda + Hout$lambda[[i]]
count <- count + 1
}
}
#lambda_m <- sumlambda/(stopMC - startMC + 1)
lambda_m <- sumlambda/count
lambda_m <- data.frame(lambda_m)
names(lambda_m) <- paste("t", 1:nT, sep = "")
coords_lambda <- cbind(coords, lambda_m)
p <- list()
for(i in 1:nT){
sub <- coords_lambda[,c("longitude", "latitude", paste("t", i, sep = ""))]
names(sub)[3] <- "time"
p[[i]] <- ggplot(sub, aes(longitude, latitude, col = time)) +
geom_point(size = 10, shape = 15) + ggtitle(paste("Time: ", i, sep = "")) +
scale_color_gradientn("lambda (Intensity)",
colours=c(rev(brewer.pal(8,"Spectral"))), na.value = "white",
limits = range(0, lambda_m, 80)) +
theme(axis.text=element_blank(),
axis.ticks=element_blank(),
axis.title=element_blank()) + coord_fixed()
}
p_spread <- marrangeGrob(p, nrow = 2, ncol = 2, as.table = FALSE)
list(p_farm = p_farm, p_spread = p_spread, Hres = Hres)
}
|
d47f1caad5c0ffd22af2d3a373f828c739f9fc5f
|
9984fa5e343a7810ae8da2ee3933d1acce9a9657
|
/run_pipeline/prepCountInputs.R
|
52ad46f443fd58cd5dd4a063bf5fa1ec63dc4895
|
[] |
no_license
|
kennyjoseph/twitter_matching
|
4757e7cff241d90167888ce24625be36015b5a93
|
9fe3a2b970e0ff2f559261f89d29b3202db25920
|
refs/heads/master
| 2022-01-05T15:45:19.643082
| 2019-07-11T20:28:54
| 2019-07-11T20:28:54
| 84,259,451
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 5,025
|
r
|
prepCountInputs.R
|
library(data.table)
# To run:
usage = "Usage: Rscript [--vanilla] prepCountInputs.R <fileNum>"
# where fileNum corresponds to the 50 states
#args = commandArgs(T) # everything after executable name
# This script bundles data from ts_cleaned + extra_state_files into input files for a state:
# -rbind raw-ish and extra state files for a state
# -redo counts
# -split into files of 2 million lines each (if remainder < 1M, append to previous)
# Directory structure:
# /net/data/twitter-voters
# /voter-data
# /targetsmart # raw data from TargetSmart
# /ts_cleaned # preprocessed
# /ts_chunks # split into files of < 3 million people.
# /matching-work-files
# /cand-matches #
# /one-subdir-per-input-file
# /with-locations #
# /one-subdir-per-input-file # (eventually deleted)
# /handful of files per input file
# /match-results #
# /handful of files per input file
# Initialize vars
#rawStateDir = "/net/data/twitter-voters/voter-data/ts_cleaned" # example filename: tsmart_northeastern_install_file_AK.tsv
#extraStateDir = "/net/data/twitter-voters/voter-data/extra_state_files" # example filename: AK_extra.tsv
#voterfileDir = "/net/data/twitter-voters/voter-data/ts_chunks" # for output
# example rawState filename: tsmart_northeastern_install_file_AK.tsv
# example extraState filename: AK_extra.tsv
# Returns the number of chunk files written
# Rebuild one state's voter file: rbind the raw TargetSmart state file with the
# corresponding "extra" records, recompute the (first_name, last_name)
# frequency counts at zipcode/city/county/state level, and write the result
# back out split into chunks.
#
# Args:
#   fileNum         - 1-based index into the sorted *.tsv files of rawStateDir;
#                     selects which state file to process.
#   rawStateDir     - dir of raw state files, e.g. tsmart_..._AK.tsv.
#   extraStateDir   - dir holding <ST>_extra.tsv companion files.
#   voterfileDir    - output dir; created here if it does not exist.
#   linesPerFile    - nominal rows per output chunk (default 2,000,000).
#   maxLinesPerFile - if the final remainder is <= this, it is folded into the
#                     last chunk instead of producing a tiny extra file.
# Returns: the number of chunk files written.
run_command_line_call = function(fileNum, rawStateDir, extraStateDir, voterfileDir, linesPerFile = 2000000, maxLinesPerFile = 3000000) {
  # (dead code from the old command-line entry point, kept for reference)
  #if (length(args) != 1) {
  #stop("Expected exactly 1 arg\n", usage)
  #}
  #fileNum = as.integer(args[1])
  # Ensure the output directory exists before doing any work.
  if (!dir.exists(voterfileDir) && !dir.create(voterfileDir)) {
    stop(paste("Cannot create output directory", voterfileDir))
  }
  allStateFiles = list.files(rawStateDir, pattern="\\.tsv$")
  rawStateFileStem = allStateFiles[fileNum]
  rawStateFile = file.path(rawStateDir, rawStateFileStem)
  # The two-letter state abbreviation sits immediately before ".tsv".
  stateAbbrev = substr(rawStateFileStem, nchar(rawStateFileStem) - 5, nchar(rawStateFileStem) - 4)
  extraStateFile = file.path(extraStateDir, paste0(toupper(stateAbbrev), "_extra.tsv"))
  if (!file.exists(extraStateFile)) {
    stop("Didn't find extra state file")
  }
  data1 = fread(rawStateFile)
  # data1 contains all records from the original targetsmart file, including those from out of state.
  # Keep only in-state rows before merging with the extra file.
  data1_state = data1[state==toupper(stateAbbrev),]
  # The extra file has no header row; rows are appended positionally below
  # (use.names=FALSE), so its column order must match -- TODO confirm upstream.
  data2 = fread(extraStateFile, header=FALSE)
  allData = rbind(data1_state, data2, use.names=FALSE)
  print(paste(stateAbbrev, ": found", nrow(allData), "rows"))
  # Recount how many records share a (first_name, last_name) within each
  # geographic unit; .N is the data.table per-group row count.
  withCntZip = allData[, .(zipcode_cnt = .N), by=.(first_name, last_name, zipcode, state)]
  withCntCity = allData[, .(city_cnt = .N), by=.(first_name, last_name, city, state)]
  withCntCounty = allData[, .(county_cnt = .N), by=.(first_name, last_name, county, state)]
  withCntState = allData[, .(state_cnt = .N), by=.(first_name, last_name, state)]
  allDataWith1 = merge(allData, withCntZip)
  # must specify "by" b/c default for data.table is shared "key" columns only
  allDataWith2 = merge(allDataWith1, withCntCity, by=intersect(colnames(allDataWith1), colnames(withCntCity)))
  allDataWith3 = merge(allDataWith2, withCntCounty, by=intersect(colnames(allDataWith2), colnames(withCntCounty)))
  allDataWith4 = merge(allDataWith3, withCntState, by=intersect(colnames(allDataWith3), colnames(withCntState)))
  # (manually checked that counts are reasonable compared to original ones. now drop originals.)
  origColnamesWanted = setdiff(colnames(allData), c("zipcode_count", "city_count", "county_count", "state_count"))
  # notice: allData's order is how we want to write it out
  # Rename *_cnt -> *_count; the `_cnt = NULL` entries appear intended to drop
  # the intermediate columns from the j-list result -- TODO confirm against
  # data.table's handling of NULL elements in .().
  cleanCnts = allDataWith4[, .(zipcode_count = zipcode_cnt, city_count = city_cnt, county_count = county_cnt, state_count = state_cnt,
  zipcode_cnt = NULL, city_cnt = NULL, county_cnt = NULL, state_cnt = NULL)]
  allDataClean = cbind(allDataWith4[, origColnamesWanted, with=F], cleanCnts)
  # <-- looks just like input files, except:
  # 1. Counts updated
  # 2. Count columns reordered to be more reasonable
  # Write it out!
  # Into how many files? Generally use linesPerFile lines, but the last one can be up to maxLinesPerFile.
  linesWritten = 0
  fileCnt = 0
  while (linesWritten < nrow(allDataClean)) {
    # write a file
    fileCnt = fileCnt + 1
    outfile = file.path(voterfileDir, paste0(stateAbbrev, "_chunk", fileCnt, ".tsv"))
    startLine = linesWritten + 1
    # if the remainder is small, push it into this file too
    if (nrow(allDataClean) - linesWritten <= maxLinesPerFile) {
      stopLine = nrow(allDataClean)
    } else {
      stopLine = linesWritten + linesPerFile
    }
    fwrite(allDataClean[c(startLine:stopLine),], file=outfile, sep="\t")
    print(paste("Wrote", outfile))
    linesWritten = stopLine
  }
  return(fileCnt)
}
# Actually do the call down here
#run_command_line_call(args)
|
13e29ad9baa60d20d497ad9b52211e48c7606e5b
|
c18ba4045763e39255f78461b5e0692215c9c20b
|
/R/simplify.R
|
9e01c357ecd9fd719692392ce5b576076f52f48e
|
[] |
no_license
|
iMarcello/cppRouting
|
5bf603654ddb5461941638f0eda01a738bd10359
|
4f5680cdbcb21645fe25881b4cf7bd113cdee87a
|
refs/heads/master
| 2020-06-25T07:21:30.144444
| 2019-07-08T08:05:33
| 2019-07-08T08:05:33
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,161
|
r
|
simplify.R
|
# Simplify a cppRouting graph by contracting removable nodes (via the compiled
# Simplify2 routine) and collapsing duplicated edges, optionally iterating
# until a fixed point.
#
# Args:
#   Graph     - cppRouting graph list: $data (from/to/dist), $nbnode,
#               $dict (ref <-> internal id lookup), optionally $coords.
#   keep      - vertex references that must never be removed (NULL = none).
#   new_edges - if TRUE, also return the edges created by each pass.
#   rm_loop   - drop self-loops during simplification.
#   iterate   - repeat simplification until no further node can be removed.
#   silent    - if FALSE, report nodes removed per iteration.
# Returns: list(graph = simplified graph with densely re-indexed node ids,
#          new_edges = list of per-pass new-edge matrices, or NULL).
# NOTE(review): Simplify2 / Remove_duplicate are compiled package internals;
# their contracts below are inferred from how they are used here.
cpp_simplify<-function(Graph,keep=NULL,new_edges=FALSE,rm_loop=TRUE,iterate=FALSE,silent=TRUE){
  #Nodes to keep
  to_keep<-rep(0,Graph$nbnode)
  if (!is.null(keep)) {
    to_keep[Graph$dict$ref %in% keep]<-1
  }
  # First pass: simp[[1]] = edge matrix (from,to,dist), simp[[2]] = count of
  # remaining nodes, simp[[3]] = edges created by the contraction.
  simp<-Simplify2(Graph$data$from,Graph$data$to,Graph$data$dist,Graph$nbnode,loop=rm_loop,keep = to_keep,dict = Graph$dict$ref)
  if (new_edges==TRUE)edges<-list(simp[[3]])
  else edges<-NULL
  #Because removing nodes can create other nodes to remove
  counter<-1
  while(iterate==TRUE){
    if (counter==1 & silent==FALSE) message(paste(" iteration :",counter,"-",Graph$nbnode-simp[[2]],"nodes removed"))
    if (simp[[2]]==Graph$nbnode) break
    count<-simp[[2]]
    # Merge parallel edges produced by the last pass, then simplify again.
    rd<-Remove_duplicate(simp[[1]][,1],simp[[1]][,2],simp[[1]][,3],Graph$nbnode)
    simp<-Simplify2(rd[,1],rd[,2],rd[,3],Graph$nbnode,loop=rm_loop,keep = to_keep,dict = Graph$dict$ref)
    counter<-counter+1
    # Fixed point reached: this pass removed no additional node.
    if (count == simp[[2]]) break
    if(silent==FALSE) message(paste(" iteration :",counter,"-",count-simp[[2]],"nodes removed"))
    if (new_edges==TRUE) edges[[length(edges)+1]]<-simp[[3]]
  }
  # Final duplicate-edge cleanup on the last pass's edge matrix.
  rd<-Remove_duplicate(simp[[1]][,1],simp[[1]][,2],simp[[1]][,3],Graph$nbnode)
  simp<-rd
  if (nrow(simp)==0) stop("All nodes have been removed")
  # Re-index the surviving node ids to a dense 0-based range.
  Nodes=unique(c(simp[,1],simp[,2]))
  dict<-Graph$dict[Graph$dict$id %in% Nodes,]
  dict$idnew<-0:(nrow(dict)-1)
  simp[,1]<-dict$idnew[match(simp[,1],dict$id)]
  simp[,2]<-dict$idnew[match(simp[,2],dict$id)]
  simp<-as.data.frame(simp)
  simp[,1]<-as.integer(simp[,1])
  simp[,2]<-as.integer(simp[,2])
  colnames(simp)<-c("from","to","dist")
  # Carry node coordinates over for the kept vertices, if present.
  if (!is.null(Graph$coords)){
    coords<-Graph$coords
    coords<-coords[match(dict$id,Graph$dict$id),]
  }
  else coords=NULL
  # Drop the old internal-id column; expose (ref, id) with the new ids.
  dict<-dict[,-2]
  colnames(dict)<-c("ref","id")
  return(list(graph=list(data=simp,
                         coords=coords,
                         nbnode=length(Nodes),
                         dict=dict),
              new_edges=edges))
}
|
dd1c3dc3e8425fe5578a5c0f494d72ada06621af
|
dd816cc3b8ade19255eeebd88dbd2efdb2699d34
|
/scripts/udpipe_exer.R
|
d5ee3ae31f71f5aecdbec4a9c4d13509eef87428
|
[] |
no_license
|
bap-project/R-version
|
12a1cb7b02cbc7a5f05cdfb383649e4f035081bd
|
02fa6388859695c978c989273ac1fbf7a19900f6
|
refs/heads/master
| 2020-04-22T10:55:06.758101
| 2019-02-12T13:23:21
| 2019-02-12T13:23:21
| 168,320,352
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 8,922
|
r
|
udpipe_exer.R
|
source("beg_lib.R")
uk <-RJSONIO::fromJSON("UK_afterJaccard.json",encoding = "UTF-8")
us <- RJSONIO::fromJSON("US_afterJaccard.json",encoding = "UTF-8")
cn <- RJSONIO::fromJSON("CN_afterJaccard.json",encoding = "UTF-8")
uk <- data_frame(docid=uk$docid, text =uk$content, date=uk$date, source=uk$source,title=uk$title, country="uk")
us <- data_frame(docid=us$docid, text =us$content, date=us$date, source=us$source,title=us$title, country="us")
cn <- data_frame(docid=cn$docid, text =cn$content, date=cn$date, source=cn$source,title=cn$title , country="cn")
df<- rbind(uk, us, cn) %>%
filter(Reduce(`+`, lapply(., is.na)) != ncol(.))#delete empty rows
df<-df%>%mutate(date =as.Date.POSIXct(date/1000))
df = df %>% mutate_at(vars(date), funs(year, month, day))
doc_id<-str_c("doc", rownames(df))
df$docid<-doc_id
#str_extract_all(df$text[1:416],"[qualcomm].*" )
#str_view(df$text, ".html")
#sum(str_count(df$text,"a.i"))
dd<-df$text
source("cln_txt.R")
df$text <-dd
unigram_probs <- df%>%
unnest_tokens(word, text) %>%
count(word, sort = TRUE) %>%
mutate(p = n / sum(n))
library(widyr)
tidy_skipgrams <- df%>%
unnest_tokens(ngram, text, token = "ngrams", n = 8) %>%
mutate(ngramID = row_number()) %>%
unite(skipgramID, docid, ngramID) %>%
unnest_tokens(word, ngram)
skipgram_probs <- tidy_skipgrams %>%
pairwise_count(word, skipgramID, diag = TRUE, sort = TRUE) %>%
mutate(p = n / sum(n))
normalized_prob <- skipgram_probs %>%
filter(n > 20) %>%
rename(word1 = item1, word2 = item2) %>%
left_join(unigram_probs %>%
select(word1 = word, p1 = p),
by = "word1") %>%
left_join(unigram_probs %>%
select(word2 = word, p2 = p),
by = "word2") %>%
mutate(p_together = p / p1 / p2)
normalized_prob %>%
filter(word1 == "china") %>%
arrange(-p_together)
pmi_matrix <- normalized_prob %>%
mutate(pmi = log10(p_together)) %>%
cast_sparse(word1, word2, pmi)
library(irlba)
pmi_svd <- irlba(pmi_matrix, 256, maxit = 1e3)
word_vectors <- pmi_svd$u
rownames(word_vectors) <- rownames(pmi_matrix)
library(broom)
# Rank every vocabulary token by its inner-product similarity to a query
# vector.  `word_vectors` is the (token x dim) embedding matrix produced from
# the PMI SVD above; `selected_vector` is one of its rows.  Returns a tibble
# with columns token / similarity, sorted most-similar first.
search_synonyms <- function(word_vectors, selected_vector) {
  scores <- word_vectors %*% selected_vector
  scores %>%
    tidy() %>%
    as_tibble() %>%
    rename(token = .rownames, similarity = unrowname.x.) %>%
    arrange(desc(similarity))
}
facebook <- search_synonyms(word_vectors, word_vectors["quantum",])
facebook
library(wordcloud)
tidy_skipgrams %>% anti_join(stop_words) %>%count(word, skipgramID,sort = TRUE) %>%
bind_tf_idf( word, skipgramID, n) %>% dplyr::filter(tf_idf>.0009)%>%
with(wordcloud(word, n, max.words = 100 ))
library(ggpmisc)
min <- as.Date("2013-1-1")
max <- as.Date("2015-1-1")
df %>% group_by(date) %>% count() %>%ggplot( aes(date, n)) +geom_line() +
scale_x_date( breaks='months' , limits = c(min, max),date_labels = "%b/%Y" )+ ggpubr::rotate_x_text(-45)
#EXAMINE THE DATA
df %>% group_by(year) %>% mutate(n= n()) %>% distinct(year, .keep_all=TRUE) %>% ggplot() + geom_bar(aes(year,n, fill = source), stat ='identity')
df %>% group_by(country) %>% count() %>% ggplot() + geom_bar(aes(country,n), stat ='identity')
#udmodel <- udpipe_load_model(file = "english-ewt-ud-2.3-181115.udpipe")
#dfp <-udpipe_annotate(udmodel, df$text)
#saveRDS(dfp, "dfPOS.rds")
# Load the precomputed udpipe POS annotations and derive term co-occurrence
# statistics for nouns/adjectives.
dfpos <-as.data.frame(readRDS( "dfPOS.rds"))
# One id per (document, sentence) pair, used as the "document" unit below.
dfpos$id <- unique_identifier(dfpos, fields = c("sentence_id", "doc_id"))
dff <- dfpos %>%dplyr::filter( upos %in% c("NOUN", "ADJ"))
# NOTE(review): gsub()/trimws() on a data.frame coerces it to a character
# vector (one string per column), destroying dff and dfpos as tabular data.
# document_term_frequencies(dff, ...) below then receives a character vector,
# not the filtered annotation table.  The cleaning was presumably meant to
# apply to a text column (e.g. lemma) -- confirm and fix upstream.
dff <-trimws(gsub("\\w*[0-9]+\\w*\\s*", "", dff))#removes numbers with letters
dfpos<-gsub(" *\\b(?<!-)\\w{1,2}(?!-)\\b *", " ",dfpos, perl=T)#removes 1 and 2 chr tokens
stopwrd <- c("*.jpg","http","info","imgs","syndigate","*.png", "abff", "fbeb", "per", "cent", "artificial intelligence")
# NOTE(review): `dtf` is not defined until much later in this script, so this
# line errors (or silently uses a stale `dtf` from the workspace) -- verify.
dfpos<-gsub(paste0("\\b(",paste(stopwrd, collapse="|"),")\\b"), "", dtf$term)
dfdt <- document_term_frequencies(dff, document = "id", term = "lemma")
dtmuk <- document_term_matrix(dfdt)
# Drop rare terms (< 5 occurrences) and two known junk tokens.
dtmuk <- dtm_remove_lowfreq(dtmuk, minfreq = 5)
dtm_clean <- dtm_remove_terms(dtmuk, terms = c( "hl", "rm"))
termcorrelations <- dtm_cor(dtm_clean)
y <- as_cooccurrence(termcorrelations)
# Keep each unordered pair once, with |correlation| above 0.2, strongest first.
y <- subset(y, term1 < term2 & abs(cooc) > 0.2)
y <- y[order(abs(y$cooc), decreasing = TRUE), ]
head(y, 30)
# Lemma co-occurrence within the same document/paragraph/sentence.
cooccurrence(x = dff,
             term = "lemma",
             group = c("doc_id", "paragraph_id", "sentence_id"))
dfpos$phrase_tag <- as_phrasemachine(dfpos$upos, type = "upos")
phrs <- keywords_phrases(x = dfpos$phrase_tag , term = dfpos$lemma,
pattern = "(A|N)*N(P+D*(A|N)*N)*",
is_regex = TRUE, detailed = FALSE)
dplyr::filter(phrs, ngram > 1 & freq > 3)
keywords_collocation(x = dfpos, term = "lemma", group = "doc_id")
rake <- keywords_rake(x = dfpos, term = "lemma", group = "doc_id",
relevant = dfpos$upos %in% c("NOUN", "ADJ"))
dfpos$mwe <- txt_recode_ngram(dfpos$token, compound = rake$keyword, ngram = rake$ngram)
dfpos$mwe <- ifelse(dfpos$mwe %in% rake$keyword, dfpos$mwe, NA)
dfpos$term_noun <- ifelse(dfpos$upos %in% "NOUN", dfpos$lemma, NA)
dtf <- document_term_frequencies(dfpos, document = "doc_id", c("lemma", "mwe"))
dtm <- document_term_matrix(x = dtf)
dtm <- dtm_remove_lowfreq(dtm, minfreq = 3)
library(text2vec)
lda_model = LDA$new(n_topics = 50, doc_topic_prior = 0.1, topic_word_prior = 0.01)
doc_topic_distr =
lda_model$fit_transform(x = dtm, n_iter = 1000,
convergence_tol = 0.001, n_check_convergence = 25,
progressbar = FALSE)
lda_model$plot()
x <- dtf$term
#cl_fr_mallet
dtf$term <-x
xx <-data.frame(dtf)
source("colps_fr_mallet.R")
collapsed$text <-trimws(gsub("\\w*[0-9]+\\w*\\s*", "", collapsed$text))#removes numbers with letters
collapsed$text<-gsub(" *\\b(?<!-)\\w{1,2}(?!-)\\b *", " ", collapsed$text, perl=T)
stopwrd <- c("*.jpg","http","info","img","syndigate","*.png", "abff", "fbeb", "per", "cent", "artificial intelligence")
collapsed$text<-gsub(paste0("\\b(",paste(stopwrd, collapse="|"),")\\b"), "", collapsed$text)
library("dfrtopics")
library(mallet)
# create an empty file of "stopwords"
file.create(empty_file <- tempfile())
docs <- mallet.import(collapsed$doc_id, collapsed$text, empty_file)
#check for Topic model diagnostics for number of topics
k<- 100 #number of topics
n<- 5 #sequences, intervals
#source("no_tops_coh.R"); pp
#a low alpha value: more weight on having each document composed of only a few dominant topics
#a low beta value: more weight on having each topic composed of only a few dominant words.
mm <-train_model(docs, n_topics=50,
n_iters=1000,
seed=1066)
write_mallet_model(mm, "modeling_results")
m <- load_mallet_model_directory("modeling_results")
d <- read_diagnostics(file.path("modeling_results", "diagnostics.xml"))
which.min(d$topics$coherence)
# n is the number of words to return for each topic
top_wrd <-top_words(m, n=10)
lbls <-topic_labels(m, n=3)
xxx<-as.factor(top_wrd$topic)
levels(xxx) <- paste0(lbls)
top_wrd$topic<-xxx
top_wrd%>%dplyr::filter(topic==19 )
top_wrd%>% mutate(word = reorder(word, weight))%>%
ggplot(aes(word, weight, fill = topic)) +
geom_bar(alpha = 0.8, stat = "identity", show.legend = FALSE) +
facet_wrap(~ topic, scales = "free")+theme(text = element_text(size=10)) +
coord_flip()
library(ggwordcloud)
set.seed(42)
top_words(m, n=30) %>%
mutate(angle = 45 * sample(-2:2, n(), replace = TRUE, prob = c(1, 1, 4, 1, 1)))%>%
ggplot(aes(label = word, size = weight,color = weight,
angle = angle)) +
geom_text_wordcloud_area(rm_outside = TRUE) +
scale_size_area(max_size = 24) +
scale_color_gradient(low = "darkred", high = "red")+
theme_minimal()+facet_wrap(~ topic, scales = "free")
#print some diagnosis measures
t = list()
for(i in 1:25)
{
str = paste0(names(d$'topics'[3:13]), "=", eval(d$'topics'[i,c(3:13)]))
t[[length(t)+1]] = str
}
for(i in 1:25){
xxx <-top_words(m, n=50) %>%filter(topic==i ) %>%
mutate(angle = 45 * sample(-2:2, n(), replace = TRUE, prob = c(1, 1, 4, 1, 1)))
p <-ggplot(xxx,aes(label = word, size = weight,color = weight,
angle = angle)) +
geom_text_wordcloud_area(rm_outside = TRUE) +
scale_size_area(max_size = 24) +
scale_color_gradient(low = "darkred", high = "red")+
theme_minimal()
library(gridExtra)
grid.arrange(p, right = tableGrob(matrix(t[[i]],ncol=1),
theme = ttheme_minimal(padding = unit(c(.1,.1),"line")))
,vp=viewport(width=.75, height=1.5))
}
|
e05926b05491c5192be0f77f04615a1065e97b86
|
b2f61fde194bfcb362b2266da124138efd27d867
|
/code/dcnf-ankit-optimized/Results/QBFLIB-2018/A1/Database/Kronegger-Pfandler-Pichler/bomb/p10-5.pddl_planlen=11/p10-5.pddl_planlen=11.R
|
2acac12ec8446aab25bbe4af59244a4aab38d4a3
|
[] |
no_license
|
arey0pushpa/dcnf-autarky
|
e95fddba85c035e8b229f5fe9ac540b692a4d5c0
|
a6c9a52236af11d7f7e165a4b25b32c538da1c98
|
refs/heads/master
| 2021-06-09T00:56:32.937250
| 2021-02-19T15:15:23
| 2021-02-19T15:15:23
| 136,440,042
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 73
|
r
|
p10-5.pddl_planlen=11.R
|
71cad296084391daebb7c2974ec1e9f9 p10-5.pddl_planlen=11.qdimacs 1325 19976
|
3db6f8b0b3ce650ca0585a9148e09df4259e53f6
|
69668ad25d79800fed0a6efcf6fc8b0639d34e9c
|
/Main/Figure_epigraHMM/Figure_ThreeCellLines.R
|
af4ac91e56ed213de4b0874bf39bc7cf16da6cbf
|
[
"MIT"
] |
permissive
|
plbaldoni/epigraHMMPaper
|
96dc6f18997a42ad8797acd17d86ae1712caa8ef
|
93cab7c12bac3ca96c5d4e23cbe5c7aead1f1bc4
|
refs/heads/main
| 2023-03-30T20:27:08.611206
| 2021-04-10T10:51:02
| 2021-04-10T10:51:02
| 301,452,680
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 10,941
|
r
|
Figure_ThreeCellLines.R
|
library(dplyr)
library(ggplot2)
library(GenomicRanges)
library(reshape2)
library(tidyr)
library(magrittr)
library(plyr)
library(ggpubr)
library(data.table)
library(RColorBrewer)
library(SummarizedExperiment)
bp = 500
chromosome = 'chr7'
idx.genome = c(130720310, 133063299)
ngroups = 3
mark = 'H3K27me3'
data = 'Encode_threecells'
size = 2
fdr = 0.05
# Loading data
load(
paste0(
'../../Data/Encode_helas3/H3K27me3/wgEncodeBroadHistoneHelas3H3k27me3StdAlnRep1.markdup.q10.sorted.RData'
)
)
helas31 = subset(counts[[paste0(bp)]])
rm(counts)
load(
paste0(
'../../Data/Encode_helas3/H3K27me3/wgEncodeBroadHistoneHelas3H3k27me3StdAlnRep2.markdup.q10.sorted.RData'
)
)
helas32 = subset(counts[[paste0(bp)]])
rm(counts)
load(
paste0(
'../../Data/Encode_hepg2/H3K27me3/wgEncodeBroadHistoneHepg2H3k27me3StdAlnRep1.markdup.q10.sorted.RData'
)
)
hepg21 = subset(counts[[paste0(bp)]])
rm(counts)
load(
paste0(
'../../Data/Encode_hepg2/H3K27me3/wgEncodeBroadHistoneHepg2H3k27me3StdAlnRep2.markdup.q10.sorted.RData'
)
)
hepg22 = subset(counts[[paste0(bp)]])
rm(counts)
load(
paste0(
'../../Data/Encode_huvec/H3K27me3/wgEncodeBroadHistoneHuvecH3k27me3StdAlnRep1.markdup.q10.sorted.RData'
)
)
huvec1 = subset(counts[[paste0(bp)]])
rm(counts)
load(
paste0(
'../../Data/Encode_huvec/H3K27me3/wgEncodeBroadHistoneHuvecH3k27me3StdAlnRep2.markdup.q10.sorted.RData'
)
)
huvec2 = subset(counts[[paste0(bp)]])
# Genomic ranges
counts = subset(counts[[paste0(bp)]], chr == chromosome, select = c('chr', 'start', 'stop'))
gr.counts = with(counts, GRanges(chr, IRanges(start = start, end = stop)))
# Combining data
ChIP = cbind(counts, Window = 1:nrow(counts))
ChIP = cbind(ChIP, Helas3_1 = subset(helas31, chr == chromosome)$counts)
ChIP = cbind(ChIP, Helas3_2 = subset(helas32, chr == chromosome)$counts)
ChIP = cbind(ChIP, Hepg2_1 = subset(hepg21, chr == chromosome)$counts)
ChIP = cbind(ChIP, Hepg2_2 = subset(hepg22, chr == chromosome)$counts)
ChIP = cbind(ChIP, Huvec_1 = subset(huvec1, chr == chromosome)$counts)
ChIP = cbind(ChIP, Huvec_2 = subset(huvec2, chr == chromosome)$counts)
ChIP = as.data.table(ChIP)
ChIP.se <- SummarizedExperiment::SummarizedExperiment(
assays = list(counts = as.matrix(ChIP[, c('Helas3_1',
'Helas3_2',
'Hepg2_1',
'Hepg2_2',
'Huvec_1',
'Huvec_2')])),
rowRanges = GRanges(ChIP$chr, IRanges::IRanges(ChIP$start, ChIP$stop)),
colData = data.frame(id = c(
c(
'Helas3_1',
'Helas3_2',
'Hepg2_1',
'Hepg2_2',
'Huvec_1',
'Huvec_2'
)
))
)
ChIP.se <-
epigraHMM::normalizeCounts(ChIP.se, epigraHMM::controlEM(), span = 1)
ChIP[, (paste0(
c(
'Helas3_1',
'Helas3_2',
'Hepg2_1',
'Hepg2_2',
'Huvec_1',
'Huvec_2'
),
'.adj'
)) := .SD / exp(assay(ChIP.se, 'offsets')),
.SDcols = c('Helas3_1',
'Helas3_2',
'Hepg2_1',
'Hepg2_2',
'Huvec_1',
'Huvec_2')]
ChIP[, Helas3 := rowSums(.SD), .SDcols = c('Helas3_1.adj', 'Helas3_2.adj')]
ChIP[, Hepg2 := rowSums(.SD), .SDcols = c('Hepg2_1.adj', 'Hepg2_2.adj')]
ChIP[, Huvec := rowSums(.SD), .SDcols = c('Huvec_1.adj', 'Huvec_2.adj')]
ChIP <-
ChIP[, c('chr', 'start', 'stop', 'Window', 'Helas3', 'Hepg2', 'Huvec')] %>%
as_tibble() %>%
gather(Group, Counts, Helas3:Huvec)
ChIP$Mark = mark
# Genomic Ranges to Plot
idx = which.min(abs(counts$start - idx.genome[1])):which.min(abs(counts$start -
idx.genome[2]))
# Color of peak calls
methods <-
c(
'Genes',
"ChIPComp + MACS2",
"csaw",
"DiffBind + MACS2",
"diffReps",
"epigraHMM",
'RSEG',
'THOR'
)
colors = c('#000000', Polychrome::kelly.colors(22)[c('red',
'yellow',
'purplishpink',
'yellowgreen',
'lightblue',
'buff',
'orange')])
names(colors) <- methods
# Position of peak calls (e.g, 1.2 means that the peak call will be placed 1.2 times the maximum observed read count of the plotted region)
peakpos = c(1.05, 1.15, 1.25, 1.35, NA, NA, NA, NA)
names(peakpos) = methods
# Loading csaw
csaw <-
fread(list.files(
file.path('../../Public/csaw', mark, data, paste0('Output', bp)),
'.tsv.gz',
full.names = TRUE
))
gr.csaw <-
with(csaw, GRanges(seqnames, IRanges(start = start, end = end)))
gr.csaw$FDR <- csaw$FDR
gr.csaw <- gr.csaw[seqnames(gr.csaw) %in% chromosome]
gr.csaw <- gr.csaw[gr.csaw$FDR < fdr]
csaw = cbind(
counts,
Window = 1:nrow(counts),
Group = 'Helas3',
Method = 'csaw'
)
csaw = cbind(csaw, Output = 1 * overlapsAny(gr.counts, gr.csaw))
csaw$Counts = ifelse(csaw$Output == 1, max(ChIP[ChIP$Window %in% idx, 'Counts']) *
peakpos['csaw'], NA)
csaw <- csaw %>% as_tibble()
csaw$Output %<>% mapvalues(from = 0:1,
to = c('Non-differential', 'Differential'))
# Loading ChIPComp
chipcomp = fread(list.files(
file.path('../../Public/ChIPComp', mark, data, paste0('Output', bp)),
'.txt',
full.names = TRUE
), header = T)
chipcomp[, FDR := p.adjust(pvalue.wald, method = 'BH')]
gr.chipcomp = with(chipcomp, GenomicRanges::GRanges(chr, IRanges(start, end)))
gr.chipcomp$FDR = chipcomp$FDR
gr.chipcomp <- gr.chipcomp[seqnames(gr.chipcomp) %in% chromosome]
gr.chipcomp = gr.chipcomp[gr.chipcomp$FDR < fdr]
ChIPComp = cbind(
counts,
Window = 1:nrow(counts),
Group = 'Helas3',
Method = 'ChIPComp + MACS2'
)
ChIPComp = cbind(ChIPComp, Output = 1 * overlapsAny(gr.counts, gr.chipcomp))
ChIPComp$Counts = ifelse(ChIPComp$Output == 1, max(ChIP[ChIP$Window %in% idx, 'Counts']) *
peakpos['ChIPComp + MACS2'], NA)
ChIPComp <- ChIPComp %>% as.tbl()
ChIPComp$Output %<>% mapvalues(from = 0:1,
to = c('Non-differential', 'Differential'))
# Loading DiffBind
diffbind = fread(list.files(
file.path('../../Public/DiffBind', mark, data, paste0('Output', bp)),
'.txt',
full.names = TRUE
), header = T)
gr.diffbind = with(diffbind, GenomicRanges::GRanges(seqnames, IRanges(start, end)))
gr.diffbind$FDR = diffbind$FDR #It already gives me adjusted p-values
gr.diffbind <- gr.diffbind[seqnames(gr.diffbind) %in% chromosome]
gr.diffbind = gr.diffbind[gr.diffbind$FDR < fdr]
DiffBind = cbind(
counts,
Window = 1:nrow(counts),
Group = 'Helas3',
Method = 'DiffBind + MACS2'
)
DiffBind = cbind(DiffBind, Output = 1 * overlapsAny(gr.counts, gr.diffbind))
DiffBind$Counts = ifelse(DiffBind$Output == 1, max(ChIP[ChIP$Window %in% idx, 'Counts']) *
peakpos['DiffBind + MACS2'], NA)
DiffBind <- DiffBind %>% as.tbl()
DiffBind$Output %<>% mapvalues(from = 0:1,
to = c('Non-differential', 'Differential'))
# Loading refseq genes
load('../../Public/Salmon/ENCODE.rnaseq.scaled.RData')
ENCODE.rnaseq.scaled <-
ENCODE.rnaseq.scaled[, colData(ENCODE.rnaseq.scaled)$Cells %in% c('Helas3', 'Hepg2', 'Huvec')]
ENCODE.rnaseq.scaled$Cells <- droplevels(ENCODE.rnaseq.scaled$Cells)
ENCODE.rnaseq.scaled <-
ENCODE.rnaseq.scaled[rowRanges(ENCODE.rnaseq.scaled)$gene_biotype == 'protein_coding']
ENCODE.rnaseq.scaled <-
ENCODE.rnaseq.scaled[overlapsAny(rowRanges(ENCODE.rnaseq.scaled),
GRanges(chromosome, IRanges(start = idx.genome[1], end = idx.genome[2])))]
ENCODE.rnaseq.scaled <-
ENCODE.rnaseq.scaled[end(ENCODE.rnaseq.scaled) < idx.genome[2] &
start(ENCODE.rnaseq.scaled) > idx.genome[1]]
refseq.out = cbind(
counts,
Window = 1:nrow(counts),
Group = 'Helas3',
Method = 'Genes'
)
refseq.out = cbind(refseq.out,
Output = 1 * overlapsAny(gr.counts, ENCODE.rnaseq.scaled))
refseq.out$Counts = ifelse(refseq.out$Output == 1, max(ChIP[ChIP$Window %in%
idx, 'Counts']) * peakpos['Genes'], NA)
refseq.out <- refseq.out %>% as.tbl()
refseq.out$Output %<>% mapvalues(from = 0:1,
to = c('Non-differential', 'Differential'))
# Organizing the data
dt.segment = rbindlist(list(
as.data.table(ChIPComp),
as.data.table(csaw),
as.data.table(DiffBind),
as.data.table(refseq.out)
))
dt.segment$Method %<>% factor(levels = c('ChIPComp + MACS2', 'csaw', 'DiffBind + MACS2', 'Genes'))
## Plotting Peak calls
fig_example <-
ggplot(data = ChIP[ChIP$Window %in% idx, ], aes(x = start, y = Counts)) +
facet_grid(rows = vars(Group)) +
geom_segment(
x = 130785000,
xend = 130785000,
y = 0,
yend = 42.5,
color = 'grey',
linetype = 2
) +
geom_segment(
x = 131015000,
xend = 131015000,
y = 0,
yend = 42.5,
color = 'grey',
linetype = 2
) +
geom_segment(
x = 131200000,
xend = 131200000,
y = 0,
yend = 42.5,
color = 'grey',
linetype = 3
) +
geom_segment(
x = 131350000,
xend = 131350000,
y = 0,
yend = 42.5,
color = 'grey',
linetype = 3
) +
geom_segment(
x = 132250000,
xend = 132250000,
y = 0,
yend = 42.5,
color = 'grey',
linetype = 4
) +
geom_segment(
x = 131795000,
xend = 131795000,
y = 0,
yend = 42.5,
color = 'grey',
linetype = 4
) +
annotate(
'rect',
alpha = 0.10,
xmin = 130785000,
xmax = 131015000,
ymin = 0,
ymax = 42.5
) +
annotate(
'rect',
alpha = 0.10,
xmin = 131200000,
xmax = 131350000,
ymin = 0,
ymax = 42.5
) +
annotate(
'rect',
alpha = 0.10,
xmin = 132250000,
xmax = 131795000,
ymin = 0,
ymax = 42.5
) +
geom_line() +
geom_segment(
data = dt.segment[Window %in% idx, ],
aes(
x = start,
xend = stop,
y = Counts,
yend = Counts,
color = Method
),
size = size
) +
scale_x_continuous(
limits = range(ChIP[ChIP$Window %in% idx, 'start']),
labels = scales::comma,
position = 'top'
) +
scale_color_manual(values = colors) +
theme_bw() + xlab(paste0('Genomic Window (', chromosome, ')')) + ylab('Normalized ChIP-seq Counts') +
guides(col = guide_legend(nrow = 1)) +
theme(
panel.grid.major = element_blank(),
panel.grid.minor = element_blank(),
legend.title = element_blank(),
legend.position = 'top',
legend.direction = 'horizontal'
)
save(fig_example,file = 'Figure_ThreeCellLines.RData')
ggsave(fig_example,filename = 'Figure_ThreeCellLines.pdf',height = 4.5,width = 9,dpi = 'retina')
|
a0f66d8e3e1c28fd3fa41b50de6ac765483d072c
|
27674239c0da0b7afc6ad9dc2622e084c3f5c004
|
/inst/ochiai_worker.R
|
c79b54738ce1753d55fe84f8e87ba04531821754
|
[] |
no_license
|
RobinHankin/knotR
|
112248605c8a89a21641be35f2363c19db1c3783
|
0a5a6015a51340faa1ee43066d76be8f39adb499
|
refs/heads/master
| 2023-05-15T03:19:57.311824
| 2023-05-14T09:03:35
| 2023-05-14T09:03:35
| 99,854,849
| 5
| 0
| null | 2017-10-15T04:48:05
| 2017-08-09T21:37:28
|
R
|
UTF-8
|
R
| false
| false
| 635
|
r
|
ochiai_worker.R
|
# Optimise the Ochiai knot projection: read the hand-drawn SVG, enforce the
# crossing structure, and write the optimised knot back out.
library(knotR)
filename <- "ochiai.svg"
# Crossing specification: one row per crossing, each row a pair of strand
# numbers in the order expected by knotoptim()'s `ou` argument (over/under
# per the knotR convention -- see package docs).
ou <-
matrix(c(
07,01,
01,20,
02,09,
22,03,
16,04,
05,18,
25,06,
08,21,
10,14,
19,11,
13,20,
15,23,
24,19
),ncol=2,byrow=TRUE)
# Optimise the knot layout under the crossing constraints; xver and prob are
# knotR-specific tuning parameters, print.level=2 traces the optimiser.
jj <- knotoptim(filename,
xver = 6,
ou = ou,
prob=0,
iterlim=1000, print.level=2
# control=list(trace=100,maxit=1000), # these arguments for optim()
# useNLM=FALSE
)
# Write the optimised coordinates back into the SVG, then dump the knot
# object as parseable R source next to it (ochiai.S).
write_svg(jj,filename,safe=FALSE)
dput(jj,file=sub('.svg','.S',filename))
|
3f8da066da4a50249aada11c66607f1976d9ead2
|
0a9e904d3e0b8983bc2bdd861bd6e96da66b2d04
|
/af/namelist.R
|
8a23e4962137f361cd1f2b3f7c23ed352affe771
|
[] |
no_license
|
donasoo/gitR
|
8fa8a4e53a1f26b5f986289f10270bc95e579a41
|
176758134ea31fd2b186cad909cded950cbaaeec
|
refs/heads/master
| 2021-05-15T01:09:09.171793
| 2019-10-31T14:40:56
| 2019-10-31T14:40:56
| 56,468,082
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,920
|
r
|
namelist.R
|
# Scrape military roster listing pages into a tidy data frame and persist it
# as CSV + RData.  Depends on fectch.rosters() (sic) being defined elsewhere;
# it fills `roster.nodes` with parsed HTML documents (rvest-style nodes).
library(stringr)
library(dplyr)

# Ranks to split off the caption.  Alternation matches at the leftmost
# position, so the longer two-word ranks still win when present.
rank_pattern <-'GENERAL|BRIGADIER GENERAL|LIEUTENANT GENERAL|MAJOR GENERAL'

# Field extractors: each takes one parsed roster page and returns trimmed text.
f.caption <- function(x) x %>% html_nodes("tr.dal_row a.title") %>% html_text() %>% str_trim()
f.url <- function(x) x %>% html_nodes("a.title") %>% html_attr('href') %>% str_trim()
f.img <- function(x) x %>% html_nodes("a.da_news_link img") %>% html_attr('src') %>% str_trim()
f.alt <- function(x) x %>% html_nodes("a.da_news_link img") %>% html_attr('alt') %>% str_trim()
f.remark <- function(x) x %>% html_nodes("tr.dal_row span.red") %>% html_text() %>% str_trim()
f.update <- function(x) x %>% html_nodes("tr.dal_row td.updated") %>% html_text() %>% str_trim()
#f.intro <- function(x) x %>% html_nodes("a.title") %>% html_text() %>% str_trim()

roster.nodes <- list()
roster.nodes <- fectch.rosters(roster.nodes, 1)

# Build the roster table in one pipeline.  (The original assigned `caption`
# twice -- once in data.frame() and again in mutate() -- once is enough.)
rosters <- data.frame(caption = sapply(roster.nodes, FUN = f.caption) %>% unlist(),
                      stringsAsFactors = FALSE)
rosters <- rosters %>%
  mutate(rank   = str_extract(caption, rank_pattern),
         name   = str_replace(caption, rank_pattern, ''),
         url    = sapply(roster.nodes, FUN = f.url) %>% unlist(),
         id     = str_extract(url, '\\d{6}'),     # 6-digit person id in the URL
         img    = sapply(roster.nodes, FUN = f.img) %>% unlist(),
         alt    = sapply(roster.nodes, FUN = f.alt) %>% unlist(),
         remark = sapply(roster.nodes, FUN = f.remark) %>% unlist(),
         year   = str_extract(remark, '\\d{4}'),  # 4-digit year in the remark
         #intro = sapply(roster.nodes, FUN = f.intro) %>% unlist(),
         update = sapply(roster.nodes, FUN = f.update) %>% unlist(),
         # BUG FIX: the original format '%m/%d%Y' is missing the second '/',
         # so as.Date() returned NA for every "mm/dd/yyyy"-style value.
         update = as.Date(update, '%m/%d/%Y'))

write.csv(rosters, 'roster.csv')
save(rosters, file = 'roster.RData')
|
d04e3af8461937feb5fff35c8fe40ab868aa9c38
|
05db64975eda0ad69a1ef7a5ef60c8513d8261b2
|
/Logistic_Regression.R
|
a057998e4abedb2f1c9f9766a4215fcef2686fb4
|
[] |
no_license
|
nishaganesh/hypothesis_testing
|
3d625ba7079fdf084d2ed6ddf13d5f6b7424380a
|
c5a7c5b8c8fa127b5d894d04e01c2493d5209a5a
|
refs/heads/master
| 2022-04-05T19:49:43.474634
| 2020-02-23T02:17:58
| 2020-02-23T02:17:58
| 242,439,916
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 10,008
|
r
|
Logistic_Regression.R
|
# INST627 Project
# Team: Magnificent
#load the data
water <- read.csv(file.choose())
#number of variables and observations
dim(water)
str(water)
#descriptive statistics
summary(water)
#exploratory analysis
par(mar = rep(2, 4))
plot(water$What.is.your.preferred.method.of.drinking.water., col="green", main="Preferred method of\n drinking water")
plot(water$Which.age.group.do.you.belong.to., col="green", main="Age group")
plot(water$Did.you.ever.fall.sick.because.of.drinking.tap.water., col="green", main="Falling sick because of\n drinking tap water")
hist(water$How.would.you.rate.your.trust.in.all.brands.of.bottled.water.to.be.contaminate.free...Rate.from.1.to.5..with.5.being.trusting.completely., col="green", main="Distribution of trust in\n bottled water rating", xlab="Trust in bottled water rating")
par(mfrow=c(1,1))
legend("topleft", legend=c("5-High Trust", "1- No Trust"), cex=0.7, bty = "n")
hist(water$How.would.you.rate.your.trust.in.the.tap.water.at.your.house.to.be.contaminate.free..Rate.from.1.to.5..with.5.being.trusting.completely., col="green", main = "Distribution of trust in\n tap water rating", xlab="Trust in tap water rating")
legend("topleft", legend=c("5-High Trust", "1- No Trust"), cex=0.7, bty = "n")
hist(water$How.would.you.rate.the.taste.of.bottled.water..Rate.from.1.to.5..with.5.being.the.highest., col="green", main="Distribution of taste of bottled water rating", xlab="Taste of bottled water rating")
legend("topleft", legend=c("5-Very good", "1- Very bad"), cex=0.7, bty = "n")
hist(water$How.would.you.rate.the.taste.of.your.tap.water...Rate.from.1.to.5..with.5.being.the.highest., col="green", main=" Distribution of taste of tap water rating", xlab="Taste of tap water rating")
legend("topleft", legend=c("5-Very good", "1- Very bad"), cex=0.7, bty = "n")
par(mar=c(11, 4.1, 4.1, 2.1))
plot(water$In.which.county.of.Maryland.do.you.reside., col="green", las=2, main="County")
mosaicplot(table(water$In.which.county.of.Maryland.do.you.reside., water$What.is.your.preferred.method.of.drinking.water), col = rainbow(4), las=2, main = "Water preference vs county")
mosaicplot(table(water$Which.age.group.do.you.belong.to., water$What.is.your.preferred.method.of.drinking.water), col = rainbow(4), las=4, main = "Water preference vs Age group")
mosaicplot(table(water$Did.you.ever.fall.sick.because.of.drinking.tap.water., water$What.is.your.preferred.method.of.drinking.water), col = rainbow(4), las=4, main = "Water preference vs falling sick")
mosaicplot(table(water$How.many.glasses.bottles.of.water.do.you.drink.per.day.on.average...Consider.a.scale.of.16oz.glass.bottle., water$What.is.your.preferred.method.of.drinking.water.), col = rainbow(4), las=4, main = "Water preference vs consumption of water in glasses")
# Ensure the 'car' package (provides recode(), used below) is available,
# then attach it. Guarding with requireNamespace() avoids re-downloading
# and re-installing the package on every run of the script.
if (!requireNamespace("car", quietly = TRUE)) {
  install.packages("car")
}
library(car)
# --- Build modelling variables from the raw survey columns -------------------
# Inspect the raw preference levels before modelling
levels(water$What.is.your.preferred.method.of.drinking.water.)
# Outcome variable for the logistic regressions below.
# NOTE(review): glm(family = binomial) requires a two-level response --
# confirm `preference` is (or is collapsed to) binary.
preference <- factor(water$What.is.your.preferred.method.of.drinking.water.)
table(preference)
factor_county <- factor(water$In.which.county.of.Maryland.do.you.reside.)
table(factor_county)
levels(factor_county)
# Collapse counties into 5 buckets via car::recode on the underlying integer
# codes; original codes 1, 4-10 and 13-14 all map to bucket 5 ("Others")
county<-recode(as.numeric(factor_county), "1=5;2=1;3=2;4:10=5;11=3;12=4;13:14=5")
county<-factor(county, labels = c("Baltimore City","Baltimore County","MoCo", "PG", "Others"))
# Sanity check: original counties vs collapsed buckets
table(water$In.which.county.of.Maryland.do.you.reside., county)
# Independent variable county
model1 <- glm(preference ~ county, family=binomial)
summary(model1)
# Odds ratios with 95% confidence intervals
exp(cbind(OR=coef(model1), confint(model1)))
factor_age <- factor(water$Which.age.group.do.you.belong.to.)
table(factor_age)
levels(factor_age)
# Collapse age groups; codes 5-7 are lumped into "Others"
age <- recode(as.numeric(water$Which.age.group.do.you.belong.to.), "1=2; 2=1; 3=3; 4=4; 5:7=5")
age <-factor(age, labels = c("25-34","18-24","35-44", "45-54", "Others"))
table(water$Which.age.group.do.you.belong.to., age)
# Independent variable: age group
model2 <- glm(preference ~ age , family=binomial)
summary(model2)
exp(cbind(OR=coef(model2), confint(model2)))
# Independent variables: county, age group
model3 <- glm(preference ~ age + county, family=binomial)
summary(model3)
exp(cbind(OR=coef(model3), confint(model3)))
factor_device <- factor(water$Do.you.use.a.device.at.home.to.filter.your.tap.water.)
table(factor_device)
# Binarize: "No" and "Don't know" responses are pooled into one level
device <- recode(as.numeric(water$Do.you.use.a.device.at.home.to.filter.your.tap.water.), "1=2; 2=2; 3=1")
device <-factor(device, labels = c("Yes","No and Don't know"))
table(water$Do.you.use.a.device.at.home.to.filter.your.tap.water., device)
# Independent variable: device to filter tap water
model4 <- glm(preference ~ device , family=binomial)
summary(model4)
exp(cbind(OR=coef(model4), confint(model4)))
# Independent variables: county, age group, device to filter tap water
model5 <- glm(preference ~ age + county + device , family=binomial)
summary(model5)
exp(cbind(OR=coef(model5), confint(model5)))
factor_glasses <- factor(water$How.many.glasses.bottles.of.water.do.you.drink.per.day.on.average...Consider.a.scale.of.16oz.glass.bottle.)
table(factor_glasses)
# NOTE(review): "11=11" is a no-op recode -- presumably a category was meant
# to be collapsed here; verify against the intended analysis.
glasses <- recode(as.numeric(water$How.many.glasses.bottles.of.water.do.you.drink.per.day.on.average...Consider.a.scale.of.16oz.glass.bottle.), "11=11")
table(glasses)
# Independent variables: county, age group, device to filter tap water- Yes, number of glasses of water
model6 <- glm(preference ~ age + county + device + glasses , family=binomial)
summary(model6)
exp(cbind(OR=coef(model6), confint(model6)))
factor_sick <- factor(water$Did.you.ever.fall.sick.because.of.drinking.tap.water.)
table(factor_sick)
# Binarize: "Yes" and "Don't know" responses are pooled into one level
sick <- recode(as.numeric(water$Did.you.ever.fall.sick.because.of.drinking.tap.water.), "1=2; 2=1; 3=2")
sick <-factor(sick, labels = c("No","Yes and Don't know"))
table(water$Did.you.ever.fall.sick.because.of.drinking.tap.water., sick)
# Independent variables: county, age group, device to filter tap water, number of glasses of water, fall sick
model7 <- glm(preference ~ age + county + device + glasses + sick, family=binomial)
summary(model7)
exp(cbind(OR=coef(model7), confint(model7)))
# --- Continuous 1-5 ratings as predictors, models 8-13 -----------------------
factor_taste <- factor(water$How.would.you.rate.the.taste.of.your.tap.water...Rate.from.1.to.5..with.5.being.the.highest.)
table(factor_taste)
# Treat the 1-5 Likert rating as numeric for the regressions
taste <- as.numeric(water$How.would.you.rate.the.taste.of.your.tap.water...Rate.from.1.to.5..with.5.being.the.highest.)
table(taste)
# Independent variable: taste of tap water
model8 <- glm(preference ~ taste, family=binomial)
summary(model8)
# Odds ratios with 95% confidence intervals
exp(cbind(OR=coef(model8), confint(model8)))
# Independent variables: county, age group, falling sick, taste of tap water
model9 <- glm(preference ~ county + age + sick + taste, family=binomial)
summary(model9)
exp(cbind(OR=coef(model9), confint(model9)))
# Independent variables: county, age group, falling sick, taste of tap water, number of glasses of water
model10 <- glm(preference ~ county + age + sick + glasses + taste , family=binomial)
summary(model10)
exp(cbind(OR=coef(model10), confint(model10)))
factor_trust <- factor(water$How.would.you.rate.your.trust.in.the.tap.water.at.your.house.to.be.contaminate.free..Rate.from.1.to.5..with.5.being.trusting.completely.)
table(factor_trust)
trust <- as.numeric(water$How.would.you.rate.your.trust.in.the.tap.water.at.your.house.to.be.contaminate.free..Rate.from.1.to.5..with.5.being.trusting.completely.)
table(trust)
# Independent variables: county, age group, falling sick, taste of tap water, number of glasses of water, trust in tap water
model11 <- glm(preference ~ county + age + sick + glasses + taste + trust , family=binomial)
summary(model11)
exp(cbind(OR=coef(model11), confint(model11)))
factor_trust_bottled <- factor(water$How.would.you.rate.your.trust.in.all.brands.of.bottled.water.to.be.contaminate.free...Rate.from.1.to.5..with.5.being.trusting.completely.)
table(factor_trust_bottled)
trust_bottled <- as.numeric(water$How.would.you.rate.your.trust.in.all.brands.of.bottled.water.to.be.contaminate.free...Rate.from.1.to.5..with.5.being.trusting.completely.)
table(trust_bottled)
# Independent variables: county, age group, falling sick, taste of tap water, number of glasses of water, trust in tap water, trust in bottled water
model12 <- glm(preference ~ county + age + sick + glasses + taste + trust + trust_bottled , family=binomial)
summary(model12)
exp(cbind(OR=coef(model12), confint(model12)))
# taste_bottled is first built as a factor for inspection, then immediately
# overwritten with the numeric version actually used in model13
taste_bottled <- factor(water$How.would.you.rate.the.taste.of.bottled.water..Rate.from.1.to.5..with.5.being.the.highest.)
table(taste_bottled)
taste_bottled <- as.numeric(water$How.would.you.rate.the.taste.of.bottled.water..Rate.from.1.to.5..with.5.being.the.highest.)
table(taste_bottled)
# Independent variables: county, age group, falling sick, taste of tap water, number of glasses of water, trust in tap water, trust in bottled water
model13 <- glm(preference ~ county + age + sick + glasses + taste + trust + trust_bottled + taste_bottled , family=binomial)
summary(model13)
exp(cbind(OR=coef(model13), confint(model13)))
# checking assumptions - multicollinearity: pairwise correlations between
# the numerically coded predictors
cor.test(as.numeric(county), as.numeric(age))
cor.test(as.numeric(county), as.numeric(sick))
cor.test(as.numeric(county), as.numeric(glasses))
cor.test(as.numeric(county), as.numeric(trust))
cor.test(as.numeric(county), as.numeric(taste_bottled))
cor.test(as.numeric(county), as.numeric(trust_bottled))
cor.test(as.numeric(county), as.numeric(taste))
cor.test(as.numeric(age), as.numeric(sick))
cor.test(as.numeric(age), as.numeric(trust_bottled))
cor.test(as.numeric(age), as.numeric(glasses))
cor.test(as.numeric(age), as.numeric(trust))
cor.test(as.numeric(glasses), as.numeric(sick))
cor.test(as.numeric(glasses), as.numeric(taste))
cor.test(as.numeric(trust), as.numeric(trust_bottled))
cor.test(as.numeric(trust_bottled), as.numeric(taste))
# Regression diagnostic plots for the full model.
# BUG FIX: the original formula listed as.numeric(age) twice and omitted
# county; model13 (the full model this mirrors) uses county + age + ...,
# so the duplicated first term was almost certainly meant to be county.
pl <- glm(as.numeric(preference)~as.numeric(county) + as.numeric(age) + as.numeric(sick) + as.numeric(glasses) + as.numeric(taste) + as.numeric(trust) + as.numeric(trust_bottled) + as.numeric(taste_bottled))
par(mfrow= c(2,2))  # 2x2 grid: residuals, QQ, scale-location, leverage
plot(pl)
par(mfrow= c(1,1))  # restore single-panel layout
|
ecb701d413ab78be31340a94e3ae5745f6cd99da
|
7de34974ddc7bb2f12246a0372f2cf8c6f7fa0c9
|
/R/S3.R
|
4d2c1a0ca27e6b78c4aad2de5901b20eab3b49aa
|
[
"MIT"
] |
permissive
|
FinnishCancerRegistry/vbscript
|
94add9cedf26ae2062ed99d5299100b2d4757c88
|
cb35e40d3c4be48fb23636bd9c4a38ee66743460
|
refs/heads/master
| 2023-04-11T03:06:47.232942
| 2023-04-02T09:21:37
| 2023-04-02T09:21:37
| 218,963,246
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,117
|
r
|
S3.R
|
#' @title `vbscript_lines`
#' @description
#' Coercion to class `vbscript_lines`
#' @param x `[character]` (mandatory, no default)
#'
#' character string vector to coerce to `vbscript_lines`
#' @return a `vbscript_lines` object
#' @export
as.vbscript_lines <- function(x) {
  # S3 generic: dispatches on class(x); see as.vbscript_lines.character
  UseMethod("as.vbscript_lines")
}
#' @describeIn as.vbscript_lines coerces a character string vector to class
#'   `vbscript_lines`
#' @export
as.vbscript_lines.character <- function(x) {
  assert_is_character_nonNA_vector(x)
  # Attach the S3 class while keeping the vector (and any names) intact.
  structure(as.character(x), class = c("vbscript_lines", "character"))
}
#' @title Print `vbscript_lines`
#' @description
#' Print method for `vbscript_lines` objects
#' @param x `[vbscript_lines]` (mandatory, no default)
#'
#' a `vbscript_lines` object
#' @param max.print `[integer, numeric]` (mandatory, default 50)
#'
#' maximum number of lines allowed to be printed; if `x` has more elements
#' than this, only the first 10 and last 10 elements are shown in print
#' @param ... added for compatibility with [print]
#' @return `NULL`, invisibly; called for its side effect of printing
#' @export
print.vbscript_lines <- function(x, max.print = 50, ...) {
  n_lines <- length(x)
  stopifnot(
    length(max.print) == 1,
    max.print %% 1 == 0,
    max.print > 0
  )
  # Right-align row numbers to the width of the largest line number.
  # BUG FIX: the original used formatC(digits = ...), which is ignored for
  # integer input; width = is the argument that pads.
  row_num <- formatC(seq_len(n_lines), width = nchar(n_lines), flag = " ")
  # Truncated display only makes sense with more than 20 lines
  # (10 head + 10 tail); this guard also prevents indexing past the end
  # of `x` (printing NA lines) when max.print < n_lines <= 20.
  truncate <- n_lines > max.print && n_lines > 20L
  cat("--- vbscript_lines vector with", n_lines, "lines ---\n")
  if (truncate) {
    head_idx <- 1:10
    tail_idx <- (n_lines - 9L):n_lines
    cat(paste0(row_num[head_idx], ": ", x[head_idx]), sep = "\n")
    cat("---", n_lines - 20L, "lines not shown ---\n")
    # BUG FIX: the original printed x[11:20] (lines 11-20) next to the row
    # numbers of the *last* 10 lines; print the last 10 lines instead.
    cat(paste0(row_num[tail_idx], ": ", x[tail_idx]), sep = "\n")
  } else {
    cat(paste0(row_num, ": ", x), sep = "\n")
  }
  cat("--- vbscript_lines vector end ---\n")
  invisible(NULL)
}
#' @export
`[.vbscript_lines` <- function(x, ...) {
  # Subsetting via the default method drops the class; restore it.
  as.vbscript_lines(NextMethod())
}
#' @export
c.vbscript_lines <- function(...) {
  # Concatenation falls back to the default method, which strips the
  # class attribute; re-apply it to the combined result.
  as.vbscript_lines(NextMethod())
}
|
9e416b180f82eb917a2e7d14bba7e22e29f49fd4
|
0ae5e8642a9338c41ba7f8c03f294f11758eff7b
|
/01-topic-models/05-adding-random-US-and-merging-subissues.R
|
d346c855bcfc97f843881e18c3972ad1c8b0abbe
|
[
"MIT"
] |
permissive
|
nasimovi/lead_follow_apsr
|
45a9891c05975778680e54e724e9a00358651bb8
|
727b9a9b5eec81dd556eafb4470c3a8c33f953eb
|
refs/heads/master
| 2022-02-25T18:24:21.572183
| 2019-10-10T15:36:47
| 2019-10-10T15:36:47
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 7,312
|
r
|
05-adding-random-US-and-merging-subissues.R
|
#===============================================================================
# File: 05-adding-random-US-and-merging-subissues.R
# Date: May 3, 2019
# Paper: Who Leads? Who Follows? Measuring Issue Attention and Agenda Setting
# by Legislators and the Mass Public Using Social Media Data
# Journal: American Political Science Review
# Authors: Pablo Barbera, Andreu Casas, Jonathan Nagler, Patrick J. Egan,
# Richard Bonneau, John Jost, Joshua A. Tucker
# Purpose: this script prepares the final dataset that will be used in most of
# the analysis: group-level time series describing how much attention
# each group under analysis paid to each topic during the 113th Cong.
# In particular, in here:
# A) we merge a set of subissues detected in the original output.
# B) we add to the dataset generated in 01/04.R, the attention paid to
# each issues by the users geolocated in the US (estimated in 01/0X.R)
# Data In:
# # Topic attention distribution for each group across time
# ./data/main-time-series-PRE.csv
# # Posterior topic distribution for the US-located random sample
# ./topics/lda-output/lda-USrs-results.Rdata
# Data Out:
# # Main time-series used in the analysis
# ./data/main-time-series.csv
#===============================================================================
# PACKAGES
#===============================================================================
library(dplyr)
library(tidyr)  # BUG FIX: spread() (used when merging subissues below) is a
                # tidyr function and was never attached in the original script
library(rio)
# DATA
#===============================================================================
# - load dataset created in 04-....R (all time-series but the one for the random
# users geolocated in the United States)
db <- import("data/main-time-series-PRE.csv")
# - load the posterior topic distribution for the US-located random sample
print(load(paste0("topics/lda-output/lda-USrs-results.Rdata")))
# [A] Merging Sub-Issues
#===============================================================================
# - list of subissues to merge; each element is a vector of original topic
#   numbers that get summed into a single new topic (101, 102, ...)
subissues_to_merge <- list(
  # - 2 subissues about "Student Debt" will become topic 101
  c(27, 56),
  # - 2 subissues about "Hobby Lobby Supreme Court Decision" will become 102
  c(11, 74),
  # - 2 subissues about general "Budget Discussion" will become topic 103
  c(38, 59),
  # - 5 subissues about the "Government Shutdown" will become topic 104:
  # (Nov. 30, 2017: +1 shutdown issue that I forgot: 49)
  c(17, 26, 35, 42, 49)
)
# - a list with the different agendas (one attention column per group)
groups <- c("dem", "rep", "pubdem", "pubrep", "public", "random", "media")
# - making sure the dataset is sorted by topic and by date; topic codes are
#   prefixed with "topic" so numeric codes become safe column names for spread()
new_db <- db %>%
  arrange(topic, date) %>%
  mutate(topic = paste0("topic", topic))
# - adding up the attention paid to subissues belonging to the same issue.
# Creating new issue codes for these new merged issues.
for (i in 1:length(subissues_to_merge)) {
  # - iterate through groups of subissues to merge
  subissues <- subissues_to_merge[[i]]
  # - initializing new empty rows for the new issue
  #   (730 rows = one per date in the two-year time series)
  new_empty_rows <- as.data.frame(matrix(nrow = 730, ncol = ncol(new_db),
                                         data = rep(NA, 730 * ncol(new_db))))
  colnames(new_empty_rows) <- colnames(new_db)
  new_empty_rows$topic <- paste0("topic", (100 + i))
  new_db <- rbind(new_db, new_empty_rows)
  # - iterate through the different agendas
  for (group in groups) {
    # - pull the time-series for that group that are related to the subissues
    group_db <- new_db[, c("date", "topic", group)]
    colnames(group_db)[3] <- "gr"
    new_issue_group_series <- group_db %>%
      filter(topic %in% paste0("topic", subissues)) %>%
      # - one column per subissue topic (spread() comes from tidyr)
      spread(topic, gr) %>%
      # - adding up the attention to all the subissues, row (= date) wise
      dplyr::select(-date) %>%
      mutate(merged_issue = rowSums(.)) %>%
      dplyr::select(merged_issue)
    # - write the summed series into the freshly appended rows for this issue
    new_db[new_db$topic == paste0("topic", (100 + i)),
           group] <- new_issue_group_series
  }
}
# - strip the "topic" prefix again so topic codes are plain numbers (as strings)
new_db$topic <- gsub("topic", "", new_db$topic)
# [B] Adding issue attention by random users located in the United States
#===============================================================================
model_data <- new_db
# - pulling from the LDA predictions, the posterior topic distributions
top_dist <- results$topics
# - merging the same sub-issues we merged in the previous model data. Adding up
# the posterior distributions of topics that are sub-issues of the same macro
# issue
# - 2 subissues about "Student Debt" will become topic 101: #27 and #56
issue101 <- top_dist[,27] + top_dist[,56]
# - 2 subissues about "Hobby Lobby Supreme Court Decision" will become topic
# 102: #11 and #74
issue102 <- top_dist[,11] + top_dist[,74]
# - 2 subissues about general "Budget Discussion" will become topic 103:
# #38 and #59
issue103 <- top_dist[,38] + top_dist[,59]
# - 5 subissues about general "Government Shutdown" will become topic 104:
# #17, #26, #35, #42 and #49
issue104 <- top_dist[,17] + top_dist[,26] + top_dist[,35] + top_dist[,42] +
  top_dist[,49]
# - append the merged issues as extra (named) columns
top_dist <- cbind(top_dist, issue101)
top_dist <- cbind(top_dist, issue102)
top_dist <- cbind(top_dist, issue103)
top_dist <- cbind(top_dist, issue104)
# - reshaping the topic distributions so they fit the 'model_data' format.
# Stacking all the topic-level columns into a single one containing all topic
# info about this US-located random sample.
# - I already made sure that the new topic distributions are sorted by DATE in
# the same way the previous model data is.
top_dist_reshaped <- NULL
for (i in 1:ncol(top_dist)) {
  top_series <- top_dist[,i]
  new_df <- data.frame(
    topic = as.character(i),
    random_us = top_series
  )
  top_dist_reshaped <- rbind(top_dist_reshaped, new_df)
}
# - finally, merging these topic probabilities for the new US-located random
# sample to the previous model data. I checked both DATE and TOPIC align with
# the model data structure.
random_us <- top_dist_reshaped[,"random_us"]
new_model_data <- cbind(model_data, random_us)
new_model_data <- as.data.frame(new_model_data)
# - sanity check: we expect the time series for the original group of random
# users and those we geolocated in the US to be highly correlated (the overlap
# in terms of users is large)
cor.test(new_model_data$random, new_model_data$random_us)
# correlation = 0.99
# - drop topics not coded as politically relevant
pol_issues <- c(3, 7, 9, 12, 14, 15, 16, 18, 20, 23, 28,
                32, 33, 36, 37, 39, 41, 43, 46, 47, 48, 49, 50, 51,
                53, 58, 62, 63, 64, 66, 67, 70, 75, 81, 83, 85, 88,
                89, 93, 96, 97, 99, 100,
                # removed subissues: 27, 56, 11, 74, 38, 59, 17, 26, 35, 42
                # new merged issues:
                101, 102, 103, 104)
final_data <- new_model_data %>%
  filter(topic %in% pol_issues)
# OUTPUT
#===============================================================================
# - the main time-series that will be used in the analysis
# /!\ (if you uncomment next line you'll ovewrite existing copy of the file)
# write.csv(final_data, "data/main-time-series.csv", row.names = FALSE)
|
139d3eaac2bcbf83e153f128b672316f03592dd9
|
00d3ed6fb3db344beef3ccbfa38e6fd5a9b1f67b
|
/Rscripts/FIA_biomass.R
|
03ac55981dded8bd091631b7927dd04a4387b8cc
|
[] |
no_license
|
atrlica/FragEVI
|
63140345b5f5e3f9f0c92b9ab2b1b14e18f71599
|
8407655c8ef538eaa8ef819e38c23941c8c17ec5
|
refs/heads/master
| 2021-05-12T04:28:15.730204
| 2019-06-08T00:59:16
| 2019-06-08T00:59:16
| 117,163,342
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 27,730
|
r
|
FIA_biomass.R
|
library(raster)
library(data.table)
### FIA V1: Equation for wood volume~time (proxy for stand biomass density)
# Saturating growth-curve parameters (asymptote a, rate b).
# NOTE(review): presumably fitted to FIA data for NE forests -- confirm source.
a=123.67
b=0.04
AGE=seq(0,500)
# Live biomass (MgC/ha) as a function of stand age
y = a*(1-exp(-b*AGE))^3
plot(AGE, y, ylab="stand MgC/ha")
z = diff(y) ## yearly growth increment, absolute kg
z.rel <- z/y[2:501] ## yearly growth increment, % of previous year biomass
plot(y[2:501], z, xlab="biomass MgC/ha", ylab="absolute growth rate, MgC/ha/yr") ## zero growth above ~125 MgC/ha
points(y[2:501], z.rel, pch=14, cex=0.5, col="red") #hyperbolic
plot(AGE[2:501], z, xlab="Stand age, yr", ylab="absolute growth rate, MgC/ha/yr") ## this is the gain curve over site age, near zero >200 yrs
### above 250 yrs gain is <1 kgC/ha/yr
### what is equivalent age of stand if we know live biomass?
# Inverse of the growth curve: recover stand age from live biomass
log(1-(y/a)^(1/3))/(-b) ## predicts 40 years
tC <- seq(0,120)
plot(tC, log(1-(tC/a)^(1/3))/(-b), xlab="live biomass, tC/ha", ylab="Site age, yr")
### standing live wood C storage in local forest that resemble (?) what we'd have in Boston:
### i.e. N.red oak; red maple/oak; mixed upland hwoods; red maple uplands: Range is 94.7-105.1 MgC/ha
## read in summaries of C stock change in Q.alba/Q.rubra/Carya and Q.rubra forest
## both forest types include values for reforestation and afforestation conditions
# ne.summ <- read.csv("docs/FIA_CTMANHRI.csv")
# plot(ne.summ$Age.yrs, ne.summ$live.tree.tCha)
#
# mix.oak.ref <- read.csv("docs/FIA_QUALQURUCATO_Reforest.csv")
# plot(mix.oak.ref$Age.yrs, mix.oak.ref$live.tree.c.inc)
# mix.oak.aff <- read.csv("docs/FIA_QUALQURUCATO_Afforest.csv")
# plot(mix.oak.aff$Age.yrs, mix.oak.aff$live.tree.c.inc)
#
# red.oak.ref <- read.csv("docs/FIA_QURU_Reforest.csv")
# plot(red.oak.ref$Age.yrs, red.oak.ref$live.tree.c.inc)
# red.oak.aff <- read.csv("docs/FIA_QURU_Afforest.csv")
# plot(red.oak.aff$Age.yrs, red.oak.aff$live.tree.c.inc)
#
# fia.summ <- as.data.frame(cbind(ne.summ$Age.yrs, ne.summ$live.tree.c.inc, mix.oak.ref$live.tree.c.inc,
# mix.oak.aff$live.tree.c.inc, red.oak.ref$live.tree.c.inc, red.oak.aff$live.tree.c.inc))
# colnames(fia.summ) <- c("Age", "NE.total", "mix.oak.ref", "mix.oak.aff", "red.oak.ref", "red.oak.aff")
# plot(fia.summ$Age, fia.summ$NE.total, pch=15, col="black")
# points(fia.summ$Age, fia.summ$mix.oak.aff, pch=16, col="lightblue")
# points(fia.summ$Age, fia.summ$mix.oak.ref, pch=16, col="blue")
# points(fia.summ$Age, fia.summ$red.oak.aff, pch=17, col="pink")
# points(fia.summ$Age, fia.summ$red.oak.ref, pch=17, col="red")
### the only thing that changes between reforestation and afforestation are values for forest floor and soil C
# ## what is relationship between standing live biomass-C and C increment
# plot(ne.summ$mean.vol.m3, ne.summ$live.tree.c.inc)
# plot(ne.summ$live.tree.tCha, ne.summ$live.tree.c.inc)
# plot(ne.summ$Age.yrs, ne.summ$live.tree.tCha)
# plot(ne.summ$Age.yrs, ne.summ$mean.vol.m3) ## basic sigmoid 0 to max at 100
##########
### prototype process for using FIA aggregate data to figure npp from Raciti biomass
# 1) determine MgC/ha in 30m plot, normalize by appropriate area factor (raw ground/canopy/pervious) (i.e. there is X many ha of forest there with Y much living carbon in place)
# 2) Figure out the next-year tC/ha in the plot
# 3) apply this incrememnt to the area fraction in question
# --- Load 30 m rasters and assemble a per-pixel data.table -------------------
biom <- raster("processed/boston/bos.biom30m.tif") ## this is summed 1m kg-biomass to 30m pixel
aoi <- raster("processed/boston/bos.aoi30m.tif")
can <- raster("processed/boston/bos.can30m.tif")
isa <- raster("processed/boston/bos.isa.rereg30m.tif")
# Align all layers on the AOI grid before stacking their values
biom <- crop(biom, aoi)
can <- crop(can, aoi)
isa <- extend(isa, aoi)
# One row per 30 m pixel; columns added by reference (data.table :=)
biom.dat <- as.data.table(as.data.frame(biom))
biom.dat[,aoi:=as.vector(getValues(aoi))]
can.dat <- as.data.table(as.data.frame(can))
biom.dat[, can.frac:=can.dat$bos.can30m]
isa.dat <- as.data.table(as.data.frame(isa))
biom.dat[, isa.frac:=isa.dat$bos.isa.rereg30m]
biom.dat[,pix.ID:=seq(1, dim(biom.dat)[1])]
### live MgC by area of GROUND in each pixel
# kg biomass -> MgC/ha: x (1/2) kgC per kg biomass, x 1e-3 Mg per kg,
# x 1e4 m2 per ha, normalized by the pixel's in-AOI ground area (m2)
biom.dat[, live.MgC.ha.ground:=(bos.biom30m/aoi)*(1/2)*(1E-03)*(1E4)] ## convert kg-biomass/pixel based on size of pixel (summed by 1m2 in "aoi"), kgC:kgbiomass, Mg:kg, m2:ha
biom.dat[aoi>800, range(live.MgC.ha.ground, na.rm=T)] ## up to 284 MgC/ha, as we'd expect from what we saw in Raciti
hist(biom.dat[aoi>800,live.MgC.ha.ground]) #v skewed, most below 50 MgC/ha
biom.MgC.ha.ground <- raster(biom)
biom.MgC.ha.ground <- setValues(biom.MgC.ha.ground, biom.dat[,live.MgC.ha.ground])
### ALTERNATIVE BIOMASS DENSITIES: correct biomass figures for the amount of canopy or pervious cover present per cell
### i.e. we assume FIA is measuring trees in "forest" with essentially continuous canopy, so that differences in tC/ha as a function of age are purely a function of tree growth and not differences in tree %coverage
### live MgC/ha for "forest" fraction in each pixel
# Same unit conversion as above, but normalized by canopy area (aoi*can.frac)
biom.dat[,live.MgC.ha.forest:=(bos.biom30m/(aoi*can.frac))*(1/2)*(1E-03)*(1E4)]
biom.dat[bos.biom30m==0, live.MgC.ha.forest:=0] ## have to manually fix this because of 0 canopy pix
# biom.dat[can.frac<=0.01, live.MgC.ha.forest:=0]
range(biom.dat[aoi>800,live.MgC.ha.forest], na.rm=T) ## 0 - 284
hist(biom.dat[aoi>800,live.MgC.ha.forest]) ## correcting for canopy cover, more mid-rage values
biom.dat[live.MgC.ha.forest<100, length(live.MgC.ha.forest)]/dim(biom.dat[!is.na(can.frac),])[1] ## 84% of pixels are below 100 MgC/ha
biom.MgC.ha.forest <- raster(biom)
biom.MgC.ha.forest <- setValues(biom.MgC.ha.forest, biom.dat[,live.MgC.ha.forest])
plot(biom.MgC.ha.forest)
## correct for pervious cover
# Normalized by pervious area (aoi*(1-isa.frac)); nearly impervious pixels
# blow up the density, hence the isa.frac<0.98 filters below
biom.dat[,live.MgC.ha.perv:=(bos.biom30m/(aoi*(1-isa.frac)))*(1/2)*(1E-03)*(1E4)]
biom.dat[bos.biom30m==0, live.MgC.ha.perv:=0] ## have to manually fix this because of isa=1 pix
# biom.dat[isa.frac>0.99, live.MgC.ha.perv:=0]
range(biom.dat[aoi>800 & isa.frac<0.98,live.MgC.ha.perv], na.rm=T) ## 0 - 3890
hist(biom.dat[aoi>800 & isa.frac<0.98,live.MgC.ha.perv]) ## a small number of very extreme values
biom.dat[live.MgC.ha.perv<100, length(live.MgC.ha.perv)]/dim(biom.dat[!is.na(can.frac),])[1] ## 75% of pixels are below 100 MgC/ha
biom.MgC.ha.perv <- raster(biom)
biom.MgC.ha.perv <- setValues(biom.MgC.ha.perv, biom.dat[,live.MgC.ha.perv])
plot(biom.MgC.ha.perv)
### get delta figures
# How much each area-corrected density deviates from the raw ground density
biom.dat[, delta.C.perv:=live.MgC.ha.perv-(live.MgC.ha.ground)]
biom.dat[, delta.C.forest:=live.MgC.ha.forest-(live.MgC.ha.ground)]
plot(biom.dat[isa.frac<0.9, isa.frac], biom.dat[isa.frac<0.9, delta.C.perv]) ## deviation using pervious correction gets higher with greater impervious fraction
plot(biom.dat[can.frac>0.07, can.frac], biom.dat[can.frac>0.07, delta.C.forest]) ## as canopy nears 100%, NPP estimates converge on raw area
### figure out forest "age" for the cells (using coefficients for NE total)
## age based on ground area
a=123.67
b=0.04
# Inverse growth curve; densities above the asymptote a yield NaN/Inf ages
biom.dat[,age.ground:=log(1-(live.MgC.ha.ground/a)^(1/3))/(-b)] ## some of these will produce infinities with too dense biomass
# biom.dat[age.ground>120, age.ground:=120] ## fix the divergent ones to just "old, not growing"
# biom.dat[!is.finite(age.ground), age.ground:=120] ## again, fix the ones that got fucked to "old, not growing"
# biom.dat[age.ground>250, age.ground:=NA] ## don't cancel the high ages -- need to see them in order to fix them in post-process
biom.dat[is.na(aoi), age.ground:=NA] # cancel places out of bounds
# biom.dat[bos.biom30m<=10, age.ground:=NA] ## cancel places with no biomass
biom.dat[is.na(bos.biom30m), age.ground:=NA]
ground.age <- raster(biom)
ground.age <- setValues(ground.age, biom.dat[,age.ground])
plot(ground.age)
hist(biom.dat[,age.ground]) ## got a lot of "old" ones, indicating high density of biomass
## age based on canopy area
biom.dat[,age.forest:=log(1-(live.MgC.ha.forest/a)^(1/3))/(-b)] ## some of these will produce infinities with too dense biomass
# biom.dat[age.forest>120, age.forest:=120] ## fix the divergent ones to just "old, not growing"
# biom.dat[!is.finite(age.forest), age.forest:=120] ## again, fix the ones that got fucked to "old, not growing"
# biom.dat[age.forest>250, age.forest:=NA] ## cancel ages that are unreliably retrieved
biom.dat[is.na(aoi), age.forest:=NA] # cancel places out of bounds
# biom.dat[bos.biom30m<10, age.forest:=NA] ## cancel places with no biomass
biom.dat[is.na(bos.biom30m), age.forest:=NA]
forest.age <- raster(biom)
forest.age <- setValues(forest.age, biom.dat[,age.forest])
plot(forest.age)
hist(biom.dat[,age.forest]) ## many more old forest, peak has moved older
## age based on pervious area
biom.dat[,age.perv:=log(1-(live.MgC.ha.perv/a)^(1/3))/(-b)] ## some of these will produce infinities with too dense biomass
# biom.dat[age.perv>120, age.perv:=120] ## fix the divergent ones to just "old, not growing"
# biom.dat[!is.finite(age.perv), age.perv:=120] ## again, fix the ones that got fucked to "old, not growing"
# biom.dat[age.perv>250, age.perv:=NA] ## cancel ages that are unreliably retrieved
biom.dat[is.na(aoi), age.perv:=NA] # cancel places out of bounds
# biom.dat[bos.biom30m<=10, age.perv:=NA] ## cancel places with no biomass
biom.dat[is.na(bos.biom30m), age.perv:=NA]
perv.age <- raster(biom)
perv.age <- setValues(perv.age, biom.dat[,age.perv])
plot(perv.age)
hist(biom.dat[,age.perv])
### compare
biom.dat[,delta.age.perv:=age.perv-age.ground]
biom.dat[,delta.age.forest:=age.forest-age.ground]
plot(biom.dat$bos.biom30m, biom.dat$delta.age.forest)
plot(biom.dat$bos.biom30m, biom.dat$delta.age.perv)
## frequency distributions of different methods
par(mfrow=c(3,1), mar=c(4,4,2,1))
hist(biom.dat$age.ground, main="Forest age, unadjusted", xlab="Age, yrs", xlim=c(0, 300), breaks=40)
hist(biom.dat$age.forest, main="Forest age, canopy area adj.", xlab="Age, yrs", xlim=c(0, 300), breaks=40)
hist(biom.dat$age.perv, main="Forest age, pervious area adj.", xlab="Age, yrs", xlim=c(0, 300), breaks=40)
# Fraction of in-AOI pixels with a finite (retrievable) age per method
biom.dat[is.finite(age.ground), length(age.ground)]/biom.dat[aoi>800, length(aoi)] ## 97% retrieval
biom.dat[is.finite(age.forest), length(age.forest)]/biom.dat[aoi>800, length(aoi)] ## 71% retrieval
biom.dat[is.finite(age.perv), length(age.perv)]/biom.dat[aoi>800, length(aoi)] ## 65% retreival
### so you have different total numbers with the different methods -- beware then when you are comparing freq distributions (i.e. gap fill all you can)
### calculate the npp for each method
### coefficients for growth equation
a=123.67
b=0.04
### I don't like the idea of treating the age=NA pix as if they were unknown -- they are NA because they don't retrieve good age, which means they are "old" and npp=0
## figure out next annual increment possible for the "forest" average present in each cell, based on projected "age" and corrected for area
# NPP = growth-curve biomass at (age+1) minus at (age), scaled from per-ha
# to per-pixel by the relevant area (ground / canopy / pervious, in m2)
biom.dat[,npp.ann.ground:=((a*(1-(exp(-b*(age.ground+1))))^3)-(a*(1-(exp(-b*(age.ground))))^3))*(1E-4)*aoi] ## by ground area
biom.dat[,npp.ann.forest:=((a*(1-(exp(-b*(age.forest+1))))^3)-(a*(1-(exp(-b*(age.forest))))^3))*(1E-4)*(aoi*can.frac)] ## by canopy area
biom.dat[,npp.ann.perv:=((a*(1-(exp(-b*(age.perv+1))))^3)-(a*(1-(exp(-b*(age.perv))))^3))*(1E-4)*(aoi*(1-isa.frac))] ## by pervious area
#### convert these to kg-biomass gain rather than MgC gain
biom.dat[, npp.ann.ground:=npp.ann.ground*1000*2]
biom.dat[, npp.ann.forest:=npp.ann.forest*1000*2]
biom.dat[, npp.ann.perv:=npp.ann.perv*1000*2]
hist(biom.dat[,npp.ann.ground]) ## OK
hist(biom.dat[,npp.ann.forest])
hist(biom.dat[,npp.ann.perv])
### clean up artifacts
summary(biom.dat$npp.ann.ground)
summary(biom.dat$npp.ann.forest)
summary(biom.dat$npp.ann.perv) ## a lot more NAs in the forest and perv
biom.dat[is.finite(live.MgC.ha.ground) & !is.finite(age.ground) & aoi>800, min(live.MgC.ha.ground)] ### anything 123.7 and above fails to retrieve age+npp
biom.dat[is.finite(live.MgC.ha.forest) & !is.finite(age.forest) & aoi>800, min(live.MgC.ha.forest)] ### anything 123.7 and above fails to retrieve age
biom.dat[is.finite(live.MgC.ha.perv) & !is.finite(age.perv) & aoi>800, min(live.MgC.ha.perv)] ### anything 123.7 and above fails to retrieve age
par(mfrow=c(3,1))
plot(biom.dat$live.MgC.ha.ground, biom.dat$npp.ann.ground, xlim=c(0,200))
plot(biom.dat$live.MgC.ha.forest, biom.dat$npp.ann.forest, xlim=c(0,200))
plot(biom.dat$live.MgC.ha.perv, biom.dat$npp.ann.perv, xlim=c(0,200)) ## different, but all cut out ~123 MgC/ha
### assign 0 npp to all super-high biomass cells
# Densities above the curve's asymptote (a = 123.67) cannot be inverted to
# an age; treat those pixels as mature stands with zero net growth
biom.dat[live.MgC.ha.ground>123.6, npp.ann.ground:=0]
biom.dat[live.MgC.ha.forest>123.6, npp.ann.forest:=0]
biom.dat[live.MgC.ha.perv>123.6, npp.ann.perv:=0]
# summary(biom.dat$npp.ann.ground)
# summary(biom.dat$npp.ann.forest)
# summary(biom.dat$npp.ann.perv) ## a handful of extra NAs in perv
# View(biom.dat[is.finite(npp.ann.ground) & !is.finite(npp.ann.perv),]) ## all partial pix with NA isa, fine
# biom.dat[aoi>800 & is.na(npp.ann.ground),] #962 non retreivs, all missing biomass
# biom.dat[aoi>800 & is.na(npp.ann.forest),] #962 non retreivs, all missing biomass
# biom.dat[aoi>800 & is.na(npp.ann.perv),] #972 non retreivs, all missing biomass
# View(biom.dat[aoi>800 & is.na(npp.ann.perv) & !is.na(npp.ann.ground),]) #972 non retreivs, all missing biomass
## fix for all biomass==0
biom.dat[bos.biom30m==0, npp.ann.ground:=0]
biom.dat[bos.biom30m==0, npp.ann.forest:=0]
biom.dat[bos.biom30m==0, npp.ann.perv:=0] ## good enough, have retrievals for almost everything consistently
### look at some plots
plot(biom.dat$npp.ann.ground, biom.dat$npp.ann.forest)
plot(biom.dat$npp.ann.ground, biom.dat$npp.ann.perv) ## nothing ever exceeds the ground figure
par(mfrow=c(3,1))
hist(biom.dat$npp.ann.ground, main="NPP, raw area", xlab="MgC/pix/yr", breaks=40)
hist(biom.dat$npp.ann.forest, main="NPP, canopy area", xlab="MgC/pix/yr", breaks=40)
hist(biom.dat$npp.ann.perv, main="NPP, pervious area", xlab="MgC/pix/yr", breaks=40)
### aggregated stats
# Totals converted back from kg biomass to MgC (divide by 2*1000); per-ha
# rates normalize by total AOI ground area (m2 -> ha via 1E4)
biom.dat[,sum(npp.ann.ground, na.rm=T)]/(2*1000) #13.8k tC/yr by raw ground area
((biom.dat[,sum(npp.ann.ground, na.rm=T)]/(2*1000))/biom.dat[,sum(aoi, na.rm=T)])*1E4 ### 1.1 MgC/ha/yr raw ground area
biom.dat[,sum(npp.ann.forest, na.rm=T)]/(2*1000) #4.6k tC/yr by canopy area
((biom.dat[,sum(npp.ann.forest, na.rm=T)]/(2*1000))/biom.dat[,sum(aoi, na.rm=T)])*1E4 ### 0.37 MgC/ha/yr canopy area area
biom.dat[,sum(npp.ann.perv, na.rm=T)]/(2*1000) #5.4k tC/yr by pervious area
((biom.dat[,sum(npp.ann.perv, na.rm=T)]/(2*1000))/biom.dat[,sum(aoi, na.rm=T)])*1E4 ### 0.43 MgC/ha/yr pervious area
## contrast 10.3-8.9 = 1.4 NEP for boston region in Hardiman
### age distribution
hist(biom.dat$age.ground)
hist(biom.dat$age.forest)
hist(biom.dat$age.perv)
biom.dat[, median(age.ground, na.rm = T)] ##20.2
biom.dat[, median(age.forest, na.rm = T)] ##39.7
biom.dat[, median(age.perv, na.rm = T)] ##37.3
write.csv(biom.dat, "processed/npp.FIA.v3.csv")
# ######################
### A SLIGHT TWEAK (not a big or systematic effect apparently)
# ### Applying different FIA coefficients for different forest types to estimate 30m annual NPP (MgC/yr)
#
# library(data.table)
# library(raster)
#
# ## cleaned up code, handles different growth parameters for different forests (note "Afforestation" and "Reforestation" values are same viz. live biomass growth rates)
# ## initialize parameters for different forest types that ?? resemble tree species distributions in Boston
# for.type <- c("NEdefault","Mixoak", "Redoak")
# a <- c(123.67, 130.81, 123.09)
# b <- c(0.04, 0.03, 0.04)
#
# ## test limits of the live biomass~stand age function
# for(f in 1:length(for.type)){
# x=seq(0,120)
# liveC=a[f]*(1-(exp(-b[f]*x)))^3
# plot(x,liveC, main=for.type[f], ylab="live tC/ha", xlab="stand age")
# x <- seq(0, 120) ## inverse: model age vs. live biomass
# st.age=log(1-(x/a[f])^(1/3))/(-b[f]) ##
# plot(x, st.age, main=for.type[f], ylab="live tC/ha", xlab="stand age")
# }
# diff(st.age) ## lagged differences --> yearly increment in C gain
# diff(liveC)
# ## conservatively, none of the models is particularly stable over 100 yrs stand age
#
# biom.dat[, delta.npp.forest:=npp.ann.forest-npp.ann.ground]
# biom.dat[, delta.npp.perv:=npp.ann.perv-npp.ann.ground]
#
# ## package up some summary rasters
# biom.dat.r <- biom.dat
# for(g in 5:19){
# r <- raster(biom)
# r <- setValues(r, biom.dat.r[[g]])
# writeRaster(r, filename=paste("processed/boston/fia/fia", colnames(biom.dat)[g], "tif", sep="."),
# format="GTiff", overwrite=T)
# }
#######
####
#### FIA V2: empirical NPP~biomass function
### process and assessment of FIA individual dbh records provided by Moreale
### individual FIA tree data from sites near Boston
### V2.2: 1) uses species-specific biometrics; 2) models hardwood growth separately from trees in general; 3) uses nls to avoid dumping negative growth records
### V2.3 MIGHT want to be more careful about excluding sites that are only partially forested
spp.allo <- read.csv("data/FIA/spp_allometrics.csv") ## manually entered selected map from spp to b0+b1
live <- read.csv("data/FIA/MA_Tree_Data_ID_NOMORT.csv")
live <- as.data.table(live)
names(live)[1] <- c("TreeID")
names(live)[2] <- c("PlotID")
## attach genus/species names from the FIA species reference table (left join on species code)
spec <- read.csv("data/FIA/REF_SPECIES.csv")
live <- merge(x=live, y=spec[,c("SPCD", "GENUS", "SPECIES")], by.x="SPECIES_CD", by.y="SPCD", all.x=T, all.y=F)
live$GENUS <- as.character(live$GENUS)
live$GENUS <- as.factor(live$GENUS) ## re-factor to drop unused levels after the merge
live$GENUS.num <- as.numeric(live$GENUS)
### calculate species specific allometrics
## build a short species code like "Q.rubra" to join against spp.allo
live[,spp:=paste(substr(GENUS, 1,1), ".", SPECIES, sep="")]
live <- merge(x=live, y=spp.allo[,c("spp", "b0", "b1")], by="spp", all.x=T)
## species without a specific allometry fall back to generic coefficients
## (presumably a generic mixed-species equation -- TODO confirm source)
live[is.na(b0), b0:=(-2.48)]
live[is.na(b1), b1:=2.4835]
## log-log allometry: biomass (kg) = exp(b0 + b1 * ln(dbh))
biom.pred2 <- function(b0, b1, x){exp(b0+(b1*log(x)))}
live[,biom0.spp:=biom.pred2(b0, b1, DIAM_T0)] ## biomass at first measurement
live[,biom1.spp:=biom.pred2(b0, b1, DIAM_T1)] ## biomass at remeasurement
## class as hard or soft wood
live[,type:="H"]
live[spp%in%c("P.strobus", "P.resinosa", "T.canadensis", "A.balsamea"), type:="S"]
live[,type:=as.factor(type)]
## compare the models of areal biomass growth with raw data and using only hardwoods
## per-plot totals: biomass growth (kg), starting biomass (kg), and stem count
live.plot <- live[,.(sum(biom1.spp-biom0.spp, na.rm=T),
sum(biom0.spp, na.rm=T),
length(DIAM_T0)), by=PlotID] ## we are missing one plot -- all dead?
names(live.plot)[2:4] <- c("biom.growth.spp", "total.biom0.spp.kg",
"num.stems")
### growth rates by hard/soft wood
hwood <- live[type=="H", .(sum(biom1.spp-biom0.spp, na.rm=T),
sum(biom0.spp, na.rm=T)),
by=PlotID]
names(hwood)[2:3] <- c("biom.growth.spp.hw", "total.biom0.spp.kg.hw")
swood <- live[type=="S", .(sum(biom1.spp-biom0.spp, na.rm=T),
sum(biom0.spp, na.rm=T)),
by=PlotID]
names(swood)[2:3] <- c("biom.growth.spp.sw", "total.biom0.spp.kg.sw")
## plot-level growth
### all hard+softwood
## /4.8 annualizes growth over the remeasurement interval
## (presumably a 4.8-year mean census interval -- TODO confirm)
live.plot[,growth.ann.rel.spp:=(biom.growth.spp/4.8)/total.biom0.spp.kg]
summary(live.plot$biom.growth.spp) ## a few that decline!
### log-log dummy model (excludes negative growth)
## (total.biom0.spp.kg/675)*1E4 rescales plot totals to kg/ha
## (675 m2 ~ the 1/6-acre plot area noted further below)
mod.spp <- lm(log(growth.ann.rel.spp)~log(((total.biom0.spp.kg/675)*1E4)), data=live.plot)
summary(mod.spp) ## slightly sloppier, R2 0.19, coefficients about the same
plot(live.plot[,log(((total.biom0.spp.kg/675)*1E4))], live.plot[,log(growth.ann.rel.spp)], cex=0.5, col="forestgreen")
### a marginal shift , doesn't apear to be pivotal
live.plot[growth.ann.rel.spp<0,] ## two plots 255 47 show a decline in biomss! One of them is very low biomass (255)
### Isolate the hardwood growth~total.biom0 if so (few softwoods in urban forest)
live.plot <- merge(live.plot, hwood, by="PlotID")
live.plot <- merge(live.plot, swood, by="PlotID")
live.plot[,growth.ann.rel.hw:=(biom.growth.spp.hw/4.8)/total.biom0.spp.kg.hw]
live.plot[,growth.ann.rel.sw:=(biom.growth.spp.sw/4.8)/total.biom0.spp.kg.sw]
summary(live.plot$growth.ann.rel.hw) # 1.7 to 3% growth for the hardwoods, a slight negative
summary(live.plot$growth.ann.rel.sw) # 1.9 to 4% for the softwoods, some real losses
# ## make sure the distribution of sizes is the same
# par(mfrow=c(2,1))
# hist(live[type=="H", DIAM_T0])
# hist(live[type=="S", DIAM_T0])
# summary(live[type=="H", DIAM_T0]) # 10-84, middle 17-30cm
# summary(live[type=="S", DIAM_T0]) # 9-91, middle 18-37cm -- softwoods are slightly larger if anything
par(mfrow=c(1,1), mar=c(4,3,1,1))
plot(live.plot[,log(((total.biom0.spp.kg/675)*1E4))], live.plot[,log(growth.ann.rel.spp)], cex=0.5, col="gray55", ylim=c(-6, -2))
points(live.plot[,log(((total.biom0.spp.kg/675)*1E4))], live.plot[,log(growth.ann.rel.hw)], cex=0.5, col="red", pch=14)
points(live.plot[,log(((total.biom0.spp.kg/675)*1E4))], live.plot[,log(growth.ann.rel.sw)], cex=0.5, col="blue", pch=16)
## dummy log-log models
mod.hw <- lm(log(growth.ann.rel.hw)~log(((total.biom0.spp.kg/675)*1E4)), data=live.plot[growth.ann.rel.hw>0,])
summary(mod.hw) ## v. miserable, R2 0.06, but factor is significant
mod.sw <- lm(log(growth.ann.rel.sw)~log(((total.biom0.spp.kg/675)*1E4)), data=live.plot[growth.ann.rel.sw>0,])
summary(mod.sw) ## v. miserable, R2 0.13
lines(live.plot[growth.ann.rel.hw>0, log(((total.biom0.spp.kg/675)*1E4))], predict(mod.hw), col="red")
lines(live.plot[growth.ann.rel.sw>0, log(((total.biom0.spp.kg/675)*1E4))], predict(mod.sw), col="blue")
legend(x=10, y=-5, legend = c("Hardwood", "Softwood"), fill=c("red", "blue"))
## so pines growth faster at low density, but slow down at higher densities harder
## this is the relationship of HARDWOOD relative growth rate to total forest density
## it is significant but does not vary much across the range of densities (37-200 MgC/ha)
## it is also based on a fair amount of data that might not actually be wall-to-wall "forest"
# dim(live.plot[num.stems<20,])[1]/dim(live.plot)[1] ## 35% of our sample might be from areas without full tree coverage
# ##20 stems per 1/6 acre plot is 1 stem per 34 m2, or a minimum of 3000 kg-biomass/ha = 1.5MgC/ha
# hist(live.plot$num.stems)
## so this threshold will have to somehow be found because we intend to treat this as if it indicated a density~growth relationship in 100% canopy
#######
## final exponential model fit, hardwood growth~biomass
plot((live.plot$total.biom0.spp.kg/675)*1E4, live.plot$growth.ann.rel.spp, ylim=c(-0.03, 0.16)) ## bulk growth~biomass up to 400k kg/ha = 200 MgC
points((live.plot$total.biom0.spp.kg/675)*1E4, live.plot$growth.ann.rel.hw, cex=0.4, pch=14, col="red") ## just the hardwoods
summary((live.plot$total.biom0.spp.kg.hw*(1E4/675))/(live.plot$total.biom0.spp.kg*(1E4/675))) ## most plots are more than half hardwood
live.plot[,hw.frac:=total.biom0.spp.kg.hw/total.biom0.spp.kg]
live.plot[hw.frac<0.1,] ## the high ones are all low fraction of HW
live.plot[growth.ann.rel.hw>0.1, hw.frac] ## but there's one that is 40% HW
### outliers acknowledged; proceeding with the fit despite the noisy data
## nonlinear exponential fits: relative growth ~ exp(a + b * biomass density in kg/ha)
## nls keeps the negative-growth plots in the fit (a log-transform lm would drop them)
mod.exp.all <- nls(growth.ann.rel.spp ~ exp(a + b * (total.biom0.spp.kg*(1E4/675))),
data=live.plot, start=list(a=0, b=0))
mod.exp.all.log <- nls(growth.ann.rel.spp ~ exp(a + b * log(total.biom0.spp.kg*(1E4/675))),
data=live.plot, start=list(a=0, b=0))
mod.exp.hw <- nls(growth.ann.rel.hw ~ exp(a + b * (total.biom0.spp.kg*(1E4/675))),
data=live.plot, start=list(a=0, b=0))
v <- summary(mod.exp.all) ## a slightly better model than just hardwoods, everything is sig
w <- summary(mod.exp.hw) ### the b coefficient is barely worth including
x=seq(0,400000)
lines(x, exp(w$coefficients[1]+w$coefficients[2]*x), cex=0.3, col="red")
lines(x, exp(v$coefficients[1]+v$coefficients[2]*x), cex=0.3, col="gray55")
legend(fill=c("red", "gray55"), x = 20000, y = 0.1, legend = c("Hardwoods", "All trees"))
### OK Here's our model to work through the biomass data
### NOTE: this model predicts relative growth (kg/kg) as a function of BIOMASS DENSITY IN KG/HA
## function maxes out at about 3% growth
## reload the biomass data and reprocess
biom <- raster("processed/boston/bos.biom30m.tif") ## this is summed 1m kg-biomass to 30m pixel
aoi <- raster("processed/boston/bos.aoi30m.tif")
can <- raster("processed/boston/bos.can30m.tif")
isa <- raster("processed/boston/bos.isa.rereg30m.tif")
## align all rasters on the aoi grid
biom <- crop(biom, aoi)
can <- crop(can, aoi)
isa <- extend(isa, aoi)
biom.dat <- as.data.table(as.data.frame(biom))
biom.dat[,aoi:=as.vector(getValues(aoi))]
can.dat <- as.data.table(as.data.frame(can))
biom.dat[, can.frac:=can.dat$bos.can30m]
isa.dat <- as.data.table(as.data.frame(isa))
biom.dat[, isa.frac:=isa.dat$bos.isa.rereg30m]
biom.dat[,pix.ID:=seq(1, dim(biom.dat)[1])]
### live MgC by area of GROUND in each pixel
biom.dat[, live.kgbiom.ha.ground:=(bos.biom30m/aoi)*(1E4)] ## convert kg-biomass/pixel based on size of pixel (summed by 1m2 in "aoi"),
biom.dat[,live.kgbiom.ha.forest:=(bos.biom30m/(aoi*can.frac))*(1E4)]
biom.dat[bos.biom30m==0, live.kgbiom.ha.forest:=0] ## have to manually fix this because of 0 canopy pix
biom.dat[can.frac<0.01, live.kgbiom.ha.forest:=0]
biom.dat[,live.kgbiom.ha.perv:=(bos.biom30m/(aoi*(1-isa.frac)))*(1E4)]
biom.dat[bos.biom30m==0, live.kgbiom.ha.perv:=0] ## have to manually fix this because of isa=1 pix
biom.dat[isa.frac>0.99, live.kgbiom.ha.perv:=0]
hist(biom.dat[aoi>800, live.kgbiom.ha.ground]) ## up to 600k kgbiom/ha = 300 MgC/ha
hist(biom.dat[aoi>800, live.kgbiom.ha.forest]) ## same, more medium sized
hist(biom.dat[aoi>800 & isa.frac<0.98, live.kgbiom.ha.perv]) ## up to 800k kgbiom/ha
# ## root out any artifacts in density calcs
# biom.dat[aoi>800, length(aoi)] #136667 valid pixels
# biom.dat[aoi>800, length(bos.biom30m)] #136667 valid pixels
# biom.dat[aoi>800 & is.finite(bos.biom30m), length(bos.biom30m)] ##135705 is the number to hit
# biom.dat[!is.na(npp.kg.hw.ground) & aoi>800, length(npp.kg.hw.ground)] ## 135705 pix
# biom.dat[!is.na(npp.kg.hw.ground) & aoi>800, length(live.kgbiom.ha.ground)] ## 135705 pix
# biom.dat[is.finite(npp.kg.hw.forest) & aoi>800 & is.finite(bos.biom30m), length(npp.kg.hw.forest)] ## 135705 pix
# biom.dat[is.finite(npp.kg.hw.perv) & aoi>800 & is.finite(bos.biom30m), length(npp.kg.hw.perv)] ## 135695, a few have can but no isa
# biom.dat[is.na(npp.kg.hw.perv) & aoi>800 & is.finite(bos.biom30m),] ## 127736 pix
# ### this is biomass associated with 100% paved pixels, added fix above
## calculate growth factors per cell
## NOTE: uses w (the hardwood model) for all three density bases; v (all-tree model) is unused here
biom.dat[,ground.gfact:=exp(w$coefficients[1]+w$coefficients[2]*live.kgbiom.ha.ground)]
biom.dat[,forest.gfact:=exp(w$coefficients[1]+w$coefficients[2]*live.kgbiom.ha.forest)]
biom.dat[,perv.gfact:=exp(w$coefficients[1]+w$coefficients[2]*live.kgbiom.ha.perv)]
hist(biom.dat$ground.gfact) ## distribution of growth factors with different density calcs
hist(biom.dat$forest.gfact)
hist(biom.dat$perv.gfact)
### the biomass growth rate regression is valid up to ~400k kgbiom/ha
## calculate npp from these growth factors
## regression coeff*biom.density(kg/ha, 3 approaches)-->growth factors (kg/kg) per cell
## growth factors * cell biomass (kg) --> npp (kg biomass per cell)
biom.dat[,npp.kg.hw.ground:=bos.biom30m*ground.gfact]
biom.dat[,npp.kg.hw.forest:=bos.biom30m*forest.gfact]
biom.dat[,npp.kg.hw.perv:=bos.biom30m*perv.gfact]
### totals for aoi (aoi>800 keeps 30m pixels that are mostly inside the study area; /2000 = kg-biomass to MgC)
biom.dat[aoi>800 & is.finite(isa.frac) & is.finite(can.frac), sum(npp.kg.hw.ground, na.rm=T)]/2000 ## 9.1k MgC/yr ground basis
biom.dat[aoi>800 & is.finite(isa.frac) & is.finite(can.frac), sum(npp.kg.hw.forest, na.rm=T)]/2000 ## 8.7k MgC/yr forest basis
biom.dat[aoi>800 & is.finite(isa.frac) & is.finite(can.frac), sum(npp.kg.hw.perv, na.rm=T)]/2000 ## 8.4k MgC/yr perv basis
write.csv(biom.dat, "processed/npp.FIA.empirV22.csv")
# rr <- biom
# rr <- setValues(rr, biom.dat$npp.kg.hw.forest)
# plot(rr)
# plot(biom)
biom.dat[aoi>800, .(median(npp.kg.hw.ground, na.rm=T), ## median is ~50-60 kg-biomass/pix
median(npp.kg.hw.forest, na.rm=T),
median(npp.kg.hw.perv, na.rm=T))]
|
504c5b4c9dd64d0aec72e4e4a310312a98b3826e
|
fd9575d215009f41ccb9e8b20d7082f646dc7bc5
|
/analytical_full_web.R
|
2cf070068fa8b6c478a66ced73c26bb43546861c
|
[] |
no_license
|
JadeYu/TrophLab2
|
3a25223967511c547e4a8f28563e60484dcd1bc6
|
64ae6d5926cc1eb58a0f566423a242382722176e
|
refs/heads/master
| 2016-09-09T21:37:45.728391
| 2015-11-04T00:22:00
| 2015-11-04T00:22:00
| 42,339,957
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,772
|
r
|
analytical_full_web.R
|
## Example driver: solve a random 10-node full web.
## NOTE(review): this block runs before solve_full_web() is defined below, so
## sourcing this file top-to-bottom fails here -- the function definitions
## should come first, or these lines should move to the end of the file.
Dr_seq <- runif(10,0,0.8)
theta_seq <- runif(10,0,1)
R0 <- 1
FFW <- solve_full_web(theta_seq,Dr_seq,R0)
##compare webs with different Dr-theta relationship (random, positive or negative)
##plot without the resource!!
##Incorporate basal-or-not by brutal binary selection (whether basal link > sum of other links); might be mechanistically hard to justify
##The conundrum for Dr distribution is still unsolved.
library(nleqslv)
Ri2j <- function(C_i, Dr_j, theta_j) {
  # Resource flow from node i to node j: (C_i * theta_j)^(1 / (Dr_j - 1)) * theta_j.
  # Vectorized over its arguments (plain arithmetic only).
  flow_exponent <- 1 / (Dr_j - 1)
  theta_j * (C_i * theta_j)^flow_exponent
}
in_flows <- function(C_seq, Dr_i, theta_i) {
  # Total inflow to node i: sum of Ri2j flows from every source abundance in C_seq.
  per_source <- vapply(C_seq, function(C_s) Ri2j(C_s, Dr_i, theta_i), numeric(1))
  sum(per_source)
}
out_flows <- function(C_i, Dr_seq, theta_seq) {
  # Total outflow from node i: sum of Ri2j flows to each consumer j
  # (parameterized by the paired Dr_seq / theta_seq entries).
  total <- 0
  for (j in seq_along(Dr_seq)) {
    total <- total + Ri2j(C_i, Dr_seq[j], theta_seq[j])
  }
  total
}
R_eqs <- function(C_seq,theta_seq,Dr_seq,R0){
  # System of equations whose root gives the equilibrium abundances C_seq:
  # for nodes 1..(n-1), inflow must equal outflow; the n-th equation pins
  # the total resource pool (rowSums of the link matrix) to R0.
  n <- length(C_seq)
  EQS <- numeric(n)
  # seq_len() instead of 1:(n-1): for a length-1 C_seq, 1:0 would iterate
  # over c(1, 0) instead of not iterating at all.
  for(i in seq_len(n - 1)){
    EQS[i] <- in_flows(C_seq,Dr_seq[i],theta_seq[i]) - out_flows(C_seq[i],Dr_seq,theta_seq)
  }
  EQS[n] <- sum(all_links(C_seq,theta_seq,Dr_seq)$R_seq)-R0
  EQS
}
all_links <- function(C_seq, theta_seq, Dr_seq) {
  # Full n x n flow matrix: entry [i, j] is the flow from node i to node j.
  # Ri2j is elementwise-vectorized in C_i, so each column is filled in one call.
  n <- length(C_seq)
  link_mat <- matrix(0, nrow = n, ncol = n)
  for (j in seq_len(n)) {
    link_mat[, j] <- Ri2j(C_seq, Dr_seq[j], theta_seq[j])
  }
  # R_seq: total resource delivered by each source node (row sums).
  list(link_mat = link_mat, R_seq = rowSums(link_mat))
}
## Solve the full food web for equilibrium abundances and resource flows.
## theta_seq, Dr_seq: per-node parameters; R0: total resource input.
## Returns the all_links() list (link_mat, R_seq) at the solved abundances.
solve_full_web <- function(theta_seq,Dr_seq,R0){
## analytic MERA-based starting guess for the root finder
MERA_R <- theta_seq^(Dr_seq/(1-Dr_seq)) #
R_seq <- R0*MERA_R/sum(MERA_R)
C_init <- (R_seq/sum(MERA_R))^(mean(Dr_seq)-1)
## numerically solve R_eqs(C_seq) = 0 from C_init via nleqslv
C.sol <- nleqslv(C_init,R_eqs,theta_seq=theta_seq,Dr_seq=Dr_seq,R0=R0)
## nleqslv result components: [[1]] = x (solution), [[3]] = termcd, [[4]] = message;
## termcd != 1 indicates non-convergence, so surface the solver's message
if(C.sol[[3]]!=1){
print(C.sol[[4]])
}
C_seq <- C.sol[[1]]
all_links(C_seq,theta_seq,Dr_seq)
}
|
e00661ec49ee35dfeaf2d48480410b8e879bf8d1
|
b6443e4bff6b9e41e8353d5636560e092efce75e
|
/man/user_getTopTags.Rd
|
a88ad9a70e263dd5bc2ffdac68e0a254c0a98af7
|
[] |
no_license
|
juyeongkim/lastfmr
|
38bf6c081f3f3d37a875984c4aebba6629b856df
|
4f22f841d6427c52790b813224bc6e1360c81e55
|
refs/heads/master
| 2020-05-21T04:35:36.068739
| 2019-05-30T15:37:56
| 2019-05-30T15:37:56
| 57,168,949
| 0
| 1
| null | null | null | null |
UTF-8
|
R
| false
| true
| 595
|
rd
|
user_getTopTags.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/user.R
\name{user_getTopTags}
\alias{user_getTopTags}
\title{Get the top tags listened to by a user.}
\usage{
user_getTopTags(user, limit = NA)
}
\arguments{
\item{user}{The user name.}
\item{limit}{Limit the number of tags returned}
}
\value{
A list of the top tags listened to by a user.
}
\description{
Get the top tags listened to by a user.
Implementation of last.fm's \emph{user.getTopTags} API method
(\url{http://www.last.fm/api/show/user.getTopTags}).
}
\examples{
\dontrun{
user_getTopTags("platyjus")
}
}
|
2a5735fe6c81457097639b6a57477f9f1f6b7c66
|
1388a4a98b64dcc93eb8504cecd93f74f9cec777
|
/man/Assert.summary.Rd
|
9a804c85660f7864a5958f412d5b1f72596eadc7
|
[] |
no_license
|
rbuhler/unitTest
|
c24c923841655cb960211e90515572e0f35d5e99
|
7b9539787c6f6b84deb49ed20284f0ff6b1b5dfd
|
refs/heads/master
| 2020-05-19T12:41:03.176357
| 2015-01-09T00:39:28
| 2015-01-09T00:39:28
| 28,789,426
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 263
|
rd
|
Assert.summary.Rd
|
% Generated by roxygen2 (4.0.2): do not edit by hand
\name{Assert.summary}
\alias{Assert.summary}
\title{Method Assert.summary}
\usage{
Assert.summary(object)
}
\arguments{
\item{object}{Description.}
}
\description{
Description.
}
\examples{
Assert.summary()
}
|
54dd52d5bfb35cdfb9eca6da63c5d2e4bbdf0e29
|
0018de1cea9677207535607821081e8a6ce74883
|
/R/xEDA.R
|
df05c1886ab47d4dc1164e264c8f3b1881ca6e24
|
[
"MIT"
] |
permissive
|
cwendorf/EASI
|
fde3fe05e6527e2d957712ea313582fcc0b799f8
|
a384e5ef74e823646d0126e6d420147af057b7db
|
refs/heads/main
| 2023-07-22T04:58:51.054314
| 2023-07-17T14:58:14
| 2023-07-17T14:58:14
| 193,248,564
| 4
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 764
|
r
|
xEDA.R
|
# Estimation Approach to Statistical Inference
## Exploratory Data Analysis
### Plots
plotViolins <- function(..., main = NULL, col = "black") {
  # Violin plot: a (value-free) box plot base with a full, centered density
  # shape drawn on top of it.
  if (is.null(main)) {
    main <- "Violin Plots"
  }
  plotBoxes(..., values = FALSE, main = main, col = col)
  plotDensity(..., add = TRUE, offset = 0, type = "full", col = col)
}
plotBeans <- function(..., main = NULL, col = "black") {
  # Bean plot: full density shapes with the raw data overplotted as tick
  # marks (pch = 95 is a horizontal dash).
  if (is.null(main)) {
    main <- "Bean Plots"
  }
  plotDensity(..., main = main, type = "full", offset = 0, col = col)
  plotData(..., add = TRUE, offset = 0, pch = 95, col = col, method = "overplot")
}
plotRainclouds <- function(..., main = NULL, col = "black") {
  # Raincloud plot: box plots plus an offset density "cloud" above and
  # jittered raw data "rain" below.
  if (is.null(main)) {
    main <- "Raincloud Plots"
  }
  plotBoxes(..., main = main, values = FALSE, col = col)
  plotDensity(..., add = TRUE, offset = .1, col = col)
  plotData(..., add = TRUE, method = "jitter", offset = -.15, col = col)
}
|
45410544188266a2006a7e713f5b35da2e6918ac
|
8db66dbb7c37644a8dae8b2fb2fa17a85f01d0e0
|
/gee/multivar_mixed_model.R
|
cc29e13f429d7b6ff445238102273288d8788290
|
[] |
no_license
|
AshleyLab/device_validation
|
61c511f18cfc6cf7c78ecf215eee869d41c11dbb
|
d19fa94edef9948b4049ff6a1e12c7c55b1996fb
|
refs/heads/master
| 2020-12-30T12:44:43.739460
| 2017-06-05T16:42:28
| 2017-06-05T16:42:28
| 91,356,303
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,277
|
r
|
multivar_mixed_model.R
|
## GEE analysis of wearable-device error (heart rate and energy expenditure)
## against subject and activity covariates, with MuMIn model dredging.
## NOTE(review): rm(list=ls()) wipes the caller's workspace -- this file is
## intended to run only as a standalone script.
rm(list=ls())
library('gee')
library('data.table')
library('MuMIn')
data=data.frame(read.table('full_df.tsv',header=TRUE,sep='\t'))
#remove any rows with "NA" -- no device value recorded
data=na.omit(data)
data$Error=abs(data$Error)
data$Activity=factor(data$Activity,levels=c("sit","walk","run","bike","max"))
## split by metric: heart rate (hr) vs energy expenditure (en)
hr=data[which(data$Metric=="hr"),]
en=data[which(data$Metric=="en"),]
en$Device=factor(en$Device,levels=c("Apple","Basis","Fitbit","Microsoft","PulseOn"))
#GLM model
#hr_glm=glm(Error~Sex+Age+Height+Weight+BMI+Skin+Fitzpatrick+Wrist+VO2max+Activity+Intensity+Device,data=hr)
#en_glm=glm(Error~Sex+Age+Height+Weight+BMI+Skin+Fitzpatrick+Wrist+VO2max+Activity+Intensity+Device,data=en)
#Fit Generalized estimation equation (GEE) with independent correlation structure
#hr_gee_ind=gee(Error~Sex+Age+Height+Weight+BMI+Skin+Fitzpatrick+Wrist+VO2max+Activity+Intensity+Device,data=hr,id=Subject,corstr="independence")
#en_gee_ind=gee(Error~Sex+Age+Height+Weight+BMI+Skin+Fitzpatrick+Wrist+VO2max+Activity+Intensity+Device,data=en,id=Subject,corstr="independence")
#Fit Generalized estimation equation (GEE) with exchangeable correlation structure
## subjects (id=Subject) are the repeated-measures clusters
hr_gee_exch=gee(Error~
Sex+
Age+
Sex:Age+
Height+
Weight+
BMI+
Skin+
Fitzpatrick+
Wrist+
VO2max+
Activity+
Intensity+
Device+
Activity:Device+
Intensity:Device
,data=hr,id=Subject,corstr="exchangeable")
en_gee_exch=gee(Error~
Sex+
Age+
Sex:Age+
Height+
Weight+
BMI+
Skin+
Fitzpatrick+
Wrist+
VO2max+
Activity+
Intensity+
Device+
Activity:Device+
Intensity:Device
,data=en,id=Subject,corstr="exchangeable")
## two-sided p-values from the robust z statistics (column 5 of the GEE coef table)
en_gee_exch_pval=2 * pnorm(abs(coef(summary(en_gee_exch))[,5]), lower.tail = FALSE)
en_results=data.frame(summary(en_gee_exch)$coefficients,en_gee_exch_pval)
## NOTE(review): the next line uses hr_results and hr_gee_exch_pval before they
## are defined (they are first created in the hr 'best model' section below),
## so running this script top-to-bottom errors here -- the hr ordering step
## likely belongs after that section.
hr_results=hr_results[order(hr_gee_exch_pval),]
en_results=en_results[order(en_gee_exch_pval),]
dd=pdredge(hr_gee_exch)
# Model average models with delta AICc < 4
model.avg(dd, subset = delta < 4)
#or as a 95% confidence set:
model.avg(dd, subset = cumsum(weight) <= .95) # get averaged coefficients
#'Best' model
hr_best=summary(get.models(dd, 1)[[1]])
hr_gee_exch_pval=2 * pnorm(abs(coef(hr_best)[,5]), lower.tail = FALSE)
hr_results=data.frame(hr_best$coefficients,hr_gee_exch_pval)
par(mar = c(3,5,6,4))
plot(dd, labAsExpr = TRUE)
dd=pdredge(en_gee_exch)
# Model average models with delta AICc < 4
model.avg(dd, subset = delta < 4)
#or as a 95% confidence set:
model.avg(dd, subset = cumsum(weight) <= .95) # get averaged coefficients
#'Best' model
en_best=summary(get.models(dd, 1)[[1]])
en_gee_exch_pval=2 * pnorm(abs(coef(en_best)[,5]), lower.tail = FALSE)
en_results=data.frame(en_best$coefficients,en_gee_exch_pval)
par(mar = c(3,5,6,4))
plot(dd, labAsExpr = TRUE)
|
c9a840020ece43ace4e75ac147401b53ede444dd
|
682fdb45d76bd462593d07113a0f642665ff44a3
|
/R/helpers.R
|
202c0585a81c86d320fd18a2956e5da4b6dd6e6c
|
[
"MIT"
] |
permissive
|
dfsp-spirit/fsbrain
|
dd782c91f95c52b8039e4ec6642345d994a6ed84
|
09f506dbf5467356ab26a65246f31051da58f198
|
refs/heads/master
| 2023-07-06T10:11:18.468284
| 2023-06-26T16:42:45
| 2023-06-26T16:42:45
| 209,085,379
| 44
| 12
|
NOASSERTION
| 2023-01-15T19:49:54
| 2019-09-17T15:05:51
|
R
|
UTF-8
|
R
| false
| false
| 53,371
|
r
|
helpers.R
|
#' @title Transform first character of a string to uppercase.
#'
#' @description Capitalize the first character of a string, e.g. when labeling
#'    plots. Important: this function knows nothing about encodings or
#'    languages; it simply applies \code{\link{toupper}} to the first character.
#'
#' @param word, string. Any string.
#'
#' @return string, the input string with the first character transformed to uppercase.
#'
#' @examples
#'    word_up = fup("word");
#'
#' @export
fup <- function(word) {
  capitalized = word;
  substr(capitalized, 1, 1) <- toupper(substr(capitalized, 1, 1));
  return(capitalized);
}
#' @title Show demo visualization to test whether fsbrain is setup correctly.
#'
#' @note This function will try to download optional data from the internet (unless the data have already been downloaded).
#'
#' @return the return value of \code{vis.subject.morph.native}, invisibly.
#'
#' @keywords internal
demo <- function() {
# Fetch the optional example data (no-op if it is already cached locally).
fsbrain::download_optional_data();
sjd = get_optional_data_filepath("subjects_dir");
sj = "subject1";
# Render native-space cortical thickness for the demo subject, with a colorbar.
return(invisible(vis.subject.morph.native(sjd, sj, "thickness", cortex_only = T, draw_colorbar = T)));
}
#' @title Compute neighborhood of a vertex
#'
#' @description Given a set of query vertex indices and a mesh *m*, compute all vertices which are adjacent to the query vertices in the mesh. A vertex *u* is *adjacent* to another vertex *v* iff there exists an edge *e = (u, v)* in *m*. While you could call this function repeatedly with the old output as its new input to extend the neighborhood, you should maybe use a proper graph library for this.
#'
#' @param surface a surface as returned by functions like \code{\link[fsbrain]{subject.surface}} or \code{\link[freesurferformats]{read.fs.surface}}.
#'
#' @param source_vertices Vector of source vertex indices.
#'
#' @param k positive integer, how often to repeat the procedure and grow the neighborhood, using the output `vertices` as the `source_vertices` for the next iteration. Warning: settings this to high values will be very slow for large meshes.
#'
#' @param restrict_to_vertices integer vector of vertex indices. If given, the neighborhood growth will be limited to the given vertex indices. Defaults to NULL, which means the neighborhood is not restricted.
#'
#' @return the neighborhood as a list with two entries: "faces": integer vector, the face indices of all faces the source_vertices are a part of. "vertices": integer vector, the unique vertex indices of all vertices of the faces in the 'faces' property. These vertex indices include the indices of the source_vertices themselves.
#'
#' @family surface mesh functions
#'
#' @export
mesh.vertex.neighbors <- function(surface, source_vertices, k=1L, restrict_to_vertices=NULL) {
if(! freesurferformats::is.fs.surface(surface)) {
stop("Parameter 'surface' must be an fs.surface instance.");
}
if(k < 1L) {
stop("Parameter k must be a positive integer.");
}
if(length(source_vertices) < 1L) {
stop("Parameter 'source_vertices' must not be empty.");
}
vertex_indices = source_vertices;
# Size of the largest possible neighborhood: the whole mesh, or the allowed subset.
# Used below to stop early once the neighborhood cannot grow any further.
if(is.null(restrict_to_vertices)) {
max_neighborhood_size = nrow(surface$vertices);
} else {
max_neighborhood_size = length(restrict_to_vertices);
}
for(iter_idx in seq_len(k)) {
# Each iteration: collect all faces touching the current vertex set (in the
# restricted case, only faces lying entirely within the allowed subset),
# then replace the vertex set with all vertices of those faces.
if(is.null(restrict_to_vertices)) {
#face_indices = which(apply(surface$faces, 1, function(face_vertidx) any(face_vertidx %in% vertex_indices)));
face_indices = which(surface$faces[,1] %in% vertex_indices | surface$faces[,2] %in% vertex_indices | surface$faces[,3] %in% vertex_indices);
} else {
#face_indices = which(apply(surface$faces, 1, function(face_vertidx) any(face_vertidx %in% vertex_indices) && all(face_vertidx %in% restrict_to_vertices)));
face_indices = which((surface$faces[,1] %in% restrict_to_vertices & surface$faces[,2] %in% restrict_to_vertices & surface$faces[,3] %in% restrict_to_vertices) & (surface$faces[,1] %in% vertex_indices | surface$faces[,2] %in% vertex_indices | surface$faces[,3] %in% vertex_indices));
}
vertex_indices = unique(as.vector(surface$faces[face_indices, ]));
if(length(vertex_indices) == max_neighborhood_size) {
break; # Neighborhood is already covering the whole mesh / allowed area.
}
}
return(list("vertices"=vertex_indices, "faces"=face_indices));
}
#' @title Return all faces which are made up completely of the listed vertices.
#'
#' @param surface_mesh surface mesh, as loaded by \code{\link[fsbrain]{subject.surface}} or \code{\link[freesurferformats]{read.fs.surface}}.
#'
#' @param source_vertices integer vector, the vertex indices.
#'
#' @return integer vector, the face indices of all faces whose three vertices
#'    are all contained in \code{source_vertices}.
#'
#' @family surface mesh functions
#'
#' @keywords internal
mesh.vertex.included.faces <- function(surface_mesh, source_vertices) {
  face_vertices = surface_mesh$faces;
  # Logical n x 3 matrix: is each face corner a member of the source vertex set?
  # (The %in% result is column-major, matching the layout of the faces matrix.)
  corner_in_set = matrix(face_vertices %in% source_vertices, nrow = nrow(face_vertices));
  # Keep only faces where all three corners are in the set.
  return(which(corner_in_set[, 1] & corner_in_set[, 2] & corner_in_set[, 3]));
}
#' @title Compute outline vertex colors from annotation.
#'
#' @description For each region in an atlas, compute the outer border and color the respective vertices in the region-specific color from the annot's colortable.
#'
#' @param annotdata an annotation, as returned by functions like \code{\link[fsbrain]{subject.annot}}. If a character string, interpreted as a path to a file containing such data, and loaded with \code{freesurferformats::read.fs.annot}
#'
#' @param surface_mesh brain surface mesh, as returned by functions like \code{\link[fsbrain]{subject.surface}} or \code{\link[freesurferformats]{read.fs.surface}}. If a character string, interpreted as a path to a file containing such data, and loaded with \code{freesurferformats::read.fs.surface}
#'
#' @param background color, the background color to assign to the non-border parts of the regions. Defaults to 'white'.
#'
#' @param silent logical, whether to suppress status messages.
#'
#' @param expand_inwards integer, additional thickness of the borders. Increases computation time, defaults to 0L.
#'
#' @param outline_color NULL or a color string (like 'black' or '#000000'), the color to use for the borders. If left at the default value `NULL`, the colors from the annotation color lookup table will be used.
#'
#' @param limit_to_regions vector of character strings or NULL, a list of regions for which to draw the outline (see \code{\link[fsbrain]{get.atlas.region.names}}). If NULL, all regions will be used. If (and only if) this parameter is used, the 'outline_color' parameter can be a vector of color strings, one color per region.
#'
#' @return vector of colors, one color for each mesh vertex
#'
#' @note Sorry for the computational time, the mesh datastructure is not ideal for neighborhood search.
#'
#' @export
# @importFrom foreach foreach
# @importFrom parallel detectCores
# @importFrom doParallel registerDoParallel
annot.outline <- function(annotdata, surface_mesh, background="white", silent=TRUE, expand_inwards=0L, outline_color=NULL, limit_to_regions=NULL) {
if(is.character(annotdata)) {
# BUGFIX: was 'annotdate = ...' (typo), which left 'annotdata' holding the
# file path so the fs.annot check below always failed for path input.
annotdata = freesurferformats::read.fs.annot(annotdata);
}
if(! freesurferformats::is.fs.annot(annotdata)) {
stop("Parameter 'annotdata' must be an fs.annot instance.");
}
if(is.character(surface_mesh)) {
surface_mesh = freesurferformats::read.fs.surface(surface_mesh);
}
if(! freesurferformats::is.fs.surface(surface_mesh)) {
stop("Parameter 'surface_mesh' must be an fs.surface instance.");
}
if(length(annotdata$vertices) != nrow(surface_mesh$vertices)) {
stop(sprintf("Annotation is for %d vertices but mesh contains %d, vertex counts must match.\n", length(annotdata$vertices), nrow(surface_mesh$vertices)));
}
# Start with every vertex in the background color; borders are painted below.
col = rep(background, length(annotdata$vertices));
#doParallel::registerDoParallel(parallel::detectCores());
#foreach::foreach(region_idx = seq_len(annotdata$colortable$num_entries)) %dopar% {
for(region_idx in seq_len(annotdata$colortable$num_entries)) {
region_name = annotdata$colortable$struct_names[[region_idx]];
region_index_in_limit_to_regions_parameter = NULL;
if(! is.null(limit_to_regions)) {
if(! is.character(limit_to_regions)) {
stop("Parameter 'limit_to_regions' must be NULL or a vector of character strings.");
}
if(! region_name %in% limit_to_regions) {
next;
} else {
region_index_in_limit_to_regions_parameter = which(limit_to_regions == region_name);
if(length(region_index_in_limit_to_regions_parameter) != 1L) {
stop("Regions in parameter 'limit_to_regions' must be unique.");
}
}
}
if(!silent) {
message(sprintf("Computing outline for region %d of %d: '%s'\n", region_idx, annotdata$colortable$num_entries, region_name));
}
# Border of this region's label, optionally thickened inwards.
label_vertices = label.from.annotdata(annotdata, region_name, error_on_invalid_region = FALSE);
label_border = label.border(surface_mesh, label_vertices, expand_inwards=expand_inwards);
if(is.null(outline_color)) {
# Use the region's own color from the annotation's color lookup table.
col[label_border$vertices] = as.character(annotdata$colortable_df$hex_color_string_rgba[[region_idx]]);
} else {
if(length(outline_color) > 1L) {
# Per-region colors are only valid together with 'limit_to_regions'.
if(length(outline_color) != length(limit_to_regions)) {
stop(sprintf("Number of colors in parameter 'outline_color' must be 1 or exactly the number of regions in parameter 'limit_to_regions' (%d), but is %d.\n", length(limit_to_regions), length(outline_color)));
}
if(! is.null(region_index_in_limit_to_regions_parameter)) {
col[label_border$vertices] = outline_color[region_index_in_limit_to_regions_parameter];
}
} else {
col[label_border$vertices] = outline_color;
}
}
}
return(col);
}
#' @title Compute annot border vertices.
#'
#' @inheritParams vis.subject.morph.native
#'
#' @inheritParams subject.annot
#'
#' @inheritParams annot.outline
#'
#' @return hemilist of integer vectors, the vertices in the border
subject.annot.border <- function (subjects_dir, subject_id, hemi, atlas, surface="white", expand_inwards=0L, limit_to_regions=NULL) {
    # Validate the hemisphere identifier first.
    if (!(hemi %in% c("lh", "rh", "both"))) {
        stop(sprintf("Parameter 'hemi' must be one of 'lh', 'rh' or 'both' but is '%s'.\n", hemi));
    }
    # For 'both', handle each hemisphere by recursion and collect the results
    # in a hemilist.
    if (hemi == "both") {
        return(list("lh" = subject.annot.border(subjects_dir, subject_id, hemi="lh", atlas=atlas, surface=surface, expand_inwards=expand_inwards, limit_to_regions=limit_to_regions),
                    "rh" = subject.annot.border(subjects_dir, subject_id, hemi="rh", atlas=atlas, surface=surface, expand_inwards=expand_inwards, limit_to_regions=limit_to_regions)));
    }
    # Load the annotation for the requested hemisphere.
    annot_file = file.path(subjects_dir, subject_id, "label", sprintf("%s.%s.annot", hemi, atlas));
    if (!file.exists(annot_file)) {
        stop(sprintf("Annotation file '%s' for subject '%s' atlas '%s' hemi '%s' cannot be accessed.\n", annot_file, subject_id, atlas, hemi));
    }
    hemi_annot = freesurferformats::read.fs.annot(annot_file);
    # Load the surface mesh for the requested hemisphere.
    surface_file = file.path(subjects_dir, subject_id, "surf", sprintf("%s.%s", hemi, surface));
    if (!file.exists(surface_file)) {
        stop(sprintf("Surface file '%s' for subject '%s' surface '%s' hemi '%s' cannot be accessed.\n", surface_file, subject_id, surface, hemi));
    }
    hemi_mesh = freesurferformats::read.fs.surface(surface_file);
    # Delegate the actual border computation.
    return(annot.outline.border.vertices(hemi_annot, hemi_mesh, expand_inwards=expand_inwards, limit_to_regions=limit_to_regions));
}
#' @title Compute the border vertices for each region in an annot.
#'
#' @inheritParams annot.outline
#'
#' @return named list, the keys are the region names and the values are vectors of integers encoding vertex indices.
#'
#' @export
annot.outline.border.vertices <- function(annotdata, surface_mesh, silent=TRUE, expand_inwards=0L, limit_to_regions=NULL) {
    # Accept a file system path to an annotation file and load it on the fly.
    if(is.character(annotdata)) {
        # Bugfix: the loaded annotation was previously assigned to the misspelled
        # variable 'annotdate', so passing a file path always failed the check below.
        annotdata = freesurferformats::read.fs.annot(annotdata);
    }
    if(! freesurferformats::is.fs.annot(annotdata)) {
        stop("Parameter 'annotdata' must be an fs.annot instance.");
    }
    # Accept a file system path to a surface file as well.
    if(is.character(surface_mesh)) {
        surface_mesh = freesurferformats::read.fs.surface(surface_mesh);
    }
    if(! freesurferformats::is.fs.surface(surface_mesh)) {
        stop("Parameter 'surface_mesh' must be an fs.surface instance.");
    }
    # The annotation stores one region per vertex, so vertex counts must match.
    if(length(annotdata$vertices) != nrow(surface_mesh$vertices)) {
        stop(sprintf("Annotation is for %d vertices but mesh contains %d, vertex counts must match.\n", length(annotdata$vertices), nrow(surface_mesh$vertices)));
    }
    border_vertices = list();
    for(region_idx in seq_len(annotdata$colortable$num_entries)) {
        region_name = annotdata$colortable$struct_names[[region_idx]];
        region_index_in_limit_to_regions_parameter = NULL;
        if(! is.null(limit_to_regions)) {
            if(! is.character(limit_to_regions)) {
                stop("Parameter 'limit_to_regions' must be NULL or a vector of character strings.");
            }
            if(! region_name %in% limit_to_regions) {
                next;   # Region was not requested, skip it.
            } else {
                region_index_in_limit_to_regions_parameter = which(limit_to_regions == region_name);
                if(length(region_index_in_limit_to_regions_parameter) != 1L) {
                    stop("Regions in parameter 'limit_to_regions' must be unique.");
                }
            }
        }
        if(!silent) {
            message(sprintf("Computing outline for region %d of %d: '%s'\n", region_idx, annotdata$colortable$num_entries, region_name));
        }
        # Turn the region into a label (a set of vertex indices), then compute
        # the border vertices of that label and store them under the region name.
        label_vertices = label.from.annotdata(annotdata, region_name, error_on_invalid_region = FALSE);
        label_border = label.border(surface_mesh, label_vertices, expand_inwards=expand_inwards);
        border_vertices[[region_name]] = label_border$vertices;
    }
    return(border_vertices);
}
#' @title Draw a 3D line from vertex to vertex
#'
#' @description To get a nice path along the surface, pass the vertex indices along a geodesic path. Note: You can first open an interactive brain view (`views='si'`) with a vis* function like \code{\link[fsbrain]{vis.subject.morph.native}}, then run this function to draw into the active plot.
#'
#' @param surface_vertices float matrix of size (n, 3), the surface vertex coordinates, as returned as part of \code{\link[fsbrain]{subject.surface}} or \code{\link[freesurferformats]{read.fs.surface}}, in the member "vertices". Can also be a \code{freesurferformats::fs.surface} or \code{rgl::tmesh3d} instance, in which case the coordinates are extracted automatically.
#'
#' @param path_vertex_indices vector of vertex indices, the path. You will need to have it computed already. (This function does **not** compute geodesic paths, see \code{\link[fsbrain]{geodesic.path}} for that. You can use it to visualize such a path though.) If omitted, the vertex coordinates will be traversed in their given order to create the path.
#'
#' @param do_vis logical, whether to actually draw the path.
#'
#' @param color a color string, like '#FF0000' to color the path.
#'
#' @param no_material logical, whether to use set the custom rendering material properties for path visualization using \code{rgl::material3d} before plotting. If you set this to FALSE, no material will be set and you should set it yourself before calling this function, otherwise the looks of the path are undefined (dependent on the default material on your system, or the last material call). Setting this to TRUE also means that the 'color' argument is ignored of course, as the color is part of the material.
#'
#' @return n x 3 matrix, the coordinates of the path, with appropriate ones duplicated for rgl pair-wise segments3d rendering.
#'
#' @family surface mesh functions
#'
#' @seealso \code{\link[fsbrain]{vis.paths}} if you need to draw many paths, \code{\link[fsbrain]{geodesic.path}} to compute a geodesic path.
#'
#' @examples
#' \dontrun{
#' sjd = fsaverage.path(TRUE);
#' surface = subject.surface(sjd, 'fsaverage3',
#'   surface = "white", hemi = "lh");
#' p = geodesic.path(surface, 5, c(10, 20));
#' vis.subject.morph.native(sjd, 'fsaverage3', views='si');
#' vis.path.along.verts(surface$vertices, p[[1]]);
#' }
#'
#' @export
#' @importFrom rgl segments3d material3d
vis.path.along.verts <- function(surface_vertices, path_vertex_indices = NULL, do_vis = TRUE, color='#FF0000', no_material=FALSE) {
    # Extract the raw n x 3 coordinate matrix from the supported input types.
    if(freesurferformats::is.fs.surface(surface_vertices)) {
        surface_vertices = surface_vertices$vertices;
    } else if("mesh3d" %in% class(surface_vertices)) {
        surface_vertices = t(surface_vertices$vb[1:3,]);   # mesh3d stores homogeneous coords column-wise in 'vb'.
    } else {
        if(! is.matrix(surface_vertices)) {
            stop("Parameter 'surface_vertices' must be an fs.surface, an rgl::tmesh3d, or an nx3 numeric matrix");
        }
    }
    if(is.null(path_vertex_indices)) {
        # No path given: traverse all vertices in their given order.
        path_vertex_indices = seq(1L, nrow(surface_vertices));
    }
    # Bugfix: 'drop = FALSE' keeps a 1-row matrix if the path contains a single
    # vertex index; without it, nrow() below returned NULL and the function crashed.
    path_vertex_coords = surface_vertices[path_vertex_indices, , drop = FALSE];
    num_path_points = nrow(path_vertex_coords);
    if(num_path_points < 2L) {
        # Bugfix: return early instead of running into an out-of-bounds matrix
        # assignment below. Nothing is drawn, since a segment requires 2 points.
        warning(sprintf("Path won't be visible, it only contains %d vertex/vertices.\n", num_path_points));
        return(invisible(path_vertex_coords));
    }
    if(num_path_points == 2L) {
        path = path_vertex_coords;
    } else {
        # rgl::segments3d interprets consecutive point pairs as separate segments,
        # so all inner path points must be duplicated: (a,b,c,d) => (a,b,b,c,c,d).
        num_drawn_path_points = (2L * (num_path_points - 2L)) + 2L;
        path = matrix(rep(NA, (num_drawn_path_points * 3L)), ncol = 3L);
        path[1,] = path_vertex_coords[1L, ]; # copy 1st value
        path[num_drawn_path_points, ] = path_vertex_coords[num_path_points, ]; # copy last value
        inner_original_values = path_vertex_coords[2L:(num_path_points-1L),];
        target_indices_one = seq(2L, (num_drawn_path_points-1L), by = 2);
        target_indices_two = seq(3L, (num_drawn_path_points-1L), by = 2);
        path[target_indices_one,] = inner_original_values;
        path[target_indices_two,] = inner_original_values;
    }
    if(do_vis) {
        if(! no_material) {
            rgl::material3d(size=2.0, lwd=2.0, color=color, point_antialias=TRUE, line_antialias=TRUE);
        }
        rgl::segments3d(path[,1], path[,2], path[,3]);
    }
    return(invisible(path));
}
#' @title Visualize several paths in different colors.
#'
#' @inheritParams vis.path.along.verts
#'
#' @param paths list of positive integer vectors, the vertex indices of the paths
#'
#' @export
vis.paths.along.verts <- function(surface_vertices, paths, color=viridis::viridis(length(paths))) {
    if(! is.list(paths)) {
        stop("Parameter 'paths' must be a list of integer vectors.");
    }
    # Recycle the color vector so that every path has an assigned color.
    path_colors = recycle(color, length(paths));
    # Draw each path separately, in its own color.
    for(path_idx in seq_along(paths)) {
        vis.path.along.verts(surface_vertices, paths[[path_idx]], color = path_colors[path_idx]);
    }
}
#' @title Compute slopes of paths relative to axes.
#'
#' @inheritParams vis.paths
#'
#' @param return_angles logical, whether to return angles instead of slopes. Angles are returned in degrees, and will range from \code{-90} to \code{+90}.
#'
#' @return \code{m} x 3 matrix, each row corresponds to a path and describes the 3 slopes of the path against the 3 planes/ x, y, and z axes (in that order).
#'
#' @keywords internal
path.slopes <- function(coords_list, return_angles = FALSE) {
    if(! is.list(coords_list)) {
        stop("Parameter 'coords_list' must be a list.");
    }
    # Compute coords of first and last point of each track, we ignore the intermediate ones.
    fl = flc(coords_list); # fl is an m x 6 matrix: the xyz of the first point (cols 1-3) and last point (cols 4-6) of each track.
    # Per-axis coordinate differences between the two endpoints of each track.
    x_diff = fl[,1]-fl[,4];
    y_diff = fl[,2]-fl[,5];
    z_diff = fl[,3]-fl[,6];
    # Bugfix: Euclidean length of each track chord. The previous formula summed
    # the squared x difference twice plus a y*z cross term instead of x^2+y^2+z^2.
    path_lengths = sqrt(x_diff * x_diff + y_diff * y_diff + z_diff * z_diff);
    if(return_angles) {
        # TODO: fix this, see https://math.stackexchange.com/questions/463415/angle-between-two-3d-lines
        # NOTE(review): atan2 against a zero vector always yields +-90 degrees;
        # this computation looks incomplete (see the TODO above).
        num_paths = nrow(fl);
        x_angles = atan2(x_diff, rep(0L, num_paths));
        y_angles = atan2(y_diff, rep(0L, num_paths));
        z_angles = atan2(z_diff, rep(0L, num_paths));
        return(rad2deg(cbind(x_angles, y_angles, z_angles)));
    } else {
        axes_diffs = cbind(x_diff, y_diff, z_diff);
        slopes_xyz = axes_diffs / path_lengths;   # Normalized direction components, each in [-1, 1].
        return(slopes_xyz);
    }
}
#' @title Convert radians to degrees.
#' @keywords internal
rad2deg <- function(rad) {
    (rad * 180) / (pi);
}
#' @title Convert degrees to radians.
#' @keywords internal
deg2rad <- function(deg) {
    (deg * pi) / (180);
}
#' @title Compute path color from its orientation.
#'
#' @inheritParams vis.paths
#'
#' @param use_three_colors_only logical, whether to use only three different colors, based on closest axis.
#'
#' @return \code{m} x 3 matrix, each row corresponds to a path and contains its color value (RGB, range 0..255).
#'
#' @keywords internal
path.colors.from.orientation <- function(coords_list, use_three_colors_only = FALSE) {
    if(! is.list(coords_list)) {
        stop("Parameter 'coords_list' must be a list.");
    }
    num_paths = length(coords_list);
    path_colors = matrix(0L, nrow = num_paths, ncol = 3L);  # Default: all channels off (black).
    if(use_three_colors_only) {
        # Assign a pure R, G or B color per path, based on its axis slopes.
        # NOTE(review): 'which.min' selects the axis with the SMALLEST absolute
        # slope; 'which.max' may have been intended here -- confirm with author.
        abs_slopes = abs(path.slopes(coords_list));
        for(path_idx in seq_len(num_paths)) {
            full_channel = which.min(abs_slopes[path_idx, ]);
            path_colors[path_idx, full_channel] = 255L;
        }
    } else {
        # Map each axis angle (degrees, -90..+90) linearly onto one RGB channel (0..255).
        angles = path.slopes(coords_list, return_angles = TRUE);
        path_colors = cbind(as.integer(scale.to.range.zero.one(angles[,1])*255),
                            as.integer(scale.to.range.zero.one(angles[,2])*255),
                            as.integer(scale.to.range.zero.one(angles[,3])*255));
    }
    return(path_colors);
}
#' @title Scale given values to range 0..1.
#' @keywords internal
scale.to.range.zero.one <- function(x, ...) {
    # Min-max normalization; '...' is passed on to range(), e.g., na.rm=TRUE.
    value_range = range(x, ...);
    (x - value_range[1]) / (value_range[2] - value_range[1]);
}
#' @title Scale given values to range 0..1.
#'
#' @param x the numeric data
#'
#' @param ... passed on to \code{min} and \code{max}, e.g., \code{na.rm=TRUE}
#'
#' @return the scaled data
#'
#' @export
scale01 <- function(x, ...) {
    # Min-max normalization of x into the interval [0, 1].
    (x - min(x, ...)) / (max(x, ...) - min(x, ...));
}
#' @title Given a list of path coordinates, create matrix containing only the first and last point of each path.
#'
#' @inheritParams vis.paths
#'
#' @return m x 6 numeric matrix, containing the first and last point of a path per row (two 3D xyz-coords, so 6 values per row).
#'
#' @keywords internal
flc <- function(coords_list) {
    if(! is.list(coords_list)) {
        stop("Parameter 'coords_list' must be a list.");
    }
    num_paths = length(coords_list);
    if(num_paths < 1L) {
        stop("Empty coords_list, cannot determine first and last points of tracks.");
    }
    # One row per path; rows of skipped (single-point) paths remain NA.
    endpoint_coords = matrix(NA, nrow = num_paths, ncol = 6L);
    for(path_idx in seq_len(num_paths)) {
        path_coords = coords_list[[path_idx]];
        if(nrow(path_coords) < 2L) {
            warning(sprintf("Skipping path # %d: consists only of a single point.\n", path_idx));
            next;   # Row stays NA for this path.
        }
        endpoint_coords[path_idx, 1:3] = path_coords[1L, ];                  # first point of path
        endpoint_coords[path_idx, 4:6] = path_coords[nrow(path_coords), ];   # last point of path
    }
    return(endpoint_coords);
}
#' @title Visualize many paths.
#'
#' @param coords_list list of \code{m} matrices, each \code{n} x 3 matrix must contain the 3D coords for one path.
#'
#' @param path_color a color value, the color in which to plot the paths.
#'
#' @note This function is a lot faster than calling \code{vis.path.along.verts} many times and having it draw each time.
#'
#' @export
vis.paths <- function(coords_list, path_color = "#FF0000") {
    if(! is.list(coords_list)) {
        stop("Parameter 'coords_list' must be a list.");
    }
    # Compute the duplicated segment coordinates for every path first, then bind
    # them in a single call: avoids the O(m^2) cost of growing the matrix with
    # rbind inside a loop.
    path_parts = lapply(coords_list, vis.path.along.verts, do_vis = FALSE);
    path = do.call(rbind, path_parts);
    # Set the material once and draw all segments with a single rgl call.
    rgl::material3d(size = 1.0, lwd = 1.0, color = path_color, point_antialias = TRUE, line_antialias = TRUE);
    rgl::segments3d(path[,1], path[,2], path[,3]);
}
# sjd = fsaverage.path(T);
# sj = "fsaverage";
# mesh = subject.surface(sjd, sj, hemi="lh");
# lab = subject.label(sjd, sj, "cortex", hemi = "lh");
# sm = submesh.vertex(mesh, lab);
# vis.fs.surface(mesh);
# vis.fs.surface(sm);
#
# col = rep("white", nrow(mesh$vertices));
# bd = label.border.fast(mesh, lab);
# col[bd$vertices] = "red";
# vis.fs.surface(mesh, col=col);
#' @title Create a submesh including only the given vertices.
#'
#' @param surface_mesh an fs.surface instance, the original mesh
#'
#' @param old_vertex_indices_to_use integer vector, the vertex indices of the 'surface_mesh' that should be used to construct the new sub mesh.
#'
#' @param ret_mappings whether to return the vertex mappings. If TRUE, the return value becomes a list with entries 'submesh', 'vmap_full_to_submesh', and 'vmap_submesh_to_full'.
#'
#' @return the new mesh, made up of the given 'old_vertex_indices_to_use' and all (complete) faces that exist between the query vertices in the source mesh. But see 'ret_mapping' parameter.
#'
#' @examples
#' \dontrun{
#' sjd = fsaverage.path(TRUE);
#' sj = "fsaverage";
#' mesh = subject.surface(sjd, sj, hemi="lh");
#' lab = subject.label(sjd, sj, "cortex", hemi = "lh");
#' sm = fsbrain:::submesh.vertex(mesh, lab);
#' vis.fs.surface(mesh);
#' vis.fs.surface(sm);
#' }
#'
#' @keywords internal
#' @importFrom stats complete.cases
submesh.vertex <- function(surface_mesh, old_vertex_indices_to_use, ret_mappings=FALSE) {
    if(! is.vector(old_vertex_indices_to_use)) {
        stop("Argument 'old_vertex_indices_to_use' must be a vector.");
    }
    # Sorting is essential: the retained vertices get consecutive new indices
    # below, and those must match the row order of 'new_vertices', otherwise
    # the remapped faces would be broken.
    old_vertex_indices_to_use = sort(as.integer(old_vertex_indices_to_use));
    nv_old = nrow(surface_mesh$vertices);
    if(min(old_vertex_indices_to_use) < 1L | max(old_vertex_indices_to_use) > nv_old) {
        stop(sprintf("Invalid 'old_vertex_indices_to_use' parameter: must be integer vector containing values >=1 and <=num_verts(surface_mesh), which is %d.\n", nv_old));
    }
    # Map old vertex indices to new ones: retained vertices get consecutive new
    # indices (in sorted order), dropped vertices map to NA. This replaces the
    # previous per-vertex loop with a vectorized assignment.
    vert_mapping = rep(NA_integer_, nv_old);
    vert_mapping[old_vertex_indices_to_use] = seq_along(old_vertex_indices_to_use);
    # Keep the coordinates of the retained vertices. 'drop = FALSE' keeps a
    # 1 x 3 matrix (instead of collapsing to a vector) if only one vertex remains.
    new_vertices = surface_mesh$vertices[old_vertex_indices_to_use, , drop = FALSE];
    # Remap all faces at once: indexing a vector with a matrix preserves the
    # column-major layout, so reshaping with ncol=3 reconstructs the face matrix.
    # Faces referencing any dropped vertex now contain an NA and are removed.
    remapped_faces = matrix(vert_mapping[surface_mesh$faces], ncol = 3L);
    df = data.frame(remapped_faces);
    new_faces = data.matrix(df[stats::complete.cases(df), , drop = FALSE]); # remove all faces containing an NA vertex
    new_mesh = list('vertices'=new_vertices, 'faces'=new_faces); # the sub mesh
    class(new_mesh) = c(class(new_mesh), 'fs.surface');
    if(ret_mappings) {
        # Reverse mapping: for each submesh vertex, the index it had in the full
        # mesh. Since new indices were assigned in increasing old-index order,
        # this is exactly the sorted set of retained old indices.
        rev_mapping = which(! is.na(vert_mapping));
        if(length(rev_mapping) != nrow(new_vertices)) {
            stop("Internal error: vertex mapping and submesh vertex count are inconsistent.");
        }
        res = list('submesh'=new_mesh, 'vmap_full_to_submesh'=vert_mapping , 'vmap_submesh_to_full'=rev_mapping);
        return(res);
    }
    return(new_mesh);
}
#' @title Compute border vertices of a label using Rvcg.
#'
#' @param surface_mesh an fs.surface instance, see \code{\link{subject.surface}}.
#'
#' @param label an fs.label instance (see \code{\link{subject.label}}) or an integer vector, the vertex indices of the label.
#'
#' @return named list with entry 'vertices' containing an integer vector with the indices of the border vertices.
#'
#' @note This is faster than using the \code{\link{label.border}} function, but it does not fully match its functionality (some parameter are not implemented for this function), and it requires the \code{Rvcg} package, which is an optional dependency.
#'
#' @seealso \code{\link{label.border}}, which is slower but provides more options and does not require Rvcg.
#'
#' @examples
#' \dontrun{
#' sjd = fsaverage.path(TRUE);
#' sj = "fsaverage";
#' mesh = subject.surface(sjd, sj, hemi="lh");
#' lab = subject.label(sjd, sj, "cortex", hemi = "lh");
#' col = rep("white", nrow(mesh$vertices));
#' bd = fsbrain:::label.border.fast(mesh, lab);
#' col[bd$vertices] = "red";
#' vis.fs.surface(mesh, col=col);
#' }
#'
#' @keywords internal
label.border.fast <- function(surface_mesh, label) {
    # Rvcg is an optional dependency, so check its availability at runtime.
    if(requireNamespace("Rvcg", quietly = TRUE)) {
        # Accept both an fs.label instance and a plain integer vector of vertex indices.
        if(freesurferformats::is.fs.label(label)) {
            label_vertices = label$vertexdata$vertex_index;
        } else {
            label_vertices = label;
        }
        # Build a submesh restricted to the label vertices, then let Rvcg detect
        # its open border. The reverse mapping translates submesh vertex indices
        # back to indices into the full mesh.
        submesh_res = submesh.vertex(surface_mesh, label_vertices, ret_mappings = TRUE); # a submesh of the surface, only including the vertices (and faces) which are part of the label.
        label_mesh = submesh_res$submesh;
        rev_mapping = submesh_res$vmap_submesh_to_full;
        bd = Rvcg::vcgBorder(freesurferformats::fs.surface.to.tmesh3d(label_mesh));
        # 'bordervb' is a per-vertex logical border flag in submesh indexing;
        # map the flagged vertices back to full-mesh indices.
        res = list("vertices"=rev_mapping[which(bd$bordervb)]);
        return(res);
    } else {
        stop("The optional dependency package 'Rvcg' is required to use this function, please install it.");
    }
}
#' @title Compute border of a label.
#'
#' @description Compute the border of a label (i.e., a subset of the vertices of a mesh). The border thickness can be specified. Useful to draw the outline of a region, e.g., a significant cluster on the surface or a part of a ROI from a brain parcellation.
#'
#' @param surface_mesh surface mesh, as loaded by \code{\link[fsbrain]{subject.surface}} or \code{\link[freesurferformats]{read.fs.surface}}.
#'
#' @param label instance of class `fs.label` or an integer vector, the vertex indices. This function only makes sense if they form a patch on the surface, but that is not checked.
#'
#' @param inner_only logical, whether only faces consisting only of label_vertices should be considered to be label faces. If FALSE, faces containing at least one label vertex will be used. Defaults to TRUE. Leave this alone if in doubt, especially if you want to draw several label borders which are directly adjacent on the surface.
#'
#' @param expand_inwards integer, border thickness extension. If given, once the border has been computed, it is extended by the given graph distance. It is guaranteed that the border only extends inwards, i.e., it will never extend to vertices which are not part of the label.
#'
#' @param derive logical, whether the returned result should also include the border edges and faces in addition to the border vertices. Takes longer if requested, defaults to FALSE.
#'
#' @return the border as a list with the following entries: `vertices`: integer vector, the vertex indices of the border. Iff the parameter `derive` is TRUE, the following two additional fields are included: `edges`: integer matrix of size (n, 2) for n edges. Each row defines an edge by its start and target vertex. `faces`: integer vector, the face indices of the border.
#'
#' @family surface mesh functions
#'
#' @export
#' @importFrom data.table as.data.table .N
label.border <- function(surface_mesh, label, inner_only=TRUE, expand_inwards=0L, derive=FALSE) {
    # Accept both an fs.label instance and a plain integer vector of vertex indices.
    if(freesurferformats::is.fs.label(label)) {
        label_vertices = label$vertexdata$vertex_index;
    } else {
        label_vertices = label;
    }
    # An empty label has an empty border.
    if(length(label_vertices) == 0L) {
        return(list("vertices"=c(), "edges"=c(), "faces"=c()));
    }
    # NOTE(review): this disabled shortcut would delegate to the faster Rvcg-based
    # implementation for the simple case; it was left disabled by the author.
    #if(expand_inwards == 0L & derive == FALSE & inner_only == TRUE) {
    #    if(requireNamespace("Rvcg", quietly = TRUE)) {
    #        return(label.border.fast(surface_mesh, label));
    #    }
    #}
    # Collect the faces that belong to the label.
    if(inner_only) {
        # Only faces whose vertices are all part of the label.
        label_faces = mesh.vertex.included.faces(surface_mesh, label_vertices);
    } else {
        # All faces containing at least one label vertex.
        label_faces = mesh.vertex.neighbors(surface_mesh, label_vertices)$faces;
    }
    label_edges = face.edges(surface_mesh, label_faces);
    #cat(sprintf("Found %d label faces and %d label edges based on the %d label_vertices.\n", length(label_faces), nrow(label_edges), length(label_vertices)))
    if(nrow(label_edges) == 0L) {
        # return early in this case, because otherwise the line that computes border_edges below will fail (because the $N==1 part will return no rows)
        return(list("vertices"=c(), "edges"=c(), "faces"=c()));
    }
    # Border detection: an interior edge is shared by two label faces and thus
    # occurs twice in 'label_edges'; a border edge occurs only once. Count
    # occurrences of each (direction-normalized) edge via data.table.
    label_edges_sorted = as.data.frame(t(apply(label_edges, 1, sort))); # Sort start and target vertex within edge to count edges (u,v) and (v,u) as 2 occurrences of same edge later.
    edge_dt = data.table::as.data.table(label_edges_sorted);
    edgecount_dt = edge_dt[, .N, by = names(edge_dt)]; # add column 'N' which contains the counts (i.e., how often each edge occurs over all faces).
    border_edges = edgecount_dt[edgecount_dt$N==1][,1:2]; # Border edges occur only once, as the other face they touch is not part of the label.
    border_vertices = unique(as.vector(t(border_edges)));
    # Optionally thicken the border by growing it inwards by graph distance
    # 'expand_inwards'; restricting to label vertices guarantees the border
    # never leaves the label.
    if(expand_inwards > 0L) {
        num_before_expansion = length(border_vertices);
        border_vertices = mesh.vertex.neighbors(surface_mesh, border_vertices, k=expand_inwards, restrict_to_vertices=label_vertices)$vertices;
        #cat(sprintf("Expanded border by %d, this increased the border vertex count from %d to %d.\n", expand_inwards, num_before_expansion, length(border_vertices)));
    }
    if(! derive) {
        return(list("vertices"=border_vertices));
    }
    # Now retrieve the faces from the neighborhood that include any border vertex.
    border_faces = mesh.vertex.included.faces(surface_mesh, border_vertices);
    if(expand_inwards > 0L) {
        # We still need to recompute the border edges based on the updated vertices (and derived faces).
        border_edges = face.edges(surface_mesh, border_faces);
    }
    return(list("vertices"=border_vertices, "edges"=border_edges, "faces"=border_faces));
}
#' @title Determine which of the given color strings are gray scale colors.
#'
#' @param col_strings vector of RGB(A) color strings, like \code{c("#FFFFFF", ("#FF00FF"))}.
#'
#' @param accept_col_names logical, whether to accept color names like 'white'. Disables all sanity checks.
#'
#' @return logical vector, one entry per input color: TRUE iff the red, green and blue channel values of the color are identical (i.e., the color is a shade of gray).
#'
#' @examples
#' colors.are.grayscale(c("#FFFFFF", "#FF00FF"));
#' all((colors.are.grayscale(c("#FFFFFF00", "#ABABABAB"))));
#'
#' @export
#' @importFrom grDevices col2rgb
colors.are.grayscale <- function(col_strings, accept_col_names=TRUE) {
    if(! accept_col_names) {
        string_lengths = nchar(col_strings);
        if(all(string_lengths == 9)) {
            # 9 chars means '#RRGGBBAA': drop the 2 alpha digits, transparency is irrelevant for grayness.
            col_strings = substr(col_strings, 1, 7);
        } else if(! all(string_lengths == 7)) {
            stop("Invalid input: parameter 'colstring' must contain RBG or RGBA color strings with 7 chars each for RGB or 9 chars each for RGBA.");
        }
    }
    # col2rgb() returns a 3 x n matrix; a column is grayscale iff all 3 channel values match.
    channel_matrix = grDevices::col2rgb(col_strings);
    is_gray_per_color = apply(channel_matrix, 2, function(channel_values) { length(unique(channel_values)) == 1 });
    return(unname(unlist(is_gray_per_color)));
}
#' @title Check for the given color strings whether they have transparency, i.e., an alpha channel value != fully opaque.
#'
#' @param col_strings vector of RGB(A) color strings, like \code{c("#FFFFFF", ("#FF00FF"))}.
#'
#' @param accept_col_names logical, whether to accept color names like 'white'. Disables all sanity checks.
#'
#' @return logical vector, one entry per input color: TRUE iff the color's alpha value is not 255 (fully opaque). Colors without an alpha part are treated as fully opaque.
#'
#' @examples
#' colors.have.transparency(c("#FFFFFF", "#FF00FF", "#FF00FF00", "red", "#FF00FFDD"));
#' all((colors.have.transparency(c("#FFFFFF00", "#ABABABAB"))));
#'
#' @export
#' @importFrom grDevices col2rgb
colors.have.transparency <- function(col_strings, accept_col_names=TRUE) {
    if(! accept_col_names) {
        # A valid color string has 7 chars ('#RRGGBB') or 9 chars ('#RRGGBBAA').
        # BUGFIX: the second disjunct previously re-checked for length 9 instead of 7,
        # which rejected valid RGB strings without an alpha part.
        if(! (all(nchar(col_strings) == 9) || all(nchar(col_strings) == 7))) {
            stop("These strings do not look like RGBA color strings: invalid number of chars (expected 7 or 9 for RGB/RGBA).");
        }
    }
    # col2rgb(..., alpha=TRUE) returns a 4 x n matrix; row 4 is the alpha channel,
    # and defaults to 255 (opaque) for colors given without an alpha part.
    return(unname(unlist(apply(grDevices::col2rgb(col_strings, alpha=TRUE), 2, function(x){x[4] != 255L}))));
}
#' @title Enumerate all edges of the given faces or mesh.
#'
#' @description Compute the edges of a tri-mesh, either for all faces or for a subset given by face indices.
#'
#' @param surface_mesh surface mesh, as loaded by \code{\link[fsbrain]{subject.surface}} or \code{\link[freesurferformats]{read.fs.surface}}.
#'
#' @param face_indices integer vector, the face indices. Can also be the character string 'all' to use all faces.
#'
#' @return integer matrix of size (n, 2) where n is the number of edges. The indices (source and target vertex) in each row are **not** sorted, and the edges are **not** unique. I.e., each undirected edge `u, v` (with the exception of edges on the mesh border) will occur twice in the result: once as `u, v` and once as `v, u`.
#'
#' @family surface mesh functions
#'
#' @export
face.edges <- function(surface_mesh, face_indices='all') {
    if(is.character(face_indices)) {
        if(face_indices=='all') {
            # Use every face of the mesh.
            face_indices = seq.int(nrow(surface_mesh$faces));
        }
    }
    # Each triangle (a, b, c) contributes the 3 directed edges (a,b), (b,c), (c,a).
    e1 = surface_mesh$faces[face_indices, 1:2];
    e2 = surface_mesh$faces[face_indices, 2:3];
    e3 = surface_mesh$faces[face_indices, c(3,1)];
    return(rbind(e1, e2, e3));
}
#' @title Return diverging color list
#'
#' @description Build a diverging palette: a cyan-to-blue ramp, a gray center, and a red-to-yellow ramp.
#'
#' @param num_colors integer, the number of colors you want
#'
#' @return vector of colors, of length `num_colors`
#'
#' @importFrom grDevices colorRampPalette rgb
#' @export
colorlist.brain.clusters <- function(num_colors) {
    # Split the requested count into two symmetric ramps around a gray center:
    # odd counts get 1 central gray entry, even counts get 2.
    if(num_colors %% 2 == 1L) {
        num_per_side = num_colors %/% 2L;
        num_central = 1L;
    } else {
        num_per_side = (num_colors %/% 2L) - 1L;
        num_central = 2L;
    }
    cold_ramp = grDevices::colorRampPalette(c(grDevices::rgb(0., 1., 1.), grDevices::rgb(0., 0., 1.)));  # cyan -> blue
    warm_ramp = grDevices::colorRampPalette(c(grDevices::rgb(1., 0., 0.), grDevices::rgb(1., 1., 0.)));  # red -> yellow
    central_gray = grDevices::rgb(0.8, 0.8, 0.8);
    return(c(cold_ramp(num_per_side), rep(central_gray, num_central), warm_ramp(num_per_side)));
}
#' @title Read colors from CSV file.
#'
#' @description Parse colors from a whitespace/CSV table. The color definition is detected from the header: either a single 'rgb_hexcolorstring' column, three integer channel columns ('rgbint_red/green/blue', range 0-255), or three float channel columns ('rgbfloat_red/green/blue', range 0-1).
#'
#' @param filepath character string, path to a CSV file containing colors
#'
#' @return vector of hex color strings
#'
#' @export
#' @importFrom utils read.table
read.colorcsv <- function(filepath) {
    color_df = read.table(filepath, header = TRUE, stringsAsFactors = FALSE);
    df_columns = names(color_df);
    if("rgb_hexcolorstring" %in% df_columns) {
        # Colors are given directly as hex strings.
        return(color_df$rgb_hexcolorstring);
    }
    if(all(c("rgbint_red", "rgbint_green", "rgbint_blue") %in% df_columns)) {
        # Integer channels in 0-255: rescale to 0-1 for grDevices::rgb().
        return(grDevices::rgb(color_df$rgbint_red/255., color_df$rgbint_green/255., color_df$rgbint_blue/255.));
    }
    if(all(c("rgbfloat_red", "rgbfloat_green", "rgbfloat_blue") %in% df_columns)) {
        # Float channels already in 0-1.
        return(grDevices::rgb(color_df$rgbfloat_red, color_df$rgbfloat_green, color_df$rgbfloat_blue));
    }
    stop(sprintf("No valid color definition found in colorcsv file '%s'.", filepath));
}
#' @title Retrieve values from nested named lists
#'
#' @description Walk a path of names through nested named lists and return the value found at the end of the path.
#'
#' @param named_list a named list
#'
#' @param listkeys vector of character strings, the nested names of the lists
#'
#' @param default the default value to return in case the requested value is `NULL`.
#'
#' @return the value at the path through the lists, or `NULL` (or the 'default') if no such path exists.
#'
#' @examples
#'    data = list("regions"=list("frontal"=list("thickness"=2.3, "area"=2345)));
#'    getIn(data, c("regions", "frontal", "thickness")); # 2.3
#'    getIn(data, c("regions", "frontal", "nosuchentry")); # NULL
#'    getIn(data, c("regions", "nosuchregion", "thickness")); # NULL
#'    getIn(data, c("regions", "nosuchregion", "thickness"), default=14); # 14
#'
#' @export
getIn <- function(named_list, listkeys, default=NULL) {
    num_keys = length(listkeys);
    # Nothing to look up in an empty container or with an empty path.
    if(length(named_list) < 1L | num_keys < 1L) {
        return(default);
    }
    current = named_list;
    for(key_idx in seq_len(num_keys)) {
        lkey = listkeys[key_idx];
        is_last_key = (key_idx == num_keys);
        # Intermediate steps must descend into a list; the final step may read
        # from any named container.
        if((! is_last_key) && (! is.list(current))) {
            return(default);
        }
        if(! (lkey %in% names(current))) {
            return(default);
        }
        if(is_last_key) {
            return(current[[lkey]]);
        }
        current = current[[lkey]];
    }
}
#' @title Check for values in nested named lists
#'
#' @description Convenience wrapper around \code{getIn}: checks whether following the given path of names through the nested lists leads to a non-NULL value.
#'
#' @param named_list a named list
#'
#' @param listkeys vector of character strings, the nested names of the lists
#'
#' @return whether a non-NULL value exists at the path
#'
#' @examples
#'    data = list("regions"=list("frontal"=list("thickness"=2.3, "area"=2345)));
#'    hasIn(data, c("regions", "nosuchregion")); # FALSE
#'
#' @export
hasIn <- function(named_list, listkeys) {
    value_at_path = getIn(named_list, listkeys);
    return(! is.null(value_at_path));
}
#' @title Find the subject directory containing the fsaverage subject (or others) on disk.
#'
#' @description Try to find directory containing the fsaverage subject (or any other subject) by checking in the following places and returning the first path where it is found: first, the directory given by the environment variable SUBJECTS_DIR, then in the subir 'subjects' of the directory given by the environment variable FREESURFER_HOME, and finally the base dir of the package cache. See the function \code{\link[fsbrain]{download_fsaverage}} if you want to download fsaverage to your package cache and ensure it always gets found, no matter whether the environment variables are set or not.
#'
#' @param subject_id string, the subject id of the subject. Defaults to 'fsaverage'.
#'
#' @param mustWork logical. Whether the function should with an error stop if the directory cannot be found. If this is TRUE, the return value will be only the 'found_at' entry of the list (i.e., only the path of the subjects dir).
#'
#' @return named list with the following entries: "found": logical, whether it was found. "found_at": Only set if found=TRUE, the path to the fsaverage directory (NOT including the fsaverage dir itself). "found_all_locations": list of all locations in which it was found. See 'mustWork' for important information.
#'
#' @seealso \code{\link{fsaverage.path}}
#'
#' @export
find.subjectsdir.of <- function(subject_id='fsaverage', mustWork=FALSE) {
    # Result structure; 'found_at' gets overwritten by later hits, so the search
    # order below also defines the precedence (SUBJECTS_DIR, checked last, wins).
    ret = list();
    ret$found = FALSE;
    ret$found_all_locations = NULL;
    # Candidate 1: the package cache ('subjects_dir' subdir of the optional data dir).
    guessed_path = get_optional_data_filepath(file.path("subjects_dir", subject_id), mustWork = FALSE);
    if(nchar(guessed_path) > 0L & dir.exists(guessed_path)) {
        ret$found = TRUE;
        ret$found_at = get_optional_data_filepath(file.path("subjects_dir"));
        ret$found_all_locations = c(ret$found_all_locations, ret$found_at);
    }
    # Candidate 2: the 'subjects' subdir of the FreeSurfer installation (FREESURFER_HOME).
    fs_home_search_res = find.freesurferhome();
    if(fs_home_search_res$found) {
        fs_home = fs_home_search_res$found_at;
        guessed_path = file.path(fs_home, "subjects", subject_id);
        if(dir.exists(guessed_path)) {
            ret$found = TRUE;
            ret$found_at = file.path(fs_home, "subjects");
            ret$found_all_locations = c(ret$found_all_locations, ret$found_at);
        }
    }
    # Candidate 3: the directory named by the SUBJECTS_DIR environment variable.
    subj_dir=Sys.getenv("SUBJECTS_DIR");
    if(nchar(subj_dir) > 0) {
        guessed_path = file.path(subj_dir, subject_id);
        if(dir.exists(guessed_path)) {
            ret$found = TRUE;
            ret$found_at = subj_dir;
            ret$found_all_locations = c(ret$found_all_locations, ret$found_at);
        }
    }
    # The same path may have been hit via several candidates; deduplicate.
    ret$found_all_locations = unique(ret$found_all_locations);
    if(mustWork) {
        # With mustWork, the return value is the path itself (not the list), or an error.
        if(ret$found) {
            return(ret$found_at);
        } else {
            stop(sprintf("Could not find subjects dir containing subject '%s' and parameter 'mustWork' is TRUE. Checked for directories given by environment variables FREESURFER_HOME and SUBJECTS_DIR and in package cache. Please set the environment variables by installing and configuring FreeSurfer.\n Or, if you want to download fsaverage without installing FreeSurfer, have a look at the 'download_fsaverage' function in this package.\n", subject_id));
        }
    }
    return(ret);
}
#' @title Return path to fsaverage dir.
#'
#' @param allow_fetch logical, whether to allow trying to download it.
#'
#' @return the path to the fsaverage directory (NOT including the 'fsaverage' dir itself).
#'
#' @note This function will stop (i.e., raise an error) if the directory cannot be found. The fsaverage template is part of FreeSurfer, and distributed under the FreeSurfer software license.
#'
#' @export
fsaverage.path <- function(allow_fetch = FALSE) {
    search_result = find.subjectsdir.of(subject_id='fsaverage', mustWork=FALSE);
    if(! search_result$found) {
        if(allow_fetch) {
            # Try to download fsaverage into the package cache; requires accepting
            # the FreeSurfer software license.
            fsbrain::download_fsaverage(accept_freesurfer_license = TRUE);
        }
        # Re-run the search with mustWork=TRUE: raises an error if still missing.
        return(find.subjectsdir.of(subject_id='fsaverage', mustWork=TRUE));
    }
    return(search_result$found_at);
}
#' @title Return FreeSurfer path.
#'
#' @return the FreeSurfer path, typically what the environment variable `FREESURFER_HOME` points to.
#'
#' @note This function will stop (i.e., raise an error) if the directory cannot be found.
#'
#' @export
fs.home <- function() {
    # Thin wrapper: delegate to find.freesurferhome(), erroring if not found.
    return(find.freesurferhome(mustWork=TRUE));
}
#' @title Find the FREESURFER_HOME directory on disk.
#'
#' @description Try to find directory containing the FreeSurfer installation, based on environment variables and *educated guessing*.
#'
#' @param mustWork logical. Whether the function should with an error stop if the directory cannot be found. If this is TRUE, the return value will be only the 'found_at' entry of the list (i.e., only the path of the FreeSurfer installation dir).
#'
#' @return named list with the following entries: "found": logical, whether it was found. "found_at": Only set if found=TRUE, the path to the FreeSurfer installation directory (including the directory itself). See 'mustWork' for important information.
#'
#' @seealso \code{\link{fs.home}}
#'
#' @export
find.freesurferhome <- function(mustWork=FALSE) {
    ret = list();
    ret$found = FALSE;
    # First preference: the FREESURFER_HOME environment variable, if it points
    # to an existing directory.
    fs_home=Sys.getenv("FREESURFER_HOME");
    if(nchar(fs_home) > 0) {
        guessed_path = file.path(fs_home);
        if(dir.exists(guessed_path)) {
            ret$found = TRUE;
            ret$found_at = guessed_path;
        }
    }
    # Check in some typical paths
    if(! ret$found) {
        # Platform-specific default installation locations.
        if(tolower(Sys.info()[["sysname"]]) == 'darwin') {
            search_paths = c("/Applications/freesurfer");
        } else if(tolower(Sys.info()[["sysname"]]) == 'linux') {
            search_paths = c("/usr/local/freesurfer", "/opt/freesurfer");
        } else {
            # Windows, needed for AppVeyor
            search_paths = c();
        }
        # Also consider common per-user installation locations under $HOME.
        user_home = Sys.getenv("HOME");
        if(nchar(user_home) > 0) {
            search_paths = c(search_paths, file.path(user_home, 'freesurfer'), file.path(user_home, 'software', 'freesurfer'), file.path(user_home, 'opt', 'freesurfer'));
        }
        # NOTE: no break in this loop, so if several candidates exist, the LAST
        # existing path in search_paths wins.
        for(sp in search_paths) {
            if(dir.exists(sp)) {
                ret$found = TRUE;
                ret$found_at = sp;
            }
        }
    }
    if(mustWork) {
        # With mustWork, return only the path itself, or raise an error.
        if(ret$found) {
            return(ret$found_at);
        } else {
            stop(sprintf("Could not find FreeSurfer installation dir and parameter 'mustWork' is TRUE. Please set the environment variables by installing and configuring FreeSurfer.\n"));
        }
    }
    return(ret);
}
#' @title Get rgloptions for testing.
#'
#' @description Returns the default figure size used during the unit tests: an 800x800 pixel window at screen position (50, 50).
#'
#' @return named list, usable as 'rgloptions' parameter for vis functions like \code{\link[fsbrain]{vis.subject.morph.native}}.
#'
#' @export
rglot <- function() {
    # windowRect is c(x_start, y_start, x_end, y_end) in pixels.
    test_window_rect = c(50, 50, 800, 800);
    return(list('windowRect' = test_window_rect));
}
#' @title Get rgloptions and consider global options.
#'
#' @description Retrieves the global rgloptions defined in \code{getOption('fsbrain.rgloptions')}; if that option is not set, falls back to the value from \code{\link{rglot}}.
#'
#' @return named list, usable as 'rgloptions' parameter for vis functions like \code{\link[fsbrain]{vis.subject.morph.native}}.
#'
#' @note You can set the default size for all fsbrain figures to 1200x1200 pixels like this: \code{options("fsbrain.rgloptions"=list("windowRect"=c(50,50,1200,1200)))}.
#'
#' @export
rglo <- function() {
    user_rgloptions = getOption('fsbrain.rgloptions');
    if(is.null(user_rgloptions)) {
        # No global option set: use the package default.
        return(rglot());
    }
    return(user_rgloptions);
}
#' @title Set default figure size for fsbrain visualization functions.
#'
#' @param width integer, default figure width in pixels
#'
#' @param height integer, default figure height in pixels
#'
#' @param xstart integer, default horizontal position of plot window on screen, left border is 0. The max value (right border) depends on your screen resolution.
#'
#' @param ystart integer, default vertical position of plot window on screen, upper border is 0. The max value (lower border) depends on your screen resolution.
#'
#' @note This function overwrites \code{options("fsbrain.rgloptions")}. Output size is limited by your screen resolution. To set your preferred figure size for future R sessions, you could call this function in your \code{'~/.Rprofile'} file.
#'
#' @export
fsbrain.set.default.figsize <- function(width, height, xstart=50L, ystart=50L) {
    # Assemble the rgl window rectangle c(x_start, y_start, x_end, y_end) and
    # store it in the global option read by rglo().
    window_rect = c(xstart, ystart, width, height);
    options("fsbrain.rgloptions"=list("windowRect"=window_rect));
}
#' @title Split morph data vector at hemisphere boundary.
#'
#' @description Given a single per-vertex data vector covering both hemispheres of a subject, split it into the left and right hemisphere parts. The split position is determined by loading the respective surface and counting the vertices of the two hemispheres.
#'
#' @param vdata numerical vector of data for both hemispheres, one value per vertex
#'
#' @param surface the surface to load to determine the vertex counts
#'
#' @param expand logical, whether to allow input of length 1, and expand (repeat) it to the length of the hemispheres.
#'
#' @inheritParams subject.morph.native
#'
#' @note Instead of calling this function to split the data, you could use the 'split_by_hemi' parameter of \code{\link[fsbrain]{subject.morph.native}}.
#'
#' @return a hemilist, each entry contains the data part of the respective hemisphere.
#' @export
vdata.split.by.hemi <- function(subjects_dir, subject_id, vdata, surface='white', expand=TRUE) {
    vertex_counts = subject.num.verts(subjects_dir, subject_id, surface=surface);
    num_verts_total = vertex_counts$lh + vertex_counts$rh;
    # A scalar input may be broadcast to all vertices if requested.
    if(expand && length(vdata) == 1L) {
        vdata = rep(vdata, num_verts_total);
    }
    if(length(vdata) != num_verts_total) {
        # 163842 verts per hemi is the fsaverage standard-space mesh size.
        if(length(vdata) == (163842L*2L)) {
            warning("Hint: The length of 'vdata' matches the number of vertices in the fsaverage template. Wrong 'subject_id' parameter with standard space data?");
        }
        stop(sprintf("Cannot split data: surfaces contain a total of %d vertices (lh=%d, rh=%d), but vdata has length %d. Lengths must match.\n", (vertex_counts$lh + vertex_counts$rh), vertex_counts$lh, vertex_counts$rh, length(vdata)));
    }
    # The left hemisphere data comes first by FreeSurfer convention.
    lh_data = vdata[1L:vertex_counts$lh];
    rh_data = vdata[(vertex_counts$lh+1L):(vertex_counts$lh + vertex_counts$rh)];
    return(list('lh'=lh_data, 'rh'=rh_data));
}
#' @title Generate test 3D volume of integers. The volume has an outer background area (intensity value 'bg') and an inner foreground areas (intensity value 200L).
#'
#' @param vdim integer vector of length 3, the dimensions
#'
#' @param bg value to use for outer background voxels. Typically `0L` or `NA`.
#'
#' @note This function exists for software testing purposes only, you should not use it in client code.
#'
#' @return a 3d array of integers
#' @export
gen.test.volume <- function(vdim=c(256L, 256L, 256L), bg = NA) {
    # Start with a volume filled entirely with the background value.
    volume = array(data = rep(bg, prod(vdim)), dim = vdim);
    # The foreground cube spans from roughly 1/4 to 3/4 of each dimension.
    core_start = (vdim %/% 2) %/% 2;
    core_end = vdim - core_start;
    volume[core_start[1]:core_end[1], core_start[2]:core_end[2], core_start[3]:core_end[3]] = 200L;
    return(volume);
}
|
dcb16bb72ea7749b06e0f94e389e40dac1dc1428
|
1176e185df07a19c00d0baf112fa11f2f8a8f5f2
|
/man/cor_spatial.Rd
|
edd650e9337d19270a35898918bf30fd12b353ab
|
[] |
no_license
|
Pintademijote/multipack
|
142cba0c0376a5779e06a8dd66762bf6e497bb9e
|
e9ff3c6695ac794cfc7bf681a109e03740d77f0f
|
refs/heads/master
| 2020-05-02T14:44:20.301357
| 2019-09-23T11:54:47
| 2019-09-23T11:54:47
| 178,019,296
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 1,248
|
rd
|
cor_spatial.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/cor_spatial.R
\name{cor_spatial}
\alias{cor_spatial}
\title{Produce a table containing the values sampled every x pixels on the map generated by Chloe, per metric.}
\usage{
cor_spatial(directory, metrics, dist)
}
\arguments{
\item{directory}{The directory where the ascii files generated with Chloe are stored}
\item{metrics}{Vector of the metrics you chose in Chloe when you created the ascii files}
\item{dist}{Vector of the scales you chose in Chloe when you created the ascii files}
}
\value{
Return a table containing the values sampled every x pixels on the map generated by Chloe, per metric
}
\description{
`cor_spatial()` Return a table containing the values sampled every x pixels on the map generated by Chloe, per metric.
}
\details{
This function is to be used when you want to test for correlation between metrics
at multiple scales, and when your sampling is randomly selected across
the landscape and patches. To test the correlation, you must then use the
'temp()' function.
}
\section{Warning}{
This process can take some time depending on the size of your ascii files and the number of your metrics
}
\author{
Pierre-Gilles Lemasle <pg.lemasle@gmail.com>
}
|
616c52a2addccb86856b3eb62f7aa1ac84c2975c
|
580064be6f7e0b39bd07fb260b1cf2f48abd51c5
|
/analysis/CiteSeq_Minerva/HTO_Pt4/HTO_Tagspt4.R
|
f8684f1886a9b2a240db1dd6c0fb6f835f0681b9
|
[] |
no_license
|
RoussosLab/pcqc
|
58f904199774a71f9b64193e75fbc4e0008570ae
|
e282f2b0fc422194706dbbeebf759b221d9a8a14
|
refs/heads/main
| 2023-01-08T13:16:39.590571
| 2020-11-05T18:41:25
| 2020-11-05T18:41:25
| 310,384,896
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,573
|
r
|
HTO_Tagspt4.R
|
# First do Python QC Check
#
# Hashtag-oligo (HTO) demultiplexing for patient 4 CITE-seq data using Seurat:
# reads a dense UMI count table, CLR-normalizes the 4 HTO rows, demultiplexes
# cells into Singlet/Doublet/Negative, writes diagnostic plots to a PDF and the
# per-cell classification to a CSV.
library(Seurat)
df = read.table(file = 'dense_umis.tsv', sep = '\t', header = TRUE, row.names = 1)
#df = read.table(file = 'subset_dense_umis.csv', sep = ',', header = TRUE, row.names = 1)
# Keep only the HTO count rows. NOTE(review): assumes the 4 HTOs are the first
# 4 rows of the table -- confirm with rownames that you have the right rows.
pbmc.htos <- as.matrix(df[1:4,]) #confirm with rownames that you have the right rows
# Normally this should actually include (RNA) counts -- but not necessary here.
pbmc.hashtag <- CreateSeuratObject(counts = pbmc.htos)
pbmc.hashtag[["HTO"]] <- CreateAssayObject(counts = pbmc.htos)
# CLR (centered log-ratio) normalization, the method recommended for HTO counts.
pbmc.hashtag <- NormalizeData(pbmc.hashtag, assay = "HTO", normalization.method = "CLR")
# Demultiplex: assign each cell to an HTO, or call it Doublet/Negative, using
# the 99% quantile as the positivity cutoff.
pbmc.hashtag <- HTODemux(pbmc.hashtag, assay = "HTO", positive.quantile = 0.99)
table(pbmc.hashtag$HTO_classification.global)
# Result observed on this data set:
# Doublet Negative Singlet
# 2184 1 8255
# Write diagnostic plots (per-HTO ridge plots, pairwise scatter, heatmap) to PDF.
pdfPath = "sc_pcqc/hto.pdf"
pdf(file = pdfPath)
Idents(pbmc.hashtag) <- "HTO_maxID"
RidgePlot(pbmc.hashtag, assay = "HTO", features = rownames(pbmc.hashtag[["HTO"]])[1:4], ncol = 4)
FeatureScatter(pbmc.hashtag, feature1 = "hto_HTO1-GTCAACTCTTTAGCG", feature2 = "hto_HTO2-TGATGGCCTATTGGG")
FeatureScatter(pbmc.hashtag, feature1 = "hto_HTO3-TTCCGCCTCTCTTTG", feature2 = "hto_HTO4-AGTAAGTTCAGCGTA")
FeatureScatter(pbmc.hashtag, feature1 = "hto_HTO3-TTCCGCCTCTCTTTG", feature2 = "hto_HTO2-TGATGGCCTATTGGG")
HTOHeatmap(pbmc.hashtag, assay = "HTO", ncells = 5000)
dev.off()
# Export the per-cell Singlet/Doublet/Negative classification as a one-column CSV.
hto_list <- as.list(pbmc.hashtag$HTO_classification.global)
hto_df <- do.call("rbind", lapply(hto_list, as.data.frame))
write.csv(hto_df, file = "sc_pcqc/hto.csv")
# See https://satijalab.org/seurat/v3.2/hashing_vignette.html for more plotting options.
|
a1ff60e2c0f90810d7cd0e95097bbc7c50549074
|
e1c4adfbf80d475b6a11aa6d003e77a18bad5089
|
/Loan.R
|
4c209ebdb79eb1076260843172d9aa5f7c3c92b2
|
[] |
no_license
|
nirvana0311/Loan-Prediction-problem-3-of-Analytics-Vindhya-80-accuracy
|
ae503104e56643ca8bf1a6b8bf8b2ae10b57a05a
|
b131fc3fa2e12620b127f4e16b53f60f5e7db90d
|
refs/heads/master
| 2020-12-25T18:43:17.160590
| 2017-06-11T05:22:26
| 2017-06-11T05:22:26
| 93,983,420
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,607
|
r
|
Loan.R
|
# Loan approval prediction (Analytics Vidhya problem): impute missing values
# with mice (predictive mean matching), then fit a random forest (caret) and an
# SVM (e1071) classifier and inspect their training confusion tables.
libs=c('caret','randomForest','mice','VIM','e1071')
# NOTE(review): require() returns FALSE instead of erroring on a missing
# package, and T is a reassignable alias -- library(...) with TRUE is safer.
lapply(libs,require,character.only=T)
# NOTE(review): absolute Windows paths plus setwd() make this script
# machine-specific; consider relative paths.
setwd("C:\\Users\\nirva\\IdeaProjects\\R\\Loan Prediction")
training=read.csv("C:\\Users\\nirva\\IdeaProjects\\R\\Loan Prediction\\train.csv")
test=read.csv("C:\\Users\\nirva\\IdeaProjects\\R\\Loan Prediction\\test.csv")
# Drop the Loan_ID identifier column.
training=training[,!(colnames(training) %in% 'Loan_ID')]
# NOTE(review): the line below indexes 'test' using colnames(training) --
# presumably colnames(test) was intended; this only works if both data frames
# share the same column layout. Verify.
test=test[,!(colnames(training) %in% 'Loan_ID')]
# Percentage of missing values in a vector.
pMiss=function(x){(sum(is.na(x))/length(x))*100}
# Visualize the missing-data pattern (VIM) and print per-column missing %.
aggr(training,col=c('blue','red'),numbers=T,sortVars=T,labels=names(training),cex.axis=.7,gap=3, ylab=c("Histogram of missing data","Pattern"))
apply(training,2,pMiss)
# Impute missing values via predictive mean matching; a single imputed data set
# (m=1) is generated and used as the new training data.
tempdata=mice(data = training,m = 1,method = 'pmm',maxit = 50,seed = 518)
training=complete(tempdata,1)
# Imputation diagnostics.
densityplot(tempdata)
xyplot(tempdata,LoanAmount~Credit_History+Loan_Amount_Term,pch=18,cex=1)
colnames(training)
# Feature engineering: combined applicant + co-applicant income.
training$TotalIncome=training$ApplicantIncome+training$CoapplicantIncome
# NOTE(review): training_a/training_b look like a manual train/validation split,
# but are never used below -- presumably leftover; verify before removing.
training_a=training[1:560,]
training_b=training[561:614,]
# Random forest via caret: 10-fold CV repeated 3 times, fixed mtry=3, 2501 trees.
control=trainControl(method = 'repeatedcv',number = 10,repeats = 3,search = 'grid')
tunegrid=expand.grid(.mtry=3)
first_model=train(Loan_Status~.,method='rf',data = training,trControl=control,tuneGrid=tunegrid,ntree=2501,na.action = na.omit)
# Confusion table on the training data (optimistic in-sample estimate).
table(training$Loan_Status,predict(first_model))
first_model
plot(first_model)
names(training)
#SVM
second_model=svm(Loan_Status~.,data = training)
table(training$Loan_Status,predict(second_model))
# Tune the SVM over the given epsilon/cost grid and inspect the best model's
# in-sample confusion table.
tun=tune(svm,train.x =Loan_Status~.,data=training,ranges = list(epsilon=0.1,cost=4))
table(training$Loan_Status,predict(tun$best.model))
second_model
# Sanity check: imputation should have left no missing values.
sum(is.na(training))
|
4a4f1439359052408da5faa23800a49070ea49fd
|
657a411bc8098a84f5b117103d65b7ab058da67e
|
/man/verb-DELETE.Rd
|
210f09255ae16485c113f96fbf43b692038f2549
|
[
"MIT"
] |
permissive
|
ropensci/crul
|
5389913572808be02d1a22075dc61a7ef6c52538
|
1b76ad3768b45acf7b7aa15d98a748b0e27791f4
|
refs/heads/main
| 2023-05-29T08:18:09.489490
| 2023-05-23T19:09:47
| 2023-05-23T19:10:05
| 72,250,512
| 101
| 27
|
NOASSERTION
| 2023-08-22T13:43:33
| 2016-10-28T23:29:59
|
R
|
UTF-8
|
R
| false
| true
| 1,280
|
rd
|
verb-DELETE.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/verbs.R
\name{verb-DELETE}
\alias{verb-DELETE}
\title{HTTP verb info: DELETE}
\description{
The DELETE method deletes the specified resource.
}
\section{The DELETE method}{
The DELETE method requests that the origin server remove the
association between the target resource and its current
functionality. In effect, this method is similar to the rm command
in UNIX: it expresses a deletion operation on the URI mapping of the
origin server rather than an expectation that the previously
associated information be deleted.
}
\examples{
\dontrun{
x <- HttpClient$new(url = "https://hb.opencpu.org")
x$delete(path = 'delete')
## a list
(res1 <- x$delete('delete', body = list(hello = "world"), verbose = TRUE))
jsonlite::fromJSON(res1$parse("UTF-8"))
## a string
(res2 <- x$delete('delete', body = "hello world", verbose = TRUE))
jsonlite::fromJSON(res2$parse("UTF-8"))
## empty body request
x$delete('delete', verbose = TRUE)
}
}
\references{
\url{https://datatracker.ietf.org/doc/html/rfc7231#section-4.3.5}
}
\seealso{
\link{crul-package}
Other verbs:
\code{\link{verb-GET}},
\code{\link{verb-HEAD}},
\code{\link{verb-PATCH}},
\code{\link{verb-POST}},
\code{\link{verb-PUT}}
}
\concept{verbs}
|
01d445fba9929a457a123555141ad4bd3ab1bef8
|
fed543ff068a5b1ae51c19e95b2471952d72adb7
|
/tests/testthat.R
|
a6b0aa5d7bc84b1ebb56d70960e8652ebbf10dc5
|
[] |
no_license
|
githubmpc/marimba2
|
848627c80de60e4ef53d623a3f6e7960c5afcbc4
|
332c283592d9cad4ca1a4ee4cc652709580b62f3
|
refs/heads/master
| 2021-01-20T02:19:13.442176
| 2017-10-18T14:51:17
| 2017-10-18T14:51:17
| 101,313,331
| 0
| 0
| null | 2017-10-18T14:52:26
| 2017-08-24T15:59:52
|
R
|
UTF-8
|
R
| false
| false
| 58
|
r
|
testthat.R
|
# Standard testthat entry point: runs all tests under tests/testthat/ when
# R CMD check is executed for the package.
library(testthat)
library(marimba)
# NOTE(review): the name passed here ('marimba') must match the Package field
# in DESCRIPTION -- confirm, since the repository is named 'marimba2'.
test_check("marimba")
|
8aaa18e7d799f67712261c31ab30b96dadbddca5
|
f729673ed0738bd666863ca767b1a584ae505244
|
/getoutputloopabc.R
|
f58761becbfd30bad8c7107d54c687bea9fb0ec4
|
[] |
no_license
|
SepidehMosaferi/SAE-Correlated-Errors
|
68de287faaafc55ba08e584f3f5648371dcae8b3
|
08c50ab90767bbd510f57ce2b9acd7685693bc78
|
refs/heads/master
| 2023-06-01T09:36:39.690846
| 2021-06-30T22:20:30
| 2021-06-30T22:20:30
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,907
|
r
|
getoutputloopabc.R
|
# Aggregate simulation results across the 8 (a, b, rho) configurations:
# for each per-configuration .Rdata result file, compute summary tables of the
# fixed-parameter estimates and the MC MSEs of the predictors, and bind them
# into the two output tables 'outfixpar' and 'outmse'.
rm(list= ls(all = TRUE))  # NOTE(review): clears the whole workspace; avoid in shared scripts.
library("xtable")
outfixpar <- c()  # one set of rows (per-parameter mean/sd) per configuration
outmse <- c()     # one row of MSE comparisons per configuration
# Earlier result-file sets, kept for reference:
#fnames <- c("June2021SimsML/n100UnequalPsi.Rdata","June2021SimsML/n500UnequalPsi.Rdata",
#            "June2021SimsML/n100Cor2.Rdata","June2021SimsML/n500Cor2.Rdata",
#            "June2021SimsML/n100UnequalPsiFlipped.Rdata","June2021SimsML/n500UnequalPsiFlipped.Rdata",
#            "June2021SimsML/n100FlippedCor2.Rdata","June2021SimsML/n500FlippedCor2.Rdata")
#
# BUGFIX: paste() with the default separator (a space) would build non-existent
# file names like "abrhosims/Config 1 .Rdata"; paste0() yields "abrhosims/Config1.Rdata".
fnames <- paste0("abrhosims/Config", 1:8, ".Rdata")
for(iterfname in seq_along(fnames)) {
  # load() brings betas, sig2bhats, estsyl, omegahatsfixed, Ys, thetas,
  # thetatilde2s, predyls, thetahatfixeds, M1hatses, M2hats, M1biases, mseFHs
  # and the sample size n into scope.
  load(fnames[iterfname])
  ##### Fixed parameter estimates: MC mean and sd over the first 1000 replicates.
  tabfixpar <- cbind(c(apply(betas[1:1000,], 2, mean), mean(sig2bhats[1:1000])), c(apply(betas[1:1000,], 2, sd), sd(sig2bhats[1:1000])) )
  tabfixparyl <- cbind(apply(estsyl[1:1000,],2,mean), apply(estsyl[1:1000,],2,sd))
  tabfixparFH <- cbind(apply(omegahatsfixed[1:1000,] , 2, mean), apply(omegahatsfixed[1:1000,] , 2, sd))
  outfixpar <- rbind(outfixpar, cbind(n,tabfixpar, tabfixparyl, tabfixparFH) )
  #xtable(cbind(tabfixpar, tabfixparyl, tabfixparFH), digits = 3)
  ### MC MSE of the predictors (direct Y, adjusted, YL, FH), averaged over areas:
  msecompare <- apply(cbind( apply((Ys[1:1000,] - thetas[1:1000,])^2,2,mean), apply((thetatilde2s[1:1000,] - thetas[1:1000,])^2,2,mean), apply((predyls - thetas)^2,2,mean), apply( (thetahatfixeds - thetas)^2,2,mean)), 2, mean)
  ### Average of estimated MSE:
  meadj <- mean(apply((thetatilde2s - thetas)^2,2,mean))
  fh <- mean(apply( (thetahatfixeds - thetas)^2,2,mean))
  outmse <- rbind(outmse, c(n, msecompare, mean(M1hatses + M2hats - M1biases), mean(mseFHs)) )
}
# Prepend the (a, b, rho) configuration labels. outfixpar has 3 rows per
# configuration (2 betas + sigma^2), hence the rep(..., each = 3).
a <- c(0.25, 0.25, 0.25, 0.25, 0.75, 0.75, 0.75, 0.75)
b <- c(0.75, 0.75, 0.75, 0.75, 0.25, 0.25, 0.25, 0.25)
rho <- c(0.2, 0.2, 0.8, 0.8, 0.2, 0.2, 0.8, 0.8)
outfixpar <- cbind(rep(a, each = 3), rep(b, each = 3), rep(rho, each = 3), outfixpar)
outmse <- cbind(a, b, rho, outmse)
|
40bf5f29a5474851b4c20b6d17c3f17b0341fd91
|
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
|
/data/genthat_extracted_code/bucky/examples/mi.eval.Rd.R
|
26189e73412996a86cc77e7e97d8d8f52ea5b301
|
[] |
no_license
|
surayaaramli/typeRrh
|
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
|
66e6996f31961bc8b9aafe1a6a6098327b66bf71
|
refs/heads/master
| 2023-05-05T04:05:31.617869
| 2019-04-25T22:10:06
| 2019-04-25T22:10:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,301
|
r
|
mi.eval.Rd.R
|
library(bucky)

### Name: mi.eval
### Title: Multiple-imputation evaluation
### Aliases: mi.eval
### Keywords: models htest

### ** Examples

# Example is gated on Amelia being installed (the imputation step needs it).
if (require("Amelia")) {
    ## Load data
    data(africa)
    africa$civlib <- factor(round(africa$civlib*6), ordered=TRUE)

    ## Estimate a linear model using imputed data sets
    model0 <- lm(trade ~ log(gdp_pc), data=africa, subset=year==1973)
    summary(model0)

    ## Impute using Amelia
    a.out <- amelia(x = africa, cs = "country", ts = "year",
                    logs = "gdp_pc", ord="civlib")

    ## Estimate a linear model using imputed data sets
    model1 <- mi.eval(lm(trade ~ log(gdp_pc), data=a.out, subset=year==1973))

    ## Show estimates
    model1
    coef(model1)

    ## Show summary information
    summary(model1)

    if (require("MASS")) {
        ## Estimate an ordered logit model
        model2 <- mi.eval(polr(civlib ~ log(gdp_pc) + log(population),
                               data=a.out))
        summary(model2)

        ## Also show thresholds by including thresholds with coefficients
        model3 <- mi.eval(polr(civlib ~ log(gdp_pc) + log(population),
                               data=a.out),
                          coef=function(x) c(x$coefficients, x$zeta))
        ## FIX: the original repeated summary(model2) here, so model3 (the
        ## fit with thresholds included) was computed but never summarized.
        summary(model3)
    }
}
|
53396b682f71eaf20800c74d6fc026e1aa2d1f3e
|
ba2845eadc8880147e906ab727d322d875226efa
|
/Analyses/EffectiveWarming_Simple.R
|
5fb8bf5cbfba0078beeeedb0aa7e1a0783f6b876
|
[] |
no_license
|
AileneKane/radcliffe
|
80e52e7260195a237646e499bf4e3dad4af55330
|
182cd194814e46785d38230027610ea9a499b7e8
|
refs/heads/master
| 2023-04-27T19:55:13.285880
| 2023-04-19T15:15:02
| 2023-04-19T15:15:02
| 49,010,639
| 5
| 2
| null | null | null | null |
UTF-8
|
R
| false
| false
| 7,493
|
r
|
EffectiveWarming_Simple.R
|
# Analysis of the effective (observed) warming of each experiment:
library(car); library(reshape2)
options(stringsAsFactors = FALSE)
# ------------------------------
# 1. read & format the RAW data
# ------------------------------
{
  expclim <- read.csv("expclim.csv")
  # drop records with no day-of-year (no usable date)
  expclim <- expclim[!is.na(expclim$doy),]
  # fractional year: the time key used when aligning plots below
  expclim$year.frac <- expclim$year + expclim$doy/366
  summary(expclim)

  # First creating a single vector for aboveground warming (tagging what its source is)
  # NOTE(review): min() (not max()) is used to combine the *_max columns
  # across air/canopy/surface sensors -- presumably deliberate (take the
  # most conservative reading when several sources exist); confirm intent.
  # With na.rm=T, rows where all three sources are NA yield Inf, which the
  # following line recodes back to NA.
  expclim[,"AGtemp_max"] <- apply(expclim[,c("airtemp_max", "cantemp_max", "surftemp_max")], 1, min, na.rm=T)
  expclim[expclim$AGtemp_max==Inf,"AGtemp_max"] <- NA
  expclim[,"AGtemp_min"] <- apply(expclim[,c("airtemp_min", "cantemp_min", "surftemp_min")], 1, min, na.rm=T)
  expclim[expclim$AGtemp_min==Inf,"AGtemp_min"] <- NA

  # Add a flag just to make it clear which kind of temperature I'm using
  # (assignment order matters: air overwrites canopy overwrites surface)
  expclim[which(!is.na(expclim$surftemp_max)), "AG.type"] <- "surface"
  expclim[which(!is.na(expclim$cantemp_max)), "AG.type"] <- "canopy"
  expclim[which(!is.na(expclim$airtemp_max)), "AG.type"] <- "air"
  expclim$AG.type <- as.factor(expclim$AG.type)
  summary(expclim)

  # Check the plot IDs
  summary(expclim$plot)
}
# ------------------------------
# ------------------------------
# ------------------------------
# Calculate the day-level deviations from the control
# Both Aboveground and Belowground when possible
# ------------------------------
expclim.orig <- expclim
exp.dev <- NULL # Making the deviation dataframe NULL to start with
# One pass per site: express every plot's daily temperature as a deviation
# from the mean of that site's control plots on the same day.
for(s in unique(expclim$site)){
  print(paste0(" ** processing site: ", s))
  dat.tmp <- expclim[expclim$site==s, ]
  dat.tmp$year.frac <- as.ordered(dat.tmp$year.frac)

  # identify the control plots; note: it didn't look like anything had both 0 and ambient,
  # so I'm using these interchangeably
  plot.control <- unique(dat.tmp[dat.tmp$temptreat %in% c(NA, "0", "ambient") & dat.tmp$preciptreat %in% c(NA, "0", "ambient"), "plot"])
  plot.all <- unique(dat.tmp$plot)

  # Subsetting the datasets we want in a very clunky manner
  # (recast: rows = year.frac, columns = plot, cell = mean temperature)
  ag.max <- recast(dat.tmp[, c("year.frac", "plot", "AGtemp_max")], year.frac ~ plot, fun.aggregate=mean, na.rm=T)
  ag.min <- recast(dat.tmp[, c("year.frac", "plot", "AGtemp_min")], year.frac ~ plot, fun.aggregate=mean, na.rm=T)
  bg.max <- recast(dat.tmp[, c("year.frac", "plot", "soiltemp1_max")], year.frac ~ plot, fun.aggregate=mean, na.rm=T)
  bg.min <- recast(dat.tmp[, c("year.frac", "plot", "soiltemp1_min")], year.frac ~ plot, fun.aggregate=mean, na.rm=T)

  # Calculating the mean temp (midpoint of daily max and min)
  ag.mean <- (ag.max[,2:ncol(ag.max)] + ag.min[,2:ncol(ag.min)])/2
  ag.mean$year.frac <- ag.max$year.frac
  bg.mean <- (bg.max[,2:ncol(bg.max)] + bg.min[,2:ncol(bg.min)])/2
  bg.mean$year.frac <- bg.max$year.frac

  # Calculate the plot-level deviation from the mean of the control
  # NOTE(review): if a site has exactly one control plot, [ , paste(plot.control)]
  # drops to a vector and apply() over rows will fail; verify every site has
  # at least two control plots.
  ag.max[,paste(plot.all)] <- ag.max[,paste(plot.all)] - apply(ag.max[,paste(plot.control)], 1, mean, na.rm=T)
  ag.min[,paste(plot.all)] <- ag.min[,paste(plot.all)] - apply(ag.min[,paste(plot.control)], 1, mean, na.rm=T)
  ag.mean[,paste(plot.all)] <- ag.mean[,paste(plot.all)] - apply(ag.mean[,paste(plot.control)], 1, mean, na.rm=T)
  bg.max[,paste(plot.all)] <- bg.max[,paste(plot.all)] - apply(bg.max[,paste(plot.control)], 1, mean, na.rm=T)
  bg.min[,paste(plot.all)] <- bg.min[,paste(plot.all)] - apply(bg.min[,paste(plot.control)], 1, mean, na.rm=T)
  bg.mean[,paste(plot.all)] <- bg.mean[,paste(plot.all)] - apply(bg.mean[,paste(plot.control)], 1, mean, na.rm=T)

  # Stack everything together and merge it back into the original dataset
  # "_dev" stands for deviation from control
  dat.stack <- stack(ag.max[,paste(plot.all)])
  names(dat.stack) <- c("AGtemp_max_dev", "plot")
  dat.stack$year.frac <- ag.max$year.frac
  dat.stack$site <- as.factor(s)
  dat.stack$AGtemp_min_dev  <- stack(ag.min [,paste(plot.all)])[,1]
  dat.stack$AGtemp_mean_dev <- stack(ag.mean[,paste(plot.all)])[,1]
  dat.stack$BGtemp_max_dev  <- stack(bg.max [,paste(plot.all)])[,1]
  dat.stack$BGtemp_min_dev  <- stack(bg.min [,paste(plot.all)])[,1]
  dat.stack$BGtemp_mean_dev <- stack(bg.mean[,paste(plot.all)])[,1]

  # make a data frame to store all of the deviations in
  # The merge needs to happen last otherwise it freaks out with the NAs when it tries to merge
  # NOTE(review): exp.dev grows by rbind() each iteration -- fine for a
  # handful of sites, quadratic if the site list gets large.
  if(is.null(exp.dev)){
    exp.dev <- dat.stack
  } else {
    exp.dev <- rbind(exp.dev, dat.stack)
  }
}
# Check the plot IDs on exp.dev
summary(exp.dev$plot)

# Merge the deviations into expclim
expclim <- merge(expclim, exp.dev, all.x=T)
summary(expclim)

# Check the plot IDs on exp.dev
summary(expclim$plot)
# ------------------------------
# ------------------------------
# Aggregate the experimental data to produce
# treatment- and plot-level stats
# ------------------------------
{
  treatvars <- c("AGtemp_max_dev", "AGtemp_min_dev", "AGtemp_mean_dev",
                 "BGtemp_max_dev", "BGtemp_min_dev", "BGtemp_mean_dev")

  # Note: need to make NAs in our aggregation variables a dummy name to get aggregation to work right
  # (aggregate() drops rows whose grouping variable is NA, so NAs are
  # temporarily recoded to the sentinel string "BLANK" and restored after)
  expclim$plot        <- as.factor(ifelse(is.na(expclim$plot)       , "BLANK", paste(expclim$plot)       ))
  expclim$temptreat   <- as.factor(ifelse(is.na(expclim$temptreat)  , "BLANK", paste(expclim$temptreat)  ))
  expclim$preciptreat <- as.factor(ifelse(is.na(expclim$preciptreat), "BLANK", paste(expclim$preciptreat)))
  expclim$block       <- as.factor(ifelse(is.na(expclim$block)      , "BLANK", paste(expclim$block)      ))
  expclim$AG.type     <- as.factor(ifelse(is.na(expclim$AG.type)    , "BLANK", paste(expclim$AG.type)    ))

  # ----------------------
  # Aggregating by plot
  # ----------------------
  effect.plot <- aggregate(expclim[,treatvars],
                           by=expclim[,c("site", "plot", "temptreat", "preciptreat", "block", "AG.type")],
                           FUN=mean, na.rm=T)
  # put our NAs back in (undo the "BLANK" sentinel recoding above)
  effect.plot$plot        <- as.factor(ifelse(effect.plot$plot       =="BLANK", NA, paste(effect.plot$plot)       ))
  effect.plot$temptreat   <- as.factor(ifelse(effect.plot$temptreat  =="BLANK", NA, paste(effect.plot$temptreat)  ))
  effect.plot$preciptreat <- as.factor(ifelse(effect.plot$preciptreat=="BLANK", NA, paste(effect.plot$preciptreat)))
  effect.plot$block       <- as.factor(ifelse(effect.plot$block      =="BLANK", NA, paste(effect.plot$block)      ))
  effect.plot$AG.type     <- as.factor(ifelse(effect.plot$AG.type    =="BLANK", NA, paste(effect.plot$AG.type)    ))
  summary(effect.plot)

  # check plotIDs
  summary(effect.plot$plot)

  # Save as a csv
  write.csv(effect.plot, "EffectiveWarming_Plot.csv", row.names=F, eol="\n")
  # ----------------------

  # ----------------------
  # Aggregating by Treatment directly from raw
  # NOTE: if you want to go through plot first to remove any bias, that can easily be done
  # ----------------------
  effect.treat <- aggregate(expclim[,treatvars],
                            by=expclim[,c("site", "temptreat", "preciptreat", "AG.type")],
                            FUN=mean, na.rm=T)
  # put our NAs back in
  effect.treat$temptreat   <- as.factor(ifelse(effect.treat$temptreat  =="BLANK", NA, paste(effect.treat$temptreat)  ))
  effect.treat$preciptreat <- as.factor(ifelse(effect.treat$preciptreat=="BLANK", NA, paste(effect.treat$preciptreat)))
  effect.treat$AG.type     <- as.factor(ifelse(effect.treat$AG.type    =="BLANK", NA, paste(effect.treat$AG.type)    ))
  summary(effect.treat)

  # Save as a csv
  write.csv(effect.treat, "EffectiveWarming_Treatment.csv", row.names=F, eol="\n")
  # ----------------------
}
# ------------------------------
|
30b753b2c66f55a9b43a89e0e5ddb8ebab27ef9b
|
909cb5b97f1a9d0b988e3a6721445a23b93859ee
|
/R/msg.R
|
7c39786deb3de417abfa7990920fac5c14ea0cc9
|
[] |
no_license
|
xiaoran831213/knn
|
d369db0ac18369e7491b314b42c9d405d6c35fa8
|
30f6c2a605cf53c9a5f2e066bb0d14f965289c62
|
refs/heads/master
| 2021-07-01T02:11:49.859443
| 2019-04-01T16:15:05
| 2019-04-01T16:15:05
| 134,107,318
| 0
| 0
| null | 2018-09-14T02:53:34
| 2018-05-20T00:15:24
|
TeX
|
UTF-8
|
R
| false
| false
| 1,206
|
r
|
msg.R
|
## Training Track
## Build one column of a training log.
##
## msg() returns a zero-argument generator (classed "msg") that, when
## called, formats the current value of `obj` with sprintf:
##   - function `obj`: called with the captured `...` each time;
##   - formula `obj`:  its first term is re-evaluated in the formula's
##     environment each time (so the value tracks the live variable);
##   - anything else:  formatted as-is.
## The column header, padded to the width parsed out of `fmt`, is stored
## in the "hdr" attribute for hd.msg() to pick up.
msg <- function(obj, fmt=NULL, hdr=NULL, ...)
{
    if (is.function(obj)) {
        ret <- function() sprintf(fmt, obj(...))
    } else if (inherits(obj, "formula")) {
        ret <- function()
        {
            env <- environment(obj)
            sprintf(fmt, eval(attr(terms(obj), "variables"), env)[[1]])
        }
    } else {
        ret <- function() sprintf(fmt, obj)
    }

    ## defaults are filled in here; the closures above read `fmt` lazily,
    ## so they see the defaulted value at call time
    if (is.null(fmt)) fmt <- "%8s"
    if (is.null(hdr)) hdr <- deparse(substitute(obj))
    hdr <- sub("^~", "", hdr)  # strip a leading tilde from formula labels

    ## pad the header to the field width encoded in the format string
    wid <- as.integer(sub("^%[+-]?([0-9]*)[.]?([0-9])?[A-z]$", "\\1", fmt))
    hdr <- sprintf(paste0("% ", wid, "s"), hdr)

    structure(ret, class = c("msg", "function"), hdr = hdr)
}
## Test whether an object is a track message created by msg().
## inherits() is the idiomatic replacement for 'msg' %in% class(.)
## (same result on the class vector, and S4-safe).
is.msg <- function(.) inherits(., "msg")
## Header row of the tracks: collect the "hdr" attribute from every
## msg object among the arguments and join them with single spaces.
hd.msg <- function(...)
{
    tracks <- Filter(is.msg, unlist(list(...)))
    headers <- sapply(tracks, attr, "hdr")
    paste(headers, collapse = " ")
}
## One data line of the tracks: invoke each msg generator among the
## arguments and join the formatted values with single spaces.
ln.msg <- function(...)
{
    tracks <- Filter(is.msg, unlist(list(...)))
    values <- sapply(tracks, function(f) f())
    paste(values, collapse = " ")
}
|
883b6b936d5c7a2fa0686d774e0f57087b32a324
|
3af87e7a8ba063590b7488ff865b00424b8a62d9
|
/plot2.R
|
6d18c2e72832754e905fb13c85f148dc150eaed2
|
[] |
no_license
|
infinitesupermagic/ExData_Plotting1
|
19c2ba7309faaf46b54b947abb245ebfe04800fe
|
702138948a0f45db5e78ebbd09a3b6a8ce54c811
|
refs/heads/master
| 2021-01-17T06:23:46.078368
| 2015-06-07T03:57:32
| 2015-06-07T03:57:32
| 36,953,699
| 0
| 0
| null | 2015-06-05T20:22:46
| 2015-06-05T20:22:46
| null |
UTF-8
|
R
| false
| false
| 568
|
r
|
plot2.R
|
# Plot 2: Global Active Power over time for 2007-02-01 and 2007-02-02.
library(sqldf)
# read.csv.sql filters rows at load time, so only the two target dates
# are ever brought into memory (dates are dd/mm/yyyy in the raw file)
df <- read.csv.sql("household_power_consumption.txt", sql = "select * from file where Date = '2/2/2007' or Date = '1/2/2007'", header=TRUE, sep = ";")
# combine Date and Time into a single POSIXct timestamp
df$Date <- as.Date(df$Date, "%d/%m/%Y")
df$Date <- paste(df$Date, df$Time)
df$Date <- strptime(df$Date, format="%Y-%m-%d %H:%M:%S")
df <- df[,c(1,3:9)]  # drop the now-redundant Time column (column 2)
df$Date <- as.POSIXct(df$Date)
plot(df$Date,df$Global_active_power, ylab = "Global Active Power (kilowatts)", xlab = "", cex.lab=.75, cex.axis=.75, cex.main=1, cex.sub=.5, pch=".", type="o")
# copy the on-screen plot to a 480x480 PNG, then close the PNG device
dev.copy(png,"plot2.png", width=480, height=480)
dev.off()
|
482eb3eebe3c952aeefda97eed8970b8a65c3252
|
f9bc24751d593694fbc98648519df43c70d253ee
|
/R/synapseEndpoints.R
|
ae0b9a3310f49bfdbc654c4244b5375651b89417
|
[] |
no_license
|
brian-bot/rSynapseClient
|
cf607b242fa292902f832d6a5ecffceeba80eaef
|
cef1a6bb1f28034a9de826f3e92f1b1139e56c61
|
refs/heads/master
| 2020-04-05T22:52:30.912248
| 2017-04-28T17:45:58
| 2017-04-28T17:45:58
| 3,354,254
| 0
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 862
|
r
|
synapseEndpoints.R
|
#
# Author: brucehoff
###############################################################################

# Set one or more Synapse service endpoints. Calling with NO arguments
# resets every endpoint to its default; otherwise only the endpoints
# that were explicitly supplied are updated.
synSetEndpoints <- function(repoEndpoint, authEndpoint, fileEndpoint, portalEndpoint) {
  noneGiven <- missing(repoEndpoint) &&
    missing(authEndpoint) &&
    missing(fileEndpoint) &&
    missing(portalEndpoint)
  if (noneGiven) {
    synapseResetEndpoints()
  }
  if (!missing(repoEndpoint))   synapseRepoServiceEndpoint(repoEndpoint)
  if (!missing(authEndpoint))   synapseAuthServiceEndpoint(authEndpoint)
  if (!missing(fileEndpoint))   synapseFileServiceEndpoint(fileEndpoint)
  if (!missing(portalEndpoint)) synapsePortalEndpoint(portalEndpoint)
}
# Return the currently configured Synapse endpoints as a named list
# (repo, auth, file, portal). Each getter is called with no arguments,
# which reads the current setting without changing it.
synGetEndpoints <- function() {
  endpoints <- list(
    repo   = synapseRepoServiceEndpoint()$endpoint,
    auth   = synapseAuthServiceEndpoint()$endpoint,
    file   = synapseFileServiceEndpoint()$endpoint,
    portal = synapsePortalEndpoint()$endpoint)
  endpoints
}
|
40f6586861288cb8d585017f8975aa646f637fdb
|
77ae1b3106953b9793d39c4910c6714fb8866f59
|
/script.R
|
8116b7e7f7b578cca474eb5b0e028d5f60ce8b0c
|
[] |
no_license
|
wmay/rollcall_scaling_intro
|
b7fb8d53f3b096f02d0e81f86f4cc6b89c6206c8
|
b4d00b224455c33cdbcd6fb3c1687de5dac3fc3d
|
refs/heads/master
| 2016-08-04T13:08:49.673388
| 2015-04-25T02:35:58
| 2015-04-25T02:35:58
| 33,318,859
| 1
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 7,194
|
r
|
script.R
|
# analyzing rollcalls from the New York state legislature
library(lubridate)
library(reshape2)
library(pscl) # Political Science Computation Library

# Analysis target: one chamber, one year.
chamber = "lower"
year = 2014

# Sunlight Foundation extracts: legislators, bill-level votes, and
# individual legislators' votes.
legs = read.csv("ny_legislators.csv", stringsAsFactors = F)
bill_votes = read.csv("ny_bill_votes.csv", stringsAsFactors = F)
leg_votes = read.csv("ny_bill_legislator_votes.csv",
                     stringsAsFactors = F)

# clarify party status
legs$party[legs$party == ""] = "Unknown"

# convert text dates to date objects
bill_votes$date = as.Date(bill_votes$date)

# find all the floor votes in the given chamber in the given year
rollcalls = bill_votes[bill_votes$vote_chamber == chamber &
                         year(bill_votes$date) == year &
                         bill_votes$motion == "Floor Vote", ]

# the relevant votes (records with a blank legislator id are dropped)
rel_votes = leg_votes[leg_votes$vote_id %in% rollcalls$vote_id &
                        leg_votes$leg_id != "", ]

# transform rel_votes from long to wide format
# (rows = legislators, columns = votes)
vote_matrix = dcast(rel_votes, leg_id ~ vote_id, value.var = "vote")

# get the names (in vote_matrix row order)
leg_ids = vote_matrix[, 1]
names = legs[match(leg_ids, legs$leg_id), "full_name"]

# get the parties
parties = legs[match(leg_ids, legs$leg_id), "party"]
parties = data.frame("party" = parties, stringsAsFactors = F)

# get the bill names
vote_ids = data.frame("Vote ID" = colnames(vote_matrix[, -1]),
                      stringsAsFactors = F)

# create the rollcall object
rc = rollcall(vote_matrix[, -1], yea = "yes", nay = "no",
              missing = "other", notInLegis = NA, legis.names = names,
              legis.data = parties, vote.data = vote_ids,
              source = "Sunlight Foundation")
# IDEAL
# "ideal" function from "pscl" library
# Assumes quadratic utility, uses MCMC (a Bayesian algorithm) to sort
# legislators
ideal_results = ideal(rc)
plot(ideal_results)

# W-NOMINATE
library(wnominate)
# Assumes Gaussian utility, uses a linear algebra algorithm to sort
# legislators
# (polarity pins both dimensions to the same legislator so the signs of
# the recovered coordinates are fixed)
wnom_results = wnominate(rc, polarity = c("Brian M Kolb", "Brian M Kolb"))
plot(wnom_results)
par(mfrow=c(1,1)) # fix graphing parameters, if needed
options(warn=-1) # stop pesky warning messages, if needed
plot.coords(wnom_results) # just the coordinates

# looking closely at A2597, the DREAM Act
rollcalls[rollcalls[, "bill_id"] == "A 2597", ]
which(rc$vote.data[, "Vote.ID"] == "NYV00019838")
plot.coords(wnom_results, cutline = 211)
wnom_results$rollcalls[211, ]
# The midpoint is the point between the estimated yea and nay
# locations. The spread is the distance from the midpoint to either
# the yea or nay point. If you were to draw a line connecting the yea
# and nay locations, the cutline would intersect with it at the
# midpoint, at a 90 degree angle.

# alpha-NOMINATE
library(anominate)
# Gaussian utility, runs wnominate first, then uses MCMC
# (polarity = 53 pins legislator number 53 to the positive side)
anom_results = anominate(rc, polarity = 53)
# save the results for later (the MCMC run is slow)
save(anom_results, file = "anom_results.RData")
# load old results
load("anom_results.RData")
plot(anom_results)

# Are the utilities Gaussian or quadratic?
# 1 = Gaussian, 0 = quadratic
densplot.anominate(anom_results)
# make writing the rollcall objects easier
#
# Build a pscl::rollcall object from the Sunlight Foundation tables.
#
#   legs      data.frame of legislators (needs leg_id, full_name, party)
#   rollcalls data.frame of roll-call votes to include (needs vote_id)
#   leg_votes data.frame of individual votes (needs vote_id, leg_id, vote)
#
# Returns a rollcall object with one row per legislator and one column
# per vote, with party attached as legislator data.
create_rc = function(legs, rollcalls, leg_votes) {
  # keep only votes belonging to the requested roll calls, dropping
  # records with a blank legislator id
  rel_votes = leg_votes[leg_votes$vote_id %in% rollcalls$vote_id &
                          leg_votes$leg_id != "", ]
  # transform rel_votes from long to wide format
  # (rows = legislators, columns = votes)
  vote_matrix = dcast(rel_votes, leg_id ~ vote_id, value.var = "vote")
  # get the names, in vote_matrix row order
  leg_ids = vote_matrix[, 1]
  names = legs[match(leg_ids, legs$leg_id), "full_name"]
  # get the parties
  parties = legs[match(leg_ids, legs$leg_id), "party"]
  parties = data.frame("party" = parties, stringsAsFactors = F)
  # get the bill names
  vote_ids = data.frame("Vote ID" = colnames(vote_matrix[, -1]),
                        stringsAsFactors = F)
  # FIX: return the rollcall object directly; the original assigned it to
  # a local variable (`rc = rollcall(...)`), which made the return value
  # invisible to callers and left a useless local binding.
  rollcall(vote_matrix[, -1], yea = "yes", nay = "no",
           missing = "other", notInLegis = NA, legis.names = names,
           legis.data = parties, vote.data = vote_ids,
           source = "Sunlight Foundation")
}
# Optimal Classification
library(oc)

# all the floor votes in both chambers, in all years
rollcalls = bill_votes[bill_votes$motion == "Floor Vote", ]
# make the rollcall object
oc_rc = create_rc(legs, rollcalls, leg_votes)

# Optimal Classification uses "nonmetric unfolding", meaning no
# specified utility curve, though we assume it is symmetric and
# single-peaked. Uses another linear algebra algorithm. Works well
# with missing data, i.e., when comparing multiple chambers.
oc_results = oc(oc_rc, polarity = c("Brian M Kolb", "Brian M Kolb"))
plot(oc_results)
plot.OCcoords(oc_results)

# try plotting the senate and assembly separately
# (chamber membership is recovered from which chamber's votes a
# legislator appears in)
assembly_rc_ids = unique(
  bill_votes[bill_votes$vote_chamber == "lower", "vote_id"])
assembly_ids = unique(
  leg_votes[leg_votes$vote_id %in% assembly_rc_ids, "leg_id"])
assemblymen = legs[legs$leg_id %in% assembly_ids, "full_name"]
senate_rc_ids = unique(
  bill_votes[bill_votes$vote_chamber == "upper", "vote_id"])
senate_ids = unique(
  leg_votes[leg_votes$vote_id %in% senate_rc_ids, "leg_id"])
senators = legs[legs$leg_id %in% senate_ids, "full_name"]
plot(oc_results$legislators[assemblymen, "coord1D"],
     oc_results$legislators[assemblymen, "coord2D"])
plot(oc_results$legislators[senators, "coord1D"],
     oc_results$legislators[senators, "coord2D"])

# how many legislators served in both chambers?
sum(senators %in% assemblymen)
senators[senators %in% assemblymen]

# just the Assembly then
# all the floor votes in the Assembly, in all years
rollcalls = bill_votes[bill_votes$vote_chamber == chamber &
                         bill_votes$motion == "Floor Vote", ]
# make the rollcall object
oc_rc = create_rc(legs, rollcalls, leg_votes)
oc_results = oc(oc_rc, polarity = c("Brian M Kolb", "Brian M Kolb"))
plot(oc_results)
plot.OCcoords(oc_results)

# Collect first-dimension scores by party and year.
# NOTE(review): dems/reps are lists indexed directly by calendar year
# (2011..2015), so indices 1..2010 are NULL placeholders; x separately
# tracks positions 1..5 in the means vectors.
dems = list()
reps = list()
dem_means = vector()
rep_means = vector()
x = 1
for (year in 2011:2015) {
  rc_ids = unique(rollcalls[year(rollcalls$date) == year, "vote_id"])
  assembly_ids = unique(leg_votes[leg_votes$vote_id %in% rc_ids, "leg_id"])
  assemblymen = legs[legs$leg_id %in% assembly_ids, ]
  dem_names = assemblymen[assemblymen$party == "Democratic", "full_name"]
  rep_names = assemblymen[assemblymen$party == "Republican", "full_name"]
  dems[[year]] = oc_results$legislators[dem_names, "coord1D"]
  reps[[year]] = oc_results$legislators[rep_names, "coord1D"]
  # get rid of the NA's
  dems[[year]] = dems[[year]][!is.na(dems[[year]])]
  reps[[year]] = reps[[year]][!is.na(reps[[year]])]
  dem_means[x] = mean(dems[[year]], na.rm = T)
  rep_means[x] = mean(reps[[year]], na.rm = T)
  x = x + 1
}

# plot the distributions (blue = Democrats, red = Republicans overlaid)
library(vioplot)
vioplot(dems[[2011]], dems[[2012]], dems[[2013]], dems[[2014]],
        dems[[2015]],
        names=c("2011", "2012", "2013", "2014", "2015"),
        ylim = c(-1, 1), col="blue")
vioplot(reps[[2011]], reps[[2012]], reps[[2013]], reps[[2014]],
        reps[[2015]],
        ylim = c(-1, 1), col="red", add = T)

# plot the means
plot(2011:2015, dem_means, ylim = c(-1, 1), col = "blue", type = "b")
points(2011:2015, rep_means, col = "red", type = "b")
|
4d964341d4c11a5536d2802174a50c3a6099bac0
|
0a6de81c16a540a0e083634497ae75ccb02afe90
|
/man/mcr_fish.Rd
|
667e4f1ec85000255f443dbe3914ab09a920b8fc
|
[
"MIT"
] |
permissive
|
liaaaaran/alohalr
|
13b3ef041443cf59d95d809830eddb5f73b5c566
|
248c91297dd5ed05ea04ab3a98368a5258bcfbe0
|
refs/heads/master
| 2023-03-11T03:14:29.626864
| 2021-02-19T22:30:53
| 2021-02-19T22:30:53
| 340,502,816
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 1,390
|
rd
|
mcr_fish.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/mcr_fish_doc.R
\docType{data}
\name{mcr_fish}
\alias{mcr_fish}
\title{MCR LTER: Coral Reef: Changes in the abundance of fish functional groups: Adam et al. 2014 Oecologia}
\format{
An object of class \code{spec_tbl_df} (inherits from \code{tbl_df}, \code{tbl}, \code{data.frame}) with 36 rows and 11 columns.
}
\source{
{ Moorea Coral Reef LTER, T. Adam, A. Brooks, and P. Edmunds. 2016. MCR LTER: Coral Reef: Changes in the abundance of fish functional groups: Adam et al. 2014 Oecologia ver 2. Environmental Data Initiative. https://doi.org/10.6073/pasta/2cea607bcfc63e41c5eee1da15e5a112 (Accessed 2021-02-15).}
\url{https://portal.edirepository.org/nis/mapbrowse?packageid=knb-lter-mcr.1041.2}
}
\usage{
mcr_fish
}
\description{
analyze the relationship between the cover of live and dead branching corals and changes in the
abundance of different functional groups of fishes following the loss of coral on the fore reef
of Moorea due to an outbreak of corallivorous crown-of-thorns sea stars (Acanthaster planci)
and a tropical cylcone.
}
\details{
A major ecosystem service provided by coral reefs is the provisioning of physical habitat for
other organisms, and consequently, many of the effects of climate change on coral reefs will
be mediated by their impacts on habitat structure
}
\keyword{datasets}
|
b68f6f69ed8f2db45a66f015a10447d72c8b3aec
|
dd318137bda91d0dee5c2c832a881b726d48d8eb
|
/plot4.R
|
3434ef26e452f78a313d9767171ff23ded930e22
|
[] |
no_license
|
Uttam1609/ExData_Plotting1
|
0cecfff2fd3e7c8ea824fac67fd561a438f82bf1
|
b1c920c4aa6a2eb569ed0141aa10cea3ecd27956
|
refs/heads/master
| 2022-08-03T19:26:28.367537
| 2020-05-25T07:59:47
| 2020-05-25T07:59:47
| 266,345,415
| 0
| 0
| null | 2020-05-23T13:46:40
| 2020-05-23T13:46:39
| null |
UTF-8
|
R
| false
| false
| 1,350
|
r
|
plot4.R
|
# Plot 4: 2x2 panel of household power measurements for 2007-02-01/02.
elec_data <- read.table("./data/household_power_consumption.txt", sep = ";", header = TRUE)
# keep only the two target dates (dd/mm/yyyy in the raw file)
elec_data <- elec_data[which(as.character(elec_data$Date) == "1/2/2007" | as.character(elec_data$Date) == "2/2/2007"), ]
# build a combined datetime column, and coerce the measurement columns
# to numeric (read.table loaded them as character/factor)
elec_data <- transform(elec_data, datetime = paste(Date, Time, sep = " "))
elec_data <- transform(elec_data, Global_active_power = as.numeric(as.character(Global_active_power)))
elec_data <- transform(elec_data, datetime = strptime(datetime, format = "%d/%m/%Y %H:%M:%S"))
elec_data <- transform(elec_data, Voltage = as.numeric(as.character(Voltage)), Global_reactive_power = as.numeric(as.character(Global_reactive_power)))
# 2x2 grid of panels
par(mfrow = c(2,2), mar = c(4,4,2,1))
with(elec_data, plot(datetime, Global_active_power, type = "l", ylab = "Global active power (kilowatts)"))
with(elec_data, plot(datetime, Voltage, type = "l"))
# third panel: the three sub-metering series overlaid on an empty frame
with(elec_data, plot(datetime, Sub_metering_1, ylab = "Energy sub metering", type = "n"))
with(elec_data, lines(datetime, Sub_metering_1, col = "black"))
with(elec_data, lines(datetime, Sub_metering_2, col = "red"))
with(elec_data, lines(datetime, Sub_metering_3, col = "blue"))
legend("topright", legend = c("Sub_metering_1", "Sub_metering_2", "Sub_metering_3"), lty = c(1,1,1), col = c("black","red","blue"))
with(elec_data, plot(datetime, Global_reactive_power, type = "l"))
# copy the on-screen device to a PNG (default 480x480), then close it
dev.copy(png, "plot4.png")
dev.off()
|
af00f2944bf6094d4bac921fe7c258b118a46d2a
|
a88ff00d49b14c84059a6fa74af25ee4c531d9fe
|
/man/print.mooFunction.Rd
|
3809c623cf4aeb27c4faba4be477f5e43150d43f
|
[] |
no_license
|
danielhorn/moobench
|
b4bcd7f7959f583fb6a9a895ce4c9e0af1a84d34
|
dacf01a327c9d8cd2ffc5f6be261a2af9b95e4fe
|
refs/heads/master
| 2016-09-06T11:18:22.257466
| 2015-11-19T11:46:40
| 2015-11-19T11:46:40
| 35,089,700
| 1
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 367
|
rd
|
print.mooFunction.Rd
|
% Generated by roxygen2 (4.1.1): do not edit by hand
% Please edit documentation in R/print_moofunction.R
\name{print.mooFunction}
\alias{print.mooFunction}
\title{Print a mooFunction.}
\usage{
\method{print}{mooFunction}(x, ...)
}
\arguments{
\item{x}{[\code{function}] \cr
A \code{\link{mooFunction}}.}
\item{...}{Ignored.}
}
\description{
Print a mooFunction.
}
|
28e6621cce0dd406ee534955480e7f3955a7cc7f
|
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
|
/data/genthat_extracted_code/spatstat/examples/plot.solist.Rd.R
|
6e3c6ff6dddd06f91df1df99946f33e3514731e6
|
[] |
no_license
|
surayaaramli/typeRrh
|
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
|
66e6996f31961bc8b9aafe1a6a6098327b66bf71
|
refs/heads/master
| 2023-05-05T04:05:31.617869
| 2019-04-25T22:10:06
| 2019-04-25T22:10:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 332
|
r
|
plot.solist.Rd.R
|
library(spatstat)

### Name: plot.solist
### Title: Plot a List of Spatial Objects
### Aliases: plot.solist
### Keywords: spatial hplot

### ** Examples

# Intensity estimate of multitype point pattern
plot(D <- density(split(amacrine)))
# same data with a shared colour ribbon across panels; panel.end draws
# contour lines on top of each density panel after it is plotted
plot(D, main="", equal.ribbon=TRUE,
     panel.end=function(i,y,...){contour(y, ...)})
|
362bc29107601d4a39b88a4be7f22e2bace5b050
|
44a3fad6338a63ac5417b1e52e47420c0e013f45
|
/man/dmesn.Rd
|
c01f9f691299aca0c86f9748fb743fc87a8b77a5
|
[] |
no_license
|
cran/ExtremalDep
|
4faac60ce0040262a98410edc6488ddf939ad9bd
|
18238416ddb6567610c4457dc332316272dbd16e
|
refs/heads/master
| 2023-03-06T18:03:59.304908
| 2023-02-26T14:40:02
| 2023-02-26T14:40:02
| 236,595,530
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,568
|
rd
|
dmesn.Rd
|
\name{dmesn}
\alias{dmesn}
\alias{pmesn}
\title{Bivariate and trivariate extended skew-normal distribution}
\description{Density function, distribution function for the bivariate and trivariate extended skew-normal (\acronym{ESN}) distribution.}
\usage{
dmesn(x=c(0,0), location=rep(0, length(x)), scale=diag(length(x)),
shape=rep(0,length(x)), extended=0)
pmesn(x=c(0,0), location=rep(0, length(x)), scale=diag(length(x)),
shape=rep(0,length(x)), extended=0)
}
\arguments{
\item{x}{ quantile vector of length \code{d=2} or \code{d=3}. }
\item{location}{a numeric location vector of length \code{d}. \code{0} is the default.}
\item{scale}{a symmetric positive-definite scale matrix of dimension \code{(d,d)}. \code{diag(d)} is the default.}
\item{shape}{a numeric skewness vector of length \code{d}. \code{0} is the default.}
\item{extended}{a single value extension parameter. \code{0} is the default.}
}
\value{
density (\code{dmesn}), probability (\code{pmesn}) from the bivariate or trivariate extended skew-normal distribution with given
\code{location}, \code{scale}, \code{shape} and \code{extended} parameters or from the skew-normal distribution if \code{extended=0}.
If \code{shape=0} and \code{extended=0} then the normal distribution is recovered.
}
\references{
Azzalini, A. and Capitanio, A. (1999).
Statistical applications of the multivariate skew normal distribution.
\emph{J.Roy.Statist.Soc. B} \bold{61}, 579--602.
Azzalini, A. with the collaboration of Capitanio, A. (2014).
\emph{The Skew-Normal and Related Families}.
Cambridge University Press, IMS Monographs series.
Azzalini, A. and Dalla Valle, A. (1996).
The multivariate skew-normal distribution.
\emph{Biometrika} \bold{83}, 715--726.
}
\examples{
sigma1 <- matrix(c(2,1.5,1.5,3),ncol=2)
sigma2 <- matrix(c(2,1.5,1.8,1.5,3,2.2,1.8,2.2,3.5),ncol=3)
shape1 <- c(1,2)
shape2 <- c(1,2,1.5)
dens1 <- dmesn(x=c(1,1), scale=sigma1, shape=shape1, extended=2)
dens2 <- dmesn(x=c(1,1), scale=sigma1)
dens3 <- dmesn(x=c(1,1,1), scale=sigma2, shape=shape2, extended=2)
dens4 <- dmesn(x=c(1,1,1), scale=sigma2)
prob1 <- pmesn(x=c(1,1), scale=sigma1, shape=shape1, extended=2)
prob2 <- pmesn(x=c(1,1), scale=sigma1)
\donttest{
prob3 <- pmesn(x=c(1,1,1), scale=sigma2, shape=shape2, extended=2)
prob4 <- pmesn(x=c(1,1,1), scale=sigma2)
}
}
\author{
Simone Padoan, \email{simone.padoan@unibocconi.it},
\url{https://faculty.unibocconi.it/simonepadoan/};
Boris Beranger, \email{borisberanger@gmail.com}
\url{https://www.borisberanger.com};
}
\keyword{distribution}
|
a9b74ddf39e0a9eaeb59516f49bcc9e083a55aa5
|
2027918b86990dfca61fc46bad9d7c09f60b9f2c
|
/01_enjoy_bays/data/chapter15/chapter15莠穂ク・ex-Gauss_fitting.r
|
94e06094eeb029ccdf8efae8e07e304c1c3a3b2d
|
[] |
no_license
|
kaida6213/bays_study
|
43b09dafccf1f33df0812b5879352db001bca025
|
fa6c3b6d8daef0a19d01ff36c01762a109b17a0d
|
refs/heads/master
| 2023-02-13T14:04:08.979744
| 2021-01-09T07:03:49
| 2021-01-09T07:03:49
| 326,894,200
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 6,558
|
r
|
chapter15莠穂ク・ex-Gauss_fitting.r
|
# Verified with Stan 2.17, rstan 2.17.3, R 3.4.4 on macOS Sierra.

# Read the data (rt = reaction time, sub = participant ID,
# cond = 1 is the search-frequent block, 2 is the memory-frequent block).
total_dat01 <- read.csv("search_rt.csv",header=T)

# Exclude outliers (keep 0.20 s < rt < 15.00 s).
total_dat01 <- subset(total_dat01,rt>0.20 & rt<15.00)

# Traditional analysis: per-participant condition means, then a paired t-test.
total_mean <- tapply(total_dat01$rt,list(total_dat01$sub,total_dat01$cond),mean)
t.test(total_mean[,1],total_mean[,2],paired=T)

# Run Stan.
library(rstan)
rstan_options(auto_write=TRUE)
options(mc.cores=parallel::detectCores())
# NOTE(review): sub_num is not defined anywhere above this point -- verify
# it is created earlier in the full script (presumably the vector of
# participant IDs), otherwise this line errors.
datastan <- list(N = length(total_dat01$rt), RT=total_dat01$rt, SUBID = total_dat01$sub,CONDID = total_dat01$cond,S = length(sub_num))

# adapt_delta and max_treedepth are set high for two reasons:
# (1) there are many divergent transitions;
# (2) "maximum treedepth" warnings appear otherwise.
# Even with these settings, some divergent transitions remain.
fit_exgauss01<-stan(file = 'exGauss_fit.stan', data = datastan, seed = 123, iter = 13000, warmup = 1000,chains = 4,thin = 3,control=list(adapt_delta=0.99,max_treedepth=15))

# Summary of the Stan fit.
sum_dat <- summary(fit_exgauss01)$summary

# Extract the mean, 2.5% and 97.5% points of the individual-level
# parameters (columns 1-3 = search block, 4-6 = memory block).
total_mu_ind <- cbind(sum_dat[paste("mu_ind[",1:10,",1]",sep = ""),c("mean","2.5%","97.5%")],
                      sum_dat[paste("mu_ind[",1:10,",2]",sep = ""),c("mean","2.5%","97.5%")])
total_sigma_ind <- cbind(sum_dat[paste("sigma_ind[",1:10,",1]",sep = ""),c("mean","2.5%","97.5%")],
                         sum_dat[paste("sigma_ind[",1:10,",2]",sep = ""),c("mean","2.5%","97.5%")])
total_lambda_ind <- cbind(sum_dat[paste("lambda_ind[",1:10,",1]",sep = ""),c("mean","2.5%","97.5%")],
                          sum_dat[paste("lambda_ind[",1:10,",2]",sep = ""),c("mean","2.5%","97.5%")])

# Data for Table 15.1 (group-level parameters).
table1_dat <- rbind(sum_dat["mu[1]",c("mean","sd","2.5%","97.5%")],
                    sum_dat["mu[2]",c("mean","sd","2.5%","97.5%")],
                    sum_dat["sigma[1]",c("mean","sd","2.5%","97.5%")],
                    sum_dat["sigma[2]",c("mean","sd","2.5%","97.5%")],
                    sum_dat["lambda[1]",c("mean","sd","2.5%","97.5%")],
                    sum_dat["lambda[2]",c("mean","sd","2.5%","97.5%")])
table1_dat <- cbind(table1_dat,(table1_dat[,4]-table1_dat[,3]))
colnames(table1_dat) <- c("EAP","post.sd","2.50%","97.50%","95%幅")
rownames(table1_dat) <- c("mu_s","mu_m","sigma_s","sigma_m","lambda_s","lambda_m")

# Data for Table 15.2 (individual parameters, memory-frequent block).
table2_dat <- cbind(total_mu_ind[,4],total_mu_ind[,6]-total_mu_ind[,5],
                    total_sigma_ind[,4],total_sigma_ind[,6]-total_sigma_ind[,5],
                    total_lambda_ind[,4],total_lambda_ind[,6]-total_lambda_ind[,5])
colnames(table2_dat) <- c("mu_eap","mu_95ci","sigma_eap","sigma_95ci","lambda_eap","lambda_95ci")
rownames(table2_dat) <- paste("s",1:10,sep="")

# Data for Table 15.3 (individual parameters, search-frequent block).
table3_dat <- cbind(total_mu_ind[,1],total_mu_ind[,3]-total_mu_ind[,2],
                    total_sigma_ind[,1],total_sigma_ind[,3]-total_sigma_ind[,2],
                    total_lambda_ind[,1],total_lambda_ind[,3]-total_lambda_ind[,2])
colnames(table3_dat) <- c("mu_eap","mu_95ci","sigma_eap","sigma_95ci","lambda_eap","lambda_95ci")
rownames(table3_dat) <- paste("s",1:10,sep="")

# Data for Table 15.4 (distributions of the condition differences).
table4_dat <- rbind(sum_dat["mu_diff",c("mean","sd","2.5%","97.5%")],
                    sum_dat["sigma_diff",c("mean","sd","2.5%","97.5%")],
                    sum_dat["lambda_diff",c("mean","sd","2.5%","97.5%")])
table4_dat <- cbind(table4_dat,(table4_dat[,4]-table4_dat[,3]))
colnames(table4_dat) <- c("EAP","post.sd","2.50%","97.5%","95%幅")
rownames(table4_dat) <- c("mu_diff","sigma_diff","lambda_diff")

### Creating the figures.
library(ggplot2)

# Histograms of raw RTs by block (Figure 15.3).
search_rt <- total_dat01$rt[total_dat01$cond==1]
memory_rt <- total_dat01$rt[total_dat01$cond==2]
cond_name <- c(rep("search-frequent",length(search_rt)), rep("memory-frequent",length(memory_rt)))
total_block <- data.frame(c(search_rt,memory_rt),cond_name)
names(total_block)<-c("rt","cond_name")
g <- ggplot() + theme_bw(base_size = 20) +
  theme(panel.background = element_rect(fill = "transparent", colour = "black"),panel.grid = element_blank()) +
  xlab("RT(sec)") + ylab("density") +
  geom_histogram(data = total_block,aes(x = rt, y = ..density..)) + facet_grid(~cond_name)
g

# Figure 15.1 (note: no legend is drawn).
library(retimes)
library(ggplot2)
library(cowplot)
x <- seq(0,3.5,0.01)
# retimes::dexgauss is parameterized with tau instead of lambda;
# tau is the reciprocal of lambda.
# Figure 15.1a
dat1 <- dexgauss(x,mu=0.5,sigma=0.1,tau=2.0^-1)
dat2 <- dexgauss(x,mu=1.0,sigma=0.1,tau=2.0^-1)
dat3 <- dexgauss(x,mu=1.5,sigma=0.1,tau=2.0^-1)
gdat <- data.frame(rep("A",length(x)*3),rep(x,3),c(rep("1",length(dat1)),rep("2",length(dat1)),rep("3",length(dat1))),c(dat1,dat2,dat3))
names(gdat) <- c("type","rt","cond","ds")
#図15.1b
dat4 <- dexgauss(x,mu=0.8,sigma=0.1,tau=2.0^-1)
dat5 <- dexgauss(x,mu=0.8,sigma=0.3,tau=2.0^-1)
dat6 <- dexgauss(x,mu=0.8,sigma=0.7,tau=2.0^-1)
gdat02 <- data.frame(rep("B",length(x)*3),rep(x,3),c(rep("1",length(dat4)),rep("2",length(dat5)),rep("3",length(dat6))),c(dat4,dat5,dat6))
names(gdat02) <- c("type","rt","cond","ds")
#図15.1c
dat7 <- dexgauss(x,mu=0.8,sigma=0.1,tau=1.0^-1)
dat8 <- dexgauss(x,mu=0.8,sigma=0.1,tau=2.0^-1)
dat9 <- dexgauss(x,mu=0.8,sigma=0.1,tau=3.0^-1)
gdat03 <- data.frame(rep("C",length(x)*3),rep(x,3),c(rep("1",length(dat4)),rep("2",length(dat5)),rep("3",length(dat6))),c(dat7,dat8,dat9))
names(gdat03) <- c("type","rt","cond","ds")
#図15.1d
dat10 <- dexgauss(x,mu=0.5,sigma=0.1,tau=1.0^-1)
dat11 <- dexgauss(x,mu=1.0,sigma=0.1,tau=2.0^-1)
gdat04 <- data.frame(rep("D",length(x)*2),rep(x,2),c(rep("1",length(dat4)),rep("2",length(dat5))),c(dat10,dat11))
names(gdat04) <- c("type","rt","cond","ds")
tdat <- rbind(gdat,gdat02,gdat03,gdat04)
g2 <- ggplot() +
geom_line(data = tdat,aes(x = rt,y = ds, linetype=cond),size=0.6) +
xlab("RT(sec)")+ ylab("density")+
theme_bw(base_size = 17) + theme(panel.background = element_rect(fill = "transparent", colour = "black"),panel.grid = element_blank()) + ylim(0,2.0) +
facet_wrap(~tdat$type,ncol = 2)
g2
|
0806c0f076e5a9bccce6d0845434dba9ff46dcb5
|
7ce35c255fe7506795ff7abc15b5222e582451bb
|
/5-visualizations/risk factors/fig-spline-plots.R
|
1eeba512628073f47d9b107dd926aa9f3c851d3a
|
[] |
no_license
|
child-growth/ki-longitudinal-growth
|
e464d11756c950e759dd3eea90b94b2d25fbae70
|
d8806bf14c2fa11cdaf94677175c18b86314fd21
|
refs/heads/master
| 2023-05-25T03:45:23.848005
| 2023-05-15T14:58:06
| 2023-05-15T14:58:06
| 269,440,448
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 17,945
|
r
|
fig-spline-plots.R
|
# Setup for fig-spline-plots.R: load growth data, merge covariates, restrict
# to the first 24 months of age.
# NOTE(review): rm(list=ls()) wipes the caller's workspace — avoid in scripts
# that may be sourced interactively.
rm(list=ls())
source(paste0(here::here(), "/0-config.R"))
source(paste0(here::here(),"/0-project-functions/0_descriptive_epi_wast_functions.R"))
#Load haz and whz data
d <- readRDS(rf_co_occurrence_path)
d <- d %>% subset(., select=-c(tr))
#merge WLZ outcomes with covariates
cov<-readRDS(clean_covariates_path)
table(cov$mhtcm3)
table(cov$mbmi3)
# Replace continuous maternal height/BMI with their 3-level categorical
# versions, renamed to the original column names.
# NOTE(review): trailing comma in rename() is tolerated by dplyr's tidy dots.
cov <- cov %>% subset(., select=-c(mbmi, mhtcm)) %>% rename(mhtcm=mhtcm3, mbmi=mbmi3, )
# Drop unused region/seasonality and analysis-weight columns
cov <- cov %>% subset(., select=-c( region, month, W_gagebrth, W_birthwt, W_birthlen,
                                    W_mage, W_mhtcm, W_mwtkg, W_mbmi, W_fage, W_fhtcm, W_meducyrs, W_feducyrs,
                                    W_nrooms, W_nhh, W_nchldlt5, W_parity,
                                    W_perdiar6, W_perdiar24))
dim(d)
d <- left_join(d, cov, by=c("studyid","country","subjid"))
dim(d)
# Keep measurements before 24 months of age (30.4167 days/month)
d <- d %>% filter(agedays < 24 * 30.4167)
# NOTE(review): tr was already dropped at the top; this works only if the
# covariate merge re-introduced id/arm/tr columns — verify.
d <- subset(d, select = -c(id, arm, tr))
dim(d)
#N's for figure caption
d %>% ungroup() %>% filter(!is.na(mhtcm)) %>% summarize(N=n(), Nchild=length(unique(paste0(studyid, country, subjid))), Nstudies=length(unique(paste0(studyid, country))))
d %>% ungroup() %>% filter(!is.na(mwtkg)) %>% summarize(N=n(), Nchild=length(unique(paste0(studyid, country, subjid))), Nstudies=length(unique(paste0(studyid, country))))
d %>% ungroup() %>% filter(!is.na(mbmi)) %>% summarize(N=n(), Nchild=length(unique(paste0(studyid, country, subjid))), Nstudies=length(unique(paste0(studyid, country))))
#Adapted from:
#http://www.ag-myresearch.com/2012_gasparrini_statmed.html
# Two-stage spline meta-analysis of a growth Z-score over child age.
#   Stage 1: within each cohort (and exposure level), fit a spline of Y on
#     agedays, choosing the spline df by median (or mean) AIC across cohorts
#     via get_df_aic(), then extract coefficients and their vcov.
#   Stage 2: pool the cohort-specific spline coefficients with mvmeta —
#     either one overall curve (overall=T) or a multivariate meta-regression
#     on the exposure level (overall=F).
# Arguments:
#   d        - data frame with studyid, country, agedays and the columns
#              named by Y and Avar
#   Y        - outcome column name ("haz" or "whz")
#   Avar     - exposure / stratification column name
#   overall  - TRUE: return one pooled crosspred; FALSE: list of crosspreds,
#              one per level of Avar
#   cen      - age in days at which predictions are centered
#   type     - spline basis type passed to dlnm::onebasis (default "ps")
#   mean_aic - choose df by mean AIC instead of median (used when the
#              median-based choice fits poorly)
#   forcedf  - if non-NULL, override the AIC-chosen df
spline_meta <- function(d, Y="haz", Avar, overall=F, cen=365, type="ps", mean_aic=F, forcedf=NULL){
  # LOAD THE PACKAGES (mvmeta PACKAGE IS ASSUMED TO BE INSTALLED)
  require(mvmeta)
  require(dlnm)
  # Standardize the exposure/outcome column names so the rest of the code
  # can refer to Avar and Y directly.
  colnames(d)[which(colnames(d)==Avar)] <- "Avar"
  colnames(d)[which(colnames(d)==Y)] <- "Y"
  d <- d %>% filter(!is.na(Y) & !is.na(Avar))
  #Number of exposure levels
  nlevels <- length(unique(d$Avar))
  #Number of cohorts
  # id = cohort x exposure level; stage 1 fits one spline per id.
  d$id <- paste0(d$studyid, d$country,"__", d$Avar)
  m <- length(unique(d$id))
  d <- droplevels(d)
  # XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
  # NOTE: set bound as average bound across studies and knots based on average quantiles, (same as example script)
  # if that fails, set splines specific to each study
  # XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
  # DEFINE THE AVERAGE RANGE, CENTERING POINT, DEGREE AND TYPE OF THE SPLINE
  # (THESE PARAMETERS CAN BE CHANGED BY THE USER FOR ADDITIONAL ANALYSES)
  bound <- c(1,730)
  degree <- 3
  df <- 10
  # BUILT OBJECTS WHERE RESULTS WILL BE STORED:
  # ymat IS THE MATRIX FOR THE OUTCOME PARAMETERS
  # Slist IS THE LIST WITH (CO)VARIANCE MATRICES
  ymat <- matrix(NA,m,df,dimnames=list(unique(d$id),paste("spl",seq(df),sep="")))
  Slist <- vector("list",m)
  names(Slist) <- unique(d$id)
  ####################################################################
  # RUN THE FIRST-STAGE ANALYSIS
  # First pass: compute AIC for each candidate df in every cohort.
  fullres <-NULL
  for(i in 1:m){
    res <- get_df_aic(data=d[d$id==unique(d$id)[i],], splinetype=type, splinedegree=degree, degreefreedom=df)
    fullres <- bind_rows(fullres, res)
  }
  # Pick the df that minimizes the median (or mean) AIC across cohorts.
  bestdf <- fullres %>% group_by(df) %>% summarize(med_aic=median(aic, na.rm=T), mean_aic=mean(aic, na.rm=T))
  if(mean_aic){ #Use mean when median is too low for fit
    bestdf <- bestdf %>% filter(mean_aic==min(mean_aic))
  }else{
    bestdf <- bestdf %>% filter(med_aic==min(med_aic))
  }
  bestdf <- bestdf$df
  if(!is.null(forcedf)){bestdf<-forcedf}
  # Second pass: fit the chosen spline in each cohort and store coef/vcov.
  for(i in 1:m){
    # PRINT ITERATION
    cat(i,"")
    # LOAD
    data <- d[d$id==unique(d$id)[i],]
    # CREATE THE SPLINE
    bt <- onebasis(data$agedays,fun=type,
                   degree=degree,
                   df=bestdf)
    # RUN THE MODEL
    #Note: add cv cross-validation
    model <- glm(Y ~ bt,family=gaussian(), data)
    # EXTRACT AND SAVE THE RELATED COEF AND VCOV
    # try(): crosspred can fail for sparse cohorts; those stay NULL/NA and
    # are dropped below.
    predbt <- NULL
    try(predbt <- crosspred(bt,model,cen=cen))
    if(!is.null(predbt)){
      ymat[i,1:length(predbt$coef)] <- predbt$coef
      Slist[[i]] <- predbt$vcov
    }
  }
  # Drop cohorts whose first-stage fit failed, and any all-NA columns.
  Slist <- Filter(Negate(is.null), Slist)
  #Drop missing columns
  ymat<-ymat[!is.na(ymat[,1]),]
  #drop missing rows
  ymat<-ymat[, colSums(is.na(ymat)) != nrow(ymat)]
  ####################################################################
  # PERFORM MULTIVARIATE META-ANALYSIS
  ####################################################################
  # 1) MULTIVARIATE META-ANALYSIS
  if(overall){
    mv <- mvmeta(ymat,Slist,method="ml")
    # PREDICTION FROM SIMPLE META-ANALYSES WITH NO PREDICTORS
    # CENTERED TO SPECIFIC VALUE
    tmean <- seq(bound[1],bound[2],length=30)
    btmean <- onebasis(tmean,fun=type, degree=degree, df=bestdf #,
                       #knots=knots,
                       #Bound=bound
    )
    cp <- crosspred(btmean,coef=coef(mv),vcov=vcov(mv), by=0.1, cen=cen)
    return(cp)
  }else{
    # 2) MULTIVARIATE META-REGRESSION - Avar-level has to be study specific predictor
    Avar_lev<- stringr::str_split(rownames(ymat),"__", simplify=T)[,2]
    (mvlat <- mvmeta(ymat~Avar_lev,Slist,method="reml"))
    # NB: IN VERSION 0.4.1, CONVERGENCE MAY BE INSPECTED USING THE ARGUMENT:
    #   control=list(showiter=T)
    # NB: LESS STRICT CONVERGENCE CRITERIA, USEFUL FOR HIGH DIMENSIONAL
    #   MODELS, MAY BE SELECTED BY ADDING A reltol ARGUMENT, FOR EXAMPLE:
    #   control=list(showiter=T,reltol=10^-3)
    ####################################################################
    # CREATE BASIS FOR PREDICTION
    ####################################################################
    # BASIS USED TO PREDICT AGE, EQUAL TO THAT USED FOR ESTIMATION
    # NOTE: INTERNAL AND BOUNDARY KNOTS PLACED AT SAME VALUES AS IN ESTIMATION
    tmean <- seq(bound[1],bound[2],length=30)
    btmean <- onebasis(tmean,fun=type)
    ####################################################################
    # PREDICTION FROM MODELS
    ####################################################################
    # USE OF crosspred TO PREDICT THE EFFECTS FOR THE CHOSEN VALUES
    # COMPUTE PREDICTION FOR MULTIVARIATE META-REGRESSION MODELS
    # 1ST STEP: PREDICT THE OUTCOME PARAMETERS FOR SPECIFIC VALUES OF META-PREDICTOR
    # 2ND STEP: PREDICT THE RELATIONSHIP AT CHOSEN VALUES GIVEN THE PARAMETERS
    predAvar <- predict(mvlat,data.frame(Avar=factor(unique(d$Avar))),vcov=T)
    predlist <- list()
    for(i in 1:nlevels){
      # NOTE(review): model.link="log" is passed although the first-stage
      # model is gaussian with identity link — confirm this is intended.
      pred <- crosspred(btmean,coef=predAvar[[i]]$fit,vcov=predAvar[[i]]$vcov, model.link="log",by=0.1,cen=cen)
      predlist[[i]] <- pred
    }
    names(predlist) <- unique(d$Avar)
    return(predlist)
  }
}
# Convert crosspred output into a tidy plotting data frame.
#
# Args:
#   predlist   - if overall=TRUE, a single crosspred-like object with matfit
#                (fitted values) and matse (standard errors) matrices whose
#                rownames are ages in days; otherwise a *named list* of such
#                objects, one per stratum level.
#   overall    - whether predlist is a single object or a list.
#   stratlevel - label for the `level` column when overall=TRUE.
#
# Returns: a data.frame with columns level, agedays, est, se, ci.lb, ci.ub
#          (Wald 95% CI: est +/- 1.96*se). Returns NULL for an empty list.
create_plotdf <- function(predlist, overall=F, stratlevel=NULL){
  # Build the data frame for a single prediction object.
  build_one <- function(pred, level) {
    out <- data.frame(
      level = level,
      agedays = as.numeric(rownames(pred$matfit)),
      est = pred$matfit[, 1],
      se = pred$matse[, 1]
    )
    out$ci.lb <- out$est - 1.96 * out$se
    out$ci.ub <- out$est + 1.96 * out$se
    out
  }
  if (overall) {
    df <- build_one(predlist, stratlevel)
  } else {
    # seq_along (not 1:length) so an empty list yields NULL instead of an
    # error; do.call(rbind, ...) avoids growing a data frame in a loop.
    pieces <- lapply(seq_along(predlist), function(i) {
      build_one(predlist[[i]], names(predlist)[i])
    })
    df <- do.call(rbind, pieces)
  }
  return(df)
}
# Compute a per-level Z-score offset: the pooled mean of outcome Y within a
# +/- `range`-day window around age `cen`, for each level of `Avar`.
# Used to vertically anchor the meta-analyzed spline curves at the observed
# cohort-pooled mean.
# Relies on project helpers cohort.summary() and summarize_over_strata()
# (sourced from 0_descriptive_epi_wast_functions.R — not defined here).
# Returns a data.frame with columns: level (Avar level), offset (pooled mean).
offset_fun <- function(d, Y="haz", Avar, cen=365, range=60){
  df <- d[!is.na(d[,Avar]),]
  # Restrict to measurements in the (cen-range, cen+range) age window and
  # collapse them into a single age category for pooling.
  df <- df %>% filter(agedays < cen + range & agedays > cen - range) %>% mutate(agecat="first", agecat=factor(agecat))
  z.summary <- cohort.summary(d=df, var=Y, ci=F, continious=T, severe=F, minN=50, measure="", strata=c("region","studyid","country","agecat",Avar))
  z.res <- summarize_over_strata(cohort.sum=z.summary, proportion=F, continious=T,  measure = "GEN", method = "REML", strata=c("region","studyid","country","agecat",Avar), region=F, cohort=F)
  # NOTE(review): column positions 2 and 5 are assumed to be the Avar level
  # and the pooled estimate — verify against summarize_over_strata()'s output.
  z.res <- data.frame(level=z.res[,2], offset=z.res[,5])
  return(z.res)
}
#Function to pick degrees of freedom from lowest mean AIC across cohorts
# For one cohort's data, fit Y ~ spline(agedays) at every candidate df from
# splinedegree up to degreefreedom and record the AIC of each fit.
# Returns a data.frame with columns: cohort, df, aic (one row per candidate).
get_df_aic <- function(data, splinetype=type, splinedegree=degree, degreefreedom=df){
  candidate_dfs <- splinedegree:degreefreedom
  # Fit one candidate spline and return its AIC as a one-row data frame.
  fit_one <- function(k) {
    # CREATE THE SPLINE
    basis <- onebasis(data$agedays, fun = splinetype,
                      degree = splinedegree,
                      df = k)
    # RUN THE MODEL
    #Note: add cv cross-validation
    fit <- glm(Y ~ basis, family = gaussian(), data)
    data.frame(cohort = data$id[1], df = k, aic = fit$aic)
  }
  bind_rows(lapply(candidate_dfs, fit_one))
}
# Three-step color gradients (darkest shade = reference category)
orange_color_gradient = c("#FF7F0E", "#ffb26e", "#f5caab")
blue_color_gradient = c("#1F77B4", "#85cdff", "#b8e7ff")
purple_color_gradient = c("#7644ff", "#b3adff", "#e4dbff")
#------------------------------------------------------------------------------------------------
# WLZ- maternal height
#------------------------------------------------------------------------------------------------
# Overall spline curve of WLZ within each maternal-height stratum.
# forcedf / mean_aic are stratum-specific tweaks needed for a stable fit.
predlist1 <- predlist2 <- predlist3 <- NULL
table(d$mhtcm)
predlist1 <- spline_meta(d[d$mhtcm==">=155 cm",], Y="whz", Avar="mhtcm", overall=T, forcedf=5)
plotdf1 <- create_plotdf(predlist1, overall=T, stratlevel=">=155 cm")
predlist2 <- spline_meta(d[d$mhtcm=="[150-155) cm",], Y="whz", Avar="mhtcm", overall=T, mean_aic = T)
plotdf2 <- create_plotdf(predlist2, overall=T, stratlevel="[150-155) cm")
predlist3 <- spline_meta(d[d$mhtcm=="<150 cm",], Y="whz", Avar="mhtcm", overall=T)
plotdf3 <- create_plotdf(predlist3, overall=T, stratlevel="<150 cm")
plotdf_wlz_mhtcm <- rbind(plotdf1,plotdf2,plotdf3)
# Anchor each curve at the pooled mean WLZ around 12 months (cen=365 default)
offsetZ_wlz_mhtcm <- offset_fun(d, Y="whz", Avar="mhtcm")
plotdf_wlz_mhtcm <- left_join(plotdf_wlz_mhtcm, offsetZ_wlz_mhtcm, by="level")
plotdf_wlz_mhtcm <- plotdf_wlz_mhtcm %>%
  mutate(est= est + offset,
         ci.lb= ci.lb + offset,
         ci.ub= ci.ub + offset)
# Order levels tallest-first so colors map darkest -> tallest
plotdf_wlz_mhtcm <- plotdf_wlz_mhtcm %>% mutate(level = factor(level, levels=c( ">=155 cm", "[150-155) cm", "<150 cm")))
Avar="Maternal height"
# NOTE(review): light_blue_color_gradient is defined but never used below.
light_blue_color_gradient = c("#0fb3bf", "#83ced3", "#c5e0e2")
p2 <- ggplot() +
  geom_line(data=plotdf_wlz_mhtcm, aes(x=agedays, y=est, group=level, color=level), size=1.25) +
  #geom_ribbon(data=plotdf_wlz_mhtcm, aes(x=agedays,ymin=ci.lb, ymax=ci.ub, group=level, color=level, fill=level), alpha=0.3, color=NA) +
  scale_color_manual(values=orange_color_gradient, name = paste0( Avar)) +
  scale_fill_manual(values=orange_color_gradient, name = paste0( Avar)) +
  scale_x_continuous(limits=c(1,730), expand = c(0, 0),
                     breaks = 0:12*30.41*2, labels = 0:12*2) +
  scale_y_continuous(limits=c(-1, 0.45), breaks = seq(-1.2, 0.4, 0.2), labels = seq(-1.2, 0.4, 0.2)) +
  xlab("Child age in months") + ylab("Mean WLZ") +
  #coord_cartesian(ylim=c(-2,1)) +
  ggtitle(paste0("Spline curves of WLZ, stratified by\nlevels of ", Avar)) +
  theme(legend.position = c(0.8,0.9))
print(p2)
#------------------------------------------------------------------------------------------------
# LAZ- maternal height
#------------------------------------------------------------------------------------------------
# Sample sizes for the figure caption
df <- d %>% filter(!is.na(mhtcm)) %>% filter(agedays < 24* 30.4167)
dim(df)
df %>% group_by(studyid, country, subjid) %>% slice(1) %>% ungroup() %>% summarize(n(), Nstudies=length(unique(paste0(studyid, country))))
# Same stratified fits as the WLZ panel, with Y="haz"
predlist1 <- predlist2 <- predlist3 <- NULL
predlist1 <- spline_meta(d[d$mhtcm==">=155 cm",], Y="haz", Avar="mhtcm", overall=T, forcedf=5)
plotdf1 <- create_plotdf(predlist1, overall=T, stratlevel=">=155 cm")
predlist2 <- spline_meta(d[d$mhtcm=="[150-155) cm",], Y="haz", Avar="mhtcm", overall=T, mean_aic = T)
plotdf2 <- create_plotdf(predlist2, overall=T, stratlevel="[150-155) cm")
predlist3 <- spline_meta(d[d$mhtcm=="<150 cm",], Y="haz", Avar="mhtcm", overall=T)
plotdf3 <- create_plotdf(predlist3, overall=T, stratlevel="<150 cm")
plotdf_laz_mhtcm <- rbind(plotdf1,plotdf2,plotdf3)
# Anchor curves at the pooled mean LAZ around 12 months
offsetZ_laz_mhtcm <- offset_fun(d, Y="haz", Avar="mhtcm")
plotdf_laz_mhtcm <- left_join(plotdf_laz_mhtcm, offsetZ_laz_mhtcm, by="level")
plotdf_laz_mhtcm <- plotdf_laz_mhtcm %>%
  mutate(est= est + offset,
         ci.lb= ci.lb + offset,
         ci.ub= ci.ub + offset)
plotdf_laz_mhtcm <- plotdf_laz_mhtcm %>% mutate(level = factor(level, levels=c( ">=155 cm", "[150-155) cm", "<150 cm")))
Avar="Maternal height"
p4 <- ggplot() +
  geom_line(data=plotdf_laz_mhtcm, aes(x=agedays, y=est, group=level, color=level), size=1.25) +
  scale_color_manual(values =blue_color_gradient, name = paste0( Avar)) +
  scale_fill_manual(values = blue_color_gradient, name = paste0( Avar)) +
  scale_x_continuous(limits=c(1,730), expand = c(0, 0),
                     breaks = 0:12*30.41*2, labels = 0:12*2) +
  scale_y_continuous(limits=c(-2.35, -0.5), breaks = seq(-2.2, -0.4, 0.2), labels = seq(-2.2, -0.4, 0.2)) +
  xlab("Child age in months") + ylab("Mean LAZ")  +
  ggtitle(paste0("Spline curves of LAZ, stratified by\nlevels of ", Avar)) +
  theme(legend.position = c(0.8,0.9))
print(p4)
#------------------------------------------------------------------------------------------------
# WLZ- maternal BMI
#------------------------------------------------------------------------------------------------
# Overall spline curve of WLZ within each maternal-BMI stratum
predlist1 <- predlist2 <- predlist3 <- NULL
table(d$mbmi)
predlist1 <- spline_meta(d[d$mbmi=="<20 kg/m²",], Y="whz", Avar="mbmi", overall=T, cen=365, mean_aic=T)
plotdf1 <- create_plotdf(predlist1, overall=T, stratlevel="<20 kg/m²")
predlist2 <- spline_meta(d[d$mbmi=="[20-24) kg/m²",], Y="whz", Avar="mbmi", overall=T, cen=365, mean_aic=T)
plotdf2 <- create_plotdf(predlist2, overall=T, stratlevel="[20-24) kg/m²")
predlist3 <- spline_meta(d[d$mbmi==">=24 kg/m²",], Y="whz", Avar="mbmi", overall=T, cen=365, mean_aic=T)
plotdf3 <- create_plotdf(predlist3, overall=T, stratlevel=">=24 kg/m²")
plotdf_wlz_mbmi <- rbind(plotdf1, plotdf2, plotdf3)
# Anchor curves at the pooled mean WLZ around 12 months
offsetZ_wlz_mbmi <- offset_fun(d, Y="whz", Avar="mbmi", cen=365)
plotdf_wlz_mbmi <- left_join(plotdf_wlz_mbmi, offsetZ_wlz_mbmi, by="level")
plotdf_wlz_mbmi <- plotdf_wlz_mbmi %>%
  mutate(est= est + offset,
         ci.lb= ci.lb + offset,
         ci.ub= ci.ub + offset)
# Order levels highest-BMI-first so colors map darkest -> highest
plotdf_wlz_mbmi <- plotdf_wlz_mbmi %>% mutate(level = factor(level, levels=c( ">=24 kg/m²", "[20-24) kg/m²", "<20 kg/m²")))
Avar="Maternal BMI"
p5 <- ggplot() +
  geom_line(data=plotdf_wlz_mbmi, aes(x=agedays, y=est, group=level, color=level), size=1.25) +
  scale_color_manual(values=orange_color_gradient, name = paste0( Avar),
                     labels = c( ">=24 kg/m²", "[20-24) kg/m²", "<20 kg/m²")) +
  scale_fill_manual(values=orange_color_gradient, name = paste0( Avar)) +
  scale_x_continuous(limits=c(1,730), expand = c(0, 0),
                     breaks = 0:12*30.41*2, labels = 0:12*2) +
  scale_y_continuous(limits=c(-1, 0.45), breaks = seq(-1.2, 0.4, 0.2), labels = seq(-1.2, 0.4, 0.2)) +
  xlab("Child age in months") + ylab("Mean WLZ") +
  #coord_cartesian(ylim=c(-2,1)) +
  ggtitle(paste0("Spline curves of WLZ, stratified by\nlevels of ", Avar)) +
  theme(legend.position = c(0.8,0.9))
print(p5)
#------------------------------------------------------------------------------------------------
# LAZ- maternal BMI
#------------------------------------------------------------------------------------------------
# Sample sizes for the figure caption
df <- d %>% filter(!is.na(mbmi))
dim(df)
df %>% group_by(studyid, country, subjid) %>% slice(1) %>% ungroup() %>% summarize(n(), Nstudies=length(unique(paste0(studyid, country))))
# Same stratified fits as the WLZ panel, with Y="haz"
predlist1 <- predlist2 <- predlist3 <- NULL
predlist1 <- spline_meta(d[d$mbmi=="<20 kg/m²",], Y="haz", Avar="mbmi", overall=T, cen=365, mean_aic=T)
plotdf1 <- create_plotdf(predlist1, overall=T, stratlevel="<20 kg/m²")
predlist2 <- spline_meta(d[d$mbmi=="[20-24) kg/m²",], Y="haz", Avar="mbmi", overall=T, cen=365, mean_aic=T)
plotdf2 <- create_plotdf(predlist2, overall=T, stratlevel="[20-24) kg/m²")
predlist3 <- spline_meta(d[d$mbmi==">=24 kg/m²",], Y="haz", Avar="mbmi", overall=T, cen=365, mean_aic=T)
plotdf3 <- create_plotdf(predlist3, overall=T, stratlevel=">=24 kg/m²")
plotdf_laz_mbmi <- rbind(plotdf1, plotdf2, plotdf3)
# Anchor curves at the pooled mean LAZ around 12 months
offsetZ_laz_mbmi <- offset_fun(d, Y="haz", Avar="mbmi", cen=365)
plotdf_laz_mbmi <- left_join(plotdf_laz_mbmi, offsetZ_laz_mbmi, by="level")
plotdf_laz_mbmi <- plotdf_laz_mbmi %>%
  mutate(est= est + offset,
         ci.lb= ci.lb + offset,
         ci.ub= ci.ub + offset)
plotdf_laz_mbmi <- plotdf_laz_mbmi %>% mutate(level = factor(level, levels=c( ">=24 kg/m²", "[20-24) kg/m²", "<20 kg/m²")))
Avarwt="Maternal BMI"
p6 <- ggplot() +
  geom_line(data=plotdf_laz_mbmi, aes(x=agedays, y=est, group=level, color=level), size=1.25) +
  scale_color_manual(values=blue_color_gradient, name = paste0( Avarwt), labels = c( ">=24 kg/m²", "[20-24) kg/m²", "<20 kg/m²")) +
  scale_fill_manual(values=blue_color_gradient, name = paste0( Avarwt)) +
  scale_x_continuous(limits=c(1,730), expand = c(0, 0),
                     breaks = 0:12*30.41*2, labels = 0:12*2) +
  scale_y_continuous(limits=c(-2.35, -0.5), breaks = seq(-2.2, -0.4, 0.2), labels = seq(-2.2, -0.4, 0.2)) +
  xlab("Child age in months") + ylab("Mean LAZ") +
  #coord_cartesian(ylim=c(-2,1)) +
  ggtitle(paste0("Spline curves of LAZ, stratified by\nlevels of ", Avarwt)) +
  theme(legend.position = c(0.8,0.9))
print(p6)
#Save plot objects
# p1/p3 (maternal weight) are not generated in this script; saved as NULL
# placeholders to keep list positions stable for downstream consumers.
p1 <- p3 <- NULL
saveRDS(list(p1, p2, p3, p4, p5, p6), file=paste0(BV_dir,"/figures/plot-objects/risk-factor/rf_spline_objects.RDS"))
#save plot data
plotdf_wlz_mwtkg <- plotdf_laz_mwtkg <- NULL
saveRDS(list(plotdf_wlz_mwtkg,plotdf_laz_mwtkg,plotdf_wlz_mhtcm,plotdf_laz_mhtcm,plotdf_wlz_mbmi,plotdf_laz_mbmi),
        file=paste0(BV_dir,"/figures/risk-factor/figure-data/rf_spline_data.RDS"))
|
bc11dbfdfaf1efffb2ed589de14876ca19cf47b3
|
e0e538679b6e29837839fdbc3d68b4550e256bb9
|
/docs/autumn/example/data-data.frame.r
|
54903e4e4d86d04fe81344c202dc0f5b038e1b9e
|
[] |
no_license
|
noboru-murata/sda
|
69e3076da2f6c24faf754071702a5edfe317ced4
|
4f535c3749f6e60f641d6600e99a0e269d1fa4ea
|
refs/heads/master
| 2020-09-24T20:23:36.224958
| 2020-09-22T07:17:54
| 2020-09-22T07:17:54
| 225,833,335
| 0
| 0
| null | 2019-12-05T08:20:51
| 2019-12-04T09:51:18
| null |
UTF-8
|
R
| false
| false
| 784
|
r
|
data-data.frame.r
|
### Data frame examples (building a data frame from vectors / a matrix)
(a <- data.frame(height=c(172,158,160),weight=c(60,53,51)))
(b <- matrix(1:8,nrow=4,ncol=2))
# NOTE(review): the name `c` shadows base::c for the rest of this script.
(c <- data.frame(b)) # a data frame can also be built from a matrix
(rownames(c) <- letters[1:nrow(c)]) # assign row names
(names(c) <- c("Left","Right")) # assign column names
c # inspect the result
### Example data: datasets::airquality (see help(airquality) for details)
dim(airquality) # check the dimensions
names(airquality) # show the column names
head(airquality,n=5) # show the first 5 rows
plot(airquality) # scatterplot matrix
ts.plot(airquality) # plot the columns as time series
subset(airquality, Ozone>100) # extract rows satisfying a condition
subset(airquality, Ozone>100, select=Wind:Day)
|
017a727a8499977ea1ebf098cc95b86bd120bb4c
|
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
|
/data/genthat_extracted_code/BWStest/examples/murakami_cdf.Rd.R
|
6b9c21bb0813ee4bad81f4c629d2bb75a9e0bba3
|
[] |
no_license
|
surayaaramli/typeRrh
|
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
|
66e6996f31961bc8b9aafe1a6a6098327b66bf71
|
refs/heads/master
| 2023-05-05T04:05:31.617869
| 2019-04-25T22:10:06
| 2019-04-25T22:10:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 543
|
r
|
murakami_cdf.Rd.R
|
# Example code extracted from the Rd help page of BWStest::murakami_cdf.
library(BWStest)
### Name: murakami_cdf
### Title: Murakami test statistic distribution.
### Aliases: murakami_cdf
### ** Examples
# basic usage: CDF of the Murakami B1 statistic for sample sizes 8 and 6
xv <- seq(0,4,length.out=101)
yv <- murakami_cdf(xv, n1=8, n2=6, flavor=1L)
plot(xv,yv)
# overlay the asymptotic Baumgartner-Weiss-Schindler CDF for comparison
zv <- bws_cdf(xv)
lines(xv,zv,col='red')
# check under the null:
## Not run:
##D flavor <- 1L
##D n1 <- 8
##D n2 <- 8
##D set.seed(1234)
##D Bvals <- replicate(2000,murakami_stat(rnorm(n1),rnorm(n2),flavor))
##D # should be uniform:
##D plot(ecdf(murakami_cdf(Bvals,n1,n2,flavor)))
## End(Not run)
|
875001047a82aae326b7677833831cbfec6a7223
|
ae0d3cc702453a4884ad59c149e19010cbd6378a
|
/plot1.R
|
175b3c2a13a625e2278fe87ab821b578d869c8ea
|
[] |
no_license
|
dreammaster38/ExData_PeerAssessment2
|
e74f0292fa2ff202b5066b35dabd1fe040af7c73
|
55e2bf132d00df6b61ca59eca4a2cf6d82fd3540
|
refs/heads/master
| 2021-01-24T00:44:32.444927
| 2014-07-23T15:47:58
| 2014-07-23T15:47:58
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 745
|
r
|
plot1.R
|
# plot1.R — total US PM2.5 emissions per year (1999-2008), base plotting system.
library(data.table)

# Read each data set only if it is not already present in the workspace
# (the .rds files are large, so re-reading is avoided across script runs).
if (!exists("NEI")) {
  NEI <- readRDS("summarySCC_PM25.rds")
}
if (!exists("SCC")) {
  SCC <- readRDS("Source_Classification_Code.rds")
}

# Sum PM2.5 emissions over all sources, by year
nei_dt <- data.table(NEI)
yearly_totals <- nei_dt[, list(totalPM25 = sum(Emissions)), by = year]

# Draw the line plot into plot1.png (titles use plotmath for the subscript)
png("plot1.png")
plot(yearly_totals$year,
     yearly_totals$totalPM25,
     type = "l",
     main = "Total emission of " ~ PM[2.5] ~ " between 1999 and 2008",
     xlab = "year", ylab = expression("Total emission of " ~ PM[2.5] ~ " in tons"))
dev.off()
|
aef3362dd04f26a3d3f04fd82bac9ae80695a383
|
7b122933da2451a501a6f6a930653d8c52f55bdc
|
/scripts/PopGenome_cisreg_chr5.R
|
42e5ed10d600f8c36ba66531875772f4ea89f445
|
[] |
no_license
|
rtraborn/Promoter_PopGen
|
54e5c31a6ca66fc93c8307a11cb2ed4ae5c2bfb5
|
384120a928451a73533e4067e701547c99609a70
|
refs/heads/master
| 2022-03-17T17:48:48.288667
| 2019-12-06T17:38:19
| 2019-12-06T17:38:19
| 67,723,999
| 0
| 0
| null | 2018-04-07T21:49:53
| 2016-09-08T17:12:11
|
R
|
UTF-8
|
R
| false
| false
| 2,738
|
r
|
PopGenome_cisreg_chr5.R
|
## This script tested and ready for production mode on Carbonate
# For each VCF slice listed in `fileList` (one genomic window on chr5),
# compute per-gene nucleotide diversity (pi) within each 1000 Genomes
# population and write: (a) one summary table per window, and (b) one
# per-feature table of site-level diversity.
library(PopGenome)
library(bigmemory)
library(tools)
##########################################################################
vcfDir <- "/N/dc2/projects/PromoterPopGen/human_complete_data/human-split-data/cisreg_chr5"
fileList <- "/N/dc2/projects/PromoterPopGen/human_complete_data/human-split-data/cisreg_chr5/human_file_list_5.txt"
file.names <- read.csv(file=fileList, header=FALSE)
colnames(file.names) <- c("chr", "start", "end", "file")
gffFile <- "/N/dc2/projects/PromoterPopGen/TSSs_gff/TSSset-1_chr5.gff3"
popListFile <- "/N/dc2/projects/PromoterPopGen/Promoter_PopGen/data/human/pop_list_1kGP.csv"
##########################################################################
setwd(vcfDir)
# identifiers_to_list(): project helper mapping population -> sample IDs
source("/N/dc2/projects/PromoterPopGen/Promoter_PopGen/scripts/identifiers_to_list.R")
pop.list <- identifiers_to_list(csv.file=popListFile)

# Outer loop: one iteration per VCF window listed in file.names.
# seq_len() is safe for an empty file list (1:nrow would iterate c(1, 0)).
for (i in seq_len(nrow(file.names))) {
  print(i)
  this.string <- file.names[i,]
  this.chr <- as.character(this.string[1])
  this.start <- this.string[2]
  # readVCF requires a 1-based start position
  if (this.start==0) {
    this.start <- 1
  }
  this.end <- this.string[3]
  this.filename <- as.character(unlist(this.string[4]))
  this.filename2 <- file_path_sans_ext(this.filename)
  diversity_out <- paste(this.filename2, "diversity", sep="_")
  diversity_filename <- paste(diversity_out, "txt", sep=".")
  #for debugging
  print(diversity_filename)
  print(this.chr)
  print(this.start)
  print(this.end)
  print(this.filename)
  # Load the window, attach populations, and split into per-gene regions
  GENOME.class <- readVCF(filename=this.filename, numcols=100000, frompos=this.start, topos=this.end, tid=this.chr, gffpath=gffFile)
  GENOME.class <- set.populations(GENOME.class, new.populations=pop.list, diploid=TRUE)
  split <- GENOME.class.split <- splitting.data(GENOME.class,subsites="gene")
  gc()
  split <- diversity.stats(split, pi=TRUE, keep.site.info=TRUE)
  feature.names <- split@region.names
  n.features <- length(split@region.names)
  # Per-window summary: one row per gene, one column per population
  nuc.diversity.m <- split@nuc.diversity.within
  colnames(nuc.diversity.m) <- names(pop.list)
  write.table(nuc.diversity.m, col.names=TRUE, row.names=FALSE, sep="\t", file=diversity_filename)
  # Inner loop over genes. Uses its own index `k` — the original reused `i`,
  # shadowing the outer window index (legal in R but highly error-prone).
  for (k in seq_len(n.features)) {
    print(k)
    f.name <- feature.names[k]
    root.name <- paste(f.name, "chr", this.chr, sep="_")
    fileName <- paste(root.name, "txt", sep=".")
    pi.ma <- split@region.stats@nuc.diversity.within[[k]]
    # NULL here means PopGenome found no variable sites in this feature
    if (!is.null(pi.ma)) {
      pi.ma.t <- t(pi.ma)
      colnames(pi.ma.t) <- names(pop.list)
      write.table(pi.ma.t, col.names=TRUE, row.names=TRUE, sep="\t", quote=FALSE, file=fileName)
    } else {
      print("No variation in feature.")
    }
  }
  gc()
}
|
174e05693d41f4a786bdb3f8f796fb6475b43c8f
|
42babe3bd1ebd1d9151e325aa6ce74e31330cbc1
|
/sRGES_all_cmpds.R
|
ac8a26adf46c383c4da820a490288cba780f1845
|
[
"MIT"
] |
permissive
|
minghao2016/RGES
|
ee841e11a2cdf939ffba8e6e6263b41793860005
|
af7fa59649dbc5dfa21b59f5446ea743ba236438
|
refs/heads/master
| 2020-03-22T22:22:55.641852
| 2017-07-06T23:34:57
| 2017-07-06T23:34:57
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,566
|
r
|
sRGES_all_cmpds.R
|
#using sRGES to summarize all compounds
# Summarizes per-signature RGES scores into one sRGES per compound:
# (1) builds a dose/time reference model from paired LINCS signatures,
# (2) adjusts each signature's score for dose, time and cell-line
#     correlation via getsRGES(), (3) averages per compound.
# NOTE(review): getsRGES() is not defined in this file — it must be sourced
# elsewhere before running; verify.
library("plyr")
cancer <- "ER"
#build a reference model according to dose and time
output_path <- paste(cancer, "/all_lincs_score.csv", sep="")
lincs_drug_prediction <- read.csv(output_path)
#should use pert_dose > 0.01
lincs_drug_prediction_subset <- subset(lincs_drug_prediction, pert_dose > 0 & pert_time %in% c(6, 24))
#pairs that share the same drug and cell id
lincs_drug_prediction_pairs <- merge(lincs_drug_prediction_subset, lincs_drug_prediction_subset, by=c("pert_iname", "cell_id"))
#x is the reference
# Keep pairs where the reference (.x) is the standard condition: 10 uM, 24 h
lincs_drug_prediction_pairs <- subset(lincs_drug_prediction_pairs, id.x != id.y & pert_time.x == 24 & pert_dose.x == 10) #, select <- c("cmap_score.x", "cmap_score.y", "pert_dose.y", "pert_time.y"))
#difference of RGES to the reference
lincs_drug_prediction_pairs$cmap_diff <- lincs_drug_prediction_pairs$cmap_score.x - lincs_drug_prediction_pairs$cmap_score.y
lincs_drug_prediction_pairs$dose <- round(log(lincs_drug_prediction_pairs$pert_dose.y, 2), 1)
#fix time
# Linear model of score difference vs log2 dose at 24 h ...
lincs_drug_prediction_pairs_subset <- subset(lincs_drug_prediction_pairs, pert_time.y == 24 )
dose_cmap_diff_24 <- tapply(lincs_drug_prediction_pairs_subset$cmap_diff, lincs_drug_prediction_pairs_subset$dose, mean)
dose_cmap_diff_24 <- data.frame(dose = as.numeric(names(dose_cmap_diff_24)), cmap_diff= dose_cmap_diff_24)
plot(dose_cmap_diff_24$dose, dose_cmap_diff_24$cmap_diff)
lm_dose_24 <- lm(cmap_diff ~ dose, data = dose_cmap_diff_24)
summary(lm_dose_24)
# ... and at 6 h
lincs_drug_prediction_pairs_subset <- subset(lincs_drug_prediction_pairs, pert_time.y == 6)
dose_cmap_diff_6 <- tapply(lincs_drug_prediction_pairs_subset$cmap_diff, lincs_drug_prediction_pairs_subset$dose, mean)
dose_cmap_diff_6 <- data.frame(dose = as.numeric(names(dose_cmap_diff_6)), cmap_diff= dose_cmap_diff_6)
lm_dose_6 <- lm(cmap_diff ~ dose, data = dose_cmap_diff_6)
plot(dose_cmap_diff_6$dose, dose_cmap_diff_6$cmap_diff)
summary(lm_dose_6)
#estimate difference
# Mean score difference by dose bin x time; `diff` feeds getsRGES() below
lincs_drug_prediction_pairs$dose_bin <- ifelse(lincs_drug_prediction_pairs$pert_dose.y < 10, "low", "high")
tapply(lincs_drug_prediction_pairs$cmap_diff, lincs_drug_prediction_pairs$dose_bin, mean)
tapply(lincs_drug_prediction_pairs$cmap_diff, lincs_drug_prediction_pairs$pert_time.y, mean)
diff <- tapply(lincs_drug_prediction_pairs$cmap_diff, paste(lincs_drug_prediction_pairs$dose_bin, lincs_drug_prediction_pairs$pert_time.y), mean)
# Map LINCS cell lines to CCLE cell lines for this cancer type, with the
# expression-correlation (`cor`) used to weight each signature
cell_lines <- read.csv(paste("raw/cell_lines/", cancer, "_cell_lines.csv", sep=""))
ccle_lincs = read.csv("raw/cell_line_lincs_ccle.csv")
cell_line_cancer <- read.csv(paste(cancer, "/", "cell_line_", cancer, "_tacle.csv", sep=""))
cell_line_cancer <- merge(cell_line_cancer, ccle_lincs, by.x="Cell.line.primary.name", by.y="ccle_cell_line_name")
cell_line_cancer <- cell_line_cancer[order(cell_line_cancer$cor),]
pred <- merge(lincs_drug_prediction, cell_line_cancer, by.x="cell_id", by.y="lincs_cell_id")
pred$RGES <- sapply(1:nrow(pred), function(id){getsRGES(pred$cmap_score[id], pred$cor[id], pred$pert_dose[id], pred$pert_time[id], diff, max(pred$cor))})
# NOTE(review): cmpd_freq > 0 keeps every compound (table counts are >= 1),
# so this filter is a no-op; a higher threshold may have been intended.
cmpd_freq <- table(pred$pert_iname)
pred <- subset(pred, pert_iname %in% names(cmpd_freq[cmpd_freq>0]))
# Summarize adjusted RGES per compound; sRGES = mean across signatures
pred_merged <- ddply(pred,  .(pert_iname),  summarise,
                     mean = mean(RGES),
                     n = length(RGES),
                     median = median(RGES),
                     sd = sd(RGES))
pred_merged$sRGES <- pred_merged$mean
pred_merged <- pred_merged[order(pred_merged$sRGES), ]
write.csv(pred_merged,paste( cancer, "/lincs_cancer_sRGES.csv", sep=""))
|
fd9b7908ff2600fad0e691348e8b19c8e8c4a2cc
|
514e66bd56b9253aa10899c469a3f67dd2959777
|
/man/skIncrPartialPCA.Rd
|
a4aa117d3a03c6c0a85bbb75af8c49c3402c8c19
|
[] |
no_license
|
vjcitn/BiocSklearn
|
e37e07120b943c545d26ca7116912ac66fd56ba6
|
3fca39434eef1456c3026414bc1e3640f67cb9cc
|
refs/heads/master
| 2023-01-05T12:36:07.175212
| 2022-12-26T12:06:25
| 2022-12-26T12:06:25
| 97,358,482
| 1
| 1
| null | 2019-11-03T21:36:55
| 2017-07-16T03:53:58
|
R
|
UTF-8
|
R
| false
| true
| 862
|
rd
|
skIncrPartialPCA.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/skIPart.R
\name{skIncrPartialPCA}
\alias{skIncrPartialPCA}
\title{use basilisk discipline to perform partial (n_components) incremental (chunk.size) PCA with scikit.decomposition}
\usage{
skIncrPartialPCA(mat, n_components, chunk.size = 10)
}
\arguments{
\item{mat}{a matrix}
\item{n_components}{integer(1) number of PCs to compute}
\item{chunk.size}{integer(1) number of rows to use each step}
}
\description{
use basilisk discipline to perform partial (n_components) incremental (chunk.size) PCA with scikit.decomposition
}
\note{
A good source for capabilities and examples is at the \href{https://scikit-learn.org/stable/modules/decomposition.html\#decompositions}{sklearn doc site}.
}
\examples{
lk = skIncrPartialPCA(iris[,1:4], n_components=3L)
lk
head(getTransformed(lk))
}
|
6dccfe51edc71fb625cc65d7246a1591cbc57a9f
|
3524de329d1f28a6df15093155ea6c2df9c37f54
|
/long_algorythm(v0.3).r
|
1588c309e51ad008c02974009acafdfed5f82fa6
|
[] |
no_license
|
SidGor/turtle_project
|
324580276d8c57b7b5939f919fb7f48115458298
|
47f972d3c7aef2903e302081c9ac9a110094c71d
|
refs/heads/master
| 2021-01-23T16:17:48.310002
| 2017-06-19T07:12:19
| 2017-06-19T07:12:19
| 93,289,881
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,852
|
r
|
long_algorythm(v0.3).r
|
# Design notes (translated from Chinese): a portfolio-wide cap was considered
# -- multiply the holdings vector by the correlation matrix and check the
# total against a risk limit -- but that makes it hard to define rules for
# opening opposite positions once the limit is hit, and it cannot enforce the
# "no more than 12 units in the same direction" rule.  The approach taken is
# two sets of checks instead:
# 1. While adding, a loop verifies each added unit stays within the limits;
#    an addition that breaks a limit is rolled back and the next product is
#    tried (prioritising stronger signals is not implemented yet).
# 2. At the start of each round, capital checks unwind positions in reverse
#    entry order (walking `standing_contract` backwards), booking the loss.
library(rlist) # list.append and list.stack are used below
# Entry evaluation: scratch variables reused while building each contract.
enter_date = NA # staging: entry date
product_name = NA # staging: product type
direction = NA # staging: contract direction
enter_price = NA # staging: entry price
cut_point = NA # staging: stop-loss price
no_contract = NA # staging: number of contracts
# 1. Generate the trade plan
#long_plan <- sig_long * units #The aggregate plan of how many *contracts(not tons)* should be add
# Build a trial-position vector, effectively a cache.
#
for (j in 1:length(product_ids)){
  if (is.na(unit_long[j])){ next
  }else if (unit_long[j] == 0) {next } # skip products with no buy plan (saves time)
  t_position = copy(position) # re-read the actual position for every product when
  t_position[is.na(t_position)] = 0
  # opening several longs the same day: t_position accumulates inside the
  # k-loop and would otherwise leak into the other products' tests
  for(k in 1:unit_long[j]) {
    t_position[j] = t_position[j] + 1
    #test 1: any direction ,single holding should be less than 4
    if (any(abs(na.exclude(t_position)) > 4)) {
      #test 2: any direction, combination of strong corr assets should be less than 6
    }else if (any(abs(as.vector(t_position) %*% corr_mat$clscorr) > 6)){
      #test 3: any direction, combination of losely corr assets should be less than 10
    }else if (any(abs(c(t_position) %*% corr_mat$lslcorr) > 10)){
      #test 4: any direction, total holding should be less than 12
    }else if (abs(sum(t_position)) > 12){
    }else {
      position[j] <- t_position[j] #update the actual position
      holding[j] <- holding[j] + units[j] #update holdings
      enter_date <- cdt[[1]][ptr]
      direction <- 1L # 1L long, -1L short
      enter_price <- cdt[[15 + (j-1) * 15]][ptr] + slippage[j] #subset the channel price + slippage
      fee <- fee + enter_price * units[j] * vm[j] * fee.rate[j] + fee_rate_fix[j] #update total fee
      cut <- enter_price - 2 * cdt[[9+(j-1)*15]][ptr] #lost cutting point, 2N
      trade_id <- paste("|",direction,"|",enter_date,cdt[[2 + (j-1) * 15]][ptr],"00",k,sep = "")
      contract <- data.table(trade_id = trade_id,
                             enter_date = enter_date, #saving contract information
                             product_name = cdt[[2 + (j-1) * 15]][ptr],
                             direction = direction,
                             enter_price = enter_price,
                             cut_point = cut,
                             no_contract = units[j]
      )
      standing_contract = list.append(standing_contract, contract) #adding contract to current holding
      cash <- cash - enter_price * units[j] * vm[j] - enter_price * units[j] * vm[j] * fee.rate[j] - fee_rate_fix[j] #update cash
    }
  }#end of k looping for open tests
}# end of j loop: long entries
sta_contract_dt <- list.stack(standing_contract, data.table = TRUE) #use data.frame for easy tracking
|
384e8ee6a93c0863d942a95b85737f9d8865688a
|
2077274b03658de2834f88bba57480e3c9538e02
|
/packrat/lib/x86_64-apple-darwin15.6.0/3.4.2/shinytest/tests/testthat/test-exported-values.R
|
9e6c64db11bb5a024f60c29b42e948ad11e62aef
|
[
"MIT"
] |
permissive
|
danielg7/FireWeatherExplorer
|
331da0c2f47ac19bba5cdc8d7dbe8b70ece50cfe
|
567e3120ec4f63951de566547d678919b692ccd7
|
refs/heads/master
| 2021-09-20T05:13:31.989782
| 2021-08-08T15:07:43
| 2021-08-08T15:07:43
| 123,375,653
| 0
| 0
|
MIT
| 2020-06-18T19:25:34
| 2018-03-01T03:15:48
|
C++
|
UTF-8
|
R
| false
| false
| 371
|
r
|
test-exported-values.R
|
context("Exported values")

# Drive the test-exports app and check the values it exposes via shiny's
# export mechanism before and after two increment clicks.
app <- ShinyDriver$new(test_path("apps/test-exports/"))

test_that("Exported values", {
  vals <- app$getAllValues()
  expect_identical(vals$export$x, 1)
  expect_identical(vals$export$y, 2)

  # Two button presses should advance both exported counters by two.
  app$setInputs(inc = "click")
  app$setInputs(inc = "click")

  vals <- app$getAllValues()
  expect_identical(vals$export$x, 3)
  expect_identical(vals$export$y, 4)
})
|
cef21ddc00f6d1a15ab754e142814872b43d8ae4
|
e2c34286397b6fa5dfeb4646ea7d1aa68089924a
|
/plot2.R
|
f7b3436ae2065ae64221723efd0ee721c495eac0
|
[] |
no_license
|
jxchen01/plottingR
|
2686e78860c406a517f38c8613bc25a75c9d0a8c
|
d04e3243df89d89b07ace472b070d90bf99c9112
|
refs/heads/master
| 2021-01-01T15:37:04.950467
| 2014-07-11T03:29:09
| 2014-07-11T03:29:09
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 292
|
r
|
plot2.R
|
# plot2.R: line plot of Global Active Power over the analysed window, saved
# to plot2.png.  Assumes `md` (the subsetted household power data frame with
# a Global_active_power column) exists in the workspace -- TODO(review):
# confirm it is created by a companion script.
png('plot2.png')
# Draw the series without axes so custom ones can be added below.
plot(md$Global_active_power,type="l",axes=F,xlab=" ", ylab="Global Active Power (kilowatts)")
# NOTE(review): setting par(xaxp) after plot() does not relabel axes already
# drawn; the weekday labels are placed manually with text() below.
par(xaxp=c(0,length(md$Global_active_power),2))
axis(1,lab=F)
axis(2)
box()
# Weekday names at the default x tick positions, just below the axis.
text(axTicks(1), par("usr")[3] - 0.4, adj=1, labels=c("Thu", "Fri", "Sat"), xpd=T, cex=0.8)
dev.off()
|
28326f2c4295525db218fcfb3c9fd2c9dc6cdb3a
|
0e84ee8922b96bd526883e3b7dcab258c278d84e
|
/man/math_stat.Rd
|
f5d2c17e0af23ba3d23712620c39f91afb01e07d
|
[] |
no_license
|
zhaoxue-xmu/RDA
|
8f9f68620d9c1393c66e0efd1c9ccda7e1008ad6
|
ea8ed94680c1964f491bbbe17e22c9a52659f39c
|
refs/heads/master
| 2021-01-17T16:00:27.495392
| 2017-03-24T15:42:06
| 2017-03-24T15:42:06
| 82,945,310
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 456
|
rd
|
math_stat.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/data_math_stat.R
\docType{data}
\name{math_stat}
\alias{math_stat}
\title{Dataset of students' math and stat score in chapter6}
\format{a dataframe with 24 rows and 2 variables
\describe{
\item{math}{students' math score}
\item{stat}{students' stat score}
}}
\usage{
math_stat
}
\description{
A data frame containing two variables, the math and stat scores of 24 students.
}
\keyword{datasets}
|
279fa3061aac831dd670b29ba6dc7b5600471ab3
|
83452e8685d58e9d02591d01525f0f3f0a0fb4cb
|
/454_adv_modeling/team_proj/scripts/wrapper_module.R
|
71c7970f34077571d4b69f4ce9eb5ab8aca79051
|
[] |
no_license
|
tulasikparadarami/mspa_projects
|
7469aec9b0addd23fc1f547711cc9762db1e39d8
|
2cba7a390a1f25c3bd6b3c322d90463916639d10
|
refs/heads/master
| 2022-03-08T21:24:36.216330
| 2015-08-14T01:08:34
| 2015-08-14T01:08:34
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 7,757
|
r
|
wrapper_module.R
|
# setwd("~/Google Drive/MSPA/454 (Advanced Modeling Techniques)/team project/allstate purchase prediction")
# NOTE(review): hard-coded setwd() ties the script to one machine; consider
# running from the project root instead.
setwd("~/github/mspa_projects/454_adv_modeling/team_proj")

# One-time pipeline setup: load the raw Allstate data, impute the three
# NA-prone columns, then split and transform into train/test sets.
# Relies on the `:=` multi-assignment helper defined in
# scripts/method_with_multiple_returns.R.
initialize = function() {
  # load allstate data from csv into dataframe
  source("scripts/load_data.R")
  allstate_data_tbl = load_data()
  # Impute missing values: risk_factor and C_previous default to 0,
  # duration_previous to 6 (why 6 -- TODO(review): confirm rationale).
  allstate_data_tbl$FixedRiskFactor = ifelse(is.na(allstate_data_tbl$risk_factor),0,allstate_data_tbl$risk_factor) #NAs to 0
  allstate_data_tbl$FixedCPrevious = ifelse(is.na(allstate_data_tbl$C_previous),0,allstate_data_tbl$C_previous) #NAs to 0
  allstate_data_tbl$FixedDurationPrevious = ifelse(is.na(allstate_data_tbl$duration_previous),6,allstate_data_tbl$duration_previous)
  # source("scripts/draw_plots.R")
  # draw_plots(allstate_data_tbl)
  # split original dataset into train and test datasets
  # before running models, data is transformed such that
  # [A..G] from purchased records are coverted to dependent
  # variables on quotes
  source("scripts/method_with_multiple_returns.R")
  source("scripts/data_transformations.R")
  c(train, test) := split_data(allstate_data_tbl)
  c(train, test) := transform_data(train, test)
}
# Read the pre-split Allstate train/test CSVs from disk.
# Returns a two-element list: list(train_data, test_data); callers unpack it
# with the project's `:=` multi-assignment helper.
load_train_test = function() {
  train_set = read.csv("data/allstate_train.csv")
  test_set = read.csv("data/allstate_test.csv")
  list(train_set, test_set)
}
library(dplyr)
library(tidyr)
source("scripts/method_with_multiple_returns.R")
# Pin dplyr's select (other attached packages may mask it).
select = dplyr :: select
# `:=` multi-assignment helper comes from the sourced script above.
c(train, test) := load_train_test()
# Tag provenance so the sets can be re-split after the shared cleanup below.
train$data = 'train'
test$data = 'test'
# level out time for merge to work
levels(test$time) = levels(train$time)
# merge datasets for now
merge = union(train, test)
# Keep only the modelling columns (note: `time` is not retained here).
merge = merge %>% dplyr :: select(shopping_pt, day, state, location, group_size, homeowner, car_age, car_value, age_oldest, age_youngest, married_couple, A, B, C, D, E, F, G, cost, hour, hod, FixedRiskFactor, FixedCPrevious, FixedDurationPrevious, purchased_A, purchased_B, purchased_C, purchased_D, purchased_E, purchased_F, purchased_G, data)
# factorize variables: categorical predictors and the per-option responses
merge$shopping_pt = factor(merge$shopping_pt)
merge$day = factor(merge$day)
merge$location = factor(merge$location)
merge$group_size = factor(merge$group_size)
merge$homeowner = factor(merge$homeowner)
merge$married_couple = factor(merge$married_couple)
merge$A = factor(merge$A)
merge$B = factor(merge$B)
merge$C = factor(merge$C)
merge$D = factor(merge$D)
merge$E = factor(merge$E)
merge$F = factor(merge$F)
merge$G = factor(merge$G)
merge$FixedRiskFactor = factor(merge$FixedRiskFactor)
merge$FixedCPrevious = factor(merge$FixedCPrevious)
merge$purchased_A = factor(merge$purchased_A)
merge$purchased_B = factor(merge$purchased_B)
merge$purchased_C = factor(merge$purchased_C)
merge$purchased_D = factor(merge$purchased_D)
merge$purchased_E = factor(merge$purchased_E)
merge$purchased_F = factor(merge$purchased_F)
merge$purchased_G = factor(merge$purchased_G)
# separate train and test
train = merge %>% filter(data == 'train')
test = merge %>% filter(data == 'test')
# merge train & test for some data manipulations
# ensure both train and test have same levels for each factor
# NOTE(review): `time` was dropped by the earlier select(), so the
# levels(test$time) assignment below is a no-op on these data -- confirm.
levels(test$shopping_pt) = levels(train$shopping_pt)
levels(test$day) = levels(train$day)
levels(test$time) = levels(train$time)
levels(test$state) = levels(train$state)
levels(test$location) = levels(train$location)
levels(test$group_size) = levels(train$group_size)
levels(test$homeowner) = levels(train$homeowner)
levels(test$car_value) = levels(train$car_value)
levels(test$married_couple) = levels(train$married_couple)
levels(test$FixedCPrevious) = levels(train$FixedCPrevious)
levels(test$FixedRiskFactor) = levels(train$FixedRiskFactor)
levels(test$A) = levels(train$A)
levels(test$B) = levels(train$B)
levels(test$C) = levels(train$C)
levels(test$D) = levels(train$D)
levels(test$E) = levels(train$E)
levels(test$F) = levels(train$F)
levels(test$G) = levels(train$G)
levels(test$hod) = levels(train$hod)
levels(test$purchased_A) = levels(train$purchased_A)
levels(test$purchased_B) = levels(train$purchased_B)
levels(test$purchased_C) = levels(train$purchased_C)
levels(test$purchased_D) = levels(train$purchased_D)
levels(test$purchased_E) = levels(train$purchased_E)
levels(test$purchased_F) = levels(train$purchased_F)
levels(test$purchased_G) = levels(train$purchased_G)
# normalize continuous variables into the 0-1 range (min-max scaling),
# separately within each data set.
# BUG FIX: the FixedDurationPrevious denominators previously subtracted
# min(car_age) instead of min(FixedDurationPrevious) (copy-paste error),
# which mis-scaled that feature in both train and test.
normalize01 <- function(v) (v - min(v)) / (max(v) - min(v))

train$car_age_norm = normalize01(train$car_age)
train$age_oldest_norm = normalize01(train$age_oldest)
train$age_youngest_norm = normalize01(train$age_youngest)
train$cost_norm = normalize01(train$cost)
train$FixedDurationPrevious_norm = normalize01(train$FixedDurationPrevious)

test$car_age_norm = normalize01(test$car_age)
test$age_oldest_norm = normalize01(test$age_oldest)
test$age_youngest_norm = normalize01(test$age_youngest)
test$cost_norm = normalize01(test$cost)
test$FixedDurationPrevious_norm = normalize01(test$FixedDurationPrevious)
# neural network (package: neuralnet)
library(neuralnet)
compute = neuralnet :: compute
# neuralnet doesnt create dummy variables for qualitative vars
# use model.matrix to do that
train.matrix = model.matrix(~ shopping_pt + day + state + group_size + homeowner +
                              car_age + car_value + age_oldest + age_youngest + married_couple +
                              FixedCPrevious + FixedRiskFactor + FixedDurationPrevious +
                              A + B + C + D + E + F + G + cost +
                              purchased_A,
                            data = train)
# Build the model formula from the design-matrix columns: predictors are
# every column except the intercept (first) and the last two (the
# purchased_A response dummies).
cn = colnames(train.matrix)
predictor_cols = cn[2:(length(cn) - 2)]
rhs = paste(predictor_cols, collapse = ' + ')
lhs = "purchased_A1 + purchased_A2"
form = paste(lhs, rhs, sep = ' ~ ')
test.matrix = model.matrix(~ shopping_pt + day + state + group_size + homeowner +
                             car_age + car_value + age_oldest + age_youngest + married_couple +
                             FixedCPrevious + FixedRiskFactor + FixedDurationPrevious +
                             A + B + C + D + E + F + G + cost +
                             purchased_A,
                           data = test)
# fit a neural network: one hidden layer of 3 units, 3 training repetitions.
nn.fit.A = neuralnet(formula = form,
                     data = train.matrix,
                     hidden = c(3),
                     rep = 3,
                     act.fct = 'logistic',
                     linear.output = TRUE)
# predictions for A
# BUG FIX: the original called compute(nn.fit.A, testdata) where `testdata`
# was never defined.  Score the test design matrix restricted to the
# predictor columns used in `form` (neuralnet::compute expects exactly the
# model's covariates, without the response dummies).
nn.predict.A = compute(nn.fit.A, test.matrix[, predictor_cols])
library(nnet)
# define formula
# Predictor set for the multinomial model (uses the min-max normalised
# versions of the continuous features).
feature.list = c("shopping_pt","day","state","group_size","homeowner","car_age_norm","car_value", "age_oldest_norm",
                 "age_youngest_norm", "married_couple", "A", "B", "C", "D", "E", "F", "G",
                 "cost_norm", "hod", "FixedRiskFactor", "FixedCPrevious", "FixedDurationPrevious_norm"
)
A.formula = as.formula(paste("purchased_A ~ ", paste(feature.list, collapse = "+")))
# multinomial log-linear neural net (nnet::multinom).
# NOTE(review): model = as.logical("true") is just model = TRUE (keeps the
# model frame on the fit object).
nn.fit.A = multinom(formula = A.formula,
                    data = train[c(feature.list, "purchased_A")],
                    model = as.logical("true")
)
# Class predictions on the held-out set, stored alongside the test rows.
nn.predict.A = predict(nn.fit.A, newdata = test[c(feature.list, "purchased_A")], type = "class")
test$predict_A = nn.predict.A
|
07c7e2412c4806c08790965b3d794d484ed7ac1a
|
1c343974009a6fb7910eb3ab338a21d865c7c870
|
/notebooks/render_integrated_datasets.R
|
40e99fec3752429939ed55ba3c73760445b29f35
|
[] |
no_license
|
szsctt/FRG_hep_snRNA
|
c8311f01967f82979a70f23121c4abbff5b9c7cf
|
32618be3dff281d0e36410b1af1d3559df32d6b6
|
refs/heads/master
| 2023-04-17T21:08:51.251906
| 2022-11-04T12:14:54
| 2022-11-04T12:14:54
| 458,967,865
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 277
|
r
|
render_integrated_datasets.R
|
# Render the integration notebook twice: once excluding and once including
# mouse cells, writing one HTML report per setting.
invisible(lapply(c(FALSE, TRUE), function(m) {
  print(glue::glue("working on mouse {m}"))
  rmarkdown::render(
    "integration.Rmd",
    params = list("include_mouse" = m),
    output_file = glue::glue("../out/Seurat/integrated/integrated_mouse{m}.html")
  )
}))
|
e2611c43c022a080a99d33b7375c34721c4a363e
|
ce6c631c021813b99eacddec65155777ca125703
|
/R/str.R
|
7c44fdfb529f769e9ef2c5ca67a11d04f2ec19a5
|
[
"LicenseRef-scancode-warranty-disclaimer",
"LicenseRef-scancode-public-domain-disclaimer"
] |
permissive
|
Zhenglei-BCS/smwrQW
|
fdae2b1cf65854ca2af9cd9917b89790287e3eb6
|
9a5020aa3a5762025fa651517dbd05566a09c280
|
refs/heads/master
| 2023-09-03T04:04:55.153230
| 2020-05-24T15:57:06
| 2020-05-24T15:57:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,329
|
r
|
str.R
|
#' Display Structure
#'
#' Displays the basic information about an object: methods for "lcens," "mcens,"
#'and "qw" data.
#'
#' @aliases str.lcens str.mcens str.qw
#' @param object an object of class "lcens,", "mcens," or "qw."
#' @param \dots any additional valid arguments ot the default method for \code{str} and
#'give.censoring, a logical value that includes the type of censoring in the output if TRUE.
#' @return Nothing is returned, the side effect is to print a short summary of the object.
#' @seealso \code{\link[utils]{str}}
#' @examples
#'
#'str(as.lcens(c(1,3), 2))
#'
#' @importFrom utils str
#' @rdname str
#' @export
#' @method str lcens
# Left-censored data shares the qw display logic; delegate directly.
str.lcens <- function(object, ...) str.qw(object, ...)
#' @rdname str
#' @export
#' @method str mcens
# Multiply-censored data shares the qw display logic; delegate directly.
str.mcens <- function(object, ...) str.qw(object, ...)
#' @rdname str
#' @export
#' @method str qw
str.qw <- function (object, ...) {
  ## Stolen from str.Date, with modifications for censoring
  # Compact structure display: class, length, censoring type, then a str()
  # of the formatted values.
  cl <- oldClass(object)
  n <- length(object)
  # Empty object: fall back to the default str() method.
  if (n == 0L) {
    def <- getS3method("str", "default")
    return(def(object))
  }
  # Cap the work at the first 1000 elements, as str.default does.
  if (n > 1000L)
    object <- object[seq_len(1000L)]
  give.length <- TRUE
  give.censoring <- TRUE
  # Honour str()-style options passed via `...`:
  # give.censoring, give.length / give.head, and vec.len tuning.
  if (length(larg <- list(...))) {
    nl <- names(larg)
    # NOTE(review): larg[[GC]] indexes with a logical vector, which only
    # works when larg has a single element; larg[[which(GC)]] would be
    # robust.  Left unchanged here -- confirm before fixing.
    if(any(GC <- nl == "give.censoring"))
      give.censoring <- larg[[GC]]
    iGiveHead <- which(nl == "give.head")
    if (any(Bgl <- nl == "give.length"))
      give.length <- larg[[which(Bgl)]]
    else if (length(iGiveHead))
      give.length <- larg[[iGiveHead]]
    if (length(iGiveHead))
      larg <- larg[-iGiveHead]
    # Shrink vec.len so the censoring prefix still fits on the line.
    if (is.numeric(larg[["nest.lev"]]) && is.numeric(v.len <- larg[["vec.len"]]))
      larg[["vec.len"]] <- min(larg[["vec.len"]], (larg[["width"]] -
        nchar(larg[["indent.str"]]) - 31)%/%19)
  }
  le.str <- if (give.length)
    paste0("[1:", as.character(n), "]")
  # Header line: class, length and (optionally) the censoring type.
  if(give.censoring) {
    cen <- censoring(object)
    if(cen == "none")
      cen <- "no"
    cat(" ", cl[1L], le.str, ", ", cen, " censoring: ", sep = "")
  } else
    cat(" ", cl[1L], le.str, sep = "")
  # NOTE(review): `cl` may have length > 1, and `if (cl == "qw")` on a
  # vector warns (and errors on R >= 4.2); cl[1L] == "qw" is presumably
  # intended -- confirm.
  if(cl == "qw") {
    formObj <- format(object)
  } else {
    # For lcens/mcens respect options(str = ...)$digits.d when formatting.
    strO <- getOption("str")
    if (!is.list(strO))
      strO <- strOptions()
    digits <- strO$digits.d
    if(is.null(digits))
      digits <- 3
    formObj <- format(object, digits=digits)
  }
  do.call(str, c(list(formObj, give.head = FALSE), larg))
}
|
92bc48d716a34a93c575e629c0dcf63fe53320c2
|
7a91b0eec2b3ab87ef6c868d1203063fa97b43d4
|
/man/glmrob.control.Rd
|
98965c9e662234688d046b6aee121354b215ea9e
|
[] |
no_license
|
cran/robustbase
|
a40f49c769a17af095660947616d9fbbbc3cf1e4
|
335b69f2310bd21ca4cdfc17a2a99ebbcad84017
|
refs/heads/master
| 2023-06-30T09:52:16.026413
| 2023-06-16T12:30:02
| 2023-06-16T12:30:02
| 17,699,299
| 7
| 8
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,622
|
rd
|
glmrob.control.Rd
|
\name{glmrob..control}
\title{Controlling Robust GLM Fitting by Different Methods}
\alias{glmrobMqle.control}
\alias{glmrobMT.control}
\alias{glmrobBY.control}
\description{
These are auxiliary functions as user interface for \code{\link{glmrob}} fitting
when the different methods, \code{"Mqle"}, \code{"BY"}, or
\code{"MT"} are used. Typically only used when calling \code{\link{glmrob}}.
}
\usage{
glmrobMqle.control(acc = 1e-04, test.acc = "coef", maxit = 50, tcc = 1.345)
glmrobBY.control (maxit = 1000, const = 0.5, maxhalf = 10)
glmrobMT.control (cw = 2.1, nsubm = 500, acc = 1e-06, maxit = 200)
}
\arguments{
\item{acc}{positive convergence tolerance;
the iterations converge when ???}
\item{test.acc}{Only "coef" is currently implemented}
\item{maxit}{integer giving the maximum number of iterations. }
\item{tcc}{tuning constant c for Huber's psi-function}
\item{const}{for "BY", the normalizing constant ..}% FIXME
\item{maxhalf}{for "BY"; the number of halving steps when the gradient
itself no longer improves. We have seen examples when increasing
\code{maxhalf} was of relevance.}
\item{cw}{tuning constant c for Tukey's biweight psi-function}
\item{nsubm}{the number of subsamples to take for finding an initial
estimate for \code{method = "MT"}.}
}
%% \details{
%% }
\value{
A \code{\link{list}} with the arguments as components.
}
\author{Andreas Ruckstuhl and Martin Maechler}
\seealso{\code{\link{glmrob}}}
\examples{
str(glmrobMqle.control())
str(glmrobBY.control())
str(glmrobMT.control())
}
\keyword{robust}
\keyword{regression}
\keyword{nonlinear}
|
3d2faf5f581217deadba399a301d44f23aa7a0bb
|
78aa3820d01482c8435cd3418e75633f07767846
|
/lab1.r
|
8fffd1fc864f339e08961b02e23fa2f87817ee92
|
[] |
no_license
|
NicolasRomeroF/AnalisisDatosLab1
|
a438f36bdfa196160c421e1f54b4cfcf10a22c4c
|
5f834700d289705dc6c5f1ed1789b8464aa50f52
|
refs/heads/master
| 2020-04-05T02:32:36.699303
| 2018-11-19T05:35:17
| 2018-11-19T05:35:17
| 156,481,047
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 8,183
|
r
|
lab1.r
|
library(ggplot2)
library(gridExtra)
# Path to the UCI hepatitis data file; '?' marks missing values in it.
path = "~/Escritorio/USACH/Analisis de Datos/Lab1/hepatitis.data"
#path = "~/Documentos/AnalisisDatosLab1/hepatitis.data"
hepatitis <- read.table(path,sep=",", na.strings = c("?"))
# Column names in file order; CLASS codes the outcome (1 = die, 2 = live,
# per the subset labels used further below).
names <- c("CLASS","AGE","SEX","STEROID","ANTIVIRALS","FATIGUE","MALAISE",
           "ANOREXIA","LIVER_BIG","LIVER_FIRM","SPLEEN_PALPABLE","SPIDERS",
           "ASCITES","VARICES","BILIRUBIN","ALK_PHOSPHATE","SGOT","ALBUMIN",
           "PROTIME","HISTOLOGY")
colnames(hepatitis) <- names
# Return the most frequent value in `x`; on ties the value that appears
# first in `x` wins (which.max picks the first maximum).
getmode <- function(x) {
  candidates <- unique(x)
  counts <- tabulate(match(x, candidates))
  candidates[which.max(counts)]
}
# Complete cases only, then per-column descriptive statistics.
hepatitis.without.na <- na.omit(hepatitis)
means <- sapply(hepatitis.without.na,mean)
medians <- sapply(hepatitis.without.na,median)
modes <- sapply(hepatitis.without.na,getmode)
vars <- sapply(hepatitis.without.na,var)
# Split by outcome class (CLASS: 1 = die, 2 = live) and print summaries.
hepatitis.die <- hepatitis.without.na[which(hepatitis.without.na$CLASS == 1),]
hepatitis.live <- hepatitis.without.na[which(hepatitis.without.na$CLASS == 2),]
print(summary(hepatitis.die))
print(summary(hepatitis.live))
# p.1 <- ggplot(hepatitis.without.na, aes(x=NULL, y=AGE)) + geom_boxplot()
#
# p.2 <- ggplot(hepatitis.without.na, aes(x=NULL, y=BILIRUBIN)) + geom_boxplot()
#
# p.3 <- ggplot(hepatitis.without.na, aes(x=NULL, y=ALK_PHOSPHATE)) + geom_boxplot()
#
# p.4 <- ggplot(hepatitis.without.na, aes(x=NULL, y=SGOT)) + geom_boxplot()
#
# p.5 <- ggplot(hepatitis.without.na, aes(x=NULL, y=ALBUMIN)) + geom_boxplot()
#
# p.6 <- ggplot(hepatitis.without.na, aes(x=NULL, y=PROTIME)) + geom_boxplot()
#
# show(p.1)
# show(p.2)
# show(p.3)
# show(p.4)
# show(p.5)
# show(p.6)
# The original code repeated the same recode-and-plot pattern (13 binary
# variables x 2 class subsets = 26 near-identical chunks); it is factored
# into one helper plus a loop.  The plots are produced in the same order as
# before: for each variable, the DIE subset then the LIVE subset.

# Plot the proportion of each level of one 1/2-coded column as a bar chart
# for one class subset, and display it.
#   df      data frame subset (hepatitis.die or hepatitis.live)
#   column  name of the 1/2-coded column to plot
#   labels  c(label_for_code_1, label_for_code_2)
#   title   plot title ("Class: DIE" / "Class: LIVE")
plot_binary_prop <- function(df, column, labels, title) {
  raw <- df[[column]]
  recoded <- rep(labels[1], length(raw))
  recoded[raw == 2] <- labels[2]
  p <- ggplot(data = df, aes(x = recoded, y = ..prop.., group = 1)) +
    geom_bar(stat = "count") +
    ggtitle(title)
  show(p)
  invisible(p)
}

# Binary columns to visualise; SEX uses male/female labels, the rest yes/no.
binary_columns <- c("SEX", "STEROID", "ANTIVIRALS", "FATIGUE", "MALAISE",
                    "ANOREXIA", "LIVER_BIG", "LIVER_FIRM", "SPLEEN_PALPABLE",
                    "SPIDERS", "ASCITES", "VARICES", "HISTOLOGY")
for (column in binary_columns) {
  labels <- if (column == "SEX") c("male", "female") else c("no", "yes")
  plot_binary_prop(hepatitis.die, column, labels, "Class: DIE")
  plot_binary_prop(hepatitis.live, column, labels, "Class: LIVE")
}
|
4d17e58ab2f01a91c5d41707cefea0772ea9a4b0
|
abd37a449baf156dfcdb03f26885fb03000c0d54
|
/feathers/script.R
|
ea9470e977268b7b09f7d201c74d89050c1c4919
|
[
"MIT"
] |
permissive
|
biologik303/data_art_gganimate
|
6844e8276e6713cf90a00ef2dd312d72f544a146
|
bf1f95125f440a8ce0a6ea64bed329c3c23c60c1
|
refs/heads/main
| 2023-07-09T16:19:14.748621
| 2021-08-11T09:58:06
| 2021-08-11T09:58:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,411
|
r
|
script.R
|
library(tidyverse)
library(gganimate)
library(paletteer)
library(gganimate)
# Generate the generative-art curve data: a data frame with columns
# t, x, y, z evaluated over seq(from, n, by).  rand1..rand3 perturb the
# phases and damping of the trigonometric terms.
# Rewritten with a single base data.frame() call -- identical columns and
# values as the original dplyr mutate() chain, but this helper no longer
# needs magrittr/dplyr to run.
GenArt_ellipse = function(n, from, by = 1, rand1, rand2, rand3) {
  t <- seq(from, n, by)
  data.frame(
    t = t,
    x = sin(t + rand1) + sin(t + rand2) * exp(rand3)^-rand1,
    y = cos(t - rand3) + cos(t - rand3) * exp(rand2)^-rand1,
    z = sin(t + rand3) + tan(t + rand2) * exp(rand1)^-rand2
  )
}
# BUG FIX: the original assigned dat$state before `dat` existed (error:
# object 'dat' not found), and the subsequent GenArt_ellipse() call would
# have overwritten the column anyway.  Create the data first; plot() still
# sees the numeric columns only, as in the original flow.
GenArt_ellipse(1000, 1, by = 1, 5, 68, 200) -> dat
plot(dat)
# Grouping labels: 10 blocks of 100 rows (not used by the animation below).
dat$state <- rep(c('a', 'b', "c", "d", "e", "f", "g", "h", "i", "j"),
                 c(100, 100, 100, 100, 100, 100, 100, 100, 100, 100))
# Mirrored curve layers coloured by z*x on a black background.
# (A stray empty argument in the second aes() call was removed.)
ggplot(dat)+
  geom_curve(aes(x = t, xend = y*z, y = y*t, yend = x*y, color = z*x, alpha = sin(z*x)), show.legend = FALSE, size = 0.5)+
  geom_curve(aes(x = -t, xend = -y*z, y = -y*t, yend = -x*y, color = z*x, alpha = sin(z*y)), show.legend = FALSE, size = 0.5)+
  scale_color_gradientn(colors = paletteer_c("grDevices::Spectral", 30) ) +
  theme_void() + theme(plot.background = element_rect(fill = "black", color = "black")) -> plot
plot
# Animate the reveal along sin(z)*t with a short trailing wake.
plot + transition_reveal(along = sin(z)*t) +
  shadow_wake(wake_length = 0.1) +
  ease_aes("circular-in-out") -> anim
animate(anim, nframes = 200, width = 6, height =6, units = "in", fps = 15, duration = 10, res = 150) -> anim_fin
anim_save(anim_fin,
          path = r"(your_directory)",
          filename = "feathers.gif")
|
e25a9acb10e5663d9c397aae68fffdb3062b8939
|
ce4b293b81ce787c2b183c9df3abbdf414543799
|
/merge_wigs.r
|
30812b717066ac239296a682230862acd6aef056
|
[] |
no_license
|
BethanyStone/NGS-scripts
|
61d46f30cf3cc8b0d93584766cf0bcac238c7f68
|
13f404b53616c0672bc25c6f2e76755f3ddf8be3
|
refs/heads/master
| 2021-01-22T18:37:45.398668
| 2018-06-05T07:47:27
| 2018-06-05T07:47:27
| 102,410,772
| 0
| 1
| null | 2017-09-04T23:24:00
| 2017-09-04T23:24:00
| null |
UTF-8
|
R
| false
| false
| 1,437
|
r
|
merge_wigs.r
|
#!/usr/bin/env Rscript
# merge wigs to make pairwise correlation matrices
# run in directory with wig files to correlate
# Usage: Rscript merge_wigs.r <context>  -- matches files named
# *<context>_100bp.wig in the working directory.
options(echo=T)
args = commandArgs(trailingOnly=T)
print(args)
context=args[1]
files=dir(pattern=paste0(context,"_100bp.wig"))
# Seed the merged table with the first file: columns 1-4 are chrom, start,
# end, value; drop the organellar chromosomes (Mt, Pt).
data <- read.delim(files[1], head=F, skip=1)
data <- data[,1:4]
data <- data[data$V1 != 'Mt',]
data <- data[data$V1 != 'Pt',]
# Sample name = filename up to the first "_C" (start of the context suffix).
test <- as.numeric(regexec(text=paste0(files[1]), pattern='_C'))
name <- substr(paste0(files[1]), start=1, stop=test-1)
colnames(data)=c('V1','V2','V3',paste(name))
# Merge each remaining file on (chrom, start, end).
# NOTE(review): merging inside a loop re-merges the growing table each pass;
# fine for a handful of samples, consider Reduce(merge, ...) for many.
for(i in 2:length(files)){
  file=read.delim(files[i],head=F,skip=1)
  file=file[,1:4]
  file <- file[file$V1 != 'Mt',]
  file <- file[file$V1 != 'Pt',]
  test <- as.numeric(regexec(text=paste0(files[i]), pattern='_C'))
  name <- substr(paste0(files[i]), start=1, stop=test-1)
  colnames(file)=c('V1','V2','V3',paste(name))
  temp=merge(data,file,by=c('V1','V2','V3'),all=T)
  data=temp
}
# Keep windows covered in every sample, then correlate the sample columns.
test=data[complete.cases(data),]
a <- cor(as.matrix(test[,4:length(test)]))
# output correlation matrix with hierarchical clustering
# hc <- hclust(as.dist(a))
# b <- a[hc$order, hc$order]
# write.table(b, 'correlation_matrix_hc_ordered.txt', sep='\t', row.names=T, col.names=T, quote=F)
library(gplots)
# Clustered heatmap of the pairwise correlations, one PDF per context.
pdf(file=paste0('wig_cor_',context,'.pdf'), width=8, height = 7.5, pointsize = 10)
heatmap.2(a,
          trace='none',
          density.info='none',
          symm=F,
          symkey=F,
          key=T,
          dendrogram='both',
          cexCol=1,
          cexRow=1)
dev.off()
|
3cc3a48db257d2fda4371482589c099032cfb9f5
|
b2213d14bb828f04538f0e553750d6e5ad258a24
|
/R/get_gear.R
|
23beb0b65de94b327bf3b3ead0ea4995b54d37c6
|
[
"CC0-1.0",
"LicenseRef-scancode-unknown-license-reference",
"LicenseRef-scancode-public-domain"
] |
permissive
|
fawda123/rStrava
|
99b8cf6ab0940e212c1f82cbbad1fd32587cd1cb
|
49c887dd4448fc56aac4fbb63bc84db67b26db75
|
refs/heads/master
| 2023-06-18T00:17:13.834153
| 2023-06-16T11:55:11
| 2023-06-16T11:55:11
| 23,404,183
| 148
| 38
| null | 2023-02-01T02:27:38
| 2014-08-27T20:23:47
|
R
|
UTF-8
|
R
| false
| false
| 882
|
r
|
get_gear.R
|
#' Get gear details from its identifier
#'
#' Get gear details from its identifier
#'
#' @param id string, identifier of the equipment item
#' @param stoken A \code{\link[httr]{config}} object created using the \code{\link{strava_oauth}} function
#'
#' @details Requires authentication stoken using the \code{\link{strava_oauth}} function and a user-created API on the strava website.
#'
#' @return Data from an API request.
#'
#' @export
#'
#' @concept token
#'
#' @import httr
#'
#' @examples
#' \dontrun{
#' # create authentication token
#' # requires user created app name, id, and secret from Strava website
#' stoken <- httr::config(token = strava_oauth(app_name, app_client_id,
#' app_secret, cache = TRUE))
#'
#' get_gear("g2275365", stoken)
#' }
get_gear <- function(id, stoken){
  # Build the gear endpoint URL for this id, then issue the
  # authenticated request.
  gear_url <- url_gear(id)
  get_basic(gear_url, stoken)
}
|
c6fe6c976ab40c46968d63c764ec50b31564a55a
|
b2d91fdcedbe5d9f6d88d2942c0867d7aa6dffe2
|
/r_tut10_multidimensionalarrays.R
|
7da8665f0e594e358fe291f0ed1d5c4eff701600
|
[] |
no_license
|
roisinod/r_programming_excerises
|
bd040482a2bd4d6d4073caac2fc176f4204003f9
|
ae24cd1277f70d0d835a1a360aa510058019ab99
|
refs/heads/main
| 2023-02-18T00:59:22.673109
| 2021-01-20T13:34:40
| 2021-01-20T13:34:40
| 329,304,637
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 84
|
r
|
r_tut10_multidimensionalarrays.R
|
# Multi-dimensional arrays: fill a 2 x 2 x 2 array with the values 1..8
# (filled in column-major order, as usual in R).
array1 <- array(data = seq_len(8), dim = c(2, 2, 2))
# Element at row 1, column 2 of the second 2 x 2 slice (value 7).
array1[1, 2, 2]
|
9830a68fa6ed1077677ee3df26cada6e5be016f4
|
9e5a8d51d4f6995a23ea7c6025a54932e8cd2aac
|
/R/markergene_config.R
|
893ddcaa27d47e892e4443ce721332dd2a1e80ee
|
[] |
no_license
|
XingjieShi/scRNAIdent
|
f923ad93fd2e5c84080b205cb966f2aeeb41c9e3
|
1293373ad459736011daa6fa2989d6129d105979
|
refs/heads/master
| 2023-09-05T13:30:31.380064
| 2021-11-19T15:03:42
| 2021-11-19T15:03:42
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 159
|
r
|
markergene_config.R
|
# Marker-gene configuration for each supported method.

# cellassign: no base cell type is assumed by default.
markergene.config.cellassign <- list(base_cell_type = NULL)

# Seurat marker-finding settings: positive markers only, gene detected in at
# least 25% of cells, log fold-change threshold 0.25, keep top 20 markers.
markergene.config.seurat <- list(
  only_pos        = TRUE,
  min_pct         = 0.25,
  logfc_threshold = 0.25,
  marker_gene_num = 20
)
|
5ca69bbef0a3252f53e190d69a7b6d46df96e012
|
282937161c6f9e14877757c56dabc66d0621a413
|
/EMD vs FFT/fft_filter.R
|
13c8d653911bfa436a59354b9877f67d457455b2
|
[] |
no_license
|
tarasevic-r/Vibro-acoustic
|
e292a6c1ebd1121d5640d9a17600466a8d9ba3f4
|
3eda43eed45d8eeef37c547ec479223b70589a71
|
refs/heads/master
| 2022-04-18T16:19:59.853574
| 2020-04-02T06:25:38
| 2020-04-02T06:25:38
| 247,627,489
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,358
|
r
|
fft_filter.R
|
## FFT post-processing: keep only the 3 strongest Fourier coefficients of
## signal 1 and report how much magnitude was kept vs discarded.
## Operates on `magnitude1` (coefficient magnitudes) and `FTs1` (complex FFT
## coefficients) created earlier in the pipeline; the parallel code for a
## second signal (FTs2/magnitude2) is kept commented out, as in the original.
# 2020-01-15 T15:18 RT

# Indices of every coefficient except the 3 largest, weakest first.
# FIX: use seq_len() so that when length(magnitude1) <= 3 this yields an
# empty index vector instead of the bogus c(1, 0) produced by 1:(n-3).
idx_weak_FTs1 <- order(magnitude1, decreasing = FALSE)[seq_len(max(length(magnitude1) - 3, 0))]
# idx_weak_FTs2 <- order(magnitude2, decreasing = FALSE)[seq_len(max(length(magnitude2) - 3, 0))]

# Zero out the weak coefficients, keeping only the 3 dominant ones.
# (Rewritten from the confusing right-assignment `0 -> FTs1[...]`.)
if (length(idx_weak_FTs1) > 0) {
  FTs1[idx_weak_FTs1] <- 0
}

# The dominant FFT coefficients and their magnitudes.  Guarded so that an
# empty weak-index vector keeps ALL coefficients instead of none.
FTs1_strong <- if (length(idx_weak_FTs1) > 0) FTs1[-idx_weak_FTs1] else FTs1
magnitude1_strong <- round(Mod(FTs1_strong), 2)
# magnitude2_strong <- round(Mod(FTs2_strong), 2)

# Report how much magnitude the weak vs strong coefficients carry.
cat(
  ' number of weak s1:', length(magnitude1[idx_weak_FTs1]),
  '& sum:', round(sum(magnitude1[idx_weak_FTs1]), 2), '\t',
  'number of strong s1:', length(magnitude1_strong),
  '& sum:', sum(magnitude1_strong), '\n'
)

# NOTE(review): results were flagged as questionable in the original
# ("abejotini rezultatai").
## join reverse vector (matlab: fft_shift)
# FTs1 = c(FTs1, rev(FTs1))
# FTs2 = c(FTs2, rev(FTs2))
|
207f2153a6b1c130b1f98ce2449068796ff2a1f0
|
979adfd664785863c04de2666753dede653372b0
|
/Códigos/graph_text_mining.R
|
4ccd6dca8bc7d3be2757c3e479cc2cc29d199d64
|
[] |
no_license
|
trifenol/trifenol.github.io
|
8c136a7ea73f578102e405d05a39f43dbfb7c35a
|
821735b0b93cf12d3c883d07ff7a48261759feb4
|
refs/heads/master
| 2021-11-25T22:22:40.588435
| 2021-11-10T15:07:46
| 2021-11-10T15:07:46
| 99,167,743
| 2
| 2
| null | null | null | null |
ISO-8859-1
|
R
| false
| false
| 4,818
|
r
|
graph_text_mining.R
|
# Clear every object already loaded into the workspace.
# NOTE(review): rm(list = ls()) at the top of a script is generally
# discouraged — it wipes the user's session.
rm(list=ls(all=TRUE))
# Show the current working directory, then point it at your own project
# folder (the placeholder string means "INSERT YOUR WORKING FOLDER HERE").
getwd()
setwd("INSIRA_AQUI_SUA_PASTA_DE_TRABALHO")
# ipak: install (if needed) and load multiple R packages in one call.
# Any package in `pkg` that is not yet installed is installed first (with its
# dependencies); every package is then attached via require() and the named
# logical vector of load statuses is returned.
ipak <- function(pkg) {
  not_installed <- pkg[!(pkg %in% installed.packages()[, "Package"])]
  if (length(not_installed)) {
    install.packages(not_installed, dependencies = TRUE)
  }
  sapply(pkg, require, character.only = TRUE)
}
# Install / load every package used in this script.
packages <- c("rtweet", "dplyr", "ggplot2", "tidytext","tm", "igraph", "ggraph", "tidyr", "widyr")
ipak(packages)

# The name you gave to your Twitter application.
appname <- "TesteRJanderson"
## api key (fictitious example below)
key <- "0PPlbCn27tTGDR94GgZG2xDO8"
## api secret (fictitious example below)
secret <- "9v1HvT2dTpyH4S9UDX4vjsf42hp6S2Vcjzh32EUIcu2n9ixPHt"
# Create an OAuth token named "twitter_token".
twitter_token <- create_token(
  app = appname,
  consumer_key = key,
  consumer_secret = secret)

#########################################
# Search for tweets about Anitta (network call to the Twitter API).
search_anitta <- search_tweets(q = "anitta OR #anitta", retryonratelimit = TRUE, lang = "en", n = 10000, include_rts = FALSE)
# Inspect the retrieved data.
head(search_anitta$text)

###############################################
# Clean the data.
# Remove URLs by hand.
search_anitta$stripped_text <- gsub("http.*","", search_anitta$text)
search_anitta$stripped_text <- gsub("https.*","", search_anitta$stripped_text)
# Tokenise: strips punctuation, lower-cases, one word per row, with an ID
# per tweet.
search_anitta_clean <- search_anitta %>%
  dplyr::select(stripped_text) %>%
  unnest_tokens(word, stripped_text)

# Plot the top 15 words (axis labels kept in Portuguese, as authored).
search_anitta_clean %>%
  count(word, sort = TRUE) %>%
  top_n(15) %>%
  mutate(word = reorder(word, n)) %>%
  ggplot(aes(x = word, y = n)) +
  geom_col() +
  xlab(NULL) +
  coord_flip() +
  labs(x = "Contagem",
       y = "Palavras únicas",
       title = "Contagem de palavras únicas encontradas nos tweets")

# Load the list of stop words shipped with the tidytext package.
data("stop_words")
# First 6 rows of the stop-word list.
head(stop_words)
# Current number of rows of search_anitta_clean.
nrow(search_anitta_clean)
## [1] 128597
# Remove the stop words from our analysis data frame.
cleaned_tweet_words <- search_anitta_clean %>%
  anti_join(stop_words)
# Note that the data frame now has fewer words.
nrow(cleaned_tweet_words)
## [1] 70697

# Plot the top 15 words again, with stop words removed.
cleaned_tweet_words %>%
  count(word, sort = TRUE) %>%
  top_n(15) %>%
  mutate(word = reorder(word, n)) %>%
  ggplot(aes(x = word, y = n)) +
  geom_col() +
  xlab(NULL) +
  coord_flip() +
  labs(y = "Contagem",
       x = "Palavras únicas",
       title = "Contagem de palavras únicas encontradas nos tweets",
       subtitle = "Stop words removidas dessa análise")

##############################################
# Explore the word network.
# Build bigrams (pairs of consecutive words).
search_anitta_paired_words <- search_anitta %>%
  dplyr::select(stripped_text) %>%
  unnest_tokens(paired_words, stripped_text, token = "ngrams", n = 2)
search_anitta_paired_words %>%
  count(paired_words, sort = TRUE)
# Split each bigram into two columns.
search_anitta_separated_words <- search_anitta_paired_words %>%
  separate(paired_words, c("word1", "word2"), sep = " ")
search_anitta_filtered <- search_anitta_separated_words %>%
  filter(!word1 %in% stop_words$word) %>%
  filter(!word2 %in% stop_words$word)
# New bigram counts, stop words excluded.
anitta_words_counts <- search_anitta_filtered %>%
  count(word1, word2, sort = TRUE)
head(anitta_words_counts)

# Plot the word network produced by the Anitta search.
anitta_words_counts %>%
  filter(n >= 30) %>%
  graph_from_data_frame() %>%
  ggraph(layout = "fr") +
  geom_edge_link(aes(edge_alpha = n, edge_width = n)) +
  geom_node_point(color = "darkslategray4", size = 3) +
  geom_node_text(color = "red", aes(label = name), vjust = 1.8, size=3) +
  labs(title= "Grafo de Palavras: Anitta ",
       subtitle = "Mineração de dados Textuais de dados do Twitter usando o R ",
       x = "", y = "")

##############################################
# Where do people talk about Anitta?  Geocode the tweets and plot them.
geo_anitta <- lat_lng(search_anitta)
## Plot the world map with one point per geocoded tweet.
par(mar = c(0,0,0,0))
maps::map("world", lwd = 0.25)
with(geo_anitta, points(lng, lat, pch = 20, cex = .75, col = rgb(0, .3, .7, .75)))
|
e9c7bca54b7cc975aa43c88d9e05bf0e6e5a8327
|
ffa47806e5cf2d295109aa53e03bcf1650ae0fe6
|
/R/main.R
|
6bc04e5de5dfb52c36c113fa85fe5bc2cfca79af
|
[
"MIT"
] |
permissive
|
ShadowFiendSF/TFCluster
|
a44d904a334c3df07eb310e9f8f6b66f82348b87
|
95eba8a5543e392a3ff762467623a12deceeae63
|
refs/heads/master
| 2021-08-28T04:47:00.698275
| 2017-12-11T07:41:14
| 2017-12-11T07:41:14
| 112,009,776
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,886
|
r
|
main.R
|
#
# This script was used for eliminating redundant motif
#
# Author: Ryan Lee
# Date: 2017/10/16
# 1. determined pairwise motif similarity using the TOMTOM program
# 2. compiled a pseudo-distance matrix
# 3. 10 - log10-transformed TOMTOM q value
#
# @param memeFile meme file
# @param tomtomFile tomtom format filename
# @param BA, BC, BG, BT genome backgroud A, C, G, T frequency
# @param threshold q value quantile by default "75%"
#
#
##########################################################################
###################R version function#####################################
#############another Cpp version for better performance###################
#output <- function(resultList, memeFile, outputFile="result.meme")
#{
# con<-file(memeFile, open="r")
# out<-file(outputFile, open="wt")
# while(length(oneLine<-readLines(con, n=1))>0)
# {
# if(grepl("^(MEME|ALPHABET|strands)", oneLine, perl=T))
# writeLines(oneLine, out, sep = "\n\n")
# if(grepl("^MOTIF\\.*", oneLine, perl=T)) break
# }
#
# print_meme<-function(x, con = con)
# {
# motifID <- names(x)
# motifID<-paste("^MOTIF", motifID, sep=" ")
# for(i in 1:length(motifID))
# {
# seek(con, where = 0, origin = "start", rw = "read")
# while(length(oneLine<-readLines(con, n=1))>0)
# {
# find <- FALSE
# if(grepl(motifID[i], oneLine, perl=T))
# {
# writeLines(oneLine, out, sep = "\n")
# while(length(oneLine<-readLines(con, n=1))>0)
# {
# if(grepl("^(URL)", oneLine, perl=T))
# {
# writeLines(oneLine, out, sep = "\n\n")
# break
# }else{
# writeLines(oneLine, out, sep = "\n")
# }
# }
# find <- TRUE
# break
# }
# }
# if(!find) stop("Loss motif!\n")
# }
# }
#
# lapply(resultList, print_meme, con = con)
#
# close(con)
# close(out)
#}
# Write the selected motifs back out in MEME format.
#
# Validates the inputs, then delegates the actual file writing to the
# compiled C routine `output_`.
#
# @param resultList list of motif clusters to write (must be non-empty).
# @param memeFile   character scalar: path of the input meme file.
# @param outputFile character scalar: path of the meme file to write.
.output<-function(resultList, memeFile, outputFile="result.meme")
{
	# BUG FIX: the original chained these checks with `&&`, so an error was
	# raised only when ALL arguments were of the wrong type (resp. wrong
	# length); `||` rejects any single bad argument.
	if(!is.list(resultList) || !is.character(memeFile) || !is.character(outputFile))
		stop("ParamterTypeError!")
	if(length(memeFile)!=1 || length(outputFile)!=1)
		stop("FileParamError!")
	if(length(resultList)==0)
		stop("ResultListLengthError!")
	.Call("output_", resultList, memeFile, outputFile)
}
# Cluster redundant motifs and write the reduced motif set to disk.
#
# Pipeline: pairwise TOMTOM pseudo-distances -> hierarchical clustering into
# `Numclusters` groups -> per-cluster member selection by relative entropy
# against the genome background (BA/BC/BG/BT) -> MEME-format output.
#
# @param memeFile    path to the input meme file.
# @param tomtomFile  path to the TOMTOM result file.
# @param outputFile  intended path of the reduced meme file.
# @param Numclusters number of clusters to cut the tree into.
# @param threshold   q-value quantile used when selecting cluster members.
# @param BA,BC,BG,BT genome background A/C/G/T frequencies.
# @return the list of selected motifs per cluster (also written to disk).
main<-function(memeFile, tomtomFile, outputFile="result.meme", Numclusters = 300, threshold = "75%", BA=0.25, BC=0.25, BG=0.25, BT=0.25)
{
	disMat <- getDistMat(filename = tomtomFile)
	message("Compiled Distance Matrix!")
	clusterRes <- hierarchicalClustering(disMat = disMat, k = Numclusters)
	categoryList <- cutree2category(clusterRes)
	memeList <- parseMeme(filename = memeFile)
	relativeEntropyList <- relativeEntropy(meme=memeList, BA=BA, BC=BC, BG=BG, BT=BT)
	message("Computation of relative Entropy Completed! ")
	result<-getMember(categoryList=categoryList, relativeEntropyList=relativeEntropyList ,threshold=threshold)
	# NOTE(review): `outputFile` is NOT forwarded here, so .output() always
	# writes to its default "result.meme" even though the message below
	# reports `outputFile` — confirm intended behavior.
	.output(result, memeFile)
	message(paste("Result is ", outputFile))
	return(result)
}
# Smoke test for the full pipeline.
#
# Runs main() on a fixed dataset, counts the motifs kept across all clusters
# in memory, re-counts the MOTIF records actually written to "result.meme",
# and checks that the two totals agree.
# NOTE(review): input paths are hard-coded to the original author's machine.
testit<-function()
{
	result<-TFCluster::main("/data2/lizhaohong/motifAnalysis/all.meme","/data2/lizhaohong/motifAnalysis/tomtom_output_files_thresh1/tomtom.txt")
	# Private environment used as a mutable counter inside lapply().
	localE<-new.env()
	attr(localE, "name")<-"testit"
	assign("sum", 0, envir=localE)
	# Accumulate the number of motifs over every cluster in the result list.
	lapply(result,function(x)
	{
		tmpsum <- get("sum", envir = localE)
		tmpsum <- tmpsum + length(x)
		assign("sum", tmpsum, envir = localE)
	}
	)
	message(paste("The number of TF: ", get("sum", envir = localE),"!",sep=""))
	# Count the MOTIF header lines in the file main() just wrote.
	con<-file("result.meme", open="r")
	total <- 0
	while(length(oneLine<-readLines(con, n=1))>0)
	{
		if(grepl("^MOTIF\\.*", oneLine, perl=T)) total <- total + 1
	}
	message(paste("The number of TF: ", total,"!",sep=""))
	close(con)
	unlink("result.meme")
	# The in-memory count and the on-disk count must match.
	if(total == get("sum", envir=localE))
		message("Pass the test!")
	else
		stop("Test failed!")
}
|
81803ecc9fe2f1fdce08061fb53b012985c2182b
|
e4bfd14f59339fa79f1297045937f256a7d5baa9
|
/fastq_trimming/initLesson.R
|
853e917ebf9545bbb28d9292686d7c6a6f0f406a
|
[] |
no_license
|
biocswirl-dev-team/BiocSwirl_RNAseq
|
d4c4dfef0f7751e14ee9d28d6c2135aeebf1271a
|
9b6ae81f2e6092dd11446fd4ca525d5700ce1c1e
|
refs/heads/main
| 2023-03-13T23:21:36.348579
| 2021-03-11T05:09:38
| 2021-03-11T05:09:38
| 323,150,222
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,874
|
r
|
initLesson.R
|
# Code placed in this file will be executed every time the
# lesson is started. Any variables created here will show up in
# the user's working directory and thus be accessible to them
# throughout the lesson.
# Resolve the directory that holds installed swirl courses.
# Prefer swirl's own (internal) swirl_courses_dir(); if that private API is
# unavailable, fall back to the "Courses" folder inside the swirl package.
.get_course_path <- function() {
  fallback <- function(cond) {
    file.path(find.package("swirl"), "Courses")
  }
  tryCatch(swirl:::swirl_courses_dir(), error = fallback)
}
# FastQC "Overrepresented sequences" table for sample SRR11412215, embedded
# here so the lesson can display it without re-running FastQC.
overrepresentedSequences <- data.frame(Sequence = c("GGGCAGGATAGTTCAGACGGTTTCTATTTCCTGAGCGTCTGAGATGTTAG", "GTCTGTTAGTAGTATAGTGATGCCAGCAGCTAGGACTGGGAGAGATAGGA", "CCCCTTACTCAGCTTGAACTTGTCGCCCTCTTGGCAGGAGTACTTGTGGA", "GGGAGGGCGATGAGGACTAGGATGATGGCGGGCAGGATAGTTCAGACGGT", "CCCGTATCGAAGGCCTTTTTGGACAGGTGGTGTGTGGTGGCCTTGGTATG", "GCCTGGTTCTAGGAATAATGGGGGAAGTATGTAGGAGTTGAAGATTAGTC", "GTGGTGATTAGTCGGTTGTTGATGAGATATTTGGAGGTGGGGATCAATAG", "GGGGCAATGAATGAAGCGAACAGATTTTCGTTCATTTTGGTTCTCAGGGT", "CCCCCTTACTCAGCTTGAACTTGTCGCCCTCTTGGCAGGAGTACTTGTGG"),
                                      Count = c(12671, 10274, 5055, 4772, 4753, 4597, 4586, 4496, 4296),
                                      Percentage = c(0.2974133884, 0.2411510656, 0.1186508309, 0.1120082621, 0.1115622946, 0.1079006666, 0.1076424749, 0.1055299972, 0.1008356023),
                                      Possible.Source = c("No Hit", "No Hit", "No Hit", "No Hit", "No Hit", "No Hit", "No Hit", "No Hit", "No Hit"))
# Pre-rendered FastQC plots shipped with the course (read via png::readPNG).
p1 <- readPNG(file.path(.get_course_path(), 'BiocSwirl_RNAseq', 'fastq_trimming', 'per-base-quality.png'))
p2 <- readPNG(file.path(.get_course_path(), 'BiocSwirl_RNAseq', 'fastq_trimming', 'base-sequence-content.png'))
p3 <- readPNG(file.path(.get_course_path(), 'BiocSwirl_RNAseq', 'fastq_trimming', 'seq-len-distr.png'))
# Path to the full FastQC HTML report bundled with the lesson.
report_html <- file.path(.get_course_path(), 'BiocSwirl_RNAseq', 'fastq_trimming', 'SRR11412215_fastqc.html')
# Function for local testing
#.get_course_path <- function() {'courses'}
|
c87a8b0e03f93aa57d7b8ede4ac8ee85ffbff1c7
|
2e5bcb3c8028ea4bd4735c4856fef7d6e46b5a89
|
/R/GcContentNormalization.R
|
31a6a7ab8428515b74a2bc896367ad9883062631
|
[] |
no_license
|
HenrikBengtsson/aroma.affymetrix
|
a185d1ef3fb2d9ee233845c0ae04736542bb277d
|
b6bf76f3bb49474428d0bf5b627f5a17101fd2ed
|
refs/heads/master
| 2023-04-09T13:18:19.693935
| 2022-07-18T10:52:06
| 2022-07-18T10:52:06
| 20,847,056
| 9
| 4
| null | 2018-04-06T22:26:33
| 2014-06-15T03:10:59
|
R
|
UTF-8
|
R
| false
| false
| 13,979
|
r
|
GcContentNormalization.R
|
###########################################################################/**
# @RdocClass GcContentNormalization
#
# @title "The GcContentNormalization class"
#
# \description{
# @classhierarchy
# }
#
# @synopsis
#
# \arguments{
# \item{dataSet}{A @see "CnChipEffectSet".}
# \item{...}{Additional arguments passed to the constructor of
# @see "ChipEffectTransform".}
# \item{targetFunction}{A @function. The target function to which all arrays
# should be normalized to.}
# \item{subsetToFit}{The units from which the normalization curve should
# be estimated. If @NULL, all are considered.}
# }
#
# \section{Fields and Methods}{
# @allmethods "public"
# }
#
# \section{Requirements}{
# This class requires an Aroma unit GC-content (UGC) file.
# }
#
# @author "HB"
#*/###########################################################################
# Constructor: validates `dataSet` (must be a CnChipEffectSet with
# combineAlleles == TRUE, i.e. total copy-number chip effects) and the
# optional `targetFunction`, then extends ChipEffectTransform with the
# fields specific to GC-content normalization.
setConstructorS3("GcContentNormalization", function(dataSet=NULL, ..., targetFunction=NULL, subsetToFit=NULL) {
  # - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
  # Validate arguments
  # - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
  # Argument 'dataSet':
  if (!is.null(dataSet)) {
    dataSet <- Arguments$getInstanceOf(dataSet, "CnChipEffectSet")
    if (dataSet$combineAlleles != TRUE) {
      throw("Currently only total copy-number chip effects can be normalized, i.e. 'combineAlleles' must be TRUE")
    }
    # The strand-specific restriction was relaxed; kept for reference:
    # if (dataSet$mergeStrands != TRUE) {
    #   throw("Currently only non-strands specific copy-number chip effects can be normalized, i.e. 'mergeStrands' must be TRUE")
    # }
  }

  # Argument 'targetFunction': the curve all arrays are normalized towards.
  if (!is.null(targetFunction)) {
    if (!is.function(targetFunction)) {
      throw("Argument 'targetFunction' is not a function: ", class(targetFunction)[1])
    }
  }

  extend(ChipEffectTransform(dataSet, ...), "GcContentNormalization",
    .subsetToFit = subsetToFit,
    .targetFunction = targetFunction
  )
})
# Return this transform's parameter list: the super-class parameters
# extended with the fields specific to GC-content normalization.
setMethodS3("getParameters", "GcContentNormalization", function(this, ...) {
  # Parameters inherited from the super class ...
  inherited <- NextMethod("getParameters")
  # ... plus the ones owned by this class.
  own <- list(
    subsetToFit = this$.subsetToFit,
    .targetFunction = this$.targetFunction
  )
  c(inherited, own)
}, protected=TRUE)
# The CDF (chip definition) is taken from the input chip-effect set.
setMethodS3("getCdf", "GcContentNormalization", function(this, ...) {
  getCdf(getInputDataSet(this))
})
# Build the output data set, copying the mergeStrands/combineAlleles
# parameters over from the input set before letting it update itself.
setMethodS3("getOutputDataSet00", "GcContentNormalization", function(this, ...) {
  res <- NextMethod("getOutputDataSet")

  # Carry over parameters too. AD HOC for now. /HB 2007-01-07
  if (inherits(res, "SnpChipEffectSet")) {
    ces <- getInputDataSet(this)
    res$mergeStrands <- ces$mergeStrands
    if (inherits(res, "CnChipEffectSet")) {
      res$combineAlleles <- ces$combineAlleles
    }
  }

  # Let the set update itself
  update2(res)

  res
}, protected=TRUE)
# Retrieve per-unit GC content for this chip type.
#
# Looks first for an Aroma unit GC-content (UGC) annotation file and falls
# back to a legacy TSV annotation file; throws if neither is found.
#
# @param units   unit indices to retrieve, or NULL.
# @param force   not used here; kept for call compatibility.
# @param verbose a Verbose object or logical.
# @return numeric vector of GC contents, one element per requested unit.
setMethodS3("getGcContent", "GcContentNormalization", function(this, units=NULL, force=FALSE, ..., verbose=FALSE) {
  # - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
  # Validate arguments
  # - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
  # Argument 'verbose':
  verbose <- Arguments$getVerbose(verbose)
  if (verbose) {
    pushState(verbose)
    on.exit(popState(verbose))
  }

  # Argument 'units':
  cdf <- getCdf(this)
  # BUG FIX: 'max' was previously passed the function object `nbrOfUnits`
  # instead of the unit count nbrOfUnits(cdf) (cf. getSubsetToFit(), which
  # uses the correct form).  Validation is also skipped when units=NULL.
  if (!is.null(units)) {
    units <- Arguments$getIndices(units, max=nbrOfUnits(cdf))
  }

  verbose && enter(verbose, "Retrieving GC content")
  chipType <- getChipType(cdf)
  chipType <- gsub(",monocell", "", chipType)
  verbose && cat(verbose, "Chip type: ", chipType)
  verbose && cat(verbose, "Units:")
  verbose && str(verbose, units)

  gcContents <- NULL

  # Try 1: Use an unit GC content (UGC) file.
  # Errors are deliberately swallowed so we can fall through to Try 2.
  tryCatch({
    ugc <- AromaUgcFile$byChipType(chipType)
    gcContents <- ugc[units,1,drop=TRUE]
  }, error = function(ex) {
  })

  # Try 2: Use a TSV file (deprecated; kept for backward compatibility)
  if (is.null(gcContents)) {
    tryCatch({
      chipTypeS <- gsub(",.*", "", chipType)
      tsv <- AffymetrixTsvFile$byChipType(chipTypeS)
      gcContents <- getGc(tsv, units=units)
    }, error = function(ex) {
    })
  }

  if (is.null(gcContents)) {
    throw("Failed to retrieve GC content information. No GC-content annotation file found: ", chipType)
  }

  verbose && cat(verbose, "GC contents:")
  verbose && str(verbose, gcContents)

  verbose && exit(verbose)

  gcContents
}, protected=TRUE)
# Determine (and cache in this$.units) the units from which the
# normalization curve is estimated: all SNP (type 2) and CN (type 5) units
# with finite GC content, optionally thinned to the user-supplied subset.
setMethodS3("getSubsetToFit", "GcContentNormalization", function(this, force=FALSE, ...) {
  # Cached?
  units <- this$.units
  if (!is.null(units) && !force)
    return(units)

  # Identify all SNP & CN units
  cdf <- getCdf(this)
  types <- getUnitTypes(cdf, ...)
  units <- which(types == 2 | types == 5)

  # Keep only those for which we have GC contents information
  gcContents <- getGcContent(this, units=units, ...)
  keep <- is.finite(gcContents)
  units <- units[keep]

  # Fit to a subset of the units?
  subsetToFit <- this$.subsetToFit
  if (!is.null(subsetToFit)) {
    # A fraction subset?  (single value strictly between 0 and 1)
    if (length(subsetToFit) == 1 && 0 < subsetToFit && subsetToFit < 1) {
      keep <- seq(from=1, to=length(units), length=subsetToFit*length(units))
    } else {
      keep <- which(units %in% subsetToFit)
    }

    # Make sure to keep data points at the tails too
    # NOTE(review): which.min/which.max index the UNFILTERED gcContents
    # vector, while `keep` indexes the finite-filtered `units` — confirm
    # these index bases actually agree.
    keep <- c(keep, which.min(gcContents), which.max(gcContents))
    keep <- unique(keep)

    # Now filter
    units <- units[keep]
    # Not needed anymore
    keep <- NULL
  }

  # Sort units
  units <- sort(units)

  # Assert correctness
  units <- Arguments$getIndices(units, max=nbrOfUnits(cdf))

  # Cache
  this$.units <- units

  units
}, private=TRUE)
# Return (estimating and caching on first use) the target prediction
# function: a lowess fit of average log2 chip-effect signal vs GC content,
# wrapped as a function of GC content.
setMethodS3("getTargetFunction", "GcContentNormalization", function(this, ..., force=FALSE, verbose=FALSE) {
  # - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
  # Validate arguments
  # - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
  # Argument 'verbose':
  verbose <- Arguments$getVerbose(verbose)
  if (verbose) {
    pushState(verbose)
    on.exit(popState(verbose))
  }

  fcn <- this$.targetFunction
  if (is.null(fcn) || force) {
    verbose && enter(verbose, "Estimating target prediction function")

    # Get the GC-content annotation data (all units; subset below).
    gcContents <- getGcContent(this, verbose=less(verbose))

    # Get target set
    ces <- getInputDataSet(this)
    verbose && enter(verbose, "Get average signal across arrays")
    ceR <- getAverageFile(ces, force=force, verbose=less(verbose))
    verbose && exit(verbose)

    # Garbage collect
    gc <- gc()
    verbose && print(verbose, gc)

    # Get units to fit
    units <- getSubsetToFit(this)

    # Get target log2 signals for SNPs
    data <- getDataFlat(ceR, units=units, fields="theta", verbose=less(verbose))
    units <- data[,"unit"]
    verbose && cat(verbose, "Units:")
    verbose && str(verbose, units)
    yR <- data[,"theta"]
    # Not needed anymore
    data <- NULL; # Not needed anymore
    yR <- log2(yR)
    verbose && cat(verbose, "Signals:")
    verbose && str(verbose, yR)

    # Get GC contents for these units
    gcContents <- gcContents[units]
    verbose && cat(verbose, "GC content:")
    verbose && str(verbose, gcContents)

    # Fit lowess function on the finite (GC, log2 signal) pairs only
    verbose && enter(verbose, "Fitting target prediction function")
    ok <- (is.finite(gcContents) & is.finite(yR))
    fit <- lowess(gcContents[ok], yR[ok])
    class(fit) <- "lowess"

    # Remove as many promises as possible before building the closure,
    # so that `fcn` captures only `fit`.
    # Not needed anymore
    fcn <- ces <- ceR <- units <- gc <- yR <- ok <- NULL

    # Create target prediction function
    fcn <- function(x, ...) {
      predict(fit, x, ...); # Dispatched predict.lowess().
    }
    verbose && exit(verbose)

    # Garbage collect
    gc <- gc()
    verbose && print(verbose, gc)

    verbose && exit(verbose)

    this$.targetFunction <- fcn
  }

  fcn
}, private=TRUE)
###########################################################################/**
# @RdocMethod process
#
# @title "Normalizes the data set"
#
# \description{
# @get "title".
# }
#
# @synopsis
#
# \arguments{
# \item{...}{Not used.}
# \item{force}{If @TRUE, data already normalized is re-normalized,
# otherwise not.}
# \item{verbose}{See @see "R.utils::Verbose".}
# }
#
# \value{
# Returns a @double @vector.
# }
#
# \seealso{
# @seeclass
# }
#*/###########################################################################
# Normalize every array in the input set for GC-content effects, writing one
# normalized chip-effect CEL file per array (skipping arrays already done)
# and returning the resulting chip-effect set.
# NOTE(review): the normalization itself reuses .normalizeFragmentLength()
# with GC content supplied as the "fragmentLengths" covariate; the verbose
# message below still says "PCR fragment-length effects" — confirm/update.
setMethodS3("process", "GcContentNormalization", function(this, ..., force=FALSE, verbose=FALSE) {
  # - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
  # Validate arguments
  # - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
  # Argument 'verbose':
  verbose <- Arguments$getVerbose(verbose)
  if (verbose) {
    pushState(verbose)
    on.exit(popState(verbose))
  }

  verbose && enter(verbose, "Normalizing set for PCR fragment-length effects")

  # - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
  # Already done?
  # - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
  if (!force && isDone(this)) {
    verbose && cat(verbose, "Already normalized")
    verbose && exit(verbose)
    outputSet <- getOutputDataSet(this)
    return(invisible(outputSet))
  }

  # - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
  # Setup
  # - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
  # Get input data set
  ces <- getInputDataSet(this)

  # Get SNP (& CN) units: unit types 2 (SNP) and 5 (CN)
  cdf <- getCdf(ces)
  # subsetToUpdate <- indexOf(cdf, "SNP_")
  types <- getUnitTypes(cdf, ...)
  subsetToUpdate <- which(types == 2 | types == 5)

  verbose && enter(verbose, "Identifying the subset used to fit normalization function")
  # Get subset to fit
  subsetToFit <- getSubsetToFit(this, verbose=less(verbose))
  verbose && str(verbose, subsetToFit)
  verbose && exit(verbose)

  # Get (and create) the output path
  path <- getPath(this)

  # - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
  # Normalize each array
  # - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
  # These are expensive to compute, so they are filled in lazily on the
  # first array that actually needs normalizing and reused afterwards.
  gcContents <- NULL
  targetFcn <- NULL
  map <- NULL
  nbrOfArrays <- length(ces)
  res <- vector("list", nbrOfArrays)
  for (kk in seq_len(nbrOfArrays)) {
    ce <- ces[[kk]]
    verbose && enter(verbose, sprintf("Array #%d of %d ('%s')",
                                      kk, nbrOfArrays, getName(ce)))
    filename <- getFilename(ce)
    pathname <- filePath(path, filename)
    if (isFile(pathname)) {
      # Output file already exists: wrap it and move on.
      verbose && cat(verbose, "Already normalized. Skipping.")
      ceN <- fromFile(ce, pathname)

      # Carry over parameters too. AD HOC for now. /HB 2007-01-07
      if (inherits(ce, "SnpChipEffectFile")) {
        ceN$mergeStrands <- ce$mergeStrands
        if (inherits(ce, "CnChipEffectFile")) {
          ceN$combineAlleles <- ce$combineAlleles
        }
      }

      # CDF inheritance
      setCdf(ceN, cdf)

      res[[kk]] <- ceN
      verbose && exit(verbose)
      next
    }

    # Get unit-to-cell (for optimized reading)?
    if (is.null(map)) {
      # Only loaded if really needed.
      verbose && enter(verbose, "Retrieving unit-to-cell map for all arrays")
      map <- getUnitGroupCellMap(ce, units=subsetToUpdate, verbose=less(verbose))
      verbose && str(verbose, map)
      verbose && exit(verbose)
    }

    if (is.null(gcContents)) {
      # Get PCR fragment lengths for the subset to be fitted
      gcContents <- getGcContent(this, units=map[,"unit"], verbose=less(verbose, 1))

      # Get the index in the data vector of subset to be fitted.
      # Note: match() only returns first match, which is why we do
      # it this way.
      subset <- match(map[,"unit"], subsetToFit)
      subset <- subset[!is.na(subset)]
      subset <- match(subsetToFit[subset], map[,"unit"])
    }

    if (is.null(targetFcn)) {
      # Only loaded if really needed.
      # Retrieve/calculate the target function
      targetFcn <- getTargetFunction(this, verbose=less(verbose))
    }

    # Get target log2 signals for all SNPs to be updated
    verbose && enter(verbose, "Getting signals")
    data <- getDataFlat(ce, units=map, fields="theta", verbose=less(verbose))
    verbose && exit(verbose)

    # Normalize in log2 space, then transform back.
    verbose && enter(verbose, "Normalizing log2 signals")
    y <- log2(data[,"theta"])
    y <- .normalizeFragmentLength(y, fragmentLengths=gcContents,
                                  targetFcn=targetFcn, subsetToFit=subset, ...)
    y <- 2^y
    verbose && exit(verbose)

    # Create CEL file to store results, if missing
    verbose && enter(verbose, "Creating CEL file for results, if missing")
    ceN <- createFrom(ce, filename=pathname, path=NULL, verbose=less(verbose))
    verbose && exit(verbose)

    # Carry over parameters too. AD HOC for now. /HB 2007-01-07
    if (inherits(ce, "SnpChipEffectFile")) {
      ceN$mergeStrands <- ce$mergeStrands
      if (inherits(ce, "CnChipEffectFile")) {
        ceN$combineAlleles <- ce$combineAlleles
      }
    }

    # CDF inheritance
    setCdf(ceN, cdf)

    verbose && enter(verbose, "Storing normalized signals")
    data[,"theta"] <- y
    # Not needed anymore
    y <- NULL
    updateDataFlat(ceN, data=data, verbose=less(verbose))
    # Not needed anymore
    data <- NULL
    ## Create checksum file
    ceNZ <- getChecksumFile(ceN)
    verbose && exit(verbose)

    # Garbage collect
    gc <- gc()
    verbose && print(verbose, gc)

    res[[kk]] <- ceN
    verbose && exit(verbose)
  } # for (kk in ...)

  # Create the output set (ad hoc for now so that we keep parameter too)
  outputSet <- clone(ces)
  outputSet$files <- res
  clearCache(outputSet)

  # Update the output data set
  this$outputSet <- outputSet

  verbose && exit(verbose)

  outputSet
})
|
e6ce31b5a8144de808e43bb54818ee6edb83feff
|
6098cc7a77f988e28cc7f59012224b4a7cdc35ff
|
/Practise.R
|
a1636d9955bdbc5587332393dd59440b6fbfd1a4
|
[] |
no_license
|
mingyanz130/regrssion_models_and_predictions
|
da32bb7a9869de33e39849fa58889e5cefe426b6
|
2b305d5bb68a6cd38f1d8c12cea1cb112c86b962
|
refs/heads/master
| 2020-08-26T18:21:39.753355
| 2019-10-23T16:20:07
| 2019-10-23T16:20:07
| 217,102,314
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 114
|
r
|
Practise.R
|
## Set my working directory
# NOTE(review): a hard-coded setwd() makes this script machine-specific.
setwd("~/Documents/STATS")
## Load necessary R libraries
library(ggplot2)
|
92f9c35729971e544793d261d8586088e22fda77
|
4384af4add4a62c2e922704e6ef2f7729f33bef0
|
/RCode/stratVarGBME/3_getPCA.R
|
582cbed31eab73a7f5d1dee05f2678a418dfc220
|
[] |
no_license
|
s7minhas/ForeignAid
|
8e1cffcae5550cbfd08a741b1112667629b236d6
|
2869dd5e8c8fcb3405e45f7435de6e8ceb06602a
|
refs/heads/master
| 2021-03-27T18:55:20.638590
| 2020-05-12T16:42:58
| 2020-05-12T16:42:58
| 10,690,902
| 1
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 4,833
|
r
|
3_getPCA.R
|
## Tweaks to PCA:
# 1) no option to return eigenvalues in the wrapper function GenerateDilsNetwork; changed code to do this which allows us to see how much variance each component explains
# 2) scale and center is turned to FALSE ; usually shouldn't data be centered before you do a PCA analysis?
# 3) does not sample with replacement, best practice with bootstrapping would seem to indicate that the sample size of each bootstrapped sample should be the same as the size of the original sample, change code to fix this http://www.stata.com/support/faqs/statistics/bootstrapped-samples-guidelines/
# 4) eigenvectors are sometimes inverted relative to each other across samples (i.e. bootstrap 1 will return eigenvector (.5, .4, -.1) while bootstrap 2 will return(-.51, -.42, .1) ;
# 5) to address this problem, made all the eigenvectors consistent with each other by constraining the first element of each eigenvector to be positive
# previously the code had addressed this problem by returning the absolute value for all elements of the eigenvector, which seems baffling

# Outstanding issues
# 1) subsample = F returns an error
# 2) unclear what weights value returns

# NOTE(review): rm(list = ls()) wipes the session; user-specific paths below.
rm(list = ls())
if (Sys.info()['user']=="cindycheng"){
  pathCode="~/Documents/Papers/ForeignAid/RCode";
  pathResults = '~/Dropbox/ForeignAid/Results'}
if (Sys.info()['user'] == 'cindy'){
  pathCode="/home/cindy";
  pathResults = "/home/cindy"
}

# load packages
# library(dplyr)
source(paste0(pathCode, "/Analysis/dilsTweak.R"))

# Load the pairwise latent-distance matrices produced by the GBME step.
# Each .rda file stores its data in an object named `res`.
setwd(paste0(pathResults, "/gbmeLatDist"))
load('allyWtDist.rda')
allyDist = data.frame(res)
load('igoDist.rda')
igoDist = data.frame(res)
load('unNewDist.rda')
unDist = data.frame(res)
load('midDist.rda')
midDist = data.frame(res)
load('hostlevDist.rda')
hostlevDist = data.frame(res)
load('hostlevsumDist.rda')
hostlevsumDist = data.frame(res)
load('armsDist.rda')
armsDist = data.frame(res)
load('warMsumDistStd.rda')
warDist = data.frame(res)
load('armsSumDist.rda')
armsSumDist = data.frame(res)
load('jmeDist.rda')
jmeDist = data.frame(res)

###### PCA - UN, IGO and Ally########
# Merge the three dyadic distance data frames on directed-dyad-year keys.
D1 = merge(allyDist, igoDist, by = c("ccode1", "ccode2", "year"), all = T)
D = merge(D1, unDist, by = c("ccode1", "ccode2", "year"), all = T)

# Run the (bootstrapped) PCA year by year and accumulate the results.
# NOTE(review): rbind inside a loop grows the objects; fine for 36 years.
PCA_AllYrs = NULL
PCA_coefficients = NULL
PCA_eigenvalues.sd = NULL
PCA_bootstrap.sds = NULL
for (yr in c(1970:2005)){
  PCA = getPCA(D, yr = yr, n.sub = 1000)
  PCA_AllYrs = rbind(PCA_AllYrs, data.frame(PCA$dils.edgelist))
  PCA_coefficients = rbind(PCA_coefficients, c(yr, PCA$coefficients))
  PCA_eigenvalues.sd = rbind(PCA_eigenvalues.sd, c(yr, PCA$sdev ))
  PCA_bootstrap.sds = rbind(PCA_bootstrap.sds, c(yr, PCA$bootstrap.sds))
}
PCA_FullData = list(PCA_AllYrs= PCA_AllYrs, PCA_coefficients = PCA_coefficients, PCA_eigenvalues.sd= PCA_eigenvalues.sd, PCA_bootstrap.sds = PCA_bootstrap.sds )
save(PCA_FullData, file = "PCA_FullData.rda")

###### PCA - JME, HostLev and ArmsTransfers, War ? ########
# hostlev, arms
# hostlevsum, armsSum
#hostlev, armsSum
#hostlevsum, arms
# mid, arms
# mid armsSum
# # merge data
# D1 = merge(midDist, warDist, by = c("ccode1", "ccode2", "year"), all = T)
# D2 = armsSumDist
# D2 = merge(armsDist, jmeDist, by = c("ccode1", "ccode2", "year"), all = T)
# D = merge(D1, D2, by = c("ccode1", "ccode2", "year"), all = T)
# head(D2)
# #
# D$midDistRescale = -D$midDist + max(D$midDist, na.rm = T)
# D$hostlevDistRescale = -D$hostlevDist + max(D$hostlevDist, na.rm = T)
# D$warDistRescale = -D$warMsum5Dist + max(D$warMsum5Dist, na.rm = T)
# DF = D[, -which(names(D) %in% c("midDist", "warMsum5Dist"))]
# D$hostlevsumDistRescale = -D$hostlevsumDist + max(D$hostlevsumDist, na.rm = T)
# D$warDistRescale = -D$warMsum5Dist + max(D$warMsum5Dist, na.rm = T)
# DF = D[, -which(names(D) %in% c("hostlevsumDist", "warMsum5Dist"))]
# summary(DF)
# cor(DF[, -c(1:3)])
# # PCA on full Data
# PCA_AllYrs = NULL
# PCA_coefficients = NULL
# PCA_eigenvalues.sd = NULL
# PCA_bootstrap.sds = NULL
# for (yr in c(1990:2010)){
#   PCA = getPCA(DF, yr = yr, n.sub = 1000)
#   PCA_AllYrs = rbind(PCA_AllYrs, data.frame(PCA$dils.edgelist))
#   PCA_coefficients = rbind(PCA_coefficients, c(yr, PCA$coefficients))
#   PCA_eigenvalues.sd = rbind(PCA_eigenvalues.sd, c(yr, PCA$sdev ))
#   PCA_bootstrap.sds = rbind(PCA_bootstrap.sds, c(yr, PCA$bootstrap.sds))
# }
# PCA_FullData = list(PCA_AllYrs= PCA_AllYrs, PCA_coefficients = PCA_coefficients, PCA_eigenvalues.sd= PCA_eigenvalues.sd, PCA_bootstrap.sds = PCA_bootstrap.sds )
# save(PCA_FullData, file = "PCA_FullData_midWarArmsSum.rda")

### Evaluate eigenvalues
# Average (across years) share of total eigenvalue SD per component.
setwd(pathResults)
load('PCA/PCA_FullData_allyIGOUN.rda')
colMeans(PCA_FullData$PCA_eigenvalues.sd[, -1]/c(rowSums(PCA_FullData$PCA_eigenvalues.sd[, -1])))
# NOTE(review): leftover interactive help call; harmless but should be removed.
?colMeans
|
1081ab468196233a5cc4cb49d3cf9e4adbc7e579
|
86772a78af6ca3567ed333c9a4cd68c5af73848d
|
/examples/A novel algorithmic approach to Bayesian logic regression supplementaries/simple usage/inference_help.r
|
a69d3099fb4fb2f1699895244b5baa8dde96c54b
|
[] |
no_license
|
aliaksah/EMJMCMC2016
|
077170db8ca4a21fbf158d182f551b3814c6c702
|
3954d55fc45296297ee561e0f97f85eb5048c39e
|
refs/heads/master
| 2023-07-19T16:52:43.772170
| 2023-07-15T16:05:37
| 2023-07-15T16:05:37
| 53,848,643
| 17
| 5
| null | 2021-11-25T14:53:35
| 2016-03-14T10:51:06
|
R
|
UTF-8
|
R
| false
| false
| 2,139
|
r
|
inference_help.r
|
# Demo script for the EMJMCMC2016 Bayesian logic regression interface:
# simulates data with known logic (AND) interactions, then recovers them
# with LogicRegr() under two priors (robust g and Jeffreys) for both
# Gaussian and Bernoulli responses.
#read the most recent stable version of the package
source("https://raw.githubusercontent.com/aliaksah/EMJMCMC2016/master/R/the_mode_jumping_package4.r")
#make sure that you are using Mac Os or Linux (mclapply is currently not supported for Windows, unless some mclapply hack function for Windows is pre-loaded in your R session)
#simulate Gaussian responses
# 1000 observations x 50 binary covariates; the seed fixes the simulated data.
set.seed(040590)
X1= as.data.frame(array(data = rbinom(n = 50*1000,size = 1,prob = runif(n = 50*1000,0,1)),dim = c(1000,50)))
# True signal: intercept plus three pairwise interactions (V1*V4, V8*V11, V5*V9).
Y1=rnorm(n = 1000,mean = 1+0.7*(X1$V1*X1$V4) + 0.8896846*(X1$V8*X1$V11)+1.434573*(X1$V5*X1$V9),sd = 1)
X1$Y1=Y1
#specify the initial formula
# Y1 (column 51) regressed on all 50 covariates.
formula1 = as.formula(paste(colnames(X1)[51],"~ 1 +",paste0(colnames(X1)[-c(51)],collapse = "+")))
data.example = as.data.frame(X1)
#run the inference with robust g prior
# NOTE(review): ncores = 32 assumes a large machine; reduce on smaller hosts.
res4G = LogicRegr(formula = formula1,data = data.example,family = "Gaussian",prior = "G",report.level = 0.5,d = 15,cmax = 2,kmax = 15,p.and = 0.9,p.not = 0.01,p.surv = 0.2,ncores = 32)
print(res4G$feat.stat)
#run the inference with Jeffrey's prior
res4J = LogicRegr(formula = formula1,data = data.example,family = "Gaussian",prior = "J",report.level = 0.5,d = 15,cmax = 2,kmax = 15,p.and = 0.9,p.not = 0.01,p.surv = 0.2,ncores = 32)
print(res4J$feat.stat)
#change to Bernoulli responses
X1= as.data.frame(array(data = rbinom(n = 50*1000,size = 1,prob = 0.3),dim = c(1000,50)))
# Linear predictor with three logic interactions (note the NOT on V1 in the
# first term), pushed through the inverse logit and rounded to 0/1 outcomes.
Y1=-0.7+1*((1-X1$V1)*(X1$V4)) + 1*(X1$V8*X1$V11)+1*(X1$V5*X1$V9)
X1$Y1=round(1.0/(1.0+exp(-Y1)))
#specify the initial formula
formula1 = as.formula(paste(colnames(X1)[51],"~ 1 +",paste0(colnames(X1)[-c(51)],collapse = "+")))
data.example = as.data.frame(X1)
#run the inference with robust g prior
res1G = LogicRegr(formula = formula1,data = data.example,family = "Bernoulli",prior = "G",report.level = 0.5,d = 15,cmax = 2,kmax = 15,p.and = 0.9,p.not = 0.2,p.surv = 0.2,ncores = 32)
print(res1G$feat.stat)
#run the inference with Jeffrey's prior
res1J = LogicRegr(formula = formula1,data = data.example,family = "Bernoulli",prior = "J",report.level = 0.5,d = 15,cmax = 2,kmax = 15,p.and = 0.9,p.not = 0.2,p.surv = 0.2,ncores = 32)
print(res1J$feat.stat)
|
8ae770ac31d3528b9e917fff1dadcdec5ceca750
|
3fe7b25ac1e9f824a531fbf7c43bad84e9ac7d9b
|
/WESyS/epa-biogas-rin-HTL-TEA/studies/FY18/sensitivity/src/vbsa/mcf.testing/mcf.scratch.R
|
ddddb9f59b6c237823f8a1582e20c7c15c624004
|
[] |
no_license
|
irinatsiryapkina/work
|
5f3b67d36ffc18cb1588f8a3e519a76cdfc52e81
|
1aaecb300d4d0082df36fd79748145b22d1d7acb
|
refs/heads/master
| 2021-01-16T10:25:55.539383
| 2020-02-25T18:58:31
| 2020-02-25T18:58:31
| 243,076,845
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,895
|
r
|
mcf.scratch.R
|
#04052018
#MCF with AD k comparisons scratching
#data cave prototyping
# Setup for a Monte Carlo filtering (MCF) prototype: behavioural /
# non-behavioural input sets are compared with Anderson-Darling k-sample
# tests (kSamples package).
# NOTE(review): rm(list=ls()) and setwd() in a script are discouraged.
rm(list=ls())
# NOTE(review): this assigns a plain R variable named R_LIBS; it does NOT
# change the library search path (that would need .libPaths() or
# Sys.setenv(R_LIBS = ...)) -- confirm whether it is needed at all.
R_LIBS= ("/home/R/library")
library (data.table)
library (dplyr)
library (kSamples)
setwd ("~/GitHub/epa-biogas-rin/studies/FY18/sensitivity/src/vbsa/mcf.testing/")
#Functions
# Return the jump points (knots) of the empirical CDF of a numeric vector,
# i.e. its sorted unique values (knots(ecdf(x)) == sort(unique(x))).
#
# Args:
#   numeric.variable: numeric vector to summarise.
#   df: unused; kept (now with a NULL default) for backward compatibility
#       with existing callers that pass the parent data frame/table.
# Returns: numeric vector of the sorted unique values of numeric.variable.
get.cdf <- function (numeric.variable, df = NULL) {
	Fn <- ecdf (numeric.variable)
	knots (Fn)
}
#load the study design
load ("foo.vbsa.design.RDA")
load ("foo.data.RDA")
foo.sobol.output <- as.data.table (foo.sobol.output)
# NOTE(review): `design` is used below, but the line that would create it is
# commented out -- confirm the loaded .RDA files already provide `design`
# (and `out`, used in the plot below).
#design <- melt (vbsa.design, id.vars = "run_id")
#create some relationship from the foo data. this mimics a result and some factor that is a function of result like roi
#out <- data.table (run_id = seq (from = 1, to = nrow (foo.sobol.output), by = 1),
#result = abs(foo.sobol.output))
#out$vbl <- (out$result + runif (6000, -.01,.1))^ 0.5
#plot (out$vbl, out$result, col = "light grey")
#save (out, file = "foo.data.RDA")
#load study results
#output.centered <- data.table (run_id = out$run_id, result = out$result - mean(out$result),
#vbl = out$roi - mean(out$roi))
#out <- output.centered
#out$result <- out$result/max(out$result)
#out$vbl <- out$vbl/max(out$vbl)
#plot some relationship and select a set of interest (B group)
plot (out$vbl, out$result, col = "light grey")
#selection
# Behavioural set B: runs falling in a hand-picked corner of the scatter.
B <- subset (out, out$vbl > 3.17 & out$result > 9.95)
points (B$vbl, B$result, col = "red")
#subset the study design based on B
B <- subset(design, design$run_id %in% B$run_id)
B <- setorder (B, variable)
B.list <- split(B, B$variable)
B.names <- as.character (unique (B$variable))
#get the cumulative dist function for B
# NOTE(review): 1:length(x) is unsafe when x is empty (yields c(1, 0));
# seq_along() would be safer. Also, get.cdf() returns the UNIQUE values
# (ecdf knots), so the AD tests below compare sets of distinct values rather
# than the raw samples -- confirm this is the intended comparison.
LIST <- list ()
for (i in 1: length(B.list)) {
LIST [[i]] <- get.cdf (B.list[[i]]$value, B.list[[i]])
B.cdf <- LIST
}
names (B.cdf) <- B.names #apply the vbl names to the list elements
#Bbar are all other model runs
# NOTE(review): B$run relies on partial matching of B$run_id -- spell out
# run_id to avoid a silent NULL if another column starting with "run" appears.
B.bar <- design [ !(design$run_id %in% B$run), ]#this creates the Bbar by subtracting B from the design
B.bar.sorted <- setorder (B.bar, variable)
B.bar.list <- split(B.bar.sorted, B.bar.sorted$variable)
B.bar.names <- as.character (unique (B.bar.sorted$variable))
#get the cumulative dist function for Bbar
LIST <- list ()
for (i in 1:length (B.bar.list)) {
LIST [[i]] <- get.cdf (B.bar.list[[i]]$value, B.bar.list[[i]])
B.bar.cdf <- LIST
}
names (B.bar.cdf) <- B.bar.names#apply the vbl names to the list elements
#perform Anderson-Darling test to compare B to Bbar. H0 = B and Bbar are from the same population; H1 = B and Bbar are not from the same population
#pvalues are based on bootstraps
# NOTE(review): Nsim = 5 is far too few simulations for a stable simulated
# p-value; raise substantially for real runs.
LIST <- list()
for (i in 1:length (B.cdf)){
LIST [[i]] <- ad.test (B.cdf[[i]], B.bar.cdf[[i]], method = "simulated", Nsim = 5) #Nsim = bootstraps
ad.list <- (LIST)
}
names (ad.list) <- B.names
# Extract the simulated p-value (row 1, column 4 of the $ad matrix) per factor.
LIST <- list()
for (i in 1:length (ad.list)) {
LIST [[i]] <- ad.list[[i]]$ad[1,4]
test.results <- (LIST)
}
ad.results <- as.data.table (do.call (rbind, test.results)) #this takes the list elements from AD results and assembles them as a datatable
ad.results <- cbind (B.names, ad.results) #put the factor names in. Need to be careful here to make sure the order did not change
setnames(ad.results, c("factor", "sim.p.value"))
#need to add a summary of the input ranges to the above datatable
sig.results <- subset (ad.results, ad.results$sim.p.value < 0.01)#subset the AD results based on p-value
# Summarise the behavioural-set input values for the significant factors.
test <- subset (B, variable %in% sig.results$factor)
setorder (test, run_id, variable)
test.2 <- tapply (test$value, test$variable, summary)
test.3 <- as.data.table (do.call (rbind, test.2))
#save(ad.results, sig.results, B.list, B.bar.list, B.cdf, B.names, file = paste (results.path,"AD.results.RDA", sep="/"))
# local sensitivity analysis
# define behavioural (B) and non-behavioral (B.bar) sets to test for significant diff in their input settings
|
6daa9a9d2c16d55d4e4c061664ca37ed91fd5714
|
5010b9654424f6359239ebb19c6704fc3d89fac5
|
/man/samplesize_onefactormultigroupspower.Rd
|
3c6e3a149a79aa1d2b55dd382184085749d112a6
|
[] |
no_license
|
jaspershen/metabox.stat
|
1d1f36e0a6048e038c95daf695560d32aebcf002
|
b9292fa9f80d6b7c5c4d6aa9ec20d9c4b25b871a
|
refs/heads/master
| 2021-01-12T12:48:39.480099
| 2016-09-22T17:30:07
| 2016-09-22T17:30:07
| 68,992,738
| 2
| 1
| null | 2016-09-23T05:43:58
| 2016-09-23T05:43:57
| null |
UTF-8
|
R
| false
| true
| 493
|
rd
|
samplesize_onefactormultigroupspower.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/samplesize_onefactormultigroupspower.R
\name{samplesize_onefactormultigroupspower}
\alias{samplesize_onefactormultigroupspower}
\title{samplesize_onefactormultigroupspower}
\usage{
samplesize_onefactormultigroupspower(k = 3, effectsize = 0.8,
sig_level = 0.05, power = 0.8, forplot = FALSE, samplerange = NULL,
effectsizerange = NULL)
}
\description{
Computes the required sample size (or achievable power) for a one-way design
with \code{k} groups, given an effect size, significance level, and target
power. The \code{forplot}, \code{samplerange}, and \code{effectsizerange}
arguments appear to support evaluating these quantities over a range of
values for plotting (confirm against the function source).
}
\examples{
samplesize_onefactormultigroupspower()
}
|
50d4398b9477700d0aa656089d8035997149cf62
|
8dbde607e17f052b65320217557bb3dd6172f3ca
|
/man/IRTLikelihood.cfa.Rd
|
e6f1c60dc223ab0282fdcc9b5ba3930c8afec5b5
|
[] |
no_license
|
cran/TAM
|
0b18b6cf93ed28eabbb1c0c6ce197a1f25d6a789
|
f0902c41cf41d1c816bd719dcdee2980803f24a1
|
refs/heads/master
| 2022-09-05T14:37:58.976159
| 2022-08-28T17:40:02
| 2022-08-28T17:40:02
| 17,693,836
| 4
| 7
| null | 2021-05-25T02:31:46
| 2014-03-13T03:42:09
|
R
|
UTF-8
|
R
| false
| false
| 3,720
|
rd
|
IRTLikelihood.cfa.Rd
|
%% File Name: IRTLikelihood.cfa.Rd
%% File Version: 0.19
\name{IRTLikelihood.cfa}
\alias{IRTLikelihood.cfa}
%- Also NEED an '\alias' for EACH other topic documented here.
\title{
Individual Likelihood for Confirmatory Factor Analysis
}
\description{
This function computes the individual likelihood evaluated
at a \code{theta} grid for confirmatory factor analysis
under the normality assumption of residuals. Either
the item parameters (item loadings \code{L}, item
intercepts \code{nu} and residual covariances \code{psi})
or a fitted \code{cfa} object from the \pkg{lavaan}
package can be provided. The individual likelihood
can be used for drawing plausible values.
}
\usage{
IRTLikelihood.cfa(data, cfaobj=NULL, theta=NULL, L=NULL, nu=NULL,
psi=NULL, snodes=NULL, snodes.adj=2, version=1)
}
%- maybe also 'usage' for other objects documented here.
\arguments{
\item{data}{
Dataset with item responses
}
\item{cfaobj}{
Fitted \code{\link[lavaan:cfa]{lavaan::cfa}} (\pkg{lavaan}) object
}
\item{theta}{
Optional matrix containing the \code{theta} values
used for evaluating the individual likelihood
}
\item{L}{
Matrix of item loadings (if \code{cfaobj} is not provided)
}
\item{nu}{
Vector of item intercepts (if \code{cfaobj} is not provided)
}
\item{psi}{
Matrix with residual covariances
(if \code{cfaobj} is not provided)
}
\item{snodes}{
Number of \code{theta} values used for the approximation
of the distribution of latent variables.
}
\item{snodes.adj}{
Adjustment factor for quasi monte carlo nodes for
more than two latent variables.
}
\item{version}{Function version. \code{version=1} is based on a
\pkg{Rcpp} implementation while \code{version=0} is
a pure \R implementation.}
}
%\details{
%% ~~ If necessary, more details than the description above ~~
%}
\value{
Individual likelihood evaluated at \code{theta}
}
%\references{
%% ~put references to the literature/web site here ~
%}
%\author{
%% ~~who you are~~
%}
%\note{
%% ~~further notes~~
%}
%% ~Make other sections like Warning with \section{Warning }{....} ~
\seealso{
\code{\link[CDM:IRT.likelihood]{CDM::IRT.likelihood}}
}
\examples{
\dontrun{
#############################################################################
# EXAMPLE 1: Two-dimensional CFA data.Students
#############################################################################
library(lavaan)
library(CDM)
data(data.Students, package="CDM")
dat <- data.Students
dat2 <- dat[, c(paste0("mj",1:4), paste0("sc",1:4)) ]
# lavaan model with DO operator
lavmodel <- "
DO(1,4,1)
mj=~ mj\%
sc=~ sc\%
DOEND
mj ~~ sc
mj ~~ 1*mj
sc ~~ 1*sc
"
lavmodel <- TAM::lavaanify.IRT( lavmodel, data=dat2 )$lavaan.syntax
cat(lavmodel)
mod4 <- lavaan::cfa( lavmodel, data=dat2, std.lv=TRUE )
summary(mod4, standardized=TRUE, rsquare=TRUE )
# extract item parameters
res4 <- TAM::cfa.extract.itempars( mod4 )
# create theta grid
theta0 <- seq( -6, 6, len=15)
theta <- expand.grid( theta0, theta0 )
L <- res4$L
nu <- res4$nu
psi <- res4$psi
data <- dat2
# evaluate likelihood using item parameters
like2 <- TAM::IRTLikelihood.cfa( data=dat2, theta=theta, L=L, nu=nu, psi=psi )
# The likelihood can also be obtained by direct evaluation
# of the fitted cfa object "mod4"
like4 <- TAM::IRTLikelihood.cfa( data=dat2, cfaobj=mod4 )
attr( like4, "theta")
# the theta grid is automatically created if theta is not
# supplied as an argument
}
}
% Add one or more standard keywords, see file 'KEYWORDS' in the
% R documentation directory.
%\keyword{ ~kwd1 }
%\keyword{ ~kwd2 }% __ONLY ONE__ keyword per line
|
06737bbff342ca1171c36f482c66e1eb3db7a0a6
|
333ab0943a89f695b4eed5c639ea0775e34746c6
|
/man/lflt_bubbles_GlnGltCat.Rd
|
55f2cac0f0e24dd05c35db3f6218483b34e20a55
|
[] |
no_license
|
jimsforks/lfltmagic
|
0fca22f1b6ac9215b1b63b6190a308c741876008
|
c1179763e526767e9ef24243a80bce938a56ef74
|
refs/heads/master
| 2023-03-15T10:27:40.779892
| 2021-03-10T17:26:51
| 2021-03-10T17:26:51
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 485
|
rd
|
lflt_bubbles_GlnGltCat.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/bubbles_GlnGltCatNum.R
\name{lflt_bubbles_GlnGltCat}
\alias{lflt_bubbles_GlnGltCat}
\title{Leaflet bubbles by categorical variable}
\usage{
lflt_bubbles_GlnGltCat(data = NULL, ...)
}
\arguments{
\item{data}{A data.frame (the usage default is \code{NULL}).}

\item{...}{Further arguments (see the function definition).}
}
\value{
leaflet viz
}
\description{
Leaflet bubbles by categorical variable
}
\section{ctypes}{
Gln-Glt-Cat
}
\examples{
lflt_bubbles_GlnGltCat(sample_data("Gln-Glt-Cat", nrow = 10))
}
|
6542d0e805dcc2286ca215f0b432e60c83ed3a8b
|
fccc05683406bc5130e921058b2f356549c70a85
|
/decsn trees.R
|
24497c90af82f18191c61a8819b2bdc6ff8248ec
|
[] |
no_license
|
mohanpeddada/R-code-for-Machone-learning-algorithms
|
a9ce5962d7c6c4d0e8c2fe0ccc434f98c0994b33
|
2e30e27cfdca96bef203e7366abb2a1c60a9c3c7
|
refs/heads/master
| 2020-04-15T10:08:30.571567
| 2019-01-08T06:45:45
| 2019-01-08T06:45:45
| 164,582,006
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 287
|
r
|
decsn trees.R
|
## Decision tree regressor with prestige dataset
# Fits an rpart regression tree predicting income from all other columns,
# using a random 85/15 train/test split of prestige.csv.
library(rpart)

data1 <- read.csv('prestige.csv')

# NOTE(review): there is no set.seed() call, so the split is not
# reproducible -- confirm whether that is intended.
indices <- sample(nrow(data1), round(nrow(data1)*0.85))
train_set <- data1[indices,]
test_set <- data1[-indices,]

# BUG FIX: the original call used method = 'annova' (a typo rpart rejects;
# the regression method is 'anova') and had a trailing comma, which R treats
# as an empty argument. The stray interactive `?rpart` help call was removed.
reg_model <- rpart(income~., train_set, method = 'anova')
|
2e40f3f1221a33f70d739d8a556e2ee5adf4aad6
|
10b908437ccb5123218ee56191cd4bf42c6051df
|
/Genome_processing/Functional_annotation/Mapped_geobacillus/[OLD]/Plot_change_func_annot.R
|
914214a233bc5619ddffbb66cb358fc78b78bbff
|
[] |
no_license
|
AlexanderEsin/Scripts
|
da258f76c572b50da270c66fde3b81fdb514e561
|
b246b0074cd00f20e5a3bc31b309a73c676ff92b
|
refs/heads/master
| 2021-01-12T15:10:47.063659
| 2019-03-10T15:09:38
| 2019-03-10T15:09:38
| 69,351,736
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 19,333
|
r
|
Plot_change_func_annot.R
|
library(ggplot2)
library(reshape2)
library(gridExtra)
# Full outer merge of two data frames on their rownames, keeping rownames on
# the result. Designed to be folded over a list of tables with Reduce():
# rows present in either input are retained (all = TRUE) and merge's
# re-sorting is disabled (sort = FALSE).
MyMerge <- function(x, y){
	combined <- merge(x, y, by = "row.names", all = TRUE, sort = FALSE)
	row_labels <- combined$Row.names
	combined <- combined[, setdiff(names(combined), "Row.names"), drop = FALSE]
	rownames(combined) <- row_labels
	combined
}
###########################################################################
# Configuration switches: which transfer-scenario subset and whether
# intra-group (IG) transfers are included. These select the input folders
# and output labels used throughout the script.
IG_included	= TRUE
All_scenarios	= FALSE
## Define subset of HGT predictions to be used ##
# Underscore-separated list of Mowgli transfer-penalty settings to compare.
penalty_folder	<- "t3_t4_t5"
###########################################################################
if (All_scenarios == TRUE) {
	Scenario_ID <- "All_scenarios"
	merged_relative_HGT_title <- "All_scenarios"
} else {
	Scenario_ID <- "Scenarios_1_2"
	merged_relative_HGT_title <- "Scenarios 1 & 2 ONLY"
}
if (IG_included == TRUE) {
	IG_ID <- "IG"
} else {
	IG_ID <- "NO_IG"
}
IG_folder <- paste0("/", IG_ID, "/", Scenario_ID)
###########################################################################
## Prepare the list of penalties to be queried downstream ##
penalty_list <- strsplit(penalty_folder, "_")[[1]]
penalty_num <- length(penalty_list)
## Prepare list for the ordered HGT tables, but also write them out to individual variables ##
# NOTE(review): the lists are preallocated to penalty_num slots but the code
# below appends penalty_num + 3 entries (Total, All_HGT, All_Vertical plus
# one per penalty); R extends the lists silently, so this works but the
# preallocation length is misleading.
ordered_hgt_table_list <- vector("list", penalty_num)
ordered_vert_table_list <- vector("list", penalty_num)
list_entry_counter <- 1
###########################################################################
## Read in total and merged tables ##
## The functional annotation for the complete set of gene families/groups/clusters ##
# Build an ordered (descending count) frequency table of COG categories for
# the full gene-family set; this becomes the "Total" reference column.
setwd("/users/aesin/desktop/Geo_analysis/Geo_ortholog_nucl/Functional_annotation")
total_cog_tab <- table(read.table(file = "Narrow_COG_list.txt", sep = "\n"))
total_cog_tab_ordered <- as.data.frame(total_cog_tab[order(-total_cog_tab)])
colnames(total_cog_tab_ordered)[1] <- "Total"
# Add to our table lists #
ordered_hgt_table_list[[list_entry_counter]] <- total_cog_tab_ordered
ordered_vert_table_list[[list_entry_counter]] <- total_cog_tab_ordered
list_entry_counter = list_entry_counter + 1
## The functional annotation for the sets predicted as HGT or Vertical by ALL the penalty reconciliation, e.g.: 1 1 1 1 or 0 0 0 0 ##
setwd(paste0("/users/aesin/desktop/Geo_analysis/HGT_sets/Functional_annotation/Merged", IG_folder, "/", penalty_folder))
hgt_cog_tab <- table(read.table(file = "HGT_narrow_COG.tsv", sep = "\n"))
hgt_cog_tab_ordered <- as.data.frame(hgt_cog_tab[order(-hgt_cog_tab)])
colnames(hgt_cog_tab_ordered)[1] <- "All_HGT"
# Add to our table list #
# Both the All_HGT and All_Vertical columns go into BOTH lists, so each
# merged table later carries both consensus columns for comparison.
ordered_hgt_table_list[[list_entry_counter]] <- hgt_cog_tab_ordered
ordered_vert_table_list[[list_entry_counter]] <- hgt_cog_tab_ordered
list_entry_counter = list_entry_counter + 1
vertical_cog_tab <- table(read.table(file = "Vertical_narrow_COG.tsv", sep = "\n"))
vertical_cog_tab_ordered <- as.data.frame(vertical_cog_tab[order(-vertical_cog_tab)])
colnames(vertical_cog_tab_ordered)[1] <- "All_Vertical"
# Add to our table list #
ordered_hgt_table_list[[list_entry_counter]] <- vertical_cog_tab_ordered
ordered_vert_table_list[[list_entry_counter]] <- vertical_cog_tab_ordered
list_entry_counter = list_entry_counter + 1
###########################################################################
## Read in the table corresponding to the refined dataset containing transfers from only OUTSIDE the IG ##
setwd("/users/aesin/desktop/Mowgli/External_HGT/New_parser/Functional_annotation")
# Strip the letter prefix from the penalty labels (e.g. "t5" -> 5) and pick
# the largest penalty; the refined external-only table is read for that one.
maximum_penalty <- max(as.numeric(gsub(pattern = "[a-z]+", replacement = "", x = penalty_list)))
refined_cog_tab <- table(read.table(file = paste0("T", maximum_penalty, "_narrow_COG.tsv"), sep = "\n"))
refined_cog_tab_ordered <- as.data.frame(refined_cog_tab[order(-refined_cog_tab)])
colnames(refined_cog_tab_ordered)[1] <- "Refined_external_only"
## Cloned the HGT table list ##
# Copy the HGT list, append the refined table, and drop any empty slots.
ordered_hgt_refined_tbl_list <- ordered_hgt_table_list
ordered_hgt_refined_tbl_list[[list_entry_counter]] <- refined_cog_tab_ordered
ordered_hgt_refined_tbl_list <- Filter(length, ordered_hgt_refined_tbl_list)
###########################################################################
## Read in the individual HGT and vertical tables ##
# One HGT and one vertical COG count table per penalty setting; each is also
# assign()-ed to a named top-level variable (e.g. T3_hgt_tab_ordered).
for (penalty in penalty_list) {
	penalty <- toupper(penalty)
	setwd(paste0("/users/aesin/desktop/Geo_analysis/HGT_sets/Functional_annotation/", penalty, IG_folder))
	## Read in and order tables ##
	read_in_hgt_table <- table(read.table(file = "HGT_narrow_COG.tsv", sep = "\n"))
	ordered_hgt_table <- as.data.frame(read_in_hgt_table[order(-read_in_hgt_table)])
	read_in_vert_table <- table(read.table(file = "Vertical_narrow_COG.tsv", sep = "\n"))
	ordered_vert_table <- as.data.frame(read_in_vert_table[order(-read_in_vert_table)])
	## Write out the tables individually ##
	name_hgt <- paste0(penalty, "_hgt_tab_ordered")
	name_vert <- paste0(penalty, "_vert_tab_ordered")
	assign(name_hgt, ordered_hgt_table)
	assign(name_vert, ordered_vert_table)
	## Put the table into the list, and rename the columns accordingly for merging ##
	ordered_hgt_table_list[[list_entry_counter]] <- ordered_hgt_table
	colnames(ordered_hgt_table_list[[list_entry_counter]])[1] <- paste0("HGT_", penalty)
	ordered_vert_table_list[[list_entry_counter]] <- ordered_vert_table
	colnames(ordered_vert_table_list[[list_entry_counter]])[1] <- paste0("Vert_", penalty)
	list_entry_counter = list_entry_counter + 1
}
###########################################################################
## Merge the per-penalty COG count tables into one wide data frame each for
## the HGT and vertical sets (rows = COG categories, columns = count sets).
merged_hgt <- Reduce(MyMerge, ordered_hgt_table_list)
merged_vert <- Reduce(MyMerge, ordered_vert_table_list)
# COGs absent from a given set come through the outer merge as NA -> 0 counts.
merged_hgt[is.na(merged_hgt)] <- 0
merged_vert[is.na(merged_vert)] <- 0
## The COG labels live in the rownames; move them into an explicit
## "COG_classes" column. BUG FIX: the original code assigned COG_classes from
## rownames(merged_hgt) and then immediately overwrote it with
## rownames(merged_vert), so the HGT table was labelled with the vertical
## table's row order (wrong whenever the two tables order rows differently).
## Each table now takes labels from its OWN rownames.
COG_classes_hgt <- rownames(merged_hgt)
COG_classes_vert <- rownames(merged_vert)
rownames(merged_hgt) <- NULL
rownames(merged_vert) <- NULL
merged_hgt <- cbind(COG_classes = COG_classes_hgt, merged_hgt)
merged_vert <- cbind(COG_classes = COG_classes_vert, merged_vert)
## Rearrange the columns such that All_HGT & All_Vertical (positions 3:4
## after the label and Total columns) appear at the end, independent of how
## many penalty columns there are.
merged_hgt <- cbind(merged_hgt[,-3:-4], merged_hgt[,3:4])
merged_vert <- cbind(merged_vert[,-3:-4], merged_vert[,3:4])
###########################################################################
## Pull out the penalty-specific column names, coerce into data frame ##
# Penalty columns sit between the (COG_classes, Total) pair at the front and
# the (All_HGT, All_Vertical) pair moved to the back above.
penalty_cols_hgt <- colnames(merged_hgt[ ,3:(ncol(merged_hgt)-2)])
penalty_cols_vert <- colnames(merged_vert[ ,3:(ncol(merged_vert)-2)])
# Total number of groups classified HGT / vertical at each penalty.
HGT_sums <- colSums(merged_hgt[penalty_cols_hgt])
Vert_sums <- colSums(merged_vert[penalty_cols_vert])
# Strip the "HGT_T" / "Vert_T" prefixes to recover the bare penalty numbers.
hgt_penalty_nums <- gsub(pattern = "HGT_T","", penalty_cols_hgt)
vert_penalty_nums <- gsub(pattern = "Vert_T","", penalty_cols_vert)
HGT_num_df <- data.frame("Penalty" = hgt_penalty_nums, "Groups_w_HGT" = HGT_sums, row.names = NULL)
Vert_num_df <- data.frame("Penalty" = vert_penalty_nums, "Groups_w_Vert" = Vert_sums, row.names = NULL)
###########################################################################
## Make the plots ##
# Convert raw counts to the percentage of each column's total, so the count
# sets are comparable despite different totals.
as_fraction_total_hgt <- sweep(merged_hgt[,-1],2,colSums(merged_hgt[,-1]), "/")*100
as_fraction_total_vert <- sweep(merged_vert[,-1],2,colSums(merged_vert[,-1]), "/")*100
final_table_hgt <- cbind(COG = merged_hgt[,1], as_fraction_total_hgt)
final_table_vert <- cbind(COG = merged_vert[,1], as_fraction_total_vert)
# Long format for ggplot (reshape2::melt).
final_hgt.molten <- melt(final_table_hgt, value.name="Percentage.of.group", variable.name="COG.category", na.rm = TRUE)
final_vert.molten <- melt(final_table_vert, value.name="Percentage.of.group", variable.name="COG.category", na.rm = TRUE)
final_hgt.molten.ordered <- final_hgt.molten
final_vert.molten.ordered <- final_vert.molten
# Re-level the COG factor by descending percentage so facets plot largest first.
final_hgt.molten.ordered$COG <- factor(final_hgt.molten$COG, levels = unique(final_hgt.molten$COG[order(-final_hgt.molten$Percentage.of.group)]))
final_vert.molten.ordered$COG <- factor(final_vert.molten$COG, levels = unique(final_vert.molten$COG[order(-final_vert.molten$Percentage.of.group)]))
#plot_1 <- ggplot(data = final.molten.ordered, aes(x = COG.category, y = Percentage.of.group, group = COG)) + geom_bar(stat = "identity") + facet_wrap("COG", nrow = 1) + theme(axis.text.x = element_text(angle = 90, hjust = 1))
# One facet per COG; bars per count set, with a red line overlaid across the
# penalty-specific columns only to show the trend over the penalty range.
plot_hgt <- ggplot(data = final_hgt.molten.ordered, aes(x = COG.category, y = Percentage.of.group, group = COG)) +
	geom_bar(position = "dodge", stat = "identity", colour = "black", fill = "white") +
	facet_wrap("COG", nrow = 1) +
	theme(axis.text.x = element_text(angle = 90, hjust = 1)) +
	ggtitle(paste0("Proportion of putative HGT events per COG over a penalty range - ", merged_relative_HGT_title)) +
	geom_line(data = subset(final_hgt.molten.ordered, COG.category %in% penalty_cols_hgt), colour = "red", size = 1)
plot_vert<- ggplot(data = final_vert.molten.ordered, aes(x = COG.category, y = Percentage.of.group, group = COG)) +
	geom_bar(position = "dodge", stat = "identity", colour = "black", fill = "white") +
	facet_wrap("COG", nrow = 1) +
	theme(axis.text.x = element_text(angle = 90, hjust = 1)) +
	ggtitle(paste0("Proportion of putative vertical events per COG over a penalty range - ", merged_relative_HGT_title)) +
	geom_line(data = subset(final_vert.molten.ordered, COG.category %in% penalty_cols_vert), colour = "red", size = 1)
###########################################################################
## Get the table which shows how many groups were originally reconciled to produce the data output ##
## NOTE(review): absolute, machine-specific path -- this will only run on
## the original author's machine. IG_ID / Scenario_ID / penalty_folder are
## assumed to be defined earlier in the script.
table_name <- paste0(IG_ID, "_intersect/", Scenario_ID, "/", "Groups_reconciled_per_penalty_", penalty_folder, ".tsv")
penalty_group_num_df <- read.table(paste0("/users/aesin/desktop/Mowgli/Mowgli_outputs/", table_name), header = TRUE)
names(penalty_group_num_df)[1] <- "Penalty"
## Join the reconciled-group counts with the per-penalty HGT / vertical
## totals computed above, keyed by penalty.
merged_penalty_df <- merge(penalty_group_num_df, HGT_num_df, by = "Penalty")
merged_penalty_df <- merge(merged_penalty_df, Vert_num_df, by = "Penalty")
merged_penalty_df.molten <- melt(merged_penalty_df, id.vars = "Penalty")
## One line+points series per variable (reconciled / HGT / vertical counts)
plot_vert_hgt_total_num <- ggplot(merged_penalty_df.molten, aes(x = Penalty, y = value, colour = variable)) + geom_line() + geom_point()
###########################################################################
## Make a plot to show the relative (fold) enrichment of HGT events per functional category ##
## relative_HGT = fraction of groups assigned HGT / fraction assigned vertical.
final_table_hgt_2 <- final_table_hgt
final_table_hgt_2$relative_HGT <- (final_table_hgt_2$All_HGT / final_table_hgt_2$All_Vertical)
## Rows 1:19 keep only the leading COG categories; the trailing rows are
## dropped (presumably the uninformative categories -- TODO confirm the row
## order is stable upstream).
relative_hgt_ext <- final_table_hgt_2[1:19, c(1, ncol(final_table_hgt_2))]
relative_hgt_ext$COG <- factor(relative_hgt_ext$COG, levels = unique(relative_hgt_ext$COG[order(-relative_hgt_ext$relative_HGT)]))
plot_all_HGT_all_Vert <- ggplot(relative_hgt_ext, aes(x = COG, y = relative_HGT, group = 1)) + geom_line()
## It would be good to see how this changes across penalty ranges ##
## Per-penalty element-wise ratio: HGT percentage / vertical percentage.
## The negative index arithmetic strips the COG column, the first penalty
## column, and the trailing All_* columns before dividing.
extract_hgt <- final_table_hgt[1:19, c(-1:-2,-ncol(final_table_hgt))]
vert_rearrange <- cbind(final_table_vert[,-(ncol(final_table_hgt)-1):-ncol(final_table_hgt)], final_table_vert[,ncol(final_table_hgt):(ncol(final_table_hgt)-1)])
extract_vert <- vert_rearrange[1:19, c(-1:-2,-(ncol(vert_rearrange)))]
x <- extract_hgt / extract_vert
## NOTE(review): COG_classes here must come from earlier in the script; the
## variable of the same name defined further below is assigned later.
x_named <- cbind(COG_classes[1:19], x)
names(x_named)[1] <- "COG"
x_named$COG <- factor(x_named$COG, levels = unique(x_named$COG[order(-x_named$All_HGT)]))
x_named.molten <- melt(data.frame(as.matrix(x_named), row.names = NULL), id.vars = "COG")
x_named.molten$value = as.numeric(x_named.molten$value)
## Title suffix records whether IG-only groups were included in this run
if (IG_included == TRUE) {
title_IG <- "(IG only groups included)"
} else {
title_IG <- "(IG only groups excluded)"
}
plot_relative_HGT <- ggplot(x_named.molten, aes(x = COG, y = value, group = variable, colour = variable)) + geom_line(size = 1.3) + scale_y_continuous("Relative HGT: Fraction of all groups assigned to HGT / fraction assigned to Vertical") + ggtitle(paste0("HGT signal by COG ", title_IG)) + scale_color_discrete(name = "HGT Penalties") + theme(plot.title = element_text(size = 20), legend.text = element_text(size = 16), legend.title = element_text(size = 18), axis.title.x = element_text(size = 14), axis.title.y = element_text(size = 14), axis.text = element_text(size = 14)) + guides(colour = guide_legend(title.hjust = 0.5))
###########################################################################
## For each condition (each penalty T value, with or without IG-only
## groups) extract the COG column plus the final relative-HGT column from
## `x_named` and stash it in a variable named e.g. "T4_IG" via assign().
## `penalty` and `IG_ID` are assumed set earlier for the current run.
## NOTE(review): assign() into the calling environment makes the data flow
## hard to trace; a named list would be clearer.
top_HGT_level <- paste0(penalty, "_", IG_ID)
COG_relative_col <- x_named[, c(1, ncol(x_named))]
names(COG_relative_col)[2] <- top_HGT_level
assign(as.character(top_HGT_level), COG_relative_col)
###########################################################################
## This section is only run once - it combines the outputs of several runs of the above ##
# library(RColorBrewer)
# MergeByCOG <- function(x, y){
# df <- merge(x, y, by = "COG", all = TRUE, sort = FALSE)
# return(df)
# }
# ## Merge the output of all the T (IG + NO_IG) values into a single able ##
# merged_relative_HGT <- Reduce(MergeByCOG, list(T4_IG, T4_NO_IG, T5_IG, T5_NO_IG, T6_IG, T6_NO_IG, T10_IG, T10_NO_IG, T20_IG, T20_NO_IG))
# ## Melt and make the y value a continuous variable ##
# merged_relative_HGT.molten <- melt(data.frame(as.matrix(merged_relative_HGT), row.names = NULL), id.vars = "COG")
# merged_relative_HGT.molten$value = as.numeric(merged_relative_HGT.molten$value)
# ## Set up a colour ramp palette ##
# col_ramp <- brewer.pal(9,"YlOrRd")
# col_palette <- colorRampPalette(col_ramp[1:9])(10)
# ## Make the plot ##
# plot_merged_relative_HGT <- ggplot(merged_relative_HGT.molten, aes(x = COG, y = value, group = variable, color = variable)) +
# geom_point(size = 4) +
# scale_y_continuous("Relative HGT: Fraction of all groups assigned to HGT / fraction assigned to Vertical") +
# ggtitle(paste0("HGT signal by COG where HGT is true across all penalties - ", merged_relative_HGT_title)) +
# theme(panel.background = element_rect(fill = "gray55"), panel.grid.major = element_line(color = "grey"), plot.title = element_text(size = 20), legend.key = element_rect(fill = "gray55"), legend.text = element_text(size = 16), legend.title = element_text(size = 18), axis.title.x = element_text(size = 14), axis.title.y = element_text(size = 14), axis.text = element_text(size = 14, colour = "black")) +
# guides(colour = guide_legend(title.hjust = 0.5)) +
# scale_color_manual(values = col_palette, name = "HGT Penalty Ranges")
# plot_merged_relative_HGT
###########################################################################
## Plot the refined / hgt / vertical / total graph ##
###########################################################################
## Merge the values into a single table ##
merged_refined <- Reduce(MyMerge, ordered_hgt_refined_tbl_list)
# Convert any NAs to 0 #
merged_refined[is.na(merged_refined)] <- 0
## Output has the COG labels as rownames -> remove rownames and add a column corresponding to the COG functional categories ##
COG_classes <- rownames(merged_refined)
rownames(merged_refined) <- NULL
merged_refined <- cbind(COG_classes, merged_refined)
## Rearrange the columns such that All_HGT & All_Vertical appear at the end (independent of column / penalty number) ##
merged_refined <- cbind(merged_refined[,-3:-4], merged_refined[,3:4])
###########################################################################
## Make the plots ##
as_fraction_total_refined <- sweep(merged_refined[,-1],2,colSums(merged_refined[,-1]), "/")*100
final_table_refined <- cbind(COG = merged_refined[,1], as_fraction_total_refined)
final_refined.molten <- melt(final_table_refined, value.name="Percentage.of.group", variable.name="COG.category", na.rm = TRUE)
final_refined.molten.ordered <- final_refined.molten
final_refined.molten.ordered$COG <- factor(final_refined.molten$COG, levels = unique(final_refined.molten$COG[order(-final_refined.molten$Percentage.of.group)]))
plot_refined <- ggplot(data = final_refined.molten.ordered, aes(x = COG.category, y = Percentage.of.group, group = COG, fill = COG.category)) +
geom_bar(position = "dodge", stat = "identity") +
facet_wrap("COG", nrow = 1) +
theme(axis.text.x = element_text(angle = 90, hjust = 1)) +
ggtitle(paste0("Proportion of putative HGT events per COG - ", merged_relative_HGT_title, " with penatly = ", maximum_penalty))
## Only total, external HGT and vertical ##
## Drop column 4 so only Total / predicted HGT / predicted Vertical remain.
final_table_refined_2 <- final_table_refined[-4]
# Drop unused factor levels, then order COGs by descending Total #
final_table_refined_2$COG <- droplevels(final_table_refined_2$COG)
final_table_refined_2$COG <- factor(final_table_refined_2$COG, levels = unique(final_table_refined_2$COG[order(-final_table_refined_2$Total)]))
names(final_table_refined_2)[2:4]<- c("All groups", "Predicted HGT", "Predicted Vertical")
final_refined_2.molten <- melt(final_table_refined_2, value.name="Percentage.of.group", variable.name="COG.category", na.rm = TRUE, factorsAsStrings = FALSE)
final_refined_2.molten$Percentage.of.group <- as.numeric(final_refined_2.molten$Percentage.of.group)
## Fixed typo in the title: "penatly" -> "penalty".
plot_refined_2 <- ggplot(data = final_refined_2.molten, aes(x = COG.category, y = Percentage.of.group, group = COG, fill = COG.category)) +
	geom_bar(position = "dodge", stat = "identity") +
	facet_wrap("COG", nrow = 1) +
	theme(axis.line.x = element_blank(), axis.text.x = element_blank(), axis.ticks.x = element_blank()) +
	ylab("Fraction of set assigned to each COG Category") +
	xlab("COG Category") +
	ggtitle(paste0("Proportion of putative HGT events per COG - ", merged_relative_HGT_title, " with penalty = ", maximum_penalty)) +
	scale_fill_discrete(name="COG Category") +
	scale_y_continuous(expand = c(0, 0), limits = c(0, 41))
## Write the same plot to SVG. Reuse the plot object instead of duplicating
## the entire ggplot call, and print() it explicitly: a bare top-level
## ggplot expression is NOT auto-printed when this script is source()d,
## which would previously have produced an empty SVG in that case.
devSVG(file = "/users/aesin/Dropbox/LSR/Presentation/COG_plot.svg")
print(plot_refined_2)
dev.off()
# ###########################################################################
# ## Chi-squared tests ##
# ## We remove the B and Z categories as uninformative ##
# # HGT vs Vertical #
# hgt_vert <- merged_hgt[1:19, (ncol(merged_hgt)-1):(ncol(merged_hgt))]
# chisq.test(hgt_vert)
# # p-value = 1.5498e-35
# # Intra-HGT comparison #
# hgt_3_4 <- merged[1:19, 3:4]
# hgt_4_5 <- merged[1:19, 4:5]
# hgt_3_5 <- merged[1:19, c(3, 5)]
# hgt_3_6 <- merged[1:19, c(3, 6)]
# chisq.test(hgt_3_4); # p-value 0.9655
# chisq.test(hgt_4_5); # p-value 0.9996
# chisq.test(hgt_3_5); # p-value 0.6622
# chisq.test(hgt_3_6); # p-value 0.7583
# # Compare the distribution of S-categorized groups #
# s_not_s <- merged[1,]
# not_s <- merged[-1,]
# s_not_s <- rbind(s_not_s, data.frame(COG="Not_S",t(colSums(not_s[,-1]))))
# s_not_s_hgt_vert <- s_not_s[, (ncol(s_not_s)-1):(ncol(s_not_s))]
# ## There is no significant difference in the distribution between the S and not-S categories
# chisq.test(s_not_s_hgt_vert); #p-value 0.6462
|
80771f1bdf6a42537fa7ea0c9d1f10b1e6bb43bd
|
384dd8ffffaf0b791f3934589ab008a0da22920b
|
/man/modelStatistics.Rd
|
10e5a6f2806115753dd61a160be1b35e15dbf610
|
[] |
no_license
|
cran/ndl
|
51ca423e2cdad9411883eb723a79a74034cd72f0
|
52291ac2f05d4591d139240501c87b888093892b
|
refs/heads/master
| 2021-01-06T20:41:47.421368
| 2018-09-10T12:40:02
| 2018-09-10T12:40:02
| 17,697,831
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,750
|
rd
|
modelStatistics.Rd
|
\name{modelStatistics}
\alias{modelStatistics}
\title{
Calculate a range of goodness of fit measures for an object fitted with some multivariate statistical method that yields probability estimates for outcomes.
}
\description{
\code{modelStatistics} calculates a range of goodness of fit
measures.
}
\usage{
modelStatistics(observed, predicted, frequency=NA, p.values,
n.data, n.predictors, outcomes=levels(as.factor(observed)),
p.normalize=TRUE, cross.tabulation=TRUE,
p.zero.correction=1/(NROW(p.values)*NCOL(p.values))^2)
}
\arguments{
\item{observed}{observed values of the response variable}
\item{predicted}{predicted values of the response variable; typically the outcome estimated to have the highest probability}
\item{frequency}{frequencies of observed and predicted values; if \code{NA}, frequencies equal to 1 for all observed and predicted values}
\item{p.values}{matrix of probabilities for all values of the response variable (i.e. outcomes)}
\item{n.data}{sum frequency of data points in model}
\item{n.predictors}{number of predictor levels in model}
\item{outcomes}{a vector with the possible values of the response variable}
\item{p.normalize}{if \code{TRUE}, probabilities are normalized so that \code{sum(P)} of all outcomes for each datapoint is equal to 1}
\item{cross.tabulation}{if \code{TRUE}, statistics on the crosstabulation of observed and predicted response values are calculated with \code{crosstableStatistics}}
\item{p.zero.correction}{a function to adjust slightly response/outcome-specific probability estimates which are exactly P=0; necessary for the proper calculation of pseudo-R-squared statistics; by default calculated on the basis of the dimensions of the matrix of probabilities \code{p.values}.}
}
\value{ A list with the following components: \describe{
\item{\code{loglikelihood.null}}{Loglikelihood for null model}
\item{\code{loglikelihood.model}}{Loglikelihood for fitted model}
\item{\code{deviance.null}}{Null deviance}
\item{\code{deviance.model}}{Model deviance}
\item{\code{R2.likelihood}}{(McFadden's) R-squared}
\item{\code{R2.nagelkerke}}{Nagelkerke's R-squared}
\item{\code{AIC.model}}{Akaike's Information Criterion}
\item{\code{BIC.model}}{Bayesian Information Criterion}
\item{\code{C}}{index of concordance C (for binary response variables only)}
\item{\code{crosstable}}{Crosstabulation of observed and predicted
outcomes, if \code{cross.tabulation=TRUE}}
\item{\code{crosstableStatistics(crosstable)}}{Various statistics
calculated on \code{crosstable} with \code{crosstableStatistics}, if
\code{cross.tabulation=TRUE}} }
}
\references{
Arppe, A. 2008. Univariate, bivariate and multivariate methods in
corpus-based lexicography -- a study of synonymy. Publications of the
Department of General Linguistics, University of Helsinki,
No. 44. URN: http://urn.fi/URN:ISBN:978-952-10-5175-3.
Arppe, A., and Baayen, R. H. (in prep.) Statistical modeling and the
principles of human learning.
Hosmer, David W., Jr., and Stanley Lemeshow 2000. Applied Logistic Regression
(2nd edition). New York: Wiley.
}
\author{
Antti Arppe and Harald Baayen
}
\seealso{
See also \code{\link{ndlClassify}}, \code{\link{ndlStatistics}}, \code{\link{crosstableStatistics}}.
}
\examples{
data(think)
think.ndl <- ndlClassify(Lexeme ~ Agent + Patient, data=think)
probs <- acts2probs(think.ndl$activationMatrix)$p
preds <- acts2probs(think.ndl$activationMatrix)$predicted
n.data <- nrow(think)
n.predictors <- nrow(think.ndl$weightMatrix) *
ncol(think.ndl$weightMatrix)
modelStatistics(observed=think$Lexeme, predicted=preds, p.values=probs,
n.data=n.data, n.predictors=n.predictors)
}
\keyword{ discriminative learning }
|
5d4ae8ddb3b83d80dc4d157b7c10c30ed2514106
|
5fad2095f7ba96cd8def6e237761b4cd0945c19d
|
/server.R
|
0d1afca6e20991c70830277d8460922552c0b89a
|
[] |
no_license
|
Omni-Analytics-Group/gitcoin-grants-round-11-badger-hackathon
|
c664df6309bd2a85159d0bea7632a345d32743c2
|
52a52775b85f37b773d0cb4e8c17dcbce182655a
|
refs/heads/main
| 2023-08-07T05:31:17.714653
| 2021-09-28T22:51:06
| 2021-09-28T22:51:06
| 411,401,914
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 15,422
|
r
|
server.R
|
## Load libraries
library(shinydashboard)
library(readr)
library(dygraphs)
library(lubridate)
library(xts)
library(httr)
library(dplyr)
library(ggplot2)
################################################################################
## Helper Functions
################################################################################
## Price Match
## Look up the value of column `var_n` in a sett-history table for a single
## date `date_m`; returns 0 when the date is absent from the table.
price_match_sett <- function(date_m, sett_hist, var_n = "value")
{
	hit <- match(date_m, as_date(sett_hist$Time))
	if (is.na(hit)) {
		return(0)
	}
	as.numeric(sett_hist[hit[1], var_n])
}
## Get Price
## Fetch the daily USD price series for a CoinGecko coin id, from
## 2021-01-01 up to now. Returns a data.frame with a Date column and one
## price column named after `c_id`.
get_price <- function(c_id)
{
	from_ts <- as.numeric(as_datetime("2021-01-01"))
	to_ts <- as.numeric(now())
	## Assemble the market_chart/range endpoint URL
	url <- paste0(
		"https://api.coingecko.com/api/v3/coins/",
		c_id,
		"/market_chart/range?vs_currency=usd&from=",
		from_ts,
		"&to=",
		to_ts
	)
	## $prices is a list of [timestamp_ms, price] pairs
	raw_prices <- content(GET(url))$prices
	price_df <- data.frame(
		Date = as_date(as_datetime(sapply(raw_prices, "[[", 1) / 1000)),
		Price = sapply(raw_prices, "[[", 2)
	)
	## Name the price column after the coin id
	names(price_df)[2] <- c_id
	return(price_df)
}
################################################################################
################################################################################
################################################################################
## Read Data
################################################################################
## Read in ETH data
## Per-chain inputs: a setts table (one row per sett), a prices table, and a
## per-sett history object (RDS) -- presumably produced by a separate
## data-pull step; confirm against the repo's data scripts.
eth_setts <- read_csv("api_data/eth_setts.csv",col_types = cols())
## Drop the first character of the vault token to form a display id
## (sub('.', ...) is a regex: '.' matches any single leading character).
eth_setts$id <- sub('.', '', eth_setts$Vault_Token)
eth_prices <- read_csv("api_data/eth_prices.csv",col_types = cols())
eth_setts_hist <- readRDS("api_data/eth_setts_hist.RDS")
## Read in BSC data
bsc_setts <- read_csv("api_data/bsc_setts.csv",col_types = cols())
bsc_prices <- read_csv("api_data/bsc_prices.csv",col_types = cols())
bsc_setts_hist <- readRDS("api_data/bsc_setts_hist.RDS")
## Read in MATIC data
matic_setts <- read_csv("api_data/matic_setts.csv",col_types = cols())
matic_prices <- read_csv("api_data/matic_prices.csv",col_types = cols())
matic_setts_hist <- readRDS("api_data/matic_setts_hist.RDS")
## Read in ARBITRUM data
arbitrum_setts <- read_csv("api_data/arbitrum_setts.csv",col_types = cols())
arbitrum_prices <- read_csv("api_data/arbitrum_prices.csv",col_types = cols())
arbitrum_setts_hist <- readRDS("api_data/arbitrum_setts_hist.RDS")
################################################################################
################################################################################
################################################################################
## Vault Volume around 5 Aug
################################################################################
time_start <- as_date("2021-01-01")
time_end <- as_date(today())
## Daily date index spanning the analysis window
val_time_df <- data.frame(Date = seq(time_start, time_end, by = "days"))
## ETH
## One column per ETH sett: the sett's "value" field on each date (0 when
## that date is missing from its history; see price_match_sett above).
val_time_df <- cbind(val_time_df,do.call(cbind,lapply(eth_setts_hist,function(x,y,z) sapply(y,price_match_sett,sett_hist=x,var_n=z),y=val_time_df$Date,z="value")))
names(val_time_df)[-1] <- eth_setts$id
## Aggregate columns in millions of USD. NOTE(review): 2:22 hard-codes 21
## ETH setts -- must equal nrow(eth_setts); revisit if setts are added.
val_time_df$All_Vaults <- apply(val_time_df[,2:22],1,sum,na.rm=TRUE)/10^6
val_time_df$Boosted_Vaults <- apply(val_time_df[,2:22][,eth_setts$If_Boostable],1,sum,na.rm=TRUE)/10^6
val_time_df$Non_Boosted_Vaults <- apply(val_time_df[,2:22][,!eth_setts$If_Boostable],1,sum,na.rm=TRUE)/10^6
## Drop leading dates before any vault had value, then convert to xts
val_time_df <- val_time_df[val_time_df$All_Vaults > 0,]
val_time_xts <- xts(val_time_df[,-1],order.by=val_time_df$Date)
################################################################################
################################################################################
################################################################################
## Vault Ratio around 5 Aug
################################################################################
## Per-sett "ratio" series on the same (filtered) date index as val_time_df
rat_time_df <- data.frame(Date = val_time_df$Date)
## ETH
rat_time_df <- cbind(rat_time_df,do.call(cbind,lapply(eth_setts_hist,function(x,y,z) sapply(y,price_match_sett,sett_hist=x,var_n=z),y=rat_time_df$Date,z="ratio")))
names(rat_time_df)[-1] <- eth_setts$id
rat_time_xts <- xts(rat_time_df[,-1],order.by=rat_time_df$Date)
################################################################################
################################################################################
################################################################################
## Vault Balance around 5 Aug
################################################################################
## Per-sett "balance" series; reuses rat_time_df$Date, which is identical to
## val_time_df$Date (set from it above).
bal_time_df <- data.frame(Date = val_time_df$Date)
## ETH
bal_time_df <- cbind(bal_time_df,do.call(cbind,lapply(eth_setts_hist,function(x,y,z) sapply(y,price_match_sett,sett_hist=x,var_n=z),y=rat_time_df$Date,z="balance")))
names(bal_time_df)[-1] <- eth_setts$id
bal_time_xts <- xts(bal_time_df[,-1],order.by=bal_time_df$Date)
################################################################################
################################################################################
################################################################################
## Vault Supply around 5 Aug
################################################################################
## Per-sett "supply" series on the same date index
sup_time_df <- data.frame(Date = val_time_df$Date)
## ETH
sup_time_df <- cbind(sup_time_df,do.call(cbind,lapply(eth_setts_hist,function(x,y,z) sapply(y,price_match_sett,sett_hist=x,var_n=z),y=rat_time_df$Date,z="supply")))
names(sup_time_df)[-1] <- eth_setts$id
sup_time_xts <- xts(sup_time_df[,-1],order.by=sup_time_df$Date)
################################################################################
################################################################################
################################################################################
## Token Prices around 5 Aug
################################################################################
## Badger & Digg Token
## Daily CoinGecko USD prices for BTC (benchmark), BADGER, and DIGG
btc_price <- get_price("bitcoin")
badger_price <- get_price("badger-dao")
digg_price <- get_price("digg")
## Join the three series on Date, then convert to xts for dygraphs
all_prices <- merge(merge(btc_price,badger_price),digg_price)
all_prices_xts <- xts(all_prices[,-1],order.by=all_prices$Date)
################################################################################
################################################################################
## Shiny server for the Badger Boost dashboard. All time-series objects
## (val_time_xts, all_prices_xts, bal/sup/rat_time_xts, eth_setts) are built
## once at file load above; this function only renders views of them.
function(input, output, session)
{
################################################################################
## Pre Post Boost
################################################################################
## AUM
## Narrative text + dygraph comparing boosted vs non-boosted AUM around the
## 2021-08-05 Boost launch (shaded window, log y-axis).
output$aum_writeup <- renderUI({h3("In this graph we see the change in Assets Under Management for Boosted setts slightly increased in the days after the implementation. This fact highlights a slight diverging pattern where the non-Boosted Setts saw a decrease in their Total Locked Value. By the end of the month, the AUM for Boosted Setts were at their highest, while the non-Boosted Setts continued to struggle to regain its former TVL apex.")})
output$aum_comparison <- renderDygraph({
dygraph(val_time_xts[,c("Boosted_Vaults","Non_Boosted_Vaults")], main = "Boosted vs Non Boosted Vaults AUM (Mainnet)",ylab = "Vault Value (in Millions USD)")%>%
dyAxis("y", logscale=TRUE) %>%
dyEvent("2021-8-05", "Boost Implementation", labelLoc = "bottom") %>%
dyLegend(width = 400) %>%
dyRangeSelector(dateWindow = c("2021-07-05", "2021-09-05")) %>%
dyShading(from = "2021-8-02", to = "2021-8-8", color = "#CCEBD6")
})
## Badger Token
## BADGER price around the Boost event
output$b_tok_writeup <- renderUI({h3("The graph above shows a pronounced and sustained increase in the price of Badger post-Boost implementation. Though other market influences are likely to have played a role, the beginning of the trend is quite noticeable as the price appreciated 2x quickly after the August 5th program implementation.")})
output$b_tok_comparison <- renderDygraph({
dygraph(all_prices_xts[,c("badger-dao")], main = "Badger Token Price Around Boost Event",ylab = "Badger Token Price in USD")%>%
# dySeries("bitcoin", axis = 'y2') %>%
# dyAxis("y2", label = "BTC Price for Benchmark", independentTicks = TRUE) %>%
dyEvent("2021-8-05", "Boost Implementation", labelLoc = "bottom") %>%
dyLegend(width = 400) %>%
dyRangeSelector(dateWindow = c("2021-07-05", "2021-09-05")) %>%
dyShading(from = "2021-8-01", to = "2021-8-8", color = "#CCEBD6")
})
## Digg Token
## DIGG vs BTC price around the Boost event (DIGG is pegged to BTC, per the
## write-up text below)
output$d_tok_writeup <- renderUI({h3("Prior to the August 5th Boost incentive program, the $DIGG token had spent a little over a week off of its peg as the price of $BTC remained consistently higher. Immediately after the announcement there was an abrupt and sudden positive rebase that placed the price of $DIGG firmly above $BTC until there were enough negative rebases to place the token back on peg. This graph shows that the effects of increased demand for native Badger assets can be significant enough to create noticeable price movements.")})
output$d_tok_comparison <- renderDygraph({
dygraph(all_prices_xts[,c("bitcoin","digg")], main = "Digg Token Price Around Boost Event",ylab = "Price in USD")%>%
dyEvent("2021-8-05", "Boost Implementation", labelLoc = "bottom") %>%
dyLegend(width = 400) %>%
dyRangeSelector(dateWindow = c("2021-07-05", "2021-09-05")) %>%
dyShading(from = "2021-8-01", to = "2021-8-8", color = "#CCEBD6")
})
################################################################################
################################################################################
################################################################################
## Micro Sett Statistics
################################################################################
## Metric selector: only rendered while the "stat_view" tab is active
output$stat_ui <- renderUI({
if(input$tabs!="stat_view") return(NULL)
selectizeInput('stat_sel', "Select Metric", choices = c("Value ($)","APR (%)","Token Ratio"), multiple = FALSE)
})
## Lollipop chart of the selected per-sett metric (value / APR / token ratio)
output$dot_plot <- renderPlot({
if(is.null(input$stat_sel)) return(NULL)
if(input$stat_sel == "Value ($)")
{
## Order setts ascending by the metric so the chart reads smallest-first
plot_data <- eth_setts %>% arrange(Value) %>% mutate(Sett = factor(id, levels = id))
pt <- ggplot(data = plot_data, aes(x = Value, y = Sett)) +
geom_point() +
geom_segment(aes(yend = Sett, x = 0, xend = Value)) +
scale_x_log10(labels = scales::dollar,
breaks = 10^(1:10)) +
labs(title = "USD Value for the Different Setts", x = "Value ($)")+
theme(plot.background = element_blank())
return(pt)
}
if(input$stat_sel == "APR (%)")
{
plot_data <- eth_setts %>% arrange(APR) %>% mutate(Sett = factor(id, levels = id)) %>%
mutate(Label = scales::percent(APR / 100, accuracy = 1))
pt <- ggplot(data = plot_data, aes(x = APR, y = Sett)) +
geom_point() +
geom_segment(aes(yend = Sett, x = 0, xend = APR)) +
geom_text(aes(label = Label), hjust = -0.2) +
scale_x_log10(labels = function(.) scales::percent(. / 100, accuracy = 1),
breaks = 100 * c(.02, .04, .08, .16, .32, .64, 1.28)) +
labs(title = "APR for the Different Setts", x = "APR (%)")+
theme(plot.background = element_blank())
return(pt)
}
if(input$stat_sel == "Token Ratio")
{
plot_data <- eth_setts %>% arrange(Token_Per_Share) %>% mutate(Sett = factor(id, levels = id))
pt <- ggplot(data = plot_data, aes(x = Token_Per_Share, y = Sett)) +
geom_point() +
geom_segment(aes(yend = Sett, x = 0, xend = Token_Per_Share)) +
scale_x_continuous(labels = scales::comma, breaks = scales::pretty_breaks(n = 10)) +
labs(title = "Token Ratio for the Different Setts")+
xlab("Token Ratio")+
theme(plot.background = element_blank())
return(pt)
}
}, bg="transparent")
################################################################################
################################################################################
################################################################################
## Micro Sett View (single-sett drill-down)
################################################################################
## Sett selector: only rendered while the "sett_view" tab is active.
## NOTE(review): names(val_time_xts)[1:21] hard-codes 21 ETH setts, matching
## the 2:22 column slice used when val_time_df was built above.
output$sett_ui <- renderUI({
if(input$tabs!="sett_view") return(NULL)
selectizeInput('sett_sel', "Select Sett", choices = names(val_time_xts)[1:21], multiple = FALSE)
})
## AUM history for the selected sett (zoom linked via group = "micro_sett")
output$sett_aum <- renderDygraph({
if(is.null(input$sett_sel)) return(NULL)
dygraph(val_time_xts[,input$sett_sel]/10^6, main = NULL,ylab = "Value in Million USD",group = "micro_sett")%>%
dyEvent("2021-8-05", "Boost Implementation", labelLoc = "bottom") %>%
dyLegend(width = 400) %>%
dyRangeSelector(dateWindow = c("2021-07-05", "2021-09-05")) %>%
dyShading(from = "2021-8-01", to = "2021-8-8", color = "#CCEBD6")
})
## Balance & supply (primary axis) with token ratio on a secondary axis
output$sett_bal_sup <- renderDygraph({
if(is.null(input$sett_sel)) return(NULL)
Balance <- bal_time_xts[,input$sett_sel]
Supply <- sup_time_xts[,input$sett_sel]
Ratio <- rat_time_xts[,input$sett_sel]
plot_xts <- cbind(Balance,Supply,Ratio)
names(plot_xts) <- c("Balance","Supply","Ratio")
dygraph(plot_xts, main = NULL,ylab = "Token Count",group = "micro_sett")%>%
dySeries("Ratio", axis = 'y2') %>%
dyAxis("y2", label = "Token Ratio", independentTicks = TRUE) %>%
dyEvent("2021-8-05", "Boost Implementation", labelLoc = "bottom") %>%
dyLegend(width = 400) %>%
dyShading(from = "2021-8-01", to = "2021-8-8", color = "#CCEBD6")
})
## Summary boxes for the selected sett. NOTE(review): these pair
## renderInfoBox() with valueBox() objects; shinydashboard's matched pair is
## renderValueBox()/valueBox() -- confirm the UI side uses the matching
## *Output functions.
output$sett_boostable <- renderInfoBox({
if(is.null(input$sett_sel)) return(valueBox("", ""))
response <- ifelse(eth_setts$If_Boostable[match(input$sett_sel,eth_setts$id)],"Yes","No")
box_c <- ifelse(response=="Yes","green","red")
valueBox(response, "Boosted?",color=box_c)
})
output$sett_deprecated <- renderInfoBox({
if(is.null(input$sett_sel)) return(valueBox("", ""))
response <- ifelse(eth_setts$If_Deprecated[match(input$sett_sel,eth_setts$id)],"Yes","No")
## Deprecated = bad, so the colour mapping is inverted vs "Boosted?"
box_c <- ifelse(response=="Yes","red","green")
valueBox(response, "Deprecated?",color=box_c)
})
## Sett value in millions of USD, 2 d.p.
output$sett_val_b <- renderInfoBox({
if(is.null(input$sett_sel)) return(valueBox("", ""))
response <- round(as.numeric(eth_setts$Value[match(input$sett_sel,eth_setts$id)])/10^6,2)
valueBox(response, "Sett Value (Million $)")
})
## Sett APR in percent, 2 d.p.
output$sett_apr_b <- renderInfoBox({
if(is.null(input$sett_sel)) return(valueBox("", ""))
response <- round(as.numeric(eth_setts$APR[match(input$sett_sel,eth_setts$id)]),2)
valueBox(response, "Sett APR %")
})
################################################################################
################################################################################
}
|
30b1c34895a0c5a9a6adb1f55282e3c8817ce4d3
|
8ef0f5c38582b1acea641ea3b0e62a534bdb9b31
|
/R/densityplot.R
|
d9c0593705787ccb285275c79f32b669f710e637
|
[] |
no_license
|
ptvan/flowIncubator
|
af3f699105a6f9e65965788c8b84b4d1fdcdc221
|
b212c701ba1fd1dd122edb9b7f49f5be674bc3e9
|
refs/heads/master
| 2020-12-31T02:02:22.889635
| 2015-11-06T17:17:19
| 2015-11-06T17:17:19
| 39,515,276
| 0
| 0
| null | 2015-07-22T15:52:01
| 2015-07-22T15:52:01
| null |
UTF-8
|
R
| false
| false
| 633
|
r
|
densityplot.R
|
#' Plot the parent population as a densityplot over the gate's channels
#'
#' Draws a 1-D densityplot of the parent population of \code{data}, using
#' the channels the gate at \code{data} is defined on. Handy for inspecting
#' the 1-D density of the parent when choosing a 1-D gating algorithm or
#' fine-tuning its parameters.
#'
#' @param x GatingHierarchy
#' @param data node/population name
#' @export
setMethod("densityplot", signature = c("GatingHierarchy", "character")
	, definition = function(x, data, ...){
		## Data of the node's parent population
		parent_frame <- getData(x, getParent(x, data))
		## Channels the gate at this node is defined on
		gate_channels <- parameters(getGate(x, data))
		densityplot(~., parent_frame, channels = gate_channels, ...)
	})
|
508ac2dd32ca0338088318fff7cbbd8451e04af1
|
d33d59cc443f48e4477793a38bdfd04f833995a5
|
/test/unittest/preprocessor/err.ifdefincomp.r
|
221f15a9d38c163472a886605840df6c73572051
|
[
"UPL-1.0"
] |
permissive
|
oracle/dtrace-utils
|
a1b1c920b4063356ee1d77f43a1a0dd2e5e963c3
|
b59d10aa886c2e5486f935c501c3c160a17fefa7
|
refs/heads/dev
| 2023-09-01T18:57:26.692721
| 2023-08-30T21:19:35
| 2023-08-31T20:40:03
| 124,185,447
| 80
| 20
|
NOASSERTION
| 2020-12-19T13:14:59
| 2018-03-07T05:45:16
|
C
|
UTF-8
|
R
| false
| false
| 242
|
r
|
err.ifdefincomp.r
|
-- @@stderr --
/dev/stdin:17: error: operator "defined" requires an identifier
/dev/stdin:17: error: unterminated #if
dtrace: failed to compile script test/unittest/preprocessor/err.ifdefincomp.d: Preprocessor failed to process input program
|
d72a00d5847ef2df0e7979d2d70ca8d303a9d22e
|
b0f77cca265f871fa01914deb0e7c6c8582ed6c3
|
/R_Scripts/2d-density-plot.r
|
a00b56f76c81cb984e0fcecff824dd30c6e711e4
|
[
"MIT"
] |
permissive
|
joshuakevinjones/Code_Files
|
2e94b8d0a79b73591e98828e89b5d80b8ed824d4
|
eefd7337ae10c743c80d79aaeacf4d5d54229b56
|
refs/heads/master
| 2021-01-22T13:26:37.565713
| 2020-06-11T18:02:44
| 2020-06-11T18:02:44
| 100,659,992
| 0
| 0
| null | 2017-08-25T21:37:34
| 2017-08-18T01:27:31
| null |
UTF-8
|
R
| false
| false
| 618
|
r
|
2d-density-plot.r
|
# 2D Density Plot
# Original source: R Graphics Cookbook

# load the gcookbook package (companion data sets for the book)
library(gcookbook)
# load the ggplot2 package
library(ggplot2)

# Reset the graphics device, but only if one is actually open:
# calling dev.off() with no open device raises an error.
if (dev.cur() > 1) {
  dev.off()
}

# Scatter plot of eruption duration vs. waiting time, overlaid with a
# 2D kernel-density heatmap.  The `..density..` spelling refers to the
# density variable computed by stat_density2d() itself.
# NOTE(review): newer ggplot2 (>= 3.4) prefers `after_stat(density)`;
# kept as-is here for compatibility with older ggplot2 installs.
p <- ggplot(faithful, aes(x = eruptions, y = waiting)) +
  # layer with the raw observations
  geom_point() +
  # density heatmap: both alpha and fill mapped to the computed density
  stat_density2d(aes(alpha = ..density.., fill = ..density..),
                 geom = "tile", contour = FALSE)
p
|
05323f366fa9edd540f78b678cf56a676a709ff6
|
d70dd8d045264d171e72b50801c09a8b716151d9
|
/library/bvpSolve/doc/examples/musn.R
|
e11f839396569946870bbdcbaf96b6ee9056bbd3
|
[] |
no_license
|
mayousif/FYDP
|
0351d5dd74e6901614e58774aea765bd35742fd0
|
8f43a6d29e60f02eb558323173987ce4b9869fe6
|
refs/heads/master
| 2020-12-09T23:05:32.166010
| 2020-04-04T22:12:16
| 2020-04-04T22:12:16
| 233,439,365
| 0
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,838
|
r
|
musn.R
|
## =============================================================================
## This is the example for MUSN in U. Ascher, R. Mattheij, and R. Russell,
## Numerical Solution of Boundary Value Problems for Ordinary Differential
## Equations, SIAM, Philadelphia, PA, 1995.  MUSN is a multiple shooting
## code for nonlinear BVPs.  The problem is
##
##      u' =  0.5*u*(w - u)/v
##      v' = -0.5*(w - u)
##      w' = (0.9 - 1000*(w - y) - 0.5*w*(w - u))/z
##      z' =  0.5*(w - u)
##      y' = -100*(y - w)
##
## The interval is [0 1] and the boundary conditions are
##
##      u(0) = v(0) = w(0) = 1, z(0) = -10, w(1) = y(1)
##
## note: there are two solutions...
## =============================================================================

# library() errors immediately if the package is missing;
# require() would only return FALSE and fail later.
library(bvpSolve)

## =============================================================================
## First method: shooting
## =============================================================================

# Derivative function of the ODE system; returns a list as required by
# the deSolve/bvpSolve calling convention.
musn <- function(t, Y, pars) {
  with(as.list(Y), {
    du <-  0.5 * u * (w - u) / v
    dv <- -0.5 * (w - u)
    dw <- (0.9 - 1000 * (w - y) - 0.5 * w * (w - u)) / z
    dz <-  0.5 * (w - u)
    dy <- -100 * (y - w)
    return(list(c(du, dv, dw, dz, dy)))
  })
}

x <- seq(0, 1, by = 0.05)

# Residual of the right-hand boundary condition: w(1) - y(1) = 0
res <- function(Y, yini, pars) with(as.list(Y), w - y)

# Initial values; NA = not available (solved for by shooting)
init <- c(u = 1, v = 1, w = 1, z = -10, y = NA)

print(system.time(
  sol <- bvpshoot(func = musn, yini = init, yend = res, x = x,
                  guess = 1, atol = 1e-10, rtol = 0)
))

# second solution, reached from a different initial guess...
sol2 <- bvpshoot(func = musn, yini = init, yend = res, x = x,
                 guess = 0.9, atol = 1e-10, rtol = 0)

pairs(sol)

# Check the solution by simple (initial value) integration;
# spell out `func =` / `ncol =` etc. in full to avoid partial matching.
yini <- sol[1, -1]
out <- as.data.frame(ode(func = musn, y = yini, parms = 0,
                         times = x, atol = 1e-10, rtol = 0))
out$w[nrow(out)] - out$y[nrow(out)]

## =============================================================================
## Solution method 2 : bvptwp
## =============================================================================

# Does not work unless good initial conditions are used.
# Boundary residual i of 5: four conditions on the left, one on the right.
bound <- function(i, y, pars) {
  with(as.list(y), {
    if (i == 1) return(u - 1)
    if (i == 2) return(v - 1)
    if (i == 3) return(w - 1)
    if (i == 4) return(z + 10)
    if (i == 5) return(w - y)
  })
}

xguess <- seq(0, 1, length.out = 5)
# 5 x 5 guess matrix, one column per guess point, filled column-wise
yguess <- matrix(ncol = 5, data = rep(c(1, 1, 1, -10, 0.91), 5))
rownames(yguess) <- c("u", "v", "w", "z", "y")

print(system.time(
  Sol <- bvptwp(yini = NULL, x = x, func = musn, bound = bound,
                xguess = xguess, yguess = yguess, leftbc = 4,
                atol = 1e-10)
))
plot(Sol)

# same using bvpshoot - not so quick
print(system.time(
  Sol2 <- bvpshoot(yini = NULL, x = x, func = musn, bound = bound,
                   leftbc = 4, guess = c(u = 1, v = 1, w = 1, z = -10, y = 0),
                   atol = 1e-10)
))
|
611b37e37fb210f4332201821ae260224050bd98
|
d490fa1324d85e5e5c0a327358c09c9a5da969e8
|
/man/plot_R.Rd
|
9343bb972cc68975f244e85725ac18597f5baeb7
|
[] |
no_license
|
HaikunXu/IATTCassessment
|
63e990425800bd0f3e2ae8b193e21cda597d0624
|
cb584cf47f2a22a616aaacd71a92542fe85e6dbd
|
refs/heads/master
| 2023-07-19T15:18:17.521122
| 2023-07-10T21:09:50
| 2023-07-10T21:09:50
| 191,084,703
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 411
|
rd
|
plot_R.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/plot_R.R
\name{plot_R}
\alias{plot_R}
\title{Plot quarterly and annual recruitment}
\usage{
plot_R(
SS_Dir,
lyear,
fyear,
legend,
Save_Dir,
ymax,
figure_name,
title,
xlim,
alpha = 0.1,
ref = 0
)
}
\description{
\code{plot_R} This function plots quarterly and annual recruitment for the stock assessment report.
}
|
afed498d968cda40a414e11ba0704a579499cd7a
|
12cdcabbc263d7dd18823feb5f59e3449fe57649
|
/Arquivos de aula/2_3_Estatis_descrit.R
|
82a958a73b3acddb91045d3ad418dc50bc837dd6
|
[] |
no_license
|
saraselis/Machine-Learning-em-R-IESB
|
cb2d96d0d1bdbd79ddb8d311921181e4022b9230
|
d4fa94911c9db34677d68034b1c07821f4f22c0f
|
refs/heads/master
| 2020-06-14T03:01:13.905315
| 2019-08-10T01:06:22
| 2019-08-10T01:06:22
| 193,600,144
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,058
|
r
|
2_3_Estatis_descrit.R
|
#### MAIN STATISTICAL FUNCTIONS ####

## Create a vector of 5 numbers, all equal to 2
numeros <- rep(x = 2, 5)
numeros

## Standard deviation
sd(numeros)

## Mean
mean(numeros)

## Median
median(numeros)

## Maximum and minimum
max(numeros)
min(numeros)

## Statistical summary of the vector (does not include sd)
summary(numeros)

#### FUNCTIONS FOR GENERATING NUMBERS ####
seq(from = 1, to = 10, by = 1)  # creates a sequential vector from 1 to 10
sample(x = 5, size = 3)  # 3 random numbers between 1 and 5, without replacement
sample(x = 5, size = 6)  # NOTE: errors on purpose -- cannot draw 6 from 5 without replacement
# use TRUE, never T: T is an ordinary variable and can be reassigned
sample(x = 5, size = 6, replace = TRUE)  # 6 random numbers between 1 and 5, with replacement

#### EXERCISE 1 ####
## Create a vector of size 20 with random numbers between 1 and 10,
## then compute the mean, median, maximum and minimum.
## ATTENTION: run set.seed(234) FIRST
set.seed(234)

## Generate the vector of numbers
numeros <- sample(x = 10, size = 20, replace = TRUE)
numeros

## Compute the summary statistics
summary(numeros)
|
fe47119d37c49805087c3ab0ccb6c4ee4704a25b
|
2bec5a52ce1fb3266e72f8fbeb5226b025584a16
|
/SpecsVerification/man/Auc.Rd
|
efc2b436f3a86d5f5df783b11ad3437a647e1e35
|
[] |
no_license
|
akhikolla/InformationHouse
|
4e45b11df18dee47519e917fcf0a869a77661fce
|
c0daab1e3f2827fd08aa5c31127fadae3f001948
|
refs/heads/master
| 2023-02-12T19:00:20.752555
| 2020-12-31T20:59:23
| 2020-12-31T20:59:23
| 325,589,503
| 9
| 2
| null | null | null | null |
UTF-8
|
R
| false
| true
| 1,545
|
rd
|
Auc.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/Auc.R
\name{Auc}
\alias{Auc}
\title{Calculate area under the ROC curve (AUC) for a forecast and its verifying binary observation, and estimate the variance of the AUC}
\usage{
Auc(
fcst,
obs,
handle.na = c("na.fail", "only.complete.pairs"),
use_fn = c("C++", "R")
)
}
\arguments{
\item{fcst}{vector of forecasts}
\item{obs}{vector of binary observations (0 for non-occurrence, 1 for occurrence of the event)}
\item{handle.na}{how should missing values in forecasts and observations be handled; possible values are 'na.fail' and 'only.complete.pairs'; default: 'na.fail'}
\item{use_fn}{the function used for the calculation: 'C++' (default) for the fast C++ implementation, or 'R' for the slow (but more readable) R implementation}
}
\value{
vector containing AUC and its estimated sampling standard deviation
}
\description{
Calculate area under the ROC curve (AUC) for a forecast and its verifying binary observation, and estimate the variance of the AUC
}
\examples{
data(eurotempforecast)
Auc(rowMeans(ens.bin), obs.bin)
}
\references{
DeLong et al (1988): Comparing the Areas under Two or More Correlated Receiver Operating Characteristic Curves: A Nonparametric Approach. Biometrics. \url{https://www.jstor.org/stable/2531595}
Sun and Xu (2014): Fast Implementation of DeLong's Algorithm for Comparing the Areas Under Correlated Receiver Operating Characteristic Curves. IEEE Sign Proc Let 21(11). \doi{10.1109/LSP.2014.2337313}
}
\seealso{
AucDiff
}
|
2a05a1b3075970784c88a9ec087165ff202f7f66
|
9190fb72ead2d5b4c261653f3cf636e5f293bbed
|
/plot4.R
|
6642fcabcb60adf213d4e54a3ee6b812e01ed838
|
[] |
no_license
|
acrost/ExData_Plotting1
|
67034ab7788e65890c42a3b265a6c042eb4978cf
|
ef2a31842d233ffb863411d61ff29ea405df81ce
|
refs/heads/master
| 2021-01-22T15:35:05.202683
| 2015-01-11T21:43:00
| 2015-01-11T21:43:00
| 29,078,314
| 0
| 0
| null | 2015-01-11T02:04:54
| 2015-01-11T02:04:53
| null |
UTF-8
|
R
| false
| false
| 2,656
|
r
|
plot4.R
|
# plot4.R: recreate a 2x2 panel of household electric-power plots.
# Reads "household_power_consumption.txt" from the working directory
# and writes the result to "plot4.png".

#read in Household Power Consumption text file as a table
# header=T: first row holds column names; "?" values become NA
Power_table<-read.table("household_power_consumption.txt", header=T, sep=';', na.strings="?")

# only use observations from February 1 and 2, 2007
Power_table <- Power_table[(Power_table$Date == "1/2/2007") | (Power_table$Date == "2/2/2007"),]

# create a new column that combines date and time called Date_Time
# the new format is Day/Month/Year Hour:Minute:Second
# NOTE(review): strptime() returns POSIXlt; storing POSIXlt in a
# data.frame column works for plotting here, but as.POSIXct() is the
# usual choice for data-frame columns -- confirm before reusing.
Power_table$Date_Time <- strptime(paste(Power_table$Date, Power_table$Time), "%d/%m/%Y %H:%M:%S")

# open a new png file named "plot4.png" as the graphical device.
# Width and height are 480 pixels and the background is white
png(filename="plot4.png", width = 480, height = 480, units= "px", bg= "white")

# change plotting parameters to accept 4 graphs in a 2 x 2 grid,
# moving by row from upper left to bottom right
par(mfrow=c(2,2))

# FIRST PLOT (upper left) ...Note: this recreates Plot 2
# create a line plot of Day and Time vs. Global Active Power observations
plot(Power_table$Date_Time, Power_table$Global_active_power, type = "l", xlab="",
ylab= "Global Active Power")

# SECOND PLOT (upper right)
# create a line plot of Day and Time vs. Voltage observations
plot(Power_table$Date_Time, Power_table$Voltage, type = "l", xlab="datetime",
ylab= "Voltage")

# THIRD PLOT (lower left) ...Note: this recreates Plot 3
# Create a line plot of the observations for Sub Metering 1, 2, and 3
# First, plot Sub_metering_1 in a black line
# Since this is the first plot, print the Y axis label
# Second, plot Sub_metering_2 in a red line
# Third, plot Sub_metering_3 in a blue line
plot(x=Power_table$Date_Time, y=Power_table$Sub_metering_1, type="l",
xlab="", ylab="Energy sub metering")
lines(x=Power_table$Date_Time, y=Power_table$Sub_metering_2, col="red")
lines(x=Power_table$Date_Time, y=Power_table$Sub_metering_3, col="blue")

# resize the font. This is a smaller graph, the font is smaller
# because the legend must fit
# NOTE: par(cex=) set here also affects the fourth plot below
par(cex= 0.75)

# Create and place legend in the top right corner of the graph
# List the three variables and display the corresponding line color
# Makes the legend border and background transparent
legend("topright", c("Sub_metering_1", "Sub_metering_2", "Sub_metering_3"),
lty=c(1,1,1), col=c("black", "red", "blue"),bg= "transparent",box.col = "transparent")

# FOURTH PLOT (lower right)
# create a line plot of Day and Time vs. Global Reactive Power observations
plot(Power_table$Date_Time, Power_table$Global_reactive_power, type = "l", xlab="datetime",
ylab= "Global_reactive_power")

# shut down the graphical device because plot4 is complete
dev.off()
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.