blob_id
stringlengths 40
40
| directory_id
stringlengths 40
40
| path
stringlengths 2
327
| content_id
stringlengths 40
40
| detected_licenses
listlengths 0
91
| license_type
stringclasses 2
values | repo_name
stringlengths 5
134
| snapshot_id
stringlengths 40
40
| revision_id
stringlengths 40
40
| branch_name
stringclasses 46
values | visit_date
timestamp[us]date 2016-08-02 22:44:29
2023-09-06 08:39:28
| revision_date
timestamp[us]date 1977-08-08 00:00:00
2023-09-05 12:13:49
| committer_date
timestamp[us]date 1977-08-08 00:00:00
2023-09-05 12:13:49
| github_id
int64 19.4k
671M
⌀ | star_events_count
int64 0
40k
| fork_events_count
int64 0
32.4k
| gha_license_id
stringclasses 14
values | gha_event_created_at
timestamp[us]date 2012-06-21 16:39:19
2023-09-14 21:52:42
⌀ | gha_created_at
timestamp[us]date 2008-05-25 01:21:32
2023-06-28 13:19:12
⌀ | gha_language
stringclasses 60
values | src_encoding
stringclasses 24
values | language
stringclasses 1
value | is_vendor
bool 2
classes | is_generated
bool 2
classes | length_bytes
int64 7
9.18M
| extension
stringclasses 20
values | filename
stringlengths 1
141
| content
stringlengths 7
9.18M
|
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
4d3e48587e8db1a4a16c775054085f7c278495ed
|
126ca45750aba7dc86fe804044ebbd0701ca997e
|
/r_scripts/interpreting_regression_coefficients.R
|
98152c9ac02759bc52bc4b506cdee125a0e5d753
|
[] |
no_license
|
stonegold546/website
|
95a3636b560be2f48dbecd9c2fe19015e2f4d080
|
b7789022c823d4a250a002a829218c7f65ce2848
|
refs/heads/master
| 2021-01-16T18:25:02.874669
| 2018-01-15T07:12:31
| 2018-01-15T07:12:31
| 100,074,012
| 0
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 353
|
r
|
interpreting_regression_coefficients.R
|
## Demo script: interpreting regression coefficients using the HSB
## (High School and Beyond) combined dataset.
## NOTE(review): relative path -- assumes the working directory contains a
## "datasets" folder; confirm before sourcing.
hsb <- read.csv("datasets/hsb_comb_full.csv")
names(hsb)
# Let's go with the first school, and the first 5 student-level variables
hsb <- hsb[hsb$schoolid == hsb$schoolid[1], 1:5]
summary(hsb)
# Mathach, ses and female seem to have some variability
# Let's predict math achievement using female (dummy), ses (continuous)
lm(mathach ~ female + ses, hsb)
|
6bc2ae2c6332ff920d35cfc13c0f6187681f45a4
|
2a7e77565c33e6b5d92ce6702b4a5fd96f80d7d0
|
/fuzzedpackages/gRain/man/finding.Rd
|
71dee37ea564ae88208a67bb68250af76b8ad182
|
[] |
no_license
|
akhikolla/testpackages
|
62ccaeed866e2194652b65e7360987b3b20df7e7
|
01259c3543febc89955ea5b79f3a08d3afe57e95
|
refs/heads/master
| 2023-02-18T03:50:28.288006
| 2021-01-18T13:23:32
| 2021-01-18T13:23:32
| 329,981,898
| 7
| 1
| null | null | null | null |
UTF-8
|
R
| false
| true
| 2,854
|
rd
|
finding.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/finding.R
\name{finding}
\alias{finding}
\alias{setFinding}
\alias{retractFinding}
\alias{getFinding}
\alias{pFinding}
\title{Set, retrieve, and retract finding in Bayesian network.}
\usage{
setFinding(object, nodes = NULL, states = NULL, flist = NULL, propagate = TRUE)
}
\arguments{
\item{object}{A "grain" object}
\item{nodes}{A vector of nodes}
\item{states}{A vector of states (of the nodes given by 'nodes')}
\item{flist}{An alternative way of specifying findings, see
examples below.}
\item{propagate}{Should the network be propagated?}
}
\description{
Set, retrieve, and retract finding in Bayesian
network. NOTICE: The functions described here are kept only
for backward compatibility; please use the corresponding
evidence-functions in the future.
}
\note{
NOTICE: The functions described here are kept only for
backward compatibility; please use the corresponding
evidence-functions in the future:
\code{setEvidence()} is an improvement of \code{setFinding()} (and as such
\code{setFinding} is obsolete). Users are recommended to use
\code{setEvidence()} in the future.
\code{setEvidence()} allows specification of "hard evidence" (specific
values for variables) and likelihood evidence (also known as virtual
evidence) for variables.
The syntax of \code{setEvidence()} may change in the future.
}
\examples{
## setFindings
yn <- c("yes", "no")
a <- cptable(~asia, values=c(1,99),levels=yn)
t.a <- cptable(~tub+asia, values=c(5,95,1,99),levels=yn)
s <- cptable(~smoke, values=c(5,5), levels=yn)
l.s <- cptable(~lung+smoke, values=c(1,9,1,99), levels=yn)
b.s <- cptable(~bronc+smoke, values=c(6,4,3,7), levels=yn)
e.lt <- cptable(~either+lung+tub,values=c(1,0,1,0,1,0,0,1),levels=yn)
x.e <- cptable(~xray+either, values=c(98,2,5,95), levels=yn)
d.be <- cptable(~dysp+bronc+either, values=c(9,1,7,3,8,2,1,9), levels=yn)
chest.cpt <- compileCPT(a, t.a, s, l.s, b.s, e.lt, x.e, d.be)
chest.bn <- grain(chest.cpt)
## These two forms are equivalent
bn1 <- setFinding(chest.bn, nodes=c("asia", "xray"), states=c("yes", "yes"))
bn2 <- setFinding(chest.bn, flist=list(c("asia", "yes"), c("xray", "yes")))
getFinding(bn1)
getFinding(bn2)
pFinding(bn1)
pFinding(bn2)
bn1 <- retractFinding(bn1, nodes="asia")
bn2 <- retractFinding(bn2, nodes="asia")
getFinding(bn1)
getFinding(bn2)
pFinding(bn1)
pFinding(bn2)
}
\references{
Søren Højsgaard (2012). Graphical Independence Networks
with the gRain Package for R. Journal of Statistical Software, 46(10), 1-26.
\url{http://www.jstatsoft.org/v46/i10/}.
}
\seealso{
\code{\link{setEvidence}}, \code{\link{getEvidence}},
\code{\link{retractEvidence}}, \code{\link{pEvidence}},
\code{\link{querygrain}}
}
\author{
Søren Højsgaard, \email{sorenh@math.aau.dk}
}
\keyword{models}
\keyword{utilities}
|
01cd47025a7a061c2c219a4945acdfcdd048400d
|
f76402444a7595f7f3df6a0eb1f8152daf07f1a6
|
/inst/variancecomponents/baseball/demo_gibbsflow_sis.R
|
6ac42e8ca5332a44d692280888bc4b901214ef82
|
[] |
no_license
|
jeremyhengjm/GibbsFlow
|
d2cb72e6e1dc857469ff490f566a3a28ff1f90d8
|
e15b2978628eba79a9930fb27abb906895abe807
|
refs/heads/master
| 2021-06-27T13:17:50.151264
| 2021-02-12T12:08:06
| 2021-02-12T12:08:06
| 337,633,398
| 3
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,703
|
r
|
demo_gibbsflow_sis.R
|
## Demo: sequential importance sampling (SIS) along a Gibbs flow for the
## baseball variance-components model. All baseball_* functions come from
## the GibbsFlow package loaded below.
## NOTE(review): rm(list = ls()) wipes the global environment -- acceptable
## for a standalone demo, but do not source() this inside a live session.
rm(list = ls())
library(GibbsFlow)
library(tictoc)
library(ggplot2)
# prior
## Artificial prior used as the SIS starting distribution: log-density,
## its gradient, and a sampler.
prior <- list()
prior$logdensity <- function(x) as.numeric(baseball_artificial_logprior(x))
prior$gradlogdensity <- function(x) baseball_gradlogprior_artificial(x)
prior$rinit <- function(n) baseball_sample_artificial_prior(n)
# likelihood
## "Likelihood" here is the ratio target/(artificial prior), so that
## artificial-prior x this-likelihood equals the true posterior.
likelihood <- list()
likelihood$logdensity <- function(x) as.numeric(baseball_logprior(x) +
baseball_loglikelihood(x) -
baseball_artificial_logprior(x))
likelihood$gradlogdensity <- function(x) baseball_gradlogprior(x) +
baseball_gradloglikelihood(x) -
baseball_gradlogprior_artificial(x)
# define functions to compute gibbs flow (and optionally velocity)
exponent <- 2  # tempering schedule lambda(t) = t^exponent
compute_gibbsflow <- function(stepsize, lambda, lambda_next, derivative_lambda, x, logdensity) baseball_gibbsflow(stepsize, lambda, lambda_next, derivative_lambda, x, logdensity)
gibbsvelocity <- function(t, x) as.matrix(baseball_gibbsvelocity(t, x, exponent))
# smc settings
nparticles <- 2^7
nsteps <- 50
timegrid <- seq(0, 1, length.out = nsteps)
lambda <- timegrid^exponent
derivative_lambda <- exponent * timegrid^(exponent - 1)
# run sampler
tic()
smc <- run_gibbsflow_sis(prior, likelihood, nparticles, timegrid, lambda, derivative_lambda, compute_gibbsflow, gibbsvelocity)
toc()
# ess plot
## Effective sample size over time, as a percentage of nparticles.
ess.df <- data.frame(time = 1:nsteps, ess = smc$ess * 100 / nparticles)
ggplot(ess.df, aes(x = time, y = ess)) + geom_line() +
labs(x = "time", y = "ESS%") + ylim(c(0, 100))
# normalizing constant plot
normconst.df <- data.frame(time = 1:nsteps, normconst = smc$log_normconst)
ggplot() + geom_line(data = normconst.df, aes(x = time, y = normconst), colour = "blue") +
labs(x = "time", y = "log normalizing constant")
# norm of gibbs velocity
## Inter-quartile band and median of the Gibbs velocity norm per time step.
normvelocity.df <- data.frame(time = timegrid,
lower = apply(smc$normvelocity, 2, function(x) quantile(x, probs = 0.25)),
median = apply(smc$normvelocity, 2, median),
upper = apply(smc$normvelocity, 2, function(x) quantile(x, probs = 0.75)))
gnormvelocity <- ggplot(normvelocity.df, aes(x = time, y = median, ymin = lower, ymax = upper))
gnormvelocity <- gnormvelocity + geom_pointrange(alpha = 0.5) +
xlim(0, 1) + # scale_y_continuous(breaks = c(0, 40, 80, 120)) +
xlab("time") + ylab("norm of Gibbs velocity")
gnormvelocity
## NOTE(review): hard-coded user-specific Dropbox path; will fail on other
## machines -- parameterize before reuse.
ggsave(filename = "~/Dropbox/GibbsFlow/draft_v3/vcmodel_baseball_normvelocity_gfsis.pdf", plot = gnormvelocity,
device = "pdf", width = 6, height = 6)
|
e4ba2868b3fa4a6bdf59726545ac6432ba0c93f0
|
6a477dfdb76af585f1760767053cabf724a341ce
|
/inst/examples/retired/ex-mk_intervalplot.R
|
d662431d105e065292439dbe03df0b49886d4504
|
[] |
no_license
|
gmlang/ezplot
|
ba94bedae118e0f4ae7448e6dd11b3ec20a40ab4
|
9c48771a22d3f884d042a6185939765ae534cb84
|
refs/heads/master
| 2022-09-20T11:43:50.578034
| 2022-09-16T02:05:24
| 2022-09-16T02:05:24
| 34,095,132
| 6
| 2
| null | null | null | null |
UTF-8
|
R
| false
| false
| 752
|
r
|
ex-mk_intervalplot.R
|
## Retired example for ezplot::mk_intervalplot().
## NOTE(review): relies on the `films` dataset shipped with ezplot.
library(ezplot)
library(tidyr)
library(dplyr)
# ex1
## Summarise budget per year category: median plus min/max as the interval.
dat = films %>% select(year_cat, budget) %>% group_by(year_cat) %>%
summarise(mid = median(budget), lwr = min(budget), upr = max(budget))
dat
plt = mk_intervalplot(dat)
title = "Budget Range from 1913 to 2014"
p = plt(xvar="year_cat", yvar="mid", ymin_var="lwr", ymax_var="upr",
ylab="budget ($)", main=title)
scale_axis(p, scale = "log10")
# ex2
## Prediction intervals from a simple linear model on log10 boxoffice.
fit = lm(log10(boxoffice) ~ year_cat, data=films)
pred = predict(fit, films, interval="prediction")
dat = data.frame(year_cat=films$year_cat, pred)
plt = mk_intervalplot(dat)
p = plt("year_cat", "fit", ymin_var="lwr", ymax_var="upr",
ylab="predicted log10(budget) ($)",
main="Budget Prediction Using year_cat")
p
|
89031967f4296d9a46658a82226891dd9944bfe0
|
ebd63bc43c6cac99f78425e5ed72afac467c4e2f
|
/cachematrix.R
|
a94c7b37dd7c818af3f838c5b916dfa8f35f3ced
|
[] |
no_license
|
k29jm7e/ProgrammingAssignment2
|
012f249b48be0cd8ce3d1faa3548c2d568010d77
|
ccf2262a15591c5bb7216e3b5f4d29068e5b95ab
|
refs/heads/master
| 2021-01-11T00:52:52.329274
| 2016-10-13T19:11:57
| 2016-10-13T19:11:57
| 70,457,675
| 0
| 0
| null | 2016-10-10T06:18:04
| 2016-10-10T06:18:03
| null |
UTF-8
|
R
| false
| false
| 1,302
|
r
|
cachematrix.R
|
## makeCacheMatrix() and cacheSolve() work together to create a matrix and
## cache its inverse: the inverse is stored in a special object and later
## retrieved from the cache instead of being recomputed.
##
## Create a special "matrix" object that can cache its inverse.
## x: the matrix to wrap (defaults to an empty 1x1 NA matrix).
## Returns a list of four accessor functions; the matrix and its cached
## inverse live in this function's closure environment.
## NOTE: the original file contained pasted REPL continuation prompts
## ("+ ") at the start of each body line, which made it unparseable R;
## they have been removed.
makeCacheMatrix <- function(x = matrix()) {
  inv <- NULL                        # cached inverse; NULL until computed
  set <- function(y) {
    x <<- y                          # replace the stored matrix ...
    inv <<- NULL                     # ... and invalidate the stale cache
  }
  get <- function() x
  setInverse <- function(inverse) inv <<- inverse
  getInverse <- function() inv
  list(set = set,
       get = get,
       setInverse = setInverse,
       getInverse = getInverse)
}
## Compute the inverse of the special "matrix" returned by
## `makeCacheMatrix`. If the inverse has already been calculated (and the
## matrix has not changed), `cacheSolve` retrieves the inverse from the
## cache instead of recomputing it.
## This function assumes that the matrix is always invertible.
## x:   an object created by makeCacheMatrix()
## ...: further arguments passed on to solve()
## NOTE: the original file contained pasted REPL continuation prompts
## ("+ ") at the start of each body line, which made it unparseable R;
## they have been removed.
cacheSolve <- function(x, ...) {
  ## Return a matrix that is the inverse of 'x'
  inv <- x$getInverse()
  if (!is.null(inv)) {
    message("getting cached data")   # cache hit: skip the computation
    return(inv)
  }
  mat <- x$get()
  inv <- solve(mat, ...)             # solve(A) with one argument inverts A
  x$setInverse(inv)                  # store for subsequent calls
  inv
}
|
c040e0dcf3259b31df116eb691442fa26a4745fa
|
cb06c3a5f797bbdc86e5b25c71e6f52184a3ee04
|
/plot3.R
|
123144c9def75ea7f2773150c0a16257b77b658f
|
[] |
no_license
|
TerryDuan/ExData_Plotting1
|
0f7aef3593897bd6d4f813a602411daa8d92e876
|
cb1470fc6bbf38a201ed64fcf6f28235542a253c
|
refs/heads/master
| 2021-01-15T08:20:06.655869
| 2015-07-10T18:51:15
| 2015-07-10T18:51:15
| 38,783,220
| 0
| 0
| null | 2015-07-08T22:22:44
| 2015-07-08T22:22:44
| null |
UTF-8
|
R
| false
| false
| 1,273
|
r
|
plot3.R
|
##the wording dir is set to default, so need redirect to where raw data located
## Plot three sub-metering series (household power consumption, 2007-02-01
## and 2007-02-02) overlaid on one chart and save it as plot3.png.
## NOTE(review): tbl_df() comes from dplyr/tibble, which is never loaded
## here -- this function errors unless the caller has attached it; confirm.
plot3 <- function(){
data <- read.csv("~/myR/data/household_power_consumption.txt",sep=";", stringsAsFactors = FALSE)
data2<- tbl_df(data)
# keep only the two target days; dates are in d/m/Y text form
data3<-data2[data2["Date"] == "1/2/2007" | data2["Date"] == "2/2/2007",]
# "?" marks missing measurements in the raw file
data3<-data3[data3["Global_active_power"] != "?",]
cols <- c("Date", "Time")
# build a combined "Date Time" string, then parse to POSIXlt
data3$Day <- apply(data3[,cols],1, paste, collapse = " ")
data3$Day <- strptime(data3$Day, "%d/%m/%Y %H:%M:%S")
# common y-range taken from Sub_metering_1 (the largest of the three)
Sub1 <- as.numeric(data3$Sub_metering_1)
# NOTE(review): ylab says "Global Active Power (kilowatts)" but the plotted
# series are Sub_metering_1..3 (energy sub metering) -- label looks wrong.
plot(data3$Day, data3$Sub_metering_1, ylim = range(Sub1),lines(data3$Day, data3$Sub_metering_1) , pch = "", xlab = "", ylab = "Global Active Power (kilowatts)")
par(new = TRUE)  # overlay next series on the same axes
plot(data3$Day, data3$Sub_metering_3, ylim = range(Sub1),lines(data3$Day, data3$Sub_metering_3, col = "blue") , pch = "", xlab = "", ylab = "Global Active Power (kilowatts)")
par(new = TRUE)
plot(data3$Day, data3$Sub_metering_2, ylim = range(Sub1),lines(data3$Day, data3$Sub_metering_2, col = "red") , pch = "", xlab = "", ylab = "Global Active Power (kilowatts)")
legend("topright", legend = c("Sub_metering_1", "Sub_metering_2", "Sub_metering_3"),lty = c(1,1,1), col = c("black", "red", "blue"))
# copy the on-screen plot to a PNG file in the working directory
dev.copy(png, "plot3.png")
dev.off()
}
|
497a81e5e9fd4f21d9a1df20fa2613ea68c6d370
|
5ba816fc2889e0fc5cfc2ce0c4ac642bfac8ae38
|
/synthetic_expr/synthetic_mscls_2.R
|
4f8f1272c70c989aadc2b779e560f823b377a0be
|
[] |
no_license
|
bargavjayaraman/secure_model_aggregation
|
ce0ca597a54eec2bfbe3c5cec5464458bf897862
|
dea3dc9c4d38424c52125118c5709a037b4c7b88
|
refs/heads/master
| 2022-09-11T01:32:21.456279
| 2020-05-28T13:38:20
| 2020-05-28T13:38:20
| 103,537,043
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 968
|
r
|
synthetic_mscls_2.R
|
# misclassification rate in synthetic dataset for centralized
## Evaluates a centralized classifier over Nexpr repeated experiments.
## NOTE(review): `data`, `betas`, `muhat` (and the `t` thresholds referred
## to in the commented-out line) are expected to come from the two .Rdata
## files loaded below -- confirm their structure against those files.
load("syndata.Rdata")
Nexpr = 20
misclsfctn = rep(0,Nexpr) #misclassification rate
load("../../centr20000_100.Rdata")
for(expr_no in 1:Nexpr){
# coefficient vector for this experiment (5th setting)
betahat = betas[[expr_no]][[5]]
betahat = as.numeric(betahat)
# if using 40 parties, should be <=t[2], 60 parties for t[3], 80 for t[4] ...
#betahat[abs(betahat) <= t[1]/2 ] = 0
# if using 40 parties, should be muhat[[expr_no]][, 2], 60 for muhat[[expr_no]][, 3]...
# center class-1 samples at the estimated midpoint, then score them
difference = apply(data[[1]], 2, '-', muhat[[expr_no]][, 5])
predict = (crossprod(betahat, difference) > 0)
e1 = sum(predict == F)   # class-1 samples predicted as class 2
# if using 40 parties, should be muhat[[expr_no]][, 2], 60 for muhat[[expr_no]][, 3]...
difference = apply(data[[2]], 2, '-', muhat[[expr_no]][, 5])
predict = (crossprod(betahat, difference) > 0)
e2 = sum(predict == T)   # class-2 samples predicted as class 1
# total errors over the 10000 test samples
misclsfctn[expr_no] = (e1 + e2) / (10000)
}
cat( mean(misclsfctn) )
cat('\n')
cat( sd(misclsfctn) )
|
e15d03fe332e5183e6e8d5510d9d5e78140816d2
|
90d59895830814f772861dfb52f0e520de79af89
|
/man/climexp_to_sef.Rd
|
9b04ba15447f8efad282a28833d160e9c92a1962
|
[] |
no_license
|
cran/dataresqc
|
003d5e007a67e3639705099eeae6f912de1f0228
|
4e3a4a19308c5e29a1c1b93896523764f701c356
|
refs/heads/master
| 2023-04-13T07:42:08.328276
| 2023-04-02T21:00:02
| 2023-04-02T21:00:02
| 245,601,581
| 0
| 1
| null | null | null | null |
UTF-8
|
R
| false
| true
| 587
|
rd
|
climexp_to_sef.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/conversions.R
\name{climexp_to_sef}
\alias{climexp_to_sef}
\title{Download a GHCN-Daily data file from the Climate Explorer and convert it
into the Station Exchange Format}
\usage{
climexp_to_sef(url, outpath)
}
\arguments{
\item{url}{Character string giving the url of the data file.}
\item{outpath}{Character string giving the path where to save the file.}
}
\description{
Download a GHCN-Daily data file from the Climate Explorer and convert it
into the Station Exchange Format
}
\author{
Yuri Brugnara
}
|
51312ef8c4b8bb4115769face48388acd82f5961
|
d39ebc2fe344679a396111b0199ea50671192fa9
|
/bin/create_phased_maplist.R
|
ad11dd1b84714779eb30172dc3ffb0eae8678cb8
|
[] |
no_license
|
czheluo/Polymap
|
3a8d287cb09ac142623e054ccca249086d4de73f
|
e7b705f446646cd233effcadd74489d6ff2575bf
|
refs/heads/master
| 2020-05-05T07:18:09.433180
| 2019-11-13T15:30:21
| 2019-11-13T15:30:21
| 179,820,365
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 17,795
|
r
|
create_phased_maplist.R
|
## Build a phased maplist: combine per-linkage-group maps (`maplist`) with
## marker-homologue assignments from both parents to produce, per linkage
## group, a data.frame of marker, position and 0/1 homologue-presence
## columns h1..h(ploidy+ploidy2). Conflicting or under-supported markers
## are dropped, with details written to `log` (or stdout) when verbose.
## NOTE(review): defaults `parent1 = P1` / `parent2 = P2` are bare symbols,
## not strings -- they only work if P1/P2 exist in the calling scope;
## probably meant "P1"/"P2". Confirm against the package source.
## NOTE(review): `remove_markers` is never used in this body.
create_phased_maplist <- function(maplist,
dosage_matrix.conv,
dosage_matrix.orig = NULL,
remove_markers = NULL,
N_linkages = 2,
lower_bound = 0.05,
ploidy = 4,
ploidy2 = NULL,
marker_assignment.1,
marker_assignment.2,
parent1 = P1,
parent2 = P2,
original_coding = TRUE,
log = NULL,
verbose = FALSE) {
## Helper: fold a character vector into a matrix with n.columns columns
## (padding with "") for compact kable printing of removed-marker lists.
vector.to.matrix <- function(x, n.columns){
if(length(x)>n.columns){
x<-c(x, rep("", n.columns-length(x)%%n.columns))
} else {
n.columns <- length(x)
}
x.m <- matrix(x, ncol=n.columns, byrow=T)
colnames(x.m)<-rep("_", n.columns)
return(x.m)
}
## Helper: coerce/validate a dosage matrix (markers x genotypes, integer).
test_dosage_matrix <- function(dosage_matrix){
if(class(dosage_matrix) == "data.frame"){
warning("dosage_matrix should be a matrix, now it's a data.frame.")
message("Trying to convert it to matrix, assuming markernames are in the first column..")
rownames(dosage_matrix) <- dosage_matrix[,1]
dosage_matrix <- as.matrix(dosage_matrix[,-1])
class(dosage_matrix) <- "integer"
} else if(class(dosage_matrix) == "matrix"){
rn <- rownames(dosage_matrix)
cn <- colnames(dosage_matrix)
if(is.null(rn)) stop("The rownames of dosage_matrix should contain markernames. Now NULL")
if(is.null(cn)) stop("The columnnames of dosage_matrix should contain genotype names. Now NULL")
if(!(typeof(dosage_matrix)=="integer" | typeof(dosage_matrix)=="double")){
warning("dosage_matrix should be integer or numeric. Trying to convert it.. ")
class(dosage_matrix) <- "integer"
}
} else {
stop("dosage_matrix should be a matrix of integers.
See the manual of this function for more information.")
}
return(dosage_matrix)
}
## Helper: summarise parental dosage combinations and flag offspring whose
## dosages are incompatible with their parents' segregation type.
## NOTE(review): relies on lookup objects named seg_p<ploidy>_<pairing>
## being resolvable via get() -- presumably package data; confirm.
marker_data_summary <- function(dosage_matrix,
ploidy = 4,
pairing = c("random", "preferential"),
parent1 = P1,
parent2 = P2,
progeny_incompat_cutoff = 0.1,
verbose = TRUE,
log = NULL) {
dosage_matrix <- test_dosage_matrix(dosage_matrix)
if (is.null(log)) {
log.conn <- stdout()
} else {
matc <- match.call()
write.logheader(matc, log)
log.conn <- file(log, "a")
}
## Drop markers whose parental scores contain NA before any analysis.
pardos <- dosage_matrix[, c(parent1, parent2)]
if(any(is.na(pardos))){
NAmark <- rownames(pardos)[is.na(pardos[,parent1]) | is.na(pardos[,parent2])]
warning("There are parental scores with missing values. These are not considered in the analysis.
It is recommended to remove those before proceeding to further steps.")
dosage_matrix <- dosage_matrix[!rownames(dosage_matrix) %in% NAmark, ]
if(verbose) write(paste(c("\nThe following marker have missing values in their parental scores:",
NAmark, "\n"), collapse = "\n\n"), file = log.conn)
}
pairing <- match.arg(pairing)
add_P_to_table <- function(table) {
#add parent info to table
colnames(table) <- paste0("P2_", colnames(table))
rownames(table) <- paste0("P1_", rownames(table))
return(table)
}
test_row <- function(x, lu, parpos = c(1, 2)) {
#analyse offspring incompatibility for a marker
#with lu as lookup table for maximum and minimum offspring dosages
progeny <- x[-parpos]
partype <- lu$pmin == min(x[parpos]) & lu$pmax == max(x[parpos])
min <- lu[partype, "min"]
max <- lu[partype, "max"]
return(!is.na(progeny) & progeny >= min & progeny <= max)
}
#######################################
nm <- nrow(dosage_matrix)
end_col <- ncol(dosage_matrix)
if(verbose) write("Calculating parental info...", stdout())
# contingency table number of markers
parental_info <-
table(as.factor(dosage_matrix[, parent1]), as.factor(dosage_matrix[, parent2]))
parental_info <- add_P_to_table(parental_info)
#Checking offspring compatability
if(verbose) write("Checking compatability between parental and offspring scores...",
stdout())
parpos <- which(colnames(dosage_matrix) %in% c(parent1, parent2))
progeny <- dosage_matrix[,-parpos]
nr_offspring <- ncol(progeny)
## Build a lookup of min/max possible offspring dosage per parental
## dosage combination from the segregation table.
seg.fname <- paste0("seg_p", ploidy, "_", pairing)
seg <- get(seg.fname)#,envir=getNamespace("polymapR"))
segpar <- seg[, c("dosage1", "dosage2")]
colnames(segpar) <- c("pmax", "pmin")
segoff <- seg[, 3:ncol(seg)]
segoff <- segoff > 0
segpos <- c(0:ploidy)
lu_min_max <- apply(segoff, 1, function(x) {
a <- segpos[x]
min <- min(a)
max <- max(a)
return(c(min, max))
})
rownames(lu_min_max) <- c("min", "max")
lu <- cbind(segpar, t(lu_min_max))
expected_dosage <-
apply(dosage_matrix, 1, test_row, lu = lu, parpos = parpos)
#NA should be "TRUE", now "FALSE"
expected_dosage <- t(expected_dosage)
if(length(which(is.na(progeny))) > 0) expected_dosage[is.na(progeny)] <- TRUE
#two factorial table of parental dosages with percentage of "FALSE" per factor combination
progeny_incompat <- colSums(!expected_dosage)
na_progeny <- colSums(is.na(progeny))
perc_incompat <-
progeny_incompat / (nrow(expected_dosage) - na_progeny)
progeny_incompat <-
colnames(progeny)[perc_incompat > progeny_incompat_cutoff]
nr_incompat <- rowSums(!expected_dosage)
offspring_incompat <- tapply(
nr_incompat,
list(dosage_matrix[, parent1], dosage_matrix[, parent2]),
FUN = function(x)
sum(x) / (length(x) * nr_offspring) * 100
)
offspring_incompat <- round(offspring_incompat, 2)
offspring_incompat <- add_P_to_table(offspring_incompat)
summary <-
list(parental_info, offspring_incompat, progeny_incompat)
names(summary) <-
c("parental_info",
"offspring_incompatible",
"progeny_incompatible")
for (i in c(1, 2)) {
if(verbose) {
write(paste0("\n####", names(summary)[i], "\n"),
file = log.conn)
#sink(log.conn)
write(knitr::kable(summary[[i]]),
log.conn)
}
#suppressWarnings(sink())
}
if(verbose) write("\n####Incompatible individuals:\n", log.conn)
if (length(progeny_incompat) == 0 & verbose)
write("None\n", log.conn)
if(verbose) write(summary$progeny_incompatible, log.conn)
if (!is.null(log))
close(log.conn)
return(summary)
} #marker_data_summary()
## ---- Main body ----
## Input validation: original dosages are required for recoding back.
if(original_coding & is.null(dosage_matrix.orig)) stop("Uncoverted dosage matrix should also be specified if original_coding = TRUE")
mapped_markers <- unlist(lapply(maplist, function(x) as.character(x$marker)))
if(!all(mapped_markers %in% rownames(dosage_matrix.conv))) stop("Not all markers on map have corresponding dosages! If duplicated markers were added back to maps, make sure to use an appropriate dosage matrix!")
if (is.null(log)) {
log.conn <- stdout()
} else {
matc <- match.call()
write.logheader(matc, log)
log.conn <- file(log, "a")
}
if(is.null(ploidy2)) ploidy2 <- ploidy
## Palindrome markers: parental dosages that are mirror images around
## ploidy/2 (e.g. 1x3 vs 3x1); normalise them to one orientation.
## NOTE(review): `palindromes` is only defined in this branch, yet it is
## referenced unconditionally further down (P2different, hom_mat
## correction) -- with ploidy != ploidy2 that would error unless
## `palindromes` exists elsewhere. Confirm against the package source.
if(ploidy == ploidy2){
palindromes <- rownames(dosage_matrix.conv)[which(dosage_matrix.conv[,parent1] != dosage_matrix.conv[,parent2] &
abs(dosage_matrix.conv[,parent1] - (0.5*ploidy)) == abs(dosage_matrix.conv[,parent2]-(0.5*ploidy2)))]
## If there are any unconverted palindromes, convert them:
if(any(dosage_matrix.conv[palindromes,parent1] > dosage_matrix.conv[palindromes,parent2]))
dosage_matrix.conv[palindromes[dosage_matrix.conv[palindromes,parent1] > dosage_matrix.conv[palindromes,parent2]],] <-
ploidy - dosage_matrix.conv[palindromes[dosage_matrix.conv[palindromes,parent1] > dosage_matrix.conv[palindromes,parent2]],]
}
# Begin by separating the SxN and NxS linkages:
## Simplex x nulliplex (1x0) markers are accepted as-is and re-attached
## after the filtering steps below.
SxN_assigned <- marker_assignment.1[marker_assignment.1[,parent1]==1 &
marker_assignment.1[,parent2]==0,]
p1_assigned <- marker_assignment.1[-match(rownames(SxN_assigned),rownames(marker_assignment.1)),]
NxS_assigned <- marker_assignment.2[marker_assignment.2[,parent1]==0 &
marker_assignment.2[,parent2]==1,]
p2_assigned <- marker_assignment.2[-match(rownames(NxS_assigned),rownames(marker_assignment.2)),]
#Use only the markers with at least N_linkages significant linkages
P1unlinked <- rownames(p1_assigned)[apply(p1_assigned[,3+grep("LG",colnames(p1_assigned)[4:ncol(p1_assigned)]),drop = FALSE],1,max)<N_linkages]
P2unlinked <- rownames(p2_assigned)[apply(p2_assigned[,3+grep("LG",colnames(p2_assigned)[4:ncol(p2_assigned)]),drop = FALSE],1,max)<N_linkages]
if(verbose) {
removed.m1 <- vector.to.matrix(P1unlinked, n.columns = 4)
removed.m2 <- vector.to.matrix(P2unlinked, n.columns = 4)
if(nrow(removed.m1) > 0){
write(paste("\nThe following P1 markers had less than", N_linkages,"significant linkages:\n_______________________________________\n"),log.conn)
write(knitr::kable(removed.m1,format="markdown"), log.conn)
}
if(nrow(removed.m2) > 0){
write(paste("\n\nThe following P2 markers had less than", N_linkages,"significant linkages:\n_______________________________________\n"),log.conn)
write(knitr::kable(removed.m2,format="markdown"), log.conn)
write("\n", log.conn)
}
}
if(length(P1unlinked) > 0) p1_assigned <- p1_assigned[-match(P1unlinked,rownames(p1_assigned)),]
if(length(P2unlinked) > 0) p2_assigned <- p2_assigned[-match(P2unlinked,rownames(p2_assigned)),]
# Only select markers for which the number of homologue assignments match the seg type:
## Per-homologue linkage fractions; fractions below lower_bound are zeroed
## so that weak spurious linkages do not count as assignments.
p1cols <- 3+grep("Hom",colnames(p1_assigned)[4:ncol(p1_assigned)])
p2cols <- 3+grep("Hom",colnames(p2_assigned)[4:ncol(p2_assigned)])
P1rates <- p1_assigned[,p1cols]/rowSums(p1_assigned[,p1cols], na.rm = TRUE)
P2rates <- p2_assigned[,p2cols]/rowSums(p2_assigned[,p2cols], na.rm = TRUE)
P1rates[P1rates < lower_bound] <- 0
P2rates[P2rates < lower_bound] <- 0
P1linked <- apply(P1rates,1,function(x) length(which(x!=0)))
P2linked <- apply(P2rates,1,function(x) length(which(x!=0)))
p1.markers <- rownames(p1_assigned[p1_assigned[,parent1]!=0,])
p2.markers <- rownames(p2_assigned[p2_assigned[,parent2]!=0,])
## Assuming markers are converted here; have to treat palindrome markers in P2 carefully:
P1different <- rownames(p1_assigned[rownames(p1_assigned) %in% p1.markers & p1_assigned[,parent1] != P1linked,])
P2different <- rownames(p2_assigned[setdiff(which(rownames(p2_assigned) %in% p2.markers & p2_assigned[,parent2] != P2linked),
which(rownames(p2_assigned) %in% palindromes & ploidy2 - p2_assigned[,parent2] == P2linked)),])
if(verbose) {
removed.m1 <- if(!is.null(P1different)) {
vector.to.matrix(P1different, n.columns = 4)
} else matrix(,nrow=0,ncol=1) #catching error
removed.m2 <- if(!is.null(P2different)){
vector.to.matrix(P2different, n.columns = 4)
} else matrix(,nrow=0,ncol=1) #catching error
if(nrow(removed.m1) > 0){
write(paste("\nThe following markers did not have the expected assignment in P1:\n_______________________________________\n"),log.conn)
write(knitr::kable(removed.m1,format="markdown"), log.conn)
}
if(nrow(removed.m2) > 0){
write(paste("\n\nThe following markers did not have the expected assignment in P2:\n_______________________________________\n"),log.conn)
write(knitr::kable(removed.m2,format="markdown"), log.conn)
write("\n", log.conn)
}
}
## Drop markers whose assignment count disagreed with their dosage.
P1rates <- P1rates[!rownames(p1_assigned) %in% P1different,]
P2rates <- P2rates[!rownames(p2_assigned) %in% P2different,]
#Update p1_assigned and p2_assigned
p1_assigned <- p1_assigned[!rownames(p1_assigned) %in% P1different,]
p2_assigned <- p2_assigned[!rownames(p2_assigned) %in% P2different,]
rownames(P1rates) <- rownames(p1_assigned)
rownames(P2rates) <- rownames(p2_assigned)
# return simplex x nulliplex markers
p1_assigned <- rbind(SxN_assigned,p1_assigned)
p2_assigned <- rbind(NxS_assigned,p2_assigned)
P1rates <- rbind(SxN_assigned[,p1cols],P1rates)
P2rates <- rbind(NxS_assigned[,p2cols],P2rates)
# Remove the bi-parental markers that are not assigned in both parents (what about unconverted markers here? Logical test is only looks for a nulliplex parent.)
bip1 <- rownames(p1_assigned[rowSums(p1_assigned[,c(parent1,parent2)]!=0)==2,])
bip2 <- rownames(p2_assigned[rowSums(p2_assigned[,c(parent1,parent2)]!=0)==2,])
BiP_different <- c(setdiff(bip1,intersect(bip1,bip2)),setdiff(bip2,intersect(bip1,bip2)))
if (verbose & !is.null(BiP_different)) {
removed.m <- vector.to.matrix(BiP_different, n.columns = 4)
if(nrow(removed.m) > 0){
write(paste("\nThe following markers did not have the expected assignment across both parents:\n_______________________________________\n"),log.conn)
write(knitr::kable(removed.m,format="markdown"), log.conn)
write("\n", log.conn)
}
}
P1rates <- P1rates[!rownames(p1_assigned) %in% setdiff(bip1,intersect(bip1,bip2)),]
P2rates <- P2rates[!rownames(p2_assigned) %in% setdiff(bip2,intersect(bip1,bip2)),]
#Update p1_assigned and p2_assigned
p1_assigned <- p1_assigned[!rownames(p1_assigned) %in% setdiff(bip1,intersect(bip1,bip2)),]
p2_assigned <- p2_assigned[!rownames(p2_assigned) %in% setdiff(bip2,intersect(bip1,bip2)),]
ALL_assigned <- unique(c(rownames(p1_assigned),rownames(p2_assigned)))
# Make up the output
## For each linkage group map, keep assigned markers and attach a 0/1
## homologue matrix: columns 1..ploidy for parent 1, the rest for parent 2.
maplist.out <- lapply(seq(length(maplist)),function(mapn) {
map <- maplist[[mapn]]
map <- map[map$marker%in%ALL_assigned,]
outmap <- map[,c("marker","position")]
hom_mat <- sapply(1:nrow(outmap), function(r){
a <- rep(0, ploidy+ploidy2)
temp <- P1rates[match(as.character(outmap$marker[r]),rownames(P1rates)),]
if(length(which(temp!=0)) > 0) a[(1:ploidy)[which(temp!=0)]] <- 1
temp <- P2rates[match(outmap$marker[r],rownames(P2rates)),]
if(length(which(temp!=0)) > 0) a[((ploidy+1):(ploidy+ploidy2))[which(temp!=0)]] <- 1
return(a)
})
hom_mat <- t(hom_mat)
colnames(hom_mat) <- paste0("h",seq(1,ploidy+ploidy2))
# correct palindrome markers:
## Flip the parent-2 homologue pattern (0<->1) for palindromes, undoing
## the orientation normalisation applied earlier.
if(any(outmap$marker %in% palindromes)){
hom_mat[outmap$marker %in% palindromes,(ploidy+1):(ploidy+ploidy2)] <-
(hom_mat[outmap$marker %in% palindromes,(ploidy+1):(ploidy+ploidy2)] + 1) %% 2
}
# recode using the original coding:
## If the per-parent homologue count disagrees with the original parental
## dosage, the converted coding was complemented -- flip it back.
if(original_coding){
orig_parents <- dosage_matrix.orig[match(outmap$marker,rownames(dosage_matrix.orig)),c(parent1,parent2)]
orig_mat <- hom_mat
for(r in 1:nrow(orig_mat)){
if(sum(hom_mat[r,1:ploidy]) != orig_parents[r,1]) orig_mat[r,1:ploidy] <- (hom_mat[r,1:ploidy]+1)%%2
if(sum(hom_mat[r,(ploidy+1):(ploidy+ploidy2)]) != orig_parents[r,2])
orig_mat[r,(ploidy+1):(ploidy+ploidy2)] <- (hom_mat[r,(ploidy+1):(ploidy+ploidy2)]+1)%%2
}
outmap <- cbind(outmap,orig_mat)
} else{
outmap <- cbind(outmap,hom_mat)
}
return(outmap)
}
)
names(maplist.out) <- names(maplist)
phased_markers <- unlist(lapply(maplist.out, function(x) as.character(x$marker)))
if(original_coding){
mapped.dosages <- dosage_matrix.orig[mapped_markers,]
} else{
mapped.dosages <- dosage_matrix.conv[mapped_markers,]
}
## Report marker breakdowns before and after phasing when verbose.
if(verbose){
mds.b4 <- marker_data_summary(dosage_matrix = mapped.dosages,
ploidy = (ploidy+ploidy2)/2,
pairing = "random", verbose = FALSE)
mds.aft <- marker_data_summary(dosage_matrix = dosage_matrix.conv[phased_markers,],
ploidy = (ploidy+ploidy2)/2,
pairing = "random", verbose = FALSE)
write(paste("\nMapped marker breakdown before phasing:\n_______________________________________\n"),log.conn)
write(knitr::kable(mds.b4$parental_info,format="markdown"), log.conn)
write("\n", log.conn)
write(paste("\nPhased marker breakdown:\n_______________________________________\n"),log.conn)
write(knitr::kable(mds.aft$parental_info,format="markdown"), log.conn)
write("\n", log.conn)
}
## Run a final check to make sure that the phased marker dosages equal the original marker dosages:
phased.dose <- do.call(rbind,lapply(maplist.out, function(x) {
temp <- cbind(rowSums(x[,paste0("h",1:ploidy)]),
rowSums(x[,paste0("h",(ploidy + 1):(ploidy + ploidy2))]))
rownames(temp) <- x[,"marker"]
return(temp)
}))
orig.dose <- mapped.dosages[rownames(phased.dose),c(parent1,parent2)]
conflicting <- which(rowSums(phased.dose == orig.dose) != 2)
if(length(conflicting) > 0){
warning("Not all phased markers matched original parental dosage. \nPerhaps unconverted marker dosages were supplied as converted dosages by mistake? \nThe following conflicts were detected and removed:")
warn.df <- cbind(orig.dose[conflicting,],phased.dose[conflicting,])
colnames(warn.df) <- c("P1_original","P2_original","P1_phased","P2_phased")
write(knitr::kable(warn.df,format="markdown"), log.conn)
## Simply remove these markers from the output:
rem.markers <- rownames(phased.dose)[conflicting]
maplist.out <- lapply(maplist.out, function(x) x[!x$marker %in% rem.markers,])
}
if(!is.null(log)) close(log.conn)
return(maplist.out)
}
|
ef1972bd0b577c16e3f510067227f9a0043af5c6
|
a138250a21bc0a32cdbdcad01fe44d1557bd26d8
|
/2-SymiinChow/01DiscreteTimedynrExamples/CFA/GenDataCFA.r
|
caba545aa3be391f2953c951d83d92a624b2b154
|
[] |
no_license
|
ktw5691/dynamic-time-imps2018
|
49ab7aa0374de9c2596d70681df19a03ec5c7013
|
e4f1d177b1c63c8323fc3a19e578ca2ed0e3041f
|
refs/heads/main
| 2021-06-16T16:14:16.498539
| 2018-08-05T22:46:47
| 2018-08-05T22:46:47
| 140,288,930
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,531
|
r
|
GenDataCFA.r
|
# Simulate data for a confirmatory factor analysis (CFA) / state-space model:
# ne latent states measured by ny indicators, np subjects, nt time point(s).
# Measurement: y_t = d + S a_t + e_t,  e_t ~ N(0, R)
# State:       a_t = c + H a_{t-1} + z_t,  z_t ~ N(0, Q)
# H is zero here, so states are serially uncorrelated (pure CFA).
rm(list=ls(all=TRUE))
nt=1 # number of time points kept per subject (after burn-in)
ne=2 # number of latent states (factors)
ny=6 # number of observed indicators
nx=0 # number of fixed regressors (none in this run)
np=500 # number of subjects
filey=paste0('CFA.dat') # output file for the observed y
npad=1 # number of burn-in (start-up) time points discarded per subject
ist=npad+1 # index of the first retained time point
ntt=nt+npad # total simulated time points per subject, including burn-in
# S: ny x ne factor-loading matrix (simple structure, 3 indicators per factor)
S=matrix(c(
1,0,
1.2,0,
.8,0,
0,1,
0,.9,
0,1.1
),ny,ne,byrow=TRUE)
# Q: ne x ne state (process) noise covariance
Q=matrix(c(
2.5,.6,
.6,2.5
),ne,ne,byrow=TRUE)
# H: ne x ne auto-/cross-regression matrix. The commented values would add
# lag-1 dynamics; the active line sets H to zero (no dynamics).
#H=matrix(c(
# .5,-.3,
# -.2,.4),ne,ne,byrow=TRUE)
H = matrix(rep(0,4),ncol=ne)
# R: ny x ny measurement-error covariance (diagonal -> local independence)
R=diag(c(.8,.6,2,1,1.5,2))
# c: ne x 1 state intercepts.
# NOTE: this binding shadows base::c as a value, but calls like c(...) below
# still resolve to the base function (R skips non-functions in call lookup).
c=matrix(c(0,0),ne,1,byrow=TRUE)
# d: ny x 1 measurement intercepts
d=matrix(c(3,2,4,5,3,4),ny,1,byrow=TRUE)
## Z
# initial states at t=0
a0=matrix(c(0,0),ne,1,byrow=TRUE)
# Cholesky factors of Q & R, used to generate correlated noise draws
# (assumes these are positive definite)
Qs = Q
Rs = R
if (sum(diag(Q))> 0) Qs = chol(Q)
if (sum(diag(R))> 0) Rs = chol(R)
# Pre-allocate per-subject buffers and the stacked (np*nt)-row outputs
a=matrix(0,ntt,ne)
y=matrix(0,ntt,ny)
x=matrix(0,ntt,nx)
yall=matrix(0,nt*np,ny)
all = matrix(0,nt*np,ne)
for (j in 1:np){
a[1,1:ne] = a0
for (i in 2:ntt)
{
# rnorm %*% chol yields draws with covariance Q (resp. R)
ztmp=t(rnorm(ne)%*%Qs)
etmp=t(rnorm(ny)%*%Rs)
atmp=as.matrix(a[i-1,1:ne])
atmp=H%*%atmp+ztmp+c
a[i,1:ne]=t(atmp)
ytmp=S%*%atmp+etmp+d
y[i,1:ny]=ytmp
}
# keep only post-burn-in rows, stacked subject by subject
yall[ (1+(j-1)*nt):(j*nt),1:ny] = y[(ist:ntt),1:ny]
all[ (1+(j-1)*nt):(j*nt),1:ne] = a[(ist:ntt),1:ne]
}
#
yx=yall
nyx=nx+ny
# NOTE(review): this branch looks buggy -- 'y' and 'x' hold only the LAST
# subject's padded data, not the stacked 'yall'. Harmless while nx == 0,
# but verify before enabling fixed regressors.
if (nx>0) { yx=cbind(y,x) }
write.table(yx,file=filey,append=FALSE,col.names = FALSE,row.names = FALSE)
|
ccf7b689a7157585d2f8e7eca311df54d462c0fc
|
7a79d24727ec33cb3db2629422ebf4f9beac2e37
|
/itemset_mining.r
|
01b0a9615f866fdf961bf07025c8acb6044031b2
|
[] |
no_license
|
benjaminvdb/mining_recipe_data
|
024b68c8269fd7f2574bec37e43ac6983aa20de9
|
999ab8508f2e0c960c094aabdbe8125731096c7c
|
refs/heads/master
| 2023-01-28T07:18:24.358564
| 2020-12-09T12:22:16
| 2020-12-09T12:22:16
| 63,415,621
| 4
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 9,696
|
r
|
itemset_mining.r
|
## Setup: attach packages with library() rather than require().
## require() returns FALSE on a missing package and lets execution continue,
## producing confusing "could not find function" errors later; library()
## fails immediately with a clear message.
library(arules)
library(arulesViz)
library(tikzDevice)

## Output locations for the paper's tables and plots.
base_dir <- '/Users/benny/Repositories/recipes/paper'
tables_dir <- file.path(base_dir, 'tables')
plots_dir <- file.path(base_dir, 'plots')
saveTikz <- function(plt, filename, width = 4.9823, ratio = 1.618) {
  # Render a previously recorded plot (from recordPlot()) to a tikz file
  # under plots_dir, sized by 'width' and the given aspect ratio.
  out_path <- file.path(plots_dir, filename)
  tikz(file = out_path, width = width, height = width / ratio)
  replayPlot(plt)
  dev.off()
}
# ---- Load data ----
# Transactions in "single" format: one (transactionID, item) pair per line.
filename <- '/Users/benny/Repositories/recipes/data/recipes.single'
Recipes = read.transactions(filename, format='single', sep=',', cols=seq(1, 2))
# Create summary
summary(Recipes)
# ---- Mine rules using Apriori ----
rules <- apriori(Recipes, parameter=list(support=0.02, confidence=0.5))
# Top 10 rules according to lift
inspect(head(sort(rules, by ="lift"), 10))
top10 <- as(head(sort(rules, by ="lift"), 10), 'data.frame')
# 'tables_dir' is defined in the setup section at the top of this script.
write.table(top10, file.path(tables_dir, 'rules_top10.dat'), sep = ';', col.names = TRUE, row.names = FALSE)
# Scatter plot
plot(rules)
# The quality() function prints out quality scores for rules
head(quality(rules))
# Two-key plot plots support against confidence, with the 'order'
# indicated by color, which is the number of items
plot(rules, shading="order", control=list(main = "Two-key plot"))
# Interactive plot (needs a human to close the selection; not for batch runs)
sel <- plot(rules, measure=c("support", "lift"), shading="confidence", interactive=TRUE)
# Select rules with confidence > 0.9
subrules <- rules[quality(rules)$confidence > 0.9]
plot(subrules, method="matrix", measure="lift")
# reordering rows and columns in the matrix such that rules with similar values of the interest measure are presented closer together
plot(subrules, method="matrix", measure="lift", control=list(reorder=TRUE))
# Same thing, interactive
plot(subrules, method="matrix", measure="lift", control=list(reorder=TRUE), interactive=TRUE)
# Plot in 3D (less intuitive!)
plot(subrules, method="matrix3D", measure="lift", control=list(reorder=TRUE))
# Two measures combined in one coloring grid
plot(subrules, method="matrix", measure=c("lift", "support"), control=list(reorder=TRUE))
plot(subrules, method="matrix", measure=c("confidence", "support"), control=list(reorder=TRUE))
# Grouping statistically dependent consequents (LHS) allows to plot many more rules
many_rules <- apriori(Recipes, parameter=list(support=0.01, confidence=0.3))
plot(many_rules, method="grouped")
# Select some rules with high lift
subrules2 <- head(sort(rules, by="lift"), 20)
# Plotting makes things cluttered...
#plot(subrules2, method="graph")
# ... while vertices = itemsets and edges = rules is pretty nice
plot(subrules2, method="graph", control=list(type="itemsets"))
# Export to Gephi!!
# NOTE: here we quickly found there seem to be two clusters
# ('hartig'/savory and 'zoetig'/sweet?)
saveAsGraph(head(sort(rules, by="lift"),200), file="rules2.graphml")
plot(subrules2, method="paracoord", control=list(reorder=TRUE))
# Double decker plot of one randomly drawn rule
oneRule <- sample(rules, 1)
inspect(oneRule)
plot(oneRule, method="doubledecker", data = Recipes)
# ---- Clustering of transactions (Jaccard dissimilarity on a 2000-sample) ----
set.seed(1234)
s <- sample(Recipes, 2000)
d <- dissimilarity(s, method = "Jaccard")
library("cluster")
clustering <- pam(d, k = 16)
plot(clustering)
# Prediction based on clustering: assign every recipe to its closest medoid
allLabels <- predict(s[clustering$medoids], Recipes, method = "Jaccard")
cluster <- split(Recipes, allLabels)
itemFrequencyPlot(cluster[[1]], population = s, support = 0.05)
itemFrequencyPlot(cluster[[2]], population = s, support = 0.05) # Sweet pastries?
itemFrequencyPlot(cluster[[3]], population = s, support = 0.05) # Greek?
itemFrequencyPlot(cluster[[4]], population = s, support = 0.05)
itemFrequencyPlot(cluster[[5]], population = s, support = 0.05) # Apple based sweet pasties?
itemFrequencyPlot(cluster[[6]], population = s, support = 0.05)
itemFrequencyPlot(cluster[[7]], population = s, support = 0.05)
itemFrequencyPlot(cluster[[8]], population = s, support = 0.05)
# Repeat with only two clusters
clustering <- pam(d, k = 2)
allLabels <- predict(s[clustering$medoids], Recipes, method = "Jaccard")
cluster <- split(Recipes, allLabels)
itemFrequencyPlot(cluster[[1]], population = s, support = 0.05) # 'Hartig' (savory)
itemFrequencyPlot(cluster[[2]], population = s, support = 0.05) # 'Zoet' (sweet)
# Supplement a recipe: rules that conclude in "chicken"
chickenRules <- subset(rules, subset = rhs %in% "chicken")
# Cool result:
# 461 {carrot,celery stalks} => {chicken} 0.01029268 0.5436782 2.993976
# ---- Ingredient frequency distribution ----
require(ggplot2)
require(RColorBrewer)
require(plyr)
# Plot ingredient distribution (absolute item frequencies, by rank)
y <- sort(itemFrequency(Recipes, type = 'abs'), decreasing = TRUE)
n <- length(y)
x <- 1:n
# Data (NOTE: 'data' shadows utils::data as a value from here on)
data <- data.frame(x=x, y=y, group='Data')
# Fit linear line on logarithmic data (exponential decay of frequency by rank)
fit <- lm(log(y) ~ x, data=data.frame(x=x, y=y))
fitvals <- exp(fit$fitted.values)
data2 <- data.frame(x=x, y=fitvals, group='Regression')
# Plot to a tikz device so the figure's fonts match the paper
library(tikzDevice)
plots_dir = '/Users/benny/Repositories/recipes/paper/plots'
phi <- 1.618
width <- 4.9823
height <- width/phi
filename <- file.path(plots_dir, 'ingredient_frequencies.tex')
tikz(file = filename, width = width, height = height)
ggplot() + aes(x=x, y=y, color=group) +
geom_point(data=data, size=.5) +
geom_line(data=data2, linetype='dashed', size=.8) +
scale_y_log10() +
scale_color_brewer(palette = 'Set1') +
ggtitle('Ingredient frequencies on a logarithmic scale') +
labs(x='Ingredients', y='Frequency') +
theme(plot.title = element_text(size=12),
legend.title = element_blank(),
legend.justification=c(1,1),
legend.position=c(1,1))
dev.off()
# Save table
mod_stargazer <- function(output.file, ...) {
  # Write stargazer's printed LaTeX output to 'output.file' instead of stdout.
  # NOTE(review): stargazer() is never attached in this script; the caller
  # must have loaded the stargazer package -- verify.
  tex_lines <- capture.output(stargazer(...))
  tex_body <- paste(tex_lines, collapse = "\n")
  cat(tex_body, file = output.file, sep = "\n", append = FALSE)
}
# ---- Top-10 ingredient table for the paper ----
tables_dir <- '/Users/benny/Repositories/recipes/paper/tables'
top <- sort(itemFrequency(Recipes, type='abs'), decreasing = TRUE)
topN <- top[1:10]
# NOTE: 't' shadows base::t as a value from here on (crossprod is used below).
t <- data.frame(Ingredient=names(topN), Frequency=unname(topN), Relative=unname(topN)/sum(top))
filename <- file.path(tables_dir, 'ingredients_top10.tex')
mod_stargazer(filename, t, summary=FALSE, digit.separator=' ')
# FIX: removed the accidentally duplicated assignment
# ("filename <- filename <- ...").
filename <- file.path(tables_dir, 'ingredients_top10.dat')
write.table(t, file = filename, quote = FALSE, sep = ";",
row.names = FALSE, col.names = TRUE)
# ---- Classification experiment: label the first 2000 recipes ----
library(party)
# Arbitrary class split by transaction index (<= 1000 vs > 1000).
f <- function(v) {v <= 1000}
a <- as(Recipes[1:2000], 'matrix')
b <- cbind(a, sapply(1:2000, f))
# Rename the appended column (position 404) to 'class'.
# NOTE(review): 404 is hard-coded as item count + 1; verify it still matches
# the current data set.
dimnames <- attr(b, 'dimnames')
dimnames[[2]][404] <- 'class'
attr(b, 'dimnames') <- dimnames
data = data.frame(b)
#tree <- ctree(class ~ pepper + salt, data = data)
# ---- Good/bad recipe export ----
# NOTE(review): 'recipes_good', 'recipes_bad' and hashmap() are not defined in
# this script; this section assumes they already exist in the workspace.
tinfo <- as(transactionInfo(Recipes), 'list')[[1]] # Get list of index -> tid
tid_to_index <- hashmap(tinfo, sapply(1:length(tinfo), toString))
good_tids <- unlist(recipes_good@data@Dimnames[[2]])
bad_tids <- unlist(recipes_bad@data@Dimnames[[2]])
GoodRecipes <- Recipes[tid_to_index[[good_tids]]]
BadRecipes <- Recipes[tid_to_index[[bad_tids]]]
good <- as(GoodRecipes, 'matrix')
bad <- as(BadRecipes, 'matrix')
good <- cbind(good, 1)
bad <- cbind(bad, 2)
data <- rbind(good, bad)
dimnames <- attr(data, 'dimnames')
dimnames[[2]][404] <- 'class'
attr(data, 'dimnames') <- dimnames
write.csv(data, 'good_bad.csv')
# Frequency of item pairs
X <- as(Recipes, 'matrix')
X <- sapply(as.data.frame(X), as.numeric)
out <- crossprod(X) # Same as: t(X) %*% X
diag(out) <- 0
# ---- Recommender evaluation ----
library("recommenderlab")
# FIX: the list previously closed after the AR entry (")))"), which orphaned
# the later IBCF entries and made the script unparseable; all six active
# algorithms are now one valid list, with the experiments kept as comments.
algorithms <- list("random items" = list(name = "RANDOM", param = NULL),
"popular items" = list(name = "POPULAR", param = NULL),
"association rules (0.001)" = list(name = "AR", param = list(support = 0.001,confidence=0.1, maxlen=3)),
#"association rules (0.01)" = list(name = "AR", param = list(support = 0.01)),
#"association rules (0.05)" = list(name = "AR", param = list(support = 0.05)),
#"association rules (0.1)" = list(name = "AR", param = list(support = 0.1)),
#"item-based CF (k=3)" = list(name = "IBCF", param = list(k = 3)),
#"item-based CF (k=5)" = list(name = "IBCF", param = list(k = 5)),
#"item-based CF (k=10)" = list(name = "IBCF", param = list(k = 10)),
"item-based CF (k=20)" = list(name = "IBCF", param = list(k = 20)),
#"item-based CF (k=30)" = list(name = "IBCF", param = list(k = 30)),
"item-based CF (k=40)" = list(name = "IBCF", param = list(k = 40)),
#"item-based CF (k=50)" = list(name = "IBCF", param = list(k = 50)),
"item-based CF (k=200)" = list(name = "IBCF", param = list(k = 200)))
#"item-based CF (k=40)" = list(name = "IBCF", param = list(k = 40, method='dice')),
#"item-based CF (k=200)" = list(name = "IBCF", param = list(k = 200, method='dice')))
#"item-based CF (k=402)" = list(name = "IBCF", param = list(k = 402)))
#"user-based CF (Jaccard)" = list(name = "UBCF", param = list(nn = 50, method = 'jaccard')))
#"user-based CF (Pearson)" = list(name = "UBCF", param = list(nn = 50, method = 'pearson')))
# Keep only recipes with enough items for the "given-2" evaluation protocol.
Recipes_binary <- as(Recipes, 'binaryRatingMatrix')
Recipes_binary <- Recipes_binary[rowCounts(Recipes_binary) > 5]
scheme <- evaluationScheme(Recipes_binary, method="split", train=.9, k=1, given=2)
results2 <- evaluate(scheme, algorithms, progress = TRUE,
type = "topNList", n=c(1,3,5,10))
# FIX: the label vector now matches the six algorithms actually evaluated;
# the previous eight names (for since-commented-out AR variants) would have
# made names(results2) <- nms fail on a length mismatch.
nms <- c('Random items', 'Popular items', 'AR s=0.001',
'IBCF k=20', 'IBCF k=40', 'IBCF k=200')
names(results2) <- nms
# NOTE(review): annotate=c(1,3,7) references curve 7 of only 6 -- verify.
plot(results2, annotate=c(1,3,7))
title('ROC curve for ingredient recommendation')
plt <- recordPlot()
saveTikz(plt, 'ingredients_recommendations_given2.tex')
|
c72948e5f2c6d0be4b893ac09f3e51f96a3bb3aa
|
c4ceae368b59f5ff8c473abaec394f7ffaf4be00
|
/cachematrix.R
|
bb03d0e127577118ca74e2d1dae5363f7a78a8ec
|
[] |
no_license
|
FatmaElBadry/ProgrammingAssignment2
|
560811b208f849754e9414687bb7332931aaa1ad
|
ad861d578e8b7e9f38839dd6cda9da7bafe8346b
|
refs/heads/master
| 2021-09-01T19:36:22.638469
| 2017-12-28T13:17:04
| 2017-12-28T13:17:04
| 115,616,833
| 0
| 0
| null | 2017-12-28T11:29:13
| 2017-12-28T11:29:13
| null |
UTF-8
|
R
| false
| false
| 2,534
|
r
|
cachematrix.R
|
## Week 3 Assignment; cachematrix; Developed By: Fatma ElBadry
## Overall description of what these functions do:
## Function "makeCacheMatrix" takes a matrix as input and returns a list of
## functions that can do the following:
## 1. set the value of the matrix,
## 2. get the value of the matrix,
## 3. set the inverse matrix, and
## 4. get the inverse matrix.
## The matrix object can cache its own inverse.
## This function creates a special "matrix" object that can cache its inverse.
makeCacheMatrix <- function(x = matrix()) { # take the matrix as an input
  # Cached inverse; reset to NULL whenever the underlying matrix changes so
  # a stale inverse is never returned.
  invMatrix <- NULL

  # 1. replace the stored matrix (invalidates the cached inverse)
  setMatrix <- function(y) {
    x <<- y
    invMatrix <<- NULL
  }
  # 2. read back the stored matrix
  getMatrix <- function() {
    x
  }
  # 3. store a computed inverse
  setInverse <- function(inverse) {
    invMatrix <<- inverse
  }
  # 4. read the cached inverse (NULL if not computed yet)
  getInverse <- function() {
    invMatrix
  }

  # Return the four accessors so callers can reach them with the $ operator.
  list(
    setMatrix = setMatrix,
    getMatrix = getMatrix,
    setInverse = setInverse,
    getInverse = getInverse
  )
}
## The function "cacheSolve" takes the output of makeCacheMatrix(matrix) as
# input and checks whether the inverse matrix cached by makeCacheMatrix(matrix)
# holds a value.
# If the cached inverse from makeCacheMatrix(matrix) is empty, it gets the
# original matrix data and computes the inverse using the solve function.
# If the cached inverse from makeCacheMatrix(matrix) already has a value
# (always the case after the first run), it returns the cached object.
cacheSolve <- function(x, ...) {
  ## Return a matrix that is the inverse of the matrix held by 'x'
  ## (a cache object built by makeCacheMatrix), computing it only once.
  cached <- x$getInverse()
  if (!is.null(cached)) {
    # Cache hit: skip the potentially costly solve().
    return(cached)
  }
  # Cache miss: invert the stored matrix and remember the result.
  stored <- x$getMatrix()
  fresh <- solve(stored, ...)
  x$setInverse(fresh)
  fresh
}
|
caeb9d8e09b24a2a18042f332d2dde30586c17f9
|
f1296f9a7a47a1d00a6613c9ba358bb9808b3f14
|
/ui.R
|
3bed9c87f25ca4213674525e3cb82c6367782f8e
|
[] |
no_license
|
straussalbee/ShinyStability
|
6d60927095bef1098ec98657a4de31383efa055e
|
72a6e6ba0316511d2a82f0688ee6059ab4f62217
|
refs/heads/master
| 2021-01-10T03:31:40.387883
| 2016-01-11T23:12:21
| 2016-01-11T23:12:21
| 49,459,799
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,154
|
r
|
ui.R
|
library(shiny)
# NK cell repertoire stability data visualization app -- ui.R of a Shiny app.
# The companion server.R is expected to render output 'plot' from the inputs
# declared below.
# colClasses: first 4 columns factors, 5th numeric, 6th factor.
# NOTE(review): colClasses is positional -- verify it matches StabilityData.
stability <- read.table("StabilityData",header=TRUE,colClasses=c(rep("factor",4),"numeric","factor"))
dataset <- stability
# Define UI: one large plot on top, three columns of controls underneath.
shinyUI(fluidPage(
title = "Human NK Cell Repertoire Stability",
plotOutput('plot',width = "900px",height="600px"),
hr(),
fluidRow(
# Column 1: heading plus toggles for jitter/smoothing layers.
column(3,
h4("Human NK Cell Repertoire Stability"),
br(),
checkboxInput('jitter', 'Jitter'),
checkboxInput('smooth', 'Smooth')
),
# Column 2: axis variables (any column) and facet choices (factors only).
column(4, offset = 1,
selectInput('x', 'X', names(dataset),selected="Timepoint"),
selectInput('y', 'Y', names(dataset), selected="Marker"),
selectInput('facet_row', 'Facet Row',c(None='.', names(stability[sapply(stability, is.factor)]))),
selectInput('facet_col', 'Facet Column',c(None='.', names(stability[sapply(stability, is.factor)])),selected="Donor")
),
# Column 3: aesthetic mappings for color and point size.
column(4,
selectInput('color', 'Color', c('None', names(dataset)),selected="Type"),
selectInput('size', 'Size', c('None', names(dataset)),selected="Frequency")
)
)
))
|
c0017e4c670e9dba125e743a7956b840437c1a69
|
0500ba15e741ce1c84bfd397f0f3b43af8cb5ffb
|
/cran/paws.compute/man/emrserverless_list_job_runs.Rd
|
5ff7c0104aa187b824067577f1c405aa38a6a705
|
[
"Apache-2.0"
] |
permissive
|
paws-r/paws
|
196d42a2b9aca0e551a51ea5e6f34daca739591b
|
a689da2aee079391e100060524f6b973130f4e40
|
refs/heads/main
| 2023-08-18T00:33:48.538539
| 2023-08-09T09:31:24
| 2023-08-09T09:31:24
| 154,419,943
| 293
| 45
|
NOASSERTION
| 2023-09-14T15:31:32
| 2018-10-24T01:28:47
|
R
|
UTF-8
|
R
| false
| true
| 1,149
|
rd
|
emrserverless_list_job_runs.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/emrserverless_operations.R
\name{emrserverless_list_job_runs}
\alias{emrserverless_list_job_runs}
\title{Lists job runs based on a set of parameters}
\usage{
emrserverless_list_job_runs(
applicationId,
nextToken = NULL,
maxResults = NULL,
createdAtAfter = NULL,
createdAtBefore = NULL,
states = NULL
)
}
\arguments{
\item{applicationId}{[required] The ID of the application for which to list the job run.}
\item{nextToken}{The token for the next set of job run results.}
\item{maxResults}{The maximum number of job runs that can be listed.}
\item{createdAtAfter}{The lower bound of the option to filter by creation date and time.}
\item{createdAtBefore}{The upper bound of the option to filter by creation date and time.}
\item{states}{An optional filter for job run states. Note that if this filter contains
multiple states, the resulting list will be grouped by the state.}
}
\description{
Lists job runs based on a set of parameters.
See \url{https://www.paws-r-sdk.com/docs/emrserverless_list_job_runs/} for full documentation.
}
\keyword{internal}
|
774fb27cff478f55f14db8744d5dc18fcca07074
|
f5789c65889f7021f5c37f8af7e1b64d9060babf
|
/man/FuzzyData-class.Rd
|
06b91ee0d40e285234813b9dc4d04edf45e9adb1
|
[] |
no_license
|
edwardchu86/FuzzyAHP
|
31341c6e2cdb8b803e9cf03c988ee0b57b235ad2
|
1f46fee5b5d22dc48ff8d2cfe3889f594d67b3f0
|
refs/heads/master
| 2021-01-11T02:08:47.262539
| 2016-04-26T19:32:00
| 2016-04-26T19:32:00
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 505
|
rd
|
FuzzyData-class.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/class-FuzzyData.R
\docType{class}
\name{FuzzyData-class}
\alias{FuzzyData-class}
\title{Class "FuzzyData"}
\description{
An S4 class to represent fuzzy data.
}
\section{Slots}{
\describe{
\item{\code{fnMin}}{A numeric vector of minimal values of fuzzy data.}
\item{\code{fnModal}}{A numeric vector of modal values of fuzzy data.}
\item{\code{fnMax}}{A numeric vector of maximal values of fuzzy data.}
}}
|
baa16e88cfb467a4c55e00c0570e05ba5568ab12
|
824870afd8a85f46191f8186c93304bd136953a6
|
/honeybee_genotype_pipeline/src/plot_ihist.R
|
91e28100c5a823114624a38b990f22511ab7493f
|
[] |
no_license
|
TomHarrop/honeybee-genotype-pipeline
|
9147a8cfe244774e61cc722f31bf250822bd183f
|
3659985ee9da390b97c026c827e867db8e8e00ff
|
refs/heads/master
| 2021-07-24T17:48:44.139225
| 2021-07-13T05:23:35
| 2021-07-13T05:23:35
| 223,312,849
| 1
| 0
| null | 2021-07-13T03:27:36
| 2019-11-22T03:06:27
|
Python
|
UTF-8
|
R
| false
| false
| 1,716
|
r
|
plot_ihist.R
|
#!/usr/bin/env Rscript
# Redirect both messages and regular output to the log file provided by
# Snakemake; 'snakemake' is the S4 object injected by Snakemake's script
# directive.
log <- file(snakemake@log[[1]],
open = "wt")
sink(log, type = "message")
sink(log, type = "output", append = TRUE)
library(data.table)
library(ggplot2)
ReadIhist <- function(ihist_file){
  # Read an insert-size histogram table, skipping what are assumed to be
  # 5 header lines, and keep only rows with `#InsertSize` <= 1000.
  ihist <- fread(ihist_file, skip = 5)
  ihist[`#InsertSize` <= 1000]
}
CalculateMeanInsert <- function(ihist_dt){
  # The table is a histogram (one row per insert size, with its Count).
  # Treat it as a run-length encoding, expand back to one value per
  # observation with inverse.rle(), and average.
  size_runs <- ihist_dt[, structure(
    list(lengths = Count, values = `#InsertSize`), class = "rle")]
  observed_sizes <- inverse.rle(size_runs)
  # Round to the nearest whole insert size and return an integer.
  as.integer(round(mean(observed_sizes), 0))
}
# Read the per-sample insert-size histograms listed by Snakemake and name
# each element by its file name with the ".ihist" extension stripped.
ihist_files <- snakemake@input[["ihist_files"]]
# FIX: escape and anchor the pattern. The original sub(".ihist", "", ...)
# treated '.' as "any character" and was unanchored, so it could clip the
# wrong part of a name containing "ihist"; match a literal suffix instead.
names(ihist_files) <- sub("\\.ihist$", "", basename(ihist_files))
# combine into one long table, keeping the sample name as a column
ihist_list <- lapply(ihist_files, ReadIhist)
ihist_data <- rbindlist(ihist_list, idcol = "sample")
# mean insert size per sample (integer; see CalculateMeanInsert)
mean_dt <- ihist_data[, .(meansize = CalculateMeanInsert(.SD)),
by = sample]
# configure plot: place mean labels near the top of the tallest panel
y_pos <- ihist_data[, max(Count) * 0.95]
vd <- viridisLite::viridis(3)
# plot: faceted area chart per sample, with a dashed vertical line and a
# numeric label at each sample's mean insert size
gp <- ggplot(ihist_data,
aes(x = `#InsertSize`, y = Count)) +
theme_grey(base_size = 6) +
facet_wrap(~ sample) +
xlab("Mapped insert size") +
geom_vline(mapping = aes(xintercept = meansize),
data = mean_dt,
linetype = 2,
colour = vd[[2]])+
geom_text(mapping = aes(x = meansize + 10,
y = y_pos,
label = meansize),
hjust = "inward",
colour = vd[[2]],
data = mean_dt,
size = 2) +
geom_area(fill = alpha(vd[[1]], 0.5))
ggsave(snakemake@output[["plot"]],
gp,
device = cairo_pdf,
width = 10,
height = 7.5,
units = "in")
# Record package versions in the log for reproducibility.
sessionInfo()
|
0fb665eb8e343b6a66c8aa8d26cf7e85960119dd
|
d54283af0417b2becacca47695675fa3ee5c0c44
|
/StrokelitudeScripts/TorquePlotting.R
|
a048c7e377dbce2a2a7d8546331d8b7d8dacc0f9
|
[] |
no_license
|
chiser/Strokelitude-Scripts
|
e56f55e4ae94d91ac99c70cb0a1567ff0bb0ee92
|
26e9e032b6493158a7277f561fba02d015b58a8b
|
refs/heads/master
| 2021-01-21T11:23:24.036707
| 2018-03-22T14:43:10
| 2018-03-22T14:43:10
| 83,563,779
| 0
| 0
| null | 2018-04-20T09:38:36
| 2017-03-01T14:31:24
|
R
|
ISO-8859-2
|
R
| false
| false
| 1,236
|
r
|
TorquePlotting.R
|
######################## A script for plotting data from torque measurements
## Source the helper functions (flyTorqueImport, flyTraceExcerptPlot).
source("StrokePrepFunctions.R")
tTraces <- flyTorqueImport()
## Query the user for start and end seconds for the excerpts.
print("Please enter the starting time for the excerpts.")
startTime <- scan(n=1)
print("Please enter the end time for the excerpts.")
endTime <- scan(n=1)
## FIX: seq_along() is safe when tTraces is empty, unlike 1:length(tTraces),
## which would iterate over c(1, 0) and fail on indexing.
for(ind in seq_along(tTraces)){
png(filename = paste("torqueTrace",ind,".png", sep = ""), width = 1920)
plot(x = tTraces[[ind]]$Time, y = tTraces[[ind]]$Trace, type = "l", main = paste("Torque Trace",ind), xlab = "Time (sec)", ylab = "Yaw Torque")
# graphics.off()
# Uncommenting graphics.off() above previously avoided the error
# "finite xlim values needed" ("endliche xlim werte n\u00f6tig" in a German
# locale); hard-coding xlim is impractical because the ranges vary a lot.
# NOTE(review): the last fly appears to plot incorrectly, possibly because
# numbers round-trip through factors in the import -- verify upstream.
flyTraceExcerptPlot(tTraces[[ind]]$Trace, tTraces[[ind]]$Time, startTime, endTime, filename = paste("torqueTraceExcerpt", ind, ".png"))
#dev.off()
}
# Close all graphics devices opened by the loop (one png per fly).
graphics.off()
|
dcafdace2491346adaacc08dc9a64ea234324dbd
|
b64398eadec00f607b14e292902d54af86b9b2ec
|
/man/threeDto4D.Rd
|
da76ac0ac4b99a1ac3a4282290320e7df68463d8
|
[] |
no_license
|
cran/AnalyzeFMRI
|
431716af6c17c402db4039cac1bd81fd713b95b7
|
5d93ebbaef6207664d08520fe4312001bcedfaf3
|
refs/heads/master
| 2021-10-13T02:15:03.174993
| 2021-10-05T12:40:02
| 2021-10-05T12:40:02
| 17,691,681
| 0
| 2
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,752
|
rd
|
threeDto4D.Rd
|
\name{threeDto4D}
\alias{threeDto4D}
\title{threeDto4D}
\description{To read tm functional image files in ANALYZE or NIFTI format,
and concatenate them to obtain one 4D image file in Analyze (hdr/img
pair) or Nifti format (hdr/img pair or single nii) which is written to
disk. Note that this function outputs the files in the format sent
in. If desired, one can use the function \code{analyze2nifti} to
create NIFTI files from ANALYZE files.}
\usage{threeDto4D(outputfile,path.in=NULL,prefix=NULL,regexp=NULL,times=NULL,
list.of.in.files=NULL,path.out=NULL,is.nii.pair=FALSE,hdr.number=1)}
\arguments{\item{outputfile}{character. Name of the outputfile without extension}
\item{path.in}{character with the path to the directory containing the image files}
\item{prefix}{character. common prefix to each file}
\item{regexp}{character. Regular expression to get all the files}
\item{times}{vector. numbers of the image files to retrieve}
\item{list.of.in.files}{names of img files to concatenate (with full path)}
\item{path.out}{where to write the output hdr/img pair files. Will be taken as path.in if not provided.}
\item{is.nii.pair}{logical. Should we write a single nii NIFTI file or a hdr/img NIFTI pair file?}
\item{hdr.number}{Number of the original 3D Analyze or NIFTI image file from
which to take the header that should serve as the final header of the newly created 4D image file}
}
\value{None.}
\seealso{
\code{\link{twoDto4D}}
\code{\link{fourDto2D}}
}
\examples{
# path.fonc <- "/network/home/lafayep/Stage/Data/map284/functional/
# MondrianApril2007/preprocessing/1801/smoothed/"
# threeDto4D("essai",path.in=path.fonc,prefix="su1801_",regexp="????.img",times=1:120)
}
\keyword{utilities}
|
cb269e7022b09df3808f7ff5e4c7f4368c51ecf4
|
9aafde089eb3d8bba05aec912e61fbd9fb84bd49
|
/codeml_files/newick_trees_processed/12758_0/rinput.R
|
4d0092328500370df39924c69c030007e3ecf411
|
[] |
no_license
|
DaniBoo/cyanobacteria_project
|
6a816bb0ccf285842b61bfd3612c176f5877a1fb
|
be08ff723284b0c38f9c758d3e250c664bbfbf3b
|
refs/heads/master
| 2021-01-25T05:28:00.686474
| 2013-03-23T15:09:39
| 2013-03-23T15:09:39
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 137
|
r
|
rinput.R
|
library(ape)
# Load the tree, remove its root, and write the unrooted version back out.
phylo <- read.tree("12758_0.txt")
write.tree(unroot(phylo), file="12758_0_unrooted.txt")
|
e972be69d83fb65f0580f89ce972f65e836e3ebc
|
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
|
/data/genthat_extracted_code/PerformanceAnalytics/examples/mean.geometric.Rd.R
|
92bd449d3040702a392db61aeb367595515fd58a
|
[] |
no_license
|
surayaaramli/typeRrh
|
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
|
66e6996f31961bc8b9aafe1a6a6098327b66bf71
|
refs/heads/master
| 2023-05-05T04:05:31.617869
| 2019-04-25T22:10:06
| 2019-04-25T22:10:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 478
|
r
|
mean.geometric.Rd.R
|
# Auto-extracted example code from PerformanceAnalytics' mean.geometric.Rd;
# requires the PerformanceAnalytics package and its bundled 'edhec' data set.
library(PerformanceAnalytics)
### Name: mean.geometric
### Title: calculate attributes relative to the mean of the observation
### series given, including geometric, stderr, LCL and UCL
### Aliases: mean.geometric mean.utils mean.UCL mean.LCL mean.stderr
### mean.stderr mean.LCL mean.UCL
### ** Examples
# Load the 'edhec' data set shipped with the package.
data(edhec)
# Geometric mean, standard error, and upper/lower confidence limits of the
# "Funds of Funds" series.
mean.geometric(edhec[,"Funds of Funds"])
mean.stderr(edhec[,"Funds of Funds"])
mean.UCL(edhec[,"Funds of Funds"])
mean.LCL(edhec[,"Funds of Funds"])
|
e2dc03e97e5a7c8b37808535af44c8f2d3f04029
|
db1ea206b2ae975ddb0d74af9f6df9a05e994e03
|
/R_grambank/unusualness/processing/assigning_AUTOTYP_areas.R
|
e895600c8c15fbeda450a1ab506caffa21f577f7
|
[] |
no_license
|
grambank/grambank-analysed
|
1b859b5b25abb2e7755421b65a63bef96dfc8114
|
47d54c9fe82c2380d3c89042fd9f477aa117e044
|
refs/heads/main
| 2023-06-28T20:00:59.261977
| 2023-06-07T17:04:28
| 2023-06-07T17:04:28
| 397,491,052
| 3
| 0
| null | 2023-04-21T13:32:55
| 2021-08-18T06:04:03
|
R
|
UTF-8
|
R
| false
| false
| 3,117
|
r
|
assigning_AUTOTYP_areas.R
|
source("global_variables.R")
source("fun_def_h_load.R")
h_load(c("fields", "tidyverse"))
#Script written by Hedvig Skirgård
cat("Matching all languages in Grambank to an AUTOTYP-area.\n")
#combining the tables languages and values from glottolog_df-cldf into one wide dataframe.
#this can be replaced with any list of Language_IDs, long and lat
if (!file.exists("output/non_GB_datasets/glottolog-cldf_wide_df.tsv")) { source("make_glottolog-cldf_table.R") }
glottolog_df <- read_tsv("output/non_GB_datasets/glottolog-cldf_wide_df.tsv",col_types = cols()) %>%
dplyr::select(Language_ID, Longitude, Latitude) %>%
filter(!is.na(Longitude))
##Adding in areas of linguistic contact from AUTOTYP
AUTOTYP_FN <- "../autotyp-data/data/csv/Register.csv"
cat("Fetching AUTOTYP data from", AUTOTYP_FN, ".\n")
AUTOTYP <- read_csv(AUTOTYP_FN ,col_types = cols()) %>%
dplyr::select(Language_ID = Glottocode, Area, Longitude, Latitude) %>%
group_by(Language_ID) %>%
sample_n(1) #when a language is assigned to more than one area, pick randomly.
#This next bit where we find the autotyp areas of languages was written by Seán Roberts
# We know the autotyp-area of languages in autotyp and their long lat. We don't know the autotyp area of languages in grambank. We also can't be sure that the long lat of languoids with the same glottoids in autotyp and grambank_df have the exact identical long lat. First let's make two datasets, one for autotyp languages (hence lgs where we know the area) and those that we wish to know about, the grambank ones.
lgs_with_known_area <- as.matrix(AUTOTYP[!is.na(AUTOTYP$Area),c("Longitude","Latitude")])
rownames(lgs_with_known_area) <- AUTOTYP[!is.na(AUTOTYP$Area),]$Language_ID
known_areas <- AUTOTYP %>%
dplyr::filter(!is.na(Area)) %>%
dplyr::select(Language_ID, Area) %>%
distinct() %>%
dplyr::select(AUTOTYP_Language_ID = Language_ID, everything())
rm(AUTOTYP)
lgs_with_unknown_area <- as.matrix(glottolog_df[,c("Longitude","Latitude")])
rownames(lgs_with_unknown_area) <- glottolog_df$Language_ID
# For missing, find area of closest language
cat("Calculating the geographical distance between languages with known AUTOTYP-areas and those without a matched AUTOTYP-area.\n")
atDist <- rdist.earth(lgs_with_known_area,lgs_with_unknown_area, miles = FALSE)
rm(lgs_with_known_area, lgs_with_unknown_area)
df_matched_up <- as.data.frame(unlist(apply(atDist, 2, function(x){names(which.min(x))})), stringsAsFactors = F) %>%
dplyr::rename(AUTOTYP_Language_ID = `unlist(apply(atDist, 2, function(x) { names(which.min(x)) }))`)
cat("Matching languages without known AUTOTYP-area to the AUTOTYP-area of its closest neighbour with has a known AUTOTYP-area.\n")
glottolog_df_with_AUTOTYP <- df_matched_up %>%
tibble::rownames_to_column("Language_ID") %>%
full_join(known_areas, by = "AUTOTYP_Language_ID") %>%
right_join(glottolog_df, by = "Language_ID") %>%
dplyr::select(-AUTOTYP_Language_ID) %>%
dplyr::rename(AUTOTYP_area = Area)
glottolog_df_with_AUTOTYP %>%
write_tsv("output/non_GB_datasets/glottolog_AUTOTYP_areas.tsv")
|
3826f099f23f8741729178d85b3c406baa19ceb1
|
bcc5dab59e4229eb26dc7f2e24b5964d97aa4840
|
/Duke-Data Analysis/Lab3a.R
|
ef283326e954dd5b636c8b3bf119f89fa9bdbec2
|
[] |
no_license
|
JPruitt/Coursera
|
87d3d273bce00d143769f6c8070c9a2163a568fd
|
339873ff1036b4a1d52f6cca5001b4d9670f374d
|
refs/heads/master
| 2021-01-01T19:42:50.471049
| 2014-05-09T11:00:20
| 2014-05-09T11:00:20
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,897
|
r
|
Lab3a.R
|
## Lab 3a -- sampling distributions with the Ames housing data.
## Loads the data set over the network; requires an internet connection.
## FIX: replaced '=' with the idiomatic '<-' for all object assignments.
load(url("http://s3.amazonaws.com/assets.datacamp.com/course/dasi/ames.RData"))
head(ames)
names(ames)
dim(ames)
# Variables of interest: above-ground living area and sale price.
area <- ames$Gr.Liv.Area
price <- ames$SalePrice
summary(area)
hist(area)
# Two independent random samples of 50 areas.
samp0 <- sample(area, 50)
hist(samp0)
samp1 <- sample(area, 50)
hist(samp1)
mean(samp1)
# Sampling distribution of the mean for n = 50 (5000 replicates).
# The vector 'sample_means50' is initialized with NA values
sample_means50 <- rep(NA, 5000)
# The for loop runs 5000 times, with 'i' taking values 1 up to 5000
for (i in 1:5000) {
  # Take a random sample of size 50
  samp <- sample(area, 50)
  # Store the mean of the sample in the 'sample_means50' vector on the ith
  # place
  sample_means50[i] <- mean(samp)
  # Print the counter 'i' (progress indicator; noisy but kept from the lab)
  print(i)
}
# Print the first few sample means
head(sample_means50)
# A smaller run of 100 replicates.
sample_means_small <- rep(NA, 100)
for (i in 1:100) {
  samp <- sample(area, 50)
  sample_means_small[i] <- mean(samp)
}
sample_means_small
# Effect of sample size: n = 10 vs n = 100.
sample_means10 <- rep(NA, 5000)
sample_means100 <- rep(NA, 5000)
for (i in 1:5000) {
  samp <- sample(area, 10)
  sample_means10[i] <- mean(samp)
  samp <- sample(area, 100)
  sample_means100[i] <- mean(samp)
}
# Take a look at the results:
head(sample_means10)
head(sample_means50) # was already loaded
head(sample_means100)
# Divide the plot in 3 rows:
par(mfrow = c(3, 1))
# Common x-axis limits so the spreads are visually comparable:
xlimits <- range(sample_means10)
# Draw the histograms:
hist(sample_means10, breaks = 20, xlim = xlimits)
hist(sample_means50, breaks = 20, xlim = xlimits)
hist(sample_means100, breaks = 20, xlim = xlimits)
# Same exercise with sale price. Take a sample of size 50 from 'price':
sample_50 <- sample(price, 50)
# Print the mean:
mean(sample_50)
sample_means50 <- rep(NA, 5000)
sample_means150 <- rep(NA, 5000)
for (i in 1:5000) {
  samp <- sample(price, 50)
  sample_means50[i] <- mean(samp)
  samp <- sample(price, 150)
  sample_means150[i] <- mean(samp)
}
par(mfrow = c(2, 1))
hist(sample_means50)
hist(sample_means150)
|
c607cd751de1a57c4ff1d3634dcb3030d67bcd8c
|
2fb36e3d133cf9dd19061376071125bdb22ee7f1
|
/Graph generators/ROCdraw.R
|
78bdfc68155bcb3aec75e1e92356b58281ae8cfc
|
[] |
no_license
|
DebolinaHalderLina/CRISPRpred_plus_plus
|
3b143680798893fe49ddf556d6485fb9f6405007
|
5b4314294be170735ebdcb9c5984092d3483311d
|
refs/heads/master
| 2020-03-27T13:49:00.017969
| 2018-08-29T17:07:48
| 2018-08-29T17:07:48
| 146,610,675
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,097
|
r
|
ROCdraw.R
|
library(ROCR)
# Interactive loading of the five prediction files and the labels; these
# commented lines are run by hand before the rest of the script.
#data(ROCR.simple)
#newob1= read.csv(file.choose())
#newob2= read.csv(file.choose())
#newob3= read.csv(file.choose())
#newob4= read.csv(file.choose())
#newob5= read.csv(file.choose())
#observed = read.csv(file.choose())
# One prediction column per model.
# NOTE(review): 'newob1'..'newob5' and 'observed' must already exist in the
# workspace (see the commented file.choose() lines above).
preds <- cbind(p1 = newob1$x, p1 = newob2$x, p1 = newob3$x,p1 = newob4$x,p1 = newob5$x)
n <- 5 # you have n models
colors <- c('red', 'blue','green','orange','black') # one color per model
# Overlay the "mat" performance curve of each model (curves 2..n are added
# to the first plot). NOTE(review): the trailing comma in
# performance(..., "mat",) passes an empty third argument; it appears to fall
# back to the default x.measure but should be removed -- verify.
for (i in 1:n) {
plot(performance(prediction(preds[,i],observed$result),"mat",),
add=(i!=1),col=colors[i],lwd=2)
}
# Alternative curve variants kept for reference (sens/spec, prec/rec):
#for (i in 1:n) {
#plot(performance(prediction(preds[,i],observed$score_drug_gene_threshold),"mat"),
# add=(i!=1),col=colors[3],lwd=2)
#plot(performance(prediction(preds[,i],observed$result),"sens","spec"),
#add=(i!=1),col=colors[i],lwd=2)
#}
#for (i in 1:n) {
#plot(performance(prediction(preds[,i],observed$score_drug_gene_threshold),"mat"),
#add=(i!=1),col=colors[2],lwd=2)
#plot(performance(prediction(preds[,i],observed$result),"prec","rec"),
#add=(i!=1),col=colors[i],lwd=2)
#}
|
44c2cbfd41c5bad862befb2f2df6e91ef52025e4
|
c981caf103a3540f7964e6c41a56ca34d67732c4
|
/R/ma.wtd.quantileNA.R
|
aa09fb7d78b4679ccf7a7918a2e1defdcb522195
|
[] |
no_license
|
alexanderrobitzsch/miceadds
|
8285b8c98c2563c2c04209d74af6432ce94340ee
|
faab4efffa36230335bfb1603078da2253d29566
|
refs/heads/master
| 2023-03-07T02:53:26.480028
| 2023-03-01T16:26:31
| 2023-03-01T16:26:31
| 95,305,394
| 17
| 2
| null | 2018-05-31T11:41:51
| 2017-06-24T15:16:57
|
R
|
UTF-8
|
R
| false
| false
| 943
|
r
|
ma.wtd.quantileNA.R
|
## File Name: ma.wtd.quantileNA.R
## File Version: 0.14
#*** weighted quantile for multiply imputed datasets
# Computes weighted quantiles per variable within each imputed dataset,
# then pools them by averaging across datasets. Returns a probs x variables
# matrix with "<p>%" row names.
ma.wtd.quantileNA <- function( data, weights=NULL, vars=NULL,
        type=7, probs=seq(0,1,.25) )
{
    require_namespace("TAM")
    #*** pre-processing: harmonize data/weights across imputed datasets
    prep <- ma_wtd_stat_prepare_data(data=data, weights=weights, vars=vars )
    data <- prep$data
    weights <- prep$weights
    n_datasets <- length(data)
    n_vars <- ncol(data[[1]])
    n_probs <- length(probs)
    #*** weighted quantiles: one row per dataset, variables laid out in
    #*** consecutive groups of n_probs columns
    quant_mat <- matrix( NA, nrow=n_datasets, ncol=n_vars*n_probs )
    for (dd in seq_len(n_datasets)){
        dat_dd <- data[[dd]]
        for (vv in seq_len(n_vars)){
            q_vv <- TAM::weighted_quantile(x=dat_dd[,vv], w=weights, type=type,
                            probs=probs )
            quant_mat[dd, seq_len(n_probs) + (vv-1)*n_probs ] <- q_vv
        }
    }
    #*** pool across datasets by averaging, then reshape to probs x variables
    pooled <- colMeans(quant_mat)
    pooled <- matrix( pooled, nrow=n_probs, ncol=n_vars, byrow=FALSE)
    colnames(pooled) <- colnames(data[[1]])
    rownames(pooled) <- paste0(100*probs,"%")
    return(pooled)
}
|
532d47f38b42edaf97fd3bf6766d3f011baf8bb2
|
8bcf0871c60390d112f651094b41079708829d07
|
/man/prep.houses.Rd
|
494fbf408fcae51c910cc96e87c6c6d041a8f485
|
[] |
no_license
|
cran/lancet.iraqmortality
|
018b361d3df3a7176fabd35841d99eda52ae4b6f
|
9b707b59e4f5f82b559405e3473c6f9b9c859e34
|
refs/heads/master
| 2016-08-05T01:49:10.146125
| 2007-06-20T00:00:00
| 2007-06-20T00:00:00
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 693
|
rd
|
prep.houses.Rd
|
\name{prep.houses}
\alias{prep.houses}
\title{Loads up houses dataset from the mortality.zip file}
\description{
Returns a data frame containing renamed and cleaned up variables of the
'houses' from the provided mortality.zip file in the data directory.
See the package vignette for a description of these variables. If no
mortality.zip is provided, then this function will not work.
}
\usage{
prep.houses()
}
\value{
Returns a data frame with information on the 1,849 households
  interviewed in Burnham et al (2006). Described as the \bold{houses}
data frame in the vignette.
}
\seealso{
\code{\link{prep.deaths}}
}
\author{Arjun Ravi Narayan, David Kane}
\keyword{datasets}
|
a4a098bc8e27aae520a3e71c5dfea2f7b6cd64fe
|
665b491ee5cc3af40c02a49e9ac6277a5eeaca02
|
/playground/pruning.r
|
db2721e9ea28c9db8c565f6b05129927b54b6701
|
[
"MIT"
] |
permissive
|
USCbiostats/aphylo
|
49ac5286c5b69350b85d11a4d23dc7422ef3c26c
|
0a2f61558723c3a3db95e7f5b4e5edc4bf87a368
|
refs/heads/master
| 2023-08-16T19:57:29.976058
| 2023-08-09T20:20:32
| 2023-08-09T20:20:32
| 77,094,049
| 10
| 1
|
NOASSERTION
| 2020-06-07T02:24:17
| 2016-12-21T23:37:46
|
R
|
UTF-8
|
R
| false
| false
| 3,566
|
r
|
pruning.r
|
#' Given that we want to remove a set of \eqn{l \subset L}, the algorithm goes as
#' follows:
#'
#' 1. Match the node ids with position.
#' 2. Sort it to be decreasing, so the most inner nodes show first,
#' 3. Increase the list:
#' a. Compute the geodesic matrix G,
#' b. For i in l do, j != i:
#' If G(i,j)!=0, then add it to the list
#'
#' \code{sapply(1:(3-1), function(x) sapply(x:3, c, x)[,-1,drop=FALSE])}
#'
#' b. Define tags(n). For k in p, For s in p[k]:
#' 1. If G[s] != 0, then tag() = 1
#'
#' Two things to do: 1 remove them from the
#'
#'
#' # A simple example of how prune works-------------------------------------------
#' # Simulating a nice tree
#' set.seed(1213)
#' x <- sim_tree(4)
#'
#' # Setting up the plot envir
#' oldpar <- par(no.readonly = TRUE)
#' par(mfrow = c(3,2), mai=rep(.5,4), cex=1, xpd=NA, omi=c(0,0,1,0))
#'
#' # Plotting
#' plot(x, main = "Full tree", show.node.label = TRUE)
#' plot(prune(x, 5), main="removing 5", show.node.label = TRUE)
#' plot(prune(x, 6), main="removing 6", show.node.label = TRUE)
#' plot(prune(x, 4), main="removing 4", show.node.label = TRUE)
#' plot(prune(x, 3), main="removing 3", show.node.label = TRUE)
#' plot(prune(x, c(4,6,3)), main="removing (4,6,3)", show.node.label = TRUE)
#'
#' # Adding a title
#' par(mai=rep(1,4), omi=rep(0,4), mfrow=c(1,1), new=FALSE)
#' title("Prunning trees with -prune-")
#' par(oldpar)
#'
#' @name prune
NULL
#' @export
#' @rdname prune
# S3 generic: dispatches to prune.<class>(x) methods (e.g. prune.po_tree below)
prune <- function(x, ids, ...) UseMethod("prune")
#' @export
#' @rdname prune
# Removes the nodes in -ids- plus all of their descendants from a
# partially-ordered tree (node ids are 0-based; 0 is the root), relabels the
# surviving nodes back to a contiguous 0..(m-1) range, and recomputes the
# offspring list.
prune.po_tree <- function(x, ids) {
# 1. Identify which will be removed ------------------------------------------
# Getting the unique set, and sorting it
ids <- sort(unique(ids))
n <- length(attr(x, "labels"))
# Matching to actual labels
# (labels map to 0-based node ids, hence the -1L after match)
if (is.character(ids))
ids <- match(ids, getlabels(x)) - 1L
# Checking the lengths
if (any(is.na(ids)))
stop("Some -ids- don't match any leafs of the tree.")
if (any(ids > (n - 1)))
stop("Out of range: Some -ids- are out of range (above n-1).")
if (any(ids < 1))
stop("Out of range: Some -ids- are out of range (below 1). Root node cannot be removed.")
# 2. Computing Geodesics to extend the list ----------------------------------
# A node reachable from a removed node (geodesic distance > 0 in the
# directed graph) is a descendant and must be removed too.
nodes_ids <- ids
G <- approx_geodesic(x, undirected = FALSE, warn = FALSE)
# Which others should be removed
# (G is 1-indexed while node ids are 0-based, hence the +1L / -1L)
for (l in ids)
nodes_ids <- c(nodes_ids, which(G[l + 1L, ] > 0) - 1L)
# Getting the final list and checking if it leaves at least 2 nodes
nodes_ids <- sort(unique(nodes_ids))
if ( (n - length(nodes_ids)) < 2 )
stop("You are removing all but the root node, and that's not a tree.")
# 3. Marking the ones that need to be removed --------------------------------
# new_ids[k+1] = old id k minus the count of removed nodes at or before k,
# i.e. the relabeled id each surviving node will get.
old_ids <- 0L:(n - 1L)
new_ids <- rep(0L, n)
new_ids[nodes_ids+1L] <- 1L
new_ids <- old_ids - cumsum(new_ids)
old_labels <- attr(x, "labels")
# 4. Removing everything that won't be used any more -------------------------
# From the edgelist: keep only edges whose endpoints both survive
edges_ids <- which(!(x[,1] %in% nodes_ids) & !(x[,2] %in% nodes_ids))
x <- x[edges_ids,,drop=FALSE]
# 4. Relabeling --------------------------------------------------------------
x[,1] <- new_ids[match(x[,1], old_ids)]
x[,2] <- new_ids[match(x[,2], old_ids)]
attr(x, "labels") <- old_labels[-(nodes_ids + 1L)]
# 5. Re computing the offspring ----------------------------------------------
attr(x, "offspring") <- list_offspring(x)
structure(x, class= "po_tree")
}
|
573a8e3f8993cdd64917101c09b4fdbba029f293
|
cd3200268fcde9acc2728146f74be6356e1a0859
|
/scripts/voxceleb.R
|
d6cc8b82de04271c8ec0f897c35efc7f56f763b0
|
[] |
no_license
|
cdo03c/Audio_Age_Classifier
|
116008a905b0a7cb6919385c94b060a0d9e64c70
|
78bd018c5c8cf8063121e8f495cf55c52db746cf
|
refs/heads/master
| 2020-03-13T18:12:23.701493
| 2018-04-27T01:54:02
| 2018-04-27T01:54:02
| 131,231,527
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,072
|
r
|
voxceleb.R
|
# Set working directory
setwd("~/Documents/Github/Audio_Age_Classifier")
# Clear workspace
rm(list = ls())
# Load packages
library(rvest)
# Download the data if it is not already present
if (!file.exists("./data/voxceleb.zip")) {
  download.file(url = "http://www.robots.ox.ac.uk/~vgg/data/voxceleb/voxceleb1.zip",
                destfile = "./data/voxceleb.zip")
}
# Unzip the data
unzip("./data/voxceleb.zip", exdir = "./data")
# Create the list of "<directory> <file>" entries.
dirs <- list.dirs('./data/voxceleb1_txt')
dirs <- dirs[-1]  # drop the root directory itself
# BUG FIX: the original seeded `files` with paste(dir, list.files(dir))
# before the loop variable `dir` existed; at that point `dir` is the base R
# *function*, so list.files(dir) errors. Build the vector over every
# directory in one pass instead.
files <- unlist(lapply(dirs, function(d) paste(d, list.files(d))))
### USE REGEX TO EXTRACT CELEBRITY NAME AND YOUTUBE ID
### USE RVEST TO EXTRACT THE CELEBRITY'S BIRTH DATE FROM WIKIPEDIA
# Create data frame of survivor seasons
wiki <- read_html("https://en.wikipedia.org/wiki/Survivor_(U.S._TV_series)")
# USE REVEST TO EXTRACT PUBLICATION DATE FOR VIDEO FROM YOUTUBE
# SUBTRACT DIFFERENCE BETWEEN AGE AND PUBLICATION DATE FOR AGE ESTIMATE FOR CELEBRITY IN VIDEO
|
f80369bf95b01ad24608decbe2ba792259d029c2
|
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
|
/data/genthat_extracted_code/spatial.gev.bma/examples/spatial.gev.bma.Rd.R
|
6c41db68623e105737122354734e5ccf9976fbf3
|
[] |
no_license
|
surayaaramli/typeRrh
|
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
|
66e6996f31961bc8b9aafe1a6a6098327b66bf71
|
refs/heads/master
| 2023-05-05T04:05:31.617869
| 2019-04-25T22:10:06
| 2019-04-25T22:10:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 366
|
r
|
spatial.gev.bma.Rd.R
|
# Auto-extracted example from the spatial.gev.bma package documentation.
library(spatial.gev.bma)
### Name: spatial.gev.bma
### Title: Run an MCMC to fit a hierarchical spatial generalized extreme
### value (GEV) model with the option for Bayesian model averaging (BMA)
### Aliases: spatial.gev.bma
### ** Examples
data(norway)
# NOTE(review): attach() puts norway's components (Y.list, X, S) on the
# search path; fine for a throwaway example, best avoided in reusable code.
attach(norway)
##To replicate our results, change 2 to 2e5 below
# (the 4th argument is the number of MCMC iterations; 2 is a smoke test)
a <- spatial.gev.bma(Y.list,X,S,2)
|
104d1f46210dfd827d688bbdcf2a71c1e11da5e9
|
5ca23ae12036731c20f2c1af00e71a5c00d9a5de
|
/lib/functions.R
|
d3926b3256187f6ba45d0b4e1a6bfa41437108d3
|
[] |
no_license
|
joebrew/controlflu2016
|
6a8343edd40cab88f370359bf1254e71f45f35f1
|
8fe7bad454de0353c5c798f640dd14ef1d2b47c9
|
refs/heads/master
| 2021-01-20T18:33:39.383334
| 2016-09-20T21:47:39
| 2016-09-20T21:47:39
| 63,759,581
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 6,798
|
r
|
functions.R
|
# Read and clean absenteeism data
#
# Reads the scrubbed absence CSV and returns one row per recorded absence
# with columns: id, lunch, race, school, grade, dob, date.
read_and_clean_absenteeism_data <- function(file = 'data/scrubbed_data_for_joe_correct_ids.csv'){
# Package requirements
require(readr)
require(plyr)
require(dplyr)
# Read file
# df <- read_csv(file)
df <- read.csv(file)
# Clean up grade
# ('30'/'31' appear to be data-entry variants of grade 3 -- TODO confirm)
df$grade[df$grade %in% c('30', '31')] <- '3'
# Clean up date of birth (keep only the leading 10-character date part)
df$dob <- substr(df$dob,
start = 1,
stop = 10)
# Make date objects
df$absenceDate <- as.Date(df$absenceDate, format = '%m/%d/%Y')
df$dob <- as.Date(df$dob)
# Clean up race ethnicity (single-letter codes -> labels; anything else -> NA)
df$race <-
ifelse(df$raceEthnicity == 'A', 'Asian',
ifelse(df$raceEthnicity == 'B', 'Black',
ifelse(df$raceEthnicity == 'H', 'Hispanic',
ifelse(df$raceEthnicity == 'I', 'Indigenous',
ifelse(df$raceEthnicity == 'M', 'Mixed',
ifelse(df$raceEthnicity == 'W', 'White', NA))))))
# Clean up lunch status; code book:
#1 Applied Not Eligible
#0 Did not apply
#2 Eligible for free lunch
#6 Eligible for free lunch/Direct Certified/Decline
#9 Eligible for free lunch/Direct Certified
#3 Eligible Reduced
#4 Enrolled USDA approved Prov 2 school
#Z Unknown
# (codes 4, 6 and Z fall through to NA below -- presumably intentional,
# TODO confirm)
df$lunch <-
ifelse(df$lunchStatus %in% c(0, 1), 'Not free/reduced',
ifelse(df$lunchStatus %in% c(2, 3, 9), 'Free/reduced', NA))
# Remove useless columns and clean up names
df <- df %>%
dplyr::select(studentNumber,
lunch,
race,
schoolName,
grade,
dob,
absenceDate)
df <- df %>%
dplyr::rename(id = studentNumber,
school = schoolName,
date = absenceDate)
# Make id character
df$id <- as.character(df$id)
return(df)
}
# Read and clean school immunization data
#
# Reads every CSV in `directory`, harmonizes each file's columns to the
# layout of the first file, recodes consent/vaccine/refusal fields, and
# returns one de-duplicated data.frame with one row per student id.
read_and_clean_immunization_data <- function(directory = 'data/immunization_data/'){
  # Packages
  require(readr)
  require(dplyr)
  # Snapshot the current working directory and guarantee it is restored
  # even if reading a file fails part-way through.
  cwd <- getwd()
  on.exit(setwd(cwd), add = TRUE)
  setwd(directory)
  # Read in every file, forcing all of them onto a common column layout
  files <- dir()
  results_list <- list()
  column_names <- NULL  # canonical column layout, taken from the first file
  for (i in seq_along(files)){
    this_file <- suppressWarnings(read_csv(files[i]))
    this_file <- data.frame(this_file)
    if (i == 1){
      # First file defines the canonical columns (minus the junk `NA.` col).
      # BUG FIX: keep the layout in a local variable rather than assign()ing
      # into (and later rm()ing from) the caller's global environment, which
      # clobbered any pre-existing `column_names` the user had.
      this_file <-
        this_file %>%
        dplyr::select(-`NA.`)
      column_names <- names(this_file)
    } else {
      # Add any canonical column this file is missing, then match the order
      for (this_column in column_names){
        if (!this_column %in% names(this_file)){
          this_file[, this_column] <- NA
        }
      }
      this_file <- this_file[, column_names]
    }
    # Drop rows without a student id
    this_file <- this_file %>%
      filter(!is.na(this_file$student_id))
    # Make every column character so the files stack without type conflicts
    for (j in seq_along(column_names)){
      this_file[, j] <- as.character(this_file[, j])
    }
    results_list[[i]] <- this_file
    message(paste0('Just finished reading in data for: ', this_file$school_name[1],
                   '\n',
                   'has ', nrow(this_file), ' rows.\n\n'))
  }
  df <- do.call('rbind', results_list)
  # Clean up the dataset --------------------------------------------------
  # Was a consent form returned at all?
  df$consent_form_return <-
    ifelse(df$consent_form_return %in%
             c('y', 'y\`', 'Y', 'yes'), TRUE, FALSE)
  # Consent granted (TRUE), denied (FALSE), or unknown (NA)
  df$consent_form_yes <-
    ifelse(df$consent_form_yn %in% c('n', 'N', 'no', 'No'), FALSE,
           ifelse(df$consent_form_yn %in% c('y', 'Y', 'yes'), TRUE,
                  NA))
  df$vaccine_date <- as.Date(df$vaccine_date)
  # Funding source: private insurance vs Vaccines For Children program
  df$vfc_priv <-
    ifelse(df$vfc_priv %in% c('peiv', 'pri', 'PRI', 'pric', 'priv',
                              'Priv', 'prtiv', 'vpri'), 'Private',
           ifelse(df$vfc_priv %in% c('fc', 'vc', 'vf', 'vfc', 'VFC'), 'VFC',
                  NA))
  df$refusal <-
    ifelse(df$refusal %in% c('1', 'ref', 'REF', 'y', 'Y', 'yes'), TRUE, FALSE)
  df$absence <-
    ifelse(is.na(df$absence), FALSE, TRUE)
  # '0' and '?' mean no vaccine; everything else (including NA) counts as
  # vaccinated. (The original's inner ifelse mapped both NA and non-NA to
  # TRUE, so this simplification is behavior-identical.)
  df$vaccine <-
    ifelse(df$vaccine %in% c('0', '?'), FALSE, TRUE)
  # Reduce columns
  df <-
    df %>%
    dplyr::select(school_name,
                  grade,
                  student_id,
                  consent_form_return,
                  consent_form_yes,
                  vaccine,
                  vaccine_date) %>%
    dplyr::rename(id = student_id)
  # Make data.frame
  df <- data.frame(df)
  # Remove those for whom student id appears to be a birthday
  df <- df[!grepl('/', df$id), ]
  df <- df[!grepl('-', df$id), ]
  # Make id character
  df$id <- as.character(df$id)
  # Remove duplicates.
  # Arrange so that yesses come first (justified since "no" is the default,
  # and any yes most likely indicates a true yes), then keep the first row
  # per id.
  df <- df %>%
    arrange(desc(consent_form_return),
            desc(consent_form_yes),
            desc(vaccine))
  df <- df[!duplicated(df$id), ]
  # Fix date screw up: impossible early dates are recoded to 2015-10-01
  df$vaccine_date[df$vaccine_date <= '2015-01-01'] <- '2015-10-01'
  return(df)
}
# Create panel data
#
# Builds a student-by-schoolday panel with an `absent` flag.
# `ab`: absence records (columns id, date); `students`: roster (column id).
create_panel_data <- function(ab,
                              students){
  # Every weekday of the 2015-16 academic year
  calendar <- data.frame(date = seq(as.Date('2015-08-25'),
                                    as.Date('2016-05-29'),
                                    by = 1))
  calendar$day <- weekdays(calendar$date)
  calendar <- calendar %>%
    filter(!day %in% c('Saturday', 'Sunday')) %>%
    # Drop breaks/holidays: weekdays with zero recorded absences district-wide
    filter(date %in% unique(ab$date))
  # Full grid: one row per (school day, student)
  panel <- expand.grid(date = calendar$date,
                       id = students$id)
  # Mark the days on which each student has an absence record
  panel <- left_join(panel,
                     ab %>%
                       mutate(absent = TRUE) %>%
                       dplyr::select(id, date, absent),
                     by = c('date', 'id'))
  # No record means the student was present that day
  panel$absent[is.na(panel$absent)] <- FALSE
  # Return
  return(panel)
}
# Make pretty table
#
# Formats a data.frame for display: prettifies column names (underscores to
# spaces, upper-cased), rounds numeric columns to 2 digits, upper-cases
# character columns, appends '%' to any column whose original name contains
# 'rate', and renders the result as a DT widget.
make_pretty <- function(x){
  require(Hmisc)
  require(DT)
  the_names <- capitalize(gsub('_', ' ', toupper(names(x))))
  x <- data.frame(x)
  # BUG FIX: seq_len() is safe for zero-column inputs, whereas the original
  # 1:ncol(x) would iterate over c(1, 0) and error.
  for (j in seq_len(ncol(x))){
    if(class(x[,j]) %in% c('numeric', 'integer')){
      x[,j] <- round(x[,j], digits = 2)
    } else {
      x[,j] <- toupper(x[,j])
    }
  }
  # Tag rate columns with a percent sign (checked before renaming)
  for (j in seq_len(ncol(x))){
    if(grepl('rate', names(x)[j])){
      x[,j] <- paste0(x[,j], '%')
    }
  }
  names(x) <- the_names
  DT::datatable(x)
}
|
e4758ba8cbb2a3a2585105c50e96b3568f0dabb5
|
c653aa4d1b83e9f31450a0ebe1eb809de5e46e1e
|
/Ex5/ex5_3.r
|
2b8486a2fb0a73e4c25fc15d615a30125f8b7949
|
[
"MIT"
] |
permissive
|
BioYLiu/ml_bioinformatics
|
5bbaebf35af68e1408bd2df062d2d61da6381474
|
2ff4962767a9cfe206620f1fc870839e249dde96
|
refs/heads/master
| 2020-05-23T11:12:55.657940
| 2016-07-10T23:38:17
| 2016-07-10T23:38:17
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,178
|
r
|
ex5_3.r
|
# Group members (Name, Student ID, E-Mail):
# 1. Baldomero Valdez, Valenzuela, 2905175, baldmer.w@gmail.com
# 2. Omar Trinidad Gutierrez Mendez, 2850441, omar.vpa@gmail.com
# 3. Shinho Kang, 2890169, wis.shinho.kang@gmail.com
data(iris)

# Shuffle the rows, then split into a 30-row test set and a 120-row
# training set
shuffled.iris <- iris[sample(1:nrow(iris)), ]
test.ds <- shuffled.iris[1:30, ]
training.ds <- shuffled.iris[31:150, ]

# Scatter plots for every ordered pair of the four numeric features,
# colored by species, each with a fitted regression line; saved as PNG.
png(filename = "task3.png")
par(mfrow = c(4, 3))
feature.names <- names(iris)[-5]
for (xi in 1:4) {
  for (yi in 1:4) {
    if (xi == yi) next
    xv <- training.ds[, xi]
    yv <- training.ds[, yi]
    plot(xv ~ yv,
         pch = 22,
         bg = c('red', 'green', 'blue')[unclass(iris$Species)],
         xlab = feature.names[xi],
         ylab = feature.names[yi]
         # xlim = c(0,7),
         # ylim = c(0,7)
    )
    abline(lm(xv ~ yv), col = 'brown')
  }
}
dev.off()
# Because each of the plots show a correlation between the columns we can
# conclude that one of the predictors can be expressed as a linear combination
# of the others.
|
443628460e2aebfbc8a9cddd6c34d687b1b478db
|
4af1baeb8bd7ca845beb983fcf7c662ab5df6d7e
|
/MarketBasket_Recommender.R
|
ba9670ba6fd19f6bbd0abdde28e83447e70d5125
|
[] |
no_license
|
santiagovama/R
|
54a52cebae1d36ddbabb3080205fbf2fe8b7b956
|
c12cd9b9de4e7a8888386c20ce64a1a481327766
|
refs/heads/master
| 2023-08-17T06:27:32.434463
| 2021-10-02T15:01:05
| 2021-10-02T15:01:05
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 5,954
|
r
|
MarketBasket_Recommender.R
|
# Association Rules for Market Basket Analysis (R)
# http://rpubs.com/Adhislacy/281337
# https://github.com/krupanss/Market-Basket-Analysis-R/blob/master/MarketBasketAnalysis.Rmd
# https://educationalresearchtechniques.com/2016/08/01/market-basket-analysis-in-r/
# http://www.rpubs.com/Mughundhan/268460 -> use this first
# https://stackoverflow.com/questions/17313450/how-to-convert-data-frame-to-transactions-for-arules
# https://stackoverflow.com/questions/45578516/r-aggregate-and-collapse-several-cells-into-one
# https://stackoverflow.com/questions/15933958/collapse-concatenate-aggregate-a-column-to-a-single-comma-separated-string-w
# http://rstatistics.net/association-mining-with-r/
# https://rpubs.com/cheverne/associationrules_marketbasketanalysis
# https://rstudio-pubs-static.s3.amazonaws.com/267119_9a033b870b9641198b19134b7e61fe56.html -> ECLAT
# https://benjnmoss.wordpress.com/2017/02/13/market-basket-analysis-in-alteryx/
# https://synerise.com/data-mining-how-to-analyze-customers-market-baskets-to-increase-sales/#
# http://r-train.ru/apriori-tips-and-tricks/
library(arules) # association rules
library(arulesViz) # data visualization of association rules
library(RColorBrewer) # color palettes for plots
library(tidyverse)
library(lubridate)
# read sample data into dataframe
raw_data <- read_csv("https://raw.githubusercontent.com/kh7393/Market-Basket/master/Online%20Retail_new.csv")
# Rebuild InvoiceDate from its component parts.
# NOTE(review): lubridate::make_datetime()'s positional argument order is
# (year, month, day, hour, min); passing (day, month, year, hour, minute)
# would scramble the dates -- verify against the actual column names.
raw_data <- raw_data %>%
mutate(InvoiceDate = format(InvoiceDate, "%H:%M:%S")) %>%
mutate(InvoiceDate = make_datetime(day, month, year, hour, minute))
# show countries
# NOTE(review): summarize() with the bare InvoiceNo column plus n() yields
# one row per invoice, not one count per country -- confirm intent.
raw_data1 <- raw_data %>% select(Country, InvoiceNo) %>%
group_by(Country) %>%
summarize(InvoiceNo, n())
# Remove the canceled/refunded orders
# Remove rows with invalid product description
# select the rows with relevant data for analysis
# Collapse all item descriptions of an invoice into one comma-separated basket.
# NOTE(review): 'AirlineDescription' does not match the Online Retail schema
# (expect 'Description') -- verify the column name.
transaction_detail <- aggregate(raw_data$AirlineDescription ~ raw_data$InvoiceNo,
FUN=paste,collapse=',')
# remove column for invoice number
transaction_itemsets<-transaction_detail[,-1]
# convert to transactions object for market basket analysis
# (round-trips through a basket-format CSV on disk)
write(transaction_itemsets,"itemsets2.csv")
itemsets_txn<-read.transactions("itemsets2.csv",format="basket",rm.duplicates=TRUE,sep=",")
# show the dimensions of the transactions object
print(dim(itemsets_txn))
print(dim(itemsets_txn)[1]) # X no. market baskets for flight trips
print(dim(itemsets_txn)[2]) # X no. of initial product/items
# summary of dataset including most frequent items, itemset/transaction length distribution
summary(itemsets_txn)
# find the top 15 items
itemFrequencyPlot(itemsets_txn, topN=15)
# exploratory plotting - examine frequency for each item with support greater than 0.025
pdf(file="fig_market_basket_initial_item_support.pdf",
width = 8.5, height = 11)
itemFrequencyPlot(itemsets_txn, support = 0.025, cex.names=0.8, xlim = c(0,0.3),
type = "relative", horiz = TRUE, col = "dark red", las = 1,
xlab = paste("Proportion of Market Baskets Containing Item",
"\n(Item Relative Frequency or Support)"))
dev.off()
pdf(file="fig_market_basket_final_item_support.pdf", width = 8.5, height = 11)
itemFrequencyPlot(itemsets_txn, support = 0.025, cex.names=1.0, xlim = c(0,0.5),
type = "relative", horiz = TRUE, col = "blue", las = 1,
xlab = paste("Proportion of Market Baskets Containing Item",
"\n(Item Relative Frequency or Support)"))
dev.off()
# obtain large set of association rules for items by category and all shoppers
# this is done by setting very low criteria for support and confidence
first.rules <- apriori(itemsets_txn,
parameter = list(support = 0.001, confidence = 0.05))
print(summary(first.rules)) # yields 69,921 rules... too many
# for splitting LHS & RHS
Firstitemsets_txnrules_df <- as(first.rules, "data.frame")
Firstitemsets_txnrules_df <- Firstitemsets_txnrules_df %>%
separate(rules, c("LHS", "RHS"), sep = "=>")
# select association rules using thresholds for support and confidence
# yields 344 rules
second.rules <- apriori(itemsets_txn,
parameter = list(support = 0.025, confidence = 0.05))
print(summary(second.rules))
# Same thresholds applied to the data.frame version of the rules
Seconditemsets_txnrules_df <- Firstitemsets_txnrules_df %>%
filter(support >= 0.025 & confidence >= 0.05)
# data visualization of association rules in scatter plot
# pdf(file="fig_market_basket_rules.pdf", width = 8.5, height = 8.5)
plot(second.rules,
control=list(jitter=2, col = rev(brewer.pal(9, "Greens")[4:9])),
shading = "lift")
# dev.off()
# grouped matrix of rules
# pdf(file="fig_market_basket_rules_matrix.pdf", width = 8.5, height = 8.5)
plot(second.rules, method="grouped",
control=list(col = rev(brewer.pal(9, "Greens")[4:9])))
# dev.off()
# this needs fixing
# Graph of the 10 highest-lift rules
top.second.rules <- head(sort(second.rules, decreasing = TRUE, by = "lift"), 10)
pdf(file="fig_market_basket_farmer_rules.pdf", width = 11, height = 8.5)
plot(top.second.rules, method="graph",
control=list(type="items"),
shading = "lift")
dev.off()
# plot(second.rules,method="graph",interactive=TRUE,shading=NA)
# Split the rules into LHS/RHS text and derive the reverse-direction
# confidence: support * lift / confidence equals conf(RHS => LHS).
itemsets_txnrules_df <- as(second.rules, "data.frame")
itemsets_txnrules_df <- itemsets_txnrules_df %>%
separate(rules, c("LHS", "RHS"), sep = "=>") %>%
mutate(InverseConfidence = (support * lift) / confidence)
# final table of recommended rules to use filtered by max confidence
# option to sort by lift
# if LHS is bought then RHS is purchased
rules_final <- itemsets_txnrules_df %>%
filter(confidence > InverseConfidence) # %>%
# arrange(desc(lift))
# non-case sensitive filter
filteredrules_df <- rules_final %>%
filter(grepl("hotel", RHS, ignore.case = TRUE))
|
e8d55645923624366cd610d80e68b609b29b2362
|
463a55894d24fb80effc1957dc04b9db8d9d9686
|
/plot1.R
|
7ed9a7a33df2b4c17ca93d6629722c3f6dc8ab3c
|
[] |
no_license
|
avishekbasak/ExData_Plotting1
|
b1d2b0c42b74094357cefe6fd374084aaa914845
|
52ef30935e72d8c1c7dddcffd4d3d5fcd12a817b
|
refs/heads/master
| 2021-06-19T20:57:43.380213
| 2017-07-29T04:50:38
| 2017-07-29T04:50:38
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,181
|
r
|
plot1.R
|
# Plot 1: histogram of Global Active Power for 2007-02-01 and 2007-02-02
file.Name <- "household_power_consumption.txt"
file.path <- "~/R_Workspace/exploratory/ExData_Plotting1/"
# Download and unzip the data, if the file doesn't exist yet
file.full <- paste0(file.path, file.Name)
if(!file.exists(file.full)){
  download.file("https://d396qusza40orc.cloudfront.net/exdata%2Fdata%2Fhousehold_power_consumption.zip",
                "exploratory/ExData_Plotting1/householdpowerconsumption.zip",
                method = "curl")
  unzip("exploratory/ExData_Plotting1/householdpowerconsumption.zip",
        exdir = "exploratory/ExData_Plotting1/")
}
# Read data (semicolon-separated)
data.full <- read.csv(file.full, header = TRUE, sep = ";")
# Format the date column
data.full$Date <- as.Date(data.full$Date, format = "%d/%m/%Y")
# Subset to the two days of interest.
# BUG FIX: use %in% rather than `==` chained with `|`; `==` yields NA for
# any row whose Date failed to parse, and NA in a logical subset injects
# all-NA rows into the result. %in% never returns NA.
data.small <- data.full[data.full$Date %in% as.Date(c("2007-02-01", "2007-02-02")), ]
# Coerce the reading to numeric ('?' placeholders become NA with a warning)
data.small$Global_active_power <- as.numeric(as.character(data.small$Global_active_power))
# Render the histogram to a 480x480 PNG
png(filename = paste0(file.path, "plot1.png"), width = 480, height = 480, units = "px")
hist(data.small$Global_active_power, col = "red",
     xlab = "Global Active Power (kilowatts)", main = "Global Active Power")
dev.off()
|
5ff9b20aafe62251b68ba3a9058df97a532c96d7
|
953c191533cbcc52d39d3e640aa460af71706440
|
/code/ggplot.R
|
005adb15c3b628febef252376748f202fd483074
|
[] |
no_license
|
frugeles/hackathon2017
|
23b666b9fc18064b2f58bfc60339f80ce2434bf1
|
e1081064664fdf568e65707b49c1356a1db9c53f
|
refs/heads/master
| 2021-08-15T04:03:31.180279
| 2017-11-17T09:46:39
| 2017-11-17T09:46:39
| 110,962,128
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,448
|
r
|
ggplot.R
|
#### Libraries ####
library(ggplot2)
library(RColorBrewer)
library(dplyr)
library(rgdal)
library(rgeos)
library(leaflet)
library(ggmap)
library(sp)
#### Leaflet ####
# Choropleth of 911-call volume by Seattle district.
data_path = './data/'
# Loads `calls_Final` into the workspace
load(paste0(data_path,'calls_Final.RData'))
# Share of all calls per district, in percent of the total
nb_calls_district <- calls_Final %>%
group_by(districtID) %>%
summarise(colorTest=n()*100/nrow(calls_Final))
# load Seattle data (expected to provide the `Seattle` spatial object)
load(file = paste0(data_path,'Seattle.RData'))
# Attach the call percentages to the polygon attribute table.
# NOTE(review): writing the joined table back into Seattle@data assumes
# right_join() preserved the polygon row order -- verify.
test <- right_join(nb_calls_district,Seattle@data,by=c('districtID'='OBJECTID'))
Seattle@data <- test
# Color scale: quartile bins of the district call percentages
bins <- quantile(nb_calls_district$colorTest, c(0, .25, .5, .75, 1))
pal <- colorBin("RdYlBu", domain = Seattle@data$colorTest, bins = bins,
na.color = "grey40", reverse = T)
# Map centre = centroid of the city polygons
centr.STL <- gCentroid(Seattle)@coords
# Base map with satellite tiles and a legend
l <- leaflet(options = leafletOptions(minZoom = 5, maxZoom = 14)) %>%
addProviderTiles("Esri.WorldImagery") %>%
setView(centr.STL[1], centr.STL[2], zoom = 11) %>%
addLegend(pal = pal, values = round(Seattle@data$colorTest, 1),
opacity = 0.7, position = "bottomright", title = "Percentage of total calls to 911")
# District polygons shaded by call percentage, with a neighborhood popup
l %>% addPolygons(data=Seattle, weight = 1,
fill = ~colorTest, fillColor = ~pal(colorTest),
opacity=1, fillOpacity = 0.6, color=grey(0.5),
## USE POPUP
popup = ~as.character(
paste(S_HOOD, L_HOOD, "<br>", "Percentage =", round(colorTest, 2)))
)
|
cb235d2813c9a7160fde9f1c0056a02adefb3de8
|
050854230a7cead95b117237c43e1c8ff1bddcaa
|
/man/comb_output_table.Rd
|
75f420b6740b8742bf25a920aa7f1138ca349d27
|
[
"LicenseRef-scancode-warranty-disclaimer",
"LicenseRef-scancode-public-domain-disclaimer"
] |
permissive
|
USGS-R/mda.lakes
|
7b829d347e711416cbadbf50f8ac52c20546e7bc
|
eba6ddfba4d52c74e7b09fb1222772630dfa7f30
|
refs/heads/main
| 2023-04-15T18:10:46.043228
| 2020-11-13T18:43:09
| 2020-11-13T18:43:09
| 7,429,212
| 1
| 11
| null | 2023-04-07T22:44:55
| 2013-01-03T19:50:59
|
R
|
UTF-8
|
R
| false
| true
| 599
|
rd
|
comb_output_table.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/comb_output_table.R
\name{comb_output_table}
\alias{comb_output_table}
\title{Combine file-based table output}
\usage{
comb_output_table(pattern, ...)
}
\arguments{
\item{pattern}{Pattern to be passed to \code{\link{Sys.glob}}}
\item{...}{Additional parameters passed to \code{\link{read.table}}}
}
\description{
Attempts to load all files matching a certain pattern and combine
them vertically (rbind). Returns the result. If no files match the
pattern, then an empty data.frame is returned.
}
\author{
Luke Winslow
}
|
2fa87c6c42fc5dc1e3087a8ee7c15e5cf4211aa7
|
6eff13f8c2534f5bf21ddab72f3c9683fe06e3f9
|
/Clustering/Heirachical/2_hierarch_cluster_vehicals.R
|
9f823b0d284df842363129072f723fe984d91b54
|
[] |
no_license
|
ashukumar12d/ML-with-R
|
5d76829aacd2a4fbf56bc897f5d1e32133acc4c7
|
0921f23a20d85fbfb8b5b7599856e7a80ff6344a
|
refs/heads/master
| 2021-05-10T09:02:40.261810
| 2018-01-25T12:53:32
| 2018-01-25T12:53:32
| 118,912,047
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 439
|
r
|
2_hierarch_cluster_vehicals.R
|
# To illustrate interpretation of the dendrogram, we'll look at a cluster
# analysis performed on a set of cars (the built-in mtcars dataset).
head(mtcars)
# Euclidean distance matrix between all pairs of cars
d <- dist(mtcars)
print(d)
# Agglomerative clustering; "complete" linkage is hclust()'s default,
# spelled out here for clarity
hc <- hclust(d, method = "complete")
print(hc)
# Draw the dendrogram
plot(hc)
|
76747d3a407c36a995fdeee8d934df834b6f1b86
|
db4f04ed79dfa815c2e5a3e30c8845e12c043008
|
/snippets/R/trackingCode.R
|
83742a3811eed61fcb30e13f7ce3051b71bae469
|
[
"Apache-2.0"
] |
permissive
|
kmader/TIPL
|
34d5374a1fe294ffa4e4f7f7fe9b0c34aa7c89c7
|
671c07eecbf7ba9f0388735326b55359c17abb92
|
refs/heads/maven
| 2022-12-10T06:45:37.263475
| 2020-02-11T08:05:46
| 2020-02-11T08:05:46
| 140,686,379
| 1
| 3
|
Apache-2.0
| 2022-12-08T16:49:09
| 2018-07-12T08:44:14
|
Java
|
UTF-8
|
R
| false
| false
| 22,270
|
r
|
trackingCode.R
|
library(plyr)
compare.foam<-function(cDir,goldFile='glpor_1.csv',kevinFile='clpor_2.csv') {
  # Compare a "gold standard" bubble table against a candidate table.
  # Both CSVs carry a one-line preamble, hence skip = 1.
  candidate <- read.csv(file.path(cDir, kevinFile), skip = 1)
  reference <- read.csv(file.path(cDir, goldFile), skip = 1)
  # Normalize both tables the same way before comparing
  reference <- compare.foam.clean(reference)
  candidate <- compare.foam.clean(candidate)
  compare.frames(reference, candidate)
}
# calculate the bubble to bubble spacing
# Per-sample (time point) summary of the tracked bubbles:
#   mean_velocity    - mean z-component of the per-bubble step vectors
#   mean_obj_spacing - characteristic spacing, estimated as
#                      (bounding-extent product / bubble count)^0.33;
#                      `rng` is assumed to be a range-extent helper defined
#                      elsewhere in this project -- TODO confirm
#   sd_vel_x/y/z     - spread of the velocity components within the sample
calc.track.statistics<-function(in.roi) ddply(in.roi,.(sample),function(c.sample) data.frame(mean_velocity=mean(c.sample$DIR_Z),
mean_obj_spacing=(with(c.sample,rng(POS_X)*rng(POS_Y)*rng(POS_Z))/nrow(c.sample))^(0.33),
sd_vel_x=sd(c.sample$DIR_X),
sd_vel_y=sd(c.sample$DIR_Y),
sd_vel_z=sd(c.sample$DIR_Z)
))
# fancy edge data reader
# Reads an edge CSV (skipping its one-line preamble) and normalizes the
# first column's name to "Component.1", which read.csv may have mangled.
read.edge<-function(x) {
  edges <- read.csv(x, skip = 1)
  names(edges)[1] <- "Component.1"
  edges
}
# Add MChain to edges
# Attach the merged-chain id (MChain) of both endpoint bubbles to every edge
# by joining twice against the bubble table (once per endpoint), then
# canonicalize each pair so that MChain.1 <= MChain.2.
edges.append.mchain<-function(in.edges,in.bubbles) {
# Keep only the join keys + MChain; rename MChain for endpoint 1
sub.bubbles<-in.bubbles[,names(in.bubbles) %in% c("sample","LACUNA_NUMBER","MChain")]
colnames(sub.bubbles)[colnames(sub.bubbles)=="MChain"]<-"MChain.1"
o.merge1<-merge(in.edges,sub.bubbles,
by.x=c("sample","Component.1"),by.y=c("sample","LACUNA_NUMBER"))
# Second join for endpoint 2
sub.bubbles<-in.bubbles[,names(in.bubbles) %in% c("sample","LACUNA_NUMBER","MChain")]
colnames(sub.bubbles)[colnames(sub.bubbles)=="MChain"]<-"MChain.2"
out.df<-merge(o.merge1,sub.bubbles,
by.x=c("sample","Component.2"),by.y=c("sample","LACUNA_NUMBER"))
# Canonical ordering: swap so MChain.1 is always the smaller id
mc1a<-out.df$MChain.1
mc2a<-out.df$MChain.2
switch.els<-which(mc1a>mc2a)
out.df$MChain.1[switch.els]<-mc2a[switch.els]
out.df$MChain.2[switch.els]<-mc1a[switch.els]
out.df
}
# calculate statistics
# Per-chain lifetime summary. For each merged chain (MChain) compute:
#   min/max/cnt.sample - first/last frame and number of distinct frames seen
#   cnt.chain          - number of rows (observations) in the chain
#   mean.dist/max.dist - match-distance quality of the tracking
#   dir.disp           - net displacement: magnitude of the summed step vectors
#   dir.length         - total path length: sum of step magnitudes
#   disp.to.leng       - straightness ratio (1 = perfectly straight path)
# With include.orig=TRUE the summary columns are appended to the original rows.
chain.life.stats.fn<-function(in.data,include.orig=F) ddply(in.data,.(MChain),function(c.chain) {
disp.val<-sqrt(with(c.chain,sum(DIR_X)^2+sum(DIR_Y)^2+sum(DIR_Z)^2))
leng.val<-with(c.chain,sum(sqrt(DIR_X^2+DIR_Y^2+DIR_Z^2)))
new.cols<-data.frame(sample=c.chain$sample,
min.sample=min(c.chain$sample),
max.sample=max(c.chain$sample),
cnt.sample=length(unique(c.chain$sample)),
cnt.chain=nrow(c.chain),
mean.dist=mean(c.chain$M_MATCH_DIST),
max.dist=max(c.chain$M_MATCH_DIST),
dir.disp=disp.val,
dir.length=leng.val,
disp.to.leng=disp.val/leng.val)
if(include.orig) {
cbind(c.chain,new.cols)
} else {
new.cols
}
})
# calculate the bubble life stats from the chains
# For every edge (pair of chains), find the frames in which BOTH chains are
# alive (have a row in chain.life.stats) and that also occur in sample.vec;
# record the first/last such frame, their count, and the range.sample span.
bubble.life.stats.fn<-function(in.chains,chain.life.stats,sample.vec) {
out.val<-ddply(in.chains,.(MChain.1,MChain.2),function(c.edge) {
a.chain<-c.edge$MChain.1[1]
b.chain<-c.edge$MChain.2[1]
sample.range<-subset(chain.life.stats,MChain %in% c(a.chain,b.chain))
# Frames where both chains have a row (cnt > 1)
sample.cnt<-ddply(sample.range,.(sample),function(c.sample) data.frame(cnt=nrow(c.sample)))
both.present<-intersect(subset(sample.cnt,cnt>1)$sample,sample.vec)
# max of the min and the min of the max make the smallest range
data.frame(c.edge,min.sample=min(both.present),max.sample=max(both.present),cnt.sample=length(both.present))
})
out.val$range.sample<-out.val$max.sample-out.val$min.sample
out.val
}
# Build a long table of bubble-pair topology over time: for every candidate
# pair of chains, one row per frame in which both chains exist, labelled
# "Touching" (an edge exists, Voxels > 0) or "Separated" (no edge, Voxels 0).
bubble.samples.exists.fn<-function(edge.chains,chain.life.stats,sample.vec) {
# calculate the full lifetime information
bubble.life.full<-bubble.life.stats.fn(edge.chains,chain.life.stats,sample.vec)
# only the possible topological events:
# the bubbles must have been mutually alive more than 2 frames, and
# the number of frames they are connected must not exceed the mutual lifetime
bubble.life.good<-subset(bubble.life.full,range.sample>=2 & Connections<=(range.sample+1))
# give the bubbles an id (one factor level per chain pair)
bubble.life.good$id<-as.factor(paste(bubble.life.good$MChain.1,bubble.life.good$MChain.2))
# Enumerate every frame in which both chains of each pair are present
bubble.samples.exists<-ddply(bubble.life.good,.(MChain.1,MChain.2,id),function(c.edge) {
a.chain<-c.edge$MChain.1[1]
b.chain<-c.edge$MChain.2[1]
sample.range<-subset(chain.life.stats,MChain %in% c(a.chain,b.chain))
sample.cnt<-ddply(sample.range,.(sample),function(c.sample) data.frame(cnt=nrow(c.sample)))
both.present<-intersect(subset(sample.cnt,cnt>1)$sample,sample.vec)
data.frame(sample=both.present)
})
# Stack "Touching" rows (real edges) on top of "Separated" placeholder rows
all.bubbles.topo<-rbind(cbind(bubble.life.good[,names(bubble.life.good) %in% c("MChain.1","MChain.2","id","sample","Voxels")],connection="Touching"),cbind(bubble.samples.exists,Voxels=0,connection="Separated"))
# remove all the extra empties:
# when a pair/frame has both a Touching and a Separated row, keep Touching
ddply(all.bubbles.topo,.(id,sample),function(c.edge) {
o.val<-subset(c.edge,connection=="Touching")
if (nrow(o.val)<1) o.val<-c.edge
o.val
})
}
# merge two data files after tracking into the same file
# Appends every column of matchData to goldData under the prefixed name
# paste0(prefix, column) and converts the combined table to differential
# form via as.diff.data().
# NOTE(review): the as.diff argument is accepted for call compatibility but
# is not consulted -- as.diff.data() is always applied, exactly as in the
# original implementation (track.fcn.adp depends on this).
mergedata<-function(goldData,matchData,prefix="M_",as.diff=F) {
  merged<-data.frame(goldData)
  # copy each matched column across under its prefixed name
  for (col.name in names(matchData)) {
    merged[[paste0(prefix,col.name)]]<-matchData[[col.name]]
  }
  as.diff.data(merged,m.prefix=prefix)
}
# Convert a merged (original + prefixed-match) table into differential form.
# For every numeric original column C, adds a column holding
# (M_C - C) / (M_sample - sample); position columns POS_X/Y/Z become
# DIR_X/Y/Z, everything else gets the d.prefix ("D_") prefix. The sample
# column itself is not velocity-normalized. The match distance and
# bijectivity flag are carried through as M_MATCH_DIST / BIJ_MATCH.
#
# BUGFIX: the numeric mask is computed over original.cols, so it must index
# original.cols as well -- the old code indexed all.cols with those
# positions, which silently selected the wrong columns whenever prefixed
# columns were interleaved with original ones. (For tables built by
# mergedata(), where originals come first, the result is unchanged.)
# Also rewritten with base R (grepl/vapply) instead of plyr::laply.
as.diff.data<-function(mergedData,m.prefix="M_",d.prefix="D_",sample.col.name="sample") {
  all.cols<-names(mergedData)
  # original (un-prefixed) columns; grepl keeps the original regex semantics
  original.cols<-all.cols[!grepl(m.prefix,all.cols)]
  is.num<-vapply(original.cols,function(cl) is.numeric(mergedData[[cl]]),logical(1))
  numeric.cols<-original.cols[is.num]
  out.data<-mergedData[,(names(mergedData) %in% original.cols)]
  # normalize the fields by their velocity (D_sample)
  if (sample.col.name %in% original.cols) {
    dsample.vec<-mergedData[[paste(m.prefix,sample.col.name,sep="")]]-mergedData[[sample.col.name]]
  } else {
    dsample.vec<-1
  }
  for(c.col in numeric.cols) {
    # add differential column; positions are renamed to direction vectors
    new.name<-switch(c.col,
                     POS_X={"DIR_X"},
                     POS_Y={"DIR_Y"},
                     POS_Z={"DIR_Z"},
                     paste(d.prefix,c.col,sep="")
    )
    old.name<-paste(m.prefix,c.col,sep="")
    cur.out.col<-mergedData[[old.name]]-mergedData[[c.col]]
    # the frame-difference column itself stays un-normalized
    if (c.col!=sample.col.name) cur.out.col<-cur.out.col/dsample.vec
    out.data[[new.name]]<-cur.out.col
  }
  cbind(out.data,M_MATCH_DIST=mergedData$M_MATCH_DIST,BIJ_MATCH=mergedData$M_BIJ_MATCHED)
}
# Annotate one edge's per-frame connection history with transition flags.
# Rows are sorted by sample; "connection" equal to "Touching" means the two
# chains were in contact in that frame. Four logical columns are appended:
#   was.created    - contact appeared between t-1 and t
#   will.created   - contact will appear between t and t+1
#   was.destroyed  - contact vanished between t-1 and t
#   will.destroyed - contact will vanish between t and t+1
# Boundary frames have no neighbour, so NA propagates into the flags there,
# exactly as in the original implementation.
edge.status.change<-function(ic.edge) {
  # chronological order so the lag/lead shifts line up with time
  sorted<-ic.edge[order(ic.edge$sample),]
  touching<-sorted$connection=="Touching"
  n<-length(touching)
  # connection state one frame earlier / later (NA at the boundaries)
  before<-c(NA,touching[-n])
  after<-c(touching[-1],NA)
  sorted$was.created<-!before & touching
  sorted$will.created<-!touching & after
  sorted$was.destroyed<-before & !touching
  sorted$will.destroyed<-touching & !after
  sorted
}
# Converts a topology table into per-edge status changes: for every edge id,
# run edge.status.change() and stack the annotated results.
topo2status.change<-function(in.topo,parallel=T) {
  ddply(in.topo,.(id),edge.status.change,.parallel=parallel)
}
# Add position (or other columns from the bubble table) to the edge file.
# Example: edge.w.pos<-edges.append.pos(bubbles.join,mini.edges)
# Both edge endpoints are looked up in in.bubbles on (time.col, bubble.col);
# columns that clash between the two lookups are suffixed ".start"/".end".
# Left joins (all.x=T) so every edge row is kept even without a match.
edges.append.pos<-function(in.bubbles,in.edges,time.col="sample",
                           bubble.col="MChain",bubble.col1="MChain.1",bubble.col2="MChain.2") {
  # key used to look a bubble up in the bubble table
  bubble.key<-c(time.col,bubble.col)
  # attach the first endpoint's bubble columns
  with.first<-merge(in.edges,in.bubbles,
                    by.x=c(time.col,bubble.col1),by.y=bubble.key,
                    all.x=T,all.y=F,sort=F,suffixes=c(".edge",""))
  # attach the second endpoint; clashing columns become .start / .end
  merge(with.first,in.bubbles,
        by.x=c(time.col,bubble.col2),by.y=bubble.key,
        all.x=T,all.y=F,sort=F,suffixes=c(".start",".end"))
}
# add chain (time-independent bubble identifier)
# Links each tracked row to the row it maps to in the next frame
# (sample+D_sample, LACUNA_NUMBER+D_LACUNA_NUMBER) and merges linked rows
# into chains; MChain is the smallest per-frame Chain id in the linked set.
# check.bij=T restricts the input to bijectively-matched rows first.
tracking.add.chains<-function(in.data,check.bij=F) {
  if (check.bij) sub.bubbles<-subset(in.data,BIJ_MATCH)
  else sub.bubbles<-in.data
  if(nrow(sub.bubbles)>0) {
    sub.bubbles$Chain<-c(1:nrow(sub.bubbles)) # Unique Bubble ID
    # where each row claims to be in the next frame
    bubbles.forward<-data.frame(sample=sub.bubbles$sample+sub.bubbles$D_sample,
                                LACUNA_NUMBER=sub.bubbles$LACUNA_NUMBER+sub.bubbles$D_LACUNA_NUMBER,
                                Chain=sub.bubbles$Chain)
    # pairs (Chain.x, Chain.y) that refer to the same physical object
    bubbles.mapping<-merge(sub.bubbles[,names(sub.bubbles) %in% c("sample","Chain","LACUNA_NUMBER")],
                           bubbles.forward,by=c("sample","LACUNA_NUMBER"))
    bubble.mapping.proper<-mapply(list, bubbles.mapping$Chain.x, bubbles.mapping$Chain.y, SIMPLIFY=F)
    bubbles.mapping.full<-1:max(sub.bubbles$Chain)
    # single forward pass of a union-find-style relabelling: both ends of a
    # link take the smallest id seen so far.
    # NOTE(review): one pass may not fully propagate long transitive merge
    # chains in unfavourable orderings -- TODO confirm intended behavior.
    for(c in bubble.mapping.proper) {
      cx<-c[[1]]
      cy<-c[[2]]
      min.ch<-c(cx,cy,bubbles.mapping.full[cx],bubbles.mapping.full[cy])
      min.val<-min(min.ch[!is.na(min.ch)])
      bubbles.mapping.full[cx]<-min.val
      bubbles.mapping.full[cy]<-min.val
    }
    cbind(sub.bubbles,MChain=bubbles.mapping.full[sub.bubbles$Chain])
  } else {
    # degenerate case: keep the (empty) frame shape
    cbind(sub.bubbles,MChain=c(),Chain=c())
  }
}
# combine the edges with the bubble file to have chain id's instead of
# components and unique names. Edge endpoints are canonicalized so that
# MChain.1 >= MChain.2, per-edge lifetime statistics are attached, and each
# distinct (MChain.1, MChain.2) pair gets a stable name and numeric id.
# Relies on edges.append.mchain() (defined earlier in this file).
process.edges<-function(in.edges,in.bubbles) {
  edges.join<-edges.append.mchain(in.edges,in.bubbles)
  # canonical order: swap endpoints so MChain.1 holds the larger id
  rows.to.swap<-which(edges.join$MChain.2>edges.join$MChain.1)
  edges.join2<-edges.join[rows.to.swap,]
  edges.join[rows.to.swap,]$MChain.1<-edges.join2$MChain.2
  edges.join[rows.to.swap,]$MChain.2<-edges.join2$MChain.1
  # Edge lifetime information
  edges.join.stats<-ddply(edges.join,.(MChain.1,MChain.2),function(x) {cbind(x,
                                                                             Range=max(x$sample)-min(x$sample),
                                                                             Start.Frame=min(x$sample),
                                                                             Final.Frame=max(x$sample),
                                                                             Connections=nrow(x)
  )})
  # stable textual name and numeric id per edge
  edges.join.stats$name<-paste(edges.join.stats$MChain.1,edges.join.stats$MChain.2)
  edges.join.stats$id<-as.numeric(as.factor(edges.join.stats$name))
  # n.sample: frames since the edge first appeared; x.sample: same, scaled to [0,1]
  edges.join.stats2<-ddply(edges.join.stats,.(MChain.1,MChain.2),function(x) {
    cbind(x,n.sample=x$sample-min(x$sample),x.sample=(x$sample-min(x$sample))/(max(x$sample)-min(x$sample)))
  })
  edges.join.stats2
}
# For every edge, enumerate each frame of the mutual lifetime of its two
# chains and mark whether the edge was actually observed ("connected") in
# that frame. The mutual lifetime spans from the later of the two chains'
# first frames to the earlier of their last frames.
edges.missing<-function(in.edges,in.bubbles) {
  # per-chain first/last frame lookup table
  sub.bubbles<-ddply(in.bubbles[,names(in.bubbles) %in% c("sample","MChain")],.(MChain),function(c.chain) {
    data.frame(start.sample=min(c.chain$sample),final.sample=max(c.chain$sample))
  })
  ddply(in.edges,.(id),function(c.edge) {
    # one representative row per edge, without the per-frame sample column
    c.row<-c.edge[1,!(names(c.edge) %in% c("sample"))]
    c1<-c.row$MChain.1
    c2<-c.row$MChain.2
    rel.samples<-subset(sub.bubbles,MChain==c1 | MChain==c2)
    sample.vals<-c(max(rel.samples$start.sample):min(rel.samples$final.sample)) # from the highest starting frame to the lowest ending frame
    cbind(c.row,sample=sample.vals,connected=(sample.vals %in% c.edge$sample))
  })
}
#' Match objects
#' @author Kevin Mader (kevin.mader@gmail.com)
#'
#' Matches every object in \code{groundTruth} to its nearest object in
#' \code{susData} under a weighted, optionally volume-penalized squared
#' distance, and optionally verifies the match is bijective (the matched
#' object maps back to the same ground-truth object).
#'
#' @param groundTruth is the frame to compare to
#' @param susData is the current frame
#' @param maxVolDifference is the largest allowable relative difference in volume before maxVolPenalty is added
#' @param maxVolPenalty penalty added to the squared distance on large volume changes; NA disables the volume term
#' @param in.offset the offset to apply to groundTruth before comparing to susData
#' @param do.bij run the bijective comparison as well
#' @param x.weight weight to scale the x distance with
#' @param y.weight weight to scale the y distance with
#' @param z.weight weight to scale the z distance with
#' @param dist.fun a custom distance metric: function(bubMat, cPos, offset) returning squared distances
#' @return the rows of susData matched to each groundTruth row, with
#'   MATCH_DIST added and, when do.bij, a logical BIJ_MATCHED column
matchObjects<-function(groundTruth,susData,maxVolDifference=0.5,
                       maxVolPenalty=5000^2,in.offset=c(0,0,0),
                       do.bij=T,x.weight=1,y.weight=1,z.weight=1,
                       dist.fun=NA) {
  n.truth<-nrow(groundTruth)
  # preallocate the result vectors instead of growing them in the loop
  gmatch<-integer(n.truth)
  gdist<-numeric(n.truth)
  # build the default distance function unless a custom one was supplied;
  # is.function() avoids the warning is.na() emits when given a closure
  if (!is.function(dist.fun)) {
    if (!is.na(maxVolPenalty)) { # use maxVolPenalty
      # weighted squared distance plus a flat penalty for large volume changes
      dist.fun<-function(bubMat,cPos,offset) { (maxVolPenalty*((abs(bubMat$VOLUME-cPos$VOLUME)/cPos$VOLUME)>maxVolDifference)+x.weight*(bubMat$POS_X-offset[1]-cPos$POS_X)**2+y.weight*(bubMat$POS_Y-offset[2]-cPos$POS_Y)**2+z.weight*(bubMat$POS_Z-offset[3]-cPos$POS_Z)**2) }
    } else { # skip it
      # leave volume out
      dist.fun<-function(bubMat,cPos,offset) { x.weight*(bubMat$POS_X-offset[1]-cPos$POS_X)**2+y.weight*(bubMat$POS_Y-offset[2]-cPos$POS_Y)**2+z.weight*(bubMat$POS_Z-offset[3]-cPos$POS_Z)**2 }
    }
  }
  for (i in seq_len(n.truth)) {
    cVec<-dist.fun(susData,groundTruth[i,],in.offset)
    cDist<-min(cVec)
    gdist[i]<-sqrt(cDist) # perform square root operation before saving and only on one value
    gmatch[i]<-which(cVec==cDist)[1] # first nearest neighbour wins ties
  }
  mData<-susData[gmatch,]
  mData$MATCH_DIST<-gdist
  if (do.bij) {
    # Check the reverse: the match is bijective iff the matched susData
    # object's nearest groundTruth object is the one that selected it
    gincl<-logical(length(gmatch))
    for (i in seq_along(gmatch)) {
      c.susbubble<-gmatch[i]
      # distance from matched bubble to all bubbles in ground truth
      cVec<-dist.fun(groundTruth,susData[c.susbubble,],-1*in.offset)
      cDist<-min(cVec)
      gincl[i]<-(i==which(cVec==cDist)[1])
    }
    mData$BIJ_MATCHED<-gincl
  }
  mData
}
# allow old function to continue working: backwards-compatible alias that
# forwards every argument to compare.frames()
compare.foam.frames<-function(...) {
  compare.frames(...)
}
# compare two frames: match kbubs against gbubs (extra arguments are
# forwarded to matchObjects) and merge the matched rows into one table
# via mergedata().
compare.frames<-function(gbubs,kbubs,as.diff=F,...) {
  matched<-matchObjects(gbubs,kbubs,...)
  mergedata(gbubs,matched,as.diff=as.diff)
}
# takes a tracked data experiment with sample columns and calculates the birth and death
# For every frame: a row "dies" if its forward-mapped id
# (LACUNA_NUMBER+D_LACUNA_NUMBER) is absent from the next frame, and is
# "born" if no row of the previous frame maps onto its id.
bubble.life.check<-function(in.data) {
  ddply(in.data,.(sample),function(x) {
    c.sample<-x$sample[1]
    # the frame this frame's rows map forward into
    n.sample<-x$sample[1]+x$D_sample[1]
    n.bubbles<-unique(subset(in.data,sample==n.sample)$LACUNA_NUMBER)
    dies=!((x$LACUNA_NUMBER+x$D_LACUNA_NUMBER) %in% n.bubbles)
    # rows of the previous frame whose forward mapping lands in this frame
    l.bubbles.list<-subset(in.data,sample+D_sample==c.sample)
    l.bubbles<-unique(l.bubbles.list$LACUNA_NUMBER+l.bubbles.list$D_LACUNA_NUMBER)
    born=!(x$LACUNA_NUMBER %in% l.bubbles)
    cbind(x,dies=dies,born=born)
  })
}
# Plot a single topological (T1) event from a track.edges() result.
#   edges.tracked - list with $important.edges, $edge.info and $obj.list
#   keep.event    - event.name of the event to display
#   with.frames   - facet by frame (one panel per sample) instead of
#                   drawing chain trajectories in a single panel
#   all.frames    - keep every frame instead of the event's frames +/- 1
#   x.name/y.name - column names to plot; x.label/y.label override the
#                   axis labels (default to the column names)
# Returns a ggplot object.
plot.t1.event<-function(edges.tracked,keep.event,with.frames=F,all.frames=F,
                        x.name="POS_X",y.name="POS_Z",x.label=NA,y.label=NA) {
  important.edges<-edges.tracked$important.edges
  edge.info<-edges.tracked$edge.info
  good.roi.data<-edges.tracked$obj.list
  cur.event<-subset(important.edges,event.name==keep.event)
  # chains participating in the event and the frames around it
  keep.chains<-unique(c(cur.event$MChain.1,cur.event$MChain.2))
  keep.frames<-c((min(cur.event$sample)-1):(max(cur.event$sample)+1))
  # edges touching any involved chain; keep edges fully inside the event
  # set, plus any created/destroyed edge for context
  sub.edges<-subset(edge.info,(MChain.1 %in% keep.chains) | (MChain.2 %in% keep.chains))
  sub.edges<-subset(sub.edges,((MChain.1 %in% keep.chains) & (MChain.2 %in% keep.chains)) | (was.created | was.destroyed))
  selected.links<-edges.append.pos(good.roi.data,sub.edges)
  selected.chains<-subset(good.roi.data[with(good.roi.data, order(sample)), ],MChain %in% keep.chains)
  if (!all.frames) {
    print(keep.frames)
    selected.links<-subset(selected.links,sample %in% keep.frames)
    if (with.frames) selected.chains<-subset(selected.chains,sample %in% keep.frames)
  }
  # "involved" marks edges whose both endpoints belong to the event
  selected.links$involved<-with(selected.links,(MChain.1 %in% keep.chains) & (MChain.2 %in% keep.chains))
  selected.links$edge.length<-with(selected.links,sqrt((POS_X.start-POS_X.end)^2+(POS_Y.start-POS_Y.end)^2+(POS_Z.start-POS_Z.end)^2))
  # human-readable event label per link (last matching flag wins)
  selected.links$type="No Event"
  selected.links[which(selected.links$was.created),]$type<-"Was Created"
  selected.links[which(selected.links$will.created),]$type<-"Will Created"
  selected.links[which(selected.links$was.destroyed),]$type<-"Was Destroyed"
  selected.links[which(selected.links$will.destroyed),]$type<-"Will Destroyed"
  # helpers producing the suffixed endpoint column names for aes_string
  ss<-function(var) paste(var,".start",sep="")
  se<-function(var) paste(var,".end",sep="")
  if (with.frames) {
    o.plot<-ggplot(selected.links)+
      geom_segment(aes_string(x=ss(x.name),y=ss(y.name),xend=se(x.name),yend=se(y.name),
                              linetype="connection",alpha="involved",color="type"))+
      geom_point(data=selected.chains,aes_string(x=x.name,y=y.name),alpha=1,color="red")+
      labs(color="Edge Event")+facet_wrap(~sample)
  } else {
    o.plot<-ggplot(selected.links)+
      geom_segment(aes_string(x=ss(x.name),y=ss(y.name),xend=se(x.name),yend=se(y.name),
                              linetype="connection",alpha="involved"))+
      geom_point(data=selected.chains,aes_string(x=x.name,y=y.name),alpha=1,color="red")+
      geom_path(data=selected.chains,aes_string(x=x.name,y=y.name,group="MChain",color="as.factor(MChain)"),alpha=1)+
      labs(color="Chain")
  }
  if(is.na(x.label)) x.label<-x.name
  if(is.na(y.label)) y.label<-y.name
  o.plot+theme_bw(20)+labs(x=x.label,y=y.label,alpha="Involved",linetype="Connected")
}
#' Edge Tracking Function
#' @author Kevin Mader (kevin.mader@gmail.com)
#' Tracks a list of data.frames using the compare.frames function
#' and standard tracking, offset tracking, and adaptive offset tracking
#' Tracking Function
#'
#' Builds the full edge topology from tracked objects and their edges, then
#' extracts "important" frames: (chain, frame) pairs where an edge was both
#' created and destroyed (past or future), i.e. candidate T1 events.
#' Returns a list(important.edges, edge.info, obj.list, obj.life.stats).
#' NOTE(review): keep.all.events is accepted but never used -- TODO confirm.
track.edges<-function(in.objs,in.edges,keep.all.events=F,parallel=F) {
  edge.chain<-process.edges(in.edges,in.objs)
  chain.life.stats<-chain.life.stats.fn(in.objs)
  sample.vec<-unique(in.objs$sample)
  # just get a summary (we can more carefully analyze later)
  obj.life.stats<-bubble.life.stats.fn(edge.chain,chain.life.stats,sample.vec)
  all.bubbles.topo<-bubble.samples.exists.fn(edge.chain,chain.life.stats,sample.vec)
  edge.info<-topo2status.change(all.bubbles.topo,parallel=parallel)
  # keep only the interesting events
  edge.info.interesting<-subset(edge.info,was.created | will.created | was.destroyed | will.destroyed)
  # combine the list together as chain1 and chain2 so each edge row appears
  # once per endpoint, indexed by that endpoint's chain id
  singlechain.edge.info<-rbind(cbind(edge.info.interesting,MChain=edge.info.interesting$MChain.1),
                               cbind(edge.info.interesting,MChain=edge.info.interesting$MChain.2))
  important.edges<-ddply(singlechain.edge.info,.(sample,MChain),function(c.bubble.frame) {
    sum.stats<-colSums(c.bubble.frame[,c("was.created","will.created","was.destroyed","will.destroyed")],na.rm=T)
    event.count<-sum(sum.stats)
    # stable event label: frame plus all participating edge ids
    event.name<-paste("S",c.bubble.frame$sample[1],"_",paste(unique(c.bubble.frame$id),collapse=",",sep=""),sep="")
    # a candidate event needs simultaneous creation AND destruction
    if ((sum.stats["was.created"]>0) & (sum.stats["was.destroyed"]>0)) {
      was.events<-subset(c.bubble.frame,was.created | was.destroyed)
    } else {
      was.events<-c.bubble.frame[0,]
    }
    if ((sum.stats["will.created"]>0) & (sum.stats["will.destroyed"]>0)) {
      will.events<-subset(c.bubble.frame,will.created | will.destroyed)
    } else {
      will.events<-c.bubble.frame[0,]
    }
    out.mat<-rbind(was.events,will.events)
    if (nrow(out.mat)>0) out.mat<-cbind(out.mat,event.count=event.count,event.name=event.name)
    out.mat
  })
  # busiest events first
  important.edges<-important.edges[order(-important.edges$event.count),]
  list(important.edges=important.edges,edge.info=edge.info,obj.list=in.objs,obj.life.stats=obj.life.stats)
}
#' @author Kevin Mader (kevin.mader@gmail.com)
#' Tracks a list of data.frames using the compare.frames function
#' and standard tracking, offset tracking, and adaptive offset tracking
#'
#'
#' @param inData the list of data.frames containing the samples
#' @param offset is the offset to use for the offset run
#' @param run.offset if the offset analysis should be run
#' @param run.adaptive if the adaptive analysis should be run
#' @param ... parameters to be passed onto the compare.frames function
#' @return one data.frame stacking the per-method results, tagged by the
#'   Matching column ("No Offset" / "Fix Offset" / "Adaptive Offset")
track.frames<-function(inData,offset,run.offset=T,run.adaptive=T,parallel=F,...) {
  # compare one consecutive frame pair x=list(frame_i, frame_i+1, i)
  track.fcn<-function(x,in.offset=c(0,0,0)) {
    cbind(compare.frames(x[[1]],x[[2]],as.diff=T,in.offset=in.offset,...),Frame=x[[3]])
  }
  # Track function adaptive: a first pass estimates the mean displacement,
  # which is then used as the offset for the final pass.
  # NOTE(review): colMeans(pre.match) requires every column of the matched
  # frame to be numeric -- TODO confirm for the intended inputs.
  track.fcn.adp<-function(x,in.offset=c(0,0,0)) {
    pre.match<-compare.frames(x[[1]],x[[2]],in.offset=in.offset,...)
    pre.offset<-colMeans(pre.match)[c("DIR_X","DIR_Y","DIR_Z")]
    cbind(compare.frames(x[[1]],x[[2]],as.diff=T,in.offset=pre.offset,...),Frame=x[[3]])
  }
  # list of (frame_i, frame_i+1, i) triples for consecutive frame pairs
  staggered.data<-mapply(list, inData[-length(inData)], inData[-1],1:(length(inData)-1), SIMPLIFY=F)
  track.data<-ldply(staggered.data,
                    track.fcn,.parallel=parallel)
  if(run.offset) {
    track.data.fix<-ldply(staggered.data,
                          function(x) track.fcn(x,offset),.parallel=parallel)
    # disable the branch when it produced no rows
    if(nrow(track.data.fix)<1) run.offset<-F
  }
  if(run.adaptive) {
    track.data.adp<-ldply(staggered.data,
                          function(x) track.fcn.adp(x,offset),.parallel=parallel)
    if(nrow(track.data.adp)<1) run.adaptive<-F
  }
  # functions to apply before combining:
  # 1) life check (birth/death flags)
  # 2) add chain numbers based on remaining bubbles
  # 3) per-chain lifetime statistics
  preproc.fcn<-function(...) {
    alive.bubbles<-bubble.life.check(...)
    track.one<-tracking.add.chains(alive.bubbles)
    chain.life.stats.fn(track.one,include.orig=T)
  }
  all.tracks<-cbind(preproc.fcn(track.data),Matching="No Offset")
  if(run.offset) all.tracks<-rbind(all.tracks,cbind(preproc.fcn(track.data.fix),Matching="Fix Offset"))
  if(run.adaptive) all.tracks<-rbind(all.tracks,cbind(preproc.fcn(track.data.adp),Matching="Adaptive Offset"))
  all.tracks
}
|
ab1d1a944229a5ff5b41a73d27638df4ca9334ae
|
eedafd67512fc0146ee0d9d2910764978f541802
|
/man/logistic_reg_adj_diff.Rd
|
abd6505d1be1d573a0a7ae4f799e241193a28ad7
|
[
"MIT"
] |
permissive
|
MSKCC-Epi-Bio/bstfun
|
e1e3925278d3f18ab62501bdc5930b4d2b768dd4
|
532465ec4a7097d8cf2e4aea50f0add44f361320
|
refs/heads/main
| 2023-06-27T13:02:47.872731
| 2023-06-26T18:04:43
| 2023-06-26T18:04:43
| 237,299,694
| 7
| 3
|
NOASSERTION
| 2023-06-26T18:04:45
| 2020-01-30T20:30:30
|
R
|
UTF-8
|
R
| false
| true
| 3,030
|
rd
|
logistic_reg_adj_diff.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/logistic_reg_adj_diff.R
\name{logistic_reg_adj_diff}
\alias{logistic_reg_adj_diff}
\title{Logistic regression adjusted differences}
\usage{
logistic_reg_adj_diff(
data,
variable,
by,
adj.vars,
conf.level,
type,
ci_type = c("sd", "centile"),
boot_n = 250,
...
)
}
\arguments{
\item{data}{a data frame}
\item{variable}{string of binary variable in \verb{data=}}
\item{by}{string of the \verb{by=} variable name}
\item{adj.vars}{character vector of variable names to adjust model for}
\item{conf.level}{Must be strictly greater than 0 and less than 1.
Defaults to 0.95, which corresponds to a 95 percent confidence interval.}
\item{type}{string indicating the summary type}
\item{ci_type}{string dictating the bootstrap method for CI estimation.
Must be one of \code{c("sd", "centile")}.}
\item{boot_n}{number of bootstrap iterations to use. In most cases, it is
reasonable to use 250 for the \code{"sd"} method and 5000 for the \code{"centile"}
method.}
\item{...}{not used}
}
\value{
tibble with difference estimate
}
\description{
This function works with \code{gtsummary::add_difference()} to calculate
adjusted differences and confidence intervals based on results from a
logistic regression model. Adjustment covariates are set to the mean to
estimate the adjusted difference. The function uses bootstrap methods to
estimate the adjusted difference between two groups.
The CI is estimated by either using the SD from the bootstrap difference
estimates and calculating the CI assuming normality, or by using the centiles
of the bootstrapped differences as the confidence limits.
The function can also be used in \code{add_p()}, and if you do, be sure to
set \code{boot_n = 1} to avoid long, unused computation.
}
\section{Example Output}{
\if{html}{Example 1}
\if{html}{\figure{logistic_reg_adj_diff_ex1.png}{options: width=80\%}}
\if{html}{Example 2}
\if{html}{\figure{logistic_reg_adj_diff_ex2.png}{options: width=80\%}}
}
\examples{
library(gtsummary)
tbl <- tbl_summary(trial, by = trt, include = response, missing = "no")
# Example 1 -----------------------------------------------------------------
logistic_reg_adj_diff_ex1 <-
tbl \%>\%
add_difference(
test = everything() ~ logistic_reg_adj_diff,
adj.vars = "stage"
)
# Example 2 -----------------------------------------------------------------
# Use the centile method, and
# change the number of bootstrap resamples to perform
logistic_reg_adj_diff_ex2 <-
tbl \%>\%
add_difference(
test = everything() ~
purrr::partial(logistic_reg_adj_diff, ci_type = "centile", boot_n = 100),
adj.vars = "stage"
)
}
\seealso{
Other gtsummary-related functions:
\code{\link{add_inline_forest_plot}()},
\code{\link{add_sparkline}()},
\code{\link{as_ggplot}()},
\code{\link{bold_italicize_group_labels}()},
\code{\link{style_tbl_compact}()},
\code{\link{tbl_likert}()},
\code{\link{theme_gtsummary_msk}()}
}
\concept{gtsummary-related functions}
|
b57a5e675773f3b7b5e70d42c1e06135bb19eda9
|
d4aa21b1c7e32f971ec0db948d2ea41057d9a7b3
|
/case_study.R
|
710168450f8975f0238fafcf6e1105f0dfea0122
|
[] |
no_license
|
grepJimmyGu/Machine-Learning
|
c290506505d746174e9ea06aef03685c3fb3ac2b
|
20f4c6b8336ea21d8140d8f7a7c6e5d347933448
|
refs/heads/master
| 2021-01-19T19:36:07.610594
| 2014-04-20T22:15:56
| 2014-04-20T22:15:56
| 17,353,113
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,090
|
r
|
case_study.R
|
# General Data Analysis
# Marketing case study: derives per-click/per-order metrics from a monthly
# summary, inspects keyword performance, and fits a ridge regression of
# Revenue on Imp/Clicks/Orders. Reads CSVs from the working directory.
data <- read.csv("general_data.csv", header=TRUE, sep = ";")
# Profit Per Click
data["PRP"] <- (data["Revenue"]-data["Spend"])/data["Clicks"]
# Revenue per order
data["RPO"] <- data["Revenue"]/data["Orders"]
# Correlation
library(corrplot)
# columns 2,3,6,9 are assumed to be Imp/Clicks/Orders/Revenue -- positional,
# so this breaks silently if the CSV layout changes
part <- cbind(data[,2],data[,3],data[,6],data[,9])
colnames(part) <- c("Imp","Clicks","Orders","Revenue")
corrplot(cor(part), method = "ellipse")
# Performance
plot(cbind(c(1:12),data["ROAS"]), type = "l")
# Other plots: standardized PRP vs RPO over the 12 months
plot(cbind(c(1:12),scale(data["PRP"])), ylab = "", type = "l", main = "Comparison", xlab = "Every Month")
lines(cbind(c(1:12), scale(data["RPO"])), lty = 2)
legend("topleft", c("Profit Per Click", "Revenue Per Order"), lty = c(1,2), cex=0.75)
# Key Words Analysis
keywords <- read.csv("Keywords.csv", header = TRUE, sep = ";")
keywords <- as.data.frame(keywords)
# NOTE(review): the second deletion operates on the already-shrunk frame,
# so it removes what was originally row 11, not row 10 -- confirm intent
keywords <- keywords[-9,]
keywords <- keywords[-10,]
row.names(keywords) <- keywords[,1]
# Profit Per Click
keywords["PRP"] <- (keywords["Revenue"] - keywords["Spend"])/keywords["Clicks"]
plot(cbind(keywords["Clicks"],keywords["PRP"]))
abline(h = 0)
# interactive point labelling; requires a live graphics device
identify(cbind(keywords["Clicks"],keywords["PRP"]))
legend("topleft", c("2:Acme Tennis", "3:Acme Tennis Balls", "4:Tennis Balls", "6:Buy Tennis Balls"), cex = 0.75)
name <- c("Acme","Acme Tennis","Acme Tennis Balls","Tennis Balls","Bouncy Tennis Balls","Buy Tennis Balls",
          "Free Balls","Tennis Raquets","Sports Equipment","Andre Agassi")
# NOTE(review): `key` is never defined anywhere above -- the next three
# lines error as written; presumably a matrix built from `keywords` was
# intended. TODO confirm and define `key` before this point.
rownames(key) <- name
colnames(key) <- c("Imp", "Clicks", "Orders")
# NOTE(review): "rtex" is likely a typo for prcomp's "retx" argument; as
# written it is silently absorbed by `...` and has no effect
key_pr <- prcomp(key, rtex = TRUE)
# Ridge Regression
library(glmnet)
X <- cbind(as.matrix(data["Imp"]),as.matrix(data["Clicks"]),as.matrix(data["Orders"]))
# alpha = 0 selects pure ridge (L2) regression
fit <- glmnet(X, as.matrix(data["Revenue"]), family = "gaussian", alpha = 0)
cv.fit <- cv.glmnet(X, as.matrix(data["Revenue"]), type.measure = "mse", nfolds = 12, grouped=FALSE)
coef(fit, s = cv.fit$lambda.min)
# predict revenue for two hypothetical ad placements at the CV-optimal lambda
ad_1 <- c(1160023, 5796, 1129)
ad_2 <- c(1070790, 4554, 1273)
predict(fit, newx = rbind(ad_1,ad_2), s = cv.fit$lambda.min)
# side-by-side fitted vs observed revenue
cbind(predict(fit, newx = X, s = cv.fit$lambda.min), as.matrix(data["Revenue"]))
|
f90e4276b9dbf7fd37a331403f06d1b7ee3b1c15
|
56b1818d99acdfeacc15122d0b996ca7e9e24089
|
/R/data.check.r
|
0ba713c049f42b0c688255c9efa83a6bc609339a
|
[] |
no_license
|
ccamp83/kinesis
|
efa3a7d96d23e75c53a843cb74bd5627699bd945
|
eff6e62f45fedcd7fc6cc8000845b8bdd35608bc
|
refs/heads/master
| 2023-09-04T07:56:43.440448
| 2023-08-19T20:29:58
| 2023-08-19T20:29:58
| 124,016,252
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 4,114
|
r
|
data.check.r
|
#' Check the data file and provide fixes if available
#' @param dataset an object of the type data.frame
#' @param refreshRate the refresh rate used during the motion capture (in hertz)
#' @param time.unit the unit of measurement in which time is expressed in the 'time' column of the dataset given to the function. 1 = seconds, 10 = deciseconds, 100 = centiseconds, 1000 = milliseconds, ... Default to 1000
#' @examples
#' libraries()
#'
#' ### restoring missing columns
#'
#' head(rtgData_bad) # dataset provided by this package
#' rtgChecked <- data.check(rtgData_bad) # subjName is given without quotes. When asked to type the subject name, run the next line as is
#' test_subject
#' head(rtgChecked)
#'
#' ### time.unit
#'
#' rtgData <- data.check(rtgData) # dataset provided by this package
#' # time column in rtgData is in milliseconds. Note that data.check allows to specify different time units as well
#' head(rtgData)
#'
#' # instead, should the dataset have time in seconds
#' # the function will return frameT as a vector of NAs
#' data(rtgData) # reload dataset
#' rtgData$time <- rtgData$time / 1000 # change time to seconds
#' rtgData <- data.check(rtgData)
#' rtgData$frameT # always check that frameT looks good
#'
#' # use time.unit to fix it
#' data(rtgData) # reload dataset
#' rtgData$time <- rtgData$time / 1000 # change time to seconds
#' rtgData <- data.check(rtgData, time.unit = 1)
#' rtgData$frameT
#'
#' @export
data.check <- function(dataset, refreshRate = 85, time.unit = 1, check.only = F, ...)
{
  # assign refreshRate & time.unit to global environment for looping inside ddply (temporary)
  assign("refreshRate", refreshRate, envir = kinesis_parameters)
  assign("time.unit", time.unit, envir = kinesis_parameters)
  # get required columns; by position (per the fixes below):
  # [1] subjName, [2] frameN, [3] time, [4] frameT (delta time), [5] trialN
  reqCols <- kinesis_parameters$dataCols
  # look for missing columns
  missingCols <- reqCols[!reqCols %in% names(dataset)]
  #### Fix missing columns (if any)
  if (length(missingCols) > 0) {
    cat("The following columns do not exist:\n")
    cat(missingCols, sep = ", ")
    if(!check.only)
    {
      cat("\n\nFixing...\n\n")
      # Fix subjName: prompt the user interactively for the subject name
      if (reqCols[1] %in% missingCols) {
        cat("Please type subject name:\n")
        dataset$subjName <- readline()
        names(dataset)[names(dataset) == "subjName"] <- reqCols[1]
        cat(reqCols[1], " added.\n", sep = "")
      }
      # Fix frameN
      if (reqCols[2] %in% missingCols) {
        dataset <- kin.frameN(dataset)
        cat(reqCols[2], " added.\n", sep = "")
      }
      # Fix time: reconstruct from the refresh rate and time unit
      if (reqCols[3] %in% missingCols) {
        dataset <- kin.time(dataset, kinesis_parameters$refreshRate, kinesis_parameters$time.unit)
        cat(reqCols[3], " added.\n", sep = "")
      }
      # Fix trialN
      if (reqCols[5] %in% missingCols) {
        # if trialN is missing, it is assumed that there is one trial
        dataset$trialN <- 1
        names(dataset)[names(dataset) == "trialN"] <- reqCols[5]
        cat(reqCols[5], " added.\n", sep = "")
      }
      # Fix deltaTime (frameT)
      # eval/substitute injects the user-configured column names into the
      # ddply call, since dataCols may rename the standard columns
      if (reqCols[4] %in% missingCols) {
        # if time does not exists, assume a constant frame period
        if(reqCols[3] %in% missingCols){
          dataset <- eval(substitute(
            ddply(dataset, .(trialN), mutate,
                  frameT = kinesis_parameters$time.unit / kinesis_parameters$refreshRate)
            , list(trialN = as.name(kinesis_parameters$dataCols[5]))))
        } else {
          # else deltaTime is delta time (first frame of each trial is NA)
          dataset <- eval(substitute(
            ddply(dataset, .(trialN), mutate,
                  frameT = c(NA, diff(time)))
            , list(trialN = as.name(kinesis_parameters$dataCols[5]),
                   time = as.name(kinesis_parameters$dataCols[3]))))
        }
        names(dataset)[names(dataset) == "frameT"] <- reqCols[4]
        cat(reqCols[4], " added.\n", sep = "")
      }
      cat("\nDatabase fixed successfully.")
    } else
    {
      # check.only mode: abort silently (error messages suppressed and
      # restored on exit) after having reported the missing columns
      opt <- options(show.error.messages=FALSE)
      on.exit(options(opt))
      stop()
    }
  }
  else {
    cat("\nDatabase looks good.")
  }
  return(dataset)
}
|
1a22a8e498149ef4a6a44c31066dfe02801efbfd
|
2786dab27b0fd4a7651985c56ee5756fae5fe437
|
/Mockup/r/win-library/3.5/emov/analysis/analysis.R
|
152a75814c10119c83cd07e5e61e66f287696f5e
|
[] |
no_license
|
Praedo4/The-Eye-Tracking-Interface
|
7bc167878ff7eb2d0bc4b1fadf3bb532a82c229b
|
7db22494fccedc4f87bfdc4eff2dfce9b19b4d22
|
refs/heads/master
| 2021-04-30T02:13:42.373303
| 2018-08-20T14:46:45
| 2018-08-20T14:46:45
| 121,497,628
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 4,064
|
r
|
analysis.R
|
# sample analysis of eye movement data using emov in R
# by Simon Schwab
# Pipeline: read SMI iView samples, mark missing data, filter, segment into
# trials, detect fixations (I-DT), and plot raw traces with fixations.
library(calibrate) # textxy
# NOTE(review): setwd + absolute data paths make this script machine-specific
setwd("~/Work/code/emov/pkg/R")
source("emov.R")
# install.packages("circular")
#library("circular")
# read raw data file
data = emov.read_iviewsamples(
  "~/Data/nscenes/natural_scenes_samples.txt", 46)
# handle missing data: Iview has 0 for missing data
data$L.Raw.X..px.[data$L.Raw.X..px. == 0] = NA
data$L.Raw.Y..px.[data$L.Raw.Y..px. == 0] = NA
data$L.POR.X..mm.[data$L.POR.X..mm. == 0] = NA
data$L.POR.Y..mm.[data$L.POR.Y..mm. == 0] = NA
data$L.GVEC.X[data$L.GVEC.X == 0] = NA
data$L.GVEC.Y[data$L.GVEC.Y == 0] = NA
data$L.GVEC.Z[data$L.GVEC.Z == 0] = NA
# select channels to use (left-eye point of regard; y negated so up is positive)
data = data.frame(t = data$Time, x = data$L.POR.X..mm, y = -data$L.POR.Y..mm)
# filter data, 1 deg is 14.4 mm, filtering > 750 deg/s (10800 mm/s)
# NOTE(review): the code uses 10500/200, not 10800/200 as the comment above
# implies -- confirm which velocity threshold is intended
flt = emov.filter(data$x, data$y, 10500/200)
data$x = flt$x
data$y = flt$y
# cart2sphere
#data = emov.cart2sphere(data$L.GVEC.X, data$L.GVEC.Y, data$L.GVEC.Z)
#data = data.frame(x=deg(data$az), y=-deg(data$elev))
# trial segmentation: fixed-length trials of 2000 samples each
n = 12 # number of trials
idx = c() # index
start = 1
for (i in 1:n) {
  idx = c(idx, start, start - 1 + 2000)
  start = start + 2000
}
idx <- matrix(idx, nrow=n, ncol=2, byrow=TRUE)
idx <- data.frame(start=idx[,1], end=idx[,2]) # easy to access
# fixation detecton for each trial (I-DT: dispersion threshold + min duration)
# NOTE(review): the comment units disagree with the value (19.0 vs
# "28.8 cm (2 deg)") -- confirm the intended dispersion threshold
max_disp = 19.0 # in cm, 28.8 cm (2 deg)
min_dur = 80/1000*200 # 80 ms at a 200 Hz sampling rate = 16 samples
fix = emov.idt(data$t, data$x, data$y, max_disp, min_dur)
# fixation segmentation for easy ploting: per-trial subsets by time range
fixseg = list()
for (i in 1:n) {
  start = data$t[idx[i,1]]
  end = data$t[idx[i,2]]
  fixseg[[i]] = fix[fix$start >= start & fix$end <= end, ]
}
# Plot all trials, raw data and fixations
#my_xlim = c(-35, 25)
#my_ylim = c(-20, 15)
my_xlim = c(0, 770)
my_ylim = c(-680, -250)
# marker radius scaling: r=1 corresponds to a fixation duration of 50 ms
c = sqrt(80/pi) # constant, r=1 corresponds to fixation duration of 50 ms.
par(mfcol=c(4,3))
for (i in 1:n) {
  # fixation bubbles sized by duration, then outlines, then the raw trace
  plot(fixseg[[i]]$x, fixseg[[i]]$y,
       xlim=my_xlim, ylim=my_ylim,
       xlab=NA, ylab=NA, pch=19,
       cex=sqrt(fixseg[[i]][, 3] * 10^-3 * pi^-1) * c^-1, col='gray')
  textxy(fixseg[[i]]$x, fixseg[[i]]$y, 1:length(fixseg[[i]]$x), cx=1)
  par(new=TRUE)
  plot(fixseg[[i]]$x, fixseg[[i]]$y,
       xlim=my_xlim, ylim=my_ylim,
       xlab=NA, ylab=NA, cex=1)
  par(new=TRUE)
  plot(data$x[idx$start[i]:idx$end[i]],
       data$y[idx$start[i]:idx$end[i]],
       type="l", xlim=my_xlim, ylim=my_ylim,
       xlab="Horizontal (px)", ylab="Vertical (px)")
}
# Plot single trial
par(mfcol=c(1,1))
nr = 1
plot(fixseg[[nr]]$x, fixseg[[nr]]$y, xlim=my_xlim, ylim=my_ylim,
     xlab=NA, ylab=NA, pch=19,
     cex=sqrt(fixseg[[nr]][, 3] * 10^-3 * pi^-1) * c^-1,
     col='gray')
textxy(fixseg[[nr]]$x, fixseg[[nr]]$y, 1:length(fixseg[[nr]]$x), cx=1)
par(new=TRUE)
plot(fixseg[[nr]]$x, fixseg[[nr]]$y, xlim=my_xlim, ylim=my_ylim,
     xlab=NA, ylab=NA, cex=1)
par(new=TRUE)
plot(data$x[idx$start[nr]:idx$end[nr]],
     data$y[idx$start[nr]:idx$end[nr]],
     type="l", xlim=my_xlim, ylim=my_ylim,
     xlab="Horizontal (px)", ylab="Vertical (px)")
# Plot stimuli
# install.packages("jpeg")
# library(jpeg)
#
# img = list()
# img[[1]] <- readJPEG("/home/simon/Data/nscenes/stimuli/000.jpg")
# img[[2]] <- readJPEG("/home/simon/Data/nscenes/stimuli/001.jpg")
# img[[3]] <- readJPEG("/home/simon/Data/nscenes/stimuli/002.jpg")
# img[[4]] <- readJPEG("/home/simon/Data/nscenes/stimuli/003.jpg")
# img[[5]] <- readJPEG("/home/simon/Data/nscenes/stimuli/004.jpg")
# img[[6]] <- readJPEG("/home/simon/Data/nscenes/stimuli/005.jpg")
# img[[7]] <- readJPEG("/home/simon/Data/nscenes/stimuli/006.jpg")
# img[[8]] <- readJPEG("/home/simon/Data/nscenes/stimuli/007.jpg")
# img[[9]] <- readJPEG("/home/simon/Data/nscenes/stimuli/008.jpg")
# img[[10]] <- readJPEG("/home/simon/Data/nscenes/stimuli/009.jpg")
# img[[11]] <- readJPEG("/home/simon/Data/nscenes/stimuli/010.jpg")
# img[[12]] <- readJPEG("/home/simon/Data/nscenes/stimuli/011.jpg")
#
# par(mfcol=c(4,3))
#
# for (i in 1:n) {
#   plot(c(0,1), c(0,1))
#   rasterImage(img[[i]], 0, 0, 1, 1)
# }
|
af5049a5119ffe30ef459997f3de85585a417931
|
7c8e977bcd7e4a68c908343d1eda902726c37262
|
/Rlib/format.cbs.R
|
93dd86e3ac9b408ea54a1cbccf4017df50df6799
|
[] |
no_license
|
gideonite/cn_pipeline
|
8c6352005499a3b0a34df7e817aab9cd2cda16b1
|
89291340a1243589e98e1a4deab43a8d02d05717
|
refs/heads/master
| 2021-01-22T11:37:00.983072
| 2013-03-15T16:29:10
| 2013-03-15T16:29:10
| 6,833,427
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 7,660
|
r
|
format.cbs.R
|
#Set up input files
#Input is
#cbs.output - the standard output from running CBS
#segments.p.output - the output from running segments.p
#outputfile - name of output file from running format.cbs
#header - whether the first line in the output should be the column names
# Format CBS (circular binary segmentation) output into a tab-delimited table.
#
# Arguments:
#   cbs.output        - standard CBS output; its $data element holds chromosome
#                       (col 1), position (col 2), and one value column per
#                       sample (cols 3+).
#   segments.p.output - output from segments.p; one row per segment with
#                       sample ID, chrom, start, end, num.mark, seg.mean, and
#                       p-value/CI columns (columns 8-10 are carried through).
#   outputfile        - name of the file the formatted table is written to.
#   header            - whether the first output line should be column names.
#
# For each sample the function (1) counts total probes per segment including
# positions with missing values, (2) builds pseudo-segments for runs of
# missing-value probes in gaps before/between/after segments and merges them
# into positional order, and (3) appends the left-neighbour p-value columns.
# Returns nothing; writes `outputfile` as its only effect.
format.cbs <- function(cbs.output,segments.p.output,outputfile,header=TRUE)
{
  unique.names <- unique(segments.p.output[,1])
  n <- length(unique.names)
  chroms.vector <- cbs.output$data[,1]
  positions.vector <- cbs.output$data[,2]
  for(i in 1:n)
  {
    new.output <- segments.p.output[segments.p.output[,1]==unique.names[i],]
    p <- nrow(new.output)
    new.data <- cbs.output$data[,i+2]
    previous.chrom <- new.output[1,2]
    previous.start <- new.output[1,3]
    previous.end <- new.output[1,4]
    previous.markers <- new.output[1,5]
    # Positions where this sample's value is missing (not counted by num.mark).
    which.na <- which(is.na(new.data))
    chroms.na <- chroms.vector[which.na]
    positions.na <- positions.vector[which.na]
    count.probes <- rep(0,p)
    sum.missing <- sum(chroms.na==previous.chrom & positions.na>=previous.start & positions.na<=previous.end)
    count.probes[1] <- previous.markers+sum.missing
    # Add count of all probes (informative markers + missing) per segment.
    for(j in 2:p)
    {
      new.chrom <- new.output[j,2]
      new.start <- new.output[j,3]
      new.end <- new.output[j,4]
      new.markers <- new.output[j,5]
      sum.missing <- sum(chroms.na==new.chrom & positions.na>=new.start & positions.na<=new.end)
      count.probes[j] <- new.markers+sum.missing
    }
    new.output <- cbind(new.output[,1:4],count.probes,new.output[,5:6],new.output[,8:10])
    # Look in gaps before/between/after segments for runs of missing probes.
    new.matrix <- NULL
    previous.chrom <- new.output[1,2]
    # BUG FIX: the original assigned new.output[1,3] to previous.end (typo);
    # the segment *start* belongs in previous.start, which is read just below.
    previous.start <- new.output[1,3]
    previous.end <- new.output[1,4]
    which.missing <- which(chroms.na==previous.chrom & positions.na<=previous.start)
    if(length(which.missing)>0)
    {
      new.row <- c(unique.names[i],previous.chrom,min(positions.na[which.missing]),max(positions.na[which.missing]),length(which.missing),0,rep(NA,4))
      if(length(new.matrix)==0) new.matrix <- new.row
      else new.matrix <- rbind(new.matrix,new.row)
    }
    for(j in 2:p)
    {
      new.chrom <- new.output[j,2]
      new.start <- new.output[j,3]
      new.end <- new.output[j,4]
      if (new.chrom==previous.chrom)
      {
        # Gap between two segments on the same chromosome.
        which.missing <- which(chroms.na==new.chrom & positions.na<=new.start & positions.na>=previous.end)
        if(length(which.missing)>0)
        {
          new.row <- c(unique.names[i],new.chrom,min(positions.na[which.missing]),max(positions.na[which.missing]),length(which.missing),0,rep(NA,4))
          if(length(new.matrix)==0) new.matrix <- new.row
          else new.matrix <- rbind(new.matrix,new.row)
        }
        previous.chrom <- new.chrom
        previous.end <- new.end
      }
      else if(new.chrom!=previous.chrom)
      {
        # Chromosome change: check the tail of the previous chromosome and the
        # head of the new one.
        which.missing <- which(chroms.na==previous.chrom & positions.na>=previous.end)
        if(length(which.missing)>0)
        {
          new.row <- c(unique.names[i],previous.chrom,min(positions.na[which.missing]),max(positions.na[which.missing]),length(which.missing),0,rep(NA,4))
          if(length(new.matrix)==0) new.matrix <- new.row
          else new.matrix <- rbind(new.matrix,new.row)
        }
        which.missing <- which(chroms.na==new.chrom & positions.na<=new.start)
        if(length(which.missing)>0)
        {
          new.row <- c(unique.names[i],new.chrom,min(positions.na[which.missing]),max(positions.na[which.missing]),length(which.missing),0,rep(NA,4))
          if(length(new.matrix)==0) new.matrix <- new.row
          else new.matrix <- rbind(new.matrix,new.row)
        }
        previous.chrom <- new.chrom
        previous.end <- new.end
      }
    }
    # Trailing gap after the last segment.
    which.missing <- which(chroms.na==new.chrom & positions.na>=new.end)
    if(length(which.missing)>0)
    {
      new.row <- c(unique.names[i],new.chrom,min(positions.na[which.missing]),max(positions.na[which.missing]),length(which.missing),0,rep(NA,4))
      if(length(new.matrix)==0) new.matrix <- new.row
      else new.matrix <- rbind(new.matrix,new.row)
    }
    # Now merge the gap rows into the segment table in positional order.
    if(length(new.matrix)>0)
    {
      new.matrix <- matrix(new.matrix,ncol=10)
      new.matrix.chroms <- new.matrix[,2]
      # new.matrix.chroms <- as.numeric(new.matrix[,2])
      new.matrix.ends <- as.numeric(new.matrix[,4])
      for(j in 1:nrow(new.matrix))
      {
        new.chrom <- new.matrix.chroms[j]
        new.end <- new.matrix.ends[j]
        if(new.chrom==new.output[1,2] & new.end<=as.numeric(new.output[1,3]))
        # if(new.chrom==as.numeric(new.output[1,2]) & new.end<=as.numeric(new.output[1,3]))
        {
          new.output <- rbind(new.matrix[j,],new.output)
        }
        else if (new.chrom==new.output[nrow(new.output),2] & new.end>=as.numeric(new.output[nrow(new.output),3]))
        # else if (new.chrom==as.numeric(new.output[nrow(new.output),2]) & new.end>=as.numeric(new.output[nrow(new.output),3]))
        {
          new.output <- rbind(new.output,new.matrix[j,])
        }
        else
        {
          # Insert before the first same-chromosome segment that starts after
          # this gap row ends.
          which.chrom <- which(new.output[,2]==new.chrom)
          no.match <- TRUE
          k <- 1
          while(no.match & k<=length(which.chrom))
          {
            new.position <- which.chrom[k]
            matrix.start <- as.numeric(new.output[new.position,3])
            if(new.end<=matrix.start)
            {
              new.output <- rbind(new.output[1:(new.position-1),],new.matrix[j,],new.output[new.position:nrow(new.output),])
              no.match <- FALSE
            }
            k <- k+1
          }
            if(no.match)
            {
              new.output <- rbind(new.output[1:new.position,],new.matrix[j,],new.output[(new.position+1):nrow(new.output),])
            }
        }
      }
    }
    # Left-neighbour p-value columns: copy the previous row's columns 8:10 when
    # the previous row is on the same chromosome, NA otherwise.
    left.threecolumns <- c(NA,NA,NA)
    for(j in 2:nrow(new.output))
    {
      if(new.output[j-1,2]==new.output[j,2])
      {
        left.threecolumns <- rbind(left.threecolumns,as.numeric(new.output[j-1,8:10]))
      }
      else
      {
        left.threecolumns <- rbind(left.threecolumns,c(NA,NA,NA))
      }
    }
    new.output <- cbind(new.output[,1:7],left.threecolumns,new.output[,8:10])
    if(i==1) output <- new.output
    else output <- rbind(output,new.output)
  }
  names(output) <- c("sample","chrom","loc.start","loc.end","num.mark","num.informative","seg.mean","pval","l.lcl","l.ucl","r.pval","r.lcl","r.ucl")
  write.table(output,outputfile,sep="\t",row.names=FALSE,col.names=header,quote=FALSE)
}
|
c5d4a24b04e8deb21a1f64abab6fb44e2139db36
|
efa74f16941c8b503d174d124eeff630219f38a3
|
/functions_R/F02_HelperFunctions.R
|
e1c843abfe4eadc470adc665907a229ceada1d82
|
[
"MIT",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
ong8181/eDNA-early-pooling
|
1cabf8d27c1e52f28eed74fa8525de2265e5a754
|
6d5a3bcac691dac0347c151f94801b8595fbb5d6
|
refs/heads/main
| 2023-04-12T01:41:02.715960
| 2022-07-01T00:25:20
| 2022-07-01T00:25:20
| 458,195,380
| 5
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,703
|
r
|
F02_HelperFunctions.R
|
####
#### F02. Figure Helper functions
####

# Summarize taxa names for plotting: collapse the tax_table of a phyloseq
# object into a representative-name column "rep_tax", keeping the most
# abundant taxa and lumping the remainder into "Others".
#
# Arguments:
#   ps_object           - a phyloseq object with otu_table, sample_data and tax_table.
#   taxa_rank           - column name in the tax_table used as the target rank.
#   top_taxa_n          - number of most-abundant rep_tax labels to keep (default 10).
#   taxa_always_include - optional character vector of labels kept regardless of rank.
# Returns: the phyloseq object rebuilt with the revised tax_table.
taxa_name_summarize <- function(ps_object, taxa_rank, top_taxa_n = 10, taxa_always_include = NULL){
tax_df <- as.data.frame(tax_table(ps_object))
# Default every taxon to "Undetermined" unless a rep_tax column already exists.
if(is.null(tax_df$rep_tax)) tax_df$rep_tax <- "Undetermined"
# Search Others and Undetermined taxa
tax_col1 <- which(colnames(tax_df) == taxa_rank) # Target rank
tax_col2 <- which(colnames(tax_df) == "species") # Highest resolution
## Search unidentified taxa (= taxa is null from the target rank to the highest resolution)
# cond1: name empty (but not NA) at the target rank.
rep_tax_cond1 <- tax_df[,taxa_rank] == "" & !is.na(tax_df[,taxa_rank])
# cond2: name empty at every rank from the target rank down to species.
rep_tax_cond2 <- apply(tax_df[,tax_col1:tax_col2] == "", 1, sum) == (tax_col2 - tax_col1) + 1
# Replace taxa names: named at target rank -> that name; empty at target rank
# but named at a lower rank -> "Others"; empty everywhere -> left as is
# (i.e. "Undetermined" unless rep_tax pre-existed).
tax_df[!rep_tax_cond1, "rep_tax"] <- as.character(tax_df[!rep_tax_cond1, taxa_rank])
tax_df[rep_tax_cond1 & !rep_tax_cond2, "rep_tax"] <- "Others"
# Re-import phyloseq object with revised tax_table
ps_object2 <- phyloseq(otu_table(ps_object), sample_data(ps_object), tax_table(as.matrix(tax_df)))
# Replace low abundance taxa name with Others: rank rep_tax labels by total
# read count and keep only the top_taxa_n (plus any taxa_always_include).
taxa_abundance_rank <- aggregate(taxa_sums(ps_object2), by = list(tax_table(ps_object2)[,"rep_tax"]), sum)
taxa_abundance_rank <- taxa_abundance_rank[order(taxa_abundance_rank$x, decreasing = T),]
taxa_top <- taxa_abundance_rank[1:top_taxa_n,]
if(is.null(taxa_always_include)) {
include_taxa <- as.character(taxa_top[,1])
} else {
include_taxa <- unique(c(as.character(taxa_top[,1]), taxa_always_include))
}
low_tax <- is.na(match(tax_table(ps_object2)[,"rep_tax"], include_taxa))
tax_table(ps_object2)[low_tax,"rep_tax"] <- "Others"
return(ps_object2)
}
|
f8b9615f4e2c68bf2d86b6de96f20c105b057236
|
09c5ab6b8f885f31d437b846bba1508dc6e2d7db
|
/source.R
|
510a52e3a16af8ebbc5e5483048505750fc4ce92
|
[] |
no_license
|
raphaelgall/nobelprizestats
|
b0fdc237f87096f6a2305a77a25b34c6b6ed8b78
|
4b6ca5f12e5077203c633147551d92ba9fe0ece8
|
refs/heads/master
| 2020-07-25T12:51:41.043879
| 2020-02-14T15:37:01
| 2020-02-14T15:37:01
| 67,025,624
| 0
| 1
| null | 2016-09-05T11:07:35
| 2016-08-31T09:55:32
|
HTML
|
UTF-8
|
R
| false
| false
| 3,423
|
r
|
source.R
|
# Nobel prize analysis with statistical techniques:
# explorative analysis, including machine learning and quantitative text analysis.
# Source file: builds the speech corpora used by the downstream analysis scripts.

# Libraries, installed only once:
# neededpackages <- c("tm", "SnowballCC", "RColorBrewer", "ggplot2", "wordcloud", "biclust", "cluster", "igraph", "fpc", "psych", "ggvis")
# install.packages(neededpackages, dependencies=TRUE)
library('tm')
library('wordcloud')
library('SnowballC')
library('ggplot2')
library('cluster')
library('fpc')
library('psych')
library('ggvis')

# Load texts for all years per prize.
peacedata <- file.path("nobelprize_pea/1940_2016")
literaturedata <- file.path("nobelprize_lit/1949_2016")
nobel_peace <- Corpus(DirSource(peacedata))
nobel_literature <- Corpus(DirSource(literaturedata))

# Load texts for each decade separately per prize. The original code repeated
# the same three statements for every decade and prize; generate the exact
# same variables (peacedata_1940, nobel_peace_1940, ...) in a loop instead so
# downstream scripts that reference them individually keep working.
decades <- seq(1940, 2010, by = 10)
for (d in decades) {
  assign(paste0("peacedata_", d), file.path("nobelprize_pea", d))
  assign(paste0("nobel_peace_", d),
         Corpus(DirSource(get(paste0("peacedata_", d)))))
  assign(paste0("literaturedata_", d), file.path("nobelprize_lit", d))
  assign(paste0("nobel_literature_", d),
         Corpus(DirSource(get(paste0("literaturedata_", d)))))
}

# Store speeches by decade in (unnamed) lists, matching the original layout.
nobel_peace_bydecade <- unname(mget(paste0("nobel_peace_", decades)))
nobel_literature_bydecade <- unname(mget(paste0("nobel_literature_", decades)))
|
68278b2bbefccd43702bd1e69d4a5130834a6350
|
9c8962826b6125045ec4f93cab879bc26c9f9a59
|
/mse/03-run-sra-2sim.R
|
b0f6291eea91bf080f840b7e15a9e2a53d6dd08b
|
[] |
no_license
|
pbs-assess/yelloweye-inside
|
c00c61e3764b48e17af3c10b7ddf7f5b5ff58d16
|
8ea4a374f3cee21e515a82627d87d44e67228814
|
refs/heads/master
| 2022-08-19T05:15:03.352305
| 2022-08-15T23:51:36
| 2022-08-15T23:51:36
| 219,026,971
| 1
| 2
| null | null | null | null |
UTF-8
|
R
| false
| false
| 13,128
|
r
|
03-run-sra-2sim.R
|
# Use half the available cores for parallel SRA fits.
cores <- parallel::detectCores() / 2
library(MSEtool)
############ Condition operating models with SRA_scope and data
# SRA_data: list of catch/index/composition inputs plus pre-built selectivity
# parameter matrices and plot labels (f_name, s_name) — built by an earlier script.
SRA_data <- readRDS("mse/scoping/SRA_data.rds")
data_names <- c("Chist", "Index", "I_sd", "I_type", "length_bin", "s_CAA", "CAA", "CAL", "I_units")
data_ind <- match(data_names, names(SRA_data))
OM_condition <- readRDS("mse/scoping/OM_2sim.rds")
# Base: condition the OM on all indices with equal weight; age/length comps off
# (LWT CAL/CAA = 0). Then run an 11-peel retrospective and save both objects.
SRA <- SRA_scope(OM_condition, data = SRA_data[data_ind], condition = "catch2", selectivity = rep("free", 2),
s_selectivity = rep("logistic", 5), cores = 1,
vul_par = SRA_data$vul_par, map_vul_par = matrix(NA, 80, 2),
map_s_vul_par = SRA_data$map_s_vul_par, map_log_rec_dev = SRA_data$map_log_rec_dev,
LWT = list(CAL = 0, CAA = 0))
ret <- retrospective(SRA, 11)
saveRDS(list(SRA, ret), file = "mse/scoping/scoping_base.rds")
# Reload from disk so the script section can be rerun without refitting.
SRA_list <- readRDS("mse/scoping/scoping_base.rds")
SRA <- SRA_list[[1]]; ret <- SRA_list[[2]]
plot(SRA, retro = ret, file = "mse/scoping/scoping_base", dir = getwd(),
open_file = FALSE, f_name = SRA_data$f_name, s_name = SRA_data$s_name,
MSY_ref = c(0.4, 0.8), render_args = list(output_format = "word_document"))
# Upweight dogfish: same fit but the second index (dogfish survey,
# per the Index weight vector) gets 4x likelihood weight.
SRA2 <- SRA_scope(OM_condition, data = SRA_data[data_ind], condition = "catch2", selectivity = rep("free", 2),
s_selectivity = rep("logistic", 5), cores = 1,
vul_par = SRA_data$vul_par, map_vul_par = matrix(NA, 80, 2),
map_s_vul_par = SRA_data$map_s_vul_par, map_log_rec_dev = SRA_data$map_log_rec_dev,
LWT = list(CAL = 0, CAA = 0, Index = c(1, 4, 1, 1, 1)))
ret2 <- retrospective(SRA2, 11)
saveRDS(list(SRA2, ret2), file = "mse/scoping/scoping_upweight_dogfish.rds")
SRA_list <- readRDS("mse/scoping/scoping_upweight_dogfish.rds")
SRA2 <- SRA_list[[1]]; ret2 <- SRA_list[[2]]
# plot for report -------------------------------------------------------------
# png(here::here("mse/figures/retrospective-equal-weighting.png"), width = 8, height = 5,
#   res = 220, units = "in")
# par(mfcol = c(2, 3), mar = c(5, 4, 1, 1), oma = c(0, 0, 2.5, 0), cex = 0.7)
# plot(ret)
# dev.off()
#
# png(here::here("mse/figures/retrospective-upweight-dogfish-est-sel.png"), width = 8, height = 5,
#   res = 220, units = "in")
# par(mfcol = c(2, 3), mar = c(5, 4, 1, 1), oma = c(0, 0, 2.5, 0), cex = 0.7)
# plot(ret2)
# dev.off()
# -----------------------------------------------------------------------------
plot(SRA2, retro = ret2, file = "mse/scoping/scoping_upweight_dogfish", dir = getwd(), open_file = FALSE, f_name = SRA_data$f_name, s_name = SRA_data$s_name,
MSY_ref = c(0.4, 0.8), render_args = list(output_format = "word_document"))
# Upweight dogfish survey, fix HBLL sel from base: reuse the HBLL selectivity
# estimated in the base fit as fixed values (all s_vul_par entries mapped NA).
base_s_vul_par <- c(SRA@mean_fit$report$s_LFS[1], SRA@mean_fit$report$s_L5[1])
s_vul_par <- matrix(c(base_s_vul_par, 0.5), 3, 5)
map_s_vul_par <- matrix(NA, 3, 5)
SRA3 <- SRA_scope(OM_condition, data = SRA_data[data_ind], condition = "catch2", selectivity = rep("free", 2),
s_selectivity = rep("logistic", 5), cores = 1,
vul_par = SRA_data$vul_par, map_vul_par = matrix(NA, 80, 2),
s_vul_par = s_vul_par, map_s_vul_par = map_s_vul_par, map_log_rec_dev = SRA_data$map_log_rec_dev,
LWT = list(CAL = 0, CAA = 0, Index = c(1, 4, 1, 1, 1)))
ret3 <- retrospective(SRA3, 11)
saveRDS(list(SRA3, ret3), file = "mse/scoping/scoping_updog_fixsel.rds")
SRA_list <- readRDS("mse/scoping/scoping_updog_fixsel.rds")
SRA3 <- SRA_list[[1]]; ret3 <- SRA_list[[2]]
#' Add a text label to the current plot at a position given as fractions of
#' the plot region.
#'
#' @param xfrac The fraction over from the left side.
#' @param yfrac The fraction down from the top.
#' @param label The text to label with.
#' @param pos Position to pass to text()
#' @param ... Anything extra to pass to text(), e.g. cex, col.
add_label <- function(xfrac, yfrac, label, pos = 4, ...) {
  usr <- par("usr")  # c(xmin, xmax, ymin, ymax) of the active plot region
  x_at <- usr[1] + xfrac * (usr[2] - usr[1])
  y_at <- usr[4] - yfrac * (usr[4] - usr[3])
  text(x_at, y_at, label, pos = pos, ...)
}
# Plot retrospective time series from a retrospective object's TS array
# (dimensions: peel x year x variable), one line per retrospective peel.
# Only TS variable index 3 is drawn (see the loop below); the commented-out
# loop header shows the original all-variables intent.
#
# Arguments:
#   retro  - retrospective object with a @TS array and a "TS_lab" attribute.
#   legend - draw a peel legend (English title translated via rosettafish)?
#   french - use the French y-axis label?
plot_retro_pbs <- function(retro, legend = TRUE, french = FALSE) {
  yrs <- as.numeric(dimnames(retro@TS)$Year)
  peel_labels <- dimnames(retro@TS)$Peel
  pal <- viridisLite::plasma(length(peel_labels))
  yr_mat <- matrix(yrs, ncol = length(pal), nrow = dim(retro@TS)[2], byrow = FALSE)
  # for(i in 1:length(retro@TS_var)) {
  for (i in 3) {
    ts_mat <- t(retro@TS[, , i])
    if (french) {
      ylab <- rosettafish::en2fr("Spawning biomass")
    } else {
      ylab <- attr(retro, "TS_lab")[i]
    }
    plot(NULL, NULL, xlim = range(yrs), ylim = c(0, 1.1 * max(ts_mat, na.rm = TRUE)),
         xlab = "Year", ylab = ylab, axes = FALSE)
    abline(h = 0, col = "grey")
    # Reference line at 1 for ratio-type variables (label contains "MSY").
    if (grepl("MSY", as.character(ylab))) {
      abline(h = 1, lty = 3)
    }
    matlines(yr_mat, ts_mat, col = pal, lty = 1)
    if (legend) {
      # NOTE(review): legend coordinates (1917, 4000) are hard-coded for this
      # stock's axis ranges — confirm if reused elsewhere.
      legend(1917, 4000, legend = peel_labels, lwd = 1, col = pal, bty = "n",
             title = paste0(rosettafish::en2fr("Years removed", translate = french), ":"),
             y.intersp = 0.8)
    }
  }
}
# plot for report -------------------------------------------------------------
# Two-panel English figure: base fit (top) vs the chosen base OM (bottom).
png(here::here("mse/figures/retrospective-spawning-biomass.png"), width = 5, height = 5,
res = 260, units = "in")
par(mfcol = c(2, 1), mar = c(0, 4, 0, 0), oma = c(4, 0, 1, 1), cex = 0.7, yaxs = "i")
plot_retro_pbs(ret, legend = FALSE)
add_label(0.02, 0.06, "(A) Initial fit")
box()
axis(2, at = seq(0, 5000, 1000))
plot_retro_pbs(ret3)
axis(2, at = seq(0, 4000, 1000))
axis(1)
box()
mtext("Year", side = 1, line = 2.5, cex = 0.8)
add_label(0.02, 0.06, "(B) Base OM")
nyr_label <- dimnames(ret@TS)$Peel
dev.off()
# Same figure with French labels.
png(here::here("mse/figures-french/retrospective-spawning-biomass.png"), width = 5, height = 5,
res = 260, units = "in")
par(mfcol = c(2, 1), mar = c(0, 4, 0, 0), oma = c(4, 0, 1, 1), cex = 0.7, yaxs = "i")
plot_retro_pbs(ret, legend = FALSE, french = TRUE)
add_label(0.02, 0.06, "(A) Ajustement initial du modèle")
box()
axis(2, at = seq(0, 5000, 1000))
plot_retro_pbs(ret3, french = TRUE)
axis(2, at = seq(0, 4000, 1000))
axis(1)
box()
mtext(rosettafish::en2fr("Year"), side = 1, line = 2.5, cex = 0.8)
add_label(0.02, 0.06, "(B) ME de base")
nyr_label <- dimnames(ret@TS)$Peel
dev.off()
# -----------------------------------------------------------------------------
# -----------------------------------------------------------------------------
plot(SRA3, retro = ret3, file = "mse/scoping/scoping_updog_fixsel", dir = getwd(), open_file = FALSE, f_name = SRA_data$f_name, s_name = SRA_data$s_name,
MSY_ref = c(0.4, 0.8), render_args = list(output_format = "word_document"))
compare_SRA(SRA, SRA2, SRA3, scenario = list(names = c("base", "upweight dogfish", "up.dog. fix HBLL sel")))
# Low catch - use gfdatabase estimates of commercial catch in 1986-2005
# (modelled here as halving the reported catch over that period).
SRA_data2 <- SRA_data
SRA_data2$Chist[match(1986:2005, SRA_data2$Year), 1] <- 0.5 * SRA_data2$Chist[match(1986:2005, SRA_data2$Year), 1]
SRA4 <- SRA_scope(OM_condition, data = SRA_data2[data_ind], condition = "catch2", selectivity = rep("free", 2),
s_selectivity = rep("logistic", 5), cores = 1,
vul_par = SRA_data$vul_par, map_vul_par = matrix(NA, 80, 2),
map_s_vul_par = SRA_data$map_s_vul_par, map_log_rec_dev = SRA_data$map_log_rec_dev,
LWT = list(CAL = 0, CAA = 0, Index = c(1, 4, 1, 1, 1)))
ret4 <- retrospective(SRA4, 11)
saveRDS(list(SRA4, ret4), file = "mse/scoping/scoping_lowcatch.rds")
SRA_list <- readRDS("mse/scoping/scoping_lowcatch.rds")
SRA4 <- SRA_list[[1]]; ret4 <- SRA_list[[2]]
plot(SRA4, retro = ret4, file = "mse/scoping/scoping_lowcatch", dir = getwd(), open_file = FALSE, f_name = SRA_data$f_name, s_name = SRA_data$s_name,
MSY_ref = c(0.4, 0.8), render_args = list(output_format = "word_document"))
# Low catch - fix HBLL sel from base (s_vul_par/map_s_vul_par built above).
SRA5 <- SRA_scope(OM_condition, data = SRA_data2[data_ind], condition = "catch2", selectivity = rep("free", 2),
s_selectivity = rep("logistic", 5), cores = 1,
vul_par = SRA_data$vul_par, map_vul_par = matrix(NA, 80, 2),
s_vul_par = s_vul_par, map_s_vul_par = map_s_vul_par, map_log_rec_dev = SRA_data$map_log_rec_dev,
LWT = list(CAL = 0, CAA = 0, Index = c(1, 4, 1, 1, 1)))
ret5 <- retrospective(SRA5, 11)
saveRDS(list(SRA5, ret5), file = "mse/scoping/scoping_lowcatch_fixsel.rds")
SRA_list <- readRDS("mse/scoping/scoping_lowcatch_fixsel.rds")
SRA5 <- SRA_list[[1]]; ret5 <- SRA_list[[2]]
plot(SRA5, retro = ret5, file = "mse/scoping/scoping_lowcatch_fixsel", dir = getwd(), open_file = FALSE, f_name = SRA_data$f_name, s_name = SRA_data$s_name,
MSY_ref = c(0.4, 0.8), render_args = list(output_format = "word_document"))
## Try to estimate fishery selectivity: free the first two vul_par rows
## (starting values 50/40 and 30/25) and turn composition likelihoods on.
SRA_data$vul_par[1:2, ] <- c(50, 40, 30, 25)
map_vul_par <- matrix(NA, 80, 2)
map_vul_par[1:2, ] <- 1:4
SRA6 <- SRA_scope(OM_condition, data = SRA_data[data_ind], condition = "catch2", selectivity = rep("logistic", 2),
s_selectivity = rep("logistic", 5), cores = 1,
vul_par = SRA_data$vul_par, map_vul_par = map_vul_par,
map_s_vul_par = SRA_data$map_s_vul_par, map_log_rec_dev = SRA_data$map_log_rec_dev,
LWT = list(CAL = 1, CAA = 20, Index = c(1, 4, 1, 1, 1)))
ret6 <- retrospective(SRA6, 11)
saveRDS(list(SRA6, ret6), file = "mse/scoping/scoping_estfisherysel_esthbllsel.rds")
SRA_list <- readRDS("mse/scoping/scoping_estfisherysel_esthbllsel.rds")
SRA6 <- SRA_list[[1]]; ret6 <- SRA_list[[2]]
plot(SRA6, retro = ret6, file = "mse/scoping/scoping_estfisherysel_esthbll_sel", dir = getwd(), open_file = FALSE, f_name = SRA_data$f_name, s_name = SRA_data$s_name,
MSY_ref = c(0.4, 0.8), render_args = list(output_format = "word_document"))
## Compare plots across all six scoping scenarios.
compare_SRA(SRA, SRA2, SRA3, SRA4, SRA5, SRA6,
scenario = list(names = c("base", "upweight dogfish", "up.dog. fix HBLL sel",
"low catch", "low catch fix HBLL sel", "est fishery/HBLL sel")),
filename = "mse/scoping/compare_scoping", dir = getwd(), open_file = FALSE, f_name = SRA_data$f_name, s_name = SRA_data$s_name,
MSY_ref = c(0.4, 0.8), render_args = list(output_format = "word_document"))
# Grid M and steepness: one simulation per (M, h) combination.
library(dplyr)
DLMtool::setup(8)
LH_grid <- expand.grid(M = seq(0.02, 0.07, 0.01), h = seq(0.65, 0.75, 0.01))
OM_condition <- readRDS("mse/scoping/OM_2sim.rds")
OM_condition@nsim <- nrow(LH_grid)
OM_condition@cpars$M <- LH_grid$M
OM_condition@cpars$h <- LH_grid$h
# Replicate the first simulation's maturity-at-age across all grid simulations.
Mat_age <- OM_condition@cpars$Mat_age[1,,1]
OM_condition@cpars$Mat_age <- array(Mat_age,
c(OM_condition@maxage, OM_condition@nyears + OM_condition@proyears, OM_condition@nsim)) %>%
aperm(perm = c(3, 1, 2))
# Upweight dogfish
SRA7 <- SRA_scope(OM_condition, data = SRA_data[data_ind], condition = "catch2", selectivity = rep("free", 2),
s_selectivity = rep("logistic", 5), cores = cores,
vul_par = SRA_data$vul_par, map_vul_par = matrix(NA, 80, 2),
map_s_vul_par = SRA_data$map_s_vul_par, map_log_rec_dev = SRA_data$map_log_rec_dev,
LWT = list(CAL = 0, CAA = 0, Index = c(1, 4, 1, 1, 1)))
saveRDS(SRA7, file = "mse/scoping/profile_M_and_h.rds")
SRA7 <- readRDS("mse/scoping/profile_M_and_h.rds")
# Profile over M at fixed h = 0.71.
plot(SRA7, sims = LH_grid$h == 0.71, file = "mse/scoping/profile_M", dir = getwd(), open_file = FALSE, f_name = SRA_data$f_name, s_name = SRA_data$s_name,
MSY_ref = c(0.4, 0.8), scenarios = list(names = paste0("M = 0.0", 2:7), col = 1:6))
# Upweight dog. fix HBLL sel
base_s_vul_par <- c(SRA@mean_fit$report$s_LFS[1], SRA@mean_fit$report$s_L5[1])
s_vul_par <- matrix(c(base_s_vul_par, 0.5), 3, 5)
map_s_vul_par <- matrix(NA, 3, 5)
#### Episodic recruitment
SRA <- readRDS("mse/OM/upweight_dogfish.rds")
set.seed(324)
# Overwrite 25 randomly chosen entries of a recruitment-deviate vector with
# draws from a higher-variance lognormal (mean-corrected by -0.5*sigma^2),
# simulating sporadic strong recruitment events.
# NOTE(review): low_sigmaR is accepted but never used — confirm intent.
sporadic_recruitment2 <- function(x, years = length(x), low_sigmaR = 0.4, high_sigmaR = 0.8) {
  require(dplyr)
  n_high <- 25
  replace_at <- sample(1:years, n_high)
  # Same RNG call order as before (sample, then rnorm), so results match
  # under a fixed seed.
  x[replace_at] <- exp(rnorm(n_high, -0.5 * high_sigmaR^2, high_sigmaR))
  x
}
# Apply the sporadic-recruitment rewrite to the projection-period recruitment
# deviates (columns 182:281), row by row, then save the modified OM.
new_Perr_y <- apply(SRA@OM@cpars$Perr_y[, 182:281], 1, sporadic_recruitment2)
SRA@OM@cpars$Perr_y[, 182:281] <- t(new_Perr_y)  # apply() returns the transpose
saveRDS(SRA, file = "mse/OM/sporadic_recruitment.rds")
# M = 0.02: low natural-mortality sensitivity OM.
# BUG FIX: the original read rep(0.02, OM@nsim), but no object `OM` is ever
# defined in this script (only OM_condition), which would error at runtime;
# use OM_condition@nsim instead.
OM_condition@cpars$M <- rep(0.02, OM_condition@nsim)
SRA <- SRA_scope(OM_condition, condition = "catch2", Chist = SRA_data$Chist, Index = SRA_data$Index, I_sd = SRA_data$I_sd, I_type = SRA_data$I_type,
selectivity = rep("logistic", 2), s_selectivity = rep("logistic", 5), length_bin = 0.1 * SRA_data$length_bin, cores = cores,
s_CAA = SRA_data$s_CAA, vul_par = SRA_data$vul_par, map_s_vul_par = SRA_data$map_s_vul_par,
map_log_rec_dev = SRA_data$map_log_rec_dev)
saveRDS(SRA, file = "mse/OM/lowM.rds")
SRA <- readRDS("mse/OM/lowM.rds")
plot(SRA, file = "mse/OM/OM_lowM", dir = getwd(), open_file = FALSE, f_name = SRA_data$f_name, s_name = SRA_data$s_name,
MSY_ref = c(0.4, 0.8))
|
2b9e7778b19591382c1247a740cd01817193d24e
|
65cd986ba44482281761b8fed4028f87ebebcd12
|
/to be deleted/share_allocation/3_ctrfact_sim_reformat.r
|
e9de0fd7f7aafcc04966b6557a4c91838aab6b24
|
[] |
no_license
|
Superet/Expenditure
|
07efeb48f700bec71bb1d1b380476dddceae76ee
|
f6e1655517b65106ea775768e935e94efa73dbaa
|
refs/heads/master
| 2021-07-14T18:09:50.415191
| 2020-06-01T19:54:30
| 2020-06-01T19:54:30
| 38,344,163
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 7,843
|
r
|
3_ctrfact_sim_reformat.r
|
library(ggplot2)
library(reshape2)
library(Rcpp)
library(RcppArmadillo)
library(maxLik)
library(evd)
library(data.table)
library(doParallel)
library(foreach)
# library(chebpol)
library(nloptr)
library(mgcv)

# On error, dump frames to file for post-mortem debugging on the cluster.
options(error = quote({dump.frames(to.file = TRUE)}))

# Household segment index, supplied by the PBS array job.
seg_id <- as.numeric(Sys.getenv("PBS_ARRAY_INDEX"))
# BUG FIX: the original line was cat("seg_id =", seg.id, "\.\n") — `seg.id`
# is undefined (the variable is seg_id) and "\." is an invalid escape
# sequence that prevents the file from even parsing.
cat("seg_id =", seg_id, ".\n")

# Allow command-line arguments of the form name=value to override settings.
args <- commandArgs(trailingOnly = TRUE)
print(args)
if(length(args)>0){
  for(i in 1:length(args)){
    eval(parse(text=args[[i]]))
  }
}
# Alternate working directories for other machines (kept for reference):
# setwd("~/Documents/Research/Store switching/processed data")
# plot.wd <- '~/Desktop'
# source("../Exercise/Multiple_discrete_continuous_model/0_Allocation_function.R")
# source("../Exercise/main/share_allocation/ctrfact_sim_functions.r")
# setwd("/home/brgordon/ccv103/Exercise/run")
# setwd("/kellogg/users/marketing/2661703/Exercise/run")
setwd("/sscc/home/c/ccv103/Exercise/run")
run_id <- 4
plot.wd <- getwd()
make_plot <- TRUE
ww <- 10
ar <- .6
source("0_Allocation_function.R")
source("ctrfact_sim_functions.r")
# Load estimation data (fitted MDCEV model for this household segment).
ver.date <- "2016-02-26"
cpi.adj <- TRUE
if(cpi.adj){
loadf <- paste("estrun_",run_id,"/MDCEV_cpi_est_seg",seg_id,"_", ver.date,".rdata",sep="")
}else{
loadf <- paste("estrun_",run_id,"/MDCEV_est_seg",seg_id,"_", ver.date,".rdata",sep="")
}
loadf
load(loadf)
# Drop large objects from the loaded workspace that are not needed here.
rm(list = intersect(ls(), c("gamfit", "shr","model_name", "tmpdat")))
# Set simulation parameters
interp.method <- "spline" # Spline interpolation or Chebyshev interpolation
exp.method <- "Utility" # Utility maximization or finding roots for first order condition
trim.alpha <- 0.05
numsim <- 1000 #numsim1 #<- 1000
draw.par <- FALSE
sim.omega <- FALSE
# Build the output file name from the active options.
fname <- paste("ctrfact_ref_seg",seg_id,sep="")
if(draw.par){ fname <- paste(fname, "_pardraw", sep="") }
if(sim.omega){fname <- paste(fname, "_simomega", sep="") }
fname <- paste(fname, "_sim", numsim, "_", as.character(Sys.Date()), sep="")
cat("Output file name is", fname, ".\n")
###############################
# Prepare simulation elements #
###############################
# Data required: parameters, income level, price, retail attributes, and random draws
# For each simulation scenario, if income, price or retail attributes change, we need to re-simulate inclusive values.
# Set simulation parameters: top-level expenditure and share-allocation estimates.
lambda <- coef(sol.top2)
shr.par <- coef(sol)
#-----------------------#
# Construct income data #
# Take the households' income in 2007 as basis
selyr <- 2007
tmp <- data.table(subset(mydata, year %in% selyr))
tmp <- tmp[,list(income = unique(income_midvalue)), by = list(household_code, year)]
sim.data<- data.frame(tmp)[,c("household_code","income")]
sim.unq <- data.frame(income2007 = unique(sim.data[,-1]))
# Counterfactual scenario: income is lower by 10%.
my.change <- .1
lnInc_08 <- lnInc + log(1 - my.change)
sim.unq$income2008 <- (1 - my.change) * sim.unq$income2007
sim.unq$Inc07 <- log(sim.unq[,"income2007"])
sim.unq$Inc08 <- log(sim.unq[,"income2008"])
sim.unq <- sim.unq[order(sim.unq$income2007),]
cat("dim(sim.unq) =", dim(sim.unq), "\n")
#----------------------------#
# Average price in year 2007 per retail channel (biweekly basket prices).
tmp <- dcast(subset(price_dat, year %in% selyr),
scantrack_market_descr + year + biweek ~ channel_type, value.var = "bsk_price_paid_2004")
price.07 <- setNames( colMeans(as.matrix(tmp[,4:(3+R)]), na.rm=T), fmt_name)
cat("The average price level in 2007:\n"); print(price.07);cat("\n")
# Average retail attributes in 2007
selcol <- c("size_index", "ln_upc_per_mod", "ln_num_module","overall_prvt")
X_list07<- setNames( lapply(fmt_name, function(x) colMeans(as.matrix(subset(fmt_attr, channel_type == x & year%in% selyr)[,selcol]))),
fmt_name)
cat("The average retail attributes in 2007:\n"); print(do.call(rbind, X_list07)); cat("\n")
# Compute delta psi = log(0.9)*alpha*X, the change in marginal utility
# implied by a 10% income drop.
tmpX <- do.call(rbind, X_list07)
d.psi <- tmpX %*% shr.par[paste("beta_", 5:8, sep="")] * log(.9)
cat("Change of marginal utility (psi) of 10% income change:\n"); print(d.psi); cat("\n")
# Expand X_list and price to match the nobs of income
price.07 <- rep(1, nrow(sim.unq)) %*% matrix(price.07, nrow = 1)
colnames(price.07) <- fmt_name
X_list07 <- lapply(X_list07, function(x)
{out <- rep(1, nrow(sim.unq)) %*% matrix(x, nrow = 1); colnames(out) <- names(x); return(out)})
#-------------------#
# Take random draws #
set.seed(666)
# GEV error draws for the share-allocation simulation.
eps_draw <- matrix(rgev(numsim*R, scale = exp(shr.par["ln_sigma"])), numsim, R)
if(draw.par){
# Optional parameter uncertainty: draw parameters from their standard errors.
par_se <- c(sqrt(diag(vcov(sol.top2))), sqrt(diag(vcov(sol))) )
par_se[is.na(par_se)] <- 0
par_draw <- sapply(par_se, function(x) rnorm(numsim, mean = 0, sd = x))
}else{
par_draw <- NULL
}
##############
# Simulation #
##############
# Expenditure nodes for interpolating the inclusive value function.
if(interp.method == "spline"){
y.nodes <- quantile(mydata$dol, c(0:50)/50)
y.nodes <- sort(unique(c(y.nodes , seq(600, 1000, 100)) ))
}else{
# Set up Chebyshev interpolation
GH_num_nodes<- 100
y.interval <- c(.1, 1000)
y.nodes <- chebknots(GH_num_nodes, interval = y.interval)[[1]]
}
numnodes<- length(y.nodes)
# Simulate expenditure and expenditure share.
pct <- proc.time()
sim.base07 <- SimWrapper_fn(omega_deriv, ln_inc = sim.unq$Inc07, lambda = lambda, param_est = shr.par, base = beta0_base,
X_list = X_list07, price = price.07, eps_draw = eps_draw, method = exp.method, ret.sim = TRUE, par.draw = par_draw)
use.time <- proc.time() - pct
cat("2007 Baseline simulation finishes with", use.time[3]/60, "min.\n")
pct <- proc.time()
sim.base08 <- SimWrapper_fn(omega_deriv, ln_inc = sim.unq$Inc08, lambda = lambda, param_est = shr.par,
base = beta0_base, X_list = X_list07, price = price.07, eps_draw = eps_draw, method = exp.method, ret.sim = TRUE, par.draw = par_draw)
use.time <- proc.time() - pct
cat("2008 Baseline simulation finishes with", use.time[3]/60, "min.\n")
# -------------- #
# Counterfactual #
# Change only attributes: make retailer A's attributes identical to each
# retailer B's in turn (prices held at 2007 levels), simulated under the
# 10%-lower 2008 income.
ret.a <- "Discount Store"
ret.b <- c("Grocery", "Dollar Store", "Warehouse Club")
ref.sim <- setNames(vector("list", length(ret.b)), ret.b)
for(i in seq_along(ret.b)){
  pct <- proc.time()
  # Copy the 2007 attribute list and overwrite retailer A's attributes with
  # retailer B's.
  X.new <- X_list07
  X.new[[ret.a]] <- X.new[[ret.b[i]]]
  # price.new <- price.07
  # price.new[,ret.a] <- price.new[, ret.b]
  if(sim.omega){
    # BUG FIX: the original passed X_list = X_list_new, a name that is never
    # defined anywhere in this script; the modified attribute list is X.new.
    ref.sim[[i]] <- SimOmega_fn(ln_inc = sim.unq[,"Inc08"], lambda = lambda, param_est = shr.par,
                                base = beta0_base, X_list = X.new, price = price.07,
                                lnInc_lv = lnInc_08, y.nodes = y.nodes, eps_draw = eps_draw, method = exp.method,
                                interp.method = interp.method, ret.sim = TRUE, alpha = trim.alpha, par.draw = par_draw)
  }else{
    ref.sim[[i]] <- SimWrapper_fn(omega_deriv = omega_deriv, ln_inc = sim.unq[,"Inc08"], lambda = lambda, param_est = shr.par,
                                  base = beta0_base, X_list = X.new, price = price.07,
                                  eps_draw = eps_draw, method = exp.method, ret.sim = TRUE, par.draw = par_draw)
  }
  use.time <- proc.time() - pct
  cat("Counterfactual finishes with", use.time[3]/60, "min.\n")
}
################
# Save results #
################
# Remove intermediates and helper functions before saving so the workspace
# image keeps only the simulation results.
rm(list = intersect(ls(), c("ar", "args", "cl", "ggtmp", "ggtmp1", "ggtmp2", "i", "lastFuncGrad", "lastFuncParam", "make_plot", "mycore",
"myfix", "plot.wd", "s1_index", "sel", "selyr", "price", "sol", "sol.top", "sol.top2", "tmp", "tmp_coef",
"tmp_price", "tmp1", "tmp2", "use.time", "ver.date", "var1", "ww", "f",
"numnodes", "out", "out1", "pct", "tmpd1", "tmpd2", "tmpdat", "u", "W", "y", "y.nodes",
"Allocation_constr_fn", "Allocation_fn", "Allocation_nlop_fn", "cheb.1d.basis", "cheb.basis", "chebfun",
"exp_fn", "expFOC_fn", "incl_value_fn", "mysplfun", "mytrimfun", "param_assignR", "simExp_fn", "SimOmega_fn",
"SimWrapper_fn", "solveExp_fn", "spl2dfun", "uP_fn", "uPGrad_fn", "X_list")))
save.image(paste("estrun_",run_id, "/", fname, ".rdata",sep=""))
cat("This program is done.\n")
|
75220a794ffcd35a6782b19423a1da88139e6f96
|
e051cfb06eb74bc41448c523df8930f38d20aac6
|
/man/countReads-methods.Rd
|
61d9a157e7b6ac7a1187d661b0d37b009b31bc98
|
[] |
no_license
|
duydnguyen/tan-coverage
|
6ed78bb23adc9554d6b17296657a67592cdf68af
|
2aa9f8b9c4da7dbaa7f464b42dfa2b46db8a874f
|
refs/heads/master
| 2021-06-16T17:01:38.450390
| 2017-06-01T15:37:02
| 2017-06-01T15:37:02
| 93,070,691
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 609
|
rd
|
countReads-methods.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/AllGenerics.R, R/methods-segvis.R
\docType{methods}
\name{countReads}
\alias{countReads}
\alias{countReads,segvis-method}
\alias{countReads}
\title{countReads method for segvis class}
\usage{
countReads(object)
\S4method{countReads}{segvis}(object)
}
\arguments{
\item{object}{segvis object}
}
\value{
The number of reads in the bam file considered for the \code{segvis} object
}
\description{
Counts the number of reads considered in the \code{segvis} object.
}
\examples{
\dontrun{
countReads(segvis)
}
}
\seealso{
\code{\link{segvis-class}}
}
|
dd05daae026308b7818dfa28d5b3402540ef4a77
|
370b017a04a617ddaf948052bfab29d1d5452fb5
|
/Exploratory_EBC.R
|
c2cd362098598c553fafc0f06ad6356d21ff6e51
|
[] |
no_license
|
DrMattG/ES_Conservation
|
221dd6fb2877e89cdc6261c29573dfead62939ac
|
c142b047689d94bf0333d9d3c76301ee117e0609
|
refs/heads/master
| 2022-11-24T13:32:25.528600
| 2020-08-04T09:28:36
| 2020-08-04T09:28:36
| 284,936,444
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 13,899
|
r
|
Exploratory_EBC.R
|
#Results: 557
#(from Web of Science Core Collection)
#You searched for: TOPIC: ("evidence-based" "conservation")
#Refined by: WEB OF SCIENCE CATEGORIES: ( ECOLOGY OR ENVIRONMENTAL SCIENCES OR BIODIVERSITY CONSERVATION OR ENVIRONMENTAL STUDIES OR ZOOLOGY )
#Timespan: All years. Indexes: SCI-EXPANDED, SSCI, A&HCI, ESCI.
# Load network/visualisation, bibliometric and text-mining packages used below.
library(visNetwork)
library(bibliometrix)
library(igraph)
library(here)
library(tidytext)
library(textmineR)
library(tidyverse)
library(reshape2)
library(wordcloud)
# Read the two Web of Science .bib exports and merge them into a single
# bibliometrix data frame (one row per publication).
file1 <-paste0(here(),"/Data/EBC/EBC.bib")
file2 <-paste0(here(),"/Data/EBC/EBC2.bib")
M <- convert2df(file = c(file1,file2), dbsource = "isi", format = "bibtex")
M
# Descriptive bibliometric statistics (top-10 authors, sources, etc.).
results <- biblioAnalysis(M, sep = ";")
options(width=100)
S <- summary(object = results, k = 10, pause = FALSE)
# Per-article citation counts from the cited-reference (CR) field.
CR <- citations(M, field = "article", sep = ";")
#cbind(CR$Cited[1:10])
# Document x cited-reference occurrence matrix (rows = papers, cols = refs).
A <- cocMatrix(M, Field = "CR", sep = ";")
#NetMatrix <- biblioNetwork(M, analysis = "co-citation", network = "references", sep = ";")
# Plot the network
#net=networkPlot(NetMatrix, n = 30, Title = "Co-Citation Network", type = "fruchterman", size=T, remove.multiple=FALSE, labelsize=0.7,edgesize = 5)
# Create a historical citation network
#options(width=130)
#histResults <- histNetwork(M, min.citations = 1, sep = ";")
#net <- histPlot(histResults, n=30, size = 10, labelsize=5)
# Build a bipartite graph (papers x cited references) from the occurrence matrix.
Cites<-igraph::graph_from_incidence_matrix(A)
V(Cites)$name
# Parse publication year and journal out of the vertex name strings;
# assumes the last 4-digit run in the name is the year.
V(Cites)$Year=sub("^.*([0-9]{4}).*", "\\1", V(Cites)$name)
V(Cites)$Journal=sub(".*([0-9]{4})", "", V(Cites)$name)
deg <- igraph::degree(Cites, mode="all")
sort(deg)
#plot(Cites, vertex.label=NA, vertex.size=deg)
# Prune: drop low-degree vertices (<5), collapse multi-edges/loops, then drop
# any vertices left isolated by the pruning.
Cites = igraph::delete.vertices(Cites,igraph::degree(Cites)<5)
Cites<-simplify(Cites)
Cites<-delete.vertices(simplify(Cites), degree(Cites)==0)
vn <- toVisNetworkData(Cites)
vn$nodes$title<-vn$nodes$label
unique(vn$nodes$Year)
# #Repair some year values Early view
#vn$nodes$Year[1]="2020"
#vn$nodes$Year[2]="2020"
#vn$nodes$Year[3]="2020"
#vn$nodes$Year[4]="2020"
# Manual fixes for nodes whose year was mis-parsed (e.g. early-view papers).
# NOTE(review): these hard-coded indices are fragile — they silently point at
# the wrong node if the input data or the pruning above changes node order.
vn$nodes$Year[5]="2020"
vn$nodes$Year[6]="2020"
vn$nodes$Year[7]="2020"
# vn$nodes$Year[9]="2012"
# vn$nodes$Year[10]="2020"
vn$nodes$Year[11]="2020"
vn$nodes$Year[12]="2020"
vn$nodes$Year[19]="2020"
vn$nodes$Year[20]="2020"
vn$nodes$Year[41]="2020"
vn$nodes$Year[659]="2013"
vn$nodes$Year[699]="2000"
# vn$nodes$Year[13]="2020"
# vn$nodes$Year[14]="2020"
# vn$nodes$Year[16]="2020"
# vn$nodes$Year[17]="2020"
# vn$nodes$Year[52]="2020"
# vn$nodes$Year[53]="2020"
# Tidy journal strings: drop ", " separators and trim surrounding whitespace.
vn$nodes$Journal<-gsub(", ", "",vn$nodes$Journal)
vn$nodes$Journal<-trimws(vn$nodes$Journal, which = "both", whitespace = "[ \t\r\n]")
unique(vn$nodes$Journal)
#
# vn$nodes$Journal[2]<-"OIKOS"
# vn$nodes$Journal[3]<-"AMBIO"
# vn$nodes$Journal[7]<-"URBAN ECOSYST"
# vn$nodes$Journal[9]<-"CONSERV BIOL"
# vn$nodes$Journal[10]<-"CONSERV BIOL"
# vn$nodes$Journal[11]<-"CONSERV BIOL"
# vn$nodes$Journal[12]<-"CONSERV BIOL"
# vn$nodes$Journal[13]<-"CONSERV BIOL"
# vn$nodes$Journal[14]<-"AMBIO"
# vn$nodes$Journal[16]<-"PRIMATOL"
# vn$nodes$Journal[17]<-"RESTOR ECOL"
# vn$nodes$Journal[53]<-"SOC NAT RESOUR"
# vn$nodes$Journal[52]<-"URBAN AFF"
#
#
# # Nodes are sized by degree (the number of links to other packages)
degree_value <- degree(Cites, mode = "all")
vn$nodes$value <- degree_value[match(vn$nodes$id, names(degree_value))]
# vn$nodes$x<-as.numeric(vn$nodes$Year)
#
# unique(vn$nodes$x)
#
#
# vn$nodes$color<-ifelse(vn$nodes$x>=2015, "red",
# ifelse(vn$nodes$x>=2010, "green",
# ifelse(vn$nodes$x>=2005, "brown",
# ifelse(vn$nodes$x>=2000,"green",
# ifelse(vn$nodes$x>=1995, "yellow", "lightblue")))))
#
#unique(vn$nodes$Year)
# Colour all nodes blue, then mark one specific node (index 637) red.
# NOTE(review): the hard-coded index 637 — presumably the 1968 paper located
# by the which() call above — breaks silently if node order changes.
which(vn$nodes$Year=="1968")
vn$nodes$color="blue"
vn$nodes$color[637]<-"red"
vn$edges$arrows<-"to"
# Save an interactive network; hovering highlights direct neighbours (degree 1).
visNetwork(nodes = vn$nodes, edges = vn$edges,main="EBC",height = "500px", width = "100%")%>%
#visOptions(highlightNearest = TRUE)%>%
visOptions(highlightNearest = list(enabled = TRUE, degree = 1)) %>%
visNodes() %>%
visSave(file =paste0(here(),"/plots/EBC.html"), selfcontained = T)
#Find subgraphs
# Community detection (fast-greedy modularity optimisation) on the pruned graph.
c1 = cluster_fast_greedy(Cites)
# modularity measure
modularity(c1)
coords = layout_with_fr(Cites)
plot(c1, Cites, layout=coords, )
membership(c1)
sizes(c1)
vn$nodes$group=membership(c1)
# Map each community (1-10) to the nearest named colour of a magma palette
# entry; any community beyond 10 falls through to the 11th colour.
# NOTE(review): plotrix::color.id() may return more than one colour name;
# ifelse() then recycles positionally — verify the colours come out as intended.
cols<-viridis::magma(11)
plotrix::color.id(cols[3])
vn$nodes$color<-ifelse(vn$nodes$group==1, plotrix::color.id(cols[1]),
ifelse(vn$nodes$group==2, plotrix::color.id(cols[2]),
ifelse(vn$nodes$group==3, plotrix::color.id(cols[3]),
ifelse(vn$nodes$group==4,plotrix::color.id(cols[4]),
ifelse(vn$nodes$group==5,plotrix::color.id(cols[5]),
ifelse(vn$nodes$group==6,plotrix::color.id(cols[6]),
ifelse(vn$nodes$group==7,plotrix::color.id(cols[7]),
ifelse(vn$nodes$group==8,plotrix::color.id(cols[8]),
ifelse(vn$nodes$group==9,plotrix::color.id(cols[9]),
ifelse(vn$nodes$group==10, plotrix::color.id(cols[10]), plotrix::color.id(cols[11])))))))))))
# Save the community-coloured interactive network.
visNetwork(nodes = vn$nodes, edges = vn$edges,main="EBC",height = "500px", width = "100%")%>%
visOptions(highlightNearest = TRUE)%>%
visOptions(highlightNearest = list(enabled = TRUE, degree = 1)) %>%
visNodes() %>%
visSave(file =paste0(here(),"/plots/EBC_col.html"), selfcontained = T)
#
#
# unique(vn$nodes$Journal)
#
# vn$nodes$color<-ifelse(vn$nodes$Journal=="CONSERV BIOL", "red","blue")
#
# visNetwork(nodes = vn$nodes, edges = vn$edges,main="Coexist",height = "500px", width = "100%")%>%
# #visOptions(highlightNearest = TRUE)%>%
# visOptions(highlightNearest = list(enabled = TRUE, degree = 2)) %>%
# visNodes() %>%
# visSave(file =paste0(here(),"/plots/Coexist_conb.html"), selfcontained = T)
###############################################################################################################
#
#
# # extract the abstracts of main papers
#
# data=data.frame("Title"=as.character(M$TI),"Abstract"= as.character(M$AB), stringsAsFactors = FALSE)
# text_df <- mutate(data, text = data$Abstract)
# text_df<-text_df %>%
# rowid_to_column()
# text_cleaning_tokens<-text_df %>%
# unnest_tokens(word, text)
# text_cleaning_tokens$word <- gsub('[[:digit:]]+', '', text_cleaning_tokens$word)
# text_cleaning_tokens$word <- gsub('[[:punct:]]+', '', text_cleaning_tokens$word)
# text_cleaning_tokens <- text_cleaning_tokens %>% filter(!(nchar(word) == 1))%>%
# anti_join(stop_words)
# tokens <- text_cleaning_tokens %>% filter(!(word==""))
# tokens <- tokens %>% mutate(ind = row_number())
# tokens <- tokens %>% group_by(Title) %>% mutate(ind = row_number()) %>%
# tidyr::spread(key = ind, value = word)
# tokens [is.na(tokens)] <- ""
# tokens <- tidyr::unite(tokens, text,-Title,sep =" " )
# tokens$text <- trimws(tokens$text)
#
#
# #create DTM
# dtm <- CreateDtm(tokens$text,
# doc_names = tokens$Title,
# ngram_window = c(1, 2))
# #explore the basic frequency
# tf <- TermDocFreq(dtm = dtm)
# original_tf <- tf %>% select(term, term_freq,doc_freq)
# rownames(original_tf) <- 1:nrow(original_tf)
# # Eliminate words appearing less than 2 times or in more than half of the
# # documents
# vocabulary <- tf$term[ tf$term_freq > 1 & tf$doc_freq < nrow(dtm) / 2 ]
# dtm = dtm
#
# k_list <- seq(1, 40, by = 1)
# model_dir <- paste0("models_", digest::digest(vocabulary, algo = "sha1"))
# if (!dir.exists(model_dir)) dir.create(model_dir)
# model_list <- TmParallelApply(X = k_list, FUN = function(k){
# filename = file.path(model_dir, paste0(k, "_topics.rda"))
#
# if (!file.exists(filename)) {
# m <- FitLdaModel(dtm = dtm, k = k, iterations = 500)
# m$k <- k
# m$coherence <- CalcProbCoherence(phi = m$phi, dtm = dtm, M = 5)
# save(m, file = filename)
# } else {
# load(filename)
# }
#
# m
# }, export=c("dtm", "model_dir")) # export only needed for Windows machines
# #model tuning
# #choosing the best model
# coherence_mat <- data.frame(k = sapply(model_list, function(x) nrow(x$phi)),
# coherence = sapply(model_list, function(x) mean(x$coherence)),
# stringsAsFactors = FALSE)
# ggplot(coherence_mat, aes(x = k, y = coherence)) +
# geom_point() +
# geom_line(group = 1)+
# ggtitle("Best Topic by Coherence Score") + theme_minimal() +
# scale_x_continuous(breaks = seq(1,40,1)) + ylab("Coherence")
#
# model <- model_list[which.max(coherence_mat$coherence)][[ 1 ]]
# model$top_terms <- GetTopTerms(phi = model$phi, M = 40)
# top20_wide <- as.data.frame(model$top_terms)
#
# allterms <-data.frame(t(model$phi))
#
# allterms$word <- rownames(allterms)
#
# rownames(allterms) <- 1:nrow(allterms)
#
# allterms <- melt(allterms,idvars = "word")
#
# allterms <- allterms %>% rename(topic = variable)
#
# FINAL_allterms <- allterms %>% group_by(topic) %>% arrange(desc(value))
#
#
#
# model$topic_linguistic_dist <- CalcHellingerDist(model$phi)
# model$hclust <- hclust(as.dist(model$topic_linguistic_dist), "ward.D")
# model$hclust$labels <- paste(model$hclust$labels, model$labels[ , 1])
# plot(model$hclust)
#
# final_summary_words <- data.frame(top_terms = t(model$top_terms))
# final_summary_words$topic <- rownames(final_summary_words)
# rownames(final_summary_words) <- 1:nrow(final_summary_words)
# final_summary_words <- final_summary_words %>% melt(id.vars = c("topic"))
# final_summary_words <- final_summary_words %>% rename(word = value) %>% select(-variable)
# final_summary_words <- left_join(final_summary_words,allterms)
# final_summary_words <- final_summary_words %>% group_by(topic,word) %>%
# arrange(desc(value))
# final_summary_words <- final_summary_words %>% group_by(topic, word) %>% filter(row_number() == 1) %>%
# ungroup() %>% tidyr::separate(topic, into =c("t","topic")) %>% select(-t)
# word_topic_freq <- left_join(final_summary_words, original_tf, by = c("word" = "term"))
#
# for(i in 1:length(unique(final_summary_words$topic)))
# { wordcloud(words = subset(final_summary_words ,topic == i)$word, freq = subset(final_summary_words ,topic == i)$value, min.freq = 1,
# max.words=200, random.order=FALSE, rot.per=0.35,
# colors=brewer.pal(8, "Dark2"))}
#
# dev.off()
#
# d<-dtm
# p <- as.data.frame(predict(object = model, newdata = d, method = "dot"))
# names(p)
#
# p[, "max"] <- apply(p, 1, max)
# Repeat the analysis for the 100+-citation subset (Cite100.bib).
file <-paste0(here(),"/Data/EBC/Cite100.bib")
M <- convert2df(file = file, dbsource = "isi", format = "bibtex")
M
results <- biblioAnalysis(M, sep = ";")
options(width=100)
S <- summary(object = results, k = 10, pause = FALSE)
CR <- citations(M, field = "article", sep = ";")
#cbind(CR$Cited[1:10])
# Document x cited-reference occurrence matrix, then a bipartite igraph.
A <- cocMatrix(M, Field = "CR", sep = ";")
Cites<-igraph::graph_from_incidence_matrix(A)
V(Cites)$name
# Parse year and journal from vertex names, as for the full data set.
V(Cites)$Year=sub("^.*([0-9]{4}).*", "\\1", V(Cites)$name)
V(Cites)$Journal=sub(".*([0-9]{4})", "", V(Cites)$name)
deg <- igraph::degree(Cites, mode="all")
sort(deg)
#plot(Cites, vertex.label=NA, vertex.size=deg)
# No degree pruning here (unlike the full data set) — all nodes are kept.
vn <- toVisNetworkData(Cites)
vn$nodes$title<-vn$nodes$label
# unique(vn$nodes$Year)
# # #Repair some year values Early view
# #vn$nodes$Year[1]="2020"
# #vn$nodes$Year[2]="2020"
# #vn$nodes$Year[3]="2020"
# #vn$nodes$Year[4]="2020"
# vn$nodes$Year[5]="2020"
# vn$nodes$Year[6]="2020"
# vn$nodes$Year[7]="2020"
# # vn$nodes$Year[9]="2012"
# # vn$nodes$Year[10]="2020"
# vn$nodes$Year[11]="2020"
# vn$nodes$Year[12]="2020"
# vn$nodes$Year[19]="2020"
# vn$nodes$Year[20]="2020"
# vn$nodes$Year[41]="2020"
# vn$nodes$Year[659]="2013"
# vn$nodes$Year[699]="2000"
# # vn$nodes$Year[13]="2020"
# # vn$nodes$Year[14]="2020"
# # vn$nodes$Year[16]="2020"
# # vn$nodes$Year[17]="2020"
# # vn$nodes$Year[52]="2020"
# # vn$nodes$Year[53]="2020"
#
# vn$nodes$Journal<-gsub(", ", "",vn$nodes$Journal)
# vn$nodes$Journal<-trimws(vn$nodes$Journal, which = "both", whitespace = "[ \t\r\n]")
# unique(vn$nodes$Journal)
# #
# # vn$nodes$Journal[2]<-"OIKOS"
# # vn$nodes$Journal[3]<-"AMBIO"
# # vn$nodes$Journal[7]<-"URBAN ECOSYST"
# # vn$nodes$Journal[9]<-"CONSERV BIOL"
# # vn$nodes$Journal[10]<-"CONSERV BIOL"
# # vn$nodes$Journal[11]<-"CONSERV BIOL"
# # vn$nodes$Journal[12]<-"CONSERV BIOL"
# # vn$nodes$Journal[13]<-"CONSERV BIOL"
# # vn$nodes$Journal[14]<-"AMBIO"
# # vn$nodes$Journal[16]<-"PRIMATOL"
# # vn$nodes$Journal[17]<-"RESTOR ECOL"
# # vn$nodes$Journal[53]<-"SOC NAT RESOUR"
# # vn$nodes$Journal[52]<-"URBAN AFF"
# #
# #
# # Nodes are sized by degree (the number of links to other packages)
# Size nodes by degree and save the Cite100 interactive network.
degree_value <- degree(Cites, mode = "all")
vn$nodes$value <- degree_value[match(vn$nodes$id, names(degree_value))]
# vn$nodes$x<-as.numeric(vn$nodes$Year)
#
# unique(vn$nodes$x)
#
#
# vn$nodes$color<-ifelse(vn$nodes$x>=2015, "red",
# ifelse(vn$nodes$x>=2010, "green",
# ifelse(vn$nodes$x>=2005, "brown",
# ifelse(vn$nodes$x>=2000,"green",
# ifelse(vn$nodes$x>=1995, "yellow", "lightblue")))))
#
#unique(vn$nodes$Year)
#which(vn$nodes$Year=="1968")
#vn$nodes$color="blue"
#vn$nodes$color[637]<-"red"
vn$edges$arrows<-"to"
visNetwork(nodes = vn$nodes, edges = vn$edges,main="EBC cited 100+",height = "500px", width = "100%")%>%
#visOptions(highlightNearest = TRUE)%>%
visOptions(highlightNearest = list(enabled = TRUE, degree = 1)) %>%
visNodes() %>%
visSave(file =paste0(here(),"/plots/EBC_cited100.html"), selfcontained = T)
|
19c2854bcfd254a29ce572d9612467ff2c3fb855
|
cbf79fbcb32d9d13dd5b7ef258fc98b424a9c61b
|
/src/shuffle_gtf.R
|
89c0322acbf6df61f7ac682ce57e4084d56f7401
|
[] |
no_license
|
TomHarrop/5acc
|
5a15ad07f1527f880904752a2582b1f569761686
|
3b2b443ec9344b8967e582be666e85a5c56afd84
|
refs/heads/master
| 2021-01-03T13:22:26.812766
| 2019-07-30T05:28:49
| 2019-07-30T05:28:49
| 38,441,066
| 2
| 0
| null | 2018-09-19T22:47:19
| 2015-07-02T15:34:39
|
R
|
UTF-8
|
R
| false
| false
| 6,077
|
r
|
shuffle_gtf.R
|
#!/usr/bin/env Rscript
# Shuffle gene-sized features to random genomic positions (a null/control GFF),
# excluding all annotated genes, repeats and miRNAs. Driven by snakemake.
library(data.table)
library(dplyr)
library(GenomicRanges)
library(rtracklayer)
library(valr)
###########
# GLOBALS #
###########
# Input/output paths and resources are injected by the snakemake rule.
os_gff_file <- snakemake@input[["os_gff_file"]]
os_gtf_file <- snakemake@input[["os_gtf_file"]]
seqlengths_file <- snakemake@input[["seqlengths_file"]]
irgsp_gff_file <- snakemake@input[["irgsp_gff_file"]]
osa1r7_gff_file <- snakemake@input[["osa1r7_gff_file"]]
osa1_mirbase_gff_file <- snakemake@input[["osa1_mirbase_gff_file"]]
tigr_repeats_fa <- snakemake@input[["tigr_repeats_fa"]]
star_index_dir <- snakemake@params[["star_index_dir"]]
cpus <- snakemake@threads[[1]]
log_file <- snakemake@log[["log"]]
shuffled_gff_file <- snakemake@output[["shuffled_gff"]]
########
# MAIN #
########
# set log
# Redirect both messages and regular output to the snakemake log file.
log <- file(log_file, open = "wt")
sink(log, type = "message")
sink(log, append = TRUE, type = "output")
# load tbl genome
genome <- read_genome(seqlengths_file)
# get genes
os_gff_genes <- import.gff(os_gff_file, feature.type = "gene")
# slop genes
# Extend each gene by 100 bp on both sides so shuffled features keep clear.
slopped_genes_tbl <- bed_slop(as.tbl_interval(os_gff_genes),
genome,
both = 100)
# fix irgsp gff
# Drop three malformed lines (282, 537, 913) from the IRGSP GFF with sed
# before parsing; each sed pass writes into a fresh tempfile.
irgsp_tmp1 <- tempfile(fileext = ".gff")
irgsp_tmp2 <- tempfile(fileext = ".gff")
irgsp_tmp3 <- tempfile(fileext = ".gff")
system2("sed",
args = c("282d", irgsp_gff_file),
stdout = irgsp_tmp1,
stderr = log_file)
system2("sed",
args = c("537d", irgsp_tmp1),
stdout = irgsp_tmp2,
stderr = log_file)
system2("sed",
args = c("913d", irgsp_tmp2),
stdout = irgsp_tmp3,
stderr = log_file)
irgsp_gff <- import.gff(irgsp_tmp3)
# rename chromosomes: "chr01" -> "Chr1" etc., to match the other annotations
slr <- gsub("0(\\d)",
"\\1",
sub("chr",
"Chr",
seqlevels(irgsp_gff)))
names(slr) <- seqlevels(irgsp_gff)
seqlevels(irgsp_gff) <- slr
irgsp_tbl <- as.tbl_interval(irgsp_gff)
# load osa1r7 gff
osa1r7_gff <- import.gff(osa1r7_gff_file)
osa1r7_tbl <- as.tbl_interval(osa1r7_gff)
# load osa.gff3 miRBase miRNAs
osa1_mirbase_gff <- import.gff(osa1_mirbase_gff_file)
osa1_mirbase_tbl <- as.tbl_interval(osa1_mirbase_gff)
# simulate reads from tigr repeats
# Generate error-free 55 bp paired-end reads from the TIGR repeat library with
# wgsim (no mutations/indels), so repeat-derived regions can be found by mapping.
wgsim1 <- tempfile(fileext = ".wgsim.1.fq")
wgsim2 <- tempfile(fileext = ".wgsim.2.fq")
system2("wgsim",
        args = c("-e", "0",
                 "-1", "55",
                 "-2", "55",
                 "-r", "0",
                 "-R", "0",
                 "-X", "0",
                 "-d", "0",
                 "-s", "0",
                 tigr_repeats_fa,
                 wgsim1,
                 wgsim2),
        # fix: removed a stray empty argument (",,") that silently matched
        # system2()'s next positional formal (stdin)
        stdout = log_file,
        stderr = log_file)
# map tigr repeats
# Map the simulated repeat reads with STAR, keeping all multimappers
# (--outFilterMultimapNmax -1) and writing a coordinate-sorted BAM.
star_outdir <- tempdir()
prefix <- paste(star_outdir, "TIGR_Oryza_Repeats.", sep = "/")
system2("STAR",
        args = c("--runThreadN",
                 cpus,
                 "--genomeDir",
                 star_index_dir,
                 "--outSAMtype",
                 "BAM SortedByCoordinate",
                 "--outFilterMultimapNmax",
                 "-1",
                 "--outBAMcompression",
                 "10 ",
                 "--readFilesIn",
                 wgsim1, wgsim2,
                 "--outFileNamePrefix",
                 prefix),
        # fix: removed a stray empty argument (",,") that silently matched
        # system2()'s next positional formal (stdin)
        stdout = log_file,
        stderr = log_file)
# convert BAM to bed
# Each alignment becomes one BED interval marking a repeat-derived region.
rpt_bed <- tempfile(fileext = ".bed6")
star_bamfile <- paste0(prefix, "Aligned.sortedByCoord.out.bam")
system2("bedtools",
        args = c("bamtobed",
                 "-i", star_bamfile),
        stdout = rpt_bed,
        stderr = log_file)
# read bed hits
rpt_hits <- import.bed(rpt_bed)
rpt_tbl <- as.tbl_interval(rpt_hits)
# merge with valr
# Pool all exclusion interval sets (slopped genes, IRGSP, osa1r7, miRBase
# miRNAs, repeat hits) and merge overlaps into one exclusion track.
all_tbl <- dplyr::bind_rows(slopped_genes_tbl,
irgsp_tbl,
osa1r7_tbl,
osa1_mirbase_tbl,
rpt_tbl)
merged_tbl <- bed_merge(all_tbl)
# prepare a dummy GFF for shuffling
os_gff_exons <- import.gff(os_gtf_file, feature.type = "exon", format = "gtf")
grl <- GenomicRanges::reduce(split(os_gff_exons,
elementMetadata(os_gff_exons)$gene_name))
# NOTE(review): `grl` (the reduced per-gene exon ranges) is never used below;
# unlist() here runs on the raw exons, so overlapping exons are counted twice
# in the length sums — confirm whether `unlist(grl, ...)` was intended.
gtf_reduced <- unlist(os_gff_exons, use.names = FALSE)
# add metadata
elementMetadata(gtf_reduced)$widths <- width(gtf_reduced)
# calculate feature lengths with dplyr
feature_length_tbl <- group_by(as.data.frame(gtf_reduced), gene_name) %>%
summarize(length = sum(widths))
feature_lengths <- data.table(Length = feature_length_tbl$length,
rn = feature_length_tbl$gene_name,
key = "rn")
# Only shuffle features below the 90th length percentile (very long features
# are hard to place randomly without hitting the exclusion track).
to_shuffle <- feature_lengths[Length < quantile(feature_lengths$Length, 0.9),
unique(rn)]
# generate dummy ranges
# Keep each gene's chromosome and strand but reset coordinates to 1..Length;
# bed_shuffle() below picks the actual random start positions.
gene_chromosome <- unique(
data.table(rn = os_gff_genes$Name,
seqid = as.character(GenomeInfoDb::seqnames(os_gff_genes)),
strand = as.character(rtracklayer::strand(os_gff_genes)),
key = "rn"))
dummy_gff_dt <- gene_chromosome[feature_lengths, .(
chrom = seqid,
source = 'phytozomev10',
type = 'CDS',
start = 1,
end = Length,
score = ".",
strand,
phase = ".",
ID = rn
)]
dummy_gff_tbl <- as.tbl_interval(dummy_gff_dt)
# shuffle
# Random placement on the same chromosome (within = TRUE), excluding the merged
# annotation track and the unassembled ChrSy/ChrUn; fixed seed for reproducibility.
shuffled_gtf <- bed_shuffle(
dummy_gff_tbl %>% filter(
(!chrom %in% c("ChrSy", "ChrUn")) &
ID %in% to_shuffle),
genome,
excl = merged_tbl,
within = TRUE,
seed = 1)
# convert to Granges
shuffled_gr <- makeGRangesFromDataFrame(shuffled_gtf,
keep.extra.columns=FALSE,
ignore.strand=FALSE,
seqinfo=NULL,
seqnames.field="chrom",
start.field="start",
end.field="end",
strand.field="strand")
names(shuffled_gr) <- shuffled_gtf$ID
shuffled_gr$type <- "CDS"
# write output
export(shuffled_gr, shuffled_gff_file, "gff3")
# write log
# Record package versions in the log for reproducibility.
sessionInfo()
|
fa88d3938e747a45c2a027e2bf3393fc10c1f660
|
761f685716e4707544dfa397160bced4f683f512
|
/archive/rna-seq_three_lab.R
|
c7bd6c2c18b1bede81a2485531e9cba6a4694e2d
|
[
"MIT"
] |
permissive
|
Zhang-lab/RNA-seq_QC_analysis
|
69c1849c345324d68463b490f955c6b7cf1965f2
|
8a193be3b2046b25a23d6d4123b7fbf424adc97d
|
refs/heads/master
| 2021-06-05T01:39:20.511643
| 2021-04-23T15:32:13
| 2021-04-23T15:32:13
| 113,912,760
| 3
| 9
| null | null | null | null |
UTF-8
|
R
| false
| false
| 5,186
|
r
|
rna-seq_three_lab.R
|
library("DESeq2")
# load count matrix######################################################################################################################
# NOTE(review): hard-coded setwd() makes this script machine-specific;
# consider relative paths or an explicit data directory argument.
setwd("/Users/chengl/Desktop/")
Bartolomei=read.table("Bartolomei.txt",header=T,sep="\t")
Dolinoy=read.table("Dolinoy.txt",header=T,sep="\t")
Mutlu=read.table("Mutlu.txt",header=T,sep="\t")
# First column holds gene IDs -> used as rownames; the two leading columns are
# then dropped (two successive [,-1]), leaving only sample count columns.
rownames(Bartolomei)=Bartolomei[,1]
Bartolomei=Bartolomei[,-1]
Bartolomei=Bartolomei[,-1]
rownames(Dolinoy)=Dolinoy[,1]
Dolinoy=Dolinoy[,-1]
Dolinoy=Dolinoy[,-1]
# Remove one excluded sample. NOTE(review): `-which(...)` drops EVERY column
# when the name is absent; this is safe only while that sample exists.
Dolinoy=Dolinoy[,-which(colnames(Dolinoy)=="T105c_Lead_F_Liver_5mo.R1")]
rownames(Mutlu)=Mutlu[,1]
Mutlu=Mutlu[,-1]
Mutlu=Mutlu[,-1]
# Combined gene x sample count matrix across the three labs.
countdata=cbind(Bartolomei,Dolinoy,Mutlu)
# load experiment design#####################################################################################################################
# Bartolomei lab: design table on disk; all 17 samples are liver.
# (`label=` partially matches factor()'s `labels` argument.)
tt=read.table("BartolomeiLab_exp_design.txt",header=T,sep="\t")
Tissue=factor(rep(1,17),label="Liver")
tt=cbind(tt,Tissue)
group1=tt[tt$SAMPLE%in%colnames(Bartolomei),2]
sex1=tt[tt$SAMPLE%in%colnames(Bartolomei),3]
tissue1=tt[tt$SAMPLE%in%colnames(Bartolomei),4]
# Dolinoy lab: no design file — infer sex, exposure group and tissue from
# substrings of each sample name ("M", "Ctrl"/"Lead"/DEHP, "Liver"/Blood).
sex=c()
group=c()
tissue=c()
for(i in 1:dim(Dolinoy)[2]) {
if(length(grep("M",colnames(Dolinoy)[i],fixed=T))==1) {
sex=c(sex,"MALE")
} else {
sex=c(sex,"FEMALE")
}
if(length(grep("Ctrl",colnames(Dolinoy)[i],fixed=T))==1) {
group=c(group,"Ctrl")
} else if(length(grep("Lead",colnames(Dolinoy)[i],fixed=T))==1) {
group=c(group,"Lead")
} else{
group=c(group,"DEHP")
}
if(length(grep("Liver",colnames(Dolinoy)[i],fixed=T))==1) {
tissue=c(tissue,"Liver")
} else {
tissue=c(tissue,"Blood")
}
}
sex2=sex
group2=group
tissue2=tissue
# Mutlu lab: design table on disk; 24 samples each of lung, liver, heart.
tt=read.table("MutluLab_exp_design.txt",header=T,sep="\t")
Tissue=factor(c(rep(0,24),rep(1,24),rep(2,24)),label=c("Lung","Liver","Heart"))
tt=cbind(tt,Tissue)
group3=tt[tt$Samples%in%colnames(Mutlu),3]
sex3=tt[tt$Samples%in%colnames(Mutlu),4]
tissue3=tt[tt$Samples%in%colnames(Mutlu),5]
# Combined sample metadata, ordered to match the columns of countdata.
group=as.factor(c(as.character(group1),as.character(group2),as.character(group3)))
sex=as.factor(c(as.character(sex1),as.character(sex2),as.character(sex3)))
lab=factor(c(rep(0,ncol(Bartolomei)),rep(1,ncol(Dolinoy)),rep(2,ncol(Mutlu))),label=c("Bartolomei","Dolinoy","Mutlu"))
tissue=as.factor(c(as.character(tissue1),as.character(tissue2),as.character(tissue3)))
colData=data.frame(lab,sex,group,tissue)
rownames(colData)=colnames(countdata)
# create DESeq object and pre-filter#####################################################################################################################
# Keep genes with FPM > 10 in more than 10 samples (unscaled fpm, robust=F).
dds=DESeqDataSetFromMatrix(countData=countdata,colData=colData,design=~lab+sex+tissue)
dds=dds[rowSums(fpm(dds,robust=F)>10)>10,]
# transformation#####################################################################################################################
# Variance-stabilising transform for distance/PCA (blind=FALSE uses the design).
rld=vst(dds,blind=FALSE)
# distance analysis#####################################################################################################################
library("pheatmap")
library("RColorBrewer")
library(grid)
# Euclidean distance between samples on the VST-transformed counts.
sampleDists=dist(t(assay(rld)))
sampleDistMatrix=as.matrix(sampleDists)
rownames(sampleDistMatrix)=tissue
colnames(sampleDistMatrix)=lab
colors=colorRampPalette(rev(brewer.pal(9,"Blues")))(255)
png("distance_ALL.png",height=3700,width=3700,res=300)
# Shrink the plotting viewport so axis captions added below fit around it.
setHook("grid.newpage", function() pushViewport(viewport(x=1,y=1,width=0.9, height=0.9, name="vp", just=c("right","top"))), action="prepend")
pheatmap(sampleDistMatrix,clustering_distance_rows=sampleDists,clustering_distance_cols=sampleDists,col=colors,main="Heatmap of similarity between ALL samples based on Euclidean distance")
setHook("grid.newpage", NULL, "replace")
grid.text("Index of samples", y=-0.02, gp=gpar(fontsize=12))
grid.text("Index of tissue", x=-0.03, rot=90, gp=gpar(fontsize=12))
dev.off()
# correlation analysis#####################################################################################################################
df=as.data.frame(colData(dds)[,c("sex","lab","tissue")])
png("correlation_ALL.png",height=3700,width=3700,res=300)
pheatmap(cor(assay(rld)),annotation_col=df,show_colnames=F,main="Heatmap of correlation between ALL samples")
dev.off()
# PCA#####################################################################################################################
library(ggplot2)
pcaData=plotPCA(rld,intgroup=c("group","sex","lab","tissue"), returnData = TRUE)
percentVar=round(100*attr(pcaData,"percentVar"))
ggplot(pcaData,aes(x=PC1,y=PC2,size=lab,shape=sex,color=tissue))+
geom_point()+
scale_shape_manual(values=c(1,2,16))+
scale_size_manual(values=c(6,4,8))+
ggtitle("Principal component analysis with covariate of ALL samples")+
xlab(paste0("PC1: ",percentVar[1],"% variance"))+
ylab(paste0("PC2: ",percentVar[2],"% variance"))+
theme(plot.title=element_text(size=14,family="Tahoma",face="bold",hjust=0.5),
text=element_text(size=12,family="Tahoma"),
axis.title=element_text(face="bold"),
axis.text.x=element_text(size=10,face="bold"),
axis.text.y=element_text(size=10,face="bold"),
legend.text=element_text(size=10,face="bold"),
legend.title=element_text(size=10,face="bold"))
|
45df8c72c8464df49d3b8dfb5ecf8a67c03eb27c
|
24ec28988913ab689df0553e8492757d4f1f383a
|
/R/davidScoreLA.R
|
2258f96240ef749a98b9e5fa33dd614173adc9a7
|
[] |
no_license
|
nmmarquez/linHierarchy
|
bb0222ec8563a490609b90cebe339fa3d3fd6c2e
|
97a27cec497d301a70c46c156ff7b2783de97ecf
|
refs/heads/master
| 2021-01-10T19:15:22.915490
| 2015-03-03T08:44:14
| 2015-03-03T08:44:14
| 19,612,549
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,025
|
r
|
davidScoreLA.R
|
#' Calculate the David's Score of players
#'
#' Calculates the David's Score of players in an "interData" object.
#' @param intData object of class "interData" to calculate scores.
#' @param corrected specify whether to use David's adjustment for chance.
#' @param normalize specify whether to apply the normalizing factor detailed in
#' de Vries et al (2006).
#' @details Using the methods outlined in Gamel et al. 2003 and de Vries et al.
#' 2006 a David's score is calculated using interactions from intData. Adjusting
#' the corrected parameter will modify the algorithm to use David's adjustment
#' for chance.
#' @return A 2 column data frame specifying the players used in the algorithm
#' sorted by their corresponding david's score.
#' @examples
#' # generate generic data
#' interactions <- data.frame (a = sample (letters [1:10], 100, T),
#'                             b = sample (letters [1:10], 100, T),
#'                             o = sample (c(-1,-1,0,1,1), 100, T),
#'                             d = Sys.time () + runif (100, 40, 160))
#' # convert to interData object
#' id1 <- intTableConv (interactions)
#' # calculate David's Score
#' davidScore (id1)
#' # with David's adjustment for chance
#' davidScore (id1, corrected = TRUE)
#' @references Gammel et al. (2003) David's Score. Animal Behaviour.
#' de Vries et al (2006). Measuring and testing the steepness of
#' dominance hierarchies. Animal Behaviour.
#' @export
davidScore <- function (intData, corrected = FALSE, normalize = FALSE){
  idError (intData)
  plyrs <- intData$players
  # Dij applies David's adjustment for chance; Pij is the raw win-proportion
  # matrix (both defined elsewhere in this package).
  if (corrected){
    Pmat <- Dij (intData)
  } else {
    Pmat <- Pij (intData)
  }
  w <- rowSums (Pmat)      # w:  summed win proportions
  l <- colSums (Pmat)      # l:  summed loss proportions
  w2 <- Pmat %*% w         # w2: wins weighted by opponents' wins
  l2 <- t (t(l) %*% Pmat)  # l2: losses weighted by opponents' losses
  # DS = w + w2 - l - l2 (Gammel et al. 2003), sorted descending.
  DS <- data.frame (players = plyrs, score = w + w2 - l - l2)
  DS <- DS [order (-DS$score), ]
  row.names (DS) <- seq_len (nrow (DS))
  if (normalize){
    # Normalization of de Vries et al. (2006): maps scores onto 0..(n - 1).
    DS$score <- (DS$score + nrow (Pmat) * ((nrow (Pmat) - 1)/2))/nrow (Pmat)
  }
  DS
}
|
785f5a9a33738d1fe77c37848acb3b4cf247e814
|
f8601db2cf70d2282c889ac21f213b3e62c3658f
|
/Code/06_Hunt_Prey_Kg_to_Kcal.R
|
58277ff0b4ce91dd8f348278b1f11eebafab9b34
|
[] |
no_license
|
PacheCoLuis/ethnodogs
|
91b0e085925075013f081a58ed140dfc404a7c4b
|
eb454823a8146f133932b4d7e57d3fd49d664cc1
|
refs/heads/main
| 2023-03-06T00:12:59.452765
| 2021-02-17T05:35:28
| 2021-02-17T05:35:28
| 312,133,468
| 0
| 0
| null | 2020-11-12T02:31:57
| 2020-11-12T01:20:10
|
R
|
UTF-8
|
R
| false
| false
| 23,168
|
r
|
06_Hunt_Prey_Kg_to_Kcal.R
|
# SUBSISTENCE HUNTING PREY KG TO KCAL #######################
source("05_Subsist_Hunt_Sample.R")
# Table 4: Prey captures recorded during fieldwork + Harvest (kg to kcal) #######
# Switch to longitudinal format, to deal with serial prey catches [1-5]
prey.all <- reshape(harv_succ, varying=list(prey=c("Prey.Catch","Prey.Catch.2","Prey.Catch.3","Prey.Catch.4","Prey.Catch.5"), weight=c("PC.kg","PC.kg.2","PC.kg.3","PC.kg.4","PC.kg.5")), direction="long")
colnames(prey.all)[which(colnames(prey.all)=="Harvest.kg..NAs.Prey....")] <- "PC.kg.guess"
prey.all <- filter(prey.all,!is.na(PC.kg))
prey.all <- prey.all[order(prey.all$Prey.Number,prey.all$Trip.ID.by.date),]
# kcal/kg estimates per prey type (successful trips) #######
# Hill & Hawkes (1983: p158), assume that 65% of the prey live weight is edible
# a general conversion factor to obtain the edible kg (ek) is (65*PC.kg)/100
# ek should be multiplied by the amount of estimated cal/kg (ck) per prey
# prey ek*ck [units: (cal/kg)*kg = cal = kcal] given the food calories
# equivalence cal ~ kcal [1Cal = 1000calories = 1kcal] the units in
# ( (65*PC.kg)/100 ) * (cal/kg) would be kcal (the OFT currency)
sort(unique(prey.all$Prey.Catch))
prey.all$kcal <- rep(0,dim(prey.all)[1])
prey.all$kcal[prey.all$Prey.Catch=="bird"] <- ((65*prey.all$PC.kg[prey.all$Prey.Catch=="bird"])/100 )*1900
prey.all$kcal[prey.all$Prey.Catch=="chiic"] <- ((65*prey.all$PC.kg[prey.all$Prey.Catch=="chiic"])/100 )*3000
prey.all$kcal[prey.all$Prey.Catch=="ek en che"] <- ((65*prey.all$PC.kg[prey.all$Prey.Catch=="ek en che"])/100 )*3000
prey.all$kcal[prey.all$Prey.Catch=="garuba"] <- ((65*prey.all$PC.kg[prey.all$Prey.Catch=="garuba"])/100 )*1500
prey.all$kcal[prey.all$Prey.Catch=="halee"] <- ((65*prey.all$PC.kg[prey.all$Prey.Catch=="halee"])/100 )*3000
prey.all$kcal[prey.all$Prey.Catch=="keeh"] <- ((65*prey.all$PC.kg[prey.all$Prey.Catch=="keeh"])/100 )*1250
prey.all$kcal[prey.all$Prey.Catch=="kiib"] <- ((65*prey.all$PC.kg[prey.all$Prey.Catch=="kiib"])/100 )*3000
prey.all$kcal[prey.all$Prey.Catch=="kitam"] <- ((65*prey.all$PC.kg[prey.all$Prey.Catch=="kitam"])/100 )*3000
prey.all$kcal[prey.all$Prey.Catch=="wech"] <- ((65*prey.all$PC.kg[prey.all$Prey.Catch=="wech"])/100 )*3000
prey.all$kcal[prey.all$Prey.Catch=="yuk"] <- ((65*prey.all$PC.kg[prey.all$Prey.Catch=="yuk"])/100 )*1250
# Visualizing
prey.all[,c("Prey.Catch", "PC.kg", "kcal")]
# Saving
write.csv(prey.all,"prey_all.csv",row.names=FALSE)
# SUMMARIZING THE PREY CATCH KILOGRAMS, HUNTS WITHOUT/WITH DOGS
# MIND: harv_succ$Prey.Number, coming from hsd$Prey.Number, indicates the
# total number of captures per trip, do not use it to sum captures
# by prey type --- count Prey.Catch instead
prety.stats <- group_by(prey.all,Prey.Catch) %>% summarize(count=n(), round(mean(PC.kg),1),round(sd(PC.kg),1), length(which((Dogs.used=="No")==TRUE)), length(which((Dogs.used=="Yes")==TRUE)))
colnames(prety.stats) <- c("Prey","N","Mean_Kg","SD","Without_dogs","With_dogs")
prety.stats <- prety.stats[order(-prety.stats$Mean_Kg),]
write.table(prety.stats, file="Table-4_Prey-KG_means_sd.csv", append = FALSE, quote=FALSE, sep=" , ", row.names=FALSE)
prety.stats
# CHECK for consistency, the total Ns hold equally
sum(prety.stats$N) == sum(prety.stats$Without_dogs) + sum(prety.stats$With_dogs)
# SUMMARIZING THE PREY CATCH KCAL, HUNTS WITHOUT/WITH DOGS
prety.kcals <- group_by(prey.all,Prey.Catch) %>% summarize(count=n(), round(mean(kcal),0),round(sd(kcal),0), length(which((Dogs.used=="No")==TRUE)), length(which((Dogs.used=="Yes")==TRUE)))
colnames(prety.kcals) <- c("Prey","N","Mean_Kcal","SD","Without_dogs","With_dogs")
prety.kcals <- prety.kcals[order(-prety.kcals$Mean_Kcal),]
write.table(prety.kcals, file="Table-4_Prey-KCAL_means_sd.csv", append = FALSE, quote=FALSE, sep=" , ", row.names=FALSE)
prety.kcals
# NOTE: on weight estimates and actual weight measurements
# If 'Harvest.kg..NAs.Prey....' [here 'prey.all$PC.kg.guess'] values are
# filled with characters it is a 'Harvest.guess'=="Yes" meaning that the
# 'Harvest.kg' were obtained from the literature, in such cases hunters
# reported prey type but could not estimate the approximate prey weight
# In all other cases weights were either measured using a hanging scale
# or estimated by hunters. Use 'PC.kg' associated to Source.Uniform==HA
# records as a rough of the number of weights that hunters did estimate
sort(names(table(prey.all$PC.kg.guess))) # table with 45 dimnames
table(prey.all$PC.kg.guess)[1:37]
names(table(prey.all$PC.kg.guess)[38:45])
# working with numeric values (kg estimates or measures)
# NOTE(review): the 1:37 / 38:45 split hard-codes where numeric-looking names
# end in the sorted table; it must be re-checked whenever the data change.
kg.estme <- data.frame(table(prey.all$PC.kg.guess)[1:37][names(table(prey.all$PC.kg.guess)[1:37])]) ; sum(kg.estme$Freq)
# 85 cases for which PC.kg were hunters' estimates or actual measurements
length(which(prey.all$Source.Uniform=="HA"))
# ~68 of kg.estme could come from hunters' estimates, which
# leaves ~17 cases with actual prey measurements (weights)
# working with character values (kg guesses)
kg.guess <- data.frame(table(prey.all$PC.kg.guess)[38:45][names(table(prey.all$PC.kg.guess)[38:45])]) ; sum(kg.guess$Freq)
# 36 cases for which PC.kg were taken from Primack et al. (1997) or Koster (2008)
# CHECK for consistency
sum(kg.estme$Freq) + sum(kg.guess$Freq) == sum(table(prey.all$PC.kg.guess))
# Metrics: kcal, hours, prey, ethno-source #######
# kg or kcal per source type successful hunts (mean, sd, n)
prety.sourc <- group_by(prey.all,Source.Uniform) %>% summarize(count=n(), round(sum(PC.kg)))
colnames(prety.sourc) <- c("Source","Prey_N","Sum_Kg")
prety.sourc
prety.sokca <- group_by(prey.all,Source.Uniform) %>% summarize(count=n(), round(sum(kcal)))
colnames(prety.sokca) <- c("Source","Prey_N","Sum_Kcal")
prety.sokca
# Hours per trip (mean, sd, n)
round(mean(hsd$Hours.hunted,na.rm=TRUE),1) ; round(sd(hsd$Hours.hunted,na.rm=TRUE),1)
# mean 4.9 sd 2.1
length(unique(hsd$Trip.ID.by.date))-length(unique(hsd[which(is.na(hsd$Hours.hunted)),"Trip.ID.by.date"]))
# n = 136 dates; after omitting 49 dates for which 'Hours.hunted' are NA...
length(unique(hsd[which(is.na(hsd$Hours.hunted) & hsd$Source.Uniform=="Notes"), "Trip.ID.by.date"]))
# ...37 from 'Notes'
length(unique(hsd[which(is.na(hsd$Hours.hunted) & hsd$Source.Uniform=="HA"), "Trip.ID.by.date"]))
# ...12 from 'HA'
# Evidence on the sources' disparities and more
# BUT FIRST, recalling unsuccessful hunts when converting from wide to long format
# (filtered only on a non-empty Prey.Catch, so missed-prey rows are retained).
prey.suun <- reshape(harv, varying=list(prey=c("Prey.Catch","Prey.Catch.2","Prey.Catch.3","Prey.Catch.4","Prey.Catch.5"), weight=c("PC.kg","PC.kg.2","PC.kg.3","PC.kg.4","PC.kg.5")), direction="long")
colnames(prey.suun)[which(colnames(prey.suun)=="Harvest.kg..NAs.Prey....")] <- "PC.kg.guess"
prey.suun <- filter(prey.suun, Prey.Catch!="")
# kcal/kg estimates per prey type (all trips) #######
# Hill & Hawkes (1983: p158), assume that 65% of the prey live weight is edible
# a general conversion factor to obtain the edible kg (ek) is (65*PC.kg)/100
# ek should be multiplied by the amount of estimated cal/kg (ck) per prey
# prey ek*ck [units: (cal/kg)*kg = cal = kcal] given the food calories
# equivalence cal ~ kcal [1Cal = 1000calories = 1kcal] the units in
# ( (65*PC.kg)/100 ) * (cal/kg) would be kcal (the OFT currency)
sort(unique(prey.suun$Prey.Catch))
# Named lookup table of cal/kg replaces the copy-pasted per-type assignments.
# "aim_PCS" rows are prey aimed at but missed, hence 0 cal/kg; unknown prey
# types keep the previous default of 0 kcal.
kcal.per.kg.suun <- c("aim_PCS" = 0, "bird" = 1900, "chiic" = 3000,
                      "ek en che" = 3000, "garuba" = 1500, "halee" = 3000,
                      "keeh" = 1250, "kiib" = 3000, "kitam" = 3000,
                      "wech" = 3000, "yuk" = 1250)
suun.type.idx <- match(prey.suun$Prey.Catch, names(kcal.per.kg.suun))
prey.suun$kcal <- ifelse(is.na(suun.type.idx), 0,
                         ((65 * prey.suun$PC.kg) / 100) * kcal.per.kg.suun[suun.type.idx])
# Visualizing
prey.suun[,c("Prey.Catch", "PC.kg", "kcal")]
# Saving
write.csv(prey.suun,"prey_suun.csv",row.names=FALSE)
# SUMMARIZING THE PREY COUNTS PER SOURCE
# One row per prey category: total record count plus a breakdown of how many
# records came from each data source (GPS/HR, HA, Notes).
prety.suun <- prey.suun %>%
  group_by(Prey.Catch) %>%
  summarize(N = n(),
            `GPS/HR` = sum(Source.Uniform == "GPS/HR", na.rm = TRUE),
            HA = sum(Source.Uniform == "HA", na.rm = TRUE),
            Notes = sum(Source.Uniform == "Notes", na.rm = TRUE))
colnames(prety.suun)[1] <- "Prey"
write.table(prety.suun, file="Table_Prey-Source.csv", append = FALSE, quote=FALSE, sep=", ", row.names=FALSE)
prety.suun
# Recalling notes on prey missed
#hsd[1:20,c("Trip.ID.by.date", "Prey.Number", "HarvShare_NotAccounted", "Prey.Catch", "Harvest.kg..NAs.Prey....","Prey.Catch.Spa")]
#harv[which(harv$PC.kg==0),c("Trip.ID.by.date", "Prey.Number", "HarvShare_NotAccounted", "Prey.Catch", "Harvest.kg..NAs.Prey....","Prey.Catch.Spa")]
# Same breakdown for missed targets (Prey.Number == 0), labelled by the
# Spanish prey name recorded for the miss.
prety.miss <- prey.suun %>%
  filter(Prey.Number == 0) %>%
  group_by(Prey.Catch.Spa) %>%
  summarize(N_Aimed = n(),
            `GPS/HR` = sum(Source.Uniform == "GPS/HR", na.rm = TRUE),
            HA = sum(Source.Uniform == "HA", na.rm = TRUE),
            Notes = sum(Source.Uniform == "Notes", na.rm = TRUE))
colnames(prety.miss)[1] <- "Prey.Miss.Spa"
write.csv(prety.miss, file="Table_PreyMissSpa-Source.csv", row.names=FALSE)
prety.miss
# Number of prey captured per trip #######
# For each capture multiplicity k (Prey.Number == k), count the trips with
# exactly k captures and list trip ID, prey, dog use, and day/night.
# NOTE(review): Prey.Number appears to be the per-trip capture count shared by
# all prey rows of the same trip (hence length(unique(Trip.ID)) below) --
# confirm against the data dictionary.
# Single capture trips (n = 79)
dim(unique(prey.all[which(prey.all$Prey.Number==1),c("Trip.ID.by.date","Prey.Catch")]))[1]
kill_sing <- prey.all[which(prey.all$Prey.Number==1), c("Trip.ID.by.date","Prey.Catch","Dogs.used","Day.Night")] ; kill_sing
# Double capture trips (n = 17) Prey.Catch [Trip.ID.by.date]
length(unique(prey.all[which(prey.all$Prey.Number==2),"Trip.ID.by.date"]))
kill_doub <- prey.all[which(prey.all$Prey.Number==2), c("Trip.ID.by.date","Prey.Catch","Dogs.used","Day.Night")] ; kill_doub
# --SAME x2 PREY
# keeh [27]
# halee [40]
# kiib [93]
# wech [118], wech [119], wech [155]
# kitam [29], kitam [39], kitam [107], kitam [130], kitam [139], kitam [142]
# --ARRAYS OF x2 PREY
# halee-kiib [32], keeh-wech [135], kiib-wech [141], wech-halee [172], kitam-wech [195]
# TEXT: We observed seventeen trips with double-captures: kitam (n=6), wech (n=3),
# keeh (n=1), halee (n=1), and kiib (n=1). The rest of the double-capture cases were
# for different species: halee-kiib, keeh-wech, kiib-wech, wech-halee, kitam-wech. All
# double capture trips were diurnal, and only one of them [x2 wech, 119] was without dogs
# Triple capture trips (n = 1) Prey.Catch [Trip.ID.by.date]
length(unique(prey.all[which(prey.all$Prey.Number==3),"Trip.ID.by.date"]))
kill_trip <- prey.all[which(prey.all$Prey.Number==3), c("Trip.ID.by.date","Prey.Catch","Dogs.used","Day.Night")] ; kill_trip
# ...x3 halee [26] --- Prey.Catch [Trip.ID.by.date]
# TEXT: A unique nocturnal and without dogs trip, with a triple-capture for halee
# Quadruple capture trips (n = 0) Prey.Catch [Trip.ID.by.date]
dim(unique(prey.all[which(prey.all$Prey.Number==4),c("Trip.ID.by.date","Prey.Catch")]))[1]
kill_quad <- prey.all[which(prey.all$Prey.Number==4), c("Trip.ID.by.date","Prey.Catch","Dogs.used","Day.Night")] ; kill_quad
# Quintuple capture trips (n = 1) Prey.Catch [Trip.ID.by.date]
length(unique(prey.all[which(prey.all$Prey.Number==5),"Trip.ID.by.date"]))
kill_quin <- prey.all[which(prey.all$Prey.Number==5), c("Trip.ID.by.date","Prey.Catch","Dogs.used","Day.Night")] ; kill_quin
# TEXT: We registered a diurnal and with dogs trip with a quintuple-capture
# for wech (a whole family)
# Metrics for Figs 4 & 5 Hours~Prey + Harvest~Trip #######
# FIGURES 4 and 5 (start, bring pieces together)
# ALL CASES
# Omitting cases for which we do not know 'Hours.hunted'
hrsprey <- filter(prey.all,!is.na(Hours.hunted))
hrsprey_na <- filter(prey.all,is.na(Hours.hunted))
length(unique(hrsprey_na$Trip.ID.by.date))
# pny.uhrs: number of prey items with unknown Hours.hunted
# (total of the source x prey contingency table of the NA subset)
pny.uhrs <- sum(xtabs(~Source.Uniform+Prey.Catch,hrsprey_na)) ; pny.uhrs
# 28 successful trips and 34 prey items for which Hours.hunted are NA
# (see 21 unsuccesful trips at the start of BEHAVIORAL METRICS OF THE HUNTS
# section; together they sum the 49 cases for which Hours.hunted are NA)
# OMITTING the "garuba" and the "bird"
# pgb.khrs: opportunistic captures excluded from the Hours~Prey analysis
pgb.khrs <- dim(filter(hrsprey,Prey.Catch=="garuba"|Prey.Catch=="bird"))[1] ; pgb.khrs
# 2 prey items omitted (garuba and bird) with known Hours.hunted
hrsprey <- filter(hrsprey,Prey.Catch!="garuba"&Prey.Catch!="bird")
# Drop the now-empty factor levels so downstream boxplots/models skip them
hrsprey$Prey.Catch <- droplevels(hrsprey$Prey.Catch)
# NOTE:
# The wild pigeon shot was a last chance to get wild meat on the way back home after a 12h
# unsuccessful trip with dogs which did not chase or alerted hunters about the prey.
# The iguana that hunters brought down from a tree after concluding routine agricultural
# tasks and attending to their dogs' barks, was not chased in the bush.
# FIG4a-b --- HOURS~PREY #######
# Getting the descriptive stats of trips for which we know Hours.hunted
prety.hour <- group_by(hrsprey,Prey.Catch) %>%
  summarize(count=n(), length(unique(Trip.ID.by.date)), mean(Hours.hunted,na.rm=TRUE), sd(Hours.hunted,na.rm=TRUE))
colnames(prety.hour) <- c("Prey","count","N_Trips","Mean_Trips_Hrs","SD")
prety.hour <- prety.hour[order(-prety.hour$Mean_Trips_Hrs),]
write.table(prety.hour,file="Table_Prey-HOURS_means_sd.csv", append=FALSE, quote=FALSE, sep=",", row.names = FALSE)
prety.hour
sum(prety.hour$count) # 85 prey items, in hunting trips without and with dogs
# WITHOUT DOGS CASES
# boxplot(..., plot=FALSE) is used only to extract group medians ($stats[3,]),
# level names ($names) and group sizes ($n); prey levels are then re-ordered
# by median trip duration for plotting. Nothing is drawn here.
hrsprey_nd <- filter(hrsprey,Dogs.used=="No")
hrsprey_nd$Prey.Catch <- droplevels(hrsprey_nd$Prey.Catch)
boxplot.medians <- boxplot(hrsprey_nd$Hours.hunted~hrsprey_nd$Prey.Catch, plot=FALSE)$stats[3,]
boxplot.names <- boxplot(hrsprey_nd$Hours.hunted~hrsprey_nd$Prey.Catch, plot=FALSE)$names
names.ordered <- boxplot.names[order(boxplot.medians)]
prey.ordered <- factor(hrsprey_nd$Prey.Catch, ordered=TRUE, levels=names.ordered)
hpc <- boxplot(hrsprey_nd$Hours.hunted~prey.ordered,plot=0)
# NOTE(review): n[1:5] assumes exactly five prey levels remain after the
# filtering above; the consistency check below would flag a mismatch
pnd.khrs <- sum(hpc$n[1:5]) # 11 prey items without dogs, and known Hours.hunted
# WITH DOGS CASES
hrsprey_wd <- filter(hrsprey,Dogs.used=="Yes")
hrsprey_wd$Prey.Catch <- droplevels(hrsprey_wd$Prey.Catch)
boxplot.medians <- boxplot(hrsprey_wd$Hours.hunted~hrsprey_wd$Prey.Catch, plot=FALSE)$stats[3,]
boxplot.names <- boxplot(hrsprey_wd$Hours.hunted~hrsprey_wd$Prey.Catch, plot=FALSE)$names
names.ordered <- boxplot.names[order(boxplot.medians)]
prey.ordered <- factor(hrsprey_wd$Prey.Catch, ordered=TRUE, levels=names.ordered)
hpc <- boxplot(hrsprey_wd$Hours.hunted~prey.ordered,plot=0)
pyd.khrs <- sum(hpc$n[1:5]) # 74 prey items with dogs, and known Hours.hunted
# CHECK FOR CONSISTENCY:
# Sum of prey items holds equal to total prey number
pny.uhrs + pgb.khrs + pnd.khrs + pyd.khrs == sum(prety.stats$N)
# FIG5a-b --- HARVEST~DOGS.USED #######
# UNSUCCESSFUL AND SUCCESSFUL HUNTS
# The boxplot(..., plot=0) calls below are used only to recover group sizes
# ($n, one count per Dogs.used level); nothing is drawn here.
# KILLS IN TRIPS WITHOUT AND WITH DOGS
doguse_suco <- boxplot(prey.suun$PC.kg[prey.suun$Prey.Catch!="aim_PCS"] ~ prey.suun$Dogs.used[prey.suun$Prey.Catch!="aim_PCS"], plot=0)
# FAILS IN TRIPS WITHOUT AND WITH DOGS ("aim_PCS" rows are missed targets)
doguse_umsk <- boxplot(prey.suun$PC.kg[prey.suun$Prey.Catch=="aim_PCS"] ~ prey.suun$Dogs.used[prey.suun$Prey.Catch=="aim_PCS"], plot=0)
# WITHOUT DOGS ALL TRIPS
doguse_suco$n[1] ; length(unique(prey.suun[which(prey.suun$PC.kg>=0 & prey.suun$Dogs.used=="No"),"Trip.ID.by.date"]))
# Kills: 17 prey items in 42 trips without dogs
doguse_umsk$n[1] ; length(unique(prey.suun[which(prey.suun$PC.kg>=0 & prey.suun$Dogs.used=="No"),"Trip.ID.by.date"]))
# Fails: 28 trips (~missed targets) in 42 trips without dogs
# WITH DOGS ALL TRIPS
doguse_suco$n[2] ; length(unique(prey.suun[which(prey.suun$PC.kg>=0 & prey.suun$Dogs.used=="Yes"),"Trip.ID.by.date"]))
# Kills: 104 prey items in 143 trips with dogs
doguse_umsk$n[2] ; length(unique(prey.suun[which(prey.suun$PC.kg>=0 & prey.suun$Dogs.used=="Yes"),"Trip.ID.by.date"]))
# Fails: 59 trips (~missed targets) in 143 trips with dogs
# WITHOUT DOGS (Fails [UNSUCCESSFUL] omitted) SUCCESSFUL TRIPS ONLY
doguse_suco$n[1] ; length(unique(prey.suun[which(prey.suun$PC.kg>0 & prey.suun$Dogs.used=="No"),"Trip.ID.by.date"]))
# 17 prey items in 14 trips without dogs
# WITH DOGS (Fails [UNSUCCESSFUL] omitted) SUCCESSFUL TRIPS ONLY
doguse_suco$n[2] ; length(unique(prey.suun[which(prey.suun$PC.kg>0 & prey.suun$Dogs.used=="Yes"),"Trip.ID.by.date"]))
# 104 prey items in 84 trips with dogs
# SUCCESSFUL HUNTS ONLY
doguse_succ <- boxplot(prey.suun$PC.kg[prey.suun$PC.kg>0] ~ prey.suun $Dogs.used[prey.suun$PC.kg>0], plot=0)
# SUM ALL KG PER TRIP INTO kg_sum AND OMIT Trip.ID.by.date REPETITIONS
# NOTE(review): distinct() collapses to one row per trip only if Prey.Number,
# Dogs.used and Source.Uniform are constant within a trip -- confirm; a trip
# whose records mix sources would keep several rows here.
kgsum <- prey.suun %>%
  group_by(Trip.ID.by.date) %>%
  mutate(kg_sum=sum(PC.kg)) %>%
  select(Trip.ID.by.date,kg_sum,Prey.Number,Dogs.used,Source.Uniform)
kgsum <- kgsum %>%
  distinct(Trip.ID.by.date,kg_sum,Prey.Number,Dogs.used,Source.Uniform)
# SUM ALL KCAL PER TRIP INTO kcal_sum AND OMIT Trip.ID.by.date REPETITIONS
kcalsum <- prey.suun %>%
  group_by(Trip.ID.by.date) %>%
  mutate(kcal_sum=sum(kcal)) %>%
  select(Trip.ID.by.date,kcal_sum,Prey.Number,Dogs.used,Source.Uniform)
kcalsum <- kcalsum %>%
  distinct(Trip.ID.by.date,kcal_sum,Prey.Number,Dogs.used,Source.Uniform)
#write.csv(kgsum,"kg_sum.csv",row.names=FALSE)
#write.csv(kcalsum,"kcal_sum.csv",row.names=FALSE)
# FIGURES 4 and 5 (end, compile postscripts)
# Figure 4: Hunting trip duration by prey type captured #######
# Raw material produced here for later BW-work on art using Adobe Illustrator
# Two horizontal boxplot panels (without / with dogs): box width ~ sqrt(n)
# (varwidth=TRUE), prey ordered by median trip duration; left axis (side=2)
# shows prey names, right axis (side=4) shows per-prey sample sizes.
setEPS()
postscript("F4_Hrs-Prey_perDogsUsed.eps")
par(mfrow=c(1,2))
# Fig4a Without dogs
# Re-derive the median ordering (same recipe as in the FIG4 metrics section)
boxplot.medians <- boxplot(hrsprey_nd$Hours.hunted~hrsprey_nd$Prey.Catch, plot=FALSE)$stats[3,]
boxplot.names <- boxplot(hrsprey_nd$Hours.hunted~hrsprey_nd$Prey.Catch, plot=FALSE)$names
names.ordered <- boxplot.names[order(boxplot.medians)]
prey.ordered <- factor(hrsprey_nd$Prey.Catch, ordered=TRUE, levels=names.ordered)
hpc <- boxplot(hrsprey_nd$Hours.hunted~prey.ordered,plot=0)
par(mar=c(5,5.5,2.5,5)+0.1,mgp=c(3,1,0))
boxplot(hrsprey_nd$Hours.hunted~prey.ordered,varwidth=TRUE,horizontal=TRUE,
        yaxt="n",xlab="Hunting (hours)\nWithout dogs",frame.plot=FALSE,cex.axis=0.9)
axis(side=2, las=2, at=c(1:5), labels = c(
  names.ordered[1],
  names.ordered[2],
  names.ordered[3],
  names.ordered[4],
  names.ordered[5]),
  tick=FALSE)
axis(side=4, las=2, at=c(1:5), labels = c(
  paste("n = ",hpc$n[1]),
  paste("n = ",hpc$n[2]),
  paste("n = ",hpc$n[3]),
  paste("n = ",hpc$n[4]),
  paste("n = ",hpc$n[5])),
  tick=FALSE)
# Fig4b With dogs
boxplot.medians <- boxplot(hrsprey_wd$Hours.hunted~hrsprey_wd$Prey.Catch, plot=FALSE)$stats[3,]
boxplot.names <- boxplot(hrsprey_wd$Hours.hunted~hrsprey_wd$Prey.Catch, plot=FALSE)$names
names.ordered <- boxplot.names[order(boxplot.medians)]
prey.ordered <- factor(hrsprey_wd$Prey.Catch, ordered=TRUE, levels=names.ordered)
hpc <- boxplot(hrsprey_wd$Hours.hunted~prey.ordered,plot=0)
par(mar=c(5,5.5,2.5,5)+0.1,mgp=c(3,1,0))
boxplot(hrsprey_wd$Hours.hunted~prey.ordered,varwidth=TRUE,horizontal=TRUE,
        yaxt="n",xlab="Hunting (hours)\nWith dogs",frame.plot=FALSE,cex.axis=0.9)
axis(side=2, las=2, at=c(1:5), labels = c(
  names.ordered[1],
  names.ordered[2],
  names.ordered[3],
  names.ordered[4],
  names.ordered[5]),
  tick=FALSE)
axis(side=4, las=2, at=c(1:5), labels = c(
  paste("n = ",hpc$n[1]),
  paste("n = ",hpc$n[2]),
  paste("n = ",hpc$n[3]),
  paste("n = ",hpc$n[4]),
  paste("n = ",hpc$n[5])),
  tick=FALSE)
# Restore the default graphical parameters before closing the device
par(mar = c(5, 4, 4, 2) + 0.1, mgp = c(3, 1, 0), mfrow=c(1,1)) # default
dev.off()
# Inferential statistics Hours~Prey when nDogs or wDogs Used
# lm() with a single factor predictor == one-way ANOVA of hours across prey
summary(lm(hrsprey_nd$Hours.hunted~hrsprey_nd$Prey.Catch))
summary(lm(hrsprey_wd$Hours.hunted~hrsprey_wd$Prey.Catch))
# Figure 5: Hunting trip harvest without and with dogs + t-Test #######
# Raw material produced here for later BW-work on art using Adobe Illustrator
# Inferential statistics Harvest~Trip per Dogs Used
# MIND: no Normal distribution found
# Prey-item level: kcal per prey record ~ dog use (one-way ANOVA via lm)
# All hunts
huntsall <- lm(prey.suun$kcal ~ prey.suun$Dogs.used)
summary(huntsall)
# F = 4.1, df = 206, p = 0.04; Adjusted R squared = 0.015; kcal ~ 9933 + dogsY*5907
# Successful hunts only
prey.succ <- prey.suun %>% filter(kcal>0)
huntsucc <- lm(prey.succ$kcal ~ prey.succ$Dogs.used)
summary(huntsucc)
# F = 0.12, df = 119, p = 0.73; Adjusted R squared = -0.007; kcal ~ 26293 + dogsY*(-1467)
# t-tests #######
# Trip level: total kcal per trip (kcalsum) compared between dog-use groups
# with t.test() (Welch's test, unequal variances, by default)
# All hunts
adpkcal <- kcalsum %>% filter(Dogs.used=="Yes")
length(adpkcal$kcal_sum) # 143
adakcal <- kcalsum %>% filter(Dogs.used=="No")
length(adakcal$kcal_sum) # 42
t.test(adpkcal$kcal_sum,adakcal$kcal_sum)
# TEXT: t=2.15, df=79.3, p-value=0.03 95% CI 567 to 14259
# Means adpkcal = 18055 and adakcal = 10642
# Successful hunts
sdpkcal <- kcalsum %>% filter(Dogs.used=="Yes",kcal_sum>0)
length(sdpkcal$kcal_sum)
sdakcal <- kcalsum %>% filter(Dogs.used=="No",kcal_sum>0)
length(sdakcal$kcal_sum)
t.test(sdpkcal$kcal_sum,sdakcal$kcal_sum)
# TEXT: t=-0.21, df=19, p-value=0.83 95% CI -12990 to 10610
# Means sdpkcal = 30737 and sdakcal = 31927
# Descriptive statistics per group, reported in the text
round(mean(adpkcal$kcal_sum),1) # avg 18055.5
round(median(adpkcal$kcal_sum),1) # median 9750
round(sd(adpkcal$kcal_sum),1) # sd 22502.1
length(adpkcal$kcal_sum) # 143 trips
round(mean(adakcal$kcal_sum),1) # avg 10642.5
round(median(adakcal$kcal_sum),1) # median 0
round(sd(adakcal$kcal_sum),1) # sd 18661.4
length(adakcal$kcal_sum) # 42 trips
# Figure 5: two stacked boxplot panels of per-trip kcal harvest by dog use;
# panel (a) includes unsuccessful trips (kcal_sum == 0), panel (b) does not.
# X-axis labels of panel (a) embed sample size, successful-trip count,
# success rate and animals captured for each group.
setEPS()
postscript("F5_Harvest-Trip_perDogsUsed.eps")
par(mfrow=c(2,1))
# Fig5a All trips (kcal>=0)
par(mar=c(6,6,4,4)+0.1,mgp=c(4,1,0))
boxplot(kcalsum$kcal_sum ~ kcalsum$Dogs.used, varwidth=TRUE, las=1, xaxt="n",
        ylab="Harvest (kcal)", frame.plot=FALSE)
axis(side = 1, at=c(1, 2), labels = c(
  paste0("Without dogs\nSample ", length(adakcal$kcal_sum), " Succ.Tr ", length(which(adakcal$kcal_sum>0)), "\n%Succ.Rt ", round((length(which(adakcal$kcal_sum>0))*100)/length(adakcal$kcal_sum),0), " Anim.Cap ", doguse_suco$n[1]),
  paste0("With dogs\nSample ", length(adpkcal$kcal_sum), " Succ.Tr ", length(which(adpkcal$kcal_sum>0)), "\n%Succ.Rt ", round((length(which(adpkcal$kcal_sum>0))*100)/length(adpkcal$kcal_sum),0), " Anim.Cap ", doguse_suco$n[2])),
  tick=FALSE, cex.axis=0.6)
# Fig5b Successful trips (kcal>0)
par(mar=c(6,6,4,4)+0.1,mgp=c(4,1,0))
boxplot(kcalsum$kcal_sum[kcalsum$kcal_sum>0] ~ kcalsum$Dogs.used[kcalsum$kcal_sum>0],
        varwidth=TRUE, las=1, xaxt="n", ylab="Harvest (kcal)",frame.plot=FALSE)
axis(side = 1, at=c(1, 2), labels = c(
  paste0("Without dogs"),
  paste0("With dogs")),
  tick=FALSE, cex.axis=0.6)
# Restore the default graphical parameters before closing the device
par(mar = c(5, 4, 4, 2) + 0.1, mgp = c(3, 1, 0), mfrow=c(1,1)) # default
dev.off()
|
f4f1e1d59bcf7a3636a5a2a8c907a90fe428f307
|
de7c4927217fe4266a5e97fc69633a437a25f06e
|
/src/5.1.SingleSample_Neu.R
|
72f627ce34e184fd5c27b599c91750e61ad94053
|
[
"MIT"
] |
permissive
|
elifesciences-publications/BreastCancer_SingleCell
|
38a0a802b56c2b3ddf3c798778998da8a28573d8
|
619586208d4d92c3bac12b89b097ed665bf9ed71
|
refs/heads/master
| 2022-12-03T00:15:06.596247
| 2020-08-17T15:17:35
| 2020-08-17T15:17:35
| 288,213,663
| 3
| 1
| null | 2020-08-17T15:15:38
| 2020-08-17T15:15:38
| null |
UTF-8
|
R
| false
| false
| 15,863
|
r
|
5.1.SingleSample_Neu.R
|
# Libraries: Bioconductor single-cell stack (scran/scater/batchelor) plus
# tidyverse helpers. NOTE: plyr is attached after dplyr, which masks several
# dplyr verbs -- this script therefore calls dplyr::mutate explicitly later;
# plyr is needed here only for mapvalues().
library(scran)
library(dplyr)
library(Rtsne)
library(plyr)
library(BiocSingular)
library(scater)
library(batchelor)
# QC'ed expression list: m = counts matrix (genes x cells),
# pD = per-cell metadata, fD = per-gene metadata
dataList <- readRDS("../data/Robjects/ExpressionList_QC.rds")
m <- dataList[[1]]
pD <- dataList[[2]]
fD <- dataList[[3]]
rm(dataList)
# Derive the replicate (A/B) and tumour model (Condition) from the sample ID
pD$Replicate <- mapvalues(pD$SampleID, c("BRCA1_A","BRCA1_B","NEU_A","NEU_B","FF99WT_A","FF99WT_B","4T1"),
                          c("A","B","A","B","A","B","A")) %>% factor(., levels = c("A","B"))
pD$Condition <- mapvalues(pD$SampleID, c("BRCA1_A","BRCA1_B","NEU_A","NEU_B","FF99WT_A","FF99WT_B","4T1"),
                          c("BRCA1","BRCA1","NEU","NEU","FF99WT","FF99WT","4T1")) %>%
  factor(., levels = c("BRCA1","NEU","FF99WT","4T1"))
# Keep genes with mean count > 0.01 and cells that passed QC (PassAll)
fD$keep <- rowMeans(m) > 0.01
m <- m[fD$keep, pD$PassAll]
pD <- pD[pD$PassAll,]
fD <- fD[fD$keep, ]
# Align row names across the three objects (genes by symbol, cells by barcode)
rownames(m) <- fD$symbol
rownames(pD) <- pD$barcode
rownames(fD) <- fD$symbol
#-------------------------------------------
# Cell barcode sets per condition (only NEU is analysed in this script)
BRCA1 <- pD$barcode[pD$Condition %in% c("BRCA1")]
FF99WT <- pD$barcode[pD$Condition %in% c("FF99WT")]
NEU <- pD$barcode[pD$Condition %in% c("NEU")]
Sample_4T1 <- pD$barcode[pD$Condition %in% c("4T1")]
#--------------------------------------------
# NEU
set.seed(1000)
# Subset counts and cell metadata to the NEU condition
m_NEU <- m[,pD$barcode %in% NEU]
pD_NEU <- pD[pD$barcode %in% NEU,]
#=======================================================
# Per-replicate gene filter (mean count > 0.01 within that replicate), then
# build one SingleCellExperiment per replicate. The dplyr:: prefix is needed
# because plyr is attached after dplyr above.
fD_A <- fD %>% dplyr::mutate(keep = rowMeans(m_NEU[,pD_NEU$Replicate == "A"])> 0.01)
fD_B <- fD %>% dplyr::mutate(keep = rowMeans(m_NEU[,pD_NEU$Replicate == "B"])> 0.01)
sce.NEU_A <- SingleCellExperiment(list(counts=as.matrix(m_NEU[fD_A$keep,pD_NEU$Replicate == "A"])),
                                  colData = DataFrame(pD_NEU[pD_NEU$Replicate == "A",]),
                                  rowData = DataFrame(fD_A[fD_A$keep,]))
sce.NEU_B <- SingleCellExperiment(list(counts=as.matrix(m_NEU[fD_B$keep,pD_NEU$Replicate == "B"])),
                                  colData = DataFrame(pD_NEU[pD_NEU$Replicate == "B",]),
                                  rowData = DataFrame(fD_B[fD_B$keep,]))
#-------------------------
# Replicate A: normalise, model gene variance, reduce dimensions, cluster,
# and export per-cluster marker genes.
# Pre-cluster cells, then compute pooled/deconvolved size factors (scran)
clusters <- quickCluster(sce.NEU_A, method ='igraph',use.ranks=FALSE, min.mean = 0.1)
table(clusters)
sce.NEU_A <- computeSumFactors(sce.NEU_A, min.mean = 0.1, clusters = clusters)
summary(sizeFactors(sce.NEU_A))
# NOTE(review): normalize() and trendVar()/decomposeVar() below are
# deprecated in newer scater/scran releases (logNormCounts(), modelGeneVar());
# kept as-is, presumably matching the Bioconductor version this project used.
sce.NEU_A <- normalize(sce.NEU_A)
#table(sce.NEU$Replicate)
#batch <- c(rep("1", each=2242), rep("2", each = 1912))
# Mean-variance trend without spike-ins; dec$bio = biological component
fit <- trendVar(sce.NEU_A, use.spikes = FALSE)
dec <- decomposeVar(sce.NEU_A, fit)
dec$Symbol <- rowData(sce.NEU_A)$symbol
dec_A <- dec[order(dec$bio, decreasing = TRUE), ]
# HVGs: FDR <= 0.1 with non-negative biological variance
hvg.A <- dec_A[which(dec_A$FDR <= 0.1 & dec_A$bio >=0),]
set.seed(1000)
sce.NEU_A <- runPCA(sce.NEU_A, feature_set= rownames(hvg.A),BSPARAM=IrlbaParam())
sce.NEU_A <- runTSNE(sce.NEU_A , use_dimred = "PCA")
sce.NEU_A <- runUMAP(sce.NEU_A , use_dimred = "PCA")
#rownames(sce.NEU_A) <- rowData(sce.NEU_A)$symbol
plotTSNE(sce.NEU_A, colour_by="Cd14")
# Coarse clustering: shared-nearest-neighbour graph (large k=500) + Louvain
snn.gr <- buildSNNGraph(sce.NEU_A, use.dimred="PCA", assay.type="logcounts", k=500)
clusters <- igraph::cluster_louvain(snn.gr)
table(clusters$membership)
sce.NEU_A$Cluster <- factor(clusters$membership)
plotTSNE(sce.NEU_A, colour_by="Cluster")
# Upregulated marker genes per cluster, written one sheet per cluster
markers <- findMarkers(sce.NEU_A, sce.NEU_A$Cluster, direction="up")
library(xlsx)
for (cluster in names(markers)) {
  write.xlsx(data.frame(markers[[cluster]]), row.names=TRUE,
             file="NEU_A__MarkerGenes.xlsx",
             sheetName=cluster, append = TRUE)
  gc()
}
#----------------------------
# Replicate B: same pipeline as replicate A (normalise, HVGs, PCA/TSNE/UMAP,
# cluster, export markers).
clusters <- quickCluster(sce.NEU_B, method ='igraph',use.ranks=FALSE, min.mean = 0.1)
table(clusters)
sce.NEU_B <- computeSumFactors(sce.NEU_B, min.mean = 0.1, clusters = clusters)
summary(sizeFactors(sce.NEU_B))
sce.NEU_B <- normalize(sce.NEU_B)
#table(sce.NEU$Replicate)
#batch <- c(rep("1", each=2242), rep("2", each = 1912))
fit <- trendVar(sce.NEU_B, use.spikes = FALSE)
dec <- decomposeVar(sce.NEU_B, fit)
dec$Symbol <- rowData(sce.NEU_B)$symbol
dec_B <- dec[order(dec$bio, decreasing = TRUE), ]
# NOTE(review): HVG cutoff here is bio >= 0.1, but replicate A used bio >= 0
# -- confirm the stricter threshold for B is intentional.
hvg.B <- dec_B[which(dec_B$FDR <= 0.1 & dec_B$bio >=0.1),]
sce.NEU_B <- runPCA(sce.NEU_B, feature_set= rownames(hvg.B),BSPARAM=IrlbaParam())
sce.NEU_B <- runTSNE(sce.NEU_B, use_dimred = "PCA")
sce.NEU_B <- runUMAP(sce.NEU_B, use_dimred = "PCA")
rownames(sce.NEU_B) <- rowData(sce.NEU_B)$symbol
# NOTE(review): clustering also differs from replicate A
# (walktrap with k=300 here vs louvain with k=500 for A).
snn.gr <- buildSNNGraph(sce.NEU_B, use.dimred="PCA", assay.type="logcounts", k=300)
clusters <- igraph::cluster_walktrap(snn.gr)
table(clusters$membership)
sce.NEU_B$Cluster <- factor(clusters$membership)
plotTSNE(sce.NEU_B, colour_by="Cluster")
plotTSNE(sce.NEU_B, colour_by="Cd14")
markers <- findMarkers(sce.NEU_B, sce.NEU_B$Cluster, direction="up")
for (cluster in names(markers)) {
  write.xlsx(data.frame(markers[[cluster]]), row.names=TRUE,
             file="NEU_B_MarkerGenes.xlsx",
             sheetName=cluster, append = TRUE)
  gc()
}
#--Plot Genes expression--------------
# TSNE of replicate A coloured by expression of selected genes, arranged in a
# two-column grid.
genes <- c("Cd14","Bcl3","Osmr","Nfkbia")
plt <- list()
for (gene in genes){
  tmp = plotTSNE(sce.NEU_A, colour_by = gene )+
    scale_fill_gradient2(name = gene, low='grey',high ='red')+
    theme_bw()+
    theme(panel.grid.major = element_blank(), panel.grid.minor = element_blank())
  plt[[gene]] <- tmp
}
multiplot(plotlist=plt, cols = 2)
plotTSNE(sce.NEU_A, colour_by ="Cluster")
#==CombineDataset=============================================================
# Integrate replicates A and B with mutual-nearest-neighbours (MNN) correction.
# Shared genes across both replicates; average their biological variance
universe <- intersect(rownames(dec_A), rownames(dec_B))
mean.bio <- (dec_A[universe,"bio"] + dec_B[universe,"bio"])/2
chosen <- universe[mean.bio > 0]
length(chosen)
# Rescale so both batches have comparable coverage before correction
rescaled <- batchelor::multiBatchNorm(
  sce.NEU_A[universe,],
  sce.NEU_B[universe,]
)
rescaled.NEU_A <- rescaled[[1]]
rescaled.NEU_B <- rescaled[[2]]
set.seed(1000)
# Uncorrected log-counts restricted to the chosen genes
unc.NEU_A <- logcounts(rescaled.NEU_A)[chosen,]
unc.NEU_B <- logcounts(rescaled.NEU_B)[chosen,]
mnn.out <- batchelor::fastMNN(
  NEU_A=unc.NEU_A, NEU_B=unc.NEU_B,
  k=20, d=50, BSPARAM=IrlbaParam(deferred=TRUE)
)
mnn.out
sce.NEU <- mnn.out
# Compare TSNE on the batch-corrected space vs the uncorrected data
sce.NEU <- runTSNE(sce.NEU, use_dimred="corrected")
plotTSNE(sce.NEU, colour_by="batch") + ggtitle("Corrected")
assay(sce.NEU, "original") <- cbind(unc.NEU_A, unc.NEU_B)
osce.NEU <- runPCA(sce.NEU, exprs_values = "original",ntop = Inf, BSPARAM=IrlbaParam())
osce.NEU <- runTSNE(osce.NEU, use_dimred = "PCA")
plotTSNE(osce.NEU, colour_by = "batch") +ggtitle("Original")
#osce.NEU <- runUMAP(osce.NEU, use_dimred = "PCA")
#plotUMAP(osce.NEU, colour_by = "batch")
# Persist per-replicate and combined objects for downstream scripts
sceList <- list("sce_A" = sce.NEU_A, "sce_B" = sce.NEU_B, "combine"=sce.NEU)
saveRDS(sceList, "../data/Robjects/Neu_sceList.rds")
#--Plot Gene expression-----------------
# Selected genes on the integrated TSNE, using uncorrected expression values
genes <- c("Cd14","Bcl3","Osmr","Nfkbia","Aqp5","Kcnn4","Col9a1","Apoc1")
plt <- list()
for (gene in genes){
  tmp = plotTSNE(sce.NEU, by_exprs_values="original", colour_by = gene )+scale_fill_gradient2(name = gene, low='grey',high ='red')
  plt[[gene]] <- tmp
}
multiplot(plotlist=plt, cols = 2)
#--Clustering------------------------------------
snn.gr <- buildSNNGraph(sce.NEU, use.dimred="corrected", k=100) #k=100 3 cluster k=50: 4 cluster k=40 : 5clusters
clusters <- igraph::cluster_louvain(snn.gr)
table(clusters$membership, sce.NEU$batch)
sce.NEU$Cluster <- factor(clusters$membership)
plotTSNE(sce.NEU, colour_by="Cluster")
#-----------------------
# Marker detection on the UNcorrected values, blocking on batch
# (MNN-corrected values are not suitable for differential expression)
markers <- findMarkers(sce.NEU, sce.NEU$Cluster,block = sce.NEU$batch,
                       assay.type="original", direction="up")
for (cluster in names(markers)) {
  write.xlsx(data.frame(markers[[cluster]]), row.names=TRUE,
             file="NEU_combine_3clusters_MarkerGenes.xlsx",
             sheetName=cluster, append = TRUE)
  gc()
}
#===============================================================================
# NEU_A <- CreateSeuratObject(counts =m_NEU[,pD_NEU$Replicate == "A"] , project = "A", min.cells = 5)
# NEU_A$Rep <- "A"
# NEU_A <- NormalizeData(NEU_A, verbose = FALSE)
# NEU_A <- FindVariableFeatures(NEU_A, selection.method = "vst", nfeatures = 2000)
# # Set up stimulated object
# NEU_B <- CreateSeuratObject(counts = m_NEU[,pD_NEU$Replicate == "B"], project = "B", min.cells = 5)
# NEU_B$Rep <- "B"
# NEU_B <- NormalizeData(NEU_B, verbose = FALSE)
# NEU_B <- FindVariableFeatures(NEU_B, selection.method = "vst", nfeatures = 2000)
# anchors <- FindIntegrationAnchors(object.list = list(NEU_A, NEU_B), dims = 1:20)
# Neu <- IntegrateData(anchorset = anchors, dims = 1:20)
# DefaultAssay(Neu) <- "integrated"
# Neu <- ScaleData(Neu, verbose = FALSE)
# Neu <- RunPCA(Neu, npcs = 30, verbose=FALSE)
# Neu <- RunUMAP(Neu, reduction = "pca", dims = 1:20)
# Neu <- RunTSNE(Neu, reduction = "pca",dims = 1:20)
# DimPlot(Neu, reduction = "tsne", group.by = "Rep")
# Neu <- FindNeighbors(Neu, reduction ="pca", dims = 1:20)
# Neu <- FindClusters(Neu, resolution = 0.2)
# DimPlot(Neu, reduction = "tsne", label=T)
# DefaultAssay(Neu) <- "RNA"
# markers <- FindAllMarkers(Neu, only.pos = TRUE, min.pct = 0.25, logfc.threshold = 0.25)
# markers %>% group_by(cluster) %>% top_n(n = 10, wt = avg_logFC)
# FeaturePlot(Neu, features = c("Cd14"))
# head(dec)
# hvg.out_A <- dec[which(dec$FDR <= 0.1 & dec$bio >=0),]
# sce.NEU_A <- runPCA(sce.NEU,feature_set= rownames(hvg.out), BSPARAM=IrlbaParam())
# sce.NEU <- runTSNE(sce.NEU, use_dimred = "PCA")
# plotTSNE(sce.NEU, colour_by = "SampleID")
# sce.NEU <- runUMAP(sce.NEU, use_dimred ="PCA")
# plotUMAP(sce.NEU, colour_by = "SampleID")
# snn.gr <- buildSNNGraph(sce.NEU, use.dimred="PCA")
# clusters <- igraph::cluster_fast_greedy(snn.gr)
# sce.NEU$Cluster <- factor(clusters$membership)
# plotTSNE(sce.NEU, colour_by = "Cluster")
# rownames(sce) <- rowData(sce)$symbol
# sceList[[name]] <- sce
# saveRDS(sceList, "SingleSamples_norm_sce.rds")
# genes <- c("Tslp", "Ctla2a", 'H2-Ab1', "Il24", "Pdgfrl", 'Ly6a', "H2-Aa", "Slpi", "H2-Eb1")
# plotTSNE(sceList[["NEU"]], colour_by ="Cluster")
# plotUMAP(sce, colour_by ="Cluster")
# rownames(sce) <- rowData(sce)$symbol
# genes <- c("Mgp","Kctd1","Matn4","Cp","Mt1","Steap4","Selenop","Mt2")
# plt <- list()
# for (gene in genes){
# tmp = plotTSNE(sce.BRCA1, colour_by = gene )+scale_fill_gradient2(name = gene, low='grey',high ='red')
# plt[[gene]] <- tmp
# }
# multiplot(plotlist=plt, cols = 4)
# #=======================================================================
# fD_NEU <- fD %>% dplyr::mutate(keep = rowMeans(m_NEU) > 0.01)
# sce.NEU <- SingleCellExperiment(list(counts=as.matrix(m_NEU[fD_NEU$keep,])),
# colData = DataFrame(pD_NEU),
# rowData = DataFrame(fD_NEU[fD_NEU$keep,]))
# clusters <- quickCluster(sce.NEU, method ='igraph',use.ranks=FALSE, min.mean = 0.1)
# table(clusters)
# sce.NEU <- computeSumFactors(sce.NEU, min.mean = 0.1, clusters = clusters)
# summary(sizeFactors(sce.NEU))
# sce.NEU <- normalize(sce.NEU)
# table(sce.NEU$Replicate)
# batch <- c(rep("1", each=2242), rep("2", each = 1912))
# fit <- trendVar(sce.NEU, use.spikes = FALSE, block = batch)
# dec <- decomposeVar(sce.NEU, fit)
# dec$Symbol <- rowData(sce.NEU)$symbol
# dec <- dec[order(dec$bio, decreasing = TRUE), ]
# hvg <- dec[which(dec_A$FDR <= 0.1 & dec_A$bio >=0),]
# sce.NEU <- runPCA(sce.NEU, BSPARAM=IrlbaParam())
# sce.NEU <- runTSNE(sce.NEU, use_dimred = "PCA")
# plotTSNE(sce.NEU, colour_by = "SampleID")
# plotTSNE(sce.BRCA1, colour_by="Cluster")
# plotUMAP(sce.4T1, colour_by="Cluster")
# snn.gr <- buildSNNGraph(sce.BRCA1, use.dimred="PCA", k = 50)
# clusters <- igraph::cluster_fast_greedy(snn.gr)
# table(clusters$membership)
# sce.BRCA1$Cluster <- factor(clusters$membership)
# jgc <- function()
# {
# .jcall("java/lang/System", method = "gc")
# }
# options(java.parameters = "-Xmx8g")
# markers <- findMarkers(sce.BRCA1, sce.BRCA1$Cluster, direction="up")
# library(xlsx)
# for (cluster in names(markers)) {
# write.xlsx(data.frame(markers[[cluster]]), row.names=TRUE,
# file="BRCA1_MarkerGenes.xlsx",
# sheetName=cluster, append = TRUE)
# jgc()
# }
# sce.4T1 <- sceList[["4T1"]]
# plotTSNE(sce.4T1, colour_by="Cluster")
# plotUMAP(sce.4T1, colour_by="Cluster")
# markers <- findMarkers(sce.4T1, sce.4T1$Cluster, direction="up")
# for (cluster in names(markers)) {
# write.xlsx(data.frame(markers[[cluster]]), row.names=TRUE,
# file="4T1_MarkerGenes.xlsx",
# sheetName=cluster, append = TRUE)
# gc()
# }
# sce.NEU <- sceList[["NEU"]]
# plotTSNE(sce.NEU, colour_by="Cluster")
# plotUMAP(sce.NEU, colour_by="Cluster")
# markers <- findMarkers(sce.NEU, sce.NEU$Cluster, direction="up")
# for (cluster in names(markers)) {
# write.xlsx(data.frame(markers[[cluster]]), row.names=TRUE,
# file="Neu_MarkerGenes.xlsx",
# sheetName=cluster, append = TRUE)
# gc()
# }
# sce.FF99WT <- sceList[["FF99WT"]]
# plotTSNE(sce.FF99WT, colour_by="Cluster")
# snn.gr <- buildSNNGraph(sce.FF99WT, use.dimred="PCA", k = 50)
# clusters <- igraph::cluster_fast_greedy(snn.gr)
# table(clusters$membership)
# sce.FF99WT$Cluster <- factor(clusters$membership)
# plotTSNE(sce.FF99WT, colour_by="Cluster")
# plotUMAP(sce.FF99WT, colour_by="Cluster")
# markers <- findMarkers(sce.FF99WT, sce.FF99WT$Cluster, direction="up")
# for (cluster in names(markers)) {
# write.xlsx(data.frame(markers[[cluster]]), row.names=TRUE,
# file="FF99WT_MarkerGenes.xlsx",
# sheetName=cluster, append = TRUE)
# gc()
# }
# #====================================================================
# top.markers_c <- c()
# for (i in 1:3){
# marker.tmp <- markers[[i]]
# top.tmp <- rownames(marker.tmp)[marker.tmp$Top <=10]
# top.markers_c <- c(top.markers_c, top.tmp)
# }
# top.markers_c <- top.markers_c[!duplicated(top.markers_c)]
# top.exprs <- logcounts(sce.BRCA1)[top.markers_c,,drop=FALSE]
# heat.vals <- top.exprs - rowMeans(top.exprs)
# pheatmap(heat.vals, cluster_cols=TRUE,
# show_colnames = FALSE,
# annotation_col=data.frame(Cluster = factor(sce.BRCA1$Cluster),
# row.names=colnames(sce.BRCA1)),
# fontsize_row = 7)
# #-------------------------------------------
# universe <- intersect(rownames(decList[[1]]), rownames(decList[[2]]))
# mean.bio <- (decList[[1]][universe, "bio"] + decList[[2]][universe, "bio"])/2
# chosen <- universe[mean.bio >0]
# length(chosen)
# rescaled <- batchelor::multiBatchNorm(sceList[[1]][universe, ], sceList[[2]][universe,])
# #-------------------------------------------
# repA <- logcounts(rescaled[[1]])[chosen,]
# repB <- logcounts(rescaled[[2]])[chosen,]
# mnn.out <- batchelor::fastMNN(repA = repA, repB=repB, k = 20, d = 50, BSPARAM=IrlbaParam(deferred=TRUE))
# dim(reducedDim(mnn.out,"corrected"))
# Rle(mnn.out$batch)
# metadata(mnn.out)$merge.info$pairs[[1]]
# #-------------------------------------
# sce <- mnn.out
# assay(sce,"original") <- cbind(repA, repB)
# sce <- SingleCellExpriment(list(logcounts = omat))
# reducedDim(sce, "MNN") <- mnn.out$corrected
# sce$Batch <- as.character(mnn.out$batch)
# sce
# #---------------------------------------------
# osce <- runPCA(sce, exprs_values="original",ntop = Inf, BSPARAM=IrlbaParam())
# osce <- runTSNE(osce, use_dimred = "PCA")
# ot <- plotTSNE(osce, colour_by="batch") + ggtitle("Original")
# csce <- runTSNE(sce, use_dimred = "corrected")
# ct <- plotTSNE(csce, colour_by = "batch") + ggtitle("Corrected")
# multiplot(ot, ct, cols = 2)
# metadata(mnn.out)$merge.info$lost.var
|
66680f5f6a5c1a968078d5cf205390fe17cce858
|
3ad8b5f48b75f88338c8a50fc5ffdeebca9e8081
|
/analysis.r
|
828d149ce699fcc1efeb2bac0370438833c8b0b7
|
[] |
no_license
|
mnunes/IMDb
|
13ffacf00339c7dbc0c1111977b40b937a83af93
|
c3c34bddf818d283f164b29543cc2d2ba8dc3e81
|
refs/heads/master
| 2021-06-22T19:30:05.636927
| 2017-08-19T22:47:53
| 2017-08-19T22:47:53
| 40,671,881
| 2
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,771
|
r
|
analysis.r
|
# NOTE(review): machine-specific absolute path -- setwd() in scripts breaks
# portability; consider running from the project directory instead.
setwd("~/Documents/Lectures/UFRN/EST0113 - Introdução à Estatística e Probabilidade/Material/Unidade II/04 - Análises Gerais/")
library(ggplot2)  # plotting
library(dplyr)    # select/group_by/summarise pipelines
###################
### Game of Thrones
# Read the three parallel per-episode vectors and combine them into one frame
got.season <- scan(file="got.season.dat")
got.episode <- scan(file="got.episode.dat")
got.rating <- scan(file="got.rating.dat")
got <- data.frame(temporada = got.season,
                  episodio = got.episode,
                  rating = got.rating)
# Chronological order: season first, then episode within season
ordem <- order(got.season * 100 + got.episode)
got <- got[ordem, ]
# Replace the within-season episode number by a running index over the whole
# series, and treat the season as a discrete label for plotting
got$episodio <- seq_len(nrow(got))
got$temporada <- as.character(got$temporada)
head(got)
# plots: ratings over time with one loess trend per season
ggplot(got, aes(x=episodio, y=rating, color=temporada)) +
  labs(title="Game of Thrones: Ratings por Temporada", x="Episódio", y="Rating", colour="Temporada") +
  geom_smooth(method=loess, se=FALSE) +
  geom_point(shape=1) +
  theme(plot.title = element_text(hjust = 0.5))
# Per-season summary statistics (extrema positions are within-season indices)
got %>%
  select(temporada, rating) %>%
  group_by(temporada) %>%
  summarise(media = mean(rating),
            mediana = median(rating),
            desvPad = sd(rating),
            maximo = max(rating),
            episodio_max = which.max(rating),
            minimo = min(rating),
            episodio_min = which.min(rating))
# Rating distribution per season
ggplot(got, aes(x=temporada, y=rating, color=temporada)) +
  labs(title="Game of Thrones: Ratings por Temporada", x="Temporada", y="Rating", colour="Temporada") +
  geom_boxplot() +
  theme(plot.title = element_text(hjust = 0.5))
####################
### The Walking Dead
# Read the three parallel per-episode vectors and combine them into one frame
twd.season <- scan(file="twd.season.dat")
twd.episode <- scan(file="twd.episode.dat")
twd.rating <- scan(file="twd.rating.dat")
twd <- data.frame(temporada = twd.season,
                  episodio = twd.episode,
                  rating = twd.rating)
# Chronological order: season first, then episode within season
ordem <- order(twd.season * 100 + twd.episode)
twd <- twd[ordem, ]
# Replace the within-season episode number by a running index over the whole
# series, and treat the season as a discrete label for plotting
twd$episodio <- seq_len(nrow(twd))
twd$temporada <- as.character(twd$temporada)
head(twd)
# plots: ratings over time with one loess trend per season
ggplot(twd, aes(x=episodio, y=rating, color=temporada)) +
  labs(title="The Walking Dead: Ratings por Temporada", x="Episódio", y="Rating", colour="Temporada") +
  geom_smooth(method=loess, se=FALSE) +
  geom_point(shape=1) +
  theme(plot.title = element_text(hjust = 0.5))
# Per-season summary statistics (extrema positions are within-season indices)
twd %>%
  select(temporada, rating) %>%
  group_by(temporada) %>%
  summarise(media = mean(rating),
            mediana = median(rating),
            desvPad = sd(rating),
            maximo = max(rating),
            episodio_max = which.max(rating),
            minimo = min(rating),
            episodio_min = which.min(rating))
# Rating distribution per season
ggplot(twd, aes(x=temporada, y=rating, color=temporada)) +
  labs(title="The Walking Dead: Ratings por Temporada", x="Temporada", y="Rating", colour="Temporada") +
  geom_boxplot() +
  theme(plot.title = element_text(hjust = 0.5))
|
1db3a0391b36a69f5fd5e2e5557a8f7ca0974fd7
|
b8236fb6a92f2f34e254fc1d0a92833e2ac7e952
|
/pkg/R/plotCatCol.R
|
f7d5008726cf3f2074e4199ee2aaa6b7b3371d24
|
[] |
no_license
|
mxc19912008/tabplot
|
0054761ba8e2edfb75b09bc49bec3e2c781e19c1
|
df979f12323dafe27731381329114e96b7553be8
|
refs/heads/master
| 2020-06-16T19:51:21.619864
| 2016-11-02T16:35:51
| 2016-11-02T16:35:51
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 4,094
|
r
|
plotCatCol.R
|
## Draw one categorical column of a tableplot: a stacked-bar graph panel
## plus a matching legend panel.
##
## Arguments (as used below; supplied by the calling tableplot code):
##   tCol             - list describing the column: $categories, $palet,
##                      $palet_recycled, $colorNA, $x, $widths, $rev_legend.
##   tab              - list describing the table: $nBins, $rows$y, $rows$heights.
##   vpTitle, vpGraph, vpLegend - viewport identifiers for the title, graph
##                      and legend cells (vpTitle is accepted but unused here).
##   max_print_levels - above this many categories the legend is "spread":
##                      a continuous color bar with only a subset of labels.
##   text_NA          - legend label for the missing-value category.
##   legend.lines     - number of text lines available in the legend cell.
##   compare          - logical; TRUE draws the panel on a grey background
##                      with a white gap of width `midspace` in the middle.
##
## Called for its side effects (draws into the current grid device).
plotCatCol <- function(tCol, tab, vpTitle, vpGraph, vpLegend, max_print_levels, text_NA, legend.lines, compare){
midspace <- .05
drawContours <- TRUE
## By the caller's convention, a trailing "missing" category marks NAs.
anyNA <- tail(tCol$categories, 1)=="missing"
categories <- tCol$categories
if (anyNA) categories <- categories[-length(categories)]
nCategories <- length(categories)
## spread: too many categories to print every legend label individually
spread <- (nCategories > max_print_levels)
## determine color indices for categories
palet <- if (tCol$palet_recycled) {
rep(tCol$palet, length.out = nCategories)
} else {
colorRampPalette(tCol$palet)(nCategories)
}
if (anyNA) {
## one extra color slot for the missing-value category
palet[nCategories+1] <- tCol$colorNA
}
if (compare) {
## NOTE(review): marks.x is computed but never used below -- confirm
## whether this is leftover code or needed by a caller-side convention.
marks.x <- seq(0, 1, length.out=5)
}
mgrey <- "#D8D8D8"
## ---- graph panel: stacked bars, one stack per bin ----
cellplot(2,1,vpGraph, {
if (compare) grid.rect(gp = gpar(col=mgrey,fill = mgrey))
## create large vector of colors (one color for each bin*category)
colorset <- rep(palet, each=tab$nBins)
## zero-width bins get no outline so they do not show up as hairlines
missings <- which(tCol$widths==0)
if (drawContours) {
cols <- colorset
cols[missings] <- NA
} else {
cols <- NA
}
## draw bins
grid.rect( x = tCol$x, y = tab$rows$y
, width = tCol$widths, height = tab$rows$heights
, just=c("left","bottom")
, gp = gpar(col=cols, fill = colorset, linejoin="mitre", lwd=0))
## draw white rect at the right to correct for rounding errors during plotting
# grid.rect(x = 1, y=-.005, width=0.1, height=1.01, just=c("left", "bottom"),
# gp=gpar(col=NA, fill="white"))
if (compare) grid.rect(width=midspace, gp = gpar(col="white", fill = "white"))
})
## draw legend
cellplot(3,1, vpLegend, {
## number of real labels shown in a spread legend; every other legend
## row is a "..." filler, hence the *2 - 1 row count
nLegendSpread <- min(((legend.lines-1) %/% 2) + 1, max_print_levels, nCategories)
nLegendSpreadRows <- nLegendSpread * 2 -1
## two extra rows for the missing-value entry when NAs are present
nLegendRows <- ifelse(spread, nLegendSpreadRows, nCategories) + 2 * anyNA
Layout2 <- grid.layout(nrow = nLegendRows, ncol = 1 + spread,
widths=if(spread) c(0.25, 0.75) else {1})
## shrink text so all legend rows fit in the available height
cex <- min(1, 1 / (convertHeight(unit(1,"lines"), "npc", valueOnly=TRUE)
* nLegendRows))
pushViewport(viewport(name="legendblocks", layout = Layout2, gp=gpar(cex=cex)))
#print(current.vpPath())
grid.rect(gp=gpar(col=NA, fill="white"))
if (spread) {
## spread legend: one continuous color bar with a subset of labels
if (tCol$rev_legend) {
palet <- rev(palet)
}
cellplot(1:nLegendSpreadRows,1, NULL, {
grid.rect( x = 0, y = seq(1, 0, length.out=nCategories+1)[-(nCategories+1)]
, width = 0.8, height = 1/nCategories
, just=c("left", "top")
, gp = gpar(col=palet, fill = palet)
)
})
## odd rows carry real category labels; even rows stay "..."
labels <- rep("...", nLegendSpreadRows)
labels[seq(1, nLegendSpreadRows, by=2)] <- tCol$categories[seq(1, nCategories - anyNA, length.out=nLegendSpread)]
for (j in 1:nLegendSpreadRows) {
k <- ifelse(tCol$rev_legend, (nLegendSpreadRows+1)-j, j)
cellplot(j,2, NULL, {
grid.text( labels[k]
, x = 0
, just="left")
})
}
if (anyNA) {
## missing-value entry at the bottom of the legend.
## NOTE(review): if rev_legend is TRUE, palet was reversed above, so
## palet[nCategories + 1] may no longer be the NA color -- confirm.
cellplot(nLegendRows, 1, NULL, {
grid.rect( x = 0, y = 0.5, width = 0.8, height = 1
, just=c("left")
, gp = gpar(col=palet[nCategories + 1],
fill = palet[nCategories + 1])
)
})
cellplot(nLegendRows, 2, NULL, {
grid.text( text_NA
, x = 0
, just="left")
})
}
} else {
## regular legend: one colored box + label per category
for (j in 1:nCategories) {
k <- ifelse(tCol$rev_legend, (nCategories + 1) - j, j)
cellplot(j,1, NULL, {
grid.rect( x = 0, y = 0.5, width = 0.2, height = 1
, just=c("left")
, gp = gpar(col=palet[k], fill = palet[k])
)
grid.text( categories[k]
, x = 0.25
, just="left")
})
}
if (anyNA) {
cellplot(nLegendRows, 1, NULL, {
grid.rect( x = 0, y = 0.5, width = 0.2, height = 1
, just=c("left")
, gp = gpar(col=palet[nCategories + 1],
fill = palet[nCategories + 1])
)
grid.text( text_NA
, x = 0.25
, just="left")
})
}
}
popViewport(1)
})
}
|
08b97a33fbe83961ce2bf93b3ad84715b21ed8d1
|
f64bc106b92095472c45346309afbc50033cc3ee
|
/rcode_realtor.R
|
2783900a34edd1da7537cd3fd2a1c2892185f192
|
[] |
no_license
|
hannahjonesut/Realtor.com
|
a27c7979b3c336fe95bb45c0aae246f09b0d0083
|
d13f142472d3651562a5cae846353c75a43680c5
|
refs/heads/main
| 2023-06-11T02:38:45.434877
| 2021-06-21T19:54:23
| 2021-06-21T19:54:23
| 378,786,867
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 4,041
|
r
|
rcode_realtor.R
|
## Exploratory analysis of Realtor.com inventory data (national + metro).
## Produces: list-price and listing-count trends by household-density rank,
## and days-on-market vs. share-of-price-increases scatter plots.
library(dplyr)
library(tidyverse)
library(tidyr)
library(gamlr)
library(foreach)
library(ggplot2)
library(stringr)
## NOTE(review): these raw.githubusercontent URLs carry personal access
## tokens that expire -- the reads will fail once the tokens lapse.
usa_data<- read.csv('https://raw.githubusercontent.com/hannahjonesut/Realtor.com/main/RDC_Inventory_Country_History_Assessment.csv?token=ASRSTUEJMVJZYFZFQAVYF2TAZ76HS')
metro_data <- read.csv('https://raw.githubusercontent.com/hannahjonesut/Realtor.com/main/RDC_Inventory_Metro_History_Assessment.csv?token=ASRSTUHRS3O5BGP42ZDXU23AZ76JS')
#hh rank is based on HH count in zip code, with 1 being greatest aka most dense
#https://www.realtor.com/research/data/
## Bucket HouseholdRank into 10 groups of five ranks each (1 = densest),
## then average the price/listing metrics per bucket and month.
metro_hh <- metro_data %>%
mutate(rank_simple = ifelse(HouseholdRank>=1 & HouseholdRank<=5, 1,
ifelse(HouseholdRank>5 & HouseholdRank<=10, 2,
ifelse(HouseholdRank>10 & HouseholdRank<=15, 3,
ifelse(HouseholdRank>15 & HouseholdRank<=20, 4,
ifelse(HouseholdRank>20 & HouseholdRank<=25, 5,
ifelse(HouseholdRank>25 & HouseholdRank<=30, 6,
ifelse(HouseholdRank>30 & HouseholdRank<=35, 7,
ifelse(HouseholdRank>35 & HouseholdRank<=40, 8,
ifelse(HouseholdRank>40 & HouseholdRank<=45, 9,
ifelse(HouseholdRank>45, 10, 0)))))))))))%>%
group_by(rank_simple, month_date_yyyymm)%>%
summarize( avg_listprice = mean(average_listing_price), new_list_count = mean(new_listing_count), days_on_mkt = mean(median_days_on_market))
## Trend of average list price per density bucket over time.
ggplot(data = metro_hh)+
geom_smooth(aes(x = month_date_yyyymm, y = avg_listprice, color = as.factor(rank_simple)))+
labs(x="Date (YYYYMM)", y = "Average List Price", legend = "Household Rank (1 = most households, 10 = least households)", title = "Average List Price by Household Rank (2016 - 2021)")
## Trend of new-listing counts per density bucket over time.
ggplot(data = metro_hh)+
geom_smooth(aes(x = month_date_yyyymm, y = new_list_count, color = as.factor(rank_simple)))+
labs(x="Date (YYYYMM)", y = "Total Number of Listings", legend = "Household Rank (1 = most households, 10 = least households)", title = "Total Listings by Household Rank (2016 - 2021)")
## National data from Jan 2020 on; split YYYYMM into year/month and compute
## the share of listings whose price increased.
usa_2020_2021 <- usa_data%>%
filter(month_date_yyyymm >= 202001)%>%
mutate(year = as.numeric(substr(month_date_yyyymm, 1, 4)), month = as.numeric(substr(month_date_yyyymm, 5, 6)), pct_inc = price_increased_count/total_listing_count)
#look at percent of price increased over total listings
ggplot(data = usa_2020_2021)+
geom_point(aes(x = median_days_on_market, y = pct_inc))+
facet_grid(cols = vars(month), rows = vars(year))+
labs(x="Median Days on Market by Month", y = "% of Listings that Increased Price by Year" , title = "Days on Market vs % Price Increased, Monthly")
#price reduced freq as a fn of days on mkt-- when do people drop price?
## NOTE(review): this rebuilds (and overwrites) usa_2020_2021 at quarterly
## granularity; the monthly version above is no longer available afterwards.
usa_2020_2021 <- usa_data%>%
filter(month_date_yyyymm >= 202001)%>%
mutate(year = as.numeric(substr(month_date_yyyymm, 1, 4)), month = as.numeric(substr(month_date_yyyymm, 5, 6)))%>%
mutate(quarter = ifelse(month>0 & month <=3, 1,
ifelse(month>3 & month <=6, 2,
ifelse(month>6 & month <=9, 3,
ifelse(month>9 & month<=12, 4, 0))))) %>%
group_by(year, quarter)%>%
summarize(pct_inc =mean(price_increased_count/total_listing_count), med_days_on_mkt = median(median_days_on_market))
#look at percent of price increased over total listings
ggplot(data = usa_2020_2021)+
geom_point(aes(x = med_days_on_mkt, y = pct_inc))+
facet_grid(cols = vars(quarter), rows = vars(year))+
labs(x="Median Days on Market by Month", y = "% of Listings that Increased Price by Year" , title = "Days on Market vs % Price Increased, Monthly")
|
ced1b69196bdcc8fe3683a1ae79bd8b08ac412d7
|
92befee27f82e6637c7ed377890162c9c2070ca9
|
/man/data.math.Rd
|
5e516d23118579066883baca3de32e736f8e3dcd
|
[] |
no_license
|
alexanderrobitzsch/sirt
|
38e72ec47c1d93fe60af0587db582e5c4932dafb
|
deaa69695c8425450fff48f0914224392c15850f
|
refs/heads/master
| 2023-08-31T14:50:52.255747
| 2023-08-29T09:30:54
| 2023-08-29T09:30:54
| 95,306,116
| 23
| 11
| null | 2021-04-22T10:23:19
| 2017-06-24T15:29:20
|
R
|
UTF-8
|
R
| false
| false
| 2,288
|
rd
|
data.math.Rd
|
%% File Name: data.math.Rd
%% File Version: 0.15
\name{data.math}
\alias{data.math}
\docType{data}
\title{
Dataset Mathematics
}
\description{
This is an example dataset involving Mathematics items for
German fourth graders. Items are classified into several domains and
subdomains (see Section Format).
The dataset contains 664 students on 30 items.
}
\usage{data(data.math)}
\format{
The dataset is a list. The list element \code{data}
contains the dataset with the demographic variables
student ID (\code{idstud}) and a dummy variable
for female students (\code{female}). The remaining
variables (starting with \code{M} in the name) are
the mathematics items. \cr
The item metadata are included in the list element
\code{item} which contains item name (\code{item}) and the
testlet label (\code{testlet}). An item not included
in a testlet is indicated by \code{NA}.
Each item is allocated to one and only one competence domain (\code{domain}).
\cr
The format is:
\code{List of 2} \cr
\code{ $ data:'data.frame':} \cr
\code{ ..$ idstud: int [1:664] 1001 1002 1003 ...} \cr
\code{ ..$ female: int [1:664] 1 1 0 0 1 1 1 0 0 1 ...} \cr
\code{ ..$ MA1 : int [1:664] 1 1 1 0 0 1 1 1 1 1 ...} \cr
\code{ ..$ MA2 : int [1:664] 1 1 1 1 1 0 0 0 0 1 ...} \cr
\code{ ..$ MA3 : int [1:664] 1 1 0 0 0 0 0 1 0 0 ...} \cr
\code{ ..$ MA4 : int [1:664] 0 1 1 1 0 0 1 0 0 0 ...} \cr
\code{ ..$ MB1 : int [1:664] 0 1 0 1 0 0 0 0 0 1 ...} \cr
\code{ ..$ MB2 : int [1:664] 1 1 1 1 0 1 0 1 0 0 ...} \cr
\code{ ..$ MB3 : int [1:664] 1 1 1 1 0 0 0 1 0 1 ...} \cr
\code{ [...]} \cr
\code{ ..$ MH3 : int [1:664] 1 1 0 1 0 0 1 0 1 0 ...} \cr
\code{ ..$ MH4 : int [1:664] 0 1 1 1 0 0 0 0 1 0 ...} \cr
\code{ ..$ MI1 : int [1:664] 1 1 0 1 0 1 0 0 1 0 ...} \cr
\code{ ..$ MI2 : int [1:664] 1 1 0 0 0 1 1 0 1 1 ...} \cr
\code{ ..$ MI3 : int [1:664] 0 1 0 1 0 0 0 0 0 0 ...} \cr
\code{ $ item:'data.frame':} \cr
\code{ ..$ item : Factor w/ 30 levels "MA1","MA2","MA3",..: 1 2 3 4 5 ...} \cr
\code{ ..$ testlet : Factor w/ 9 levels "","MA","MB","MC",..: 2 2 2 2 3 3 ...} \cr
\code{ ..$ domain : Factor w/ 3 levels "arithmetic","geometry",..: 1 1 1 ...} \cr
\code{ ..$ subdomain: Factor w/ 9 levels "","addition",..: 2 2 2 2 7 7 ...} \cr
}
%% \keyword{datasets}
|
0689d3c8a479e6cd7979adc884cc2da457649eff
|
401d58ce50f49caa41321ce84b77d49e1dc0ac85
|
/binder/install.R
|
9324532aacfd82cf4cb5e84c8aea71b96e7d241e
|
[] |
no_license
|
StateOfTheR/finistR2020
|
7fcfd0106cc61c7e59a1acad932b1232a62085b3
|
a514f7ebe840b57591e8168dcd64289277a02e87
|
refs/heads/master
| 2022-12-17T16:27:47.622897
| 2020-09-08T11:08:19
| 2020-09-08T11:08:19
| 278,055,173
| 2
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,155
|
r
|
install.R
|
## Binder/RStudio setup script: installs all R dependencies for the project.
## Point all subsequent install.packages() calls at the main CRAN mirror.
local({
r <- getOption("repos")
r["CRAN"] <- "https://cloud.r-project.org"
options(repos = r)
})
## non CRAN packages
## (installed straight from GitHub via remotes; commented lines are
## dependencies that were disabled but kept for reference)
remotes::install_github("https://github.com/rstudio-education/gradethis")
remotes::install_github("StateOfTheR/optimLibR")
remotes::install_github("mlverse/torch")
remotes::install_github("Chabert-Liddell/MLVSBM")
remotes::install_github("Demiperimetre/GREMLIN")
remotes::install_github("rstudio/d3heatmap")
remotes::install_github("jchiquet/PLNmodels")
## remotes::install_github("RamiKrispin/coronavirus")
## remotes::install_github("dreamRs/topogram")
## remotes::install_github("ropensci/rnaturalearthhires")
## CRAN packages not found in conda
install.packages("rkeops")
install.packages("sbm")
install.packages("swirlify")
install.packages("palmerpenguins")
install.packages("ggiraph")
install.packages("timevis")
install.packages("ggraph")
install.packages("fields")
install.packages("slider")
install.packages("fable")
install.packages("fabletools")
install.packages("ranger")
## Julia and co
## NOTE(review): devtools is used here while remotes is used above; both
## work, but one helper could be used consistently.
devtools::install_github("Non-Contradiction/JuliaCall")
library(JuliaCall)
# Downloads/initialises the Julia runtime on first use.
julia <- julia_setup()
install.packages("diffeqr")
abb3438443d7dc37602cce3f812f5a56b58cca34
|
1aa8276f7e7a20e53c7b0c3d373df7ada2bc1f9b
|
/flyingpigeon/Rsrc/climod/namelist.R
|
38b44bdb5773b6fa9f1da002f5d05ee89148298a
|
[
"Apache-2.0"
] |
permissive
|
Ouranosinc/flyingpigeon
|
a6605b195483684e848afbfd1dbcef7d8a3fb8eb
|
657c4023e128342f380c847103e5fd78edad17db
|
refs/heads/master
| 2021-01-11T03:58:20.808105
| 2019-08-19T17:02:47
| 2019-08-19T17:02:47
| 71,271,393
| 1
| 0
|
Apache-2.0
| 2018-10-11T17:32:05
| 2016-10-18T17:02:06
|
Jupyter Notebook
|
UTF-8
|
R
| false
| false
| 625
|
r
|
namelist.R
|
##' Construct a list with automatic names
##'
##' Builds a list from its arguments and names each element after the
##' expression supplied for it, so \code{namelist(x, y)} is equivalent to
##' \code{list(x = x, y = y)}.
##'
##' @param ... objects to add to the list.
##'
##' @return A list whose elements are named after the supplied arguments.
##'
##' @examples
##' x <- 3
##' y <- "a string"
##' z <- function(x){x^3 +4}
##' n <- namelist(x,y,z)
##' str(n)
##'
##' @export
namelist <- function(...){
out <- list(...)
# Capture the unevaluated argument expressions; dropping the first
# element removes the `list` symbol itself. Assigning the expression
# list to names() coerces each expression to its character form.
arg.exprs <- as.list(substitute(list(...)))[-1L]
names(out) <- arg.exprs
return(out)
}
### Copyright 2015 Univ. Corp for Atmos. Research
### Author: Seth McGinnis, mcginnis@ucar.edu
|
636436e6916b317c0d3a0230258da1e180f5c345
|
0856e5e6ee080e36e9e24f5f8f514e9229ee5a75
|
/tests/testthat/test-misc.R
|
504889363c995860e19f49d0a6dcd972035882e1
|
[] |
no_license
|
ncrna/DropletUtils
|
fd03bf42ad93ba0aec731ab781e44d99ce6ccc62
|
755a6a5b2fc0408eac50eccb88739c17d86463d2
|
refs/heads/master
| 2023-09-01T13:17:28.382840
| 2021-09-18T20:31:36
| 2021-09-18T20:32:12
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,599
|
r
|
test-misc.R
|
# This checks out the barcodeRanks and defaultDrops functions.
# library(DropletUtils); library(testthat); source("test-misc.R")
# Mocking up some counts. Shared fixtures for all tests in this file:
# a simulated count matrix and its per-barcode totals.
set.seed(100)
my.counts <- DropletUtils:::simCounts()
totals <- Matrix::colSums(my.counts)
test_that("barcodeRanks runs to completion", {
limit <- 100
brout <- barcodeRanks(my.counts, lower=limit)
# Totals and ranks must match what we compute directly from the matrix.
expect_equal(brout$total, totals)
expect_identical(brout$rank, rank(-totals, ties.method="average"))
# Barcodes at or below the lower bound get no fitted value.
expect_true(all(is.na(brout$fitted[totals <= limit])))
# Trying again with a higher limit; the test expects identical output
# for lower=100 and lower=200 on this simulation.
limit2 <- 200
brout2 <- barcodeRanks(my.counts, lower=limit2)
expect_identical(brout, brout2)
# Specifying the boundaries. Only totals strictly inside fit.bounds
# should receive fitted values.
bounds <- c(200, 1000)
brout3 <- barcodeRanks(my.counts, lower=limit, fit.bounds=bounds)
is.okay <- totals > bounds[1] & totals < bounds[2]
expect_true(all(is.na(brout3$fitted[!is.okay])))
expect_true(all(!is.na(brout3$fitted[is.okay])))
# Respecting column names: row names of the result follow the input's
# column names, while the per-column vectors stay unnamed.
alt <- my.counts
colnames(alt) <- sprintf("BARCODE_%i", seq_len(ncol(alt)))
brout2 <- barcodeRanks(alt)
expect_identical(rownames(brout2), colnames(alt))
expect_identical(names(brout2$rank), NULL)
expect_identical(names(brout2$total), NULL)
expect_identical(names(brout2$fitted), NULL)
# Trying out silly inputs.
expect_error(barcodeRanks(my.counts[,0]), "insufficient")
expect_error(barcodeRanks(my.counts[0,]), "insufficient")
})
# Checks that the `exclude.from` argument shifts/limits the knee-point
# search window both in the internal helper and in barcodeRanks() itself.
test_that("barcodeRanks' excluder works correctly", {
brout <- barcodeRanks(my.counts)
# Reproduce the preprocessing barcodeRanks applies before the bounds
# search: drop low/duplicated totals, log-transform, sort by rank.
keep <- brout$total >= 100 & !duplicated(brout$total)
x <- log10(brout$rank[keep])
y <- log10(brout$total[keep])
o <- order(x)
x <- x[o]
y <- y[o]
# Compares correctly to a reference: excluding the first 100 points via
# exclude.from must match trimming them off manually (with an index shift).
edge.out <- DropletUtils:::.find_curve_bounds(x=x, y=y, exclude.from=100)
ref.out <- DropletUtils:::.find_curve_bounds(x=tail(x, -100), y=tail(y, -100), exclude.from=0)
expect_identical(edge.out, ref.out+100)
# The equivalence is specific to the exclusion point actually used above;
# a different trim should NOT reproduce the exclude.from=200 result.
edge.outx <- DropletUtils:::.find_curve_bounds(x=x, y=y, exclude.from=200)
ref.outx <- DropletUtils:::.find_curve_bounds(x=tail(x, -200), y=tail(y, -200), exclude.from=0)
expect_false(identical(edge.outx, ref.outx+200))
# Proper edge behavior: no exclusion changes the left bound only, and
# excluding everything pushes both bounds to the end of the curve.
edge.out2 <- DropletUtils:::.find_curve_bounds(x=x, y=y, exclude.from=0)
expect_identical(edge.out[2], edge.out2[2])
expect_false(identical(edge.out[1], edge.out2[1]))
edge.out3 <- DropletUtils:::.find_curve_bounds(x=x, y=y, exclude.from=Inf)
expect_identical(unname(edge.out3[1]), length(y)-1)
expect_identical(unname(edge.out3[2]), length(y)-1)
# Works properly when put together: each exclude.from setting should give
# a result different from the default.
ref <- barcodeRanks(my.counts)
brout <- barcodeRanks(my.counts, exclude.from=0)
expect_false(identical(ref, brout))
brout2 <- barcodeRanks(my.counts, exclude.from=200)
expect_false(identical(ref, brout2))
brout3 <- barcodeRanks(my.counts, exclude.from=Inf)
# FIX: previously re-tested brout2 here (copy-paste), so the
# exclude.from=Inf result was computed but never checked.
expect_false(identical(ref, brout3))
})
# Checks defaultDrops' cell calls over its parameter range and on
# degenerate (empty) inputs.
test_that("defaultDrops runs to completion", {
out <- defaultDrops(my.counts)
# Should always call at least one cell (100th %ile cell)
expect_true(sum(out)>0)
out <- defaultDrops(my.counts, lower.prop=0) # should keep all non-zero cells.
expect_true(all(out | totals==0))
out <- defaultDrops(my.counts, upper.quant=1, lower.prop=1) # as it's >, not >=.
expect_true(!any(out))
# Works alright on silly inputs: zero columns -> empty logical; zero rows
# -> all-FALSE logical of the right length.
expect_identical(logical(0), defaultDrops(my.counts[,0]))
expect_identical(logical(ncol(my.counts)), defaultDrops(my.counts[0,]))
})
|
9f648eaf8293b3bb4e944f49c1caa282d4150bc7
|
21aeae41e2bb75c6a600d4ef03adb96be33dfef8
|
/man/emxCovariances.Rd
|
e656458e71262966c70d629ef4b6bb4a7593aa56
|
[] |
no_license
|
cran/EasyMx
|
32a661f4363625debfe28ec299625f70c199e516
|
5a60b1553b3a77dcc0bf6dec6064fe3b12ce2e17
|
refs/heads/master
| 2023-02-09T06:45:50.308496
| 2023-01-30T21:00:07
| 2023-01-30T21:00:07
| 89,990,463
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,959
|
rd
|
emxCovariances.Rd
|
\name{emxCovariances}
\alias{emxCovariances}
\title{Create a set of covariances}
\description{
This function creates a covariance matrix as an MxMatrix or MxPath object.
}
\usage{
emxCovariances(x, values, free, path=FALSE, type, name='Variances')
}
\arguments{
\item{x}{character vector. The names of the variables for which covariances are created.}
\item{values}{numeric vector. See Details.}
\item{free}{logical vector. See Details.}
\item{path}{logical. Whether to return the MxPath object instead of the MxMatrix.}
\item{type}{character. The kind of covariance structure to create. See Details.}
\item{name}{The name of the matrix created.}
}
\details{
Possible values for the \code{type} argument are 'independent', 'full', and 'corr'. When \code{type='independent'}, the remaining arguments are passes to \code{\link{emxResiduals}}. The \code{values} and \code{free} arguments are only used when the \code{type} argument is 'independent'. For all other cases, they are ignored.
When \code{type='full'}, a full covariance matrix is created. That is, a symmetric matrix is created with all unique elements freely estimated. The starting values for the variances are all 1; for the covariances, all 0.5.
When \code{type='corr'}, a full correlation matrix is created. That is, a symmetric matrix is created with all unique elements not on the diagonal freely estimated. The starting values for the correlations are all 0.5. The variances are fixed at 1.
}
\value{
Depending on the value of the \code{path} argument, either an MxMatrix or an MxPath object that can be inspected, modified, and/or included in MxModel objects.
}
\seealso{
\link{emxFactorModel}, \link{emxGrowthModel}
}
%\references{
%
%}
\examples{
# Create a covariance matrix
require(EasyMx)
manVars <- paste0('x', 1:6)
latVars <- paste0('F', 1:2)
emxCovariances(manVars, type='full')
emxCovariances(latVars, type='corr', path=TRUE)
}
|
2c9cd29e56f392329d4b6d5e2997ff4dc237bf8f
|
6d70f2719893bb8cfcceeb43451d14a229bf2495
|
/code/_Archive/functions/annotate_tfbs_fun.R
|
4c753ba2d97be478f142b9c8831bbb49b715c3dc
|
[] |
no_license
|
iamciera/synth_es2
|
7323e01790c0f0b349b851d6230e0eae4cd0a972
|
4bd0b3210cfc31ec00dab3c2c1c03220d1cf05c7
|
refs/heads/master
| 2021-01-19T22:16:42.560379
| 2017-04-19T16:16:54
| 2017-04-19T16:16:54
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 299
|
r
|
annotate_tfbs_fun.R
|
# Mask known transcription-factor binding sites in a sequence string.
#
# Args:
#   seq:  a single character string (e.g. a DNA sequence).
#   tfbs: a data frame (or matrix) whose first two columns hold the start
#         and end positions (1-based, inclusive) of each binding site.
#
# Returns: `seq` with every position inside a binding site replaced by 'F'.
function(seq, tfbs){
seqbp <- strsplit(seq, NULL)[[1]]
# Remove known binding sites. seq_len() correctly yields an empty loop for
# a zero-row tfbs, whereas 1:nrow(tfbs) would produce c(1, 0) and fail.
seqTruncbp <- seqbp
for (i in seq_len(nrow(tfbs))){
s <- tfbs[i, 1]
e <- tfbs[i, 2]
seqTruncbp[s:e] <- 'F'
}
seqTrunc <- paste0(seqTruncbp, collapse = '')
return(seqTrunc)
}
|
e19d47623b1d83a78ebce51649ee2bebf3ec2274
|
0bc7b27b4ecdf338211f763915e498afbd076f19
|
/man/Cprop.test.Rd
|
2ff0596cda502ab5028e7af4a412b6cd4edf578c
|
[] |
no_license
|
cran/RcmdrPlugin.TeachStat
|
f42fd6b05a5e351d3f77e7204daabeae93bc93f1
|
702e87f2c3e6e7036a50d547f529f20ea915d369
|
refs/heads/master
| 2022-08-01T00:58:27.010966
| 2022-06-22T11:00:02
| 2022-06-22T11:00:02
| 162,720,733
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,526
|
rd
|
Cprop.test.Rd
|
\name{Cprop.test}
\alias{Cprop.test}
\title{
Test for proportions of one or two samples
}
\description{
Performs hypothesis testing and computes a confidence interval for a proportion or for the difference of two proportions. The sample values needed by the function are the number of successes and the number of trials.
}
\usage{
Cprop.test(ex, nx, ey = NULL, ny = NULL, p.null = 0.5,
alternative = c("two.sided", "less", "greater"), conf.level = 0.95,
...)
}
%- maybe also 'usage' for other objects documented here.
\arguments{
\item{ex}{numeric value that represents the number of successes of the first sample (see Details).}
\item{nx}{numerical value representing the total number of trials of the first sample.}
\item{ey}{(optional) numerical value representing the number of successes of the second sample (see Details).}
\item{ny}{(optional) numerical value representing the total number of trials of the second sample.}
\item{p.null}{numeric value that represents the value of the population proportion or the difference between the two population proportions, depending on whether there are one or two samples (see Details).}
\item{alternative}{a character string specifying the alternative hypothesis, must be one of \code{"two.sided"} (default), \code{"greater"} or \code{"less"}. You can specify just the initial letter.}
\item{conf.level}{confidence level of the interval.}
\item{\dots}{further arguments to be passed to or from methods.}
}
\details{
For the test to be carried out, at least 1 success is required. That is, in the one-sample case \code{ex} must be greater than or equal to 1, and in the two-sample case \code{ex} or \code{ey} must be greater than or equal to 1.
Furthermore, for the case of a sample value p.null must be strictly positive.
}
\value{
A list with class "htest" containing the following components:
\item{statistic}{the value of the test statistic.}
\item{parameter}{number of trials and value of the population proportion or the difference in population proportions, depending on whether there are one or two samples (see Details).}
\item{p.value}{the p-value for the test.}
\item{conf.int}{a confidence interval for the proportion or for the difference in proportions, appropriate to the specified alternative hypothesis.}
\item{estimate}{a value with the sample proportions.}
\item{null.value}{the value of the null hypothesis.}
\item{alternative}{a character string describing the alternative.}
\item{method}{a character string indicating the method used, and whether Yates' continuity correction was applied.}
\item{data.name}{a character string giving the names of the data.}
}
\seealso{
\code{\link{prop.test}}
}
\examples{
## Proportion for a sample
Cprop.test(1,6) # 1 success in 6 attempts
#### With a data set: proportion of cars not manufactured in US
data(cars93) #data set provided with the package
exitos<-sum(cars93$USA == "nonUS")
total<-length(cars93$USA)
Cprop.test(ex=exitos, nx=total)
## Difference of proportions
Cprop.test(1,6,3,15)
# Sample 1: 1 success in 6 attempts
# Sample 2: 3 success in 15 attempts
#### With a data set: difference of proportions of cars not manufactured in US
#### between manual and automatic
exitosx<-sum(cars93$USA == "nonUS" & cars93$Manual == "Yes" )
totalx<-sum(cars93$Manual == "Yes")
exitosy<-sum(cars93$USA == "nonUS" & cars93$Manual == "No" )
totaly<-sum(cars93$Manual == "No")
Cprop.test(ex=exitosx, nx=totalx,ey=exitosy, ny=totaly)
}
|
3afd57215cd09ef06057b45f76860e965bce4bce
|
d9ee4c89fa85ee69ee6d3a6f34035924fc7472e4
|
/h2o-r/tests/testdir_docexamples/runit_Rdoc_deep_learning.R
|
97fc1e2ba3d0c5e979a666b1e13f9c83ba5216f7
|
[
"Apache-2.0"
] |
permissive
|
mrgloom/h2o-3
|
838c298d257d893202e5cba8b55c84d6f5da1c57
|
3f00bf9e8e6aeb3f249301f20694076db15b7d5e
|
refs/heads/master
| 2021-01-15T21:34:32.995372
| 2015-08-20T02:06:09
| 2015-08-20T05:52:14
| 41,108,114
| 1
| 0
| null | 2015-08-20T16:56:36
| 2015-08-20T16:56:34
| null |
UTF-8
|
R
| false
| false
| 525
|
r
|
runit_Rdoc_deep_learning.R
|
## Runit test for the h2o.deeplearning() R documentation example.
# Move to this script's directory so the relative source() path resolves.
setwd(normalizePath(dirname(R.utils::commandArgs(asValues=TRUE)$"f")))
source('../h2o-runit.R')
## Golden test: upload the iris data bundled with the h2o package and fit a
## small deep-learning model, mirroring the documented example.
test.rdoc_deep_learning.golden <- function(H2Oserver) {
irisPath = system.file("extdata", "iris.csv", package = "h2o")
iris.hex = h2o.uploadFile(H2Oserver, path = irisPath)
# First four columns are the predictors; the fifth is the response.
indep <- names(iris.hex)[1:4]
dep <- names(iris.hex)[5]
h2o.deeplearning(x = indep, y = dep, training_frame = iris.hex, activation = "Tanh", epochs = 5, loss = "CrossEntropy")
testEnd()
}
doTest("R Doc Deep Learning", test.rdoc_deep_learning.golden)
|
4c09361808e633810c306be3148dba0ffabd486d
|
e7d14bdf98239f6684d822bb95a15c9eda66939a
|
/CelticSea/2020_CS_MixedFisheriesAdvice/report_06_Figure 6.4.25.3_advice_sheet_plots_2020.r
|
1a5efe1842c7939c6d4a51d37badf976926a0772
|
[] |
no_license
|
ices-eg/wg_WGMIXFISH
|
6e0dddafd9ca773db41bebd7bf32258b913060ff
|
1b2c8bf211b3a936f9c8bce797cb62c36d314392
|
refs/heads/master
| 2022-10-07T21:06:57.031331
| 2021-09-18T09:57:01
| 2021-09-18T09:57:01
| 146,871,376
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 9,935
|
r
|
report_06_Figure 6.4.25.3_advice_sheet_plots_2020.r
|
#################################################################################
################################## MIXFISH PLOTS ################################
#################################################################################
## Paul Dolder
## 28/05/2015
## Celtic Sea
## This script runs on the extract 'ca.csv'
##
## 31/05/2018 - PJD rewritten from fleets object
##
## It produces the landings by metier plot (for advice sheet annex)
## and the total landings pie plot (for advice sheet catch box)
## and outputs the figures for landings, discards, discard percentage
## and the landings by fleet (for advice sheet catch box)
## Includes stocks COD, HAD, WHG
## NEP3A currently excluded
## If the Species or Metiers change, the script will need to be changed to reflect
#################################################################################
rm(list=ls())
res.path<-file.path("results/")
plot.path<-file.path("plots/")
library(reshape2) ; library(ggplot2) ;library(grid); library(data.table)
library(FLCore); library(FLFleet)
library(tidyr); library(dplyr)
source("bootstrap/software/functions/FLFcube_FLCore_R31.R")
source("bootstrap/software/functions/remove_validity_FLFleet.R")
source("bootstrap/software/functions/funcs.R")
load("results/02_Making_Fleets_Celtic_Sea_2020tier12nepnewLOUsingSAM_KW.RData")
df <- slot.fleet(fleets, "landings")
dfD <-slot.fleet(fleets, "discards")
dfC <-slot.fleet(fleets, "catch")
# merge catch categories
df <- merge(df, dfD, all = T)
df <- merge(df, dfC, all = T)
rm(dfD, dfC)
df$area <- substr(df$metier, 9, 14)
df$metier<-substr(df$metier,1,7)
colnames(df)[colnames(df)=="qname"] <-"stock"
levels(df$stock)[levels(df$stock) %in% grep("nep", unique(df$stock), value = TRUE)]<-"nep.27.7bk"
# aggregate it all up
dfa<-aggregate(df[c("landings","discards","catch")],by=list(Year=df$year,Area= df$area,Stock = df$stock),sum, na.rm = T) # #for area
df<-aggregate(df[c("landings","discards","catch")],by=list(Year=df$year,Metier = df$metier,Stock = df$stock),sum, na.rm = T)
# melt
df <-reshape2::melt(df,id=c("Year","Metier","Stock"))
dfa<-reshape2::melt(dfa,id=c("Year","Area","Stock"))
# order the dataframe
df <-df[(order(df$Year,df$Metier,df$Stock)),]
dfa <-dfa[(order(dfa$Year,dfa$Area,dfa$Stock)),]
plot.path <- "plots"
###### PLOT CODE #####
none <- element_blank()
################################
## LANDINGS BARPLOT BY METIER ##
################################
## Make sure the stocks are labelled with numbers (prefix forces a fixed
## legend order after sorting the factor levels below).
levels(df$Stock)[levels(df$Stock) == "cod.27.7e-k"] <-"1:cod.27.7e-k"
levels(df$Stock)[levels(df$Stock) == "had.27.7b-k"] <- "2:had.27.7b-k"
levels(df$Stock)[levels(df$Stock) == "meg.27.7b-k8abd"] <-"3:meg.27.7b-k8abd"
levels(df$Stock)[levels(df$Stock) == "mon.27.78abd"] <- "4:mon.27.78abd"
levels(df$Stock)[levels(df$Stock) == "sol.27.7fg"] <-"5:sol.27.7fg"
levels(df$Stock)[levels(df$Stock) == "whg.27.7b-ce-k"] <- "6:whg.27.7b-ce-k"
levels(df$Stock)[levels(df$Stock) == "nep.27.7bk"] <- "7:nep.27.7bk"
df$Stock <- factor(df$Stock, levels = sort(levels(df$Stock)))
# Same relabelling for the by-area table.
levels(dfa$Stock)[levels(dfa$Stock) == "cod.27.7e-k"] <-"1:cod.27.7e-k"
levels(dfa$Stock)[levels(dfa$Stock) == "had.27.7b-k"] <- "2:had.27.7b-k"
levels(dfa$Stock)[levels(dfa$Stock) == "meg.27.7b-k8abd"] <-"3:meg.27.7b-k8abd"
levels(dfa$Stock)[levels(dfa$Stock) == "mon.27.78abd"] <- "4:mon.27.78abd"
levels(dfa$Stock)[levels(dfa$Stock) == "sol.27.7fg"] <-"5:sol.27.7fg"
levels(dfa$Stock)[levels(dfa$Stock) == "whg.27.7b-ce-k"] <- "6:whg.27.7b-ce-k"
levels(dfa$Stock)[levels(dfa$Stock) == "nep.27.7bk"] <- "7:nep.27.7bk"
dfa$Stock <- factor(dfa$Stock, levels = sort(levels(dfa$Stock)))
## Lump blank and out-of-scope areas into a single "OTH" category and
## re-aggregate after the merge.
dfa$Area[dfa$Area %in% c("","27.7.a", "27.7.d")] <- "OTH"
dfa <- dfa %>% group_by(Year, Area, Stock, variable) %>%
summarise(value = sum(value))
# One color per stock from the paired Brewer palette.
pal <- pals::brewer.paired(12)[1:length(unique(df$Stock))]
data.yr<-2019
p<-ggplot(df[(df$variable=="landings" & df$Year==data.yr),],aes(factor(Metier),value/1000,fill=Stock))
p + geom_bar(stat="identity",position = "stack") +# ylim(0,13)+
theme(panel.grid.major = none, panel.grid.minor = none) + theme(panel.background = none) +
# scale_fill_grey(start=0,end=1)+
scale_fill_manual(values = pal) +
theme(panel.border = none) + theme(axis.line = element_line(colour = "grey50")) +
theme_bw() +
xlab("Metiers used by mixed-fisheries model") + ylab("Landings ('000 tonnes)") +
theme(axis.text.x = element_text(angle = 90,size = 8)) +
theme(legend.key = element_rect(colour = "black"), legend.position=c(0.9,0.7),legend.key.size=unit(0.5,"cm"),
legend.background = element_rect(colour="black", size=.5),
legend.text=element_text(face="bold",size=8),
legend.title=element_text(face="bold",size=8),
legend.title.align=0.5) +
theme(axis.text = element_text(lineheight=0.8, size=8,face="bold")) +
theme(axis.title = element_text(size=12,face="bold")) +
geom_bar(stat="identity",position = "stack",colour="black",show_guide=FALSE) +
annotate("text", label=" ",x=5.5,y=18,fontface="bold",size=6) #label="Landings by species / metier")
ggsave(file= "plots/Celtic Sea Figure 6.4.25.3_advice_sheet_landing_by_metier_plot.png",width=8,height=4.8)
###########################
## By area
###########################
pa<-ggplot(dfa[(dfa$variable=="landings" & dfa$Year==data.yr),],aes(factor(Area),value/1000,fill=Stock))
pa + geom_bar(stat="identity",position = "stack") +# ylim(0,13)+
theme(panel.grid.major = none, panel.grid.minor = none) + theme(panel.background = none) +
# scale_fill_grey(start=0,end=1)+
scale_fill_manual(values = pal) +
theme(panel.border = none) + theme(axis.line = element_line(colour = "grey50")) +
theme_bw() +
xlab("Areas used by mixed-fisheries model") + ylab("Landings ('000 tonnes)") +
theme(axis.text.x = element_text(angle = 90,size = 8)) +
theme(legend.key = element_rect(colour = "black"), legend.position=c(0.1,0.7),legend.key.size=unit(0.5,"cm"),
legend.background = element_rect(colour="black", size=.5),
legend.text=element_text(face="bold",size=8),
legend.title=element_text(face="bold",size=8),
legend.title.align=0.5) +
theme(axis.text = element_text(lineheight=0.8, size=8,face="bold")) +
theme(axis.title = element_text(size=12,face="bold")) +
geom_bar(stat="identity",position = "stack",colour="black",show_guide=FALSE) +
annotate("text", label=" ",x=5.5,y=18,fontface="bold",size=6) #label="Landings by species / metier")
ggsave(file= "plots/Celtic Sea Figure 6.4.25.3_advice_sheet_landing_by_area_plot.png",width=8,height=4.8)
###################
#pie plot landings#
###################
# Total landings per stock for the assessment year, summed over metiers,
# then drawn as a pie chart (a stacked bar transformed to polar coordinates).
# NOTE(review): na.rm=T uses the reassignable T; TRUE would be safer.
df2<-df[(df$variable=="landings" & df$Year==data.yr),]
df2<-aggregate(df2["value"],by=list(Stock = df2$Stock),sum,na.rm=T)
p<- ggplot(df2,aes(x="",y=value,fill=Stock))
p + geom_bar(width = 1,stat="identity") +
scale_fill_manual(values = pal)+
coord_polar("y", start=0) +
xlab("")+ylab("")+
theme(axis.text.x = element_blank(),
panel.border= element_blank(),
panel.background = element_blank()) +
# overlay black wedge outlines without duplicating the legend
geom_bar(width = 1,stat="identity",colour="black",show_guide=FALSE) +
theme(legend.key.size=unit(1,"cm"),
legend.text=element_text(face="bold",size=10),
legend.title=element_text(face="bold",size=10),
legend.title.align=0.5,
legend.background = element_rect(colour="black", size=.5)) +
annotate("text",x=1.8,y=1,label="Total Landings by Stock",fontface="bold",size=10)
# write the catch-distribution figure for the advice sheet
ggsave(file= "plots/Celtic Sea Catch_distribution_figure_advice_sheet.png",width=11.7,height=8.3)
################################################
## Landings, Discards totals for advice sheet ##
################################################
# Console summary of total landings, total discards (both in tonnes, per
# the printed "t" unit), and the discard rate = discards / (landings + discards).
land<-df[(df$variable=="landings" & df$Year==data.yr),]
disc<-df[(df$variable=="discards" & df$Year==data.yr),]
print(paste("-----Landings =",round(sum(land$value,na.rm=T),0),paste("t -------")))
print(paste("-----Discards =",round(sum(disc$value,na.rm=T),0),paste("t -------")))
print(paste("-----Discard =",round(100*sum(disc$value,na.rm=T)/sum(sum(land$value,na.rm=T),sum(disc$value,na.rm=T)),0),paste("% -------")))
Fleet.summary.func<-function(x) {
  # Map a single metier code to its summary fleet label for reporting.
  # Any code that matches none of the known groups falls through to a
  # loud sentinel string so it is easy to spot in the output.
  if (x %in% c("OTB_CRU","OTT_CRU","OTT_DEF","OTB_DEF","SSC_DEF","OTM_DEF")) {
    "Otter trawls and seines"
  } else if (x %in% c("TBB_DEF")) {
    "Beam trawls"
  } else if (x %in% c("GNS_DEF","GTR_DEF")) {
    "Gill and trammel nets"
  } else if (x %in% c("LLS_FIF")) {
    "Longlines"
  } else if (x %in% c("OTH","MIS_MIS")) {
    "Other gears"
  } else {
    "THIS SHOULDN'T BE HERE!!"
  }
}
## and apply the function (assigning metiers to a fleet...)
# Assign each landing record to a summary fleet, then print per-fleet
# landed tonnage and its percentage of total landings.
land$Fleet<-sapply(land$Metier,Fleet.summary.func)
unique(land$Fleet) ## check that all metiers allocated to a fleet
## Fleet summary
print(paste("-----Otter trawls and seines =",round(sum(land$value[(land$Fleet=="Otter trawls and seines")]),0),paste("t -------"),round(100*sum(land$value[(land$Fleet=="Otter trawls and seines")],na.rm=T)/sum(land$value,na.rm=T),0),paste("%")))
print(paste("-----Beam trawls =",round(sum(land$value[(land$Fleet=="Beam trawls")]),0),paste("t -------"),round(100*sum(land$value[(land$Fleet=="Beam trawls")],na.rm=T)/sum(land$value,na.rm=T),0),paste("%")))
print(paste("-----Gill and trammel nets =",round(sum(land$value[(land$Fleet=="Gill and trammel nets")]),0),paste("t -------"),round(100*sum(land$value[(land$Fleet=="Gill and trammel nets")],na.rm=T)/sum(land$value,na.rm=T),0),paste("%")))
print(paste("-----Longlines =",round(sum(land$value[(land$Fleet=="Longlines")]),0),paste("t -------"),round(100*sum(land$value[(land$Fleet=="Longlines")],na.rm=T)/sum(land$value,na.rm=T),0),paste("%")))
print(paste("-----Other gears =",round(sum(land$value[(land$Fleet=="Other gears")],na.rm=T),0),paste("t -------"),round(100*sum(land$value[(land$Fleet=="Other gears")],na.rm=T)/sum(land$value,na.rm=T),0),paste("%")))
|
020663d8021b67c9219051f10a4cc44bff59451d
|
98f3aca17ebba3cbdc340453fabd0c7a647f494c
|
/scripts/modeling/help_functions/preparation&cleaning_functions.R
|
787a1e881bceec450202b14351f617838a1d4aa4
|
[
"MIT"
] |
permissive
|
junxiongliu/Santander-Value-Prediction-Challenge
|
de0b3dcf1b47aaa9f8c491b49d9636659f845983
|
452f69d8040f784b3c03163674780a1815b86305
|
refs/heads/master
| 2020-03-21T11:56:54.798741
| 2018-08-26T23:51:41
| 2018-08-26T23:51:41
| 138,529,813
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 5,694
|
r
|
preparation&cleaning_functions.R
|
# Define needed functions here
##--------------------------------------------------------------------
## function to check "min=max" columns and get rid of such
noVariation_filter <- function(data){
  # Drop every column whose minimum equals its maximum (i.e. columns with
  # no variation) and return the remaining columns as a data.frame.
  #
  # Fixes vs. the original:
  #  * vapply() over columns instead of apply(data, 2, ...), which coerced
  #    the whole data.frame to a matrix (corrupting mixed-type frames);
  #  * drop = FALSE so a single surviving column is still returned as a
  #    data.frame rather than silently collapsing to a vector.
  constant <- vapply(data, function(col) min(col) == max(col), logical(1))
  data_out <- data[, !constant, drop = FALSE]
  return (data_out)
}
##--------------------------------------------------------------------
## function to check highly collinear columns and get rid of such
### NOTE: This function will filter out BOTH pairs... should not use...
### stackoverflow: https://stackoverflow.com/questions/18275639/remove-highly-correlated-variables
collinear_filter <- function(data, threshold){
  # Return the names of the columns of 'data' that survive the
  # correlation screen: a column is flagged when its absolute correlation
  # with any *later* column exceeds 'threshold'.  (Behavior kept exactly
  # as the original -- see the warning comments above this function.)
  corr <- cor(data)
  # zero the diagonal and upper triangle so only each column's
  # correlations with later variables remain
  corr[upper.tri(corr, diag = TRUE)] <- 0
  flagged <- apply(corr, 2, function(col) any(abs(col) > threshold))
  survivors <- data[, !flagged]
  names(survivors)
}
##--------------------------------------------------------------------
## function to select based on correlation with response
corr_selection <- function(data, features, response, topn){
  # Select the topn features most correlated (in absolute value) with the
  # response and return a data.frame holding those features, in rank
  # order, followed by the response column.
  #
  # Fixes vs. the original: correlations are computed in one vapply()
  # pass and ranked with order(), replacing the O(n^2) rbind-in-a-loop
  # data.frame growth; the column subset is done in base R.
  cors <- vapply(features,
                 function(fea) cor(data[[response]], data[[fea]]),
                 numeric(1))
  ranked <- features[order(abs(cors), decreasing = TRUE)]  # NAs sort last
  keep <- head(ranked, topn)                               # tolerant of topn > length
  # drop = FALSE keeps a data.frame even when only one column survives
  data_small <- data[, c(keep, response), drop = FALSE]
  return (data_small)
}
##--------------------------------------------------------------------
## function to select based on random forest importance
rf_selection <- function(data, response, topn){
  # Fit a random forest of 'response' against every other column of
  # 'data' and return the topn feature names, ranked by the importance
  # column referenced as X.IncMSE (the name produced by the data.frame()
  # conversion of the importance matrix).
  fml <- as.formula(paste0(response, "~."))
  fit <- randomForest(formula = fml,
                      data = data, mtry = 20,
                      ntree = 1000, nodesize = 20,
                      importance = TRUE)
  imp <- data.frame(fit$importance)
  imp$feature <- rownames(imp)
  ranked <- imp[order(imp$X.IncMSE, decreasing = TRUE), ]
  head(ranked$feature, topn)
}
##--------------------------------------------------------------------
## function to select based on xgboost importance
xgb_selection <- function(fea_matrix, response_matrix, topn){
# input feature matrix, response matrix and number of n features to return
# return list of topn features (sorted by importance)
# Fit a fixed-hyperparameter regression booster; early_stopping_rounds=3
# halts training when the objective stops improving.
xgb_model <- xgboost(data = fea_matrix, label = response_matrix,
eta = 0.3, nthread = 1, nrounds = 200, objective = "reg:linear",
early_stopping_rounds = 3, verbose = 1)
# importance: rank features by Gain and keep the topn names
xgbImp <- data.frame(xgb.importance(model = xgb_model))
top_fea <- xgbImp %>% arrange(desc(Gain)) %>% head(topn)
all_f <- as.vector(top_fea$Feature)
return (all_f)
}
##--------------------------------------------------------------------
## function to do "row-wise" feature engineering
rw_fea_engineering <- function(data, response = ""){
# input data and all the features
# output data + all features + engineered features + target
# assuming no NAs in any of features
# When 'response' is given (training), it is split off first so the
# row-wise statistics below are computed over feature columns only,
# then re-joined by row number at the end.
# separate out the response dataframe (only needed for training)
if (response != ""){
target_join <- data %>% select(!!sym(response)) %>% mutate(row_num = row_number())
data <- data %>% select(-!!sym(response))
}
# generate new features with features dataframe
# Row-wise summary statistics; the *_n0 variants ignore zero entries.
# All-zero rows make mean/median of an empty vector NaN (replaced by -1
# below) and min of an empty vector Inf (replaced by -1 further below).
data_w_features <- data %>%
mutate(rowMean = rowMeans(.),
rowMedian = apply(., 1, median), # , na.rm=TRUE
rowMax = apply(., 1, max),
# rowMin = apply(., 1, min),
rowMean_n0 = apply(.,1, function(x) mean(x[x!=0])), # non-zero mean
rowMin_n0 = apply(.,1, function(x) min(x[x!=0])), # non-zero min (will produce some inf)
rowMedian_n0 = apply(.,1, function(x) median(x[x!=0])), # non-zero min
count_n0 = apply(.,1, function(x) length(x[x!=0])) # count of non-zeros
# -- can have more..
) %>%
replace(., is.na(.), -1) %>% # replace NA with -1
mutate(row_num = row_number())
data_w_features[mapply(is.infinite, data_w_features)] <- -1 # replace infinite with -1
# join back response and return
if (response != ""){ # for training frame
data_return <- data_w_features %>% left_join(target_join, by = "row_num") %>% select(-row_num)
}else { # for testing frame
data_return <- data_w_features %>% select(-row_num)
}
return (data_return)
}
##--------------------------------------------------------------------
# evaluation function (calculating rmse)
eval <- function(data, pred, actual, nrow = -1){
  # Root-mean-squared error between a prediction column and an actual
  # column of 'data'.
  #
  # data:   data.frame containing both columns
  # pred:   name (string) of the prediction column
  # actual: name (string) of the actual/target column
  # nrow:   optional custom denominator; any negative value (the default)
  #         means "use the number of rows of 'data'"
  #
  # NOTE(review): the function name masks base::eval and the 'nrow'
  # parameter masks base::nrow.  Both are kept to preserve the public
  # interface, but the body no longer calls nrow() while a numeric
  # 'nrow' is in scope (the original only worked because R skips
  # non-function bindings when resolving a call).
  sq_err <- (data[[pred]] - data[[actual]])^2
  denom <- if (nrow < 0) length(sq_err) else nrow
  rmse <- sqrt(sum(sq_err) / denom)
  return (rmse)
}
|
d282a0a42fa89833781a67afa1bf1b68f87848f8
|
39a11c694363f6868317b74eecc1b61881327296
|
/R/Methods-accessors.R
|
be714616f8b6c998ca63aed8cf951415813c7245
|
[] |
no_license
|
grimbough/IONiseR
|
ddc9b11fedb1f66aee1181aa4b26d738226fb32e
|
47d8ab1e1d798f3591407be679076a1a5b5d9dd2
|
refs/heads/master
| 2021-01-21T02:11:22.317716
| 2020-09-21T15:33:30
| 2020-09-21T15:33:30
| 78,727,939
| 2
| 0
| null | 2017-01-12T09:18:45
| 2017-01-12T09:18:45
| null |
UTF-8
|
R
| false
| false
| 2,719
|
r
|
Methods-accessors.R
|
#' Extract readInfo slot
#'
#' This generic function accesses the readInfo slot stored in an object
#' derived from the Fast5Summary class.
#'
#' @param x Object of class \code{\linkS4class{Fast5Summary}}
#' @return A data.frame with 5 columns
#' @examples
#' if( require(minionSummaryData) ) {
#' data(s.typhi.rep2, package = 'minionSummaryData')
#' readInfo( s.typhi.rep2 )
#' }
setGeneric("readInfo", function(x) {
# S4 generic; the concrete behavior is supplied by the method below
standardGeneric("readInfo")
})
#' @describeIn Fast5Summary Returns readInfo data.frame
#'
#' @include classes.R
#' @export
setMethod("readInfo",
c(x = "Fast5Summary"),
function(x) {
# plain accessor: return the slot unchanged
x@readInfo
}
)
#' Extract eventData slot
#'
#' This generic function accesses the eventData slot stored in an object derived
#' from the Fast5Summary class.
#'
#' @param x Object of class \code{\linkS4class{Fast5Summary}}
#' @return A data.frame with 5 columns
#' @examples
#' if( require(minionSummaryData) ) {
#' data(s.typhi.rep2, package = 'minionSummaryData')
#' eventData( s.typhi.rep2 )
#' }
setGeneric("eventData", function(x) {
# S4 generic; the concrete behavior is supplied by the method below
standardGeneric("eventData")
})
#' @describeIn Fast5Summary Returns eventData data.frame
#'
#' @include classes.R
#' @export
setMethod("eventData",
c(x = "Fast5Summary"),
function(x) {
# plain accessor: return the slot unchanged
x@eventData
}
)
#' Extract baseCalled slot
#'
#' This generic function accesses the baseCalled slot stored in an object
#' derived from the Fast5Summary class.
#'
#' @param x Object of class \code{\linkS4class{Fast5Summary}}
#' @return A data.frame with 6 columns
#' @examples
#' if( require(minionSummaryData) ) {
#' data(s.typhi.rep2, package = 'minionSummaryData')
#' baseCalled( s.typhi.rep2 )
#' }
setGeneric("baseCalled", function(x) {
# S4 generic; the concrete behavior is supplied by the method below
standardGeneric("baseCalled")
})
#' @describeIn Fast5Summary Returns baseCalled data.frame
#'
#' @include classes.R
#' @export
setMethod("baseCalled",
c(x = "Fast5Summary"),
function(x) {
# plain accessor: return the slot unchanged
x@baseCalled
}
)
#' Extract fastq slot
#'
#' This generic function accesses the fastq slot stored in an object
#' derived from the Fast5Summary class.
#'
#' @param x Object of class \code{\linkS4class{Fast5Summary}}
#' @return A ShortReadQ object
#' @examples
#' if( require(minionSummaryData) ) {
#' data(s.typhi.rep2, package = 'minionSummaryData')
#' fastq( s.typhi.rep2 )
#' }
setGeneric("fastq", function(x) {
# S4 generic; the concrete behavior is supplied by the method below
standardGeneric("fastq")
})
#' @describeIn Fast5Summary Returns ShortReadQ object stored in fastq slot.
#'
#' @include classes.R
#' @export
setMethod("fastq",
c(x = "Fast5Summary"),
function(x) {
# plain accessor: return the slot unchanged
x@fastq
}
)
|
fbb04647253435fd16046e485caa36454f7d8168
|
91e6daaaa02d48a5cf6906e9087aa23765cbf06e
|
/man/IRACpm-package.Rd
|
48292f7c6b22a428aba666fdbb726c8da5ed83ff
|
[] |
no_license
|
esplint/IRACpm
|
b64d29690180a593d8d43f5b62feda27d7c24423
|
2899b29f46db1983e14a44c6f26731ce04e2e5eb
|
refs/heads/master
| 2016-08-11T07:05:54.100352
| 2016-02-26T22:03:36
| 2016-02-26T22:03:36
| 46,518,471
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,790
|
rd
|
IRACpm-package.Rd
|
\name{IRACpm-package}
\alias{IRACpm-package}
\alias{IRACpm}
\alias{CD1}
\alias{index1}
\alias{epochs3}
\alias{data1}
\docType{package}
\title{
Apply Distortion Correction to SPITZER IRAC Data
}
\description{
Applies a 7-8 order distortion correction to IRAC
astrometric data from the Spitzer Space Telescope and includes
a function for measuring apparent proper motions between
different Epochs.
}
\details{
\tabular{ll}{
Package: \tab IRACpm\cr
Type: \tab Package\cr
Version: \tab 1.0\cr
Date: \tab 2015-05-05\cr
License: \tab GPL-2\cr
Depends: \tab foreach, doMC, astro, R.utils\cr
}
Basic work flow for a data set to measure proper motion should follow this
outline:
1) Read in files containing output from the Spitzer Science
Center's APEX single frame module from MOPEX using read.in.data.
2) Measure image central world coordinates and rotations with CD.solver,
CD.solver2, CD.solver3, or CD.solver4
3) Calculate average coordinates for each star of interest with calc.all.ch12
4) Repeat for other epochs
5) Run mucalc to measure apparent proper motions
(If accurate relative astrometry is wanted without proper motions,
just follow steps 1-3.)
Example data objects: CD1 (sample output for CD.solver), data1 (sample
output for read.in.data), and epochs3 (sample input data for mucalc).
To just convert pixel coordinates to World Coordinates using the
distortion corrections measured follow the example listed below.
}
\author{
Taran Esplin
Taran Esplin <tle918@psu.edu>
}
\keyword{ package }
\examples{
data(CD1,ca_pix1,wa_pars1,data1)
options(digits=10)
#using a measured scale factor
coor.calc(ca_pix1,wa_pars1,CD1[[1]][1,],-100,104,CD1[[2]],1)
#estimating a scale factor from HMJD.
coor.calc(ca_pix1,wa_pars1,CD1[[1]][1,],-100,104,data1,1)
#the difference for this point in the array is ~2 mas
}
|
c896828d20335043fc0e42de85ad54d29211eba9
|
090111bc82f2086d1b108d7cda8071b9042c1c8a
|
/terrain/research/north-60/assess-flow.R
|
11a3fbe0a375378d05898c460147eb1d0910f859
|
[] |
no_license
|
nemochina2008/environmental-layers
|
dbf06b1bc7f5077672843ed89c39f4f9b0781e0c
|
f3799e26c25704bee989aa830ba3b9270161cb3d
|
refs/heads/master
| 2021-05-28T11:23:05.794919
| 2013-01-29T23:54:22
| 2013-01-29T23:54:22
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 9,320
|
r
|
assess-flow.R
|
# R code to plot latitudinal profiles of (circular) mean flow direction,
# along with both RMSE and (circular) correlation coefficients comparing
# fused layers with both the raw ASTER and with the Canada DEM
#
# Jim Regetz
# NCEAS
library(raster)
library(circular)
datadir <- "/home/regetz/media/temp/terrain/flow"
# create function to recode terraflow SFD values into degrees, with
# 0=North and proceeding clockwise (this matches gdaldem's default
# azimuth output for aspect calculation)
recode <- function(r) {
  # Translate terraflow SFD codes (powers of two) into azimuth degrees,
  # 0 = North and proceeding clockwise; code 0 (no direction) becomes NA.
  # Values that are not recognised codes pass through untouched, exactly
  # as in the original sequential recoding.
  sfd <- values(r)
  sfd[sfd == 0] <- NA
  codes   <- c( 1,   2,   4,   8,  16,  32, 64, 128)
  degrees <- c(90, 135, 180, 225, 270, 315,  0,  45)
  hit <- match(sfd, codes)
  found <- !is.na(hit)
  sfd[found] <- degrees[hit[found]]
  r[] <- sfd
  return(r)
}
# load flow direction rasters, recoding on the fly
# Read each single-flow-direction (SFD) GeoTIFF and recode it to degrees.
sfd.aster <- recode(raster(file.path(datadir, "aster_300straddle_sfd.tif")))
sfd.srtm <- recode(raster(file.path(datadir, "srtm_150below_sfd.tif")))
sfd.uncor <- recode(raster(file.path(datadir, "fused_300straddle_sfd.tif")))
sfd.enblend <- recode(raster(file.path(datadir, "fused_300straddle_enblend_sfd.tif")))
sfd.bg <- recode(raster(file.path(datadir, "fused_300straddle_blendgau_sfd.tif")))
sfd.can <- recode(raster(file.path(datadir, "cdem_300straddle_sfd.tif")))
# extract raster latitudes for later
lats300 <- yFromRow(sfd.aster, 1:nrow(sfd.aster))
lats150 <- yFromRow(sfd.srtm, 1:nrow(sfd.srtm))
# initialize output pdf device driver
pdf("flowdir-assessment.pdf", height=8, width=11.5)
#
# plot latitudinal profiles of mean flow direction
#
# simple helper function to calculate row-wise means using circular
# mean, patterned after circ.mean in the CircStats package
rowMeansC <- function(r1, na.rm=TRUE) {
  # Row-wise circular mean of a matrix (or anything as.matrix() accepts)
  # holding angles in degrees; results lie in (-180, 180].
  rad <- (as.matrix(r1) * pi) / 180
  # mean direction = atan2 of the summed sine and cosine components
  atan2(rowSums(sin(rad), na.rm = na.rm),
        rowSums(cos(rad), na.rm = na.rm)) * (180 / pi)
}
# 2x2 panel: mean-flow-direction profiles for the original DEMs and the
# three fused products; the dashed red line marks the 60N seam.
par(mfrow=c(2,2), omi=c(1,1,1,1))
ylim <- c(-180, 180)
plot(lats300, rowMeansC(sfd.can), type="l", yaxt="n",
xlab="Latitude", ylab="Mean flow direction", ylim=ylim)
axis(2, at=c(-180, -90, 0, 90, 180), labels=c("S", "W", "N", "E", "S"))
text(min(lats300), min(ylim)+0.5, pos=4, font=3, labels="Original DEMs")
lines(lats300, rowMeansC(sfd.aster), col="blue")
lines(lats150, rowMeansC(sfd.srtm), col="red")
legend("bottomright", legend=c("ASTER", "SRTM", "CDED"), col=c("blue",
"red", "black"), lty=c(1, 1), bty="n")
abline(v=60, col="red", lty=2)
mtext(expression(paste("Latitudinal profiles of mean flow direction (",
125*degree, "W to ", 100*degree, "W)")), adj=0, line=2, font=2)
#plot(lats300, rowMeans(as.matrix(sfd.uncor), na.rm=TRUE), type="l",
# xlab="Latitude", ylab="Mean flow direction", ylim=ylim)
#text(min(lats300), min(ylim)+0.5, pos=4, font=3, labels="uncorrected")
#abline(v=60, col="red", lty=2)
#mtext(expression(paste("Latitudinal profiles of mean flow direction (",
# 125*degree, "W to ", 100*degree, "W)")), adj=0, line=2, font=2)
plot(lats300, rowMeansC(sfd.uncor), type="l", yaxt="n",
xlab="Latitude", ylab="Mean flow direction", ylim=ylim)
axis(2, at=c(-180, -90, 0, 90, 180), labels=c("S", "W", "N", "E", "S"))
text(min(lats300), min(ylim)+0.5, pos=4, font=3, labels="simple fused")
abline(v=60, col="red", lty=2)
plot(lats300, rowMeansC(sfd.enblend), type="l", yaxt="n",
xlab="Latitude", ylab="Mean flow direction", ylim=ylim)
axis(2, at=c(-180, -90, 0, 90, 180), labels=c("S", "W", "N", "E", "S"))
text(min(lats300), min(ylim)+0.5, pos=4, font=3, labels="multires spline")
abline(v=60, col="red", lty=2)
plot(lats300, rowMeansC(sfd.bg), type="l", yaxt="n",
xlab="Latitude", ylab="Mean flow direction", ylim=ylim)
axis(2, at=c(-180, -90, 0, 90, 180), labels=c("S", "W", "N", "E", "S"))
text(min(lats300), min(ylim)+0.5, pos=4, font=3, labels="gaussian blend")
abline(v=60, col="red", lty=2)
#
# plot latitudinal profiles of RMSE
#
# simple helper function to calculate row-wise RMSEs, accounting for the
# fact that flow dir values are circular (0-360), so the difference
# between e.g. 5 and 355 should only be 10
rmse <- function(r1, r2, na.rm=TRUE, use) {
  # Row-wise RMSE between two grids of flow directions (degrees),
  # treating values as circular: a difference d is folded to
  # min(d, 360 - d), so e.g. 355 vs 5 counts as 10, not 350.
  # 'use' (optional) is a logical mask; cells where it is FALSE are
  # set to NA before averaging.
  delta <- abs(as.matrix(r1) - as.matrix(r2))
  if (!missing(use)) {
    delta[!use] <- NA
  }
  delta <- pmin(delta, 360 - delta)
  sqrt(rowMeans(delta ^ 2, na.rm = na.rm))
}
# 2x3 panel of RMSE profiles: top row against the ASTER/SRTM components,
# bottom row against the Canada DEM; dashed red line marks the 60N seam.
par(mfrow=c(2,3), omi=c(1,1,1,1))
ylim <- c(0, 100)
# ...with respect to ASTER
plot(lats300, rmse(sfd.uncor, sfd.aster), type="l", xlab="Latitude",
ylab="RMSE", ylim=ylim)
lines(lats150, rmse(crop(sfd.uncor, extent(sfd.srtm)), sfd.srtm), col="blue")
legend("topright", legend=c("ASTER", "SRTM"), col=c("black", "blue"),
lty=c(1, 1), bty="n")
text(min(lats300), max(ylim)-5, pos=4, font=3, labels="uncorrected")
abline(v=60, col="red", lty=2)
mtext(expression(paste(
"Flowdir discrepancies with respect to separate ASTER/SRTM components (",
125*degree, "W to ", 100*degree, "W)")), adj=0, line=2, font=2)
plot(lats300, rmse(sfd.enblend, sfd.aster), type="l", xlab="Latitude",
ylab="RMSE", ylim=ylim)
lines(lats150, rmse(crop(sfd.enblend, extent(sfd.srtm)), sfd.srtm), col="blue")
legend("topright", legend=c("ASTER", "SRTM"), col=c("black", "blue"),
lty=c(1, 1), bty="n")
text(min(lats300), max(ylim)-5, pos=4, font=3, labels="exponential ramp")
abline(v=60, col="red", lty=2)
plot(lats300, rmse(sfd.bg, sfd.aster), type="l", xlab="Latitude",
ylab="RMSE", ylim=ylim)
lines(lats150, rmse(crop(sfd.bg, extent(sfd.srtm)), sfd.srtm), col="blue")
legend("topright", legend=c("ASTER", "SRTM"), col=c("black", "blue"),
lty=c(1, 1), bty="n")
text(min(lats300), max(ylim)-5, pos=4, font=3, labels="gaussian blend")
abline(v=60, col="red", lty=2)
# ...with respect to CDEM
plot(lats300, rmse(sfd.uncor, sfd.can), type="l", xlab="Latitude",
ylab="RMSE", ylim=ylim)
text(min(lats300), max(ylim)-5, pos=4, font=3, labels="uncorrected")
abline(v=60, col="red", lty=2)
mtext(expression(paste(
"Flowdir discrepancies with respect to Canada DEM (",
125*degree, "W to ", 100*degree, "W)")), adj=0, line=2, font=2)
plot(lats300, rmse(sfd.enblend, sfd.can), type="l", xlab="Latitude",
ylab="RMSE", ylim=ylim)
text(min(lats300), max(ylim)-5, pos=4, font=3, labels="exponential ramp")
abline(v=60, col="red", lty=2)
plot(lats300, rmse(sfd.bg, sfd.can), type="l", xlab="Latitude",
ylab="RMSE", ylim=ylim)
text(min(lats300), max(ylim)-5, pos=4, font=3, labels="gaussian blend")
abline(v=60, col="red", lty=2)
#
# plot latitudinal profiles of correlation coefficients
#
# simple helper function to calculate row-wise *circular* correlation
# coefficients
corByLat <- function(r1, r2, rows) {
  # Row-wise *circular* correlation between two grids of angles.
  #
  # r1, r2: objects coercible via as.matrix(), holding directions in
  #         degrees (clockwise rotation).
  # rows:   optional subset of row indices; defaults to every row of r1.
  # Returns a vector of circular correlation coefficients, with NA where
  # cor.circular() yields no result.
  if (missing(rows)) {
    # seq_len() instead of 1:nrow(r1): 1:0 would yield c(1, 0) on a
    # zero-row input and index out of bounds
    rows <- seq_len(nrow(r1))
  }
  m1 <- circular(as.matrix(r1), units="degrees", rotation="clock")
  m2 <- circular(as.matrix(r2), units="degrees", rotation="clock")
  sapply(rows, function(row) {
    p <- cor.circular(m1[row,], m2[row,])
    if (is.null(p)) NA else p
  })
}
# 2x3 panel of circular-correlation profiles: top row against the
# ASTER/SRTM components, bottom row against the Canada DEM.
par(mfrow=c(2,3), omi=c(1,1,1,1))
ylim <- c(-1, 1)
# ...with respect to ASTER
plot(lats300, corByLat(sfd.uncor, sfd.aster), type="l", xlab="Latitude",
ylab="Circular correlation", ylim=ylim)
lines(lats150, corByLat(crop(sfd.uncor, extent(sfd.srtm)), sfd.srtm), col="blue")
legend("bottomright", legend=c("ASTER", "SRTM"), col=c("black", "blue"),
lty=c(1, 1), bty="n")
text(min(lats300), min(ylim), pos=4, font=3, labels="simple fused")
abline(v=60, col="red", lty=2)
mtext(expression(paste(
"Flow direction correlations with respect to separate ASTER/SRTM components (",
125*degree, "W to ", 100*degree, "W)")), adj=0, line=2, font=2)
plot(lats300, corByLat(sfd.enblend, sfd.aster), type="l", xlab="Latitude",
ylab="Circular correlation", ylim=ylim)
lines(lats150, corByLat(crop(sfd.enblend, extent(sfd.srtm)), sfd.srtm), col="blue")
legend("bottomright", legend=c("ASTER", "SRTM"), col=c("black", "blue"),
lty=c(1, 1), bty="n")
text(min(lats300), min(ylim), pos=4, font=3, labels="multires spline")
abline(v=60, col="red", lty=2)
plot(lats300, corByLat(sfd.bg, sfd.aster), type="l", xlab="Latitude",
ylab="Circular correlation", ylim=ylim)
lines(lats150, corByLat(crop(sfd.bg, extent(sfd.srtm)), sfd.srtm), col="blue")
legend("bottomright", legend=c("ASTER", "SRTM"), col=c("black", "blue"),
lty=c(1, 1), bty="n")
text(min(lats300), min(ylim), pos=4, font=3, labels="gaussian blend")
abline(v=60, col="red", lty=2)
# ...with respect to CDEM
plot(lats300, corByLat(sfd.uncor, sfd.can), type="l", xlab="Latitude",
ylab="Circular correlation", ylim=ylim)
text(min(lats300), min(ylim), pos=4, font=3, labels="simple fused")
abline(v=60, col="red", lty=2)
mtext(expression(paste(
"Flow direction correlations with respect to Canada DEM (",
125*degree, "W to ", 100*degree, "W)")), adj=0, line=2, font=2)
plot(lats300, corByLat(sfd.enblend, sfd.can), type="l", xlab="Latitude",
ylab="Circular correlation", ylim=ylim)
text(min(lats300), min(ylim), pos=4, font=3, labels="multires spline")
abline(v=60, col="red", lty=2)
plot(lats300, corByLat(sfd.bg, sfd.can), type="l", xlab="Latitude",
ylab="Circular correlation", ylim=ylim)
text(min(lats300), min(ylim), pos=4, font=3, labels="gaussian blend")
abline(v=60, col="red", lty=2)
# close pdf device driver
dev.off()
|
bc28a8370a29f926c25c9cc85f2c002961dd1489
|
33945f7d8c8dc14d102638de7ec71d1e88413013
|
/cal/rgl-plots.R
|
f9c14f20b39817458b2dae9dca90ad341d79a18c
|
[] |
no_license
|
wactbprot/svol
|
9c483a87969cc5eddec68e6c5be8a2b60bad0e9e
|
57db9658fbd5b253bced0e7fa66471d79115364f
|
refs/heads/master
| 2021-01-18T14:05:39.453305
| 2015-02-05T12:16:08
| 2015-02-05T12:16:08
| 29,733,995
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,102
|
r
|
rgl-plots.R
|
# Interactive 3D scatter of valve-circle measurement data using rgl.
library("rgl")
v2 <- read.table("data/ventil-kreis_2.txt"
, skip=2
, sep=" "
, row.names = NULL)
v1 <- read.table("data/ventil-kreis_1.txt"
, skip=2
, sep=" "
, row.names=NULL)
# restart the rgl device so the scene is drawn from scratch
rgl.close()
rgl.open()
rgl.bg( sphere = FALSE
, color=c("white","black")
, back="lines")
axes3d(box = TRUE
, col=1
)
title3d(xlab= "x in mm"
, ylab= "y in mm"
, zlab= "z in mm"
, pos=c(10,10,0)
, col=1
)
## Origin
rgl.points(0,0,0
, col=1)
## Reference circle (columns 3-5 hold x, y, z)
rgl.points(v1[, 3]
, v1[, 4]
, v1[, 5]
, col=1)
## Scans: one point cloud per scan label in column 1, cycling colors
scns <- c("SCN1"
, "SCN3"
, "SCN4"
, "SCN5"
, "SCN6"
, "SCN7")
col=1
for (scn in scns){
i <- which(v2[,1] == scn)
rgl.points(v2[i, 3]
, v2[i, 4]
, v2[i, 5]
, col=col
)
col<- col+1
}
# valve-seat points, mirrored in z and shifted down by 40 mm
# NOTE(review): the 'add=TRUE' argument to rgl.points looks suspect -- confirm
# it is accepted by the rgl version in use
s1 <- read.table("data/ventil-sitz_1.txt", skip=2, sep=" ", row.names=NULL)
rgl.points(s1[, 3], s1[, 4], -s1[, 5] - 40, add=TRUE, col=1)
|
6ca36ac945eddbbb89cc24e554568f8edf5bd75f
|
331ffa7fcacf86dcafcca4f921c91382313bd0e3
|
/man/SDF.Rd
|
af85cee80d7e5d319c17cab1974be6c1db9c4bb2
|
[] |
no_license
|
wconstan/sapa
|
5f5022a0dc09d207a4c6f6a1cbb79af9b5ddbdd5
|
501d67a72e5e25594d8cc169c2f1b7c49b68f86a
|
refs/heads/master
| 2023-07-25T13:19:11.032775
| 2016-05-20T20:35:23
| 2016-05-20T20:35:23
| 58,663,742
| 0
| 2
| null | 2023-07-08T23:46:21
| 2016-05-12T17:46:29
|
R
|
UTF-8
|
R
| false
| false
| 12,673
|
rd
|
SDF.Rd
|
%% WARNING: This file was automatically generated from the associated
%% sapa_sdf.mid file. Do NOT edit this Rd file to make a change. Instead,
%% edit the sapa_sdf.mid file in the project MID directory. Once the
%% sapa_sdf.mid file has been updated, this Rd file, and all other
%% documentation (such as corresponding LaTeX, SGML and HTML documentation)
%% should be regenerated using the mid.pl Perl script.
%% R documentation for the SDF, as.matrix.SDF, plot.SDF, print.SDF functions
\name{SDF}
\alias{SDF}
\alias{as.matrix.SDF}
\alias{plot.SDF}
\alias{print.SDF}
\title{Nonparametric (cross) spectral density function estimation}
\concept{spectral density function estimation}
\usage{SDF(x, method="direct", taper.=NULL, window=NULL,
n.taper=5, overlap=0.5, blocksize=NULL,
single.sided=TRUE, sampling.interval=NULL,
center=TRUE, recenter=FALSE, npad=2*numRows(x))}
\description{Estimate the process (cross) spectral density function via nonparametric
models.}
\arguments{
\item{x}{a vector or matrix containing uniformly-sampled real-valued time series.
If a \code{matrix}, each column should contain a different time series.}
\item{blocksize}{an integer representing the number
of points (width) of each block in the WOSA estimator scheme.
Default: \code{floor(N/4)} where \code{N} is the number of
samples in each series.}
\item{center}{a logical value. If \code{TRUE}, the mean of each
time series is recentered prior to estimating
the SDF. Default: \code{TRUE}.}
\item{method}{a character string denoting the method to use in estimating the SDF.
Choices are \code{"direct"}, \code{"lag window"}, \code{"wosa"} (Welch's Overlapped Segment Averaging),
\code{"multitaper"}. See \bold{DETAILS} for more information. Default: \code{"direct"}.}
\item{n.taper}{an integer defining the number of tapers
to use in a multitaper scheme. This value is overwritten if
the \code{taper} input is of class \code{taper}. Default: \code{5}.}
\item{npad}{an integer representing the total length of each
time series to analyze after padding with zeros. This argument
allows the user to control the spectral resolution of the SDF
estimates: the normalized frequency interval is
\eqn{\Delta f = 1 / \hbox{npad}}{deltaf=1/npad}.
This argument must be set such that
\eqn{\hbox{npad} > 2}{npad > 2}.
Default: \code{2*numRows(x)}.}
\item{overlap}{a numeric value on \eqn{[0,1]} denoting the fraction
of window overlap for the WOSA estimator. Default: \code{0.5}.}
\item{recenter}{a logical value. If \code{TRUE}, the mean of each
time series is recentered after (posssibly) tapering the series prior to estimating
the SDF. Default: \code{FALSE}.}
\item{sampling.interval}{a numeric value representing the interval
between samples in the input time series \code{x}. Default: \code{NULL}, which
serves as a flag to obtain the sampling interval via the \code{deltat}
function. If \code{x} is a list, the default sampling interval is \code{deltat(x[[1]])}.
If \code{x} is an atomic vector (ala \code{isVectorAtomic}), then the default
sampling interval is established ala \code{deltat(x)}. Finally, if the
input series is a matrix, the sampling interval of the first series (assumed to
be in the first column) is obtained ala \code{deltat(x[,1])}.}
\item{single.sided}{a logical value. If \code{TRUE}, a single-sided
SDF estimate is returned corresponding to the normalized frequency
range of \eqn{[0,1/2]}. Otherwise, a double-sided SDF estimate
corresponding to the normalized frequency interval \eqn{[-1/2,1/2]}
is returned. Default: \code{TRUE}.}
\item{taper.}{an object of class \code{taper} or a character string denoting the primary taper.
If an object of class \code{taper}, the length of the taper is checked to ensure
compatibility with the input \code{x}.
See \bold{DETAILS} for more information. The default values are a function
of the \code{method} as follows:
\describe{
\item{direct}{normalized rectangular taper}
\item{lag window}{normalized Parzen window with a cutoff at \eqn{N/2} where
\eqn{N} is the length of the time series.}
\item{wosa}{normalized Hanning taper}
\item{multitaper}{normalized Hanning taper}}}
\item{window}{an object of class \code{taper} or a character string denoting the (secondary) window
for the lag window estimator.
If an object of class \code{taper}, the length of the taper is checked to ensure
compatibility with the input \code{x}. See \bold{DETAILS} for more information.
Default: Normalized Hanning window.}
}
\value{
an object of class \code{SDF}.
}
\section{S3 METHODS}{
\describe{
\item{as.matrix}{converts the (cross-)SDF estimate(s) as a matrix. Optional arguments
are passed directly to the \code{matrix} function during the conversion.}
\item{plot}{plots the (cross-)SDF estimate(s). Optional arguments are:
\describe{
\item{xscale}{a character string defining the scaling to perform on the (common) frequency vector
of the SDF estimates. See the \code{scaleData} function for supported choices. Default: \code{"linear"}.}
\item{yscale}{a character string defining the scaling to perform on the SDF estimates.
See the \code{scaleData} function for supported choices. Default: \code{"linear"}.}
\item{type}{a single character defining the plot type (ala the \code{par} function) of the
SDF plots. Default: \code{ifelse(numRows(x) > 100, "l", "h")}.}
\item{xlab}{a character string representing the x-axis label. Default: \code{"FREQUENCY (Hz)"}.}
\item{ylab}{a (vector of) character string(s), one per (cross-)SDF estimate,
representing the y-axis label(s). Default: in the multivariate case, the strings
\code{"Sij"} are used for the y-axis labels, where i and j are the indices of the
different variables. For example, if the user supplies a 2-column matrix for \code{x},
the labels \code{"S11"}, \code{"S12"}, and \code{"S22"} are used to label the y-axes of the corresponding
(cross-)SDF plots. In the univariate case, the default string \code{"SDF"} prepended with a string
describing the type of SDF performed (such as \code{"Multitaper"}) is used to label the y-axis.}
\item{plot.mean}{a logical value. If \code{TRUE}, the SDF value at normalized frequency \eqn{f=0}
is plotted for each SDF. This frequency is associated with the sample mean
of the corresponding time series. A relatively large mean value dominates
the spectral patterns in a plot and thus the corresponding frequency is typically not plotted.
Default: \code{!attr(x,"center")}.}
\item{n.plot}{an integer defining the maximum number of SDF plots to place onto a single graph.
Default: \code{3}.}
\item{FUN}{a post processing function to apply to the SDF values prior to plotting. Supported
functions are \code{Mod}, \code{Im}, \code{Re} and \code{Arg}. See each of these functions for details.
If the SDF is purely real (no cross-SDF is calculated), this argument is coerced to the \code{Mod} function.
Default: \code{Mod}.}
\item{add}{A logical value. If \code{TRUE}, the plot is added using the current
\code{par()} layout. Otherwise a new plot is produced. Default: \code{FALSE}.}
\item{...}{additional plot parameters passed directly to the \code{genPlot} function used
to plot the SDF estimates.}}}
\item{print}{prints the object. Available options are:
\describe{
\item{justify}{text justification ala \code{prettyPrintList}. Default: \code{"left"}.}
\item{sep}{header separator ala \code{prettyPrintList}. Default: \code{":"}.}
\item{...}{Additional print arguments sent directly to the \code{prettyPrintList} function.}}}
}
}
\details{
%
Let \eqn{X_t}{x(t)} be a uniformly sampled real-valued time series of length \eqn{N},
Let an estimate of the process spectral density function be denoted as
\eqn{\hat{S}_X(f)}{S(f)} where \eqn{f} are
frequencies on the interval
\eqn{[-1/(2\Delta t),1/(2\Delta t)]}{-1/(2*deltat),1/(2*deltat)} where
\eqn{\Delta t}{deltat} is the sampling interval. The supported SDF
estimators are:
\describe{
\item{direct}{The direct SDF estimator is defined as
\eqn{\hat{S}_X^{(d)}(f) = | \sum_{t=0}^{N-1} h_t X_t e^{-i2\pi f t}|^2}{S(f)=|sum[t=0,...,N-1]{h(t)*x(t)*exp(-i*2*pi*f*t)}|^2},
where \eqn{\{h_t\}}{h(t)} is a data taper normalized such that
\eqn{\sum_{t=0}^{N-1} h_t^2 = 1}{sum[t=0,...,N-1]{h(t)^2} = 1}. If
\eqn{h_t=1/\sqrt{N}}{h(t)=1/sqrt(N)} then we obtain the definition
of the periodogram
\eqn{\hat{S}_X^{(p)}(f) = \frac{1}{N} | \sum_{t=0}^{N-1} X_t e^{-i2\pi f t}|^2}{S(f)=(1/N) * |sum[t=0,...,N-1]{x(t)*exp(-i*2*pi*f*t)}|^2}.
See the \code{taper} function for more details on supported window types.}
\item{lag window}{The lag window SDF estimator is defined as
\eqn{\hat{S}_X^{(lw)}(f) = \sum_{\tau=-(N-1)}^{N-1} w_\tau
\hat{s}_{X,\tau}^{(d)} e^{-i2\pi f \tau}}{S(f)=sum[k=-(N-1),...,(N-1)]{w(k)*s(k)*exp(-i*2*pi*f*k)}|^2},
where \eqn{\hat{s}_{X,\tau}^{(d)}}{s(k)} is the autocovariance
sequence estimator corresponding to some
direct spectral estimator (often the periodogram)
and \eqn{w_\tau}{w(k)} is a lag window (popular choices
are the Parzen, Papoulis, and Daniell windows). See the
\code{taper} function for more details.}
\item{wosa}{Welch's Overlapped Segment Averaging SDF estimator is
defined as
\deqn{
\hat S^{(wosa)} =
{1\over N_B} \sum_{j=0}^{N_B-1}
\hat S^{(d)}_{jN_O} (f)
}{S(f)=(1/Nb)*sum[j=0,...,Nb-1]{S(j*No,f)}}
where
\deqn{
\hat S^{(d)}_{l}(f)
\equiv
\left|
\sum_{t=0}^{N_S-1}
h_t X_{t+l} e^{-i2\pi ft}
\right|^2,
\enskip
0 \le l \le N - N_S;
}{S(l,f) =|sum[t=0,...,Ns-1]{h(t)*x(t+l)*exp(-i*2*pi*f*t)}|^2}
Here, \eqn{N_O}{No} is a positive integer
that controls how much overlap there is between segments
and that must satisfy both
\eqn{N_O \le N_S}{No <= Ns} and \eqn{N_O(N_B-1) = N-N_S}{No * (Nb - 1) = N - Ns},
while \eqn{\{ h_t \}}{h(t)} is a data taper appropriate
for a series of length \eqn{N_S}{Ns}
(i.e., \eqn{\sum_{t=0}^{N_S-1} h_t^2 = 1}{sum[t=0,...,Ns-1]{h_t^2} = 1}).}
\item{multitaper}{A multitaper spectral estimator is given by
\deqn{\hat S^{(mt)}_X(f)=
{1\over K}
\sum_{k=0}^{K-1}
\left| \sum_{t=0}^{N-1} h_{k,t} X_t e^{-i2\pi ft} \right|^2,
}{S(f) = (1/K) * sum[k=0,...,K-1] S(k,f)}
where
\eqn{S(k,f) = {|\sum_{t=0}^{N-1} h_{k,t} X_t \exp(-i 2 \pi f t)|}^2}{S(k,f) = |sum[t=0,...,N-1]{h(k,t) * X(t) * exp(-i*2*pi*f*t)}|^2}
and \eqn{\{ h_{k,t} \}$, $k=0,\ldots,K-1}{h(k,t) for k=0,...,K-1},
is a set of \eqn{K} orthonormal data tapers.
\deqn{\sum_{t=0}^{N-1} h_{k,t} h_{k',t}
=
\left\{
\begin{array}{ll}
1,& \mbox{if $k=k'$;}\\
0,& \mbox{otherwise}
\end{array}
\right.
}{See reference(s) for further details.}
Popular choices for multitapers include sinusoidal tapers and
discrete prolate spheroidal sequences (DPSS). See the
\code{taper} function for more details.}}
\bold{Cross spectral density function estimation:}
If the input \code{x} is a matrix, where each column contains
a different time series, then the results are returned in
a matrix whose columns correspond to all possible unique combinations
of cross-SDF estimates. For example, if \code{x} has three columns,
then the output will be a matrix whose columns are
\eqn{\{S_{11},S_{12},S_{13},S_{22},S_{23},S_{33}\}}{{S11, S12, S13, S22, S23, S33}}
where \eqn{S_{ij}}{Sij} is the cross-SDF estimate of the \code{i}th
and \code{j}th column of \code{x}. All cross-spectral density
function estimates are returned as complex-valued series to maintain
the phase relationships between components. For all \eqn{S_{ij}}{Sij}
where \eqn{i=j}, however, the imaginary portions will be zero (up to a
numerical noise limit).
}
\references{
Percival, Donald B. and Constantine, William L. B. (2005)
``Exact Simulation of Gaussian Time Series
from Nonparametric Spectral Estimates
with Application to Bootstrapping'',
\emph{Journal of Computational and Graphical Statistics},
accepted for publication.
D.B. Percival and A. Walden (1993),
\emph{Spectral Analysis for Physical Applications: Multitaper
and Conventional Univariate Techniques},
Cambridge University Press, Cambridge, UK.
}
\seealso{
\code{\link{taper}}, \code{\link{ACVS}}.}
\examples{
## calculate various SDF estimates for the
## sunspots series. remove mean component for a
## better comparison.
require(ifultools)
data <- as.numeric(sunspots)
methods <- c("direct","wosa","multitaper",
"lag window")
S <- lapply(methods, function(x, data) SDF(data, method=x), data)
x <- attr(S[[1]], "frequency")[-1]
y <- lapply(S,function(x) decibel(as.vector(x)[-1]))
names(y) <- methods
## create a stack plot of the data
stackPlot(x, y, col=1:4)
## calculate the cross-spectrum of the same
## series: all spectra should be the same in
## this case
SDF(cbind(data,data), method="lag")
## calculate the SDF using npad=31
SDF(data, npad=31, method="multitaper")
}
\keyword{univar}
|
edd8765b9283acfa41c4db40c829f8f3ec9ce135
|
8e46ecb5e85b4b7b197ad98a56a9c57f1b7aaf02
|
/enrichment.R
|
c38e8a6d64aa236494ea301b2a51df44e264218a
|
[] |
no_license
|
federicocozza/BioInformaticsGlioma
|
d6887fa90d02617cdd22acfbcb4df72bb3a65c90
|
ded0018bc1eadd3e7401993b83adc4bf1b6318f4
|
refs/heads/master
| 2021-01-18T13:10:58.656789
| 2017-03-10T08:29:04
| 2017-03-10T08:29:04
| 80,731,441
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,454
|
r
|
enrichment.R
|
# Gene-set enrichment of the glioma gene signature against MSigDB v5.2.
# NOTE(review): read.gmt() and enricher() belong to clusterProfiler, but only
# GSA is attached here -- presumably clusterProfiler is loaded elsewhere in
# the session; confirm before running standalone.
library(GSA)
#gmtfile <- system.file("extdata", "msigdb.v5.2.entrez.gmt",package="clusterProfiler")
# Use the gene-symbol version of MSigDB shipped with clusterProfiler.
gmtfile <- system.file("extdata", "msigdb.v5.2.symbols.gmt",package="clusterProfiler")
c2kegg <- read.gmt(gmtfile)
#sum(gene %in% c2kegg$gene)
### Glioma - Gene
# paramList$dfps row names carry the signature gene symbols used as the query set.
load("D:/R_workspace/BioInformaticsGlioma/paramList10.Rdata")
gene <- rownames(paramList$dfps)
# Over-representation test of the signature against all MSigDB v5.2 sets.
egmt <- enricher(gene, TERM2GENE=c2kegg,pvalueCutoff = 0.05)
#head(egmt)
#View(egmt@result[grep(pattern = "KEGG",x = rownames(egmt@result),ignore.case = F),])
# Split enriched sets by source collection (Reactome / KEGG / BioCarta).
reactome <- egmt@result[grep(pattern = "reactome",x = rownames(egmt@result),ignore.case = T),]
kegg <- egmt@result[grep(pattern = "kegg",x = rownames(egmt@result),ignore.case = T),]
biocarta <- egmt@result[grep(pattern = "biocarta",x = rownames(egmt@result),ignore.case = T),]
# in c2
# estrarre i primi 30 pathway con p-value piu alto
# (Italian: extract the top 30 pathways by p-value, then write one file per
# pathway containing only the patients' genes.)
dim(egmt@result)
View(egmt@result)
plot(egmt@result$pvalue)
# Sanity check: the Verhaak mesenchymal signature should be present in c2.
c2 <- read.gmt('D:\\Download\\c2.all.v5.2.symbols.gmt')
"VERHAAK_GLIOBLASTOMA_MESENCHYMAL" %in% c2$ont
egmt_result <- egmt@result
save(egmt_result, file="egmt_result.Rdata")
# reactomeFinal <- reactome[which(reactome$Count >=10),]
# keggFinal <- kegg[which(kegg$Count >=10),]
# c6Final <- egmt@result[egmt@result$ID %in% c6 & egmt@result$Count >= 10,]
# NOTE(review): reactomeFinal/keggFinal/c6Final are created only in the
# commented-out lines above (and c6 is never defined here), so the three
# save() calls below will error unless those lines are restored -- TODO
# confirm the intended workflow.
save(reactomeFinal,file="reactomeFinal.Rdata")
save(keggFinal,file="keggFinal.Rdata")
save(c6Final,file="c6Final.Rdata")
|
7a04967c4bd28e898e8666cc6a477e10a684547c
|
d97356ffe6e7494067ebdd3bad871a4c045f3627
|
/Covest.R
|
f3c50bf10695deabcd345862fe4fd62e400ac3fd
|
[] |
no_license
|
QingxiaCindyChen/2WayTimeVaryingSwitch
|
325375ed9779f0a131a241ffb93476bc103b7b2f
|
8aabbd74b8c7cc8ec25740ac35c7e9862a3b526a
|
refs/heads/main
| 2023-04-07T02:49:02.586609
| 2021-04-19T19:23:55
| 2021-04-19T19:23:55
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 4,383
|
r
|
Covest.R
|
# Covest: covariance estimate for all parameters of the two-way time-varying
# switching survival model via Louis' observed-information formula,
# combining the complete-data Hessian with the variance of conditional scores.
#
# Arguments:
#   par       list of current estimates: beta0vec/h0/timeD (+ dims nbeta0, n0),
#             beta1vec/h1/timeU (+ nbeta1, n1), beta2vec/h2/timeG (+ nbeta2, n2),
#             and alpvec/nalpha for the logistic membership submodel.
#   Umod      formula for the logistic membership submodel (evaluated on Sdat).
#   TDmod, TUmod, TGmod  survival formulas for the three transitions.
#   SdatCount counting-process data (start, stop, status), one row per interval.
#   Sdat      one row per subject; must carry Ee (apparently a posterior
#             membership probability in [0, 1] -- confirm with the caller) and
#             Group.
#   ID        name/index of the subject-id column shared by SdatCount and Sdat.
#
# Returns: list(cov.est = covariance matrix of the stacked parameter vector,
#               Infl    = influence-function matrix, npar x nid).
Covest <- function(par, Umod, TDmod, TUmod, TGmod, SdatCount, Sdat, ID) {
# y0/y1/y2 are the jump time of h0/h1/h2 exclude censored
beta0 <- par$beta0vec; h0 <- par$h0; y0 <- par$timeD; nbeta0 <- par$nbeta0; n0 <- par$n0
beta1 <- par$beta1vec; h1 <- par$h1; y1 <- par$timeU; nbeta1 <- par$nbeta1; n1 <- par$n1
beta2 <- par$beta2vec; h2 <- par$h2; y2 <- par$timeG; nbeta2 <- par$nbeta2; n2 <- par$n2
alpha <- par$alpvec; nalpha <- par$nalpha
# Positions of each parameter sub-block within the stacked parameter vector:
# (beta0 | h0 | beta1 | h1 | beta2 | h2 | alpha).
IND <- cumsum(c(nbeta0, n0, nbeta1, n1, nbeta2, n2, nalpha))
indbeta0 <- 1:IND[1]; indh0 <- (IND[1]+1):IND[2]
indbeta1 <- (IND[2]+1):IND[3]; indh1 <- (IND[3]+1):IND[4]
indbeta2 <- (IND[4]+1):IND[5]; indh2 <-(IND[5]+1):IND[6]
indalpha <- (IND[6]+1):IND[7]
npar <- max(indalpha)
n <- dim(SdatCount)[1]
nid <- dim(Sdat)[1];
# d2L: complete-data Hessian; score1/score0: per-subject scores conditional on
# membership (Ee = 1) and non-membership (Ee = 0) respectively.
d2L <- matrix(0, npar, npar)
score1 <- matrix(0, nid, npar)
score0 <- matrix(0, nid, npar)
# Design matrices: X (logistic submodel, explicit intercept prepended),
# Xd/Xe/Xg for the D/U/G transition hazards.
X <- as.matrix(cbind(intercept=rep(1,nid), model.frame(Umod, data=Sdat, na.action="na.pass")[,-1]))
Xd <- model.matrix(TDmod, SdatCount)[,-1]
Xe <- model.matrix(TUmod, Sdat)[,-1]
temp <- as.matrix(model.frame(TDmod, data=SdatCount, na.action="na.pass"))
StartD <- temp[,1]; EndD <- temp[,2]; DeltaD <- temp[,3]
temp <- as.matrix(model.frame(TUmod, data=Sdat, na.action="na.pass"))
SurvU <- temp[,1]; DeltaU <- temp[,2]
temp <- as.matrix(model.frame(TGmod, data=SdatCount, na.action="na.pass"))
Xg <- temp[,-(1:3)]
StartG <- temp[,1]; EndG <- temp[,2]; DeltaG <- temp[,3]
# n x nid indicator matrix mapping each counting-process row to its subject,
# used to aggregate row-level scores to the subject level.
indID <- (matrix(SdatCount[,ID], n, nid, byrow=F)==matrix(Sdat[,ID], n, nid, byrow=T))
# estimating alpha
# Logistic submodel: PE1 = P(membership); scores under the two latent states
# and the corresponding Hessian block (negative Fisher information form).
PE1 <- exp(X%*%alpha)/(1+exp(X%*%alpha))
score1[, indalpha] <- matrix(1-PE1, nid, nalpha, byrow=FALSE)*(X)
score0[, indalpha] <- matrix(0-PE1, nid, nalpha, byrow=FALSE)*(X)
d2L[indalpha, indalpha] <- -(t(X)*matrix(PE1*(1-PE1),nalpha,nid,byrow=TRUE))%*%X
# beta0 h0
# D transition: contributes to the non-membership score (weight 1 - Ee);
# row-level scores are aggregated per subject through indID.
out <- Covestsub(Xd, beta0, h0, y0, StartD, EndD, DeltaD, (SdatCount$Group%in%c(1,4)), 1-SdatCount$Ee)
score0[, c(indbeta0, indh0)] <- t(indID)%*%out$score
d2L[c(indbeta0, indh0), c(indbeta0, indh0)] <- out$dscore
# beta1 h1
# U transition: Sdat is already one row per subject, so no aggregation needed.
out <- Covestsub(Xe, beta1, h1, y1, rep(0,nid), SurvU, DeltaU, (Sdat$Group!=1), Sdat$Ee)
score1[, c(indbeta1, indh1)] <- out$score
d2L[c(indbeta1, indh1), c(indbeta1, indh1)] <- out$dscore
# beta2 h2
# G transition: contributes to the membership score (weight Ee).
out <- Covestsub(Xg, beta2, h2, y2, StartG, EndG, DeltaG, (SdatCount$Group%in% c(2,3)), SdatCount$Ee)
score1[, c(indbeta2, indh2)] <- t(indID)%*%out$score
d2L[c(indbeta2, indh2), c(indbeta2, indh2)] <- out$dscore
# information matrix
# Louis' formula: observed information = -d2L - (E[dL dL'] - E[dL] E[dL]'),
# with the expectation taken over the latent membership using Ee.
Ee <- Sdat$Ee
EdLdL <- t(score1)%*%(as.vector(Ee)*score1)+t(score0)%*%(as.vector(1-Ee)*score0)
EdL <- as.vector(Ee)*score1+as.vector(1-Ee)*score0
EdLEdL <- t(EdL)%*%EdL
cov.est <- solve(-d2L-(EdLdL-EdLEdL))
# Influence functions: covariance times the per-subject expected scores.
Influence <- cov.est%*%t(EdL)
return(list(cov.est=cov.est, Infl=Influence))
}
# Covestsub: per-observation score vectors and complete-data Hessian block for
# one proportional-hazards transition with a piecewise-constant baseline hazard.
#
# Arguments:
#   X            design matrix for this transition (one row per interval).
#   beta         regression coefficients.
#   hh0, yy0     baseline-hazard jump sizes and their jump times.
#   Start, End   at-risk interval bounds per row; Delta event indicator.
#   w            0/1 at-risk weight selecting contributing rows.
#   Ew           expected (posterior) version of w, used in the Hessian.
#
# Returns: list(score  = length(Start) x (nbeta + nh) score matrix, with zero
#                        rows for observations excluded by w,
#               dscore = (nbeta + nh) x (nbeta + nh) Hessian block).
Covestsub <- function(X, beta, hh0, yy0, Start, End, Delta, w, Ew) {
# Restrict to the rows that actually contribute (w > 0).
XX<- X[w>0,]
TStart<- Start[w>0]
TEnd<- End[w>0]
TDelta <- Delta[w>0]
Tw <- w[w>0]
ETw <- Ew[w>0]
n <- length(TStart)
nbeta <- length(beta)
nh <- length(hh0)
score <- matrix(0, n, nbeta+nh)
dscore <- matrix(0, nbeta+nh, nbeta+nh)
# Relative risk exp(x'beta) per interval.
EXbeta <- exp(XX%*%beta)
# indY[i, k]: interval i is at risk at jump time yy0[k]
# (Start < yy0[k] <= End).
indY <- (matrix(TEnd, n, nh, byrow=FALSE)>=matrix(yy0,n,nh,byrow=TRUE))*(matrix(TStart, n, nh, byrow=FALSE)<matrix(yy0,n,nh,byrow=TRUE))
# indY2[i, k]: End falls before the NEXT jump time, so indY * indY2 picks out
# the single jump interval containing End.
indY2 <- (matrix(TEnd, n, nh, byrow=FALSE)<matrix(c(yy0[-1],Inf),n,nh,byrow=TRUE))
# HY: cumulative baseline hazard accrued over each at-risk interval.
HY <- indY%*%hh0
OneY <- (indY*indY2)%*%rep(1,nh)
# Score w.r.t. beta (events minus weighted expected hazard) and w.r.t. the
# baseline jumps hh0.
score[,1:nbeta] <- as.vector(TDelta*OneY)*XX-as.vector(Tw*HY*EXbeta)*XX
score[,nbeta+(1:nh)] <- as.vector(TDelta)*indY*indY2%*%diag(1/hh0)-as.vector(Tw*EXbeta)*indY
# Hessian blocks use the expected weight ETw (posterior membership).
dscore[1:nbeta, 1:nbeta] <- -t(XX)%*%(matrix(ETw*HY*EXbeta,n,nbeta, byrow=FALSE)*XX)
dscore[1:nbeta, nbeta+(1:nh)] <- -t(XX)%*%(matrix(ETw*EXbeta,n,nh,byrow=FALSE)*indY)
dscore[nbeta+(1:nh), 1:nbeta] <- t(dscore[1:nbeta, nbeta+(1:nh)])
dscore[nbeta+(1:nh), nbeta+(1:nh)] <- -diag(apply(TDelta*indY*indY2,2,sum)/hh0^2)
# Expand back to the full row set: excluded observations get zero scores.
Tscore <- score
score <- matrix(0, length(Start), nbeta+nh)
score[w>0,] <- Tscore
out=list(score=score, dscore=dscore)
return(out)
}
|
e256d9ce9c5ddeaee2a5ac90e15d31b60033b010
|
0a609864e2ea079f96ea9e53a73e12454f2d9667
|
/check_mark.R
|
ec2cf6ca0fccfb569efc90c6f2ee3dcddc216f02
|
[] |
no_license
|
julia722/36-350-Fork
|
9eec72906a032b44bbb4c728982f73507c66d1e9
|
0930792e59192e2978ee2b8ed81397a24ba0f614
|
refs/heads/master
| 2021-02-13T14:48:52.250893
| 2020-03-03T22:44:48
| 2020-03-03T22:44:48
| 244,705,758
| 0
| 0
| null | 2020-03-03T18:01:13
| 2020-03-03T18:01:13
| null |
UTF-8
|
R
| false
| false
| 11
|
r
|
check_mark.R
|
cat("---")
|
ebce23083ef800bbea56bfd7adf66f0dde8bf56a
|
ada662885ce76e3ea71abeef59fa2cf5281c2085
|
/test1.R
|
49d4413210c74474f029afc14c4dd8a61c6df16b
|
[] |
no_license
|
SCMA632/rlatestnew-ShivRamaswamy
|
d2c7b83cedb9064fb241efae2b07903bf717126c
|
2cfd4896811cc8c4f7e34bdb55d2cecf3465653d
|
refs/heads/main
| 2023-08-22T07:13:59.307602
| 2021-10-24T02:46:51
| 2021-10-24T02:46:51
| 420,421,964
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 544
|
r
|
test1.R
|
# Exploratory look at the NSSO 68th-round survey data, subset to Meghalaya.
df = read.csv('4. NSSO68 data set.csv')
dim(df)
names(df)
summary(df)
# psych supplies describe() used below.
library(psych)
# NOTE(review): this inspects df$state but the subset below filters on
# df$state_1 -- presumably two related state columns; confirm which encodes
# the 'MEG' code.
unique(df$state)
meg = df[df$state_1 == 'MEG',]
# Data type of the subset data
class(meg)
# Any missing values anywhere in the subset?
any(is.na(meg))
# Shape of the Subset
dim(meg)
# To View the filtered data
#View(meg)
# Different columns
names(meg)
# Top 3 rows of the subset
head(meg,3)
# Bottom 3 rows of the subset
tail(meg,3)
# Unique Districts of the subset
str(meg)
#View(meg)
# takes lot time to generate a report
#create_report(meg)
# Descriptive statistics for every column (psych::describe).
describe(meg)
# NOTE(review): this prints the full logical NA matrix; only the sum on the
# next line is informative -- kept as in the original exploration.
is.na(meg)
sum(is.na(meg))
|
386a78b896d3a17a37c6385024efdb73ccbf6fe9
|
366cf606c85cf5a47a2e6d96f5ed804b5f5af3a5
|
/rprog031/tests/testthat/test_prog3.R
|
cf7c45b38dfaf3f9a649bf0c82013f65ce9803a9
|
[] |
no_license
|
Momus/R
|
ac532f88c673886c1c9da7ec9d8555bd7f9cce51
|
6207a5a4f4e2affe8a747ef3255226de54bd0f6c
|
refs/heads/master
| 2021-01-19T07:57:56.966782
| 2017-09-16T23:56:47
| 2017-09-16T23:56:47
| 87,587,365
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,514
|
r
|
test_prog3.R
|
# testthat suite for the rprog031 course package: covers outcome loading,
# best(), rankhospital() and rankall().
library(rprog031)
context("Outcomes frame created from data")
# load_outcomes() is expected to read the outcome-of-care file for "pneumonia"
# and return it as a data frame.
outcome <- load_outcomes(outcome="pneumonia")
test_that("loads_outcomes loads proper file and creates data frame" , {
expect_equal(class(outcome), "data.frame")
})
context("Finding the best hospital in the state")
# All expectations for best() are currently disabled; the test body is a no-op.
test_that("Best function exists and takes the proper arguments", {
## expect_error(best())
## expect_error(best(state = "AK"))
## expect_error(best(outcome = "pneumonia"))
## expect_error(best(state = "AK", outcome = "herpies"), "invalid outcome", fixed=TRUE)
## expect_error(best(state = "XX", outcome = "heart attack"), "invalid state", fixed=TRUE)
## expect_equal(best("TX", "heart attack"), "CYPRESS FAIRBANKS MEDICAL CENTER")
## expect_equal(best("TX", "heart failure"), "FORT DUNCAN MEDICAL CENTER")
## expect_equal(best("MD", "heart attack"), "JOHNS HOPKINS HOSPITAL, THE")
## expect_equal(best("MD", "pneumonia"), "GREATER BALTIMORE MEDICAL CENTER")
})
context("Return name of hospital in given state with given rank")
test_that("rankhospital takes three arguments", {
expect_error(rankhospital())
expect_equal(rankhospital("TX", "heart failure", 4), "DETAR HOSPITAL NAVARRO")
expect_equal(rankhospital("MD", "heart attack", "worst"), "HARFORD MEMORIAL HOSPITAL")
# NOTE(review): "myfuncton" looks like a typo for "myfunction"; it is only
# referenced by the commented-out expectation below, so it is harmless as-is.
myfuncton <- function() {NA}
#expect_equal(myfuncton(), NA )
})
context("Ranking each hospital in the state")
# Placeholder: no expectations yet for rankall().
test_that("rankall is a stupid function but it works", {
})
|
ea242c2ccf50e230569eefcd75d71a088b6edd70
|
7f026bc3deee32e4732c13cd318cb32119c7dd69
|
/man/acf.Rd
|
03e0aefe83807932f75c3cb434a95ac7cc22dfcf
|
[] |
no_license
|
cran/TSA
|
109803777566ded77104af3a01e288c749daa97b
|
5050db06a645f31f2a37ac81a90fc5d2c590a25c
|
refs/heads/master
| 2022-07-28T07:23:53.254418
| 2022-07-05T10:36:22
| 2022-07-05T10:36:22
| 17,693,886
| 1
| 8
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,415
|
rd
|
acf.Rd
|
\name{acf}
\alias{acf}
%- Also NEED an '\alias' for EACH other topic documented here.
\title{
Auto- and Cross- Covariance and -Correlation Function Estimation}
\description{
This function calls the acf function in the stats package and proceeds to drop lag-0 of the acf. It only works for univariate time series, so x below should be 1-dimensional.
}
\usage{
acf(x, lag.max = NULL, type = c("correlation", "covariance", "partial")[1],
plot = TRUE, na.action = na.fail, demean = TRUE, drop.lag.0 = TRUE, ...)
}
%- maybe also 'usage' for other objects documented here.
\arguments{
\item{x}{a univariate or multivariate (not ccf) numeric time series object or a numeric vector or matrix, or an "acf" object.}
\item{lag.max}{maximum number of lags at which to calculate the acf. Default is 10*log10(N/m) where N is the number of observations and m the number of series.}
\item{type}{character string giving the type of acf to be computed. Allowed values are "correlation" (the default), "covariance" or "partial".}
\item{plot}{
logical. If TRUE (the default) the acf is plotted.}
\item{na.action}{function to be called to handle missing values. na.pass can be used.}
\item{demean}{
logical. Should the covariances be about the sample means?}
\item{drop.lag.0}{logical. Should lag 0 be dropped}
\item{\dots}{further arguments to be passed to plot.acf.}
}
\value{
An object of class "acf", which is a list with the following elements:
\item{lag}{ A three dimensional array containing the lags at which the acf is estimated.}
\item{acf}{ An array with the same dimensions as lag containing the estimated acf.}
\item{type}{ The type of correlation (same as the type argument).}
\item{n.used}{ The number of observations in the time series.}
\item{series}{ The name of the series x.}
\item{snames}{ The series names for a multivariate time series.}
}
\references{Venables, W. N. and Ripley, B. D. (2002) \emph{Modern Applied Statistics with S}. Fourth Edition. Springer-Verlag.}
\author{Original authors of stats:::acf are: Paul Gilbert, Martyn Plummer, B.D. Ripley. This wrapper is written by Kung-Sik Chan}
\seealso{\code{\link{plot.acf}}, \code{\link{ARMAacf}} for the exact autocorrelations of a given ARMA process.}
\examples{
data(rwalk)
model1=lm(rwalk~time(rwalk))
summary(model1)
acf(rstudent(model1),main='')
}
\keyword{methods}
|
0161497abf80c86c28f561c6fb798ef8979282f9
|
76dee326d2906752e3205fe43a5698dec3602406
|
/man/transform.Rd
|
0b9610a3149006d7ae42b331bf8c33040de4b61c
|
[] |
no_license
|
AmeliaMN/gigvis
|
d08e0e849087eb369e91cae3d99acb3b1849c7a9
|
db816da20df1d2642f778608b7379a3252ba3d06
|
refs/heads/master
| 2021-01-18T11:11:38.098173
| 2013-07-26T16:54:59
| 2013-07-26T16:54:59
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 172
|
rd
|
transform.Rd
|
\name{transform}
\alias{transform}
\title{S3 class: transform}
\usage{
transform(type, ...)
}
\description{
This is a type of \code{\link{pipe}}.
}
\keyword{internal}
|
a1941c8521a7ab80e703b8a38aaab4bcc2a42126
|
c26ff8949728d2704f6b8c7166380973f9e41b50
|
/W203 Week 4/async_material_Week4.R
|
064db410ce5dc5f72c0f8f280128a095d6488852
|
[] |
no_license
|
jhabib/W203
|
7d8d7db35dc4e0b80077cb3ce6a53109ff425aff
|
7814832ec95e51e7b53974568cf24f21b6da5910
|
refs/heads/master
| 2021-01-10T09:04:36.072682
| 2016-04-28T00:45:38
| 2016-04-28T00:45:38
| 50,896,079
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 722
|
r
|
async_material_Week4.R
|
# Worked example (W203 Week 4 async material): build a small data frame of
# chef ratings and walk through renaming, attach(), scaling and factors.
names <- c("coye", "paul", "andrew", "judd")
y <- c(9, 7, 6, 6)
data.frame(chef = names, score = y)
# Right-assignment into `pr` (as shown in the async video).
data.frame(chef = names, score = y) -> pr
pr
pr$chef
names(pr)
# Rename the second column from "score" to "score1".
names(pr)[2] <- "score1"
pr
pr$score2 <- c(3, 2, 1, 2)
pr$spiciness <- c(3, 2, 1, 2)
pr
# attach() puts copies of pr's columns on the search path; modifying the
# copy below does NOT change pr$score1 (demonstrated by the next prints).
attach(pr)
score1
score1 <- score1 + 10
score1
pr$score1
mean(pr$score1)
# Standardize both scores (scale() returns a one-column matrix).
pr$score1 <- scale(pr$score1)
pr$score1
pr$score2 <- scale(pr$score2)
pr
pr$total_Score <- (pr$score1 + pr$score2) / 2
pr
# Flag chefs with an above-average combined score.
pr$above_av <- pr$total_Score > mean(pr$total_Score)
pr
pr$spiciness
# Turn the 1/2/3 spiciness codes into a labelled factor.
pr$spiciness <- factor(pr$spiciness, levels = c(1, 2, 3), labels = c("mild", "spicy", "extra spicy"))
pr
levels(pr$spiciness)
# FIX: the original read `levels(pr$spicines)` (missing "s"). Because `$`
# partial-matches on the right-hand side, that silently created a NEW column
# "spicines" instead of relabeling pr$spiciness. Corrected to target the
# real column so the levels become mild/medium/hot.
levels(pr$spiciness) <- c("mild", "medium", "hot")
pr
|
429b4b04c39c85308bdd9520b4875b0765940183
|
5a5f3be3124296d5b999c3d9667d79dfa9cc6550
|
/Rcode/06_maps.R
|
48c26ba409c569d978089430c909f2ec579a5679
|
[] |
no_license
|
flamontano/raptorMS
|
c51e9ef6aa38a5d1dd0557144a376fef68fdca60
|
fb7be053ee72de14a502b96bfb7290bcf98ae19e
|
refs/heads/main
| 2023-04-16T18:25:11.454399
| 2022-11-07T16:29:14
| 2022-11-07T16:29:14
| 560,494,156
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 6,832
|
r
|
06_maps.R
|
## Plotting global patterns of observed and SES Functional and phylogenetic diversity of raptors ##
### OBSERVED VALUES ###
# Plotting copy of the cell-level diversity table, with Antarctica removed.
data_raptor.plots<-data_raptor[data_raptor$continent !="Antarctica",]
# grouping some values to improve contrast in visualization #
# Winsorize MPD below 75 so low outliers do not stretch the colour scale.
data_raptor.plots$mpd1<-data_raptor.plots$mpd
data_raptor.plots$mpd1[data_raptor.plots$mpd1 <75] <- 74.9
# Clamp MNTD to roughly [12, 95] for the same reason.
data_raptor.plots$mntd1<-data_raptor.plots$mntd
data_raptor.plots$mntd1[data_raptor.plots$mntd1 >95] <-96
data_raptor.plots$mntd1[data_raptor.plots$mntd1 < 12] <-11.9
# Floor niche functional dispersion at 0.2 (plotted as 0.15).
data_raptor.plots$fdis_obs_niche2<-data_raptor.plots$fdis_obs_niche
data_raptor.plots$fdis_obs_niche2[data_raptor.plots$fdis_obs_niche2 < 0.2] <-0.15
library(ggplot2)
# Base map for the species-richness panel. NOTE(review):
# plot_world_eqaul_area [sic] is a project helper defined elsewhere; the
# spelling must match its definition.
p.0 = plot_world_eqaul_area(color_polygon = "grey90") +
viridis::scale_fill_viridis(direction = -1) +
theme(legend.position = c(0.55,0.10),
legend.direction = "horizontal",
legend.key.height = unit(0.5, 'lines'),
legend.key.width = unit(1.5, 'lines'),
plot.margin = margin(-0.5, -0.1, -0.5, -0.1, "cm"))
# Species richness (SR) map.
p_sprich = p.0 + geom_tile(data = filter(data_raptor.plots, !is.na(continent)),
aes(x = x, y = y, fill = sr), inherit.aes = F) +
labs(fill = 'SR')
p_sprich
# Shared base map for the observed phylogenetic / functional diversity panels.
p = plot_world_eqaul_area(color_polygon = "grey90", fill_polygon = "white") +
viridis::scale_fill_viridis(direction = -1) +
theme(legend.position = c(0.55,0),
legend.direction = "horizontal",
legend.key.height = unit(0.4, 'lines'),
legend.key.width = unit(1.5, 'lines'),
plot.margin = margin(-0.5, -0.1, -0.5, -0.1, "cm"))
# plot pd uroot
# Phylogenetic diversity (unrooted PD).
p_pduroot = p + geom_tile(data = data_raptor.plots,
aes(x = x, y = y, fill = pd.uroot),
inherit.aes = F) +
labs(fill = 'PD')
# Mean pairwise distance, using the winsorized mpd1 column; the legend marks
# the clamped lower end as "< 80".
p_mpd = p + geom_tile(data = data_raptor.plots,
aes(x = x, y = y, fill = mpd1),
inherit.aes = F) +
labs(fill = 'MPD ')+ # Change legend labels of continuous legend
scale_fill_continuous(type = "viridis", direction= -1, breaks = c(80, 100, 120, 140, 160), labels = c("< 80", "100", "120", "140", "160"))
# Mean nearest-taxon distance (clamped mntd1, legend marks both clamped ends).
p_mntd = p + geom_tile(data = data_raptor.plots,
aes(x = x, y = y, fill = mntd1),
inherit.aes = F) +
labs(fill = 'MNTD ')+ # Change legend labels of continuous legend
scale_fill_continuous(type = "viridis", direction= -1, breaks = c(10, 30, 50, 70, 90), labels = c("<10", "30", "50", "70", ">90"))
#plot fd
# Observed functional dispersion: morphology, niche (floored copy), diet,
# foraging and dispersal/vagility.
p_morph = p + geom_tile(data = data_raptor.plots,
aes(x = x, y = y, fill = fdis_obs_morph),
inherit.aes = F) +
labs(fill = 'FD Morphology')
p_fdis_niche = p + geom_tile(data = data_raptor.plots,
aes(x = x, y = y, fill = fdis_obs_niche2),
inherit.aes = F) +
labs(fill = 'FD Niche ')+
scale_fill_continuous(type = "viridis", direction= -1, breaks = c(0, 0.15, 0.3, 0.45), labels = c("0.0", "0.15", "0.30", "0.45"))
p_fdis_diet = p + geom_tile(data = data_raptor.plots,
aes(x = x, y = y, fill = fdis_obs_diet),
inherit.aes = F) +
labs(fill = 'FD Diet ')+ # Change legend labels of continuous legend
scale_fill_continuous(type = "viridis", direction= -1, breaks = c(0, 0.03, 0.06, 0.09), labels = c("0.0", "0.03", "0.06", "0.09"))
p_fdis_forag = p + geom_tile(data = data_raptor.plots,
aes(x = x, y = y, fill = fdis_obs_forag),
inherit.aes = F) +
labs(fill = 'FD Foraging ')
p_fdis_dispersal = p + geom_tile(data = data_raptor.plots,
aes(x = x, y = y, fill = fdis_obs_dispe),
inherit.aes = F) +
labs(fill = 'FD Vagility ')
## SES VALUES ###
# Base map for the SES (standardized effect size) panels: diverging
# red-white-blue scale centred on zero.
pz = plot_world_eqaul_area(color_polygon = "white") +
scale_fill_gradient2(low = "#B2182B", mid = "white", high = "#2166AC") +
theme(legend.position = c(0.55, 0),
legend.direction = "horizontal",
legend.key.height = unit(0.5, 'lines'),
legend.key.width = unit(1.5, 'lines'),
plot.margin = margin(-0.5, -0.1, -0.5, -0.1, "cm"))
#phylogenetic
# SES maps for the phylogenetic metrics (z-scores of PD, MPD, MNTD).
p_pduroot_z = pz + geom_tile(data = data_raptor.plots,
aes(x = x, y = y, fill = pd.uroot.z),
inherit.aes = F) +
labs(fill = 'SES PD')
p_mpd_z = pz + geom_tile(data = data_raptor.plots,
aes(x = x, y = y, fill = mpd.z),
inherit.aes = F) +
labs(fill = 'SES MPD')
p_mntd_z = pz + geom_tile(data = data_raptor.plots,
aes(x = x, y = y, fill = mntd.z),
inherit.aes = F) +
labs(fill = 'SES MNTD')
# SES maps for the functional-dispersion metrics.
p_morph_z = pz + geom_tile(data = data_raptor.plots,
aes(x = x, y = y, fill = fdis_ses_morph),
inherit.aes = F) +
labs(fill = 'SES FD Morphology')
p_fdis_niche_z = pz + geom_tile(data = data_raptor.plots,
aes(x = x, y = y, fill = fdis_ses_niche),
inherit.aes = F) +
labs(fill = 'SES FD Niche')
p_fdis_diet_z = pz + geom_tile(data = data_raptor.plots,
aes(x = x, y = y, fill = fdis_ses_diet),
inherit.aes = F) +
labs(fill = 'SES FD Diet')
p_fdis_forag_z = pz + geom_tile(data = data_raptor.plots,
aes(x = x, y = y, fill = fdis_ses_forag),
inherit.aes = F) +
labs(fill = 'SES FD Foraging ')
p_fdis_dispersal_z = pz + geom_tile(data = data_raptor.plots,
aes(x = x, y = y, fill = fdis_ses_dispe),
inherit.aes = F) +
labs(fill = 'SES FD Vagility')
## FINAL FIGURES ##
# Assemble the panels into the two composite figures (cowplot grids); the
# ggsave calls are left commented out as in the original workflow.
library(cowplot)
p_obs = plot_grid(p_pduroot,
p_mpd,
p_mntd,
p_morph,
p_fdis_niche,
p_fdis_diet,
p_fdis_forag,
p_fdis_dispersal,
ncol = 2, labels = letters[2:9])
# Figure 1: species richness on top, the eight observed-diversity panels below.
p_obs2 = plot_grid(p_sprich, p_obs, labels = c('a', ''), ncol = 1, rel_heights = c(0.3, 1))
#ggsave(filename = "Figures/newfig1_obs.pdf", plot = p_obs2, height = 15, width = 10)
# Figure 2: the eight SES panels.
p_ses = plot_grid(p_pduroot_z,
p_mpd_z,
p_mntd_z,
p_morph_z,
p_fdis_niche_z,
p_fdis_diet_z,
p_fdis_forag_z,
p_fdis_dispersal_z,
ncol = 2, labels = letters[1:8])
#ggsave(filename = "Figures/newfig2_ses_alt.pdf", plot = p_ses, height = 15, width = 11)
|
bcb6afb94c6fbd0fc6fbc2a80d99349a0bb08698
|
f58d73bb5d624a78c329e79a60d5fb06b4c36837
|
/inst/GRM/server.R
|
13f16d6c94ae83fc7dcb65d5fee534386f0556b5
|
[] |
no_license
|
cran/irtDemo
|
be108fc0c36aa0328f1ed23b5d2c153ed3c0b701
|
3b36e362d74563f404374c8333f11cda023abc70
|
refs/heads/master
| 2020-04-06T07:01:12.326558
| 2018-04-05T19:29:46
| 2018-04-05T19:29:46
| 57,357,089
| 3
| 5
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,129
|
r
|
server.R
|
# Shiny server for the Graded Response Model demo: draws the five category
# response curves P(X = m | theta) of a 5-category item whose four thresholds
# (delta1..delta4) and discrimination (alpha) come from the UI inputs.
# Relies on Pfun(), thetas and N defined in the app's global environment.
shinyServer(function(input, output){
  output$grm_plot <- renderPlot({
    # Scaling constant: 1 (logistic metric) or 1.702 (normal-ogive approximation).
    D <- switch(input$D, "1" = 1, "2" = 1.702)
    # Cumulative curves P(X >= m+1 | theta), one column per threshold,
    # evaluated pointwise at every theta (Pfun is called per element, exactly
    # as in the original implementation).
    deltas <- c(input$delta1, input$delta2, input$delta3, input$delta4)
    p <- matrix(NA_real_, nrow = N, ncol = 4)
    for (j in seq_len(N)) {
      for (k in seq_along(deltas)) {
        p[j, k] <- Pfun(D = D, theta = thetas[j], delta = deltas[k], alpha = input$alpha)
      }
    }
    # Category probabilities are differences of adjacent cumulative curves
    # (with implicit boundary curves 1 and 0).
    cats <- cbind(1 - p[, 1], p[, 1] - p[, 2], p[, 2] - p[, 3], p[, 3] - p[, 4], p[, 4])
    graphics::plot(NULL, ylab="P(X=m|theta)", xlab=expression(theta), main="Graded Response Model", xlim=c(-6,6), ylim=c(0,1))
    for (m in 1:5) {
      lines(thetas, cats[, m], type="l", xlim=c(-6,6), col=m + 1)
    }
    legend(legend=c("P(X=1|theta)", "P(X=2|theta)","P(X=3|theta)","P(X=4|theta)","P(X=5|theta)"), col=2:6, lty=1, "right")
  })
})
|
4add5cf28cc26b06a727179598105a7a38cf71f8
|
a9c918bf7f1f77fe2aea48b9cd4ae5427a82df08
|
/man/tables.Rd
|
7643caeb1e843a64f3309ab57be8189601a6730d
|
[] |
no_license
|
rje42/contingency
|
05097da4f927323b7f770063a5b1b6ddf3667f53
|
d1b4e65044d971cc9a3be1c1dfcad5d28540a998
|
refs/heads/master
| 2023-06-26T23:34:21.342576
| 2023-03-01T13:43:02
| 2023-03-01T13:43:02
| 66,774,467
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 286
|
rd
|
tables.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/probMat.R
\name{tables}
\alias{tables}
\title{Create blank tables}
\usage{
tables(n, tdim)
}
\arguments{
\item{n}{number of tables}
\item{tdim}{dimension of each table}
}
\description{
Create blank tables
}
|
2347420bb67ba1592380b5281b41d75518a806cc
|
2a7655dc0c233967a41b99369eed3eb4a6be3371
|
/4-Merge_Data/Prediction_inputs/1-Format_data_sets.R
|
f8ad0887df51caa2703e9811816c0125ba01e3f7
|
[
"MIT"
] |
permissive
|
earthlab/Western_states_daily_PM2.5
|
0977b40d883842d7114139ef041e13a63e1f9210
|
3f5121cee6659f5f5a5c14b0d3baec7bf454d4bb
|
refs/heads/master
| 2023-02-25T14:32:20.755570
| 2021-02-04T00:08:03
| 2021-02-04T00:08:03
| 117,896,754
| 2
| 1
| null | 2021-01-27T22:19:14
| 2018-01-17T21:48:29
|
R
|
UTF-8
|
R
| false
| false
| 10,412
|
r
|
1-Format_data_sets.R
|
# Load the packages used by this formatting pipeline and read the Western-US
# prediction grid; coordinates are rounded to 5 decimal places so they join
# cleanly with the extracted covariate files.
# FIX: the original loaded stringr twice; a single library() call suffices.
library(dplyr)
library(multidplyr)
library(lubridate)
library(stringr)
library(future.apply)
library(parallel)
pred_locs<- read.csv("~/Data/West_prediction_locations.csv")
pred_locs$Lon<- round(pred_locs$Lon, 5)
pred_locs$Lat<- round(pred_locs$Lat, 5)
# Unique prediction locations (Lon/Lat pairs).
Locs<- unique(pred_locs[,c("Lon", "Lat")])
#Need to round to 4 digits for AF
# ##Active Fires
#
# AF1a<- read.csv("~/Data/fire_modis_25km_extract_final.csv")
# AF1b<- read.csv("~/Data/fire_modis_50km_extract_final.csv")
# AF1c<- read.csv("~/Data/fire_modis_100km_extract_final.csv")
# AF1d<- read.csv("~/Data/fire_modis_500km_extract_final.csv")
#
# af1a<- data.frame(Lon = AF1a$Lon, Lat = AF1a$Lat, Date = AF1a$Date, Fires_25km = AF1a$fire_count)
# af1b<- data.frame(Lon = AF1b$Lon, Lat = AF1b$Lat, Date = AF1b$Date, Fires_50km = AF1b$fire_count)
# af1c<- data.frame(Lon = AF1c$Lon, Lat = AF1c$Lat, Date = AF1c$Date, Fires_100km = AF1c$fire_count)
# af1d<- data.frame(Lon = AF1d$Lon, Lat = AF1d$Lat, Date = AF1d$Date, Fires_500km = AF1d$fire_count)
#
# AF<- Reduce(function(x,y) merge(x = x, y = y, by = c("Lon", "Lat", "Date"), all = TRUE), list(af1a, af1b, af1c, af1d))
#
# AF[is.na(AF)]<- 0
#
# AF$Date<- as.Date(AF$Date)
# AF$Lon<- round(AF$Lon, 4)
# AF$Lat<- round(AF$Lat, 4)
#
# AF_agg<- aggregate(. ~ Lon + Lat + Date, AF, mean)
# AF<- AF_agg
# AF_unique_locs<- unique(AF_agg[,c("Lon", "Lat")])
#
# rm(list=c("AF", "AF1a", "AF1b", "AF1c", "AF1d", "af1a", "af1b", "af1c", "af1d"))
#
# ncores = detectCores() - 6
# AF_lags_template<- data.frame(Lon = numeric(), Lat = numeric(), Date = as.Date(character()),
# Fires_lag0_25km = numeric(), Fires_lag0_50km = numeric(),
# Fires_lag0_100km = numeric(), Fires_lag0_500km = numeric(),
# Fires_lag1_25km = numeric(), Fires_lag1_50km = numeric(),
# Fires_lag1_100km = numeric(), Fires_lag1_500km = numeric(),
# Fires_lag2_25km = numeric(), Fires_lag2_50km = numeric(),
# Fires_lag2_100km = numeric(), Fires_lag2_500km = numeric(),
# Fires_lag3_25km = numeric(), Fires_lag3_50km = numeric(),
# Fires_lag3_100km = numeric(), Fires_lag3_500km = numeric(),
# Fires_lag4_25km = numeric(), Fires_lag4_50km = numeric(),
# Fires_lag4_100km = numeric(), Fires_lag4_500km = numeric(),
# Fires_lag5_25km = numeric(), Fires_lag5_50km = numeric(),
# Fires_lag5_100km = numeric(), Fires_lag5_500km = numeric(),
# Fires_lag6_25km = numeric(), Fires_lag6_50km = numeric(),
# Fires_lag6_100km = numeric(), Fires_lag6_500km = numeric(),
# Fires_lag7_25km = numeric(), Fires_lag7_50km = numeric(),
# Fires_lag7_100km = numeric(), Fires_lag7_500km = numeric())
#
# #Second try:
# dates<- seq.Date(as.Date("2008-01-01"), as.Date("2018-12-31"), by = "day")
# Date<- sort(rep(dates, dim(AF_unique_locs)[1]))
# Lon<- rep(AF_unique_locs$Lon, length(dates))
# Lat<- rep(AF_unique_locs$Lat, length(dates))
# all_LLD<- data.frame(Lon, Lat, Date)
#
# ready_AF<- left_join(all_LLD, AF_agg, by = c("Lon", "Lat", "Date"))
# num_Locs<- dim(AF_unique_locs)[1]
#
# merge_AF_lags<- function(these, j){
# these_AF_lags<- AF_lags_template
# p<-1
# for(l in these){
# for(d in 1:length(dates)){
# lags<- 7
# if(dates[d] < "2008-01-08"){
# lags<- as.numeric(dates[d] - as.Date("2008-01-01") )
# }
# these_AF_lags[p,1]<- ready_AF[l,"Lon"]
# these_AF_lags[p,2]<- ready_AF[l,"Lat"]
# these_AF_lags[p,3]<- dates[d]
# my_vec<- c()
# for(i in 0:lags){
# new<- ready_AF[(d-1-i)*num_Locs + l, 4:7]
# new[is.na(new)]<- 0
# my_vec<- append(my_vec, new)
# }
# my_vec<- unlist(my_vec)
# my_vec<- append(my_vec, rep(0, 32-length(my_vec)))
# these_AF_lags[p,4:35]<- my_vec
# p<- p+1
# }
# }
# write.csv(these_AF_lags, paste0("~/Data/AF/AF_lags4_",j,".csv"), row.names = FALSE)
# return(these_AF_lags)
# }
#
# my_seq<- seq(1, num_Locs, length.out = 300)
#
# loc_list<- c()
#
# for(j in my_seq){
# uniq_locs<- round(my_seq[j]):round(my_seq[j+1]-1)
# loc_list<- append(loc_list, list(uniq_locs))
# }
#
# loc_list[[length(loc_list)]]<- append(loc_list[[length(loc_list)]], num_Locs)
#
#
# options(future.globals.maxSize= 5000*1024^2)
#
# plan(multiprocess, workers = 8) ## Parallelize
# this_list<- future_lapply(1:length(loc_list), function(j){merge_AF_lags(loc_list[[j]],j)})
# # save.image("With_AF_lags.RData")
# ##THEN, IN TERMINAL: cat _____* > ______
#
# ##Get unique:
# AF_intermediate<- read.csv("~/Data/AF/AF_lags4.csv")
#
# #Remove text:
# text_pos<- which((AF_intermediate$Lon == "Lon")&(AF_intermediate$Lat == "Lat"))
# AF_intermediate<- AF_intermediate[-text_pos,]
#
# for(j in 1:(dim(AF_intermediate)[2]-1)){
# if(j != 3){
# AF_intermediate[,j]<- as.numeric(as.character(AF_intermediate[,j]))
# }else{
# AF_intermediate[,j]<- as.Date(AF_intermediate[,j])
# }
# }
#
# AF_final<- AF_intermediate
# rm(list=c("AF_intermediate", "AF_unique_locs", "cluster", "ready_AF",
# "j", "text_pos"))
# save.image("AF_final.RData")
#MAIAC
# MAIAC<- read.csv("~/Data/MAIAC_extracted.csv")
# maiac_locs<- unique(MAIAC[,c("Lon", "Lat")])
# maiac_locs<- apply(maiac_locs, MARGIN = 2, function(y){as.numeric(as.character(y))})
# maiac_locs<- maiac_locs[-1,]
# maiac_locs<- data.frame(Lon = maiac_locs[,1], Lat = maiac_locs[,2])
# maiac_locs$Lon<- round(maiac_locs$Lon, 5)
# maiac_locs$Lat<- round(maiac_locs$Lat, 5)
#
# MAIAC<- MAIAC[-1,]
# MAIAC<- MAIAC[-1,]
# MAIAC$Lon<- round(as.numeric(as.character(MAIAC$Lon)),5)
# MAIAC$Lat<- round(as.numeric(as.character(MAIAC$Lat)),5)
#
# Locs_with_maiac<- data.frame(Lon = Locs$Lon, Lat = Locs$Lat,
# M_lon = maiac_locs$Lon, M_lat = maiac_locs$Lat )
# #Note: Locs and maiac_locs are the same, but don't exactly match... rounding error somewhere along the line
#
# row.names(MAIAC)<- 1:dim(MAIAC)[1]
#
# MAIAC$Lon<- Locs_with_maiac[match(MAIAC$Lon, Locs_with_maiac$M_lon), "Lon"]
# MAIAC$Lat<- Locs_with_maiac[match(MAIAC$Lat, Locs_with_maiac$M_lat), "Lat"]
#
# save.image("MAIAC.RData")
# Load the pre-processed MAIAC table (built by the commented-out block above)
# and normalise its key columns for later joins.
load("MAIAC.RData")
# NOTE(review): coordinates are rounded to 4 decimals here, while every other
# dataset in this script rounds to 5 -- confirm coordinate joins still match.
MAIAC$Lon<- round(MAIAC$Lon, 4)
MAIAC$Lat<- round(MAIAC$Lat, 4)
MAIAC$Date<- as.Date(MAIAC$Date)
# ##NAM
# files<- list.files("~/NAM/", pattern="Step5*", full.names = TRUE)
#
# for(s in State[-11]){
# for(f in files){ #f is the filename
# data<- read.csv(f)
# date_str<- strsplit(f, "Step5_")[[1]][2] #Used to be 2
# date<- strsplit(date_str, "_batch")[[1]][1]
# names(data)[1:2]<- c("Lat", "Lon")
# names(data)[length(names(data))]<- "Date"
# data[,1:2]<- apply(data[,1:2], MARGIN = 2,
# function(y) round(as.numeric(as.character(y)),5))
# nam<- inner_join(data[,c(1:2, 7:21)], Stat[which(Stat$State == s),c("Lon", "Lat", "State")], by = c("Lon", "Lat"))
# write.csv(nam, paste0("~/NAM/nam_",s,"_", date, ".csv"), row.names = FALSE)
# }
# print(s)
# }
##Then merge in terminal, read in, and remove header rows
# for(y in 2008:2018){
# nam<- read.csv(paste0("~/Data/NAM_data/NAM_", y, ".csv"))
# nam<- nam[,c(1:2,7:10, 14:19, 21)]
# names(nam)<- c("Lat", "Lon", "HPBL_surface", "TMP_2m", "RH_2m",
# "DPT_2m", "Ugrd_10m", "Vgrd_10m", "PRMSL_mean_sea_level",
# "PRES_surface", "DZDT_850_mb", "DZDT_700_mb", "Date")
# nam<- nam[which(nam$Lon != "Longitude"),]
# nam$Lon<- round(as.numeric(as.character(nam$Lon)), 5)
# nam$Lat<- round(as.numeric(as.character(nam$Lat)), 5)
#
# NAM<- inner_join(nam, Stat, by = c("Lon", "Lat"))
# all_NAM<- rbind(all_NAM, NAM)
# }
# save.image("NAM.RData")
##NDVI
# NDVI1<- read.csv("~/Data/ndvi_mod13a3_subset1_latlon.csv")
# NDVI2<- read.csv("~/Data/ndvi_mod13a3_subset2_latlon.csv")
# NDVI3<- read.csv("~/Data/ndvi_mod13a3_subset3_latlon.csv")
# NDVI3<- NDVI3[,-1]
# NDVI<- rbind(NDVI1, NDVI2, NDVI3) #Actually have all the data!
# rm(list=c("NDVI1", "NDVI2", "NDVI3"))
#
# NDVI_final<- NDVI[,c("Longitude", "Latitude", "Date", "NDVI")]
# names(NDVI_final)[1:2]<- c("Lon", "Lat")
# NDVI_final[,c("Lon", "Lat", "NDVI")]<- apply(NDVI_final[,c("Lon", "Lat", "NDVI")],
# MARGIN = 2,
# function(y) round(as.numeric(y),5))
# NDVI_final$Date<- as.Date(NDVI_final$Date)
# rm(list=setdiff(ls(), "NDVI_final"))
# save.image("NDVI_final.RData")
##Stationary variables:
#NLCD
# NLCD percent-urban land cover extracted at 1 km / 5 km / 10 km buffers.
# Each file is averaged to one value per (Lon, Lat), the three radii are
# joined into a single table, and the columns renamed NLCD_<radius>.
nlcd_files <- c("~/Data/nlcd_1km_extract.csv",
                "~/Data/nlcd_5km_extract.csv",
                "~/Data/nlcd_10km_extract.csv")
nlcd_agg_list <- lapply(nlcd_files, function(f) {
  aggregate(percent_urban_buffer ~ Lon + Lat, data = read.csv(f), FUN = mean)
})
NLCD <- Reduce(function(x, y) unique(inner_join(x, y, by = c("Lon", "Lat"))),
               nlcd_agg_list)
NLCD$Lon <- round(as.numeric(as.character(NLCD$Lon)), 5)
NLCD$Lat <- round(as.numeric(as.character(NLCD$Lat)), 5)
names(NLCD) <- c("Lon", "Lat", "NLCD_1km", "NLCD_5km", "NLCD_10km")
## Population density -- one value per 5-decimal (Lon, Lat) grid point.
pop<- read.csv("~/Data/Pop_density.csv")
pop$Lon<- round(pop$Lon, 5)
pop$Lat<- round(pop$Lat, 5)
# NOTE(review): columns selected by position (2:3 = Lon/Lat, 8 = density?);
# fragile if the CSV layout changes -- confirm and prefer names.
pop_agg<- aggregate(. ~ Lon + Lat, pop[,c(2:3, 8)], mean)
## Highway proximity indicators (road classes A/C/Both within 100-1000 m
## buffers), averaged per grid point.
HW<- read.csv("~/Data/Highways.csv")
HW$Lon<- round(HW$Lon, 5)
HW$Lat<- round(HW$Lat, 5)
HW_vars<- c("Lon", "Lat", "A_100", "C_100", "Both_100", "A_250", "C_250", "Both_250",
            "A_500", "C_500", "Both_500", "A_1000", "C_1000", "Both_1000")
HW_agg<- aggregate(. ~ Lon + Lat, HW[,HW_vars], mean)
## Elevation (NED) variables, averaged per grid point.
elev<- read.csv("~/Data/ned_extracted.csv")
elev$Lon<- round(elev$Lon, 5)
elev$Lat<- round(elev$Lat, 5)
elev_agg<- aggregate(. ~ Lon + Lat, elev[,c(1:8)], mean)
## Merge all stationary (time-invariant) variables on (Lon, Lat) and persist.
## inner_join keeps only grid points present in every source table.
Stat<- unique(Reduce(function(x,y){inner_join(x,y, by = c("Lon", "Lat"))},
                     list(pop_agg, NLCD, HW_agg, elev_agg)))
write.csv(Stat, "Stationary_variables.csv", row.names = FALSE)
|
b4afee6b360660265f9496861c35e4669a8b1439
|
3d41fbf1e8277ad6cf4e53ffe2acb9ef95059cf0
|
/man/WGLP.Rd
|
4809be46a4b5f4f5627f6cbc8f15aa747d2a78bc
|
[] |
no_license
|
WenlongLi2020/MaximinDesign
|
fb51f8a4ec0791ede9e379755bd935b9ae1acdd1
|
20c88c0e7a46ec0f998b2b0a0df5cdba876a92e4
|
refs/heads/main
| 2023-02-08T18:27:06.424095
| 2020-12-21T08:00:00
| 2020-12-21T08:00:00
| 323,265,531
| 3
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 1,126
|
rd
|
WGLP.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/WGLP.R
\name{WGLP}
\alias{WGLP}
\title{Williams Transformation}
\usage{
WGLP(s)
}
\arguments{
\item{s}{The run size of design B, where s is a prime.}
}
\value{
The return value is an LH(s,s-1) when s is a prime or LH(s, phi(s)) when s is not a prime.
}
\description{
\code{WGLP} is Williams transformations of linearly transformed good lattice points. This provides a choice of design B.
}
\details{
Under the L1-distance, Wang et al. (2018, Theorem 2) constructed an LH(s,s-1) when s is a prime, where LH(s,s-1) is a Latin hypercube of s runs for s-1 factors. If s is not
a prime, the Williams transformation can generate designs with s runs and phi(s) factors,
where phi(s) is the Euler function, that is, the number of positive integers smaller than and coprime to s.
}
\examples{
# Note that WGLP(7) produces an equi-distant LH(7,6)
B <- WGLP(7)
B <- WGLP(13)
}
\references{
Wenlong Li, Min-Qian Liu and Boxin Tang (2021). A method of constructing maximin distance designs. \emph{Biometrika}, published online. <doi:10.1093/biomet/asaa089>
}
|
00fd89039feaef11d459d12f48b075fe3c2f92e4
|
76a60411ed849bc0366a5f69887629567875832d
|
/Resampling codes/q5.R
|
3b3b99a4d6ec7129245f8d091749e24926c69a0b
|
[] |
no_license
|
somak135/Resampling-codes
|
a7f3dd0054dd2e205c57f146bdda05561ffb05e6
|
008429b4dc692f31f92c87bbf1cc8fed238cb2ee
|
refs/heads/main
| 2023-05-22T08:48:42.868581
| 2021-06-14T16:26:02
| 2021-06-14T16:26:02
| 374,428,808
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 4,576
|
r
|
q5.R
|
##### Global simulation settings (shared by every function below)
### q regression coefficients, observed at the m time points in T
q = 4
n = 100 ## number of sample components (units/subjects)
# NOTE(review): `T` masks the base-R shorthand for TRUE from here on; the rest
# of the script depends on this name, so it is kept -- always spell out TRUE.
T = seq(1, 4, length = 10)
m = length(T) ##number of time points
### Z(t): the q x 1 regressor vector evaluated at time t
Z = function(t) {
  return(matrix(c(12-0.5*t^2, -2*t, log(3+1/t), exp(-t)), ncol = 1))
}
sigma_e = 0.05  # sd of the additive observation noise
### declare cutoff used by the reliability estimate R-hat(t) (see Estim)
eta = 20
### parameters of the random coefficients Theta_i ~ N(theta, Sigma)
library(MASS)
theta = c(4, 3, 5, 5)
Sigma = matrix(c(0.75, -.27, .092, -.21, -.27, .86,
                 .15, .049, .092, .15, .75, -.071,
                 -.21, .049, -0.071, .24), nrow = q)
##### Simulate
# Draw n longitudinal response curves at the m time points in the global T.
# Unit i gets random coefficients Theta_i ~ N(theta, Sigma); its response at
# time t is t(Z(t)) %*% Theta_i plus N(0, sigma_e^2) noise. RNG draws occur
# in the same order as before (one mvrnorm per unit, then one rnorm per time
# point), so results are reproducible under the same seed.
# Returns a list with element "data": an m x n matrix (rows = times).
Simu <- function(n, m, q, theta, Sigma, sigma_e) {
  obs <- matrix(NA_real_, nrow = m, ncol = n)
  for (unit in seq_len(n)) {
    unit_coef <- mvrnorm(1, theta, Sigma)
    obs[, unit] <- vapply(
      seq_len(m),
      function(tp) as.numeric(t(Z(T[tp])) %*% unit_coef) +
        rnorm(1, mean = 0, sd = sigma_e),
      numeric(1)
    )
  }
  list(data = obs)
}
#### Estimation of R-hat(t)
# Estimate the model parameters and reliability curve from an m x n data
# matrix S (rows = time points in T, columns = sampled units).
#
# Args:
#   n, m, q: number of units, time points, and regression coefficients.
#   S:       m x n data matrix (only the first n columns are used).
#   Z:       function mapping a time t to the q x 1 regressor vector.
#   T:       vector of the m observation times.
# Uses the global cutoff `eta` when converting to reliabilities.
# Returns a list: theta_hat, sigma_e2_hat, Sigma_Theta_hat, Rt.
#
# Improvements over the original: (Z'Z)^{-1} and the projection matrix are
# computed once instead of inside every loop iteration; st/Rt are
# preallocated instead of grown with c(); 1:n replaced by seq_len().
Estim <- function(n, m, q, S, Z, T) {
  # Design matrix: row i is t(Z(T[i])).
  Zmat <- matrix(nrow = m, ncol = q)
  for (i in seq_len(m)) {
    Zmat[i, ] <- Z(T[i])
  }
  ZtZ_inv <- solve(t(Zmat) %*% Zmat)      # (Z'Z)^{-1}, reused everywhere below
  K <- Zmat %*% ZtZ_inv %*% t(Zmat)       # projection onto col space of Zmat
  # Mean curve across the n units, then GLS estimate of theta.
  Ybar <- rowMeans(S[, seq_len(n), drop = FALSE])
  theta_hat <- ZtZ_inv %*% t(Zmat) %*% Ybar
  # hat(sigma_e)^2: pooled residual sum of squares on n*(m - q) dof.
  sigma_e2_hat <- 0
  for (i in seq_len(n)) {
    sigma_e2_hat <- sigma_e2_hat +
      t(S[, i]) %*% S[, i] - t(S[, i]) %*% K %*% S[, i]
  }
  sigma_e2_hat <- sigma_e2_hat / (n * (m - q))
  # hat(Sigma_Theta): covariance of per-unit coefficient deviations,
  # bias-corrected by the error-variance term.
  Sigma_Theta <- matrix(0, nrow = q, ncol = q)
  for (i in seq_len(n)) {
    b_i <- ZtZ_inv %*% t(Zmat) %*% (S[, i] - Ybar)
    Sigma_Theta <- Sigma_Theta + b_i %*% t(b_i)
  }
  Sigma_Theta <- Sigma_Theta / n - as.numeric(sigma_e2_hat) * ZtZ_inv
  # hat(s(t)) and hat(R(t)) at each time point (preallocated).
  st <- numeric(m)
  Rt <- numeric(m)
  for (i in seq_len(m)) {
    st[i] <- sqrt(t(Zmat[i, ]) %*% Sigma_Theta %*% Zmat[i, ])
    Rt[i] <- pnorm((t(Zmat[i, ]) %*% theta_hat - eta) / st[i])
  }
  list(theta_hat = theta_hat, sigma_e2_hat = sigma_e2_hat,
       Sigma_Theta_hat = Sigma_Theta, Rt = Rt)
}
#### Now that we are done with Simulation and Estimation, lets jump into Jackknife and Bootstrap!
# Delete-one jackknife variance of R-hat(t).
# Each leave-one-out replicate re-estimates R(t) from the remaining n - 1
# columns of S; the per-time-point variances of the replicate curves are
# scaled by (n - 1)^2 / n. Relies on the globals q, Z, T and Estim().
vjack <- function(n, m, S) {
  leave_one_out <- function(i) {
    Estim(n - 1, m, q, S[, -i], Z, T)$Rt
  }
  Jmat <- t(vapply(seq_len(n), leave_one_out, numeric(m)))
  ((n - 1)^2 / n) * diag(var(Jmat))
}
# Nonparametric bootstrap variance of R-hat(t).
# Draws B resamples of the columns (units) of S with replacement, re-estimates
# R(t) on each, and returns the per-time-point empirical variances scaled by
# (B - 1) / B. Relies on the globals q, Z, T and Estim().
#
# Fix: the sample size is now taken from S itself (ncol(S)) instead of
# silently reading the global `n`, which made the function wrong for any
# matrix that did not have exactly `n` columns. RNG usage is unchanged.
vboot <- function(B = 200, m, S) {
  n_units <- ncol(S)
  Bmat <- matrix(nrow = B, ncol = m)
  for (b in seq_len(B)) {
    S_new <- S[, sample(n_units, n_units, replace = TRUE)]
    Bmat[b, ] <- Estim(n_units, m, q, S_new, Z, T)$Rt
  }
  ((B - 1) / B) * diag(var(Bmat))
}
### Repeat the jackknife and bootstrap over N simulated datasets.
### Seeds 1..N make each replication reproducible and let the linearisation
### loop further below reuse exactly the same datasets.
N = 100; v_jack = c(); v_boot = c(); B = 2*n
for(i in 1:N) {
  set.seed(i)
  S = Simu(n,m,q,theta, Sigma, sigma_e)$data
  # each row of v_jack / v_boot holds the m per-time-point variance estimates
  v_jack = rbind(v_jack, vjack(n, m, S))
  v_boot = rbind(v_boot, vboot(B, m, S))
}
##### Linearisation (delta-method) variance estimate
# Gradient of the plug-in reliability g(x1, x2, x3) = pnorm((x1 - eta)/sqrt(d))
# with respect to (x1, x2, x3), evaluated at time index i, where
#   d = x2 - x1^2 - x3 * z_i' (Z'Z)^{-1} z_i
# and z_i is row i of the global design matrix Zmat.
# NOTE(review): the analytic derivatives g1-g3 are taken on trust from the
# underlying derivation -- verify against the write-up before reuse.
gradg = function(x1, x2, x3, i) {
  # variance term d(x1, x2, x3) at time point i
  d = x2 - x1^2 - x3*(t(Zmat[i, ])%*%(solve(t(Zmat)%*%Zmat))%*%Zmat[i,])
  # partial derivatives of pnorm((x1 - eta)/sqrt(d)) w.r.t. x1, x2, x3
  g1 = dnorm((x1 - eta)/sqrt(d)) * (sqrt(d) + (x1 - eta)*x1/sqrt(d))/d
  g2 = dnorm((x1 - eta)/sqrt(d)) * (-0.5*(x1 - eta)/sqrt(d))/d
  g3 = dnorm((x1 - eta)/sqrt(d)) *
    (0.5*(x1 - eta)*(t(Zmat[i, ])%*%(solve(t(Zmat)%*%Zmat))%*%Zmat[i,])/sqrt(d))/d
  return(c(g1, g2, g3))
}
# Delta-method variance over the same N simulated datasets (same seeds as the
# jackknife/bootstrap loop, so the three methods see identical data).
N = 100; v_L = c()
# Design matrix: row i is t(Z(T[i])).
Zmat = matrix(nrow = m, ncol = q)
for(i in 1:m){
  Zmat[i, ] = Z(T[i])
}
for(i in 1:N) {
  set.seed(i)
  S = Simu(n,m,q,theta, Sigma, sigma_e)$data
  M=solve(t(Zmat)%*%Zmat)    # (Z'Z)^{-1}
  K=Zmat%*%M%*%t(Zmat)       # projection onto the column space of Zmat
  v_linear = c()
  for(t in 1:m) {
    P=t(Zmat[t, ])%*%M%*%t(Zmat)  # maps a data column to its fitted value at time t
    # x1: per-unit fitted mean at time t
    f1<-function(v)
    {
      return(P%*%v)
    }
    x1=apply(S, 2, f1)
    x2=x1^2
    # x3: per-unit residual sum of squares / (m - q)
    f2<-function(v)
    {
      return(t(v)%*%v-t(v)%*%K%*%v)
    }
    x3=apply(S, 2, f2)/(m-q)
    X=cbind(x1, x2, x3)
    Sigma_hat_X=var(X)   # empirical covariance of (x1, x2, x3) across units
    u=colMeans(X)
    g=gradg(u[1],u[2],u[3], t)    # gradient of g at the sample mean
    v=(t(g)%*%Sigma_hat_X%*%g)/n  # delta-method variance of g(u)
    v_linear = c(v_linear, v)
  }
  v_L = rbind(v_L, v_linear)
}
# Summarise the N replications: mean and SD of the estimated standard errors
# (sqrt of the variance estimates) at each time point, for the jackknife,
# bootstrap and linearisation methods (columns 1-3 respectively).
se_jack <- sqrt(v_jack)
se_boot <- sqrt(v_boot)
se_lin <- sqrt(v_L)
report_mean_matrix <- cbind(colMeans(se_jack),
                            colMeans(se_boot),
                            colMeans(se_lin))
report_sd_matrix <- cbind(apply(se_jack, 2, sd),
                          apply(se_boot, 2, sd),
                          apply(se_lin, 2, sd))
report_mean_matrix
report_sd_matrix
# Audible notification that the (long) simulation has finished.
library(beepr)
beep(4)
|
44808f701994c5f4249080de698d725eb0ed9bcf
|
6f5711a306320d04f2ce8109880573065c710142
|
/app.R
|
bc8253461221197d2cb762566abfd6452a51a2fc
|
[] |
no_license
|
konradmiz/IntroNetworks
|
5b45498062269fc8ad4a0fc72acde8ce9c3e503e
|
b4a12f79fa9873ba69c7f2970b287789c8ba1a5a
|
refs/heads/master
| 2020-03-19T02:10:58.573746
| 2018-06-02T21:02:59
| 2018-06-02T21:02:59
| 135,606,642
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 13,626
|
r
|
app.R
|
library(readr)
library(shiny)
library(shinythemes)
library(dplyr)
library(ggplot2)
library(visNetwork)
library(igraph)
hub_locations <- readRDS("Hub Locations.rds")
# Shiny UI: a navlist with introductory prose tabs, the interactive network
# visualisation (date / time-of-day / weekend filters), and summary tabs.
ui <- fluidPage(theme = shinytheme("flatly"),
                #shinythemes::themeSelector(),
                titlePanel("BikeTown Trips Network"),
                #mainPanel(
                navlistPanel(
                  # Static prose tabs introducing the dataset and approach.
                  "Introduction",
                  tabPanel("Bikeshare background",
                           p('Bike-sharing systems are becoming more and more prevalent in cities in both the United States and worldwide.
                             A variety of systems exist, but a common "docked" setup consists of hundreds (or thousands) of bikes deployed at one of tens (or hundreds) of docking stations,
                             alternately known as hubs or stands. People can unlock one of these bikes and ride them around, then leave them locked at a hub or on the street
                             (depending on their payment plan). Both bikes and hubs are equipped with sensors that send and receive real-time feed data which can be accessed through an API.
                             When a user borrows a bike, information on trip start time, location, user id, payment plan type, duration, end time, end location, and
                             distance traveled are stored in a database. Some bike-sharing systems have publicly released anonymized journey data with fields as listed above;
                             Austwick et. al analyzed data from London, UK; Boston, MA; Denver, CO; Minneapolis, MN; and Washington, DC.
                             Recently, BikeTown, the bike-sharing system in Portland, OR, released all anonymized trip information from their inception, July 2016, to March 2018.
                             Portland is a "semi-dockless" system in that trips can start or end at one of over 100 hubs or by parking the bike on the street
                             (though there is a fee for not returning the bike to a hub and therefore most trips are from hub to hub).')
                           ),
                  tabPanel("Bikeshare as a network",
                           h3('Adjacency Matrix approach'),
                           p('It is logical to think about or model docked or semi-dockless bike-sharing systems with a networks approach.
                             Hubs are well-defined, stationary places at which trips start/end, and trip data presents information on the flow of travelers between them
                             (i.e., the edges connecting two nodes). Some trips did not start or end at a hub and were therefore not included in this analysis. The simplest networks approach would be
                             to create a symmetric adjacency matrix where two hubs are connected if a trip took place between them. While this is is an okay start, it misses much of what makes bikesharing
                             interesting. Though easily represented in network form, starting from an adjacency matrix the nature of trips in bikeshare networks requires that the network representation
                             needs to be spatial, directed, temporal, weighted, with self-loops and non-sparsity (Austwick et. al), and additionally is non-planar in that edges between nodes
                             intersect or overlap (Barthelemey). More detail on these characteristics is given below'),
                           h3('Spatial approach'),
                           p('Trips are inherently spatial, taking place in two-dimensional Euclidean space with a defined start- and end-point, which may or may not be the same. The
                             network topology is intrinsically tied to the topology of the city and the location of points of interest within the city. The spatial layout of the city
                             (and by extension bike trips) can be considered with a core-periphery structure approach with densely core nodes and sparsely-connected periphery nodes (Rombach et al).
                             This can be identified in the network visualization below.'),
                           h3('Temporal approach'),
                           p('The temporal aspect of trips is present in several ways: the time of when trips start and end is not uniform throughout the day, week, or time of year
                             but instead shows interesting patterns; likewise, the trips themselves have a non-zero duration, of importance when optimizing bike placement to not run out of available bikes.
                             The majority of bikes are rented around 4-6PM, though the specific pattern of rides is different between weekdays and weekends or holidays; likewise, summer months see
                             many more rides than the winter months do. Over time, the connectivity of the network increases as rides take place between hubs that had not been connected previously'),
                           h3('Multiplex approach'),
                           p('The users themselves have not yet been considered, but their attributes can have non-trivial effects on the network structure. There are many types of membership
                             available to BikeTown users. The dataset provided to the public only has two membership types: "subscriber" or "casual". These users differ in their ridership habits, both
                             in what time of day/week/year they ride, but also in the nodes they are likely to visit'),
                           h3('Multigraph (directed graph) approach'),
                           p('Given an appropriate amount of time for trips to occur, multiple trips will occur between hubs, and the flow of bikes between hubs has strong importance
                             for the operational aspect of supplying bikes. On an aggregated level, which can be useful when dealing with thousands or millions of trips,
                             the flows between hubs can be intuitively seen as a weighted property: while each trip has a weight of only one, on a larger scale the directed edge weight between hubs
                             is the sum of the trips between the start and end hub. Which hubs see lots of traffic and which do not is an interesting and useful characteristic of the system.')
                           ),
                  tabPanel("Materials",
                           h3('References'),
                           p('Barthelemy, Marc. "Spatial Networks." Physics Reports, vol. 499, no. 1, 2011, pp. 1-101.'),
                           p('Rombach, Puck, et al. "Core-Periphery Structure in Networks (Revisited)." SIAM Review, vol. 59, no. 3, 2017, pp. 619-646.'),
                           p('Expert, Paul, et al. "Uncovering Space-Independent Communities in Spatial Networks."
                             Proceedings of the National Academy of Sciences of the United States of America, vol. 108, no. 19, 2011, pp. 7663-8.'),
                           p('Zaltz Austwick, Martin, et al. "The Structure of Spatial Networks and Communities in Bicycle Sharing Systems." PLoS ONE, vol. 8, no. 9, 2013, p. e74685.'),
                           h3('Dataset'),
                           a(href="https://www.biketownpdx.com/system-data", "BikeTown System Data", target = "_blank"),
                           h3('Graphical interpretation packages'),
                           strong('shiny'), p(),
                           strong('igraph'), p(),
                           strong('visNetwork')
                           ),
                  # Interactive tabs: filters feed the reactives in server().
                  "Network",
                  #tabsetPanel("Network Visualiztion", type = "tabs",
                  tabPanel("Network Properties",
                           h4("By default, the network shown is from the entire trips dataset. To dig deeper, filter by date or time."),
                           h3("Network properties: "),
                           p("Node size is proportional to the total degree of the node, while edge width is proportional to the out-degree.
                             Edges are colored gray if they both nodes are in the same neighborhood (N, NE, NW, etc) and colored blue if they are not.")
                           ),
                  tabPanel("Dynamic Network",
                           sidebarLayout(
                             sidebarPanel(width = 2, position = "left",
                                          # Inputs consumed by network_data() in server().
                                          dateRangeInput("tripDate", label = "Trip Date Range", start = "2016-07-01", end = Sys.Date()),
                                          #sliderInput("tripDate", "Choose Date Range:",
                                          #            min = as.Date("2016-07-19"), max = Sys.Date(),
                                          #            value = c(as.Date("2016-07-19"), as.Date("2016-07-19")),
                                          #            animate = TRUE),
                                          sliderInput("time", label = "Trip start time", min = 0, max = 23.5, step = 0.5, value = c(0,23.5)),
                                          # NOTE(review): the "weekend" input is defined here but never
                                          # read in server() -- confirm whether filtering was intended.
                                          radioButtons("weekend", "Weekend Trips", choices = c("Yes", "No", "Both"), selected = "Both")
                             ),
                             mainPanel(
                               visNetworkOutput("network", height = "500", width = "800")
                             )
                           )),
                  tabPanel("Summary Statistics",
                           tableOutput("avgDegree")),
                  #),
                  tabPanel("Degree Distributions",
                           plotOutput("degreeDist"),
                           plotOutput("components")),
                  #tabPanel("Static Networks"),
                  widths = c(2,10)
                )
)
# Shiny server: builds the trip edge list from the user's filters, derives an
# igraph / visNetwork representation, and renders the network, summary table
# and degree-distribution plots.
server <- function(input, output){
  # Edge list (from, to, weight) of hub-to-hub trips matching the current
  # date range and start-time window. Re-reads the RDS on every invalidation.
  network_data <- reactive({
    #trips <- readRDS("C:/Users/Konrad/Desktop/Intro to Networks/Term Project/All Trips.rds")
    trips <- readRDS("All Trips.rds")
    filt_trips <- trips %>%
      filter(StartDate >= input$tripDate[1] & StartDate <= input$tripDate[2] &
               StartTime >= input$time[1] * 60 * 60 & StartTime < input$time[2] * 60 * 60 &
               !is.na(StartHub) & !is.na(EndHub))
    # weight = number of trips observed between each ordered hub pair
    to_from_trips <- filt_trips %>%
      group_by(StartHub, EndHub) %>%
      count() %>%
      select(from = StartHub, to = EndHub, weight = n)
    return(to_from_trips)
  })
  # Directed igraph built from the filtered edge list.
  network_graph <- reactive({
    #od_matrix <- network_od()
    trips_data <- network_data()
    early_graph <- igraph::graph_from_edgelist(cbind(trips_data$from, trips_data$to), directed = TRUE)
    #early_graph <- igraph::graph_from_edgelist(cbind(early_june_trips$from, early_june_trips$to), directed = TRUE)
    #early_graph <- igraph::graph_from_adjacency_matrix(as.matrix(od_matrix), weighted = TRUE, mode = "directed")
    return(early_graph)
  })
  # visNetwork object decorated for display: node size ~ sqrt(total degree),
  # hover tooltips with in/out degree, edge width = trip count, and edges
  # colored by whether both endpoints share a neighborhood prefix (first two
  # characters of the hub name).
  network_vis_graph <- reactive({
    early_graph <- network_graph()
    vis_early <- visIgraph(early_graph)
    #vis_early$x$nodes$id <- V(early_graph)$name
    #vis_early$x$nodes$label <- V(early_graph)$name
    # attach hub coordinates for the geographic layout used in renderVisNetwork
    vis_early$x$nodes <- vis_early$x$nodes %>%
      left_join(hub_locations, by = c("id" ="StartHub"))
    vis_early$x$nodes$value <- sqrt(degree(early_graph))
    vis_early$x$nodes$color <- "#ff8d00"
    vis_early$x$nodes$title = paste0(vis_early$x$nodes$id, "<br>", "In-degree: ", degree(early_graph, mode = "in"),
                                     "<br>", "Out-degree: ", degree(early_graph, mode = "out"))
    #vis_early$x$edges <- vis_early$x$edges %>%
    #  left_join(early_june_trips) %>%
    #  mutate(width = weight)
    #degree(early_graph,)
    vis_early$x$edges$width = vis_early$x$edges$weight
    vis_early$x$edges$start_part <- substr(vis_early$x$edges$from, start = 1, stop = 2)
    vis_early$x$edges$end_part <- substr(vis_early$x$edges$to, start = 1, stop = 2)
    vis_early$x$edges$color <- NA
    # gray (#b6bcc6) for cross-neighborhood edges, blue within a neighborhood
    vis_early$x$edges <- vis_early$x$edges %>%
      mutate(color = ifelse(start_part != end_part, "#b6bcc6", "blue"))
    #vis_early$x$edges$color.highlight.background <- "red"
    return(vis_early)
  })
  # One-row table of whole-network statistics for the current filters.
  network_summary <- reactive({
    trips_data <- network_data()
    early_graph <- network_graph()
    #vis_early <- network_vis_graph()
    #od_matrix <- network_od()
    #edge_info <- vis_early$x$edges
    # mean number of distinct destination hubs per origin hub
    avg_connected_nodes <- trips_data %>%
      group_by(from) %>%
      count() %>%
      ungroup() %>%
      summarise(Mean = mean(n))
    density = ecount(early_graph)/(vcount(early_graph)^2)
    summary_stats <- tibble(trips = sum(trips_data$weight),
                            numNodes = vcount(early_graph),
                            numEdges = ecount(early_graph),
                            AvgEdgeWeight = mean(trips_data$weight),
                            AvgDegree = avg_connected_nodes$Mean,
                            AvgPathLength = average.path.length(early_graph, directed = TRUE),
                            #Diameter = diameter(early_graph),
                            Density = density,
                            Clustering = transitivity(early_graph))
    average.path.length(early_graph, directed = TRUE)
    return(summary_stats)
  })
  output$avgDegree <- renderTable({
    network_summary()
  })
  # Geographic layout: longitude as x, negated latitude as y (screen coords).
  output$network <- renderVisNetwork({
    vis_early <- network_vis_graph()
    vis_early %>%
      visIgraphLayout(layout = "layout.norm", layoutMatrix = cbind(vis_early$x$nodes$Lon, -vis_early$x$nodes$Lat)) %>%
      visNodes(color = list(background = "#ff8d00", highlight = "black")) %>%
      #visEdges(color = "#b6bcc6", arrows = "none") %>%
      visEdges(arrows = "none") %>%
      visOptions(highlightNearest = TRUE) %>%
      visInteraction(dragNodes = FALSE, hover = TRUE, keyboard = TRUE)
    #visEvents(click = "function(nodes){
    #          Shiny.onInputChange('click', nodes.nodes[0]);
    #          ;}"
    #)
    #visEvents(selectNode = "function(properties) {
    #alert('selected nodes ' + this.body.data.nodes.get(properties.nodes[0]).id);}")
    #visEvents(selectNode = "function myFunction() {
    #          var popup = document.getElementById('myPopup');
    #          popup.classList.toggle('show');}")
  })
  # Histogram of node total degree for the current filtered network.
  output$degreeDist <- renderPlot({
    early_graph <- network_graph()
    my_data <- data.frame(Degree = degree(early_graph))
    ggplot(my_data, aes(Degree)) +
      geom_histogram() +
      theme_minimal()
  })
  # Bar chart comparing the number of connected components to node count.
  output$components <- renderPlot({
    early_graph <- network_graph()
    my_data <- data.frame(Var = c("Components", "Nodes"), Val= c(components(early_graph)$no,
                                                                 vcount(early_graph)))
    ggplot(my_data, aes(Var, Val)) +
      geom_bar(stat = "identity") +
      ylab("Count") +
      xlab("Variable") +
      theme_minimal()
  })
}
shinyApp(ui = ui, server = server)
|
3af0030a388e62f152523ca47ac43796063ddb99
|
a148cf702f9d7263b8a44de40ac942b2696132b3
|
/scripts/amerifluxFormatting.R
|
78fa9f61c13f51d9a484fbf40bc2553d660d7306
|
[] |
no_license
|
USEPA/actonEC
|
0374f1ed67e628f8516e16f7a7e249c195c237e7
|
ceeab99bae03f9cdd5dcbd27865b43bfee9ecf07
|
refs/heads/master
| 2021-07-18T11:57:23.295626
| 2020-11-29T20:00:41
| 2020-11-29T20:00:41
| 242,174,208
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 11,240
|
r
|
amerifluxFormatting.R
|
#This script prepares a file in the format needed to submit to AmeriFlux
#before running, load the following data frames (you can use loadPrelimOutputs.R):
#epOutOrder, rbrTsub, campMet, vanni30min
#file.edit("scriptsAndRmd/loadPrelimOutputs.R")
# Build the AmeriFlux-format table from the EddyPro output (epOutOrder):
# rename/convert variables to AmeriFlux base names and units, initialise the
# not-yet-merged biomet columns to the -9999 missing-data code, and keep only
# the AmeriFlux columns. The -9999 placeholders are filled from the RBR,
# VWS and Campbell datasets further below.
USact<-mutate(epOutOrder,
              RDateTime_START = RDateTime,
              RDateTime_END = (RDateTime+30*60),  # half-hourly averaging periods
              FC_SSITC_TEST = qc_co2_flux,
              FCH4_SSITC_TEST = qc_ch4_flux,
              FETCH_70 = x_70,
              FETCH_90 = x_90,
              FETCH_FILTER = -9999, # 0 and 1 flag indicating direction that should be discarded and kept, respectively
              FETCH_MAX = x_peak,
              CH4 = ch4_mixing_ratio*1000, #nmolCH4 per mol
              CO2 = co2_mixing_ratio, #umol CO2 per mol
              CO2_SIGMA = sqrt(co2_var),
              FC = co2_flux, #umolCO2 m-2 s-1
              FCH4 = ch4_flux*1000, #nmolCH4 m-2 s-1
              H2O = h2o_mixing_ratio, #mmol mol-1
              H2O_SIGMA = sqrt(h2o_var/1000), #h2o_var doesn't have units, and an
              # investigation (see co2FluxDiagnostics) reveals that
              # it is generally ~1000x the variance calculated from the raw
              # dataset, so probably in umol/mol, while h2o mixing ratio is
              # in mmol/mol. Likely not precisely a factor of 1000 due to processing steps.
              SC = co2_strg,
              SCH4 = ch4_strg*1000, #nmol/m2/s
              H = H,
              H_SSITC_TEST = qc_H,
              LE = LE,
              LE_SSITC_TEST = qc_LE,
              SH = H_strg,
              SLE = LE_strg,
              PA = air_pressure/1000, #kPa -- air_p_mean was being loaded in incorrectly
              RH = RH,
              T_SONIC = sonic_temperature-273.15, #C
              TA = air_temperature-273.15, #C
              VPD = VPD/100, #hPa
              P=-9999, #precipitation
              P_RAIN = -9999, #rainfall, from VWS
              NETRAD = -9999, #net radiation, W/m2, from our net radiometer
              PPFD_IN = -9999, #PPFD, incoming
              TS = -9999, #soil temperature, sed t?, from RBRs, ~1.6m
              TW_1 = -9999, #water T, from RBRs -0.1
              TW_2 = -9999, #water T, from RBRs -0.25
              TW_3 = -9999, #water T, from RBRs -0.5
              TW_4 = -9999, #water T, from RBRs -0.75
              TW_5 = -9999, #water T, from RBRs -1.0
              TW_6 = -9999, #water T, from RBRs -1.25
              WTD = -9999,
              MO_LENGTH = L,
              TAU = -Tau, #ameriflux sign convention: negative value of Tau indicates a downward transport of momentum flux
              TAU_SSITC_TEST = qc_Tau,
              U_SIGMA = sqrt(u_var), #rotated?
              USTAR = ustar,
              V_SIGMA = sqrt(v_var),
              W_SIGMA = sqrt(w_var),
              WD = wind_dir,
              WS = wind_speed,
              WS_MAX = max_wind_speed,
              ZL=zL)%>%
  select(RDateTime_START, RDateTime_END, FC_SSITC_TEST, FCH4_SSITC_TEST, FETCH_70, FETCH_90,
         FETCH_FILTER, FETCH_MAX, CH4, CO2, CO2_SIGMA, FC, FCH4, H2O, H2O_SIGMA, SC, SCH4,
         H, H_SSITC_TEST, LE, LE_SSITC_TEST, SH, SLE, PA, RH, T_SONIC, TA, VPD, P, P_RAIN,
         NETRAD, PPFD_IN, TS, TW_1, TW_2, TW_3, TW_4, TW_5, TW_6, WTD, MO_LENGTH, TAU,
         TAU_SSITC_TEST, U_SIGMA, USTAR, V_SIGMA, W_SIGMA, WD, WS, WS_MAX, ZL)
### filter values outside of plausible range (per email from Ameriflux Team):
# quick visual check of NETRAD before filtering
ggplot(USact, aes(RDateTime_START, NETRAD))+
  geom_point()+
  ylim(-9000, 2000)
# Hard physical-plausibility limits; out-of-range values become NA here and
# are later converted to the -9999 missing code.
USact<-USact%>%
  mutate(CH4=replace(CH4, CH4< (-750), NA),
         CO2=replace(CO2, CO2>1570, NA),
         FC=replace(FC, abs(FC)>110, NA),
         FCH4=replace(FCH4, FCH4>5275, NA),
         H2O=replace(H2O, H2O>105, NA))
# Merge RBR thermistor-chain temperatures onto the flux timestamps.
amerifluxTime<-select(USact, RDateTime_START)
amerifluxTime$RDateTime<-amerifluxTime$RDateTime_START
amerifluxRBR<-left_join(amerifluxTime, rbrTsub, by = "RDateTime")
amerifluxRBR2<-subset(amerifluxRBR, !duplicated(RDateTime)) #30693
USact<-subset(USact, !duplicated(RDateTime_START)) #30693
# Overwrite the TS / TW_1..TW_6 placeholders with RBR water temperatures by
# depth (0.1 m ... 1.6 m). Positional assignment: this relies on
# amerifluxRBR2 and USact having identical, de-duplicated timestamp order
# (both filtered to 30693 rows above). Missing periods stay NA here and are
# converted to -9999 later.
USact<-mutate(USact,
              TW_1 = amerifluxRBR2$RBRmeanT_0.1,
              TW_2 = amerifluxRBR2$RBRmeanT_0.25,
              TW_3 = amerifluxRBR2$RBRmeanT_0.5,
              TW_4 = amerifluxRBR2$RBRmeanT_0.75,
              TW_5 = amerifluxRBR2$RBRmeanT_1,
              TW_6 = amerifluxRBR2$RBRmeanT_1.25,
              TS = amerifluxRBR2$RBRmeanT_1.6)
# ggplot(campMet, aes(RDateTime, Rain_mm_tot))+
# geom_point(alpha=0.3)
# ggplot(vanni30min, aes(RDateTime, dailyRain.vws))+
# geom_point()
# ggplot(filter(vanni30min, RDateTime>"2017-10-01"), aes(RDateTime, rain30min))+
# geom_point()
#merge precip & PAR & water level from VWS
# also need to change precip from daily cumulative to 30-min
# P=-9999, #precipitation
# P_RAIN = -9999, #rainfall, from VWS
# NETRAD = -9999, #net radiation, W/m2, from our net radiometer
# PPFD_BC_IN = -9999, #PPFD, below canopy, incoming
# Merge the VWS station data (precip, PAR, water level) onto the flux
# timestamps, de-duplicating repeated half-hours.
amerifluxVWS<-left_join(amerifluxTime, vanni30min, by="RDateTime") %>% #30709
  subset(!duplicated(RDateTime)) #30693
# Then add net radiation and precip from the Campbell logger suite.
amerifluxCampVWS<-left_join(amerifluxVWS, campMet, by="RDateTime")%>%
  subset(!duplicated(RDateTime)) #30693
# Visual sanity check of the merged net-radiation series.
ggplot(amerifluxCampVWS, aes(RDateTime, NR_Wm2_avg))+
  geom_line()
# Fill USact's biomet columns (precip, PAR, water level, net radiation) from
# the merged amerifluxCampVWS table, and set the fetch-filter flag. This
# replaces the original row-by-row for loop with vectorized ifelse() calls,
# keeping the same precedence for precip: VWS rain first, then tower rain,
# then -9999. Relies on amerifluxCampVWS being row-aligned with USact (both
# de-duplicated to the same timestamps above).
#
# Bug fix: the old loop assigned
#   USact$FETCH_FILTER[i] = ifelse(<full-length vector condition>, 0, 1)
# i.e. it evaluated the condition for EVERY row but stored only the first
# element of the result, so all rows silently received row 1's flag (with a
# replacement-length warning). The vectorized form applies the condition
# row-wise as intended: trips before 2018-05-01 with westerly winds
# (195 < WD < 330, the dock sector) get 0 (discard), everything else 1 (keep).
USact$P <- ifelse(!is.na(amerifluxCampVWS$rain30min),
                  amerifluxCampVWS$rain30min,
                  ifelse(!is.na(amerifluxCampVWS$Rain_mm_tot),
                         amerifluxCampVWS$Rain_mm_tot, -9999))
USact$P_RAIN <- USact$P
USact$PPFD_IN <- ifelse(!is.na(amerifluxCampVWS$par.vws),
                        amerifluxCampVWS$par.vws, -9999)
# levelAdj.vws flags availability (it carries the step-change offset plus 1 m
# for the footprint/measurement depth difference); the reported value is the
# raw waterLevel.vws, as in the original.
USact$WTD <- ifelse(!is.na(amerifluxCampVWS$levelAdj.vws),
                    amerifluxCampVWS$waterLevel.vws, -9999)
USact$NETRAD <- ifelse(!is.na(amerifluxCampVWS$NR_Wm2_avg),
                       amerifluxCampVWS$NR_Wm2_avg, -9999)
USact$FETCH_FILTER <- ifelse(USact$RDateTime_START < "2018-05-01 00:00:00" &
                               USact$WD > 195 & USact$WD < 330, 0, 1)
# ggplot(USact, aes(RDateTime_START, P))+
# geom_point(alpha=0.2)+
# ylim(0, 10)
# Change all missing values to the AmeriFlux missing-data code (-9999), and
# keep an NA-coded copy (USactNA) for the diagnostic plots below.
# Bug fix: the original also ran `USact[is.nan(USact)] <- -9999`, but
# is.nan() has no data.frame method and errors on data frames; the line was
# also redundant, since is.na() already returns TRUE for NaN.
USact[is.na(USact)]<- -9999
USactNA<-USact
USactNA[USact== -9999]<- NA
sum(is.na(USactNA$FETCH_FILTER))  # quick check: half-hours lacking a fetch flag
#check on outliers
# ggplot(USactNA, aes(RDateTime_START, CO2))+
# geom_point()
# ggplot(USactNA, aes(RDateTime_START, CH4))+
# geom_point()
# ggplot(USactNA, aes(RDateTime_START, FC))+
# geom_point()
# ggplot(USactNA, aes(RDateTime_START, FCH4))+
# geom_point()
# ggplot(USactNA, aes(RDateTime_START, H2O))+
# geom_point()
# ggplot(USactNA, aes(RDateTime_START, SC))+
# geom_point()
# ggplot(USactNA, aes(RDateTime_START, SCH4))+
# geom_point()
# ggplot(USactNA, aes(RDateTime_START, NETRAD))+
# geom_point()
# Remove remaining out-of-range values spotted in the diagnostic plots above.
USactNA<-USactNA%>%
  mutate(NETRAD=replace(NETRAD, NETRAD< (-390), NA),
         SC = replace(SC, abs(SC)>200, NA),
         SCH4=replace(SCH4, abs(SCH4)>200, NA))
USactOF<-USactNA
USactOF[is.na(USactOF)]<- -9999 #OF for outlier filtered
sum(is.na(USactOF$NETRAD))
# Split the outlier-filtered table into one submission file per year.
#2017
USactSub17<-filter(USactOF, RDateTime_START>"2017-01-25 18:30",
                   RDateTime_START<"2017-12-31 19:00")
#2018
USactSub18<-filter(USactOF, RDateTime_START>"2018-01-01 00:00",
                   RDateTime_START<"2018-12-31 23:30")
# Eyeball the period boundaries of each yearly file.
head(USactSub17$RDateTime_START)
tail(USactSub17$RDateTime_END)
head(USactSub18$RDateTime_START)
tail(USactSub18$RDateTime_END)
#check for missing HH periods: `check` should be 1800 s everywhere if no
# half-hours are missing.
USactSub17$check<-c(1800, diff(as.numeric(USactSub17$RDateTime_START), 1))
summary(USactSub17$check)
USactSub18$check<-c(1800, diff(as.numeric(USactSub18$RDateTime_START), 1))
summary(USactSub18$check)
# NOTE(review): `USactSub` is not defined anywhere in this script (only
# USactSub17/USactSub18 exist) -- this plot errors as written; it looks like
# a leftover from an earlier single-subset version.
ggplot(filter(USactSub, RDateTime_START>"2017-01-01", RDateTime_START<"2017-02-01"),
       aes(RDateTime_START, check))+
  geom_point()
# Change timestamps to the AmeriFlux YYYYMMDDHHMM format and drop the
# POSIXct/diagnostic columns, keeping only the AmeriFlux column set.
#strptime(USactSub$RDateTime_START, "%Y-%m-%d %H:%M:%S")
USactSub17<-mutate(USactSub17,
                   TIMESTAMP_START=format(strptime(RDateTime_START,
                                                   "%Y-%m-%d %H:%M:%S"),
                                          "%Y%m%d%H%M"),
                   TIMESTAMP_END=format(strptime(RDateTime_END,
                                                 "%Y-%m-%d %H:%M:%S"),
                                        "%Y%m%d%H%M"))%>%
  select(TIMESTAMP_START, TIMESTAMP_END, FC_SSITC_TEST, FCH4_SSITC_TEST, FETCH_70, FETCH_90,
         FETCH_FILTER, FETCH_MAX, CH4, CO2, CO2_SIGMA, FC, FCH4, H2O, H2O_SIGMA, SC, SCH4,
         H, H_SSITC_TEST, LE, LE_SSITC_TEST, SH, SLE, PA, RH, T_SONIC, TA, VPD, P, P_RAIN,
         NETRAD, PPFD_IN, TS, TW_1, TW_2, TW_3, TW_4, TW_5, TW_6, WTD, MO_LENGTH, TAU,
         TAU_SSITC_TEST, U_SIGMA, USTAR, V_SIGMA, W_SIGMA, WD, WS, WS_MAX, ZL)
head(USactSub17)
USactSub18<-mutate(USactSub18,
                   TIMESTAMP_START=format(strptime(RDateTime_START,
                                                   "%Y-%m-%d %H:%M:%S"),
                                          "%Y%m%d%H%M"),
                   TIMESTAMP_END=format(strptime(RDateTime_END,
                                                 "%Y-%m-%d %H:%M:%S"),
                                        "%Y%m%d%H%M"))%>%
  select(TIMESTAMP_START, TIMESTAMP_END, FC_SSITC_TEST, FCH4_SSITC_TEST, FETCH_70, FETCH_90,
         FETCH_FILTER, FETCH_MAX, CH4, CO2, CO2_SIGMA, FC, FCH4, H2O, H2O_SIGMA, SC, SCH4,
         H, H_SSITC_TEST, LE, LE_SSITC_TEST, SH, SLE, PA, RH, T_SONIC, TA, VPD, P, P_RAIN,
         NETRAD, PPFD_IN, TS, TW_1, TW_2, TW_3, TW_4, TW_5, TW_6, WTD, MO_LENGTH, TAU,
         TAU_SSITC_TEST, U_SIGMA, USTAR, V_SIGMA, W_SIGMA, WD, WS, WS_MAX, ZL)
head(USactSub18)
# Write the AmeriFlux submission files (names follow the
# SITE_HH_<start>_<end>.csv convention).
#2017
# write.table(USactSub,
#             file=("output/US-Act_HH_201701260000_201712311800.csv"),
#             sep=",",
#             row.names=FALSE)
write.table(USactSub17,
            file=("output/US-Act_HH_201701260300_201712311800.csv"),
            sep=",",
            row.names=FALSE)
#2018
# write.table(USactSub,
#             file=("C_R_Projects/actonFluxProject/output/US-Act_HH_201801011130_201811131230.csv"),
#             sep=",",
#             row.names=FALSE)
write.table(USactSub18,
            file=("output/US-Act_HH_201801011130_201811131230.csv"),
            sep=",",
            row.names=FALSE)
|
9e253fe81c3f6faf1ad523cdcdbf44156b66646e
|
d0725763bf2a1a35a5ba91de5e1caf33cc49722e
|
/App Indices de Vulnerabilidad/Code/Mapa/Data Mapa.R
|
c1dbabc52f8578a77ccf93a77782dcc69de15f0d
|
[] |
no_license
|
InstitutoInvestigacionesEconomicasPUCE/Indicadores_Vulnerabilidad
|
8a1e9602c4b6a2b1dcad2cd5030147bf1bf92711
|
2a8f15d078d9d79f2d02a58d37ed75e402e135f3
|
refs/heads/master
| 2020-06-03T15:50:13.051515
| 2019-06-12T20:36:05
| 2019-06-12T20:36:05
| 191,637,254
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,899
|
r
|
Data Mapa.R
|
# Cantonal map DATA ===========================
# Reactive: builds the long-format table (canton code, canton, date, value)
# fed to the cantonal map. For each canton, the selected product's CPI series
# (column pd + 1) is deflated by a 12-month moving average of that canton's
# column-2 series.
BDDMapReact = reactive({
  # NOTE(review): `periodos` is never used below, but reading input$periodos
  # here creates a reactive dependency — removing it would change when this
  # reactive invalidates, so it is left in place.
  periodos = c(2005,as.numeric(input$periodos),2019)
  pd = as.numeric(input$producto)  # selected product (column offset)
  BDDMap = data.frame()
  for(i in 1:length(IPC_canton)){
    Fechas_aux = IPC_canton[[i]][,1]  # dates assumed in column 1 (original note: "Corregir" = fix)
    # IPC_val_aux = IPC_canton[[i]][,pd+1]
    mav12 = MedMovBeta(IPC_canton[[i]][,2],n=12)  # 12-month moving average of column 2
    IPC_val_aux = IPC_canton[[i]][,pd+1]/as.numeric(mav12$mvxRecup)  # deflated CPI
    nombre_aux = names(IPC_canton)[i]
    BDDMap_aux = data.frame(ARCH_CANTON = nombre_aux,
                            Fecha=Fechas_aux,
                            Valor = IPC_val_aux)
    # NOTE(review): rbind in a loop is O(n^2); acceptable for the canton count here.
    BDDMap = rbind(BDDMap, BDDMap_aux)
  }
  # Attach canton codes/names and keep only the columns the map needs.
  BDDMap = ArchCodCanton %>%
    dplyr::inner_join(BDDMap,by="ARCH_CANTON") %>%
    dplyr::select(COD_CANTON,CANTON,Fecha,Valor)
  return(BDDMap)
})
# Cantonal beta data =================================
# Reactive: for each canton, fits a piecewise-linear trend to the deflated
# CPI series (Valor ~ Tmp * Periodo, periods cut at the user-chosen years)
# and returns the per-period accumulated slope ("beta") for the map.
BDDMapBetas = reactive({
  # periodos = c(2005,as.numeric(input$periodos),2019)
  # print("Aqui se calcula Periodos !!!!!!!!!!!!")
  # print(periodos)
  pd = as.numeric(input$producto)  # selected product (column offset)
  BDDMap = data.frame()
  for(i in 1:length(IPC_canton)){
    Fechas_aux = IPC_canton[[i]][,1]  # dates assumed in column 1 (original note: "Corregir" = fix)
    mav12 = MedMovBeta(IPC_canton[[i]][,2],n=12)
    IPC_val_aux = IPC_canton[[i]][,pd+1]/as.numeric(mav12$mvxRecup)  # deflated CPI
    nombre_aux = names(IPC_canton)[i]
    # Base for canton i and product pd -------------------
    # periodos = c(2007,2010,2015)
    # periodos = c(2005,as.numeric(periodos),2019)
    periodos = c(2005,as.numeric(input$periodos),2019)
    # NOTE(review): uses IPC_canton[[1]]$Fecha (the FIRST canton) for every
    # iteration — presumably all cantons share the same dates; confirm.
    Fecha = as.Date(IPC_canton[[1]]$Fecha, format = "%Y-%m-%d")
    Anio = as.numeric(format(Fecha, "%Y")) #;remove(Fecha)
    etiquetas = c()
    # NOTE(review): this inner loop reuses `i`, clobbering the outer canton
    # index. It only works because every use of the outer `i` happens above;
    # fragile — a different inner loop variable would be safer.
    for (i in 1:(length(periodos) - 1)) {
      etiquetas[i] = paste0("Periodo: ", periodos[i], " - ", periodos[i + 1])
    }
    # Bin each year into its period (right-open intervals).
    PeriodoCorte = cut(Anio,
                       breaks = periodos ,
                       labels = etiquetas ,
                       right = F)
    # NOTE(review): removing the loop variable `i` is safe only because R's
    # `for` re-assigns it at the top of each iteration.
    remove(Anio,periodos,Fecha,i)
    #----------------------------
    BDD_aux = data.frame(Periodo = PeriodoCorte,
                         Tmp = 1:length(Fechas_aux),  # time index within the series
                         Valor = IPC_val_aux
                         # Valor = round(IPC_val_aux,digits = 5)
                         # Valor = format(IPC_val_aux, scientific = TRUE)
    ) # map
    remove(mav12,Fechas_aux,IPC_val_aux)
    # Panel regression --------------------------------
    modelo = lm(data = BDD_aux, formula = Valor ~ Tmp*Periodo)
    # Model betas ---------------------------------
    resumen = data.frame(round(xtable(summary(modelo)),digits = 5))
    names(resumen) = c("Estimación","Error Estándar","t-valor","Pr(>|t|)")
    remove(BDD_aux,modelo)
    # Keep only the slope terms (rows named "Tmp...").
    b_nomb=startsWith(rownames(resumen),"Tmp")
    betas=resumen$`Estimación`[b_nomb]  # NOTE(review): `betas` is never used below
    tablabetas=resumen[b_nomb,c(1,2)]
    remove(b_nomb)
    # Accumulated betas (pivot slope added to each interaction term)
    tablabetas[2:length(tablabetas[,1]),1]=tablabetas[2:length(tablabetas[,1]),1]+tablabetas[1,1]
    tablabetas = data.frame(ARCH_CANTON = nombre_aux, # map
                            Fecha = as.character(etiquetas),
                            Valor = round(tablabetas[,1],digits = 6))
    # Data for the MAP ----------------------------------
    BDDMap = rbind(BDDMap, tablabetas)
  }
  # Attach canton codes/names and keep only the columns the map needs.
  BDDMap = ArchCodCanton %>%
    dplyr::inner_join(BDDMap,by="ARCH_CANTON") %>%
    dplyr::select(COD_CANTON,CANTON,Fecha,Valor)
  return(BDDMap)
})
# Data for the single-province map ----------------------
# Reactive: cantonal betas restricted to the province currently selected in
# the UI (input$provincia). Same columns as BDDMapBetas().
BDDMapBetasProv = reactive({
  betas <- BDDMapBetas()
  prov_lookup <- ArchCodCanton[,c("COD_CANTON","PROVINCIA")]
  filtered <- betas %>%
    dplyr::inner_join(prov_lookup,by="COD_CANTON") %>%
    dplyr::filter(PROVINCIA == input$provincia) %>%
    dplyr::select(COD_CANTON,CANTON,Fecha,Valor)
  return(filtered)
})
|
ee001531d3ccb1bd801894f51d42759c9bd9a141
|
3da7397a406e5e788d08e2f9ca2b4b4b41be22ed
|
/R-code/pkg/R/RcppExports.R
|
d50ce977beb71830e707d382345c536715eabf1c
|
[
"MIT"
] |
permissive
|
ZhihaoMa/bartik-weight
|
f44a931ba1e44a968f81ed421392460107f05ba6
|
722ceb85484d6a2bf77985edf2403515eacd1770
|
refs/heads/master
| 2020-12-26T20:08:04.509430
| 2019-12-13T14:50:17
| 2019-12-13T14:50:17
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 254
|
r
|
RcppExports.R
|
# Generated by using Rcpp::compileAttributes() -> do not edit by hand
# Generator token: 10BE3573-1514-4C36-9D1C-5A225CD40393

# Thin R wrapper around the compiled C++ routine `ComputeAlphaBeta`.
# NOTE(review): argument semantics are defined by the C++ source, not here —
# presumably y/x are outcome/treatment, Z the instrument matrix, weight/WW
# weighting inputs and G a group structure; confirm against src/ before
# documenting further. Edit the C++ attributes, not this file.
ComputeAlphaBeta <- function(y, x, WW, weight, Z, G) {
    .Call(`_bartik_weight_ComputeAlphaBeta`, y, x, WW, weight, Z, G)
}
|
ca71a310c0e48a499370e8a1bd15384233b51403
|
845a4db68eebe70d5c204fbad2dd27cabf1908df
|
/doc/elastic-net.R
|
0dfc251cdcc04947335d55f7013b4b634e59ae86
|
[] |
no_license
|
jashu/beset
|
6b1a6d8340b887a3628d0db6563bcdf53b4c709c
|
703e4e7da70185d279c4a60e76207ff2dae91103
|
refs/heads/master
| 2023-05-03T20:22:00.304497
| 2023-04-18T18:23:26
| 2023-04-18T18:23:26
| 49,987,418
| 6
| 0
| null | 2021-04-13T11:36:35
| 2016-01-19T22:24:12
|
R
|
UTF-8
|
R
| false
| false
| 2,498
|
r
|
elastic-net.R
|
# R code extracted ("purled") by knitr from the beset package's elastic-net
# vignette. The `## ----` lines are knitr chunk delimiters; keep them intact
# so this script stays aligned with the vignette source. The script fits
# beset_elnet models (with and without nested CV) on the swiss dataset
# augmented with noise columns, then on the prostate dataset (binomial).
## ---- echo = FALSE, message = FALSE-------------------------------------------
library(beset)
suppressPackageStartupMessages(library(tidyverse))
## -----------------------------------------------------------------------------
# Fix RNG so the simulated noise predictors are reproducible.
set.seed(42)
data <- cbind(swiss, matrix(replicate(5, rnorm(nrow(swiss))), ncol = 5))
names(data)[7:11] <- paste0("noise", names(data)[7:11])
## -----------------------------------------------------------------------------
mod <- beset_elnet(Fertility ~ ., data)
## ---- fig.height=4, fig.width=5-----------------------------------------------
plot(mod)
## -----------------------------------------------------------------------------
mod_sum <- summary(mod, oneSE = FALSE)
mod_sum
## -----------------------------------------------------------------------------
summary(mod)
## -----------------------------------------------------------------------------
summary(mod, alpha = 0.01)
## -----------------------------------------------------------------------------
# Refit with nested cross-validation.
mod <- beset_elnet(Fertility ~ ., data, nest_cv = TRUE)
## ---- fig.height=4, fig.width=5-----------------------------------------------
plot(mod)
## -----------------------------------------------------------------------------
mod_sum <- summary(mod)
## -----------------------------------------------------------------------------
summary(mod, robust = TRUE)
## -----------------------------------------------------------------------------
summary(mod, oneSE = FALSE)
## -----------------------------------------------------------------------------
summary(mod) %>% print(metric = "mse")
## -----------------------------------------------------------------------------
validate(mod, metric = "auto", oneSE = TRUE, alpha = NULL, lambda = NULL)
## -----------------------------------------------------------------------------
# Binomial example: predicting tumor status in the prostate data.
summary(prostate)
## -----------------------------------------------------------------------------
mod <- beset_elnet(tumor ~ ., data = prostate, family = "binomial",
                   nest_cv = TRUE)
summary(mod)
## ---- fig.height=4, fig.width=5-----------------------------------------------
plot(mod)
## ---- fig.height=4, fig.width=5-----------------------------------------------
plot(mod) + ylab("Log-loss")
## ---- fig.height=4, fig.width=5-----------------------------------------------
plot(mod, metric = "auc")
## -----------------------------------------------------------------------------
summary(mod, metric = "auc") %>% print(metric = "auc")
|
15ad3288eac044b62c8ca1674699eefeae3791ed
|
404f40e474f1389d8c927371b50464cd65539ef9
|
/man/construct.Rd
|
ce279df50751ee895c0c69b121113899fb05c9f5
|
[] |
no_license
|
mafuguo/wiotrs
|
097447f82fb693b4c6898b90865c8ef0df32e043
|
fc886aaed07bc52c4a16cbe6f54cd1ad6c46a824
|
refs/heads/master
| 2020-04-02T18:19:51.919574
| 2018-10-22T06:29:04
| 2018-10-22T06:29:04
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 534
|
rd
|
construct.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/construct.R
\name{construct}
\alias{construct}
\title{Construct technical matrices}
\usage{
construct()
}
\value{
WIOT A list containing the input-output table and other parameters. The function adds/updates
the Leontief inverse L, the technical coefficient matrix A, the final demand matrix, and the
value added per unit of gross output vector.
}
\description{
Construct technical matrices
}
\examples{
get_io(year = 2009, version = "B")
construct()
get_io(2008)
construct()
}
|
437ed3994b513f0b5f10f4f8033b113acc3b434b
|
1ce825976064853b64fad4a1597b608a3fc7e831
|
/tests/1.R
|
411f5abf600b1c2ef5db72c13c94180ab11c9743
|
[] |
no_license
|
avastermark19/doseLM
|
a91cc0742965b6ce6551adfe4f9c430fc8dd9818
|
30afa3e8bf226e233bc2aa4d2532d3acbaa723fb
|
refs/heads/master
| 2020-04-30T03:12:30.093953
| 2019-03-20T19:27:59
| 2019-03-20T19:27:59
| 176,581,422
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 112
|
r
|
1.R
|
# Smoke test: simData(FA = 100) should yield an object with 8000 cells
# in total (rows x columns). NOTE(review): the expected total depends on
# simData()'s defaults — confirm against the doseLM documentation.
library(doseLM)
library(edgeR)
se <- simData(FA=100)
RUnit::checkEqualsNumeric(dim(se)[1]*dim(se)[2], 8000)
|
cb72b4af5dce8d0d1292ea5543292a2780bd5f6a
|
6ba6652d631677e7288ef8ed58803dcee107a6dc
|
/format_REST_data.R
|
96a713e81888b90a627c6b08b70ecf907fce58cb
|
[] |
no_license
|
galielle/RS-fMRI-of-Reading
|
242ce36584559ede12a1ad7c5ec5ae0629be3e5d
|
ef0988c769a4e47eeb26c5349fd61a9a0cbbfadc
|
refs/heads/master
| 2020-04-09T05:42:16.474035
| 2018-12-06T14:23:55
| 2018-12-06T14:23:55
| 160,075,551
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,601
|
r
|
format_REST_data.R
|
### Package dependencies.
# Use library() rather than require(): require() only returns FALSE when a
# package is missing, so a missing dependency would surface later as a
# cryptic "could not find function" error instead of failing here.
library(utils)
library(plyr)
library(lmerTest)
library(car)
library(data.table)
library(psych)
library(gdata)
### lists and data
# IDs of the brain areas in the connectivity matrices; order must match the
# row/column order of the zFC matrix files.
area_id_list <- as.character(c('14','15','16','17','18','19','20','21','22','39','40','41','43','44','45','46','47','48','49','50','51','52',
                               '53','54','55','56','68','69','70','71','72','73','74','93','94','95','96','97','98','99','100','101'))
# Per-area metadata, merged onto the pairwise data later.
area_data <- read.table("area_data3.txt", header = TRUE)
### functions
# Convert one participant's REST correlation matrix into a long-format
# data.table of unique area-pair correlations.
#
# participant:  identifier stored in the `participant` column.
# data:         square numeric matrix of pairwise correlations whose rows and
#               columns follow the order of `area_id_list`.
# area_id_list: character vector of area IDs used to label rows/columns.
#               (The original default `area_id_list = area_id_list` was
#               self-referential and errored whenever it was relied upon
#               — "promise already under evaluation" — so it was removed;
#               all existing callers pass the argument explicitly.)
convert_participant_data <- function(participant, data, area_id_list) {
  participant_data <- data
  colnames(participant_data) <- area_id_list
  rownames(participant_data) <- area_id_list
  participant_data[participant_data > 1] <- 1           # cap correlations at 1
  diag(participant_data) <- NA                          # drop self-correlations
  participant_data[lower.tri(participant_data)] <- NA   # keep upper triangle only
  ind_p_data <- as.data.table(unmatrix(participant_data))  # matrix -> long vector
  colnames(ind_p_data) <- "corr"
  ind_p_data$participant <- participant
  # Pair labels "row_col" in the same order unmatrix() emits the values.
  # (A dead `stimpairs <- combn(...)` computation was removed.)
  pairnames <- expand.grid(area_id_list, area_id_list)
  pairnames <- paste0(pairnames[, 2], '_', pairnames[, 1])
  ind_p_data$area_id <- pairnames
  # Drop the NA (diagonal + lower-triangle) rows.
  ind_p_data <- ind_p_data[complete.cases(ind_p_data), ]
  return(ind_p_data)
}
# Read every per-participant z-scored FC matrix file (zFCMap_Subject*.txt)
# in the working directory and stack them into one long table, one row per
# unique area pair per participant. Participant IDs are the file indices.
#
# NOTE: the original pattern 'zFC*' is a REGEX meaning "zF" followed by zero
# or more "C", i.e. it matches any filename containing "zF"; it is anchored
# here so only files starting with "zFC" are read.
create_all_ind <- function(area_id_list) {
  flist <- list.files(pattern = "^zFC")
  # Preallocate one slot per file instead of growing with rbind in the loop.
  per_participant <- vector("list", length(flist))
  for (i in seq_along(flist)) {
    mat <- as.matrix(read.table(flist[i]))
    per_participant[[i]] <- convert_participant_data(i, mat, area_id_list)
  }
  do.call(rbind, per_participant)
}
# Create the unified data file: add an absolute-correlation column and merge
# in the per-area metadata.
#
# ind_data:  long table with at least `area_id` and `corr` columns.
# area_data: per-area-pair metadata keyed by `area_id`.
# Returns the merged table with an extra `abs_corr` column; rows without a
# matching `area_id` in `area_data` are dropped (inner merge).
add_cols_all_ind <- function(ind_data, area_data) {
  all_ind <- ind_data
  all_ind$abs_corr <- abs(all_ind$corr)
  # (The original's no-op `area_data <- area_data` self-assignment was removed.)
  merge(all_ind, area_data, by = "area_id")
}
### Create one data file - *RUN THIS*
# Build the full long-format dataset from every zFC matrix file and write it
# to 'all_data_18p_opn.txt' (comma-separated) in the working directory.
ind_all <- add_cols_all_ind(create_all_ind(area_id_list), area_data);
write.table(ind_all, file = 'all_data_18p_opn.txt', sep = ",", quote = FALSE, col.names = TRUE);
|
012133caa50571f219afa601eeec08e469f4b086
|
377220a5b50eb158efab006356d2d9703937ac44
|
/man/plot_elements.Rd
|
752a7496840b32c1af4e62e1a08a164789afb08a
|
[] |
no_license
|
flinder/flindR
|
56b545d920631aa9e12fbb99d3079fd962614991
|
b628b84a1cf859c9bd03945ae4c797e6ddcbc799
|
refs/heads/master
| 2020-06-10T21:41:51.122412
| 2018-09-17T18:24:28
| 2018-09-17T18:24:28
| 75,864,713
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 243
|
rd
|
plot_elements.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/plot_elements.R
\name{plot_elements}
\alias{plot_elements}
\title{Get different plot elements}
\usage{
plot_elements()
}
\description{
Get different plot elements
}
|
659fc76b906dd4be49766434d6c85e734a32ea87
|
c1968efd0edc2e4f26ac855a9e3259d537856d1b
|
/man/IRFinder.Rd
|
40891b8c7f8fb907c1a65430b52fee7bc0166e13
|
[
"MIT"
] |
permissive
|
alexw-gsct/NxtIRF
|
386abb430d236e2034d7e77d0843ec171a1f578d
|
dd1ed9e4a2075459f2855155340ed942698db412
|
refs/heads/master
| 2023-03-14T08:41:35.849645
| 2021-03-04T01:24:47
| 2021-03-04T01:24:47
| 292,820,854
| 0
| 0
|
NOASSERTION
| 2021-03-04T01:24:48
| 2020-09-04T10:35:49
|
R
|
UTF-8
|
R
| false
| true
| 2,506
|
rd
|
IRFinder.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/CollateData.R
\name{IRFinder}
\alias{IRFinder}
\title{A wrapper function to call NxtIRF/IRFinder}
\usage{
IRFinder(
bamfiles = "Unsorted.bam",
sample_names = "sample1",
reference_path = "./Reference",
output_path = "./IRFinder_Output",
n_threads = 1,
run_featureCounts = FALSE,
localHub = FALSE,
ah = AnnotationHub(localHub = localHub)
)
}
\arguments{
\item{bamfiles}{The file names of 1 or more BAM files}
\item{sample_names}{The sample names of the given BAM files. Must
be a vector of the same length as \code{bamfiles}}
\item{reference_path}{The directory of the NxtIRF reference}
\item{output_path}{The directory where NxtIRF/IRFinder output
should be stored}
\item{n_threads}{The number of threads to use. On Linux / Windows, this will
use OpenMP from within the C++ subroutine. On Macs, BiocParallel
MulticoreParam will be used on single-threaded NxtIRF/IRFinder}
\item{run_featureCounts}{Whether this function will run
\code{Rsubread::featureCounts()} on the BAM files. If so, the output will be
saved to "main.FC.Rds" in the output directory as a list object}
\item{localHub}{Set as TRUE to disable AnnotationHub online mode}
\item{ah}{An AnnotationHub object.}
}
\value{
None. \code{IRFinder()} will save output to \code{output_path}. \cr\cr
sample.txt.gz: The main IRFinder output file containing the quantitation
of IR and splice junctions, as well as QC information\cr\cr
sample.cov: Contains coverage information in compressed binary. This
format is 5-10X faster than BigWig format (see \code{\link[=GetCoverage]{GetCoverage()}})\cr\cr
main.FC.Rds: A single file containing gene counts for the whole dataset
(only if \code{run_featureCounts == TRUE})
}
\description{
This function calls IRFinder on one or more BAM files.
}
\examples{
\donttest{
# Run IRFinder on single BAM file, do not run featureCounts:
IRFinder(
bamfiles = "sample1.bam",
sample_names = "sample1",
reference_path = "./Reference",
output_path = "./IRFinder_Output",
run_featureCounts = FALSE
)
# Run IRFinder on multiple BAM file, run featureCounts, use 4 threads:
IRFinder(
bamfiles = c("UT1.bam", "UT2.bam", "UT3.bam",
"Rx1.bam", "Rx2.bam", "Rx3.bam"),
sample_names = c("UT1", "UT2", "UT3",
"Rx1", "Rx2", "Rx3"),
reference_path = "./Reference",
output_path = "./IRFinder_Output",
run_featureCounts = TRUE,
n_threads = 4
)
}
}
|
367b076e0eddd54d819af1102fe11caa610a2d51
|
2e1794af130eb9c326e4936c39576117a4a0ef40
|
/Estatistica e Probabilidade/Lista de exercicios/Ex1.r
|
8f938893f736ade6fba9de70ea49f24bace5b2bd
|
[] |
no_license
|
NayrozD/Universidade
|
b8701a7e690c72436fffed6756ed3c9a2d051b1e
|
6eceb95eeb1104c920c2deda5f41986aae263cb0
|
refs/heads/master
| 2022-01-08T07:03:57.270521
| 2018-05-16T18:20:10
| 2018-05-16T18:20:10
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 433
|
r
|
Ex1.r
|
# 1. Create a sequence of numbers from 1 to 90, in steps of two units, using
#    R's seq() function, and store it in a variable.
#    (An unresolved git merge conflict was removed here; both sides of the
#    conflict contained the identical code below.)
# seq(from, to, by)
a <- seq(1, 90, 2)
a
|
3e0f2bb1ac3d42beb1ad332ec4976a6e09f4e786
|
436570c53fbf34dd2ac73282b4b3cf558c214d3e
|
/ds/df3.R
|
91f2333b802944febb51abbc3ed380096b1d3fe1
|
[] |
no_license
|
dupadhyaya/dspgmsc2017
|
4ce6debe7f87a4ac20da98cb3cf049c6c60335c5
|
e6062aa49fd0e10466830c6c03511823aa42e5ca
|
refs/heads/master
| 2021-01-22T16:53:35.407476
| 2018-05-23T04:09:50
| 2018-05-23T04:09:50
| 100,725,221
| 9
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,514
|
r
|
df3.R
|
# Data Frame 3
# Teaching snippets: data.frame properties, renaming, subsetting, sorting.
# NOTE(review): several later lines reference objects (`df`, `x`, `y`) that
# are never defined in this file and some lines error by design (marked
# below) — this script is a demo, not meant to run top-to-bottom.
# Properties
# sdata
rollno = c(10,11,12,13)
name = c('Achal','Apoorva','Goldie','Hitesh')
gender = c('M','F','M','M')
sdata = data.frame(rollno, name, gender)
sdata
# Change row and column names
colnames(sdata) = c("rollno1", "name1", "gender1")
colnames(sdata)
rownames(sdata) = c("ID1", "ID2", "ID3", "ID4")
rownames(sdata)
# Dimensions
dim(sdata)
dim(sdata)[1]
# Number of rows
dim(sdata)[2]
# Number of columns
# No of rows & columns
nrow(sdata)
ncol(sdata)
length(sdata)  # for a data.frame, length() is the number of columns
# Changing data
attach(sdata)  # NOTE(review): attach() is discouraged; see next comment
rollno1 = rollno1 - 5
rollno1
# reduce rollno by 5 (creates a detached copy; does NOT store in the DF)
sdata$rollno1
# Remove columns / rows
# Columns
sdata[1] <- NULL  # removes the first column in place
# Rows
rows_to_keep <- c(TRUE, FALSE, TRUE, FALSE)
# Method1
# NOTE(review): `df` is undefined in this file — presumably a placeholder
# data frame; these lines error unless `df` exists in the session.
df_limit = df[rows_to_keep, ]
df_limit
# Method2
df_limit2 <- df[ !rows_to_keep, ]
df_limit2
# Threshold
df_limit3 <- df[df$col1 > 40, ]
df_limit3
# Add rows & columns to a DF
cbind(df, x)# x - same no of rows as df (x undefined here)
rbind(df, y) # y - same no of colns as df (y undefined here)
# Sort / Order / Rank
# Order
order(mtcars)  # NOTE(review): order() on a whole data.frame is rarely meaningful
# sort
sort(mtcars) # error (deliberate: sort() does not accept a data.frame)
sort(mtcars[1, ]) # order row 1 by values
sort(mtcars[ , 1]) # sort coln 1
sort(mtcars$mpg, decreasing=F)
mtcars
order(mtcars$mpg)
mtcars[ order(mtcars$mpg), ]
order( mtcars$mpg, mtcars[ , 2], decreasing=F)
# NOTE(review): with() expects the data frame itself; `with(mtcars$mpg, ...)`
# passes a numeric vector, so `mpg`/`cyl` are not found — should be
# with(mtcars, order(mpg, cyl)).
with( mtcars$mpg, order(mpg, cyl))
# rank
rank(mtcars$mpg)
rank( c(10, 7, 3, 4, 5))
# Options na.last=T , ties.method = c('average', 'first', 'random', 'max', 'min'))
dplyr::arrange
dplyr::arrange(mtcars, cyl, disp)
dplyr::arrange(mtcars, desc(disp))
|
e2a55a150f0b88f6b103f2f86d1fc89f9f565dfb
|
12248769773269e24daa3795b7d4589f72c88907
|
/WK5 -3.R
|
a5e87a04cc7867ac9b3012529fcf3507e0ecf533
|
[] |
no_license
|
sonali4794/R-Code
|
23c5d73583d76a3d142f5ac454aad9056c7ecf22
|
7ab9d7fcec77650ac0f3268994da2b82cded197e
|
refs/heads/main
| 2023-07-11T19:21:47.466691
| 2021-08-16T15:56:55
| 2021-08-16T15:56:55
| 389,376,006
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 403
|
r
|
WK5 -3.R
|
library("tidyverse")
library("dplyr")
library("mosaic")
# Observed counts (n = 30), modelled as Poisson draws.
set = c(12, 18,15,8,17,13,22,13,13,13,12,11,15,15,12,8,20,12,14,11,9,15,16,20,9,15,13,19,18,14)
s = sum(set)
size = length(set)
a = s/size          # sample mean = Poisson rate estimate (lambda-hat)
SE = sqrt(a/size)   # SE of the mean for a Poisson sample: sqrt(lambda/n)
LN = a - 1.96*SE    # lower bound, 95% normal-approximation CI
UN = a + 1.96*SE    # upper bound
LN
UN
# Parametric bootstrap: simulate 30 Poisson draws at lambda-hat, then
# resample-with-replacement 1000 times and collect the bootstrap means.
# NOTE(review): no set.seed(), so results differ between runs.
poissonsample = rpois(30, a)
boot = do(1000)*{
  btx = resample(poissonsample)
  mean(btx)
}
confint(boot)       # bootstrap percentile CI for the mean
hist(boot$result)   # sampling distribution of the bootstrap means
|
4af8e9f94ecd8fa1a90e060ed3efbe08ac9c3c3d
|
b2664f3ae4301fe5770b0e85001b89486d62e2fc
|
/plot4.R
|
d62e10d5f958f7dc8559383c68507f11143f469c
|
[] |
no_license
|
CSoaresF/ExData_Plotting1
|
2faa76a8034494a7bcf013d7a35e84b02a5c7e65
|
973a16954b4bcfe3f7e6b3282ed0141d003e51fd
|
refs/heads/master
| 2021-01-15T12:10:18.173155
| 2015-12-14T00:14:13
| 2015-12-14T00:14:13
| 43,750,875
| 0
| 0
| null | 2015-10-06T13:01:43
| 2015-10-06T13:01:43
| null |
UTF-8
|
R
| false
| false
| 2,055
|
r
|
plot4.R
|
# plot4.R -- 2x2 panel of household power consumption for 1-2 Feb 2007.
# Set project folder. NOTE(review): a hard-coded absolute path only works on
# one machine; consider an RStudio project or here::here() instead.
setwd("C:/EDA_PROJECT1")

# If the file does not exist, download and unzip it into subfolder "/data".
if(!file.exists("data/household_power_consumption.txt")) {
  url <- "https://d396qusza40orc.cloudfront.net/exdata%2Fdata%2Fhousehold_power_consumption.zip"
  download.file(url, destfile="data.zip")
  unzip(zipfile="data.zip", exdir="data")
}

# Load the dataset. This file marks missing values with "?"; declaring that
# via na.strings lets read.table keep the measurement columns numeric
# (otherwise every column containing a "?" is read as character and the
# plot() calls below fail).
data1 <- read.table("data/household_power_consumption.txt",
                    header=TRUE,
                    sep=";",
                    dec=".",
                    na.strings="?",
                    stringsAsFactors=FALSE)

# Keep only 1 and 2 February 2007.
data2 <- subset(data1, (data1$Date == "1/2/2007" | data1$Date== "2/2/2007"))

# Build the DateTime column. (The original passed the format string as a
# stray argument to transform() instead of to as.POSIXct(), so parsing
# relied on the default format and failed; pass it explicitly.)
data2$DateTime <- as.POSIXct(paste(data2$Date, data2$Time),
                             format = "%d/%m/%Y %H:%M:%S")

# Ensure the sub-metering columns are numeric (no-op when na.strings above
# already produced numeric columns; kept for safety).
data2$Sub_metering_1 <- as.numeric(as.character(data2$Sub_metering_1))
data2$Sub_metering_2 <- as.numeric(as.character(data2$Sub_metering_2))
data2$Sub_metering_3 <- as.numeric(as.character(data2$Sub_metering_3))

attach(data2) # use the variables of data2; detached at the end

# 4 graphics in a 2 x 2 grid
par(mfrow = c(2, 2))

# top-left: global active power over time
plot(DateTime, Global_active_power,
     type = "l",
     xlab = "",
     ylab = "Global Active Power")

# top-right: voltage over time
plot(DateTime, Voltage,
     type = "l",
     xlab = "datetime",
     ylab = "Voltage")

# bottom-left: the three sub-metering series with a legend
plot(DateTime, Sub_metering_1,
     type="l",
     ylab= "Energy sub metering",
     xlab="")
lines(DateTime, Sub_metering_2,
      type="l",
      col="red")
lines(DateTime, Sub_metering_3,
      type="l",
      col="blue")
legend("topright",
       c("Sub_metering_1", "Sub_metering_2", "Sub_metering_3"),
       lty=1,
       col=c("black", "red", "blue"))

# bottom-right: global reactive power over time
plot(DateTime, Global_reactive_power,
     type = "l",
     col = "black",
     xlab = "datetime",
     ylab = colnames(data2)[4])

# Copy the screen device to a 480x480 PNG.
# (Fix: the argument is `width`, not `with` — the typo was silently ignored
# via partial matching failure and the default size was used.)
dev.copy(png, file="plot4.png", width=480, height=480)
dev.off()
detach(data2)
|
9b8e0496a2d1cfb6874d3d3420b88e685c8aae1b
|
8f0d122d166d74b9e2d0d92896891af4d7c1d8ca
|
/R/fpl.R
|
09b409689f5ccf314550d850036cf9b4d9b95b1b
|
[] |
no_license
|
cran/richards
|
22dd6bf22c97c3efa168332a41e69ec3f81ba075
|
32f323299ab88c879f96d0906747adf98cdbca34
|
refs/heads/master
| 2016-08-08T00:40:13.342386
| 2009-03-31T00:00:00
| 2009-03-31T00:00:00
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 86
|
r
|
fpl.R
|
# Four-parameter logistic (4PL) curve evaluated at dose `x`.
#
# x: dose(s); vectorised.
# a: response at zero dose; d: limiting response as x -> Inf.
# e: inflection dose (presumably the EC50 — confirm against usage);
# b: slope parameter.
fpl <- function(x, a = 0.1, d = 2.4, e = 100, b = 1) {
  denom <- 1 + (x / e)^b
  d + (a - d) / denom
}
|
56269f284db1efd93f2b062da8414af097898da6
|
9984ac7ab4d7531e374983c18b2e0341894f371a
|
/man/simplify_immgen_celltype.Rd
|
f7d34f7c48d68ca9b64448d182fb96c906d871a7
|
[
"MIT"
] |
permissive
|
ddiez/celltype
|
38c3d75a99a822320d66ae0a9bf11243687b7eef
|
678c184a2c5ae8c3bbe605db37a9eb9053c9b6e6
|
refs/heads/master
| 2021-08-17T04:22:18.294013
| 2020-04-30T02:49:39
| 2020-04-30T02:49:39
| 172,221,916
| 2
| 1
|
NOASSERTION
| 2019-05-24T12:27:59
| 2019-02-23T14:12:08
|
R
|
UTF-8
|
R
| false
| true
| 511
|
rd
|
simplify_immgen_celltype.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/predict.R
\name{simplify_immgen_celltype}
\alias{simplify_immgen_celltype}
\alias{simplify_immgen_celltype.character}
\title{simplify_immgen_celltype}
\usage{
simplify_immgen_celltype(x)
\method{simplify_immgen_celltype}{character}(x)
}
\arguments{
\item{x}{character vector with cell type names from Immgen.}
}
\description{
Simplifies cell type names in the immgen dataset by picking the upper level
cell type in the hierarchy.
}
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.