blob_id stringlengths 40 40 | directory_id stringlengths 40 40 | path stringlengths 2 327 | content_id stringlengths 40 40 | detected_licenses listlengths 0 91 | license_type stringclasses 2 values | repo_name stringlengths 5 134 | snapshot_id stringlengths 40 40 | revision_id stringlengths 40 40 | branch_name stringclasses 46 values | visit_date timestamp[us]date 2016-08-02 22:44:29 2023-09-06 08:39:28 | revision_date timestamp[us]date 1977-08-08 00:00:00 2023-09-05 12:13:49 | committer_date timestamp[us]date 1977-08-08 00:00:00 2023-09-05 12:13:49 | github_id int64 19.4k 671M ⌀ | star_events_count int64 0 40k | fork_events_count int64 0 32.4k | gha_license_id stringclasses 14 values | gha_event_created_at timestamp[us]date 2012-06-21 16:39:19 2023-09-14 21:52:42 ⌀ | gha_created_at timestamp[us]date 2008-05-25 01:21:32 2023-06-28 13:19:12 ⌀ | gha_language stringclasses 60 values | src_encoding stringclasses 24 values | language stringclasses 1 value | is_vendor bool 2 classes | is_generated bool 2 classes | length_bytes int64 7 9.18M | extension stringclasses 20 values | filename stringlengths 1 141 | content stringlengths 7 9.18M |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
6e8e849fd86ce64f520187ad451451109aea07de | 29585dff702209dd446c0ab52ceea046c58e384e | /slfm/R/RcppExports.R | 48321d20450b1f51db975fed1df85007d088dc83 | [] | no_license | ingted/R-Examples | 825440ce468ce608c4d73e2af4c0a0213b81c0fe | d0917dbaf698cb8bc0789db0c3ab07453016eab9 | refs/heads/master | 2020-04-14T12:29:22.336088 | 2016-07-21T14:01:14 | 2016-07-21T14:01:14 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 820 | r | RcppExports.R | # This file was generated by Rcpp::compileAttributes
# Generator token: 10BE3573-1514-4C36-9D1C-5A225CD40393
gibbs <- function(x, ite, a = 2.1, b = 1.1, gamma_a = 1, gamma_b = 1, omega_0 = 0.01, omega_1 = 10, degenerate = FALSE) {
.Call('slfm_gibbs', PACKAGE = 'slfm', x, ite, a, b, gamma_a, gamma_b, omega_0, omega_1, degenerate)
}
slfm_MDN <- function(x, a = 2.1, b = 1.1, gamma_a = 1, gamma_b = 1, omega_1 = 10, burnin = 1000L, lag = 1L, npost = 500L) {
.Call('slfm_slfm_MDN', PACKAGE = 'slfm', x, a, b, gamma_a, gamma_b, omega_1, burnin, lag, npost)
}
slfm_MNN <- function(x, a = 2.1, b = 1.1, gamma_a = 1, gamma_b = 1, omega_0 = 0.01, omega_1 = 10, burnin = 1000L, lag = 1L, npost = 500L) {
.Call('slfm_slfm_MNN', PACKAGE = 'slfm', x, a, b, gamma_a, gamma_b, omega_0, omega_1, burnin, lag, npost)
}
|
4b4752ae4bd8a60c9148e796652e0646716e7f83 | 0d74c6026340636cb7a73da2b53fe9a80cd4d5a5 | /simsem/man/bindDist.Rd | ffe946e5783980f488d6b15a23e92ae7c6511a0f | [] | no_license | simsem/simsem | 941875bec2bbb898f7e90914dc04b3da146954b9 | f2038cca482158ec854a248fa2c54043b1320dc7 | refs/heads/master | 2023-05-27T07:13:55.754257 | 2023-05-12T11:56:45 | 2023-05-12T11:56:45 | 4,298,998 | 42 | 23 | null | 2015-06-02T03:50:52 | 2012-05-11T16:11:35 | R | UTF-8 | R | false | false | 4,694 | rd | bindDist.Rd | \name{bindDist}
\alias{bindDist}
\title{
Create a data distribution object.
}
\description{
Create a data distribution object. There are two ways to specify nonnormal data-generation model. To create nonnormal data by the copula method, \code{margins} and \code{...} arguments are required. To create data by Vale and Maurelli's method, \code{skewness} and/or \code{kurtosis} arguments are required.
}
\usage{
bindDist(margins = NULL, ..., p = NULL, keepScale = TRUE, reverse = FALSE,
copula = NULL, skewness = NULL, kurtosis = NULL)
}
\arguments{
\item{margins}{
A character vector specifying all the marginal distributions. The characters in argument margins are used to construct density, distribution, and quantile function names. For example, \code{"norm"} can be used to specify marginal distribution, because \code{"dnorm"}, \code{"pnorm"}, and \code{"qnorm"} are all available. A user-defined distribution or other distributions can be used. For example, \code{"gl"} function in the \code{"gld"} package can be used to represent the generalized lambda distribution where \code{"dgl"}, \code{"pgl"}, and \code{"qgl"} are available. See the description of \code{margins} attribute of the \code{\link[copula]{Mvdc}} function for further details.
}
\item{...}{
A list whose each component is a list of named components, giving the parameter values of the marginal distributions. See the description of \code{paramMargins} attribute of the \code{\link[copula]{Mvdc}} function for further details.
}
\item{p}{
Number of variables. If only one distribution object is listed, the \code{p} will make the same distribution objects for all variables.
}
\item{keepScale}{
A vector representing whether each variable is transformed its mean and standard deviation or not. If TRUE, transform back to retain the mean and standard deviation of a variable equal to the model implied mean and standard deviation (with sampling error)
}
\item{reverse}{
A vector representing whether each variable is mirrored or not. If \code{TRUE}, reverse the distribution of a variable (e.g., from positive skewed to negative skewed. If one logical value is specified, it will apply to all variables.
}
\item{copula}{
A copula class that represents the multivariate distribution, such as \code{\link[copula]{ellipCopula}}, \code{\link[copula]{normalCopula}}, or \code{\link[copula]{archmCopula}}. When this copula argument is specified, the data-transformation method from Mair, Satorra, and Bentler (2012) is used. If this copula argument is not specified, the naive Gaussian copula is used such that the correlation matrix is direct applied to the multivariate Gaussian copula. The correlation matrix will be equivalent to the Spearman's correlation (rank correlation) of the resulting data.
}
\item{skewness}{
A vector of skewness of each variable. The Vale & Maurelli (1983) method is used in data generation.
}
\item{kurtosis}{
A vector of (excessive) kurtosis of each variable. The Vale & Maurelli (1983) method is used in data generation.
}
}
\value{
\code{\linkS4class{SimDataDist}} that saves analysis result from simulate data.
}
\references{
Mair, P., Satorra, A., & Bentler, P. M. (2012). Generating nonnormal multivariate data using copulas: Applications to SEM. \emph{Multivariate Behavioral Research, 47}, 547-565.
Vale, C. D. & Maurelli, V. A. (1983) Simulating multivariate nonnormal distributions. \emph{Psychometrika, 48}, 465-471.
}
\author{
Sunthud Pornprasertmanit (\email{psunthud@gmail.com})
}
\seealso{
\itemize{
\item \code{\linkS4class{SimResult}} for the type of resulting object
}
}
\examples{
# Create data based on Vale and Maurelli's method by specifying skewness and kurtosis
dist <- bindDist(skewness = c(0, -2, 2), kurtosis = c(0, 8, 4))
\dontrun{
library(copula)
# Create three-dimensional distribution by gaussian copula with
# the following marginal distributions
# 1. t-distribution with df = 2
# 2. chi-square distribution with df = 3
# 3. normal distribution with mean = 0 and sd = 1
# Setting the attribute of each marginal distribution
d1 <- list(df=2)
d2 <- list(df=3)
d3 <- list(mean=0, sd=1)
# Create a data distribution object by setting the names of each distribution
# and their arguments
dist <- bindDist(c("t", "chisq", "norm"), d1, d2, d3)
# Create data by using Gumbel Copula as the multivariate distribution
dist <- bindDist(c("t", "chisq", "norm"), d1, d2, d3, copula = gumbelCopula(2, dim = 3))
# Reverse the direction of chi-square distribution from positively skew to negatively skew
dist <- bindDist(c("t", "chisq", "norm"), d1, d2, d3, copula = gumbelCopula(2, dim = 3),
reverse = c(FALSE, TRUE, FALSE))
}
}
|
938e3479969018842584b370a0fc44cdae1e4005 | b7714ee147af95f48861922fff12e501f6f60f35 | /R/moreq.r | 3d78729c8d3e12c2c14eed78b5cb34ed09918f7a | [
"MIT",
"LicenseRef-scancode-warranty-disclaimer"
] | permissive | peterlharding/PDQ-v5 | 452eed667bf319901b9fcc459bea48c7dd2fe481 | b6ff8dd958dbae85b4402745539898b711760713 | refs/heads/master | 2023-06-26T19:14:24.907596 | 2021-07-17T02:38:45 | 2021-07-17T02:38:45 | 3,510,379 | 1 | 0 | null | null | null | null | UTF-8 | R | false | false | 2,722 | r | moreq.r | ###############################################################################
# Copyright (C) 1994 - 2009, Performance Dynamics Company #
# #
# This software is licensed as described in the file COPYING, which #
# you should have received as part of this distribution. The terms #
# are also available at http://www.perfdynamics.com/Tools/copyright.html. #
# #
# You may opt to use, copy, modify, merge, publish, distribute and/or sell #
# copies of the Software, and permit persons to whom the Software is #
# furnished to do so, under the terms of the COPYING file. #
# #
# This software is distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY #
# KIND, either express or implied. #
###############################################################################
# Load the library
library("pdq")
#
# Thruput bounds for closed tandem QNM with fixed Z
# Created by NJG on Fri Feb 27, 2009
#
# Sweeps the number of tandem queueing centres K in {1, 20, 50}; for each K,
# solves a closed PDQ model at every client population N = 1..200 and plots
# throughput X(N) with its asymptotic bounds.
clients = 200
think = 8 * 10^(-3) # ms as seconds
stime = 500 * 10^(-3) # ms as seconds
work = "w"
for(k in list(1,20,50)) {
xc<-0 # prudent to reset these
yc<-0
# Solve the model once per client population size i.
for (i in 1:clients) {
Init("")
CreateClosed(work, TERM, as.double(i), think)
# create k queueing nodes in PDQ
for (j in 1:k) {
nname = paste("n",sep="",j) # concat j to node name
CreateNode(nname,CEN,FCFS)
SetDemand(nname,work,stime)
}
Solve(APPROX)
# set up for plotting in R
xc[i]<-as.double(i)
yc[i]<-GetThruput(TERM,work)
# N* = (K*S + Z)/S: client count at which the bottleneck saturates.
nopt<-(k*stime+think)/stime
}
if (k==1) {
# establish plot frame and first curve
# Upper throughput bound is 1/stime (red); linear rise bound is N/(N* * S) (blue).
plot(xc, yc, type="l", ylim=c(0,1/stime), lwd=2, xlab="N Clients",
ylab="Throughput X(N)")
title("Increasing Number (K) of Queues")
abline(0, 1/(nopt*stime), lty="dashed", col="blue")
abline(1/stime, 0, lty="dashed", col = "red")
abline(v=nopt, lty="dashed", col="gray50") # grid line
text(150,1.95,paste("K =", k))
text(5+k,1.4,paste("N* =",nopt),adj=c(0,0))
} else {
# add the other curves
# Label offsets are scaled by k so the K = 20 and K = 50 labels do not overlap.
points(xc, yc, type="l", lwd=2)
abline(0,1/(nopt*stime), lty="dashed", col="blue")
abline(v=nopt, lty="dashed", col="gray50")
text(150,1.85-0.085*k/10, paste("K =", k))
text(k+5,0.5-0.1*k/10,paste("N* =",nopt),adj=c(0,0))
}
}
|
f26c1a20e1b71e10684e5e0dc06870bc69c8a8e0 | df2b962f7a7dee56c5358cd83c73aa4c051df512 | /R/genZ2ZS.R | ac5c014694ce5bb063aac236bf41b9edffd9af45 | [] | no_license | cran/noia | 787eb4f7ab9db691f11baff8fa9cde6c2f2c8660 | dacfb23d55f7fb1cb0d22428ff6aafb5d14a5b79 | refs/heads/master | 2023-03-24T11:03:33.032673 | 2023-03-08T07:10:10 | 2023-03-08T07:10:10 | 17,697,929 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,694 | r | genZ2ZS.R | genZ2ZS <-
function (genZ, reference = "F2", max.level = NULL, max.dom = NULL,
threshold = 0)
{
# Builds the genotype (zmat) and genetic-effect (smat) design matrices for a
# multilocus NOIA model by expanding one locus at a time.
#   genZ      : matrix with 3 columns per locus (genotype indicator columns),
#               individals in rows.
#   reference : reference population code forwarded to Sloc() (default "F2").
#   max.level, max.dom : optional limits forwarded to effectsSelect() to drop
#               high-order interaction / dominance effects.
#   threshold : accepted for interface compatibility; not referenced in this
#               body (presumably consumed elsewhere -- TODO confirm).
# Returns a list with zmat (individuals x multilocus genotypes) and smat
# (multilocus genotypes x effects), with matching dimnames.
"strrev" <- function(ss) {
# Reverse the characters of each string; used so that locus labels end up
# in the intended order after kronecker() products of names.
sapply(lapply(strsplit(ss, character(0)), rev), paste,
collapse = "")
}
ans <- list()
N <- nrow(genZ)
# Start from trivial 1-element matrices and grow them locus by locus.
ans$smat <- 1
ans$zmat <- as.matrix(rep(1, N))
nloc <- ncol(genZ)/3
for (l in 1:nloc) {
eff <- colnames(ans$smat)
geno <- colnames(ans$zmat)
# Multiply the current locus' three genotype-indicator columns into every
# existing multilocus genotype column (per-individual outer product).
ans$zmat <- t(apply(cbind(genZ[, (3 * l - 2):(3 * l)],
ans$zmat), 1, function(x) {
c(x[1] * x[4:length(x)], x[2] * x[4:length(x)], x[3] *
x[4:length(x)])
}))
ans$smat <- kronecker(Sloc(reference = reference, l,
genZ), ans$smat)
# First locus: take names directly; later loci: combine names, reversing
# so the newest locus' label lands on the correct side.
if (is.null(eff)) {
colnames(ans$smat) <- noia::effectsNames[1:3]
}
else {
colnames(ans$smat) <- strrev(kronecker(noia::effectsNames[1:3],
strrev(eff), "paste", sep = ""))
}
if (is.null(geno)) {
colnames(ans$zmat) <- noia::genotypesNames
}
else {
colnames(ans$zmat) <- strrev(kronecker(noia::genotypesNames,
strrev(geno), "paste", sep = ""))
}
rownames(ans$smat) <- colnames(ans$zmat)
# Restrict to the requested effects; genotype columns are kept unchanged.
useful.effects <- effectsSelect(nloc = nloc, max.level = max.level,
max.dom = max.dom, effects = colnames(ans$smat))
useful.genotypes <- colnames(ans$zmat)
ans$smat <- ans$smat[useful.genotypes, useful.effects]
ans$zmat <- ans$zmat[, useful.genotypes]
}
rownames(ans$smat) <- colnames(ans$zmat)
rownames(ans$zmat) <- rownames(genZ)
return(ans)
}
|
28b21e36e1c6d0a180520cbad383ad69cbf24b7e | 6cb21fd5e1726ed181c30c2dc07823df207a2cee | /data-raw/DATASET.R | 0c930d9506e3d3a58f9d0901ac80f98c83a3adea | [] | no_license | Reed-Statistics/westCoastMeasles | 5e814b7b26304323cb864d1783da0ee90e0efabe | 917cd3f1f7ee7d4f1e5c69dfa2488b90d35ab1ee | refs/heads/master | 2021-03-05T22:57:05.011612 | 2020-03-09T23:08:22 | 2020-03-09T23:08:22 | 246,160,447 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 955 | r | DATASET.R | ## code to prepare `DATASET` dataset goes here
library(tidyverse)
library(stringi)
options(digits=7)
# Read each state's cleaned school measles/vaccination data.
california <-read.csv("~/wrangling_measles_data/california.csv")
# lat/lng are round-tripped through character before as.numeric -- presumably
# they were read in as factors, and this preserves the printed value. Then drop
# the enrollment column.
california<-california %>%
mutate(lat = as.character(lat), lng = as.character(lng))%>%
mutate(lat = as.numeric(lat), lng = as.numeric(lng))%>%
select(-c(enroll))
# Oregon lacks type/city columns; add them as NA so the states can be row-bound.
oregon <-read.csv("~/wrangling_measles_data/oregon.csv")%>%
select(-c(enroll))%>%
mutate(type = NA, city = NA)
# Washington's enrollment column is named differently; also tag the school year.
washington <-read.csv("~/wrangling_measles_data/washington.csv")%>%
select(-c(enrollment))%>%
mutate(year="2017-2018", type = NA)
# Stack the three states into one data frame.
# NOTE(review): rbind() requires identical column sets; confirm that all three
# state files end up with matching columns (e.g. year, lat, lng) after the
# mutations above, otherwise these calls will error.
west_measles_data <-rbind(washington, oregon)
west_measles_data <-rbind(west_measles_data, california)
# Re-encode free-text columns to UTF-8 (unknown 8-bit bytes get substituted).
west_measles_data <- west_measles_data %>%
mutate(name = stri_enc_toutf8(name, is_unknown_8bit=TRUE),
city = stri_enc_toutf8(city, is_unknown_8bit=TRUE),
county = stri_enc_toutf8(county))
# Save as the package's exported dataset.
usethis::use_data(west_measles_data, overwrite = TRUE)
|
4201161eb8198a31f5b6ae5d4acd7b87827719e2 | c87c6397713f7573ce404c7a4bbbc989cc06a974 | /TransMeta/R/SKAT_Optimal_Integrate_Func_Davies.R | cb0ca088e357a5e4ca76fb08ba249a8ea7310221 | [] | no_license | shijingc/TransMeta | bb663b70b6b9adbbdf8362e0e2eceac7a0adaa9c | 3a3a8e4b601f4158b613a030c92ddf53b4fdce89 | refs/heads/master | 2020-03-19T08:56:07.606527 | 2018-06-05T23:41:46 | 2018-06-05T23:41:46 | 136,246,248 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 768 | r | SKAT_Optimal_Integrate_Func_Davies.R | SKAT_Optimal_Integrate_Func_Davies <-
function(x,pmin.q,param.m,r.all){
# Integrand used when computing the SKAT-O (optimal unified test) p-value by
# numerical integration, evaluating the tail probability with Davies' method.
#   x      : vector of integration points (a chi-square_1 variable).
#   pmin.q : per-rho quantiles of the minimum p-value (presumably; computed
#            upstream in the SKAT package -- TODO confirm).
#   param.m: list of null-distribution parameters (tau, lambda, MuQ, VarQ,
#            VarRemain) computed elsewhere in SKAT.
#   r.all  : grid of rho values over which the optimal test searches.
# Returns a vector the same length as x with the integrand values.
n.r<-length(r.all)
n.x<-length(x)
temp1<-param.m$tau %x% t(x)
temp<-(pmin.q - temp1)/(1-r.all)
# For each integration point, the binding threshold is the minimum over rho.
temp.min<-apply(temp,2,min)
re<-rep(0,length(x))
for(i in 1:length(x)){
#a1<<-temp.min[i]
min1<-temp.min[i]
if(min1 > sum(param.m$lambda) * 10^4){
# Threshold is so far in the upper tail that the survival probability is ~0.
temp<-0
} else {
# Moment-adjust the threshold to the mixture-of-chi-squares scale, then
# evaluate its survival function with Davies' algorithm.
min1.temp<- min1 - param.m$MuQ
sd1<-sqrt(param.m$VarQ - param.m$VarRemain)/sqrt(param.m$VarQ)
min1.st<-min1.temp *sd1 + param.m$MuQ
dav.re<-SKAT:::SKAT_davies(min1.st,param.m$lambda,acc=10^(-6))
temp<-dav.re$Qq
# ifault != 0 signals a numerical failure inside Davies' routine.
if(dav.re$ifault != 0){
stop("dav.re$ifault is not 0")
}
}
# Numerical guard: the returned tail probability can slightly exceed 1.
if(temp > 1){
temp=1
}
#lambda.record<<-param.m$lambda
#print(c(min1,temp,dav.re$ifault,sum(param.m$lambda)))
# Weight the survival probability by the chi-square_1 density at x[i].
re[i]<-(1-temp) * dchisq(x[i],df=1)
}
return(re)
}
|
0f57345dce383164770f6b9968741a3629728e4b | a495b873b0c82b31b5a75ca3547c9febf3af5ddc | /array/SNPArray/utilitys/summarizeCNVCalls.r | bfc46f676c600c8077b2c05ade30fd316293b21e | [
"Artistic-2.0"
] | permissive | ejh243/BrainFANS | 192beb02d8aecb7b6c0dc0e59c6d6cf679dd9c0e | 5d1d6113b90ec85f2743b32a3cc9a428bd797440 | refs/heads/master | 2023-06-21T17:42:13.002737 | 2023-06-20T12:35:38 | 2023-06-20T12:35:38 | 186,389,634 | 3 | 2 | Artistic-2.0 | 2022-02-08T21:45:47 | 2019-05-13T09:35:32 | R | UTF-8 | R | false | false | 6,382 | r | summarizeCNVCalls.r | ## Written by Eilis
## Summarize CNV calls
## identify any overlapping with known SCZ loci or pathogenic CNVs
findOverlapsMinProp<-function(query, subject, pThres){
	# Return the query ranges that overlap a subject range by more than pThres
	# (measured as a fraction of the SUBJECT range's width), annotating each
	# returned range with the matched subject's Locus and hg38 metadata columns.
	allHits <- findOverlaps(query, subject)
	sharedRegion <- pintersect(query[queryHits(allHits)], subject[subjectHits(allHits)])
	fracOfSubject <- width(sharedRegion) / width(subject[subjectHits(allHits)])
	keptHits <- allHits[fracOfSubject > pThres]
	annotated <- query[queryHits(keptHits)]
	mcols(annotated)$Locus <- subject$Locus[subjectHits(keptHits)]
	mcols(annotated)$hg38 <- subject$hg38[subjectHits(keptHits)]
	annotated
}
library(GenomicRanges)
args<-commandArgs(trailingOnly = TRUE)
fileName<-args[1]
superPop<-args[2]
folder<-dirname(fileName)
setwd("/gpfs/mrc0/projects/Research_Project-MRC190311/SNPdata/CNV/")
dat<-read.table("PennCNVOutput/SCZ_GCModel_MergedFiltered_AnnoGencodev29.rawcnv", stringsAsFactors=FALSE)
pheno<-read.table("../MRC2_UpdatePheno.txt")
dat$V2<-as.numeric(gsub("numsnp=", "",dat$V2))
dat$V3<-as.numeric(gsub(",", "", gsub("length=", "",dat$V3)))
table(dat$V4)
## summarise CNV calls
par(mfrow = c(3,2))
hist(dat$V2, xlab = "number of markers", breaks = 35, main = "All")
hist(dat$V3/1000, xlab = "length (kb)", breaks = 35, main = "All")
index.del<-which(dat$V4 == "state2,cn=1")
hist(dat$V2[index.del], xlab = "number of markers", breaks = 35, main = "Deletions")
hist(dat$V3[index.del]/1000, xlab = "length (kb)", breaks = 35, main = "Deletions")
index.dup<-which(dat$V4 == "state5,cn=3")
hist(dat$V2[index.dup], xlab = "number of markers", breaks = 35, main = "Duplications")
hist(dat$V3[index.dup]/1000, xlab = "length (kb)", breaks = 35, main = "Duplications")
## summarise CNVs by person
par(mfrow = c(3,2))
totByPerson<-aggregate(dat$V3/1000, by = list(dat$V5), sum)
muByPerson<-aggregate(dat$V3/1000, by = list(dat$V5), mean)
hist(totByPerson[,2], xlab = "Combined length of CNVs", ylab = "Number of samples", breaks = 35, main = "All")
hist(muByPerson[,2], xlab = "Mean length of CNVs", ylab = "Number of samples", breaks = 35, main = "All")
totByPerson<-aggregate(dat$V3[index.del]/1000, by = list(dat$V5[index.del]), sum)
muByPerson<-aggregate(dat$V3[index.del]/1000, by = list(dat$V5[index.del]), mean)
hist(totByPerson[,2], xlab = "Combined length of CNVs", ylab = "Number of samples", breaks = 35, main = "Deletions")
hist(muByPerson[,2], xlab = "Mean length of CNVs", ylab = "Number of samples", breaks = 35, main = "Deletions")
totByPerson<-aggregate(dat$V3[index.dup]/1000, by = list(dat$V5[index.dup]), sum)
muByPerson<-aggregate(dat$V3[index.dup]/1000, by = list(dat$V5[index.dup]), mean)
hist(totByPerson[,2], xlab = "Combined length of CNVs", ylab = "Number of samples", breaks = 35, main = "Duplications")
hist(muByPerson[,2], xlab = "Mean length of CNVs", ylab = "Number of samples", breaks = 35, main = "Duplications")
## convert to GRanges; do sep for deletions and duplications
# dat$V1 holds the CNV coordinates as parseable range strings, V4 the PennCNV
# state/copy-number label, V5 the sample ID. Per the earlier summary section,
# "state2,cn=1" marks deletions and "state5,cn=3" marks duplications.
index<-which(dat$V4 == "state2,cn=1")
allCNVs.del<-GRanges(dat$V1[index])
mcols(allCNVs.del)$SampleID <- dat$V5[index]
mcols(allCNVs.del)$Type <- dat$V4[index]
index<-which(dat$V4 == "state5,cn=3")
allCNVs.dup<-GRanges(dat$V1[index])
mcols(allCNVs.dup)$SampleID <- dat$V5[index]
mcols(allCNVs.dup)$Type <- dat$V4[index]
## do any overlap known scz cnv loci ## list taken from Rees et al. Br J Psychiatry (merge tables 1 & 2)
sczLoci<-read.csv("../../References/CNV/SCZ_CNVloci.csv", skip = 1, stringsAsFactors = FALSE)
## filter to those significant in MetaAnalysis
sczLoci<-sczLoci[which(sczLoci$significantMeta == "*"),]
sczLoci.hg38.del<-GRanges(sczLoci$hg38[grep("del", sczLoci$Locus)])
mcols(sczLoci.hg38.del)$Locus<-sczLoci$Locus[grep("del", sczLoci$Locus)]
mcols(sczLoci.hg38.del)$hg38<-sczLoci$hg38[grep("del", sczLoci$Locus)]
sczLoci.hg38.dup<-GRanges(sczLoci$hg38[grep("dup", sczLoci$Locus)])
mcols(sczLoci.hg38.dup)$Locus<-sczLoci$Locus[grep("dup", sczLoci$Locus)]
mcols(sczLoci.hg38.dup)$hg38<-sczLoci$hg38[grep("dup", sczLoci$Locus)]
pThres<-0.9 ## set as minimum overlap required
overlapDel<-findOverlapsMinProp(allCNVs.del, sczLoci.hg38.del, pThres)
overlapDup<-findOverlapsMinProp(allCNVs.dup, sczLoci.hg38.dup, pThres)
output<-rbind(data.frame(overlapDel), data.frame(overlapDup))
write.csv(output, "CNVsoverlappingKnownSCZRiskLoci.csv")
## do any overlap known ID cnv loci ## list taken from Rees et al. JAMA Psychiatry 2016 (eTable 2)
idLoci<-read.csv("../../References/CNV/IDCNVLoci.csv", stringsAsFactors = FALSE)
idLoci<-idLoci[which(idLoci$hg38 != ""),] ## 1 region I couldn't lift over
idLoci.hg38.del<-GRanges(idLoci$hg38[grep("del", idLoci$Syndrome)])
mcols(idLoci.hg38.del)$Locus<-idLoci$Syndrome[grep("del", idLoci$Syndrome)]
mcols(idLoci.hg38.del)$hg38<-idLoci$hg38[grep("del", idLoci$Syndrome)]
idLoci.hg38.dup<-GRanges(idLoci$hg38[grep("dup", idLoci$Syndrome)])
mcols(idLoci.hg38.dup)$Locus<-idLoci$Syndrome[grep("dup", idLoci$Syndrome)]
mcols(idLoci.hg38.dup)$hg38<-idLoci$hg38[grep("dup", idLoci$Syndrome)]
pThres<-0.9 ## set as minimum overlap required
overlapDel<-findOverlapsMinProp(allCNVs.del, idLoci.hg38.del, pThres)
overlapDup<-findOverlapsMinProp(allCNVs.dup, idLoci.hg38.dup, pThres)
output<-rbind(data.frame(overlapDel), data.frame(overlapDup))
write.csv(output, "CNVsoverlappingIDRiskLoci.csv")
## do any overlap known pathogenic cnv loci ## list taken from Kendall et al 2017 Biol Psychiatry
pathLoci<-read.table("../../References/CNV/PathogenicCNVLoci.txt", stringsAsFactors = FALSE, sep = "\t", header = TRUE)
pathLoci<-pathLoci[which(pathLoci$hg38 != ""),] ## 1 region I couldn't lift over
pathLoci.hg38.del<-GRanges(pathLoci$hg38[grep("del", pathLoci$CNV.locus)])
mcols(pathLoci.hg38.del)$Locus<-pathLoci$CNV.locus[grep("del", pathLoci$CNV.locus)]
mcols(pathLoci.hg38.del)$hg38<-pathLoci$hg38[grep("del", pathLoci$CNV.locus)]
pathLoci.hg38.dup<-GRanges(pathLoci$hg38[grep("dup", pathLoci$CNV.locus)])
mcols(pathLoci.hg38.dup)$Locus<-pathLoci$CNV.locus[grep("dup", pathLoci$CNV.locus)]
mcols(pathLoci.hg38.dup)$hg38<-pathLoci$hg38[grep("dup", pathLoci$CNV.locus)]
pThres<-0.9 ## set as minimum overlap required
overlapDel<-findOverlapsMinProp(allCNVs.del, pathLoci.hg38.del, pThres)
overlapDup<-findOverlapsMinProp(allCNVs.dup, pathLoci.hg38.dup, pThres)
output<-rbind(data.frame(overlapDel), data.frame(overlapDup))
write.csv(output, "CNVsoverlappingPathogenicCNV.csv")
|
f729508cd0ca12ad61a3bea88f0216c9802aefb2 | 5f20fd1380bc9d06273778e729e45d8d9960c284 | /RNASeq.R | bf03a0c5d8b2ac4f1fbc742da68a4efbe98f2346 | [] | no_license | hzorkic/machine_learning_and_molecular_docking_for_drug_discovery | 8e009e19c5a175e8973f1bad65c64be5923b1441 | a6052bca25e68b3fa6559ac5d860c8cc956439e7 | refs/heads/main | 2023-04-16T05:06:19.828482 | 2021-04-29T03:36:07 | 2021-04-29T03:36:07 | 354,099,001 | 5 | 0 | null | null | null | null | UTF-8 | R | false | false | 6,051 | r | RNASeq.R | # In this report, we are primarily interested in understanding how the
# differences in gene expression between healthy and canerous tissue samples
# from The Cancer Genome Altas (TCGA)
##################
# load libraries #
##################
library(DESeq2)
library(tidyverse)
library(ggplot2)
library(matrixStats)
library(pheatmap)
################################################
# Perform DESeq analysis on our raw RNA counts #
################################################
# A DESeqDataSet Object was constructed on the gene expression data using the
# DESeq package to perform a hypothesis test for the significance of tissue type
# in the model
# rnaCounts: data.frame containing RNA-seq counts for each gene in each sample
# (genes are in rows of data.frame, samples in columns):
rnaCounts = read.table("CONVERTED_COAD_GCM.csv",
sep=",",
header=TRUE,
row.names=1,
check.names=FALSE)
# sampleAnnotation: data.frame with one row per sample; columns say what
# group (=tissue cancerous or not) describe each sample:
sampleAnnotation = read.table("COAD_sample_annotation.csv",
sep=",",
header=TRUE,
row.names=1,
check.names=FALSE)
# first initialize DESeq object:
dds <- DESeqDataSetFromMatrix(
## countData argument should be numeric matrix (or data.frame with
## only numeric columns which can be coerced to numeric matrix):
countData = rnaCounts,
## colData argument should be data.frame with grouping information
## (and any other non-gene-expression covariates that might be
## useful in modeling the counts) whose rows correspond to the
## columns (hence name colData) of countData:
colData = sampleAnnotation,
## note tilde preceding the name group of the relevant column in
## the data.frame sampleAnnotation provided as the colData argument
## in the design argument---design argument must be formula object,
## similar to the right-hand side of the formulas used for linear
## modeling with lm:
design = ~ tissue)
# now run DESeq analysis pipeline on object dds in order to perform a hypothesis
# test specifically for the significance of the interaction term
dds <- DESeq(dds)
#Finally, use the results function from the DESeq2 package to extract a table of
# results for each gene from the DESeqDataSet object.
# sort dds results data.frame by p-value:
ddsresults <- results(dds)
ddsresults <- ddsresults[order(ddsresults$pvalue), ]
ddsresults
##############################################################
# Extract the Normalized Counts from the DESeqDataSet Object #
##############################################################
# use counts function with normalized arg set to TRUE to extract
# "normalized counts" (which are not integers) from dds
# (note: counts *function* from DESeq2 package is different object
# in R than counts *data.frame* we loaded from csv file;
# R can tell based on context which one we mean):
normed <- counts(dds, normalized=TRUE)
# log transform in order to makes it easy to see proportional changes in
# expression levels in differential expression. For example we would observe
# a tumor gene to have positive expression in tumor tissue, but the same gene
# would have negative proportional expression in a healthy tissue
lgNorm <- log2(normed + 1)
# save the normalized counts matrix to a tsv file:
write.table(data.frame(normed),
"COAD_normalized_counts.csv",
sep = ",", row.names = FALSE,
quote = FALSE)
##########################################################
# Generate a PCA Plot of Normalized Gene Expression Data #
##########################################################
# make sure lgNorm is a data.frame
lgNorm <- data.frame(lgNorm)
# make the rownames a column name
lgNorm <- rownames_to_column(lgNorm, var = "gene")
lgGo <- column_to_rownames(lgNorm, var = "gene")
sampleAnnotation = data.frame(
## set row.names of sampleAnnotation to match col.names of normed:
row.names = colnames(normed))
# NOTE(review): this OVERWRITES the annotation loaded from CSV earlier with a
# zero-column data.frame, so the "group" lookup below cannot return anything
# useful; pcaData$group is recomputed from the sample names further down,
# which is what the plot actually uses -- confirm the overwrite is intended.
# save
write.csv(lgGo, file="log2transformed_and_normalized_gene_expression_data.csv")
pca = prcomp(t(lgGo))
## perform PCA on Normalized data
## reconstruct the data from scores x rotation (pcaFit is not used later)
pcaFit = rowMeans(lgGo) + t( pca$x %*% t(pca$rotation) )
## have to transpose pca$x %*% t(pca$rotation) above b/c lgNorm is t(z)
## set up data.frame pcaData for ggplot...
## keep only the first two principal components
pcaData = data.frame(pca$x[ , 1:2])
## add in sample annotation info
pcaData$group = sampleAnnotation[rownames(pcaData), "group"]
## and sample names
pcaData$sample = rownames(pcaData)
pcaData <- pcaData %>% data.frame()
# classify each sample as Healthy/Tumor from its name (case-insensitive match)
pcaData$group <- ifelse(grepl("Healthy", pcaData$sample, ignore.case = T), "Healthy",
ifelse(grepl("Tumor", pcaData$sample, ignore.case = T), "Tumor", "Other"))
pcaData
# PC1 vs PC2 scatter, coloured by tissue type
ggplot(pcaData, aes(x=PC1, y=PC2, label=sample, color = group)) +
geom_point(size=4, shape = 18, alpha = 0.75) +
scale_color_manual(values = c("Healthy" = "orange", "Tumor" = "maroon")) +
ggtitle("Principal Components Analysis (PCA) of TCGA COAD Tissue RnaSeq Data") +
theme_light()
##################################################
# Genes with Significant Differential Expression #
##################################################
# order for top gene
top <- ddsresults[order(ddsresults$padj),]
# select only those genes with a padj greater than X (0.05)
top <- data.frame(top) %>% filter(padj < 0.05)
nrow(top)
# select the top X genes (3000)
top <- top[1:3000, ]
# remove any rows that have NAs
top <- na.omit(top)
# save
write.csv(top, file="top_genes.csv")
##############################################
# Heatmap of Normalized Gene Expression Data #
##############################################
# IMPORTANT::: I would actually just input the
# log2transformed_and_normalized_gene_expression_data.csv
# Gene Expression Data into Morpheus:
# https://software.broadinstitute.org/morpheus/
|
8b0bf7203e7addfac50224532ea9b5fbcb44129b | 51b73b87bbddaa2aa06bd1753c2cd575ddc756a6 | /GROWTH CURVE ANALYSIS.R | af2ee36ea3f9ea6c25231b1d12fa1587e4f5f3a6 | [] | no_license | aravinddharmaraj/HBGDKi | d92876547d24946a02c782b7d89eed816783beee | 14860309d1153fbfca5d5a939e915d47c3279e29 | refs/heads/main | 2023-08-13T00:33:30.885254 | 2021-09-22T07:05:48 | 2021-09-22T07:05:48 | 395,277,023 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 19,505 | r | GROWTH CURVE ANALYSIS.R | cat("\f")###to clear console
# LOAD REQUIRED LIBRARIES
library(ggplot2)
library(reshape2)
library(plyr)
library(dplyr)
library(stringr)
library(caret)
library(grid)
rm(list = ls()) # CLEAR ENVIRONMENT
# SET WORK DIRECTORY
setwd("C:\\Aravind work file\\BF_data\\MERGING FILES\\data_clean")
######## **************** LOAD DATASETS ********************##########
growth_vel <- read.csv("main/data_clean/final_data/growth_outcome_count/post_match_data_height_weight.csv")###for 7 studies
str(growth_vel)
length(unique(growth_vel$idno))
# CHANGE AGE TO MONTHS
growth_vel$age_m <- (growth_vel$age)/30
# VISUALIZE AGE
hist(growth_vel$age)
hist(growth_vel$age_m)
str(growth_vel)
# ROUND AGE WITHOUT INTEGERS
growth_vel$age_m1<- round(growth_vel$age_m,digits=0)
# FILTER AGE 0 TO 24
growth_vel$age_cat <- ifelse(growth_vel$age_m1 <= 24 , "less than 24",
ifelse(growth_vel$age_weand >= 25 , "others",NA))
# FILTER
growth_vel <- filter(growth_vel, age_cat %in% c("less than 24"))
# CHANGE TO FACTOR
growth_vel$age_m1<- as.factor(growth_vel$age_m1)
growth_vel$study<- as.factor(growth_vel$study)
table(growth_vel$age_m1)
str(growth_vel)
# FILTER FOR 4 AND 6 MONTHS (EXPOSURE GROUP)
growth_vel$weand_cat <- ifelse(growth_vel$age_weand <= 134 & growth_vel$age_weand >= 106, "4 months",
ifelse(growth_vel$age_weand <= 194 & growth_vel$age_weand >= 166, "6 months",
ifelse(growth_vel$age_weand , "others",NA)))
table(growth_vel$weand_cat)
table(growth_vel$study)
##filter for multiple categories
growth_vel <- filter(growth_vel, weand_cat %in% c("4 months", "6 months"))
hist(growth_vel$age_weand,breaks = 100)
head(growth_vel)
O<-growth_vel
# keep only first entries based on ID and Age
O <- distinct(O, idno , age_m1 ,.keep_all = TRUE)
## CHECK OUTLIERS AND REMOVE FOR HEIGHT AND WEIGHT
boxplot(O$height)
height<- O[-which(O$height %in% boxplot.stats(O$height)$out),]
boxplot(height$height)
summary(height$height)
summary(O$height)
boxplot(O$weight)
weight<- O[-which(O$weight %in% boxplot.stats(O$weight)$out),]
boxplot(weight$weight)
summary(weight$weight)
summary(O$weight)
### removed outliers
## take children with minimum 5 serial measurement
head(height)
d1_sort<- select(height, idno, age_m1, height,weight) # SELECT REQUIRED VARIABLES
d1_sort <- d1_sort[order(d1_sort[,"idno"], d1_sort[,"age_m1"]), ] # SORT
library(tidyverse)
d1_sort1<- d1_sort
d1_sort1<- d1_sort1 %>%
group_by(idno) %>%
count()
d1_sort1$cat <- ifelse(d1_sort1$n <= 4 , "less than 4",
ifelse(d1_sort1$n >= 5 , "5 and above",NA))
table(d1_sort1$cat)
d1_sort1<- filter(d1_sort1, cat %in% c("5 and above"))
library(dplyr)
d1_sort2 <- merge(d1_sort1,height, by="idno") # merge by id # left joint
length(unique(d1_sort2$idno)) # 243 correct!
table(d1_sort2$cat)
head(d1_sort2)
height<- select(d1_sort2, idno,study,age_m1,age, weight,height,weand_cat,sex)
##########################################################
########### calculating residual standard deviation ######
##########################################################
head(height)
str(height)
height$age_m1<- as.character(height$age_m1)
height$age_m1<- as.numeric(height$age_m1)
random_id <- height[order(height[,"idno"], height[,"age_m1"]), ] # SORT
# CHECK THE DATA ON HEIGHT AND WEIGHT FOR ONE RANDOMLY SAMPLED CHILD
# NOTE(review): sample() is not seeded, so a different child is drawn each run.
random_id1<-height[height$idno %in% sample(unique(height$idno),1),]
table(random_id1$idno)
summary(random_id1)
str(random_id1)
# Fix: the original called plot(age_m1, height), but no free-standing age_m1
# object exists (it is a column of random_id1) and 'height' names the whole
# data frame, so that call would error. Plot the sampled child's growth
# trajectory instead.
plot(random_id1$age_m1, random_id1$height)
###########################################################################
## to extract residual standard deviation for each children ###############
###########################################################################
# LINEAR REGRESSION
model1<- by(random_id, random_id$idno,
function(data)sigma(lm(height~age_m1,data=data)))
rse<- model1
# sum of all the rsd
linear<- sum(rse)
linear
nrow(na.omit(model1))
# calculating percent error through rsd
rsd <- data.frame(matrix(unlist(rse), nrow=length(rse), byrow=TRUE))
colnames(rsd)[1]<- 'x'
m1<- aggregate(x=random_id$height,by=list(random_id$idno),FUN=mean )
m1<- select(m1, x)
mod<- rsd*100/m1$x
table(is.na(mod$x))
rse_p1<- (sum(mod,na.rm = T)/24300)*100
rse_p1
# QUADRATIC POLYNOMIAL REGRESSION
model2<- by(random_id, random_id$idno,
function(data)sigma(lm(height~age_m1+I(age_m1^2),data=data)))
rse<- model2
quadratic<- sum(rse)
quadratic
nrow(na.omit(model2))
rsd <- data.frame(matrix(unlist(rse), nrow=length(rse), byrow=TRUE))
colnames(rsd)[1]<- 'x'
m1<- aggregate(x=random_id$height,by=list(random_id$idno),FUN=mean )
m1<- select(m1, x)
mod<- rsd*100/m1$x
rse_p1<- (sum(mod,na.rm = T)/24300)*100
rse_p1
# CUBIC POLYNOMIAL REGRESSION
model3<- by(random_id, random_id$idno,
function(data)sigma(lm(height~age_m1+I(age_m1^2)+
+I(age_m1^2),data=data)))
rse<- model3
cubic<- sum(rse)
cubic
nrow(na.omit(model3))
rsd <- data.frame(matrix(unlist(rse), nrow=length(rse), byrow=TRUE))
colnames(rsd)[1]<- 'x'
m1<- aggregate(x=random_id$height,by=list(random_id$idno),FUN=mean )
m1<- select(m1, x)
mod<- rsd*100/m1$x
rse_p1<- (sum(mod,na.rm = T)/24300)*100
rse_p1
# LOG LINEAR REGRESSION
model4<- by(random_id, random_id$idno,
function(data)sigma(lm(log(height)~age_m1,data=data)))
rse<- model4
loglinear<- sum(rse)
loglinear
nrow(na.omit(model4))
rsd <- data.frame(matrix(unlist(rse), nrow=length(rse), byrow=TRUE))
colnames(rsd)[1]<- 'x'
m1<- aggregate(x=random_id$height,by=list(random_id$idno),FUN=mean )
m1<- select(m1, x)
mod<- rsd*100/m1$x
rse_p1<- (sum(mod,na.rm = T)/24300)*100
rse_p1
# LINEAR LOG REGRESSION
# TO PERFORM LOG 0 MUST BE EXCLUDED
# Categorise ages so log(age) is defined for the retained rows only.
# NOTE(review): ages strictly between 0 and 1 match neither condition and
# become NA, so they are silently dropped by the filter below -- confirm
# that is intended.
random_id$age_cat <- ifelse(random_id$age_m1 <= 0 , "less than 0",
ifelse(random_id$age_m1 >= 1 , "others",NA))
table(random_id$age_cat)
##filter for multiple categories
height1 <- filter(random_id, age_cat %in% c("others"))
# Per-subject fit of height on log(age); keep each residual SE.
model5<- by(height1, height1$idno,
function(data)sigma(lm(height~log(age_m1),data=data)))
rse<- model5
#rse<- exp(rse)
linearlog<- sum(rse)
linearlog
nrow(na.omit(model5))
#exp(linearlog)
rsd <- data.frame(matrix(unlist(rse), nrow=length(rse), byrow=TRUE))
colnames(rsd)[1]<- 'x'
# Denominator uses the filtered subset (height1), matching model5's data.
m1<- aggregate(x=height1$height,by=list(height1$idno),FUN=mean )
m1<- select(m1, x)
mod<- rsd*100/m1$x
rse_p1<- (sum(mod,na.rm = T)/24300)*100
rse_p1
# LOG LOG REGRESSION
# Model log(height) on log(age) per subject; keep each fit's residual SE.
model6<- by(height1, height1$idno,
function(data)sigma(lm(log(height)~log(age_m1),data=data)))
# BUG FIX: the original ran `rse<- exp(rse)` *before* `rse<- model6`, so it
# exponentiated the previous section's values and was immediately
# overwritten -- a no-op on stale data. The dead statement is removed.
# If a back-transform of the log-scale sigma is wanted, apply exp() after
# assigning model6 -- confirm intent.
rse<- model6
loglog<- sum(rse)
loglog
nrow(na.omit(model6))
# Percent error relative to each subject's mean height in the filtered set.
rsd <- data.frame(matrix(unlist(rse), nrow=length(rse), byrow=TRUE))
colnames(rsd)[1]<- 'x'
m1<- aggregate(x=height1$height,by=list(height1$idno),FUN=mean )
m1<- select(m1, x)
mod<- rsd*100/m1$x
rse_p1<- (sum(mod,na.rm = T)/24300)*100
rse_p1
# SPLINE REGRESSION (PIESEWISE)
library(splines)
# Knots at the age quartiles, shared by every per-subject fit.
knots <- quantile(random_id$age_m1, p = c(0.25, 0.5, 0.75))
model7<- by(random_id, random_id$idno,
function(data)sigma(lm(height ~ bs(age_m1, knots = knots),
data=data)))
rse<- model7
# na.rm here: subjects with too few points for the spline yield NA sigma.
spline<- sum(rse,na.rm = T)
spline
nrow(na.omit(model7))
rsd <- data.frame(matrix(unlist(rse), nrow=length(rse), byrow=TRUE))
colnames(rsd)[1]<- 'x'
m1<- aggregate(x=random_id$height,by=list(random_id$idno),FUN=mean )
m1<- select(m1, x)
mod<- rsd*100/m1$x
# NOTE(review): every other model divides by 24300; 24200 here looks like
# either a typo or a deliberate adjustment for fits lost to NA -- confirm.
rse_p1<- (sum(mod, na.rm = T)/24200)*100
rse_p1
# FRACTIONAL POLYNOMIAL
# Fractional-polynomial fit of log(height) on age^2 and sqrt(age),
# one model per subject; keep the residual SE (log scale).
model8<- by(random_id, random_id$idno,
function(data)sigma(lm(log(height)~ I(age_m1^2)
+ I(age_m1^(1/2)),
data=data)))
rse<- model8
fractional<- sum(rse,na.rm = T)
fractional
nrow(na.omit(model8))
# Percent error relative to each subject's mean height (same as above).
rsd <- data.frame(matrix(unlist(rse), nrow=length(rse), byrow=TRUE))
colnames(rsd)[1]<- 'x'
m1<- aggregate(x=random_id$height,by=list(random_id$idno),FUN=mean )
m1<- select(m1, x)
mod<- rsd*100/m1$x
rse_p1<- (sum(mod,na.rm = T)/24300)*100
rse_p1
# FRACTIONAL POLYNOMIAL
# BUG FIX (no behaviour change): the formula listed I(age_m1^(2)) twice;
# lm() silently drops the aliased duplicate, so the second copy is removed.
# NOTE(review): the later fractional section uses I(age_m1^-2) -- possibly
# that power was intended as the third term here; confirm.
model9<- by(height1, height1$idno,
function(data)sigma(lm(height ~ I(age_m1^(2))+log(age_m1),
data=data)))
# BUG FIX: I(age_m1^1/2) parses as (age_m1^1)/2 because ^ binds tighter
# than /; the intended square-root term requires I(age_m1^(1/2)).
# NOTE(review): this exploratory fit models `weight` while the rest of the
# script models `height` -- confirm that is intentional.
mod<- lm(weight ~ I(age_m1^2)+ I(age_m1^(1/2)),data=random_id)
summary(mod)
rse<- model9
fractional<- sum(rse,na.rm = T)
fractional
nrow(na.omit(model9))
# Percent error relative to each subject's mean height (filtered subset).
rsd <- data.frame(matrix(unlist(rse), nrow=length(rse), byrow=TRUE))
colnames(rsd)[1]<- 'x'
m1<- aggregate(x=random_id$height,by=list(random_id$idno),FUN=mean )
m1<- select(m1, x)
mod<- rsd*100/m1$x
rse_p1<- (sum(mod,na.rm = T)/24300)*100
rse_p1
############################################################################
######## EXTRACT AVERAGE DISTANCE FOR AVG VS INDIVIDUAL PREDICTED VALUE ####
############################################################################
##################### spline regression ####################################
############################################################################
library(splines)
# Knots at the age quartiles, shared by the per-subject and pooled fits.
knots <- quantile(random_id$age_m1, p = c(0.25, 0.5, 0.75))
#################### spline regression model individual ########
####################### prediction value extraction ##############
###################################################################
# REMOVE NA'S FOR HEIGHT
# NOTE(review): `!= "NA"` works only because comparisons with NA propagate
# NA and which() drops them; `!is.na(...)` would be the clearer idiom.
random_id <- random_id[which(random_id$height != "NA"),]
# Per-subject fitted values from the spline model.
model7<- by(random_id, random_id$idno,
function(data)fitted.values(lm(height ~ bs(age_m1, knots = knots),
data=data)))
# One row per subject; subjects with fewer observations than the longest
# are padded with NA up to max(lengths(model7)).
spline_fitted<- data.frame(t(sapply(model7, function(x) x[1:max(lengths(model7))])))
head(spline_fitted)
spline <- spline_fitted
head(spline)
# Long format: one fitted value per row.
# NOTE(review): melt() of this frame yields variable/value columns; the
# `idno` column used by select() below presumably exists in the working
# environment (e.g. via preserved row names) -- confirm interactively.
sp<- melt(spline)
head(sp)
table(is.na(sp$value))
# Drop the NA padding rows.
d <- sp[which(sp$value != "NA"),]
d1<- select(d,idno, value)
# select only id and age
df<- select(random_id, idno, age_m1)
df1<- select(df,idno,age_m1)
length(unique(d1$idno))
length(unique(df1$idno))
d1 <- d1[order(d1[,"idno"], d1[,"value"]), ]
df1 <- df1[order(df1[,"idno"],df1[,"age_m1"]), ]
head(df1)
# Compare number of fitted values vs number of observed rows per subject.
d1_sum<- d1 %>% count(idno)
d1_sum$cat<- rep("predicted")
df1_sum<- df1 %>% count(idno)
df1_sum$cat<- rep("overall")
str(df1_sum)
str(d1_sum)
o <- rbind(df1_sum, d1_sum)
o1<- merge(df1_sum,d1_sum, by="idno")
str(o1)
# predicted minus overall
o1$dif<- (o1$n.x - o1$n.y)
sum(o1$dif)
str(df1)
str(d1)
#write.csv(df1,"main/results/output/ps_output/new/observed_ido.csv")
#write.csv(d1,"main/results/output/ps_output/new/fitted.spline.csv")
# Pooled ("average") spline fit across all subjects, for comparison with
# the per-subject fitted values extracted above.
mod<- lm(height ~ bs(age_m1, knots = knots),
data=random_id)
random_id$fit<- predict(mod, random_id)
random_id2<- data.frame(unique(random_id$fit))
random_id2 <- random_id2[order(random_id2[,"unique.random_id.fit."]), ]
#write.csv(random_id2,"main/results/output/ps_output/new/spline_fitted.avg.csv")
## calculating distance for individual vs overall average ####
##############################################################
# Reads a hand-assembled CSV pairing average and individual predictions.
spline <- read.csv("main/results/output/ps_output/new/spline/fitted.spline1.csv")
head(spline)
spline$dis<- spline$avg_predict-spline$ind_predict
# Total distance by age in months.
m1<- aggregate(x=spline$dis,by=list(spline$age_m1),FUN=sum )
#write.csv(m1,"main/results/output/ps_output/new/spline/spline_fit_bymonth.csv")
sum(m1$x)
########################################################
#########################################################
###################### linear regression ###############
#########################################################
# Mirrors the spline block above, using a plain linear fit instead.
model7<- by(random_id, random_id$idno,
function(data)fitted.values(lm(height ~ age_m1,
data=data)))
# One row per subject; shorter subjects padded with NA (see spline section).
spline_fitted<- data.frame(t(sapply(model7, function(x) x[1:max(lengths(model7))])))
head(spline_fitted)
spline <- spline_fitted
head(spline)
sp<- melt(spline)
head(sp)
table(is.na(sp$value))
# Drop NA padding rows (relies on NA propagation; see note in spline block).
d <- sp[which(sp$value != "NA"),]
d1<- select(d,idno, value)
# select only id and age
df<- select(random_id, idno, age_m1)
df1<- select(df,idno,age_m1)
length(unique(d1$idno))
length(unique(df1$idno))
d1 <- d1[order(d1[,"idno"], d1[,"value"]), ]
df1 <- df1[order(df1[,"idno"],df1[,"age_m1"]), ]
head(df1)
# Fitted vs observed row counts per subject.
d1_sum<- d1 %>% count(idno)
d1_sum$cat<- rep("predicted")
df1_sum<- df1 %>% count(idno)
df1_sum$cat<- rep("overall")
str(df1_sum)
str(d1_sum)
o <- rbind(df1_sum, d1_sum)
o1<- merge(df1_sum,d1_sum, by="idno")
str(o1)
# predicted minus overall
o1$dif<- (o1$n.x - o1$n.y)
sum(o1$dif)
str(df1)
str(d1)
write.csv(df1,"main/results/output/ps_output/new/observed_ido.csv")
write.csv(d1,"main/results/output/ps_output/new/fitted.spline.csv")
# Pooled linear fit across all subjects.
mod<- lm(height ~ age_m1,
data=random_id)
random_id$fit<- predict(mod, random_id)
random_id2<- data.frame(unique(random_id$fit))
#random_id2 <- random_id2[order(random_id2[,"unique.random_id.fit."]), ]
write.csv(random_id2,"main/results/output/ps_output/new/spline_fitted.avg.csv")
## calculating distance for individual vs overall average ####
##############################################################
# Hand-assembled CSV pairing average and individual predictions.
spline <- read.csv("main/results/output/ps_output/new/spline/fitted.spline2.csv")
head(spline)
spline$dis<- spline$avg_predict-spline$ind_predict
# Growth-velocity check at month 8 only.
growth_vel <- filter(spline, age_m1 %in% c("8"))
sum(growth_vel$dis)
m1<- aggregate(x=spline$dis,by=list(spline$age_m1),FUN=sum )
write.csv(m1,"main/results/output/ps_output/new/spline/linear_fit_bymonth.csv")
sum(m1$x)
########################################################
#########################################################
#########################################################
###################### quadratic regression ###############
#########################################################
# Mirrors the spline block above, using a quadratic fit instead.
# NA-height removal (see note in spline block about `!= "NA"`).
random_id <- random_id[which(random_id$height != "NA"),]
model7<- by(random_id, random_id$idno,
function(data)fitted.values(lm(height ~ I(age_m1)+I(age_m1^2),
data=data)))
# One row per subject; shorter subjects padded with NA.
spline_fitted<- data.frame(t(sapply(model7, function(x) x[1:max(lengths(model7))])))
head(spline_fitted)
spline<- spline_fitted
head(spline)
sp<- melt(spline)
head(sp)
table(is.na(sp$value))
d <- sp[which(sp$value != "NA"),]
d1<- select(d,idno, value)
# select only id and age
df<- select(random_id, idno, age_m1)
df1<- select(df,idno,age_m1)
length(unique(d1$idno))
length(unique(df1$idno))
d1 <- d1[order(d1[,"idno"], d1[,"value"]), ]
df1 <- df1[order(df1[,"idno"],df1[,"age_m1"]), ]
head(df1)
# Fitted vs observed row counts per subject.
d1_sum<- d1 %>% count(idno)
d1_sum$cat<- rep("predicted")
df1_sum<- df1 %>% count(idno)
df1_sum$cat<- rep("overall")
str(df1_sum)
str(d1_sum)
o <- rbind(df1_sum, d1_sum)
o1<- merge(df1_sum,d1_sum, by="idno")
str(o1)
# predicted minus overall
o1$dif<- (o1$n.x - o1$n.y)
sum(o1$dif)
str(df1)
str(d1)
write.csv(df1,"main/results/output/ps_output/new/spline/observed_ido.csv")
write.csv(d1,"main/results/output/ps_output/new/spline/fitted.spline.csv")
# Pooled quadratic fit across all subjects.
mod<- lm(height ~ age_m1+I(age_m1^2),
data=random_id)
summary(mod)
random_id$fit<- predict(mod, random_id)
random_id2<- data.frame(unique(random_id$fit))
#random_id2 <- random_id2[order(random_id2[,"unique.random_id.fit."]), ]
write.csv(random_id2,"main/results/output/ps_output/new/spline/spline_fitted.avg.csv")
## calculating distance for individual vs overall average ####
##############################################################
# Hand-assembled CSV pairing average and individual predictions.
spline <- read.csv("main/results/output/ps_output/new/spline/fitted.spline3.csv")
head(spline)
spline$dis<- spline$avg_predict-spline$ind_predict
# Growth-velocity check at month 8 only.
growth_vel <- filter(spline, age_m1 %in% c("8"))
sum(growth_vel$dis)
m1<- aggregate(x=spline$dis,by=list(spline$age_m1),FUN=sum )
write.csv(m1,"main/results/output/ps_output/new/spline/quadratic_fit_bymonth.csv")
sum(m1$x)
########################################################
#########################################################
#########################################################
###################### fractional regression ############
#########################################################
# Mirrors the spline block above, using a fractional-polynomial fit.
# Ages <= 0 must be excluded so log(age_m1) and age_m1^-2 are defined.
random_id <- random_id[which(random_id$height != "NA"),]
# NOTE(review): ages strictly between 0 and 1 match neither condition and
# become NA, so they are dropped by the filter below -- confirm intent.
random_id$age_cat <- ifelse(random_id$age_m1 <= 0 , "less than 0",
ifelse(random_id$age_m1 >= 1 , "others",NA))
table(random_id$age_cat)
##filter for multiple categories
height1 <- filter(random_id, age_cat %in% c("others"))
model7<- by(height1, height1$idno,
function(data)fitted.values(lm(height ~ age_m1+log(age_m1)+I(age_m1^-2),
data=data)))
# One row per subject; shorter subjects padded with NA.
spline_fitted<- data.frame(t(sapply(model7, function(x) x[1:max(lengths(model7))])))
head(spline_fitted)
spline <- spline_fitted
head(spline)
# NOTE(review): this renames the *first fitted-value column* to "idno",
# unlike the other sections -- confirm that is what melt() should see.
colnames(spline)[1]<- "idno"
sp<- melt(spline)
head(sp)
table(is.na(sp$value))
d <- sp[which(sp$value != "NA"),]
d1<- select(d,idno, value)
# select only id and age
df<- select(height1, idno, age_m1)
df1<- select(df,idno,age_m1)
#table(df1$age_m1)
#df1<- df1[!(df1$age_m1=="0"),] delete any value remove 0
#table(df1$age_m1)
length(unique(d1$idno))
length(unique(df1$idno))
d1 <- d1[order(d1[,"idno"], d1[,"value"]), ]
df1 <- df1[order(df1[,"idno"],df1[,"age_m1"]), ]
head(df1)
# Fitted vs observed row counts per subject.
d1_sum<- d1 %>% count(idno)
d1_sum$cat<- rep("predicted")
df1_sum<- df1 %>% count(idno)
df1_sum$cat<- rep("overall")
str(df1_sum)
str(d1_sum)
o <- rbind(df1_sum, d1_sum)
o1<- merge(df1_sum,d1_sum, by="idno")
str(o1)
# predicted minus overall
o1$dif<- (o1$n.x - o1$n.y)
sum(o1$dif)
str(df1)
str(d1)
write.csv(df1,"main/results/output/ps_output/new/spline/observed_ido.csv")
write.csv(d1,"main/results/output/ps_output/new/spline/fitted.spline.csv")
# Pooled fractional-polynomial fit on the filtered subset.
mod<- lm(height ~ age_m1+log(age_m1)+I(age_m1^-2),
data=height1)
summary(mod)
height1$fit<- predict(mod, height1)
random_id2<- data.frame(unique(height1$fit))
#random_id2 <- random_id2[order(random_id2[,"unique.random_id.fit."]), ]
write.csv(random_id2,"main/results/output/ps_output/new/spline/spline_fitted.avg.csv")
## calculating distance for individual vs overall average ####
##############################################################
# Hand-assembled CSV pairing average and individual predictions.
spline <- read.csv("main/results/output/ps_output/new/spline/fitted.spline4.csv")
head(spline)
spline$dis<- spline$avg_predict-spline$ind_predict
# Growth-velocity check at month 8 only.
growth_vel <- filter(spline, age_m1 %in% c("8"))
sum(growth_vel$dis)
m1<- aggregate(x=spline$dis,by=list(spline$age_m1),FUN=sum )
write.csv(m1,"main/results/output/ps_output/new/spline/fractional_fit_bymonth.csv")
sum(m1$x)
########################################################
#########################################################
|
bdedcaa0f8aaa79274f3b6c446ba3f3bd2016811 | 97a10d4612014d6f1b3cae279d70983f876b7586 | /man/do_gnomonic.Rd | 428d523698d1e231e901b3cf710f1c38d4412675 | [
"MIT"
] | permissive | mdsumner/pproj | c3dfe25404bb1c0063dd0c8960e5d48ff8559d93 | 9aa2c45c8efd71c5bc19ad1d6b58a93834cbbebb | refs/heads/main | 2023-02-06T08:47:59.650271 | 2023-02-05T22:55:54 | 2023-02-05T22:55:54 | 24,184,076 | 4 | 1 | null | null | null | null | UTF-8 | R | false | true | 336 | rd | do_gnomonic.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/gnomonic3d.R
\name{do_gnomonic}
\alias{do_gnomonic}
\title{Illustrate gnomonic}
\usage{
do_gnomonic()
}
\value{
nothing, side effect a 3D plot
}
\description{
May need 'rgl::rglwidget()' if you don't have interactive graphics.
}
\examples{
#do_gnomonic()
}
|
f83b77d4c3622ac8ff6e8794eb23978ec3d8fbdd | 34bc20cc310551137ed8ec787bd48d910db640ad | /VallelujaAnalysis.R | 0e1bcd8bff8f95c16285c5673a61ab13ce858dfc | [] | no_license | MrYeti1/raceRnalysis | dc05cf15a02c4845f3677995f2aa39ad55b0ad43 | 7f8c0a2507080eba4b529b0a9bc537d3390637a8 | refs/heads/master | 2020-03-19T14:36:50.931772 | 2020-03-09T14:05:31 | 2020-03-09T14:05:31 | 136,631,653 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,520 | r | VallelujaAnalysis.R | devtools::install("raceRnalysis")
library(raceRnalysis)
library(dplyr)
library(ggplot2)
# Roots & Rain AJAX endpoints for the overall results tables.
# NOTE: the second assignment overwrites the first, so only the
# UK Enduro National Champs event (race6668) is actually scraped;
# the Vallelujah URL (race6667) is kept here for reference only.
resultsUrl <- "https://www.rootsandrain.com/race6667/2018-mar-25-tweedlove-whyte-vallelujah-glentress/results/filters/overall/ajax/filtered-content/race-results?/filters/overall/&format=overall&sex=&s1=-1"
resultsUrl <- "https://www.rootsandrain.com/race6668/2018-jun-10-tweedlove-uk-enduro-national-champs-18-glentress/results/filters/overall/ajax/filtered-content/race-results?/filters/overall/&format=overall&sex=&s1=-1"
# Fetch the page and take the first HTML table (T1 = results grid).
webPageCurl <- RCurl::getURL(resultsUrl)
rawResultsTable <- XML::readHTMLTable(webPageCurl)$T1
# Truncated stage labels exactly as they appear in the scraped header row.
guessedStageNames <- c("Morning ...", "Fool's G...", "Scotland...", "F.E.A.R.", "Born Sli...", "There's ...")
# Normalise the raw table, split sex out of Category, and mark riders of interest.
normResultsTable <- rawResultsTable %>%
  raceRnalysis::normaliseResultsTable(guessedStageNames) %>%
  raceRnalysis::extractSex(Category) %>%
  raceRnalysis::highlightNames(Name, c("James CLEWS", "Rob GROVES", "Nicholas SMITH"))
resultsTable <- normResultsTable %>% raceRnalysis::stageTimesToSeconds()
resultsTable %>% glimpse
######
# Plot per-stage times.
resultsStages <- resultsTable %>% raceRnalysis::meltTimePerLap()
raceRnalysis::plotTimePerLap(resultsStages, eventTitle="Tweedlove British Enduro", outfile = "~/Desktop/tweed-champs-TimePerStage.png")
######
# Plot cumulative rank progression over stages.
rankedCumulative <- resultsTable %>% raceRnalysis::meltCumulativeRank() %>% rename(fullName = Name)
raceRnalysis::plotRankCumulative(rankedCumulative, eventTitle="Tweedlove British Enduro", outfile="~/Desktop/tweed-champs-rankedCumulative.png")
|
e55221d0eab361eadb99e841563ada08edde93d9 | 849d92ba959d1c7d3b06aeffccdd242a26604f75 | /R/hkHoliday.R | a16cbfcbb368993b40b409ed3a901dc8279b161e | [] | no_license | chainsawriot/hongkong | 2b9458f5474a2c6aef40d787f172002140a6fc88 | 02ccd4081199c6447f0070fb3a8487e9922b0c4a | refs/heads/master | 2021-01-13T02:22:17.239571 | 2017-01-06T15:54:05 | 2017-01-06T15:54:05 | 9,976,717 | 2 | 5 | null | null | null | null | UTF-8 | R | false | false | 6,700 | r | hkHoliday.R | #' Calculate Hong Kong public holidays
#'
#' This function calculates the public holidays in Hong Kong for a given year
#' @param yearInt The integer value of year
#' @param push Boolean, to push the public holidays in Sunday to the nearest workday
#' @param withSunday Boolean, also included all Sundays as public holidays
#' @export
hkHoliday <- function(yearInt, push=TRUE, withSunday=TRUE) {
  # return all the holidays in a given year
  # According to the General Holidays Ordinance
  # Builds a named Date vector of Hong Kong general holidays for yearInt.
  # Depends on lunarCal() (package-internal lunar calendar lookup) and on
  # lubridate's wday()/days().
  if (yearInt < 1999) {
    warning("hkHoliday may not be accurate for yearInt < 1999")
  }
  # Helper: build one holiday date (Gregorian or lunar) and optionally push
  # it off a Sunday onto the adjacent workday.
  composeHoliday <- function(day, mon, year, push=TRUE, lunar=FALSE, minus=FALSE) {
    if (lunar) {
      holidayDate <- lunarCal(c(Year=year, Month=mon, Day=day))
    } else {
      holidayDate <- as.Date(paste0(year, "-", mon, "-", day))
    }
    if (push) {
      holidayDate <- pushToWorkday(holidayDate, minus=minus)
    }
    return(holidayDate)
  }
  # Helper: Sundays (wday == 1) are shifted forward one day, or backward
  # when minus=TRUE (used for the day *after* Mid-Autumn).
  pushToWorkday <- function(holidayDate, minus=FALSE) {
    if (wday(holidayDate) == 1 & !minus) {
      holidayDate <- holidayDate + days(1)
    } else if (wday(holidayDate) == 1 & minus) {
      holidayDate <- holidayDate - days(1)
    }
    return(holidayDate)
  }
  Easter <- function(year) {
    ### calculate the Easter Sunday, taken from timeDate package
    ### Diethelm Wuertz, Yohan Chalabi and Martin Maechler with contributions from Joe W. Byers, and others
    ### GPL-2
    # Anonymous Gregorian (Meeus/Jones/Butcher) Easter algorithm.
    a <- year%%19
    b <- year%/%100
    c <- year%%100
    d <- b%/%4
    e <- b%%4
    f <- (b+8)%/%25
    g <- (b-f+1)%/%3
    h <- (19*a+b-d-g+15)%%30
    i <- c%/%4
    k <- c%%4
    l <- (32+2*e+2*i-h-k)%%7
    m <- (a+11*h+22*l)%/%451
    easter.month = (h+l-7*m+114)%/%31
    p <- (h+l-7*m+114)%%31
    easter.day = p+1
    return(as.Date(paste0(year, "-", easter.month, "-", easter.day)))
  }
  holidays <- c()
  ## 1st day of Jan
  holidays['firstDayJan'] <- composeHoliday(1, 1, yearInt, push, FALSE)
  class(holidays) <- "Date"
  ## lunar New Year
  # Three LNY days with their own substitution rule: from 2011 a Sunday
  # among them is replaced by the 4th lunar day; before 2011 the day is
  # moved back to the Saturday before LNY day 1.
  lnyDays <- c()
  for (i in 1:3) {
    toAdd <- lunarCal(c(Year=yearInt, Month=1, Day=i))
    if (wday(toAdd) == 1 & push) {
      if (yearInt >= 2011) { ### rule changed after 2011
        toAdd <- lunarCal(c(Year=yearInt, Month=1, Day=4))
      } else if (i == 1 & push) {
        toAdd <- toAdd - days(1)
      } else if (push) {
        toAdd <- lnyDays[1] - days(1)
      }
    }
    lnyDays[i] <- toAdd
    class(lnyDays) <- "Date"
  }
  holidays['lnyDay1'] <- lnyDays[1]
  holidays['lnyDay2'] <- lnyDays[2]
  holidays['lnyDay3'] <- lnyDays[3]
  ## Ching Ming
  # Day-of-April lookup table for Ching Ming, indexed by (yearInt - 1900).
  cmd <- c(5L, 6L, 6L, 5L, 5L, 6L, 6L, 5L, 5L, 6L, 6L, 5L, 5L, 5L, 6L,
           5L, 5L, 5L, 6L, 5L, 5L, 5L, 6L, 5L, 5L, 5L, 6L, 5L, 5L, 5L, 6L,
           5L, 5L, 5L, 6L, 5L, 5L, 5L, 6L, 5L, 5L, 5L, 6L, 5L, 5L, 5L, 5L,
           5L, 5L, 5L, 5L, 5L, 5L, 5L, 5L, 5L, 5L, 5L, 5L, 5L, 5L, 5L, 5L,
           5L, 5L, 5L, 5L, 5L, 5L, 5L, 5L, 5L, 5L, 5L, 5L, 4L, 5L, 5L, 5L,
           4L, 5L, 5L, 5L, 4L, 5L, 5L, 5L, 4L, 5L, 5L, 5L, 4L, 5L, 5L, 5L,
           4L, 5L, 5L, 5L, 4L, 5L, 5L, 5L, 4L, 5L, 5L, 5L, 4L, 4L, 5L, 5L,
           4L, 4L, 5L, 5L, 4L, 4L, 5L, 5L, 4L, 4L, 5L, 5L, 4L, 4L, 5L, 5L,
           4L, 4L, 5L, 5L, 4L, 4L, 5L, 5L, 4L, 4L, 5L, 5L, 4L, 4L, 4L, 5L,
           4L, 4L, 4L, 5L, 4L, 5L, 4L, 5L, 4L, 4L, 4L, 5L, 4L, 4L, 4L, 5L,
           4L, 4L, 4L, 5L, 4L, 4L, 4L, 5L, 4L, 4L, 4L, 5L, 4L, 4L, 4L, 4L,
           4L, 4L, 4L, 4L, 4L, 4L, 4L, 4L, 4L, 4L, 4L, 4L, 4L, 4L, 4L, 4L,
           4L, 4L, 4L, 4L, 4L, 4L, 4L, 4L, 5L)
  holidays['chingMing'] <- composeHoliday(cmd[yearInt - 1900], 4, yearInt, push, FALSE)
  ## Good Friday
  easterDate <- Easter(yearInt)
  holidays['goodFriday'] <- easterDate - days(2)
  ## Day after Good Friday
  holidays['dayAfterGoodFriday'] <- easterDate - days(1)
  ## Easter Monday
  holidays['easterMonday'] <- easterDate + days(1)
  ## Labour Day
  holidays['labourDay'] <- composeHoliday(1, 5, yearInt, push, FALSE)
  ## Buddha birthday
  holidays['buddhaBirthday'] <- composeHoliday(8, 4, yearInt, push, TRUE)
  ## Tuen Ng
  holidays['tuenNg'] <- composeHoliday(5, 5, yearInt, push, TRUE)
  ## HKSAR establishment day
  holidays['firstJuly'] <- composeHoliday(1, 7, yearInt, push, FALSE)
  ## National Day
  holidays['natlDay'] <- composeHoliday(1, 10, yearInt, push, FALSE)
  ## day after mid autumn
  # minus=TRUE: a Sunday here is observed on the Saturday before.
  holidays['midAutumn'] <- composeHoliday(16, 8, yearInt, push, TRUE, TRUE)
  ## chung Yeung
  holidays['chungYeung'] <- composeHoliday(9, 9, yearInt, push, TRUE)
  ## X'mas
  ## first weekday after X'mas
  xmasDay <- as.Date(paste0(yearInt, "-12-25"))
  if (wday(xmasDay) == 1 & push) {
    xmasDay <- xmasDay + days(2)
  }
  holidays['xmasDay'] <- xmasDay
  holidays['xmasDayAfter'] <- composeHoliday(26, 12, yearInt, push, FALSE)
  ### detect collisions : very messy, need a better logic
  # When pushing created two holidays on the same date, shift the first of
  # the pair forward one day. Only pairs (length 2) are handled.
  z <- as.data.frame(table(holidays), stringsAsFactors=FALSE)
  ##print(z)
  if (sum(z$Freq != 1) != 0 & push) { ## Collision detected
    colliedDates <- z$holidays[z$Freq > 1]
    ##print(colliedDates)
    for (i in 1:length(colliedDates)) {
      whichCollied <- names(holidays)[holidays == colliedDates[i]]
      ##print(whichCollied)
      if (length(whichCollied) == 2) {
        toMove <- whichCollied[1]
        holidays[toMove] <- holidays[toMove] + days(1)
      }
    }
  }
  # One-off holiday gazetted for the millennium rollover.
  if (yearInt==1999) {
    holidays['milleniumBug'] <- as.Date('1999-12-31')
  }
  if (withSunday) {
    allDates <- seq(as.Date(paste0(yearInt, "-01-01")), as.Date(paste0(yearInt,"-12-31")), by="1 day")
    allSundays <- allDates[wday(allDates) == 1]
    for (i in 1:length(allSundays)) {
      holidays[paste0("Sunday #", i)] <- allSundays[i]
    }
  }
  return(sort(holidays))
}
#' Calculate Hong Kong public holidays
#'
#' This function determines whether the Date objects in x are public holidays in Hong Kong
#' @param x Vector of Date object
#' @param push Boolean, to push the public holidays in Sunday to the nearest workday
#' @param withSunday Boolean, also included all Sundays as public holidays
#' @return logical vector
#' @note holidays are computed once per unique year appearing in \code{x}
#' @examples
#' data(hkweiboscope)
#' hkweiboscope$hkHoliday <- is.hkHoliday(hkweiboscope$date)
#' plot(x=hkweiboscope$date, y=hkweiboscope$count, col=ifelse(hkweiboscope$hkHoliday, 2, 1))
#' @seealso \code{\link{hkHoliday}}
#' @export
is.hkHoliday <- function(x, push=TRUE, withSunday=TRUE) {
  # Empty input maps to logical(0) (the old sapply() version returned list()).
  if (length(x) == 0) return(logical(0))
  # PERF FIX: the previous implementation re-created a memoised copy of
  # hkHoliday() on every call and then invoked it element by element via
  # sapply(), so nothing was cached across calls. Building each year's
  # holiday calendar exactly once makes the cost O(#unique years) instead
  # of O(#dates).
  yrs <- unique(year(x))
  holidaySet <- do.call(c, lapply(yrs, hkHoliday, push = push, withSunday = withSunday))
  # %in% never returns NA, matching the element-wise behaviour of the old code.
  x %in% holidaySet
}
|
3bbe8d0e9282238de5efa6a97abbe9afe30e330d | 4d36492368e067bdc821b2ee8bc5a9d524458c9a | /tests/testthat/test_contains.R | ae55073e94096185027cdb15d751468d5133a9c9 | [] | no_license | cran/processcheckR | 27e177bb799cb9ebfe4708ea56c9bb99cbd1685f | bb4e4523ae7b65f4f37adcf3eb1380735f8887a3 | refs/heads/master | 2022-10-15T01:29:09.022611 | 2022-10-03T08:40:08 | 2022-10-03T08:40:08 | 152,093,595 | 1 | 0 | null | null | null | null | UTF-8 | R | false | false | 4,493 | r | test_contains.R |
#### eventlog ####
test_that("test contains on eventlog with arg n = 1", {
  load("./testdata/patients.rda")
  # Apply the rule directly rather than through the pipe.
  res <- check_rule(patients, contains("check-in", 1))
  expect_s3_class(res, "eventlog")
  expect_equal(dim(res), c(nrow(patients), ncol(patients) + 1))
  expect_equal(colnames(res), c(colnames(patients), "contains_check_in_1"))
  # George Doe is the only patient without a "check-in" activity.
  expect_true(all(res[res$patient != "George Doe", ]$contains_check_in_1))
  expect_equal(res[res$patient == "George Doe", ]$contains_check_in_1, FALSE)
})
test_that("test contains on eventlog with arg n > 1", {
  load("./testdata/patients.rda")
  res <- check_rule(patients, contains(activity = "surgery", n = 2))
  expect_s3_class(res, "eventlog")
  expect_equal(dim(res), c(nrow(patients), ncol(patients) + 1))
  expect_equal(colnames(res), c(colnames(patients), "contains_surgery_2"))
  # John Doe is the only patient with "surgery" occurring twice.
  expect_true(all(res[res$patient == "John Doe", ]$contains_surgery_2))
  expect_false(any(res[res$patient != "John Doe", ]$contains_surgery_2))
})
test_that("test contains on eventlog fails on non-existing activity", {
  load("./testdata/patients.rda")
  expect_error(
    check_rule(patients, contains("blood sample", 1)),
    "*Activity blood sample not found in log*")
})
test_that("test contains on eventlog fails on arg n = 0", {
  load("./testdata/patients.rda")
  expect_error(
    check_rule(patients, contains("check-in", 0)),
    "*n should be greater than or equal to 1*")
})
test_that("test contains on grouped_eventlog", {
  load("./testdata/patients_grouped_resource.rda")
  res <- check_rule(patients_grouped_resource, contains("check-in", 1))
  expect_s3_class(res, "grouped_eventlog")
  expect_equal(dim(res), c(nrow(patients_grouped_resource), ncol(patients_grouped_resource) + 1))
  expect_equal(colnames(res), c(colnames(patients_grouped_resource), "contains_check_in_1"))
  # Grouping must survive the rule check.
  expect_equal(groups(res), groups(patients_grouped_resource))
  # George Doe is the only patient without a "check-in" activity.
  expect_true(all(res[res$patient != "George Doe", ]$contains_check_in_1))
  expect_equal(res[res$patient == "George Doe", ]$contains_check_in_1, FALSE)
})
#### activitylog ####
test_that("test contains on activitylog with arg n = 1", {
  load("./testdata/patients_act.rda")
  res <- check_rule(patients_act, contains("check-in", 1))
  expect_s3_class(res, "activitylog")
  expect_equal(dim(res), c(nrow(patients_act), ncol(patients_act) + 1))
  expect_equal(colnames(res), c(colnames(patients_act), "contains_check_in_1"))
  # George Doe is the only patient without a "check-in" activity.
  expect_true(all(res[res$patient != "George Doe", ]$contains_check_in_1))
  expect_equal(res[res$patient == "George Doe", ]$contains_check_in_1, FALSE)
})
test_that("test contains on activitylog with arg n > 1", {
  load("./testdata/patients_act.rda")
  res <- check_rule(patients_act, contains(activity = "surgery", n = 2))
  expect_s3_class(res, "activitylog")
  expect_equal(dim(res), c(nrow(patients_act), ncol(patients_act) + 1))
  expect_equal(colnames(res), c(colnames(patients_act), "contains_surgery_2"))
  # John Doe is the only patient with "surgery" occurring twice.
  expect_true(all(res[res$patient == "John Doe", ]$contains_surgery_2))
  expect_false(any(res[res$patient != "John Doe", ]$contains_surgery_2))
})
test_that("test contains on grouped_activitylog", {
  load("./testdata/patients_act_grouped_resource.rda")
  res <- check_rule(patients_act_grouped_resource, contains("check-in", 1))
  expect_s3_class(res, "grouped_activitylog")
  expect_equal(dim(res), c(nrow(patients_act_grouped_resource), ncol(patients_act_grouped_resource) + 1))
  expect_equal(colnames(res), c(colnames(patients_act_grouped_resource), "contains_check_in_1"))
  # Grouping must survive the rule check.
  expect_equal(groups(res), groups(patients_act_grouped_resource))
  # George Doe is the only patient without a "check-in" activity.
  expect_true(all(res[res$patient != "George Doe", ]$contains_check_in_1))
  expect_equal(res[res$patient == "George Doe", ]$contains_check_in_1, FALSE)
})
21c1a73522f8e19bb2094a3f596fdd97a0026c2f | adc56496cdb5b8a6a446b61f23a144cc069ed062 | /R/qog.R | c51c132ffc7b5822086a08d51f5d4e5ca58060fb | [] | no_license | qianmingax/qogdata | d9c060a046c9eab44a1e4f78db8d0d5fd6615ece | 3700ec9c0c6554d9de7bd93aaa0a1270f1052719 | refs/heads/master | 2021-01-18T17:08:31.369984 | 2013-11-09T14:11:35 | 2013-11-09T14:11:35 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 17,869 | r | qog.R | #' Get Quality of Government datasets in \code{xtdata} format
#'
#' Function to download Quality of Government (QOG) data and load it as a data
#' frame in R. The result carries an \code{\link{xtdata}} attribute that can be
#' passed to the \code{\link{xtmerge}} panel data method. Please visit the
#' QOG Institute website at \url{http://www.qog.pol.gu.se/} for a presentation
#' of QOG research.
#'
#' @export
#' @aliases get_qog
#' @param file a filename to save the dataset at.
#' If set to \code{FALSE} (the default), the function just returns the link
#' to the dataset.
#' If set to \code{TRUE}, the server filename of the dataset is used, which
#' returns either a CSV file if \code{version} is set to \code{std}, or
#' a Stata \code{dta} file otherwise. See 'Details'.
#' @param replace whether to overwrite the dataset even if a file already
#' exists at the download location. Defaults to \code{FALSE}.
#' @param path a folder path to prepend to the filename and to the codebook
#' if \code{codebook} is not \code{FALSE}.
#' @param version the QOG version:
#' \code{std} (Standard), \code{soc} (Social Policy), \code{bas} (Basic)
#' or \code{exp} (Expert). Defaults to \code{std}. See 'Details'.
#' @param format the QOG format, usually \code{cs} for cross-sectional data
#' or \code{ts} for time series in the \code{std} and \code{bas} versions.
#' See 'Details' for the full list of specifications. Defaults to \code{cs}.
#' @param codebook whether to download the codebook. Calls \code{qogbook} by
#' passing the \code{codebook}, \code{version} and \code{path} arguments to it,
#' where \code{codebook} is treated as the filename for the codebook.
#' Defaults to \code{FALSE}.
#' @param variables a selection of variables to import. \code{ccode} ISO-3N
#' country codes \code{ccode} and \code{year} identifiers will be forced into
#' the output if relevant.
#' @param years a selection of years to import. Effective only with
#' the \code{ts}, \code{tsl} or \code{ind} formats.
#' @param ... other arguments supplied to the import method, which is
#' \code{read.csv} by default,
#' or \code{\link[foreign]{read.dta}} if \code{file} is a Stata \code{dta} dataset,
#' or \code{\link[foreign]{read.spss}} if \code{file} is a SPSS \code{sav} dataset.
#' @details This version of the package handles all four QOG datasets:
#' \tabular{lcl}{
#' QOG Standard \tab \code{std} \tab 15 May 2013\cr
#' QOG Social Policy \tab \code{soc} \tab 4 April 2012\cr
#' QOG Basic \tab \code{bas}): \tab 28 March 2011\cr
#' QOG Expert Survey \tab \code{exp} \tab 3-6 September 2012\cr
#' URL: \tab \tab \url{http://www.qog.pol.gu.se}\cr
#' }
#'
#' Each QOG dataset is available in a variety of data formats:
#'
#' \itemize{
#' \item QOG datasets \code{std} and \code{bas}
#' require format \code{cs} (cross-section)
#' or \code{ts} (time series).
#' \item QOG dataset \code{soc}
#' requires format \code{cs}, \code{tsl} (time series, long)
#' or \code{tsw} (time series, wide)
#' \item QOG dataset \code{exp}
#' requires format \code{cntry} (country-level)
#' or \code{ind} (individual survey)
#' }
#'
#' The QOG Standard series comes in CSV, SPSS and Stata file formats, CVS being
#' the only format that contains numeric codes instead of QOG value labels.
#' Datasets outside of the QOG Standard series are available only as Stata items
#' and require that \code{file} ends in \code{.dta} when \code{version} is not
#' \code{std}. The only exception is dataset \code{csyom}, which automatically
#' sets \code{version} to \code{std} and requires \code{file} to end
#' in \code{.csv}. Filenames with inadequate extensions will be modified to
#' conform to these expectations if they do not.
#' @seealso \code{\link{qogbook}}, \code{\link[foreign]{read.dta}}, \code{\link[foreign]{read.spss}}
#' @author Francois Briatte \email{f.briatte@@ed.ac.uk}
#' @examples
#' # Show URL to QOG Standard cross-section.
#' qogdata()
#' # Show URL to QOG Social Policy time series, long format.
#' qogdata(version = "soc", format = "tsl")
#' ## Download codebook and recent years from QOG Basic cross-section (not run).
#' # QOG = qogdata(file = "qog.cs.txt", version = "bas", format = "cs",
#' # years = 2002:2012, codebook = TRUE)
#' ## Download QOG Standard cross-section years of measurement (not run).
#' # QOG = qogdata(tempfile(fileext = ".csv"), format = "csyom")
#' ## Show QOG years of measurement for Gini coefficient (not run).
#' # table(QOG$wdi_gini)
#' @keywords qog
qogdata <- function(file = FALSE, replace = FALSE, codebook = FALSE, path = "",
                    version = "std", format = "cs",
                    variables = NULL, years = NULL, ...) {
  # foreign::read.dta / read.spss are needed for the non-CSV formats.
  try_require("foreign")
  #
  # currently available
  #
  # Map of dataset version -> the data formats that version is published in.
  versions = list(
    std = c("ts", "cs", "csyom"),
    bas = c("ts", "cs"),
    exp = c("ctry", "ind"),
    soc = c("cs", "tsl", "tsw"))
  #
  # correct version
  #
  if(!version %in% names(versions)) {
    stop("Invalid version: use one of ",
         paste0(names(versions), collapse = ", "))
  }
  #
  # correct format
  #
  # csyom exists only in the Standard version as CSV, so force both.
  if(format == "csyom") {
    version = "std"
    if(!grepl(".csv$|.txt$", file))
      file = gsub("(\\.|\\w){4}$", ".csv", file)
  }
  if(!format %in% unlist(versions[version])) {
    stop("Invalid format: use one of ",
         paste0(unlist(versions[version]), collapse = ", "))
  }
  #
  # automatic filename
  #
  # file = TRUE: reconstruct the server-side filename (CSV for std,
  # Stata .dta otherwise).
  if(isTRUE(file)) {
    file = paste0("qog_",
                  version,
                  "_",
                  format,
                  ifelse(version == "std",
                         paste0("_", "15May13.csv"),
                         ".dta")
    )
  }
  else {
    # Non-Standard versions are distributed as Stata files only; coerce
    # the user-supplied extension if needed and warn about it.
    # NOTE(review): scalar condition uses elementwise `&`; fine for
    # length-1 inputs but `&&` would be the safer idiom.
    if(is.character(file) & version != "std" & !grepl(".dta$", file)) {
      file = gsub("(\\.|\\w){4}$", ".dta", file)
      warning("QOG datasets other than std are available only as Stata files.\n",
              " The filename that you specified was modified to ", file)
    }
  }
  if(is.character(path))
    if(nchar(path) > 0) file = paste(path, file, sep = "/")
  #
  # online source
  #
  link = paste0("http://www.qogdata.pol.gu.se/data/",
                ifelse(version == "std", "QoG", "qog"),
                "_", version, "_", format,
                ifelse(version == "std", paste0("_", "15May13"), ""),
                ifelse(version == "std" & grepl("csv|dta|sav", file),
                       substring(file, nchar(file) - 3),
                       ".dta")
  )
  # file is still logical only when file = FALSE: just return the URL.
  if(is.logical(file)) {
    return(link)
  }
  else {
    if(replace || !file.exists(file)) {
      message("Downloading ", link, "...")
      download.file(link, file, mode = "wb", quiet = TRUE)
    }
    else {
      message("Loading from disk...")
    }
  }
  #
  # reader call
  #
  # Pick the reader from the file extension; default is semicolon CSV.
  read = "read.csv"
  args = list(file = file, ...)
  # foreign or not
  if(!grepl(".dta$|.sav$", file))
    args["sep"] = ";"
  # stata args
  if(grepl(".dta$", file)) {
    read = "read.dta"
    if(is.null(unlist(args["warn.missing.labels"])))
      args["warn.missing.labels"] = FALSE
  }
  # spss args
  if(grepl(".sav$", file)) {
    read = "read.spss"
    if(is.null(unlist(args["to.data.frame"])))
      args["to.data.frame"] = TRUE
  }
  data = do.call(read, args)
  #
  # selected variables
  #
  # Candidate country identifiers, in order of preference.
  uids = c("ccode", "ccodealp", "cname", "ccodecow", "ccodewb")
  pids = uids %in% names(data)
  # avoid ts bug
  if(grepl("ts|tsl", format) & !any(pids))
    stop("You are trying to load a QOG dataset as time series, but it has no identifier variable.")
  # avoid ts bug
  if(grepl("ts|tsl", format) & !"year" %in% names(data))
    stop("You are trying to load a QOG dataset as time series, but it has no year variable.")
  # Force the panel identifiers into any user-supplied variable selection.
  if(!is.null(variables)) {
    if(grepl("ts|tsl", format) & !"year" %in% variables) {
      warning("Forcing year identifier into the dataset.")
      variables = c("year", variables)
    }
    if(grepl("std|bas|soc", version) & !"ccode" %in% variables) {
      warning("Forcing ccode identifier into the dataset.")
      variables = c("ccode", variables)
    }
    data = data[, names(data) %in% variables]
  }
  #
  # selected years
  #
  if(!is.null(years) && format %in% c("ts", "tsl", "ind"))
    data = data[data$year %in% years, ]
  #
  # message
  #
  message("Loaded ", file, " (N = ", nrow(data),
          ifelse(format %in% c("ts", "tsl", "ind"),
                 paste0(", ", min(data$year),
                        "-", max(data$year),
                        ", T = ", length(unique(data$year))),
                 ""),
          ").")
  #
  # grab codebook
  #
  if(isTRUE(codebook) || grepl(".pdf", codebook))
    qogbook(codebook, version, path, replace)
  #
  # xtdata spec
  #
  # Attach the xtdata attribute so the result can be merged with xtmerge().
  pids = uids[pids]
  if(format == "ts" | format == "tsl") {
    data = xtset(data,
                 data = c(pids[1], "year", pids[-1]),
                 spec = c("iso3n", "year"),
                 type = "country",
                 name = "Quality of Government, time series data"
    )
  }
  #
  # finish line
  #
  return(data)
}
#' Download Quality of Government codebooks
#'
#' Function to download Quality of Government (QOG) codebooks. Please visit
#' the QOG Institute website at \url{http://www.qog.pol.gu.se/} for a
#' presentation of QOG research.
#'
#' @export
#' @param file a filename to save the codebook at.
#' If set to \code{TRUE}, the name of the codebook on the QOG server will be used.
#' If set to \code{FALSE} (the default), the function only returns the link to
#' the dataset. The filename must end in \code{.pdf}.
#' @param replace whether to download the dataset even if a file already exists
#' at the download location. Defaults to \code{FALSE}.
#' @param path a folder path to append to the filename.
#' @param version the QOG version: \code{std} (Standard),
#' \code{soc} (Social Policy), \code{bas} (Basic) or \code{exp} (Expert).
#' Defaults to \code{std}.
#' @details The function mimics Richard Svensson's \code{qogbook} Stata command.
#' @seealso \code{\link{qogdata}}
#' @author Francois Briatte \email{f.briatte@@ed.ac.uk}
#' @examples
#' # Show the URL to the QOG Standard dataset codebook.
#' qogbook()
#' ## Download QOG Standard codebook with default filename (not run).
#' # qogbook(file = TRUE)
#' ## Download QOG Basic dataset codebook to specific filename (not run).
#' # qogbook(file = "qog.basic.codebook.pdf", version = "bas")
#' @keywords qog
qogbook <- function(file = FALSE, version = "std", path = "", replace = FALSE) {
  # Only the Standard and Basic codebooks are hosted as PDFs.
  if (!version %in% c("std", "bas"))
    stop("Codebook available only for versions bas, std")
  # Resolve the download link for the requested version.
  link <- "http://www.qogdata.pol.gu.se/data/Codebook_QoG_Std15May13.pdf"
  if (version == "bas") {
    link <- "http://www.qogdata.pol.gu.se/codebook/codebook_basic_20120608.pdf"
  }
  # file = TRUE means "use the filename used on the QOG server".
  if (isTRUE(file) && version == "bas") {
    file <- "codebook_basic_20120608.pdf"
  } else if (isTRUE(file)) {
    file <- "Codebook_QoG_Std15May13.pdf"
  }
  # Prepend the folder path only once we actually hold a filename.
  # The original code also pasted the path onto the logical FALSE,
  # producing the bogus filename "path/FALSE" and a spurious error
  # instead of returning the link.
  if (is.character(file) && is.character(path) && nchar(path) > 0) {
    file <- paste(path, file, sep = "/")
  }
  if (is.logical(file)) {
    # file = FALSE: only report the link, download nothing.
    return(link)
  } else if (!grepl("\\.pdf$", file)) {
    # Escaped dot: the filename must literally end in ".pdf",
    # as documented; the unescaped pattern also matched e.g. "xpdf".
    stop("Please specify a .pdf codebook filename or set file = TRUE.")
  } else {
    # Download unless the file already exists (or replace = TRUE).
    if (replace || !file.exists(file)) {
      message("Downloading codebook to ", file, "...")
      download.file(link, file, mode = "wb", quiet = TRUE)
    }
    message("Codebook: ", file)
  }
}
#' Find Quality of Government variables
#'
#' Function to perform a \code{regex} search on QOG variable names and labels.
#' A few labels are missing for strictly cross-sectional variables.
#'
#' @export
#' @param ... keywords or \code{regex} phrases passed to \code{grepl}.
#' @param version the QOG version to search: either \code{std} (the default), \code{soc} or \code{bas}.
#' @param compact whether to limit the labels returned to 32 characters. Defaults to \code{TRUE} for better console output.
#' @param show which variables to show for years of measurement: \code{cs} (cross-sectional), \code{ts} (time series), or \code{all} (the default).
#' @return a data frame containing matching variables, described by their names, labels and years of measurement in the time series (\code{ts}) and cross-sectional (\code{cs}) datasets. The information should match the ranges indicated in the \emph{QOG Standard Codebook} and \emph{QOG Social Policy Codebook}.
#' @references
#' Svensson, Richard, Stefan Dahlberg, Staffan Kumlin & Bo Rothstein.
#' 2012. \emph{The QoG Social Policy Dataset}, version 4Apr12.
#' University of Gothenburg: The Quality of Government Institute,
#' \url{http://www.qog.pol.gu.se}.
#'
#' Teorell, Jan, Nicholas Charron, Stefan Dahlberg, Soren Holmberg,
#' Bo Rothstein, Petrus Sundin & Richard Svensson. 2013.
#' \emph{The Quality of Government Dataset}, version 15May13.
#' University of Gothenburg: The Quality of Government Institute,
#' \url{http://www.qog.pol.gu.se}.
#' @seealso \code{\link{grep}}, \code{\link{qogdata}}
#' @author Francois Briatte \email{f.briatte@@ed.ac.uk}
#' @examples
#' # QOG Standard search.
#' qogfind("regime", "institutions")
#' # QOG Standard search, with regex syntax.
#' qogfind("public|administration")
#' # QOG Social Policy search, showing cross-sectional information only.
#' head(qogfind("^socx", version = "soc", show = "cs", compact = FALSE))
#' # QOG Standard variables featured only in the cross-sectional version.
#' qogfind("*")[is.na(qogfind("*")$ts.N), ]
#' @keywords qog
qogfind <- function(..., version = "std", compact = TRUE, show = "all") {
  # Fail early with a clear message; the original fell through all
  # three if-blocks for an unknown version and raised the cryptic
  # "object 'r' not found".
  if (!version %in% c("std", "soc", "bas"))
    stop("version must be one of: std, soc, bas")
  # Collapse all keywords into a single regex alternation.
  x <- paste0(c(...), collapse = "|")
  # Loads qog.std.index / qog.soc.index / qog.bas.index.
  data(qog.index)
  # Match the pattern against variable names and labels (case-insensitive).
  if (version == "std") {
    message("QOG Standard results")
    r <- qog.std.index[grepl(x, qog.std.index$variable, ignore.case = TRUE) |
                       grepl(x, qog.std.index$label, ignore.case = TRUE), ]
  } else if (version == "soc") {
    message("QOG Social Policy results")
    r <- qog.soc.index[grepl(x, qog.soc.index$variable, ignore.case = TRUE) |
                       grepl(x, qog.soc.index$label, ignore.case = TRUE), ]
  } else {
    message("QOG Basic results")
    r <- qog.bas.index[grepl(x, qog.bas.index$variable, ignore.case = TRUE) |
                       grepl(x, qog.bas.index$label, ignore.case = TRUE), ]
  }
  r$variable <- as.character(r$variable)
  # Truncate labels for compact console output.
  if (compact) r$label <- substr(r$label, 1, 32)
  # Drop time-series or cross-sectional summary columns on request
  # ("ts." / "cs." are prefixes of those column names).
  if (show == "cs") r <- r[, !grepl("ts.", names(r))]
  if (show == "ts") r <- r[, !grepl("cs.", names(r))]
  return(r)
}
#' Join historical and recent states in QOG Standard time series data
#'
#' Function to join historical and recent state observations in the QOG Standard time series data into a single, continuous country series.
#'
#' @export
#' @param data a QOG Standard time series dataset, or any data frame with \code{cname} (country) and \code{year} information coded as in the QOG Standard time series dataset.
#' @param country the country name to join data over. Requires the \code{cname} variable.
#' @details The function will try to find two series of country-year observations that both match the \code{country} argument. Within the QOG Standard time series dataset, this will match historical states like "France (-1962)" to modern states like "France (1963-)". The function will then create a new variable out of both series, joined at their separation years, and set its country code attributes to the most recent ones. See Appendix A of the \emph{QOG Standard Codebook} for details on historical states in the QOG Standard time series dataset.
#' @return a data frame with country-year observations
#' @author Francois Briatte \email{f.briatte@@ed.ac.uk}
#' @references Teorell, Jan, Nicholas Charron, Stefan Dahlberg, Soren Holmberg,
#' Bo Rothstein, Petrus Sundin & Richard Svensson. 2013.
#' \emph{The Quality of Government Dataset}, version 15May13.
#' University of Gothenburg: The Quality of Government Institute,
#' \url{http://www.qog.pol.gu.se}.
#' @examples
#' # Load QOG demo datasets.
#' data(qog.demo)
#' QOG = qog.ts.demo
#' QOG = qogjoin(QOG, "Ethiopia")
#' QOG = qogjoin(QOG, "France")
#' QOG = qogjoin(QOG, "Malaysia")
#' QOG = qogjoin(QOG, "Pakistan")
#' @keywords qog
qogjoin <- function(data, country = NULL) {
  # The join needs a country name column and a year column.
  stopifnot("cname" %in% names(data))
  stopifnot("year" %in% names(data))
  stopifnot(is.character(country))
  # All cname values containing the requested name, e.g.
  # "France (-1962)" and "France (1963-)".
  x = data$cname[grepl(country, data$cname)]
  x = unique(x)
  # Exactly two matches are required: one historical, one modern state.
  if(length(x) == 0)
    stop("No country match for ", country)
  else if(length(x) == 1)
    stop("Single country match for ", country)
  else if(length(x) > 2)
    stop("More than two country matches: ", paste0(x, collapse = ", "))
  # Save the panel (xtdata) attributes so they can be restored at the end.
  t = xt(data)
  # Strip non-digits from the labels to recover the separation years;
  # min/max are the names of the earlier and later state respectively.
  y = as.numeric(gsub("\\D", "", x))
  min = x[order(y) == 1]
  max = x[order(y) == 2]
  message("Joining ", min, " to ", max)
  # Historical state up to its last year, modern state from its first year.
  one = data[data$cname == min & data$year <= y[order(y) == 1], ]
  two = data[data$cname == max & data$year >= y[order(y) == 2], ]
  new = rbind(one, two)
  new$cname = country
  # country codes: carry the modern state's identifiers over the whole
  # joined series so the country keeps a single, current code.
  if("ccode" %in% names(data))
    new$ccode = unique(data$ccode[data$cname == max])
  if("ccodealp" %in% names(data))
    new$ccodealp = unique(data$ccodealp[data$cname == max])
  if("cname_year" %in% names(data))
    new$cname_year = paste(new$cname, new$year)
  if("ccodealp_year" %in% names(data))
    new$ccodealp_year = paste(new$ccodealp, new$year)
  if("ccodewb" %in% names(data))
    new$ccodewb = unique(data$ccodewb[data$cname == max])
  message(country, " now runs over ", nrow(new), " country-year observations, ",
    paste0(range(new$year), collapse = "-"))
  # Overwrite the modern state's rows with the joined series and drop
  # the historical state, then refresh the cname factor levels.
  # NOTE(review): the assignment below assumes nrow(new) equals the
  # number of existing rows for the modern state (i.e. both states span
  # the full year range with NAs outside their lifetime) -- confirm.
  data[, "cname"] = as.character(data[, "cname"])
  data[data$cname == max, ] = new
  data = subset(data, cname != min)
  data[, "cname"] = factor(data[, "cname"])
  # Restore the xtdata panel attributes saved earlier.
  data = xtset(data,
    data = t$data,
    spec = t$spec,
    name = t$name,
    url = t$url,
    quiet = FALSE)
  return(data)
}
|
93b7d59390304c66b1fe2a786cdc452e8d9335fd | 2b08ece86eaffc1f19966bac10349fc8e1a31adf | /volcano2.r | 64a90a1292286947edbc55d283a2d007ec99bb69 | [] | no_license | pengweixing/FFPE-ATAC | 9450f4eabade3298bdb16677ba5984f3d45a9b64 | 107bd6ee771c3ecacf0c57ec5d25cac2ab709b4e | refs/heads/master | 2023-05-28T12:00:04.462962 | 2021-06-13T09:51:23 | 2021-06-13T09:51:23 | 374,730,658 | 3 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,533 | r | volcano2.r | #################################################
# File Name:volcano.r
# Author: xingpengwei
# Mail: xingwei421@qq.com
# Created Time: Sun 30 May 2021 11:31:54 AM UTC
#################################################
library(ggpubr)
library(ggthemes)
args = commandArgs(T)
deg.data <- read.table(args[1],header=T,sep="\t")
deg.data$logP <- -log10(deg.data$padj)
#ggscatter(deg.data,x="logFC",y="logP")+theme_base()
deg.data$Group = "not-significant"
deg.data$Group[which((deg.data$padj < 0.01) & (deg.data$log2FoldChange > 3))] = paste0(args[2])
deg.data$Group[which((deg.data$padj < 0.01) & (deg.data$log2FoldChange < -3))] = paste0(args[3])
deg.data$Group=factor(deg.data$Group,levels = c(args[2],"not-significant",args[3]))
deg.data2 = deg.data[deg.data$Group==args[2]|deg.data$Group==args[3],]
write.table(deg.data2,file="significant_peak_xls",sep="\t",quote=FALSE)
mytable=as.data.frame(table(deg.data$Group))
p=ggscatter(deg.data, x = "log2FoldChange", y = "logP", color = "Group",
palette = c("#2f5688", "#BBBBBB", "#CC0000"), size = 1,
#label = deg.data$Label, font.label = 8, repel = T,
xlab = "log2FoldChange", ylab = "-log10(Adjust P-value)",)+
theme_base()+geom_hline(yintercept = 2, linetype="dashed")+
geom_vline(xintercept = c(-3,-3), linetype="dashed")+annotate("text", x = 5, y = 10, label = mytable[1,2])+
annotate("text", x = 0, y = 10, label = mytable[2,2])+annotate("text", x = -5, y = 10, label = mytable[3,2])
ggsave("all_peak_diff2.pdf",p,width = 6,height = 4)
save.image('all_peak_diff2.Rdata')
|
2553b9909e0f5deb77f095527b97569bd6b51394 | 377d9dcc37f618d684b65f7f1ea1f953627df4fe | /code/config/jalaun_params.R | 2ab1f70148341571d2c7c6447f162fa080455b6c | [] | no_license | jimmyokeeffe/jimmyokeeffe-Farmer_irrigation_behavior_sociohydrological_model_WRR2018 | 6b568f094a5b96c48421561611c7fed881916e35 | e4bfe9cea28ae6c7b80d3eada42f8df3ee7582b1 | refs/heads/master | 2020-03-23T00:05:32.586182 | 2018-07-13T12:37:37 | 2018-07-13T12:37:37 | 140,843,737 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 4,059 | r | jalaun_params.R | ## Author : Simon Moulds & Jimmy O'Keeffe
## Date : 10 Jan 2018
## Parameter file for Jalaun district
## region characteristics
## ######################
farm_area = 10000 ## m2 (i.e. one hectare) **checked**
## hydrological parameters
## #######################
## aquifer specific yield
Sy = 0.15 ## **checked**
## surface elevation
surface_elev_masl = 139 ## actual elevation above sea level **checked**
## initial head
H_init_masl = surface_elev_masl - 5 ## **checked**
## root zone parameters
field_capacity = 0.2 ## **checked**
wilting_point = 0.12 ## **checked**
root_zone_depletion_init = 0 ## **checked**
## coefficients to control runoff, bare soil evaporation and return flow
runoff_coef = 0.95 ## **calibrated**
evaporation_loss_coef = 0.45 ## **calibrated**
## runoff_coef = 0.95 ## **checked**
## evaporation_loss_coef = 0.7 ## **checked**
return_flow_coef = 0.3 ## **checked**
## canal volume and leakage coefficient
canal_volume = 1 * 5 * 100 ## **checked**
canal_leakage_coef = 0.4 ## **checked**
## canal_prob = 0.3 ## **checked**
canal_prob_upper = 0.4
canal_prob_lower = 0.3
## crop parameters
## ###############
## factor to change crop max yield by as values used are averages from Gov stats
wheat_yield_coeff = 2.0 ## **checked** (missing from params_fn xlsx file)
rice_yield_coeff = 0 ## **checked**
## water stress coefficient max/min value
Ks_max = 1 ## **checked**
Ks_min = 0 ## **checked**
## yield response factor
Ky_wheat = 0.65 ## **checked**
Ky_rice = 0 ## **checked**
## crop factor (growth stages 1-4); values from Choudhury et al (2013)
Kc_fallow = 0
Kc1w = c(320,350,1.00) ## **checked**
Kc2w = c(351,16,1.12) ## **checked**
Kc3w = c(17,47,1.25) ## **checked**
Kc4w = c(48,106,0.46) ## **checked**
Kc1r = c(148,185,0) ## **checked**
Kc2r = c(186,223,0) ## **checked**
Kc3r = c(224,261,0) ## **checked**
Kc4r = c(262,296,0) ## **checked**
## et depletion factor
et_depletion_factor_fallow = 0.1 ## **checked**
et_depletion_factor_wheat = c(320,106,0.5) ## **checked**
et_depletion_factor_rice = c(148,296,0) ## **checked**
## rooting depth
rooting_depth_fallow = 0.01 ## **checked**
rooting_depth_wheat = c(320,106,1.25) ## **checked**
rooting_depth_rice = c(148,296,0) ## **checked**
## irrigation parameters
## #####################
## harvest day
harvest_day = 305 ## **checked**
## months in which crops are grown
wheat_months = c(11,12,1,2,3,4) ## **checked**
rice_months = c(5,6,7,8,9,10) ## **checked**
## irrigation efficiency
irrigation_eff = 0.4 ## **checked**
## Pump efficiency multiplier 100=1.0 90=1.11 80=1.25 70=1.43 50=2.0 30=3.33
fuel_eff = 2.00 ## **checked**
## crop min/max irrigation depth
wheat_min = 0.07 ## **checked**
wheat_max = 0.3 ## **checked**
rice_min = 0 ## **checked**
rice_max = 0 ## **checked**
## well depths associated with categories 1-3
well_depth1_masl = surface_elev_masl - 20 ## **checked**
well_depth2_masl = surface_elev_masl - 60 ## **checked**
well_depth3_masl = surface_elev_masl - 90 ## **checked**
## farmer parameters
## #################
## category probability
cat_prob = c(0.6,0.3,0.1)
## initial savings upper and lower bounds
saving_lower = 50 ## missing from params_fn xlsx file
saving_upper = 500 ## **checked**
## Amount savings are reduced per year when farmers income is zero
savings_reduction = 0.8 ## **checked**
## Percentage of income retained as savings
saving_percentage = 0.15 ## **checked**
|
ff3315e80013645f932ae05f1243b63d5ab84ec6 | 3cae4ac748f5dd272973da4f25a42dcbac54511b | /R/dfa_zi.r | 16b91a49d2c8570400e28edf989890721766acaf | [] | no_license | openfields/des | 4ef8c4cf9ad4705c25c89d7d02bdbefe3429f438 | 1b1f76e06229ac46a8f9b38be063629715c9baec | refs/heads/master | 2021-01-10T05:59:04.130332 | 2016-09-26T19:37:49 | 2016-09-26T19:37:49 | 44,263,493 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 2,343 | r | dfa_zi.r | # DFA ZI
library(jagsUI)
#dfa = read.csv('/home/will/Documents/Stream/Abundance/qryDFA_counthabitat.csv', header=TRUE)
dfa = read.table('C:/Users/wfields/Documents/Stream/Abundance/qryDFA_counthabitat.csv', header=TRUE, sep=',')
y <- dfa[,14:16]
y[is.na(y)]<-0
nsites <- dim(dfa)[1]
ncap<-apply(y,1,sum)
ymax<-ncap
# set nsites, ncaps, ymax
nsites<-nrow(y)
ncap<-apply(y,1,sum)
ymax<-apply(y,1,sum)
# create data list
tokunaga(dfa[,12],dfa[,10])->tok
# scale catchment area and betweenness
betsc <- scale(dfa$bet)
betsc[is.na(betsc)]<-0
fasc <- scale(dfa$facc)
fasc[is.na(fasc)] <- 0
data.dfa.zi <- list(y=y,nsites=nsites,ncap=ncap,cov1=scale(dfa[,4]),cov2=scale(dfa[,5]),cov3=scale(dfa[,6]),cov4=scale(dfa[,11]),cov5=scale(tok$t1),
cov6=scale(dfa[,12]),cov7=betsc, cov8=fasc)
# initial values
inits <- function(){
list (p0=runif(1),beta0=runif(1,-1,1),N=ymax+1,z=rep(1,59))
}
# parameters to monitor
parameters <- c("N","p0","beta0","beta1","beta2","beta3","beta4","beta5","beta6","beta7","beta8","omega","b4","b5","fit","fit.new")
# mcmc settings
nthin<-150
nc<-3
nb<-75000
ni<-500000
system.time(dfa.zi2<-jags(data.dfa.zi, inits, parameters, "zi3.txt", n.chains=nc, n.iter=ni, n.burnin=nb, parallel=TRUE))
print(dfa.zi2)
dfj = read.csv('/home/will/Documents/Stream/Abundance/qryDFJ_stats.csv', header=TRUE)
dfj <- read.table('C:/Users/wfields/Documents/Stream/Abundance/qryDFJ_stats.csv', header=TRUE, sep=',')
head(dfj)
y <- dfj[,17:19]
y[is.na(y)]<-0
nsites <- dim(dfj)[1]
ncap<-apply(y,1,sum)
ymax<-ncap
# set nsites, ncaps, ymax
nsites<-nrow(y)
ncap<-apply(y,1,sum)
ymax<-apply(y,1,sum)
# create data list
tokunaga(dfa[,12],dfa[,10])->tok
data.dfj <- list(y=y,nsites=nsites,ncap=ncap,cov1=scale(dfa[,4]),cov2=scale(dfa[,5]),cov3=scale(dfa[,6]),cov4=scale(dfa[,11]),
cov5=scale(tok$t1),cov6=scale(dfa[,12]),cov7=betsc,cov8=fasc)
# initial values
inits <- function(){
list (p0=runif(1),beta0=runif(1,-1,1),N=ymax+1,z=rep(1,59))
}
# parameters to monitor
parameters <- c("N","p0","beta0","beta1","beta2","beta3","beta4","beta5","beta6","beta7","beta8","omega","b4","b5","fit","fit.new")
# mcmc settings
nthin<-200
nc<-3
nb<-75000
ni<-500000
# fit model
system.time(dfj.zi2<-jags(data.dfj, inits, parameters, "zi3b.txt", n.chains=nc, n.iter=ni, n.burnin=nb, parallel=TRUE))
print(dfj.zi2)
|
950aff810317d169d4bc3a17787faff0da71659a | 4a082b465f43514425c0b19f8f2052ff770d7551 | /man/O_matrix.Rd | 97e9d3db7c5d3711ed5606942042796cc3274a17 | [] | no_license | bsaul/wgee | 04e69bced0c1111e2842c987ba5c66a98472f338 | 636c77cff009c2d8a831c9d627276941a9ec86db | refs/heads/master | 2021-01-10T12:57:26.455578 | 2016-01-05T15:48:38 | 2016-01-05T18:47:50 | 48,764,972 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 388 | rd | O_matrix.Rd | % Generated by roxygen2 (4.1.1): do not edit by hand
% Please edit documentation in R/matrix_computations.R
\name{O_matrix}
\alias{O_matrix}
\title{Cluster-level O matrix}
\usage{
O_matrix(D, W, V.inv)
}
\arguments{
\item{D}{matrix of derivatives of mean function}
\item{W}{weight matrix}
\item{V.inv}{inverse variance matrix}
}
\description{
Computes
\deqn{ D_i^T V_i^{-1} W_i D_i}
}
|
ec60208a9f7284be3d8c7392b14c8757089d552e | 8c0f0f13707180d2fe7cd86db4618029d2bb84f3 | /australian/R/garch.R | 015f7c5752973013e9eadfdd24c45dec6e9cadcb | [] | no_license | 4350/grandslam | 23d030ae27b8da70d8e6bed0293db4ab470a442d | e5962ccded3bdfa68fff6d7cc6b2887525b43ffd | refs/heads/master | 2021-01-24T21:35:57.029066 | 2016-12-06T13:41:15 | 2016-12-06T13:41:15 | 68,724,045 | 3 | 0 | null | null | null | null | UTF-8 | R | false | false | 3,082 | r | garch.R | #' @importFrom rugarch ugarchfit
NULL
#' Fit GARCH models to data
#'
#' @param garch List of ugarchspec
#' @param data TxN data points
#'
#' @return
#' @export
garch_fit <- function(garch, data) {
  # One fit per specification: spec j is paired with column j of the
  # data; %dopar% distributes the fits over the registered backend.
  foreach(spec_idx = seq_along(garch)) %dopar% {
    spec <- garch[[spec_idx]]
    series <- data[, spec_idx]
    # 'hybrid' lets rugarch try several optimisers until one converges.
    ugarchfit(spec, series, solver = 'hybrid')
  }
}
#' Turns GARCH fits to specs with fixed.pars
#'
#' @param fits List of GARCH fits
#'
#' @return List of GARCH specs
#' @export
garch_fit2spec <- function(fits) {
  # Rebuild a spec from each fitted model, freezing every estimated
  # coefficient via fixed.pars so the spec can be reused (filtering,
  # simulation) without re-estimation.
  lapply(fits, function(fit) {
    garch_specgen(
      fit@model$modelinc['ar'],
      fit@model$modelinc['ma'],
      fixed.pars = fit@fit$coef,
      # FALSE spelled out (not the reassignable shorthand F):
      # variance targeting must be off so omega comes from fixed.pars.
      vtarget = FALSE
    )
  })
}
#' Create GARCH specifications
#'
#' Makes building GARCH models easier as standard options
#' apart from p, q order is preset. Standard options include
#' variance targeting, GJRGACH(1,1) specification and the GHST
#' distribution for innovations
#'
#' @param p Scalar - AR order
#' @param q Scalar - MA order
#' @param r Scalar - GARCH alpha order
#' @param s Scalar - GARCH beta order
#' @param vtarget Logical - setting long-term variance using unconditional mean
#'
#' @return uGARCHspec object
#' @export
garch_specgen <- function(p, q = 0, r = 1, s = 1, model = 'fGARCH', submodel = 'GJRGARCH',
                          vtarget = TRUE, dist = 'ghst', fixed.pars = list()) {
  # vtarget default spelled TRUE rather than the reassignable shorthand T.
  # NOTE(review): r and s are accepted for the GARCH order but never
  # forwarded below -- the variance model uses ugarchspec's default
  # garchOrder; confirm a (1,1) order is always intended.
  spec <- rugarch::ugarchspec(
    mean.model = list(
      armaOrder = c(p, q)
    ),
    distribution.model = dist,
    variance.model = list(
      model = model,
      submodel = submodel,
      variance.targeting = vtarget
    ),
    fixed.pars = fixed.pars
  )
  spec
}
#' Turn uniform to stdresid for a list of GARCH models
#'
#' @param garch N list of GARCH models
#' @param u TxN uniforms
#'
#' @export
garch_uniform2stdresid <- function(garch, u) {
  # Column j of u is pushed through the GHST quantile function of
  # model j; cbind-ing the per-model results yields a TxN matrix of
  # standardized residuals.
  foreach(model_idx = seq_along(garch), .combine = 'cbind') %dopar% {
    .garch_qghyp(garch[[model_idx]], u[, model_idx])
  }
}
#' Turn standardized residuals to uniforms. Only works with ghst GARCH
#'
#' @param garch N list of GARCH models
#' @param stdresid TxN matrix of stdresid
#'
#' @export
garch_stdresid2uniform <- function(garch, stdresid) {
  # Sequential loop (%do%), unlike the parallel %dopar% used in
  # garch_uniform2stdresid; the per-column CDF evaluation is cheap.
  foreach(i = seq_along(garch), .combine= 'cbind') %do% {
    # Skew/shape of the i-th model's skew-Student (ghst) innovation
    # distribution, read from its parameter table.
    pars <- garch[[i]]@model$pars[, 'Level']
    # NOTE(review): rugarch:::psghst is an unexported internal and may
    # break across rugarch versions -- consider the public rugarch::pdist.
    rugarch:::psghst(stdresid[, i], shape = pars['shape'], skew = pars['skew'])
  }
}
# Quantile function of the GH skew-Student (ghst) innovation
# distribution of a single GARCH model `garch_i`, evaluated at the
# probabilities `p`. Returns standardized quantiles (sigma = 1 below).
.garch_qghyp <- function(garch_i, p) {
  pars <- garch_i@model$pars[, 'Level']
  shape <- pars['shape']
  skew <- pars['skew']
  # The rugarch parametrization is *kinda* the location and scale invariant
  # parametrization mentioned in the ghyp package documentation. The below
  # code is copy-pasted from rugarch source code (rugarch-distributions.R)
  # which uses the SkewHyperbolic library, and then fed to the ghyp
  # quantile function.
  #
  # chi is delta ^ 2
  nu <- shape
  chi <- 1 / ( ((2 * skew^2)/((nu-2)*(nu-2)*(nu-4))) + (1/(nu-2)) )
  beta <- skew / sqrt(chi)
  # mu centers the distribution so it has zero mean.
  mu <- -(beta * chi / (nu - 2))
  # Spline-based inversion speeds up repeated quantile evaluation.
  ghyp::qghyp(p, ghyp::student.t(
    mu = mu,
    chi = chi,
    nu = nu,
    sigma = 1,
    gamma = beta
  ), method = 'splines')
}
|
a719da7a9bc72652d1a1978330bd4c53edd1363a | 8a210793f5fba65e63cd29dc9f2a17c573488da8 | /man/ScoresPMF.Rd | e8909b744845bdfc2fb04e77ccefc61dcb290d12 | [] | no_license | ManuelPerisDiaz/MetaOdysseus | a29613137a783451788a01f7c2c63fa7e7542173 | 92f516186bfde6d70447608d7ea4859a4d53140c | refs/heads/master | 2022-12-29T06:04:29.770112 | 2020-10-14T12:06:09 | 2020-10-14T12:06:09 | 282,212,240 | 1 | 0 | null | null | null | null | UTF-8 | R | false | true | 1,024 | rd | ScoresPMF.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/ScoresPMF.R
\name{ScoresPMF}
\alias{ScoresPMF}
\title{Scoring function for peptide mass fingerprint MALDI-MS experiment.}
\usage{
ScoresPMF(
sequence,
maldiTOF,
TheoreticalMass,
Total,
Mw,
write = TRUE,
save.name,
missed,
enzym = "trypsin"
)
}
\arguments{
\item{sequence}{Protein sequence.}
\item{maldiTOF}{Output obtained from \code{\link{annotationMALDI_Bot}}.}
\item{TheoreticalMass}{Output obtained from \code{\link{Protein.to.Peptide}}.}
\item{Total}{Number of amino acids in the protein sequence.}
\item{Mw}{Molecular weight of the protein assayed.}
\item{write}{Write output with PMF score.}
\item{save.name}{Name to save the output results.}
\item{missed}{Number of missed proteolytic cleavages}
\item{enzym}{Enzyme used for proteolysis.}
}
\value{
Annotation score for PMF MALDI-MS.
}
\description{
Scoring function for peptide mass fingerprint MALDI-MS experiment.
}
|
28dc16a7bbb5f6e98e1a148ff50bff31267475b4 | fa4094e3eaa63df4c849918822acf0762e6c3f5f | /incl/withProgressShiny.R | 4d89d0cababe7008819bef70095539b90ee9772c | [] | no_license | HenrikBengtsson/progressr | 52a899b83054dcef3a6109cc8255e75203ac92a6 | 0dbb50edae6d5ff0b18d7d2981fadf0cbb1967d7 | refs/heads/develop | 2023-08-22T09:36:01.662246 | 2023-08-10T23:31:44 | 2023-08-10T23:31:44 | 180,921,189 | 273 | 12 | null | 2022-08-23T15:09:36 | 2019-04-12T03:12:33 | R | UTF-8 | R | false | false | 735 | r | withProgressShiny.R | library(shiny)
library(progressr)
app <- shinyApp(
ui = fluidPage(
plotOutput("plot")
),
server = function(input, output) {
output$plot <- renderPlot({
X <- 1:15
withProgressShiny(message = "Calculation in progress",
detail = "Starting ...",
value = 0, {
p <- progressor(along = X)
y <- lapply(X, FUN=function(x) {
Sys.sleep(0.25)
p(sprintf("x=%d", x))
})
})
plot(cars)
## Terminate the Shiny app
Sys.sleep(1.0)
stopApp(returnValue = invisible())
})
}
)
local({
oopts <- options(device.ask.default = FALSE)
on.exit(options(oopts))
if (interactive()) print(app)
})
|
8161d64ef65a4f62b983accba78eca9d826ec0ed | 1e820fe644a039a60bfbee354e50c775af675f6b | /DAofExp/07_crossed_factors_drilladvance.R | 9001660b52bfe49f1e9cb120060fd9cfaa466b74 | [] | no_license | PyRPy/stats_r | a334a58fca0e335b9b8b30720f91919b7b43d7bc | 26a3f47977773044d39f6d8ad0ac8dafb01cce3f | refs/heads/master | 2023-08-17T00:07:38.819861 | 2023-08-16T14:27:16 | 2023-08-16T14:27:16 | 171,056,838 | 1 | 1 | null | null | null | null | UTF-8 | R | false | false | 1,134 | r | 07_crossed_factors_drilladvance.R | # drilladvance.r, drill advance experiment, Table 7.12, p231
# page - 220
# Daniel (1976) described a single replicate 2×2×2×2 experiment to
# study the effects of four treatment factors on the rate of advance
# of a small stone drill. The treatment factors were “load on the drill” (A),
# “flow rate through the drill” (B), “speed of rotation” (C), and “type of
# mud used in drilling” (D). Each factor was observed at two levels,
# coded 1 and 2.
# Input data for A, B, C, D and Advance
drill.data <- read.table("Data/drill.advance.txt", header=T)
# Compute log advance, and convert levels 1 and 2 to coeffs -1 and 1, resp.
drill.data <- within(drill.data,
{ y = log10(Advance); A = 2*A-3; B = 2*B-3; C = 2*C-3; D = 2*D-3 })
head(drill.data, 3)
# Fit regression model with interactions to obtain estimates
model1 <- lm(y ~ A*B*C*D, data=drill.data)
model1$coefficients # Show estimates
# Generate half-normal plot of effect estimates
# install.packages("gplots")
library(gplots)
qqnorm.aov(model1, xlab="Half-Normal Scores",
ylab="Normalized Absolute Estimates")
|
40995c59f17a7ccf34c1dc4a2ae2fc1d3253cf0d | 6a28ba69be875841ddc9e71ca6af5956110efcb2 | /An_Introduction_To_Statistical_Methods_And_Data_Analysis_by_R_Lyman_Ott_And_Michael_Longnecker/CH4/EX4.15/Ex4_15.r | c68fd8f18dcfcf1f332a1a091b79bfe1a3e7b9b0 | [] | permissive | FOSSEE/R_TBC_Uploads | 1ea929010b46babb1842b3efe0ed34be0deea3c0 | 8ab94daf80307aee399c246682cb79ccf6e9c282 | refs/heads/master | 2023-04-15T04:36:13.331525 | 2023-03-15T18:39:42 | 2023-03-15T18:39:42 | 212,745,783 | 0 | 3 | MIT | 2019-10-04T06:57:33 | 2019-10-04T05:57:19 | null | UTF-8 | R | false | false | 39 | r | Ex4_15.r | # Page No. 174
pnorm(23,mean =20,sd=2) |
8491315520c250ed5e0982ebe4d5a48176b9db85 | 84b88096eb178671541f030cf0c31ea3dbeec633 | /plot6.R | a151e891094f646285ee339e68fa03a17a29d276 | [] | no_license | muhsalem/Ex-Data-Analysis-Project-2 | 72bc6a992ef0429109d846596bdefd539d4e0376 | 58a23ad799b61163e4cd6d4ea4b8f7bf243c2597 | refs/heads/master | 2020-03-27T08:04:41.549189 | 2018-08-26T21:58:50 | 2018-08-26T21:58:50 | 146,218,332 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 860 | r | plot6.R | library(dplyr)
NEI <- readRDS("~/exdata-data-NEI_data/summarySCC_PM25.rds")
SCC <- readRDS("~/exdata-data-NEI_data/Source_Classification_Code.rds")
NEISCC <- merge(NEI,SCC,by.x="SCC",by.y="SCC")
NEISCCVehicle <- NEISCC[which(grepl("Vehicles",NEISCC$EI.Sector)==TRUE),]
NEISCCVehicle4 <- subset(NEISCCVehicle,NEISCCVehicle$fips=="24510")
NEISCCVehicle5 <- tapply(NEISCCVehicle4$Emissions,NEISCCVehicle4$year,sum)
NEISCCVehicle6 <- subset(NEISCCVehicle,NEISCCVehicle$fips=="06037")
NEISCCVehicle7 <- tapply(NEISCCVehicle6$Emissions,NEISCCVehicle6$year,sum)
par(mfrow=c(1,2))
plot(names(NEISCCVehicle5),NEISCCVehicle5,type="b",col="red",xlab="Year",ylab="Total Emissions PM2.5 from Vehicles sources",main="Baltimore")
plot(names(NEISCCVehicle7),NEISCCVehicle7,type="b",col="red",xlab="Year",ylab="Total Emissions PM2.5 from Vehicles sources",main="Los Angeles")
|
07f2423d86026646922f0d241217e6764bcb5b8e | abdd957fa4e827cd09abae7f2d839c0ab816c644 | /lab7_Rcode.R | 33a373d5d44def0dd9fce1569dccc9deb154ca82 | [] | no_license | quantt94/Multivariate-Analysis | c8e0972d7246d635a7cfa64a9c86770bbec912fa | e631a15c3f03991dc57dfa6dddd0984801a0aebb | refs/heads/master | 2020-06-20T15:22:27.868056 | 2019-07-16T09:56:55 | 2019-07-16T09:56:55 | 197,162,795 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 8,310 | r | lab7_Rcode.R | #Stat 372, Lab 7, Feb.29, 2016
################################################################################
#Topics covered:
# 1. K-nearest neighbor
# 2. Logistic regression
# 3. Classification tree
# 4. Regression tree
################################################################################
#consider the Iris data, the last two classes (rows 51-150: versicolor, virginica)
#divide the data into TRAINING AND TEST SETS
set.seed(107) #fix the RNG so the random split is reproducible
ind=sort(c(sample(51:100,25),sample(101:150,25))) #SRS of 25 rows from each of rows 51-100 and 101-150
train=iris[ind,]
test=iris[setdiff(51:150,ind),] #the remaining 50 rows form the test set
# Rebuild the Species factor so the unused setosa level is dropped.
train$Species=as.factor(as.matrix(train$Species))
test$Species=as.factor(as.matrix(test$Species))
head(train) #first 6 rows of the training set
levels(train[,5])
# 1. K-nearest neighbor
#use leave-one-out cross-validation to choose the best k
library(class) #provides knn() and knn.cv()
# Odd values of k only, so a 2-class majority vote cannot tie.
kvec=seq(1,49,by=2) #grid of candidate k values
klen=length(kvec)
evec=rep(0,klen) #number of CV misclassifications for each k
yind=5 #column index of the class label (Species)
for (i in 1:klen){
# knn.cv performs leave-one-out cross-validation on the training set
y.cv=knn.cv(train[,-yind],train[,yind],k=kvec[i])
evec[i]=sum(y.cv!=train[,yind]) #number of errors for k=kvec[i]
}
plot(kvec,evec)
(kbest=kvec[order(evec)[1]]) #k with the smallest misclassification count
#postscript(file="/Users/wanhuasu/Documents/MacEwan/stat372/plots/iris_knn_tune.ps",height=8,width=8)
#plot(kvec,evec,xlab="k",ylab="# of Errors",cex.lab=1.5,pch=19)
#dev.off()
#apply the KNN model with the best k on the test set
kvec[c(1,2,8)]
#k=1,3,15 tie for the smallest CV error; evaluate each on the test set
kbest=3
(mknn=knn(train[,-yind],test[,-yind],train[,yind],k=kbest,prob=T))
(ktab=table(test[,yind],mknn)) #misclassification table
(krate=1-sum(diag(ktab)/sum(ktab))) #test misclassification rate
?knn
#if take k=15
kbest=15
mknn=knn(train[,-yind],test[,-yind],train[,yind],k=kbest,prob=T)
(ktab=table(test[,yind],mknn)) #misclassification table
(krate=1-sum(diag(ktab)/sum(ktab)))
#if k=1
kbest=1
mknn=knn(train[,-yind],test[,-yind],train[,yind],k=kbest,prob=T)
(ktab=table(test[,yind],mknn)) #misclassification table
(krate=1-sum(diag(ktab)/sum(ktab)))
# 2. Logistic regression
?glm
?predict.glm
# Binomial GLM (logit link) on all four predictors; maxit raises the
# iteration limit for the iterative ML fit.
m0=glm(Species~.,data=train,family="binomial",maxit = 100) #maxit = max number of fitting iterations
#Warning message:
#glm.fit: fitted probabilities numerically 0 or 1 occurred
#this is due to the fact that there might be one variable that can
#separate the species perfectly.
(summary(m0))
# Predicted probabilities on the test set; type="response" returns
# probabilities rather than the linear predictor. The probability is
# for the second factor level -- presumably virginica; confirm with
# levels(train$Species).
pvec=predict(m0,test,type="response") #type="response" gives the probabilities
# Classify with a 0.5 cutoff; misclassification table and rate.
(ltab=table(test[,yind],pvec>0.5))
(lrate=1-sum(diag(ltab)/sum(ltab)))
# Scatterplots of the training data, colored/marked by species.
# NOTE(review): legend label "Virginical" looks like a typo for "Virginica".
plot(train[,c(1,2)], col=c("red","black")[unclass(train$Species)],pch=c(1,3)[unclass(train$Species)],cex.lab=1.3)
legend(4.9,3.4, col=c("red","black"),pch=c(1,3),c("Versicolor","Virginical"),cex=1.2)
plot(train[,c(3,4)], col=c("red","black")[unclass(train$Species)],pch=c(1,3)[unclass(train$Species)],cex.lab=1.3)
legend(3.3,2.5, col=c("red","black"),pch=c(1,3),c("Versicolor","Virginical"),cex=1.2)
#3. classification tree
library(rpart)
?rpart
# Grow a deliberately large tree: cp=0 disables the complexity penalty
# and minsplit=2 allows splitting down to two observations; the tree
# is pruned afterwards using the cp table.
# Cost-complexity pruning minimizes C(T) + alpha*|T|, where C(T) is the
# misclassification cost, alpha the penalty and |T| the tree size;
# a large alpha favors a small tree.
tobj=rpart(Species~.,data=train,method="class",parms=list(split="information"),cp=0,minsplit=2)
#tobj=rpart(Species~.,data=train,method="class",parms=list(split="information"),cp=0)
# Predicted class probabilities on the test set; for each row, pick
# the column with the largest probability as the predicted class.
mt=predict(tobj,test,type="prob")
temp=as.numeric(apply(-mt,1,order)[1,])
(ttab=table(test[,yind],temp))
(trate=1-sum(diag(ttab)/sum(ttab)))
#or use type="class" to get the predicted classes directly
mt=predict(tobj,test,type="class")
(ttab=table(test[,yind],mt))
(trate=1-sum(diag(ttab)/sum(ttab)))
plot(tobj)
text(tobj,minlength=0, digits=6,use.n =TRUE)
#pretty plot?
#par(mfrow=c(1,1),xpd=NA)
plot(tobj,uniform=T,branch=0)
text(tobj,use.n=T,all=T,fancy=T,cex=1.2,fwidth=0.6,fheight=1.2)
#shall we prune the tree?
#On choosing the optimal # of splits:
#1. choose the one gives the SMALLEST cross-validated error (xerror)
#2. Pick the SMALLEST TREE WITHIN ONE STANDARD DEVIATION of the best.
names(tobj)
# cptable columns: CP, nsplit, rel error, xerror, xstd
tobj$cptable
#xerror - cross validation error
#xstd - its standard deviation
#the trees with 2 splits and 3 splits have the same cross-validated error
#prune the tree at cp=0.04 and re-evaluate on the test set
tobj.prune=prune(tobj,cp=0.04)
plot(tobj.prune)
text(tobj.prune,minlength=0, digits=6,use.n =TRUE)
(tm.prune=predict(tobj.prune,test,type="class"))
(tptab=table(test[,yind],tm.prune))
(tprate=1-sum(diag(tptab)/sum(tptab)))
# Spam data: grow a large classification tree, then prune by the
# cross-validated error in the cp table.
#data0=read.table("/Users/wanhuasu/Documents/MacEwan/stat372/Lab/data/spam.txt",sep="")
data0=read.table("M:/stat372/Lab/data/spam.txt", sep="")
# NOTE(review): the next line overwrites the file just read with an
# in-memory object named `spam` — presumably a fallback when the file path
# is unavailable; confirm which source is intended.
data0=spam
head(data0)
data=data0[,-58]
y=data0[,58]
table(y) # 0 is email and 1 is spam
len=length(y)
ind1=which(y==1)
ind0=which(y==0)
n1=round(length(ind1)/2)
n0=ceiling(length(ind0)/2)
# Divide into training and test sets, stratified by class.
set.seed(107)
trind=sort(c(sample(ind1,n1),sample(ind0,n0)))
train=data0[trind,]
test=data0[-trind,]
ytrain=y[trind]
ytest=y[-trind]
# Attach readable column names (57 word/char frequencies + capital-run-length
# summaries + the label y).
dimnames(train)[[2]]=c("make","address","all","3d","our","over","remove","internet","order","mail","receive","will","people","report","addresses","free","business","email","you","credit","your","font","000","money","hp","hpl","george","650","lab","labs","telnet","857","data","514","85","technology","1999","parts","pm","direct","cs","meeting","original","project","re","edu","table","conference","ch;","ch(","ch[","ch!","ch$","ch#","CRL_avg","CRL_longest","CRL_total","y")
dimnames(test)[[2]]=c("make","address","all","3d","our","over","remove","internet","order","mail","receive","will","people","report","addresses","free","business","email","you","credit","your","font","000","money","hp","hpl","george","650","lab","labs","telnet","857","data","514","85","technology","1999","parts","pm","direct","cs","meeting","original","project","re","edu","table","conference","ch;","ch(","ch[","ch!","ch$","ch#","CRL_avg","CRL_longest","CRL_total","y")
# Build a big (unpruned) tree: cp = 0.
tobj=rpart(y~.,data=data.frame(x=train[,-58],y=train[,58]),method="class",parms=list(split="information"),cp=0) #build a big tree, cp=0
mt=predict(tobj,data.frame(x=test[,-58],y=test[,58]),type="prob")
(spamtab=table(ytest,mt[,1]<mt[,2])) # predicted spam when P(spam) > P(email)
(spamrate=1-sum(diag(spamtab))/nrow(test))
# Plot the big tree (overfit).
plot(tobj,uniform=T,branch=0)
text(tobj,use.n=T,all=T,cex=0.6)
# To prune we need a cp value; inspect the cp table.
(cptab=tobj$cptable)
# Search for the cp with the smallest cross-validated error (column 4);
# for ties prefer the larger cp, i.e. the simpler model.
plot(log(cptab[,1]),cptab[,4],xlab="Log of CP value", ylab="Error")
mind=order(cptab[,4])[1] # pick the cp with the smallest error
cp1=cptab[mind,1]
# Same thing using which.min().
(cp1=cptab[which.min(cptab[,4]),1])
tobj.prune=prune(tobj,cp=cp1) # the pruned tree has 16 splits, hence 17 nodes
pmt=predict(tobj.prune,data.frame(x=test[,-58],y=test[,58]),type="prob")
(mat=table(ytest,pmt[,1]<pmt[,2]))
(ptrate=1-sum(diag(mat))/nrow(test))
# Plot the pruned tree.
plot(tobj.prune,uniform=T,branch=0)
text(tobj.prune,use.n=T,all=T,cex=0.6)
# 4. Regression tree vs. multiple linear regression on the Boston housing
# data (MASS). Fits a pruned regression tree and an OLS model on a 50/50
# train/test split and compares their test-set sums of squared errors (SSE).
library(MASS)
?Boston
# Divide the data into training and test halves.
data <- Boston
(n <- nrow(data))
set.seed(107)
ind <- sample(1:n, round(n / 2))
train <- data[ind, ]
test <- data[-ind, ]
# Grow a full regression tree (cp = 0 disables pre-pruning).
m1 <- rpart(medv ~ ., data = train, cp = 0)
# Plot the unpruned tree.
plot(m1, uniform = TRUE, compress = TRUE, margin = 0.1)
text(m1)
# Prune back to the subtree with the smallest cross-validated error.
(tab <- m1$cptable)
(cp1 <- tab[which.min(tab[, 4]), 1])
m1.prune <- prune(m1, cp = cp1)
plot(m1.prune, uniform = TRUE, compress = TRUE, margin = 0.1)
text(m1.prune)
# Tree predictions on the TEST set.
# BUG FIX: predict() takes `newdata =`, not `data =`. With `data = test`
# the argument was silently swallowed by `...` and predict() returned
# fitted values for the TRAINING rows, so the SSE comparison below was
# meaningless (it only "worked" because both halves happen to have the
# same number of rows).
pvec1 <- predict(m1.prune, newdata = test)
# Fit a multiple linear regression on the same training data.
m2 <- lm(medv ~ ., data = train)
summary(m2)
# Linear-model predictions on the TEST set (same newdata fix as above).
pvec2 <- predict(m2, newdata = test)
# Compare test-set SSE of the two models.
c(sum((pvec1 - test$medv)^2), sum((pvec2 - test$medv)^2))
|
4a4a9937c87e4f0932ef9d31f80c26bb29be62a7 | 48194198e8b75c22c9e9dab61e7a2c918a5c5a7e | /R/util_fcns.R | 985fc79341c0ecf5154614495d98a592ea86752f | [] | no_license | amcrisan/minCombinR | 95a5de4c9ddb17204ccf28d87ae0f2c5d745687a | a435a2432a5a2e80559f3f0702eda9716641259f | refs/heads/master | 2020-03-18T14:58:31.390064 | 2020-02-09T02:45:53 | 2020-02-09T02:45:53 | 134,878,729 | 0 | 1 | null | 2019-03-31T20:13:23 | 2018-05-25T16:11:24 | HTML | UTF-8 | R | false | false | 5,907 | r | util_fcns.R | #Handy function to dynamically combine aes for ggplot
# Combine two ggplot2 aesthetic mappings ("uneval" lists).
# Entries from `b` override same-named entries in `a`; the merged list is
# re-tagged with class "uneval" so ggplot2 still treats it as an aes() mapping.
`+.uneval` <- function(a, b) {
  merged <- modifyList(a, b)
  class(merged) <- "uneval"
  merged
}
#might go elsewhere, but essentially, calls a shinyapp like a function to allow a user to annotate their image
#and create the image file
#An important detail : all the spatial mappings are depend on OUR chioce of how to render the image
#So it's based on assuming 1000 pixel width. The transformations are meaningless if the image is resized.
#NOTE : CURRENTLY ONLY TESTED ON IMAGE DATA, BUT EXPECTED TO BECOME MORE GENERIC IN THE FUTURE
#' Interactively annotate landmarks on an image
#'
#' Launches a small shiny app that displays \code{img} and lets the user mark
#' named landmarks: a double-click records a point, a click-and-drag brush
#' records a rectangle. Recorded annotations appear in an editable table; when
#' the browser session is closed the app returns the annotation table.
#'
#' All spatial mappings depend on how the image is rendered (a fixed 1000px
#' plot area); coordinates are meaningless if the image is resized afterwards.
#'
#' @param img An image raster to be drawn as the plot background (passed to
#'   \code{grid::rasterGrob}).
#' @param imgDetails A list-like object with numeric elements \code{width} and
#'   \code{height} giving the image dimensions in pixels.
#'
#' @return A character matrix of annotations with columns: element id, x, y,
#'   xmax, ymax (NA for points), user-supplied element name, and type
#'   ("point" or "square").
#' @export
annotate_app <- function(img, imgDetails) {
  require(shiny)
  require(ggplot2)
  require(DT)
  require(grid)

  # Accumulator for the annotations; updated from inside the server via `<<-`
  # so the final table can be handed back by stopApp() on session end.
  annotDat <- c()

  shiny::shinyApp(
    ui = shiny::fluidPage(
      shiny::h1("Welcome to annotator"),
      shiny::p("Here's what you can do. Click on the button to start annotating items on your image. Complex polygons are not currently supported, so please pick the centriod (i.e middle of a room) or some other landmark to annotate data"),
      shiny::br(),
      shiny::fluidRow(
        shiny::column(6,
                      shiny::plotOutput("testPlot", dblclick = "plot_click", brush = "plot_brush", height = "1000px")),
        shiny::column(6,
                      shiny::textInput(inputId = "elementID", label = "Element Name",
                                       placeholder = "Add name here, then click on plot"),
                      DT::dataTableOutput("elementTable", width = "80%"))
      )
    ),
    server = function(input, output, session) {
      # When the user closes the browser, stop the app and return the
      # accumulated annotation table.
      session$onSessionEnded(function() { stopApp(annotDat) })

      # Reactive state: the annotation table plus counters used to build
      # unique ids for point and square annotations.
      values <- reactiveValues(df_data = NULL,
                               pointObj = 0,
                               shapeObj = 0)

      # Base plot: the image stretched over (0, width) x (0, height), so click
      # and brush coordinates map directly onto image pixels. The raster is
      # re-sized automatically to the display window it is rendered into.
      imgBase <- reactive({
        imgRaster <- rasterGrob(img, width = unit(1, "npc"), height = unit(1, "npc"), interpolate = TRUE)
        ggplot() +
          xlim(c(0, imgDetails$width)) +
          ylim(c(0, imgDetails$height)) +
          annotation_custom(imgRaster, 0, imgDetails$width, 0, imgDetails$height) +
          theme_bw()
      })

      output$testPlot <- renderPlot({
        p <- imgBase()
        if (!is.null(values$df_data)) {
          # values$df_data is a character matrix; rebuild a typed data frame
          # before plotting.
          df <- data.frame(elemID = values$df_data[, 1],
                           x = as.numeric(values$df_data[, 2]),
                           y = as.numeric(values$df_data[, 3]),
                           xmax = as.numeric(values$df_data[, 4]),
                           ymax = as.numeric(values$df_data[, 5]),
                           element_name = values$df_data[, 6],
                           type = values$df_data[, 7],
                           stringsAsFactors = FALSE)
          df_point <- dplyr::filter(df, type == "point")
          df_shape <- dplyr::filter(df, type == "square")
          p <- p +
            geom_point(data = df_point, aes(x = x, y = y), colour = "red", size = 2) +
            geom_rect(data = df_shape, aes(xmin = x, ymin = y, xmax = xmax, ymax = ymax, group = elemID), alpha = 0.2, colour = "blue") +
            theme_bw()
        }
        p
      })

      # Editable table of the saved annotation elements.
      output$elementTable <- renderDataTable({
        if (is.null(values$df_data))
          return(NULL)
        df <- data.frame(elemID = values$df_data[, 1],
                         x = as.numeric(values$df_data[, 2]),
                         y = as.numeric(values$df_data[, 3]),
                         xmax = as.numeric(values$df_data[, 4]),
                         ymax = as.numeric(values$df_data[, 5]),
                         element_name = values$df_data[, 6],
                         type = values$df_data[, 7],
                         stringsAsFactors = FALSE)
        df
      }, editable = TRUE)

      # Double-click: record a point annotation at the clicked coordinates.
      observeEvent(input$plot_click, {
        type <- "point"
        elemID <- paste0(type, values$pointObj)
        values$pointObj <- values$pointObj + 1
        # ggplot can report coordinates slightly below the axis origin
        # (expand_scales did not behave as needed here); clamp negatives to 0.
        x <- ifelse(input$plot_click$x < 0, 0, input$plot_click$x)
        y <- ifelse(input$plot_click$y < 0, 0, input$plot_click$y)
        if (!input$elementID == "") {
          # BUG FIX: a leftover debugging line (`x <- 10`) used to overwrite
          # the clicked x-coordinate whenever an element name was supplied,
          # pinning every named point at x = 10. The click coordinate is
          # now kept.
          values$df_data <- rbind(values$df_data, c(elemID, x, y, NA, NA, input$elementID, type))
          updateTextInput(session, "elementID", value = "")
        } else {
          values$df_data <- rbind(values$df_data, c(elemID, x, y, NA, NA, "ADD ELEMENT ID", type))
        }
        annotDat <<- values$df_data
      })

      # Brush (click-and-drag): record a rectangular annotation.
      observeEvent(input$plot_brush, {
        type <- "square"
        elemID <- paste0(type, values$shapeObj)
        values$shapeObj <- values$shapeObj + 1
        if (!input$elementID == "") {
          values$df_data <- rbind(values$df_data, c(elemID, input$plot_brush$xmin, input$plot_brush$ymin, input$plot_brush$xmax, input$plot_brush$ymax, input$elementID, type))
          updateTextInput(session, "elementID", value = "")
        } else {
          values$df_data <- rbind(values$df_data, c(elemID, input$plot_brush$xmin, input$plot_brush$ymin, input$plot_brush$xmax, input$plot_brush$ymax, "ADD ELEMENT ID", type))
        }
        annotDat <<- values$df_data
        session$resetBrush("plot_brush")
      })

      # Keep edits the user makes directly in the table cells.
      observeEvent(input$elementTable_cell_edit, {
        changeSite <- input$elementTable_cell_edit
        values$df_data[changeSite$row, changeSite$col] <- changeSite$value
        annotDat <<- values$df_data
      })
    }
  )
}
|
88e5e9100b624030a7f0424c836923c079a9be55 | f586cc3599f8685ffed9f10befa8bef0dd761cd4 | /man/exer.Rd | 6a63dfa39e7bdadd91bc0f6fd7187d5a1e938170 | [] | no_license | cran/mrt | 87bd3d0b56c73c95146ab1c1d8703f8a303e3c89 | b2ad5f7db7432499d81f827812b2cfbf068132c1 | refs/heads/master | 2020-04-07T15:45:38.872572 | 2009-08-17T00:00:00 | 2009-08-17T00:00:00 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,078 | rd | exer.Rd | \name{exer}
\alias{exer}
\docType{data}
\title{Getting Children to Exercise}
\description{
These data are from Hill et al. (2007) and are used to illustrate multilevel modeling.
}
\usage{data(exer)}
\format{
A data frame with 503 observations on the following 12 variables.
\describe{
\item{\code{wcond}}{a numeric vector}
\item{\code{class}}{a numeric vector}
\item{\code{w1sn02}}{a numeric vector}
\item{\code{w2sn02}}{a numeric vector}
\item{\code{w1int}}{a numeric vector}
\item{\code{w2int}}{a numeric vector}
\item{\code{w1att}}{a numeric vector}
\item{\code{w2att}}{a numeric vector}
\item{\code{z1pbc}}{a numeric vector}
\item{\code{z2pbc}}{a numeric vector}
\item{\code{sqw1}}{a numeric vector}
\item{\code{sqw2}}{a numeric vector}
}
}
\references{
Hill, C., Abraham, C., & Wright, D. B. (2007). Can theory-based messages
in combination with cognitive prompts promote exercise in classroom settings?
\emph{Social Science & Medicine}, \bold{65}, 1049-1058.
}
\keyword{datasets}
|
8b7d4e4dce22bb45c765b36bf16cc58d73410d44 | 5d56068241b13bdc9bfe52d431e5313f0f0642a0 | /01 Datos reales/01_tratamiento.R | 2edec6c575256499a59eb64d708b63a5e400361e | [] | no_license | cesarqb/University-Satisfaction---in-R | 30bc87c2a42b497e194a78fa8f2f16986423021d | 683f7f750d40d2e4ee3099637e3d0c37820dd43f | refs/heads/master | 2022-09-19T21:54:21.937926 | 2020-05-20T05:12:59 | 2020-05-20T05:12:59 | 265,457,049 | 0 | 0 | null | null | null | null | IBM852 | R | false | false | 18,999 | r | 01_tratamiento.R | #==================================================================
rm(list = ls())
gc()
library(ggplot2)
library(tm)
library(stringr)
library(topicmodels)
library(hunspell)
# LECTURA DE DATOS
setwd('C:/Users/Cesar Quezada/Dropbox/KAGGLE/00. Concurso 00 - Titanic')
train_txt <- read.csv('train_universidad.csv')
train_txt$type_data <- 'train'
test_txt <- read.csv('test_universidad.csv')
test_txt$NPS <- NA
test_txt$type_data <- 'test'
datos_txt <- rbind(train_txt, test_txt)
rm(train_txt, test_txt)
id <- 'COD_ENCUESTADO'
target <- 'NPS'
#==================================================================
# Tratando algunos datos
# DEPORTISTA
datos_txt$ind_deportista <- ifelse(datos_txt$UOD_depostista_ind_deportista=='Deportista',1,0)
datos_txt$UOD_depostista_ind_deportista <- NULL
mosaicplot(datos_txt$ind_deportista~datos_txt[,target], col=T)
prop_table <- prop.table(table(datos_txt$ind_deportista,
datos_txt$type_data),
margin = 2)
barplot(prop_table)
chisq.test(prop_table)
# DELEGADO
datos_txt$ind_delegado <- ifelse(datos_txt$IND_DELEGADO=='Delegado',1,0)
datos_txt$IND_DELEGADO <- NULL
mosaicplot(datos_txt$ind_delegado~datos_txt[,target], col=T)
prop_table <- prop.table(table(datos_txt$ind_delegado,
datos_txt$type_data),
margin = 2)
barplot(prop_table)
chisq.test(prop_table)
# Nombre.Campus
datos_txt$nombre_campus <- as.factor(datos_txt$Nombre.Campus)
datos_txt$Nombre.Campus <- NULL
mosaicplot(datos_txt$nombre_campus~datos_txt[,target], col=T)
prop_table <- prop.table(table(datos_txt$nombre_campus,
datos_txt$type_data),
margin = 2)
barplot(prop_table)
chisq.test(prop_table)
# IND_GEA
datos_txt$ind_gea <- ifelse(datos_txt$IND_GEA=='GEA',1,0)
datos_txt$IND_GEA <- NULL
mosaicplot(datos_txt$ind_gea~datos_txt[,target], col=T)
prop_table <- prop.table(table(datos_txt$ind_gea,
datos_txt$type_data),
margin = 2)
barplot(prop_table)
chisq.test(prop_table)
# Ciclo (la dejaremos como numero)
datos_txt$ciclo <- datos_txt$Ciclo
datos_txt$Ciclo <- NULL
table(datos_txt$ciclo,datos_txt$NPS)
boxplot(datos_txt$ciclo~datos_txt$NPS)
plot(density(datos_txt$ciclo[datos_txt$type_data=='train']),
ylim = c(0,0.6), lwd = 2)
lines(density(datos_txt$ciclo[datos_txt$type_data=='test']),col='red',
lwd = 3)
ks.test(datos_txt$ciclo[datos_txt$type_data=='train'],
datos_txt$ciclo[datos_txt$type_data=='test'])
# Clave.de.carrera (la dejaremos como numero)
datos_txt$clave_carrera <- datos_txt$Clave.de.carrera
datos_txt$Clave.de.carrera <- NULL
boxplot(datos_txt$clave_carrera~datos_txt$NPS)
plot(density(datos_txt$clave_carrera[datos_txt$type_data=='train']),
ylim = c(0,0.03), lwd = 2)
lines(density(datos_txt$clave_carrera[datos_txt$type_data=='test']),col='red',
lwd = 3)
ks.test(datos_txt$clave_carrera[datos_txt$type_data=='train'],
datos_txt$clave_carrera[datos_txt$type_data=='test'])
# CANT_CURSOS_MATRICU_SIN_INGLES (la dejaremos como numero)
datos_txt$s_ingles <- datos_txt$CANT_CURSOS_MATRICU_SIN_INGLES
datos_txt$CANT_CURSOS_MATRICU_SIN_INGLES <- NULL
table(datos_txt$s_ingles,datos_txt$NPS)
boxplot(datos_txt$s_ingles~datos_txt$NPS)
plot(density(datos_txt$s_ingles[!is.na(datos_txt$s_ingles)&
datos_txt$type_data=='train']),
ylim = c(0,0.7), lwd = 2)
lines(density(datos_txt$s_ingles[!is.na(datos_txt$s_ingles)&
datos_txt$type_data=='test']),
ylim = c(0,0.7), lwd = 3, col='red')
summary(datos_txt$s_ingles)
ks.test(datos_txt$s_ingles[datos_txt$type_data=='train'],
datos_txt$s_ingles[datos_txt$type_data=='test'])
datos_txt$na_s_ingles <- ifelse(is.na(datos_txt$s_ingles),1,0)
table(datos_txt$na_s_ingles,datos_txt[,target])
mosaicplot(datos_txt$na_s_ingles~datos_txt[,target], col=T)
prop_table <- prop.table(table(datos_txt$na_s_ingles,
datos_txt$type_data),
margin = 2)
barplot(prop_table)
chisq.test(prop_table)
# NIVEL.ACTUAL
datos_txt$nivel <- datos_txt$NIVEL.ACTUAL
datos_txt$NIVEL.ACTUAL <- NULL
table(datos_txt$nivel,datos_txt[,target])
mosaicplot(datos_txt$nivel~datos_txt[,target], col=T)
prop_table <- prop.table(table(datos_txt$nivel,
datos_txt$type_data),
margin = 2)
barplot(prop_table)
chisq.test(prop_table)
names(datos_txt)
#==================================================================
# Free-text treatment: derive simple features from the comment field.
library(stringr)
datos_txt$COMENTARIO <- as.character(datos_txt$COMENTARIO)
# Comment length in (approximate) word tokens, via non-word separators.
datos_txt$longitud <- sapply(gregexpr("\\W+", datos_txt$COMENTARIO), length) + 1
plot(density(datos_txt$longitud))
boxplot(datos_txt$longitud~datos_txt$NPS)
plot(density(datos_txt$longitud[datos_txt$type_data=='train']),
     ylim = c(0,0.1), lwd = 2)
lines(density(datos_txt$longitud[datos_txt$type_data=='test']),
      ylim = c(0,0.1), lwd = 3, col='red')
ks.test(datos_txt$longitud[datos_txt$type_data=='train'],
        datos_txt$longitud[datos_txt$type_data=='test'])
# Count numeric tokens per comment (as.numeric() returns NA for non-numbers;
# the coercion warnings are expected here).
cant_numer <- strsplit(datos_txt$COMENTARIO, " ")
cant_numer <- lapply(cant_numer,function(x){
  xx <- as.numeric(x)
  long <- length(xx[!is.na(xx)])
  return(long)
})
cant_numer <- unlist(cant_numer)
# Indicator: does the comment contain at least one number?
datos_txt$ind_number_text <- ifelse(cant_numer>0,1,0)
table(datos_txt$ind_number_text,datos_txt[,target])
mosaicplot(datos_txt$ind_number_text~datos_txt[,target], col=T)
prop_table <- prop.table(table(datos_txt$ind_number_text,
                               datos_txt$type_data),
                         margin = 2)
barplot(prop_table)
chisq.test(prop_table)
# Exploratory proportion-of-numbers feature, kept for reference but unused:
# datos_txt$prop_number <- cant_numer / datos_txt$longitud
# plot(density(datos_txt$prop_number[!is.na(datos_txt$NPS)&datos_txt$NPS==1]),col=1)
# lines(density(datos_txt$prop_number[!is.na(datos_txt$NPS)&datos_txt$NPS==2]),col=2)
# lines(density(datos_txt$prop_number[!is.na(datos_txt$NPS)&datos_txt$NPS==3]),col=3)
# lines(density(datos_txt$prop_number[!is.na(datos_txt$NPS)&datos_txt$NPS==4]),col=4)
# boxplot(log(datos_txt$prop_number+1)~datos_txt$NPS)
# plot(density(datos_txt$prop_number[datos_txt$type_data=='train']),
#      ylim = c(0,250), lwd = 2)
# lines(density(datos_txt$prop_number[datos_txt$type_data=='test']),
#       col="red")
# ks.test(datos_txt$prop_number[datos_txt$type_data=='train'],
#         datos_txt$prop_number[datos_txt$type_data=='test'])
# (TODO) indicator for presence of upper-case letters — never implemented.
#=============================================================================
library(text2vec)
library(tokenizers)
library(SnowballC)
# Normalise raw comment text before tokenisation: strip HTML tags,
# lower-case, collapse internal runs of whitespace, and trim the ends.
text.clean <- function(x) {
  require("tm")
  no_html  <- gsub("<.*?>", " ", x)   # regex removal of HTML tags
  lowered  <- tolower(no_html)        # lower-case characters
  squeezed <- stripWhitespace(lowered) # collapse repeated whitespace
  gsub("^\\s+|\\s+$", "", squeezed)   # trim leading/trailing whitespace
}
# Tokenisation pipeline: clean each comment, stem its word tokens (Spanish),
# and build an n-gram vocabulary (1- to 10-grams) over the full corpus.
prep_fun <- text.clean
tok_fun <- tokenize_word_stems
it_train <- itoken(datos_txt$COMENTARIO,
                   preprocessor = prep_fun,
                   tokenizer = tok_fun,
                   ids = datos_txt[,id],
                   progressbar = T,
                   language = "spanish")
vocab <- create_vocabulary(it_train, ngram = c(1L, 10L))
# Keep only reasonably frequent terms (>= 10 occurrences, in >= 0.1% of docs).
vocab1 <- prune_vocabulary(vocab,
                           term_count_min = 10,
                           doc_proportion_min = 0.001)
vectorizer <- vocab_vectorizer(vocab1)
dtm_train <- create_dtm(it_train, vectorizer)
dim(dtm_train)
# Define the tfidf model.
tfidf <- TfIdf$new()
# Fit on the corpus and transform the document-term matrix with it.
dtm_train_tfidf <- fit_transform(dtm_train, tfidf)
class(dtm_train_tfidf)
dim(dtm_train_tfidf)
#==================================================================
# Dimensionality reduction of the text matrix.
# LSA: 10 latent semantic components on the TF-IDF matrix.
set.seed(12345)
lsa <- LatentSemanticAnalysis$new(n_topics = 10)
lsa_data <- lsa$fit_transform(dtm_train_tfidf)
# Quick visual screen of each component against the target.
for(i in 1:10){
  boxplot(lsa_data[,i]~datos_txt$NPS)
}
# LDA: 10 topics on raw counts (terms appearing in <= 30% of documents).
vocab2 <- prune_vocabulary(vocab,
                           term_count_min = 10,
                           doc_proportion_max = 0.3)
vectorizer2 <- vocab_vectorizer(vocab2)
lda_model <- LatentDirichletAllocation$new(n_topics = 10, vocabulary = vocab2)
dtm <- create_dtm(it_train, vectorizer2, type = "lda_c")
doc_topic_distr <-
  lda_model$fit_transform(dtm, n_iter = 1000, convergence_tol = 0.01,
                          check_convergence_every_n = 10)
# Topic proportions against the target.
for(i in 1:10){
  boxplot(doc_topic_distr[,i]~datos_txt$NPS)
}
# lda_model$plot()
#==================================================================
# t-SNE: embed the TF-IDF matrix in two dimensions.
coment_matrix <- as.matrix(dtm_train_tfidf)
library(Rtsne)
data_1 <- data.frame(coment_matrix,row.names = NULL)
set.seed(12345)
# Add negligible uniform jitter — presumably to break exact duplicate rows,
# which Rtsne rejects by default; TODO confirm.
data_1 <- sapply(data_1,
                 function(x)return(x + runif(length(x),-1e-10,1e-10)))
set.seed(12345)
tsne <- Rtsne(data_1, dims = 2, perplexity = 30, verbose = TRUE, max_iter = 1000)
tsne_data <- data.frame(tsne$Y)
names(tsne_data) <- c('tsn2_1','tsn2_2')
#==================================================================
# K-means clustering of the 2-D t-SNE embedding. An elbow plot guides the
# choice of k; kcca (flexclust) is used for the final fit because its fitted
# object supports predict() on new data.
set.seed(123)
# Total within-cluster sum of squares for k = 1..15 (elbow plot).
k.max <- 15 # Maximal number of clusters
wss <- sapply(1:k.max,
              function(k){kmeans(tsne_data, k, nstart = 10)$tot.withinss})
plot(1:k.max, wss,
     type="b", pch = 19, frame = FALSE,
     xlab="Number of clusters K",
     ylab="Total within-clusters sum of squares")
# NOTE(review): the elbow line is drawn at k = 3 but k = 4 is fitted below.
abline(v = 3, lty =2)
library(flexclust)
set.seed(12345)
cluster <- kcca(tsne_data, k = 4, kccaFamily("kmeans"))
image(cluster)
cluster_class <- predict(cluster, tsne_data)
# One dummy indicator per cluster (used later as model features).
cluster_1 <- ifelse(cluster_class==1,1,0)
cluster_2 <- ifelse(cluster_class==2,1,0)
cluster_3 <- ifelse(cluster_class==3,1,0)
cluster_4 <- ifelse(cluster_class==4,1,0)
# Euclidean distance of every observation to each of the four centroids.
medias_cluster <- cluster@centers
dist_1 <- apply(tsne_data,
                1,
                function(x){
                  sqrt(sum((x-as.numeric(medias_cluster[1,]))^2))
                })
dist_2 <- apply(tsne_data,
                1,
                function(x){
                  sqrt(sum((x-as.numeric(medias_cluster[2,]))^2))
                })
dist_3 <- apply(tsne_data,
                1,
                function(x){
                  sqrt(sum((x-as.numeric(medias_cluster[3,]))^2))
                })
dist_4 <- apply(tsne_data,
                1,
                function(x){
                  sqrt(sum((x-as.numeric(medias_cluster[4,]))^2))
                })
# Distance of each observation to the centroid of ITS OWN cluster.
# BUG FIX: the original filled every branch from dist_1, so observations in
# clusters 2-4 got their distance to centroid 1 instead of to their own
# centroid. Observations in cluster 1 already hold dist_1 from the copy.
distance_cluster <- dist_1
distance_cluster[cluster_class==2] <- dist_2[cluster_class==2]
distance_cluster[cluster_class==3] <- dist_3[cluster_class==3]
distance_cluster[cluster_class==4] <- dist_4[cluster_class==4]
#===============================================================================
# Objects created so far (inputs to the modelling table below):
#   dtm_train_tfidf  - TF-IDF document-term matrix
#   lsa_data         - 10 LSA component scores
#   doc_topic_distr  - 10 LDA topic proportions
#   tsne_data        - 2-D t-SNE embedding
#   distance_cluster - distance to own cluster centroid
#   cluster_1..4     - cluster membership dummies
#   id, target       - key / outcome column names
# One-hot encode campus and study level.
# NOTE(review): nombre_campus is a factor; `== 1` compares against the level
# labels "1".."4" — confirm the campus codes really are 1-4.
campus_1 <- ifelse(datos_txt$nombre_campus==1,1,0)
campus_2 <- ifelse(datos_txt$nombre_campus==2,1,0)
campus_3 <- ifelse(datos_txt$nombre_campus==3,1,0)
campus_4 <- ifelse(datos_txt$nombre_campus==4,1,0)
nivel_ac <- ifelse(datos_txt$nivel == 'AC', 1, 0)
nivel_fc <- ifelse(datos_txt$nivel == 'FC', 1, 0)
nivel_online <- ifelse(datos_txt$nivel == 'ON LINE', 1, 0)
nivel_presencial <- ifelse(datos_txt$nivel == 'PRESENCIAL', 1, 0)
# Assemble the modelling table: one row per respondent, text features plus
# the recoded survey covariates.
datos_modeliza <- data.frame(
  COD_ENCUESTADO = datos_txt[,id],
  NPS = datos_txt[,target],
  as.matrix(dtm_train_tfidf),
  lsa_data,
  doc_topic_distr,
  tsne_data,
  cluster_1,
  cluster_2,
  cluster_3,
  cluster_4,
  distance_cluster,
  ind_deportista = datos_txt[,'ind_deportista'],
  ind_delegado = datos_txt[,'ind_delegado'],
  campus_1,
  campus_2,
  campus_3,
  campus_4,
  ind_gea = datos_txt[,'ind_gea'],
  ciclo_estudio = datos_txt[,'ciclo'],
  clave_carrera = datos_txt[,'clave_carrera'],
  s_ingles = datos_txt[,'s_ingles'],
  na_s_ingles = datos_txt[,'na_s_ingles'],
  nivel_ac,
  nivel_fc,
  nivel_online,
  nivel_presencial,
  longitud = datos_txt[,'longitud'],
  ind_number_text = datos_txt[,'ind_number_text']
)
# Impute missing s_ingles with a random-forest regression on all other
# features (trained on the complete cases, applied to the missing rows).
library(ranger)
set.seed(12345)
ranger_imputa <- ranger(formula = s_ingles ~ .-COD_ENCUESTADO-NPS,
                        data = datos_modeliza[!is.na(datos_modeliza$s_ingles),],
                        # mtry = mtry_opt,
                        num.trees = 500,
                        num.threads = 4,
                        write.forest = T,
                        importance = 'impurity')
barplot(importance(ranger_imputa))
s_ingles_imputa <- predict(ranger_imputa,datos_modeliza)$predictions
datos_modeliza$s_ingles[is.na(datos_modeliza$s_ingles)] <-
  s_ingles_imputa[is.na(datos_modeliza$s_ingles)]
# Train-vs-test density overlays for every feature column, written to a PDF
# (blue = labelled/train rows, red = unlabelled/test rows).
dev.off()
pdf("histogramas.pdf",width=7,height=5)
for(i in 3:3075){
  plot(density(datos_modeliza[!is.na(datos_modeliza[,target]),i]),col='blue')
  lines(density(datos_modeliza[is.na(datos_modeliza[,target]),i]),col='red')
  print(i)
}
dev.off()
#===============================================================================
# Stratified 5-fold CV indices over the labelled rows: one fold set for the
# base models (seed 0) and a separate set for the ensembling stage (seed 12345).
library(rBayesianOptimization)
# NOTE(review): `!is.na(!x)` is equivalent to `!is.na(x)` here (negating a
# numeric vector preserves its NAs), but the inner `!` looks like a typo.
cv_folds_ensable <- KFold(datos_modeliza[!is.na(!datos_modeliza[,target]),target],
                          nfolds = 5, stratified = TRUE, seed = 12345)
cv_folds <- KFold(datos_modeliza[!is.na(!datos_modeliza[,target]),target],
                  nfolds = 5, stratified = TRUE, seed = 0)
#===============================================================================
# Persist everything the modelling stage needs.
setwd('C:/Users/Cesar Quezada/Dropbox/KAGGLE/01. Concurso 01')
lista_subir <- list(datos_modeliza = datos_modeliza,
                    cv_folds_ensable = cv_folds_ensable,
                    cv_folds = cv_folds,
                    id = id,
                    target = target)
saveRDS(lista_subir, "lista_subir_final.rds")
#===============================================================================
#===============================================================================
#===============================================================================
#===============================================================================
# Modelling stage: start from a clean session and reload the prepared data.
rm(list=ls())
gc()
setwd('C:/Users/Cesar Quezada/Dropbox/KAGGLE/01. Concurso 01')
lista_datos <- readRDS('lista_subir_final.rds')
datos_modeliza <- lista_datos$datos_modeliza
cv_folds_ensable <- lista_datos$cv_folds_ensable
cv_folds <- lista_datos$cv_folds
id <- lista_datos$id
target <- lista_datos$target
rm(lista_datos)
gc()
#==================================================================
library(xgboost)
# Build the xgboost matrices: labelled rows (classes shifted to 0-based, as
# xgboost requires) for training, unlabelled rows for prediction.
dtrain <- xgb.DMatrix(data = as.matrix(datos_modeliza[!is.na(datos_modeliza[,target]),
                                                      !names(datos_modeliza)%in%c(id,target)]),
                      label = as.numeric(datos_modeliza[!is.na(datos_modeliza$NPS),target])-1,
                      missing = NA)
dtest <- xgb.DMatrix(data = as.matrix(datos_modeliza[is.na(datos_modeliza[,target]),
                                                     !names(datos_modeliza)%in%c(id,target)]),
                     missing = NA)
gc()
#================================================================================
# Objective function for the Bayesian hyper-parameter search.
library(rBayesianOptimization)
# Objective function for Bayesian hyper-parameter optimisation of xgboost.
# Runs cross-validation (using the globals `dtrain` and `cv_folds`) for one
# hyper-parameter combination and returns the score to be MAXIMISED by
# rBayesianOptimization: minus the penalised multiclass log-loss (mean CV
# log-loss plus one standard deviation at the best iteration).
xgb_cv_bayes <- function(max_depth, eta, gamma, subsample, colsample_bytree) {
  booster_params <- list(objective = "multi:softprob",
                         eval_metric = 'mlogloss',
                         "num_class" = 4,
                         booster = "gbtree",
                         max_depth = max_depth,
                         eta = eta,
                         gamma = gamma,
                         subsample = subsample,
                         colsample_bytree = colsample_bytree)
  cv_result <- xgb.cv(params = booster_params,
                      data = dtrain, nround = 10000,
                      folds = cv_folds, prediction = TRUE, showsd = TRUE,
                      early_stopping_rounds = 200,
                      verbose = TRUE,
                      print_every_n = 10)
  # Penalise unstable fits: mean CV log-loss plus one standard deviation.
  eval_log <- cv_result$evaluation_log
  penalised_loss <- eval_log$test_mlogloss_mean + eval_log$test_mlogloss_std
  # The optimiser maximises, so negate the loss we want minimised.
  list(Score = -min(penalised_loss), Pred = cv_result$pred, model = cv_result)
}
aa <- xgb_cv_bayes(5,0.01,0.1,0.5,0.5) # smoke-test of the objective at one hyper-parameter point
#================================================================================
# Final models with the tuned hyper-parameters.
max_depth <- 5
eta <- 0.01
gamma <- 0.1
subsample <- 0.5
colsample_bytree <- 0.5
nround <- 3706 # best iteration found during cross-validation
param_opt <- list(objective = "multi:softprob",
                  eval_metric = 'mlogloss',
                  "num_class" = 4,
                  booster = "gbtree",
                  max_depth = max_depth,
                  eta = eta,
                  gamma = gamma,
                  subsample = subsample,
                  colsample_bytree = colsample_bytree)
# Phase 1: train 5 models differing only in the random seed, and save each.
xgb_tree <- lapply(seq(1:5),function(x){
  set.seed(10*x)
  model <- xgb.train(params = param_opt, data = dtrain,
                     nround = nround)
  print(model)
  return(model)
})
xgb.save(xgb_tree[[1]],'xgb_1_20170724')
xgb.save(xgb_tree[[2]],'xgb_2_20170724')
xgb.save(xgb_tree[[3]],'xgb_3_20170724')
xgb.save(xgb_tree[[4]],'xgb_4_20170724')
xgb.save(xgb_tree[[5]],'xgb_5_20170724')
importancia1 <- xgb.importance(model=xgb_tree[[1]],
                               feature_names = names(datos_modeliza)[!names(datos_modeliza)%in%c(id,target)])
View(importancia1)
# Average the five seeds' class probabilities on the test rows.
# NOTE(review): ncol = 8427 hard-codes the number of test rows; predict()
# returns the 4 class probabilities interleaved per row, hence the reshape.
prob_tree <- lapply(xgb_tree,function(x){
  pred <- t(matrix(predict(x, dtest),nrow=4,ncol=8427))
  print('aaa')
  return(pred)
})
prob_tree_1 <- Reduce('+',prob_tree)
prob_tree_1 <- prob_tree_1/5
prob_tree_1 <- data.frame(prob_tree_1)
colnames(prob_tree_1) <- c(1,2,3,4)
# Build and write the submission file (id + four class probabilities).
data_subir <- data.frame(COD_ENCUESTADO = datos_modeliza[is.na(datos_modeliza$NPS),
                                                         id],
                         prob_tree_1)
names(data_subir) <- c('COD_ENCUESTADO','1','2','3','4')
write.csv(data_subir,'subida_xgb_20170724.csv',row.names = F)
|
386dea5912e2d364249bf78725667273146ad839 | 36f2ea4eec9d53b985d7c4a6fc0a6253b4fad3bd | /R/voter_profile.R | 690df76e74be594bf7c19f969078f27d93d90478 | [] | no_license | cran/electionsBR | e887429e825b6ab169bef84331133017d1622ce2 | 89e125adf0ea55b6f4ad87fd88f29cc11d900a51 | refs/heads/master | 2021-07-12T09:02:46.741519 | 2021-01-30T12:50:02 | 2021-01-30T12:50:02 | 66,004,313 | 3 | 0 | null | null | null | null | UTF-8 | R | false | false | 3,793 | r | voter_profile.R | #' Download data on the voters' profile
#'
#' \code{voter_profile()} downloads and cleans data on the voters' profile aggregated by state, city and electoral zone.
#' The function returns a \code{data.frame} where each observation corresponds to a voter profile type.
#'
#' @param year Election year (\code{integer}). For this function, the following years are available: 1994, 1996, 1998,
#' 2000, 2002, 2004, 2006, 2008, 2010, 2012, 2014, 2016, 2018 and 2020.
#'
#' @param ascii (\code{logical}). Should the text be transformed from Latin-1 to ASCII format?
#'
#' @param encoding Data original encoding (defaults to 'windows-1252'). This can be changed to avoid errors
#' when \code{ascii = TRUE}.
#'
#' @param export (\code{logical}). Should the downloaded data be saved in .dta and .sav in the current directory?
#'
#' @param temp (\code{logical}). If \code{TRUE}, keep the temporary compressed file for future use (recommended)
#'
#' @details If export is set to \code{TRUE}, the downloaded data is saved as .dta and .sav
#' files in the current directory.
#'
#' @return \code{voter_profile()} returns a \code{data.frame} with the following variables:
#'
#' \itemize{
#' \item PERIODO: Election year.
#' \item UF: Units of the Federation's acronym in which occurred the election.
#' \item MUNICIPIO: Municipality name.
#' \item COD_MUNICIPIO_TSE: Municipal's Supreme Electoral Court code (number).
#' \item NR_ZONA: Electoral zone's Supreme Electoral Court code (number).
#' \item SEXO: Voters' sex.
#' \item FAIXA_ETARIA: Voters' age group.
#' \item GRAU_DE_ESCOLARIDADE: Voters' education degree.
#' \item QTD_ELEITORES_NO_PERFIL: Absolute number of voters.
#' }
#'
#' @import utils
#' @importFrom magrittr "%>%"
#' @export
#' @examples
#' \dontrun{
#' df <- voter_profile(2002)
#' }
voter_profile <- function(year, ascii = FALSE,
encoding = "windows-1252",
export = FALSE,
temp = TRUE){
# Inputs
if(!year %in% seq(1994, 2020, by = 2)) stop("Invalid 'year'. Please check the documentation and try again.")
test_encoding(encoding)
#if(year == 2020){
# urldir <- "http://agencia.tse.jus.br/estatistica/sead/odsele/perfil_eleitorado/perfil_eleitorado_ATUAL.zip"
#} else{
# urldir <- sprintf("http://agencia.tse.jus.br/estatistica/sead/odsele/perfil_eleitorado/perfil_eleitorado_%s.zip", year)
# }
filenames <- paste0("/perfil_eleitorado_", year, ".zip")
dados <- paste0(file.path(tempdir()), filenames)
url <- "https://cdn.tse.jus.br/estatistica/sead/odsele/perfil_eleitorado%s"
# Downloads the data
download_unzip(url, dados, filenames, year)
# remover temp file
if(temp == FALSE){
unlink(dados)
}
setwd(as.character(year))
archive <- Sys.glob("*")[grepl(".pdf", Sys.glob("*")) == FALSE] %>%
file.info() %>%
.[.$size > 200, ] %>%
row.names()
if(grepl(".csv", archive[1])){
test_col_names <- TRUE
}else{
test_col_names <- FALSE
}
banco <- readr::read_delim(archive, col_names = test_col_names, delim = ";", locale = readr::locale(encoding = encoding), col_types = readr::cols(), progress = F) %>%
dplyr::as_tibble()
setwd("..")
unlink(as.character(year), recursive = T)
# Change variable names
if(year < 2016){
names(banco) <- c("PERIODO", "UF", "MUNICIPIO", "COD_MUNICIPIO_TSE", "NR_ZONA",
"SEXO", "FAIXA_ETARIA", "GRAU_DE_ESCOLARIDADE", "QTD_ELEITORES_NO_PERFIL")
}
# Change to ascii
if(ascii) banco <- to_ascii(banco, encoding)
# Export
if(export) export_data(banco)
message("Done.\n")
banco
}
|
2d7bf16b4ff3e0e206653e93b8c6571edac029c4 | 0701fd92013875af4a3d2bd86e30cd04a1388758 | /R/filter_cip.R | 08187321ce421f540eefcedf0382a5462acec65e | [
"MIT"
] | permissive | MIDFIELDR/midfieldr | cc6a90c4b40c012e0a5795ff4ddd273cf38b6355 | 90cfecec130ac806484f72bcc9bc689bdc8df9ac | refs/heads/main | 2023-08-18T17:04:37.287531 | 2023-08-13T18:45:19 | 2023-08-13T18:45:19 | 113,693,328 | 2 | 1 | null | null | null | null | UTF-8 | R | false | false | 5,199 | r | filter_cip.R |
#' Subset rows that include matches to search strings
#'
#' Subset a CIP data frame, retaining rows that match or partially match a
#' vector of character strings. Columns are not subset unless selected in an
#' optional argument.
#'
#' Search terms can include regular expressions. Uses `grepl()`, therefore
#' non-character columns (if any) that can be coerced to character are also
#' searched for matches. Columns are subset by the values in `select` after the
#' search concludes.
#'
#' If none of the optional arguments are specified, the function returns the
#' original data frame.
#'
#' @param keep_text Character vector of search text for retaining rows,
#' not case-sensitive. Can be empty if `drop_text` is used.
#' @param ... Not used, force later arguments to be used by name
#' @param drop_text Optional character vector of search text for dropping
#' rows, default NULL.
#' @param cip Data frame to be searched. Default `cip`.
#' @param select Optional character vector of column names to return,
#' default all columns.
#' @return A `data.table` subset of `cip` with the following properties:
#' \itemize{
#' \item Rows matching elements of `keep_text` but excluding rows
#' matching elements of `drop_text`.
#' \item All columns or those specified by `select`.
#' \item Grouping structures are not preserved.
#' }
#'
#'
#' @family filter_*
#'
#'
#' @example man/examples/filter_cip_exa.R
#'
#'
#' @export
#'
#'
filter_cip <- function(keep_text = NULL,
                       ...,
                       drop_text = NULL,
                       cip = NULL,
                       select = NULL) {
    # Subset rows of a CIP data frame by keep/drop search text, then subset
    # columns by `select`. Returns a data.table; grouping is not preserved.
    # Errors if the search eliminates every row; messages about unmatched terms.

    # assert arguments after dots used by name
    wrapr::stop_if_dot_args(
        substitute(list(...)),
        paste(
            "Arguments after ... must be named.\n",
            "* Did you forget to write `drop_text = ` or `select = `?\n *"
        )
    )
    # optional arguments: default to the packaged CIP table and all columns
    cipx <- cip %?% midfieldr::cip
    dframe <- copy(cipx)
    select <- select %?% names(dframe)
    # remove all keys so the result carries no residual data.table key
    on.exit(setkey(dframe, NULL))
    # required argument: a data frame with at least one row
    qassert(dframe, "d+")
    # return if no work is being done
    # (scalar conditions, so use short-circuiting && rather than elementwise &)
    if (identical(select, names(dframe)) &&
        is.null(keep_text) &&
        is.null(drop_text)) {
        setkey(dframe, NULL)
        return(dframe)
    }
    # assertions for optional arguments
    qassert(select, "s+") # missing is OK
    if (!is.null(keep_text)) qassert(keep_text, "s+")
    if (!is.null(drop_text)) qassert(drop_text, "s+")
    # input modified (or not) by reference
    setDT(dframe)
    # bind names due to NSE notes in R CMD check
    cip <- NULL
    # do the work: keep rows matching keep_text, then drop rows matching drop_text
    dframe <- filter_char_frame(
        dframe = dframe,
        keep_text = keep_text,
        drop_text = drop_text
    )
    # stop if all rows have been eliminated
    # (nrow() is an exact integer, so compare directly rather than via epsilon)
    if (nrow(dframe) == 0L) {
        stop(
            paste(
                "The search result is empty. Possible causes are:\n",
                "* 'cip' contained no matches to terms in 'keep_text'.\n",
                "* 'drop_text' eliminated all remaining rows."
            ),
            call. = FALSE
        )
    }
    # message if a search term was not found:
    # build a logical data frame with one column per keep_text term and
    # one row per remaining data row
    df <- data.frame(matrix("", nrow = nrow(dframe), ncol = length(keep_text)))
    names(df) <- keep_text
    for (j in seq_along(keep_text)) {
        df[, j] <- apply(dframe, 1, function(i) {
            any(grepl(keep_text[j], i, ignore.case = TRUE))
        })
    }
    # a column sum of 0 means that term matched no rows at all
    sumTF <- colSums(df)
    not_found <- sumTF[sumTF == 0]
    if (length(not_found) > 0) {
        message(paste(
            "Can't find these terms:",
            paste(names(not_found), collapse = ", ")
        ))
    }
    # subset columns
    dframe <- dframe[, .SD, .SDcols = select]
    # enable printing (see data.table FAQ 2.23)
    dframe[]
}
# ------------------------------------------------------------------------
# Subset rows of character data frame by matching keep_texts
# dframe data frame of character variables
# keep_text character vector of search keep_texts for retaining rows
# drop_text character vector of search keep_texts for dropping rows
filter_char_frame <- function(dframe,
                              keep_text = NULL,
                              drop_text = NULL) {
    # Row-filter a character data frame: first retain rows matching any
    # keep_text pattern (case-insensitive, patterns OR-ed together), then
    # discard rows matching any drop_text pattern. Returns a data.table.
    result <- as.data.table(dframe)
    # retain rows that match at least one keep pattern
    if (length(keep_text) > 0) {
        keep_pattern <- paste0(keep_text, collapse = "|")
        keep_rows <- apply(result, 1, function(row_values) {
            any(grepl(keep_pattern, row_values, ignore.case = TRUE))
        })
        result <- result[keep_rows, ]
    }
    # discard rows that match any drop pattern
    if (length(drop_text) > 0) {
        drop_pattern <- paste0(drop_text, collapse = "|")
        drop_rows <- apply(result, 1, function(row_values) {
            any(grepl(drop_pattern, row_values, ignore.case = TRUE))
        })
        result <- result[!drop_rows, ]
    }
    return(result)
}
|
700acdbff9e7d7ef2a31d8d3d0f7b223361454d1 | 843e69e884fb254866fb320d332725b126e5d23e | /TandemRepeatsAnalysis.R | 639144e84c20e988a036b659797620adc6499c27 | [] | no_license | Aragret/comparative_genomics_OLD | b86d31f97ab45223319120de2f32937549922533 | e9cbfa028ca7c29dddd73d1fb79156b164b16734 | refs/heads/master | 2020-03-09T18:11:59.363689 | 2018-04-10T18:23:43 | 2018-04-10T18:23:43 | 128,926,456 | 1 | 0 | null | null | null | null | UTF-8 | R | false | false | 3,103 | r | TandemRepeatsAnalysis.R | # вот моя строчка
rm(list=ls(all=TRUE)) # remove everything from R memory (old variables, datasets...)
# NOTE(review): rm(list = ls()) and a hard-coded setwd() are fragile outside
# the author's machine; acceptable for a one-off analysis script.
user = 'Alina'
if(user == 'Alina'){
  setwd('/home/aragret/Alina/COMPARATIVE_GENOMICS')
}
# Tandem Repeats Finder output table, presumably one row per detected repeat
# region (has PercentMatches, RepeatsRegion and Species columns).
tr = read.table('Derived_data/TRFinder.txt', sep='\t', header=TRUE)
# Distribution of percent identity among repeat copies.
hist(tr$PercentMatches)
countCharOccurrences <- function(char, s) {
  # Count the literal occurrences of `char` in string(s) `s` (vectorized).
  # `fixed = TRUE` ensures regex metacharacters (e.g. ".") are counted
  # literally; callers pass plain nucleotides (A/C/G/T), so their results
  # are unchanged.
  s <- as.character(s)
  stripped <- gsub(char, "", s, fixed = TRUE)
  nchar(s) - nchar(stripped)
}
# Per-repeat nucleotide composition. NOTE(review): this row-by-row loop with
# incremental tr$A[i] <- ... assignment copies the data frame repeatedly;
# vectorizing countCharOccurrences() over the whole column would be faster.
for(i in 1:nrow(tr)){
  # i = 1
  tr$A[i] = countCharOccurrences('A', as.character(tr$RepeatsRegion[i]))
  tr$G[i] = countCharOccurrences('G', as.character(tr$RepeatsRegion[i]))
  tr$C[i] = countCharOccurrences('C', as.character(tr$RepeatsRegion[i]))
  # 'Ti', presumably to avoid a column literally named T (the TRUE shorthand)
  tr$Ti[i] = countCharOccurrences('T', as.character(tr$RepeatsRegion[i]))
  tr$ATCont[i] = (tr$A[i] + tr$Ti[i]) / nchar(as.character(tr$RepeatsRegion[i]))
  tr$GCCont[i] = (tr$G[i] + tr$C[i]) / nchar(as.character(tr$RepeatsRegion[i]))
}
# Split repeats by conservation: >= 95% matches labelled "young", rest "old".
young_repeats = tr[tr$PercentMatches >= 95,]
old_repeats = tr[tr$PercentMatches < 95,]
# write.table(young_repeats, 'Derived_data/YoungTandemRepeats.txt', sep='\t', row.names = F)
# write.table(old_repeats, 'Derived_data/OldTandemRepeats.txt', sep='\t', row.names = F)
library(ggplot2)
library(gridExtra)
# AT-content distributions: all repeats, then young vs old side by side.
ggplot(tr, aes(ATCont)) + geom_histogram(col="red", fill="green", alpha = .2)
plot1 = ggplot(young_repeats, aes(ATCont)) + geom_histogram(col="red", fill="green", alpha = .2)
plot2 = ggplot(old_repeats, aes(ATCont)) + geom_histogram(col="red", fill="green", alpha = .2)
grid.arrange(plot1, plot2, ncol=2)
# Same comparison for GC content.
plot1 = ggplot(young_repeats, aes(GCCont)) + geom_histogram(col="red", fill="green", alpha = .2)
plot2 = ggplot(old_repeats, aes(GCCont)) + geom_histogram(col="red", fill="green", alpha = .2)
grid.arrange(plot1, plot2, ncol=2)
# Do young and old repeats differ in AT content? (rank-based and t tests)
wilcox.test(young_repeats$ATCont, old_repeats$ATCont)
t.test(young_repeats$ATCont, old_repeats$ATCont)
plot1 = ggplot(young_repeats, aes('1', ATCont)) + geom_boxplot(col="red", fill="green", alpha = .2)
plot2 = ggplot(old_repeats, aes('2', ATCont)) + geom_boxplot(col="red", fill="green", alpha = .2)
grid.arrange(plot1, plot2, ncol=2)
##############################################################################
### repeats within each species
# Counts of species represented overall and within each age class.
length(unique(tr$Species)) # 2040
length(unique(young_repeats$Species)) # 1311
length(unique(old_repeats$Species)) # 1295
# For every species with more than one young AND more than one old repeat,
# compare AT content of young vs old repeats with a Wilcoxon rank-sum test.
# NOTE(review): growing one_line with rbind() inside the loop is O(n^2);
# collecting rows in a list and binding once at the end would scale better.
one_line = c()
for(sp in unique(tr$Species)){
  young_sp = young_repeats[young_repeats$Species == as.character(sp),]
  old_sp = old_repeats[old_repeats$Species == as.character(sp),]
  if((nrow(old_sp) > 1) & (nrow(young_sp) > 1)){
    # merge() on the duplicated Species key forms all young x old row pairs.
    a = merge(young_sp, old_sp, by='Species')
    a = a[, c('ATCont.x', 'ATCont.y')]
    test = wilcox.test(a[,1], a[,2])
    one_line = rbind(one_line, c(sp, nrow(young_sp), nrow(old_sp), test$statistic, test$p.value))
  }
}
result = as.data.frame(one_line)
# NOTE(review): the 'OldSpeciesNumber' column actually holds the old *repeat*
# count for the species -- name looks like a typo, verify before relying on it.
names(result) = c('Species', 'YoungRepeatsNumber', 'OldSpeciesNumber', 'W', 'PValue')
write.table(result, 'Derived_data/ATContentInYoungOldTR.txt', sep='\t', quote=FALSE,
            row.names = FALSE)
|
13c13bde3eb216663a486c6228eff5429da6bab5 | 5ae175b758a82b868eb44b769de3c97c2b3cbd8d | /Chage Point Model/Code convert.R | 7ae4424f1dc7d37bcb97ebf76c6bfc77ca8744ee | [] | no_license | kstatju/Social-Network-Analysis-and-Change-Point-Model | 78e131ff35ff80374d52182e6bad2358f19765f8 | 546392da7ec907dcbbb7629c69fe13ef33981ac2 | refs/heads/master | 2023-02-05T12:10:00.700717 | 2020-12-16T01:50:41 | 2020-12-16T01:50:41 | 82,312,234 | 0 | 1 | null | null | null | null | UTF-8 | R | false | false | 5,547 | r | Code convert.R | # implicit none
# real(8), allocatable :: data(:), xbar(:,:), s(:,:), dev(:), dev2(:), &
# fract(:,:,:), window(:)
# real(4), allocatable :: tmax(:,:)
# integer :: nord, nwind, ignore, i, j, k, n, isim, nsim, ij, kl, where, &
# nsurv, last, iglist(5) = (/0, 20, 25, 30, 40 /), ig, ialf, &
# nordlist(5) = (/2, 3, 4, 5, 10/), inord, outer
# real(8) :: one = 1.d0, zero = 0.d0, rat, denom, count, spl, splmax, tsq,&
# tsqmax, getper, alph(5) = (/0.99, 0.995, 0.998, 0.999, 0.9995/)
# logical(1), allocatable :: alive(:)
#
# logical :: first
# Constants carried over from the Fortran program in the commented
# declarations above: candidate "ignore" burn-in lengths, candidate
# dimensions p, and confidence levels for the control-chart fractiles.
iglist = c(0, 20, 25, 30, 40)
nordlist = c(2, 3, 4, 5, 10)
alph = c(0.99, 0.995, 0.998, 0.999, 0.9995)
# NOTE(review): bare expression -- only prints the tail probabilities when
# run interactively; it has no effect in a sourced script.
1-alph
# NOTE(review): ij/kl look like RNG seeds in the Fortran original but are
# later passed to rnorm() as mean/sd (see the randn conversion below) --
# verify this conversion before trusting results.
ij = 10
kl = 11
zero = 0.0
one = 1.0
nsim = 200000   # number of simulation replicates
nwind = 1000    # window length
nord = 5        # dimension p; overwritten inside the loop below
first = TRUE
for (outer in 1:20){
for (inord in 5:1){
nord = nordlist[inord]
# if (!first){deallocate (data, xbar, s, dev, dev2, tmax, fract, &
# window, alive)}
first = FALSE
# allocate (data(nord), xbar(nord,0:nwind), s(nord,nord), dev(nord), &
# dev2(nord), tmax(nsim,nwind), fract(nwind, 5, 5), window(nsim), &
# alive(nsim))
iglist[1] = nord + 1
xbar = array(data = NA, dim = c(nord,nwind+1))
s = array(data = NA, dim = c(nord,nord))
tmax = array(data = -1, dim = c(nsim,nwind))
alive = array(data = TRUE, dim = nsim)
window = array(data = NA, dim = nsim)
fract = array(data = NA, dim = c(nwind, 5, 5))
dev2 = array(data = NA, dim = nord)
for (isim in 1:nsim){
xbar[,1] = zero
s[,] = zero
count = zero
for (n in 2:(nwind+1)){
#call randn(data, nord, ij, kl) ********************************
data = rnorm(nord, ij, kl)
# ! write(*,'(5f8.4)') data
count = count + one
dev = data - xbar[,n-1]
xbar[,n] = xbar[,n-1] + dev / count
if(n-iglist[1] <= 1) { #! Build up S
dev2[] = data - xbar[,n]
for (j in 1:nord){
s[,j] = s[,j] + dev * dev2[j]
}
if (n - iglist[1] == 1) { #! Invert S
for (j in 1:nord){
call sweep(s, nord, j, one) ****************************************
}
s = -s
}
}
if(n-iglist[1] >= 2) { # ! Production. Update inverse
for (j in 1:nord){
dev2[j] = s[,j] %*% dev
}
rat = (count - one) / count
denom = one + rat * (dev %*% dev2)
for (j in 1:nord){
s[,j] = s[,j] - rat * dev2 * dev2[j] / denom
}
}
if (n > iglist[1]) {
splmax = 0
for (k in 1:(n-1)){
dev = xbar[,k] - xbar[,n]
for (j in 1:nord){
dev2[j] = s[,j] %*% dev
}
rat = n * k / (n-k)
spl = rat * (dev %*% dev2)
tsq = spl / (one - spl) * (n-2)
# ! if (n == nwind) write(*,'(i5, 2f9.3)') k, spl, tsq
if (spl > splmax) {
splmax = spl
where = k
}
}
tsqmax = splmax / (one - splmax) * (n - 2)
tmax[isim, n] = tsqmax
# ! write(*,'(2i5, 2f12.3)') isim, n, splmax, tsqmax
if (n < nwind){
cycle
}
}
}
}
# !
# ! harvest
# !
fract[,,] = -99
for (ig in 1:5){
for (ialf in 1:5){
alive[] = TRUE
nsurv = nsim
for (i in (iglist[ig]+1):nwind){
# ! write(*,'(10f8.2)') tmax(:,i)
last = nsurv
nsurv = 0
for (j in 1:nsim){
if (alive[j]) {
nsurv = nsurv + 1
window[nsurv] = tmax[j,i]
}
}
if (nsurv > 0){
fract[i, ig, ialf] = getper(window, nsurv, alph[ialf]) ******************************
}
# ! write(*,'(4i8,2f9.6,g15.7)') i, nsurv, ig, ialf, real(nsurv)/real(last), &
# ! fract(i, ig, ialf)
for (j in 1:nsim){
alive[j] = alive[j] * tmax[j,i] < fract[i, ig, ialf]
}
}
}
}
# ! open(20, file='fractab.out', action='write', position='append')
#open(20, file='fractab.lng', action='write', position='append')
for (ialf in 1:5){
line=c(nord, alph[ialf], iglist)
write(line,file="C:/Users/ka746940/Desktop/UCF/STA 6908 - Edgard Maboudou/R code/myfile.txt",append=TRUE)
#write(*,'('' p='', i5, '' alpha = '',f8.6/'' n ignore'',i3,10i14)') &
# nord, alph(ialf), iglist
for (i in (iglist[1] + 1):nwind){
line1=c(i, fract[i, , ialf])
write(line1,file="C:/Users/ka746940/Desktop/UCF/STA 6908 - Edgard Maboudou/R code/myfile1.txt",append=TRUE)
line2=c(nord, ialf, i, fract[i, , ialf])
write(line2,file="C:/Users/ka746940/Desktop/UCF/STA 6908 - Edgard Maboudou/R code/myfile2.txt",append=TRUE)
#write(*, '(i5,3x,5g14.5)') i, fract(i, :, ialf)
#write(20, '(3i5,5g14.5)') nord, ialf, i, fract(i, :, ialf)
}
}
}
}
|
3a5312434d465915fa0d8dbe9793c8af52e6682b | db0e244c6d3c1aa0bef5d5906750d8f94c388387 | /analyses/que_es_un_consens/prepare_data.R | 9e2488cee99502b0eeb7704d73f36e40ac6d6617 | [
"MIT"
] | permissive | joebrew/vilaweb | ea9796aa7a5d4f0676608618ba975dac95346000 | f0b028c07484c750d75a101308c3937d81b40d80 | refs/heads/master | 2021-06-09T02:00:21.502677 | 2020-09-07T22:20:03 | 2020-09-07T22:20:03 | 159,472,849 | 23 | 9 | NOASSERTION | 2021-06-01T23:59:21 | 2018-11-28T09:01:42 | HTML | UTF-8 | R | false | false | 19,518 | r | prepare_data.R | # Libraries
library(vilaweb)
library(tidyverse)
library(ggplot2)
library(tidyr)
# Functions
# Round `x` to the nearest multiple of `base`. Inherits round()'s
# round-half-to-even behavior for values exactly halfway between multiples.
mround <- function(x,base){
  nearest_multiple <- round(x / base)
  base * nearest_multiple
}
round_percent <- function(x) {
x <- x/sum(x)*100 # Standardize result
res <- floor(x) # Find integer bits
rsum <- sum(res) # Find out how much we are missing
if(rsum<100) {
# Distribute points based on remainders and a random tie breaker
o <- order(x%%1, sample(length(x)), decreasing=TRUE)
res[o[1:(100-rsum)]] <- res[o[1:(100-rsum)]]+1
}
res
}
numberfy <- function(x){
gsub(',', '.', scales::comma(x), fixed = TRUE)
}
# Get most recent CEO data
ceo_june_2019 <- vilaweb::ceo_june_2019
# Get age range
get_details <- function(df){
df %>%
mutate(referendum = `Fins a quin punt està d’acord o en desacord amb cadascuna de les següents afirmacions: Catalunya té el dret de celebrar un referèndum d'autodeterminació`) %>%
mutate(avis = as.character(`Quants dels seus avis/àvies van néixer a Catalunya?`)) %>%
mutate(avis = ifelse(avis == 'Cap', '0',
ifelse(avis == 'Un', '1',
ifelse(avis == 'Dos', '2',
ifelse(avis == 'Tres', '3',
ifelse(avis == 'Quatre', '4', NA)))))) %>%
mutate(avis = as.numeric(avis)) %>%
mutate(pare_cat = `Em podria dir el lloc de naixement del seu pare?` == 'Catalunya',
pare_esp = `Em podria dir el lloc de naixement del seu pare?` == 'Altres comunitats autònomes',
mare_cat = `Em podria dir el lloc de naixement de la seva mare?` == 'Catalunya',
mare_esp = `Em podria dir el lloc de naixement de la seva mare?` == 'Altres comunitats autònomes') %>%
mutate(pare_cat = as.numeric(pare_cat),
pare_esp = as.numeric(pare_esp),
mare_cat = as.numeric(mare_cat),
mare_esp = as.numeric(mare_esp)) %>%
summarise(p50 = median(Edat),
avg = mean(Edat),
p75 = quantile(Edat, 0.75),
p25 = quantile(Edat, 0.25),
pcat = length(which(`Em podria dir on va néixer?` == 'Catalunya')) / n(),
pesp = length(which(`Em podria dir on va néixer?` == 'Altres comunitats autònomes')) / n(),
avis = mean(avis, na.rm = TRUE),
avisp50 = median(avis, na.rm = TRUE),
avisp75 = quantile(avis, 0.75, na.rm = TRUE),
avisp25 = quantile(avis, 0.25, na.rm = TRUE),
pares_cat = mean(pare_cat + mare_cat, na.rm = TRUE),
pares_esp = mean(pare_esp + mare_esp, na.rm = TRUE),
pares_catp25 = quantile(pare_cat + mare_cat, 0.25, na.rm = TRUE),
pares_catp75 = quantile(pare_cat + mare_cat, 0.75, na.rm = TRUE),
pares_espp25 = quantile(pare_esp + mare_esp, 0.25, na.rm = TRUE),
pares_espp75 = quantile(pare_esp + mare_esp, 0.75, na.rm = TRUE))
}
# new_ceo %>% group_by(`Any de realització del baròmetre`) %>%
# get_details() %>%
# bind_rows(
# ceo_june_2019 %>%
# group_by(`Any de realització del baròmetre`) %>%
# get_details()
# ) %>%
# View
# Transform data to combine
transform_data <- function(df){
language_dict <- tibble(input = c('Català (valencià / balear)', 'Castellà', 'Totes dues igual: català (valencià / balear) i castellà', 'Altres llengües o altres combinacions', 'Aranès', 'No ho sap', 'No contesta'),
output_ca = c('Català',
'Castellà',
'Cat+Cast',
'Altres',
'Català',
'NS/NC',
'NS/NC'),
output_en = c('Catalan',
'Spanish',
'Cat+Spa',
'Others',
'Catalan',
'No answer',
'No answer'))
convert_language <- function(x, ca = TRUE){
z <- tibble(input = x)
joined <- left_join(z, language_dict)
if(ca){
as.character(joined$output_ca)
} else {
as.character(joined$en)
}
}
v1 <- "Amb quina de les següents frases se sent més identificat: em sento només espanyol, més espanyol que català, tan espanyol com català, més català que espanyol o només català?"
v2 <- 'Amb quina de les següents frases,em sento només espanyol, més espanyol que català, tan espanyol com català, més català que espanyol o només català, se sent més identificat?'
if(v1 %in% names(df)){
df$identificacio <- unlist(df[,v1])
} else {
df$identificacio <- unlist(df[,v2])
}
ref_var <- "Fins a quin punt està d’acord o en desacord amb cadascuna de les següents afirmacions: Catalunya té el dret de celebrar un referèndum d'autodeterminació"
if(ref_var %in% names(df)){
vals <- unlist(df[,ref_var])
if(!all(is.na(vals))){
df$referendum <- vals
}
}
ref_var <- "Fins a quin punt està d’acord o en desacord amb l’afirmació següent: “Els catalans i les catalanes tenen dret a decidir el seu futur com a país votant en un referèndum”?"
if(ref_var %in% names(df)){
levs <- c("Molt d'acord", "D'acord", "Ni d'acord ni en desacord", "En desacord", "Molt en desacord", "No ho sap", "No contesta")
vals <- c(1:5, 98, 99)
dict <- tibble(vals, referendum = levs)
dict$referendum <- factor(dict$referendum, levels = levs)
new_vals <- tibble(vals = unlist(df[,ref_var]))
new_vals <- left_join(new_vals, dict)
if(!all(is.na(new_vals$referendum))){
df$referendum <- new_vals$referendum
}
}
ref_var <- "Fins a quin punt està d’acord o en desacord amb cadascuna de les següents afirmacions: Catalunya no té el dret de celebrar un referèndum d'autodeterminació"
if(ref_var %in% names(df)){
vals <- as.character(unlist(df[,ref_var]))
# Reverse
vals2 <- ifelse(vals == "D'acord", "En desacord",
ifelse(vals == "Molt d'acord", "Molt en desacord",
ifelse(vals == "En desacord", "D'acord",
ifelse(vals == "Molt en desacord", "Molt d'acord", vals))))
levs <- c("Molt d'acord", "D'acord", "Ni d'acord ni en desacord", "En desacord", "Molt en desacord", "No ho sap", "No contesta")
vals <- factor(vals2, levels = levs)
if(!all(is.na(vals))){
df$referendum <- vals
}
}
if(!'referendum' %in% names(df)){
df$referendum <- NA
}
df %>%
mutate(partit = `Em podria dir per quin partit sent més simpatia?`) %>%
mutate(any = `Any de realització del baròmetre`,
mes = `Mes de realització del baròmetre`) %>%
mutate(mes = ifelse(mes == 3 & any == 2014, 4, mes),
mes = ifelse(mes == 10 & any == 2014, 11, mes),
mes = ifelse(mes == 3 & any == 2015, 2, mes),
mes = ifelse(mes == 7 & any == 2017, 6, mes),
mes = ifelse(mes == 7 & any == 2018, 6, mes),
mes = ifelse(mes == 11 & any == 2018, 10, mes),
mes = ifelse(mes == 7 & any == 2019, 6, mes)) %>%
mutate(date = as.Date(paste0(any, '-', mes, '-15'))) %>%
mutate(avis = as.character(`Quants dels seus avis/àvies van néixer a Catalunya?`)) %>%
mutate(avis = ifelse(avis == 'Cap', '0',
ifelse(avis == 'Un', '1',
ifelse(avis == 'Dos', '2',
ifelse(avis == 'Tres', '3',
ifelse(avis == 'Quatre', '4', NA)))))) %>%
mutate(avis = as.numeric(avis)) %>%
mutate(pare_cat = `Em podria dir el lloc de naixement del seu pare?` == 'Catalunya',
pare_esp = `Em podria dir el lloc de naixement del seu pare?` == 'Altres comunitats autònomes',
mare_cat = `Em podria dir el lloc de naixement de la seva mare?` == 'Catalunya',
mare_esp = `Em podria dir el lloc de naixement de la seva mare?` == 'Altres comunitats autònomes') %>%
mutate(pare_cat = as.numeric(pare_cat),
pare_esp = as.numeric(pare_esp),
mare_cat = as.numeric(mare_cat),
mare_esp = as.numeric(mare_esp)) %>%
mutate(llengua_primera = `Quina llengua va parlar primer vostè, a casa, quan era petit?`) %>%
mutate(llengua_primera = convert_language(llengua_primera),
llengua_habitual = convert_language(`Quina és la seva llengua habitual, ens referim a la llengua que parla més sovint?`),
llengua_propia = convert_language(`Quina és la seva llengua, ens referim a quina és la llengua que vostè considera com a pròpia?`)) %>%
mutate(indepe = `Vol que Catalunya esdevingui un Estat independent?`) %>%
# mutate(llengua_preferiex = `Prefereix que li faci les preguntes en català o en castellà?`),
mutate(neixer = `Em podria dir on va néixer?`,
informat = `Es considera vostè molt, bastant, poc o gens informat/ada del que passa en política?`,
interessat = `A vostè la política li interessa molt, bastant, poc o gens?`,
partit = `Em podria dir per quin partit sent més simpatia?`,
axis = `Quan es parla de política, normalment s’utilitzen les expressions esquerra i dreta, indiqui on s’ubicaria vostè?`,
telefon_fix = `Té telèfon fix a la seva llar?`,
ingressos = `Quins són els ingressos familiars que entren cada mes a casa seva?`) %>%
mutate(indepe = as.character(indepe)) %>%
mutate(indepe =
ifelse(indepe %in% c('No ho sap', 'No contesta'),
'NS/NC', indepe)) %>%
mutate(municipi = `Grandària del municipi`) %>%
mutate(provincia = `Província`) %>%
dplyr::select(
partit,
referendum,
identificacio,
municipi,
provincia,
date,
avis,
pare_cat, pare_esp,
mare_cat, mare_esp,
llengua_primera, llengua_habitual, llengua_propia, #llengua_prefereix,
neixer,
informat,
interessat,
partit,
axis,
telefon_fix,
ingressos,
indepe
) %>%
mutate(pares = ifelse(pare_cat + mare_cat == 2,
'2 pares nascuts a Cat',
ifelse(pare_cat + mare_cat == 1 &
pare_esp + mare_esp == 1,
'1 pare nascut a Cat, l\'altre a Esp',
ifelse(pare_esp + mare_esp == 2,
'2 pares nascuts a Esp',
'Altres combinacions')
))) %>%
mutate(partit = as.character(partit)) %>%
mutate(partit = ifelse(partit %in% c('ERC', 'PSC', 'CUP',
"PPC"),
partit,
ifelse(partit %in% c('Podemos','En Comú Podem', 'Catalunya en Comú Podem', 'Barcelona en Comú', 'Catalunya sí que es pot'), 'Podem',
ifelse(partit == "C's", "Cs",
ifelse(partit %in% c('CiU', 'Junts pel Sí', 'CDC', 'PDeCAT', 'Junts per Catalunya'), 'JxCat/PDeCat', 'Cap o altre partit')))))
}
# Combine
bop_numbers <- sort(unique(new_ceo$`Número d'ordre del baròmetre`))
bop_list <- list()
for(i in 1:length(bop_numbers)){
message(i)
this_bop_number <- bop_numbers[i]
this_bop <- new_ceo %>% filter(`Número d'ordre del baròmetre` == this_bop_number)
out <- transform_data(this_bop)
bop_list[[i]] <- out
}
bop <- bind_rows(bop_list)
combined <-
bop %>%
bind_rows(
transform_data(ceo_june_2019)
)
simple_plot <- function(ca = FALSE,
keep_simple = FALSE){
levs <- c("Molt d'acord", "D'acord", "Ni d'acord ni en desacord", "En desacord", "Molt en desacord", "No ho sap", "No contesta")
levs_en <- c('Strongly agree',
'Agree',
'Neither agree\nnor disagree',
'Disagree',
'Strongly disagree',
"Don't know",
"No answer")
pd <- combined %>%
filter(!is.na(referendum)) %>%
group_by(referendum) %>%
tally
cols <- RColorBrewer::brewer.pal(n = 5, name = 'Spectral')
cols <- rev(cols)
cols[3] <- 'darkgrey'
cols <- c(cols, rep('darkgrey', 2))
if(keep_simple){
pd <- pd %>%
filter(!referendum %in% c(levs[c(3,6,7)],
levs_en[c(3,6,7)]))
cols <- cols[!(1:length(cols) %in% c(3,6,7))]
}
pd <- pd %>%
mutate(p = n / sum(n) * 100)
if(ca){
the_labs <- labs(x = '',
y = 'Percentatge',
title = "'Catalunya té dret a celebrar\nun referèndum d'autodeterminació'",
subtitle = "Grau d'acord amb la frase",
caption = paste0('Gràfic de Joe Brew | @joethebrew | www.vilaweb.cat. Dades del CEO.\n',
'Frase exacte varia per data del qüestionari, detalls complets a:\n',
self_cite(),
'\nMida de mostra: ',
numberfy(sum(pd$n)),
' residents de Catalunya amb ciutadania espanyola, 2018-2019.\n'))
pd$referendum <- factor(pd$referendum,
levels = levs,
labels = gsub("Ni d'acord ni en desacord",
"Ni d'acord ni\nen desacord", levs))
} else {
the_labs <- labs(x = '',
y = 'Percentage',
title = "'Catalonia has the right to hold\na self-determination referendum'",
subtitle = 'Extent of agreement with phrase',
caption = paste0('Chart by Joe Brew | @joethebrew | www.vilaweb.cat. Raw data from the CEO.\n',
'Actual phrase varied by questionnaire date, full details at:\n',
self_cite(),
'\nSample size: ',
numberfy(sum(pd$n)),
' residents of Catalonia with Spanish citenship, 2018-2019.\n'))
pd$referendum <- factor(pd$referendum,
levels = levs,
labels = levs_en)
}
ggplot(data = pd,
aes(x = referendum,
y = p)) +
geom_bar(stat = 'identity',
aes(fill = referendum)) +
scale_fill_manual(name = '',
values = cols) +
theme_vilaweb() +
theme(legend.position = 'none') +
the_labs +
theme(plot.caption = element_text(size = 9)) +
geom_text(aes(label = round(p, digits = 1)),
nudge_y = 5,
alpha = 0.6)
}
party_plot <- function(ca = FALSE,
keep_simple = FALSE){
levs <- c("Molt d'acord", "D'acord", "Ni d'acord ni en desacord", "En desacord", "Molt en desacord", "No ho sap", "No contesta")
levs_en <- c('Strongly agree',
'Agree',
'Neither agree\nnor disagree',
'Disagree',
'Strongly disagree',
"Don't know",
"No answer")
pd <- combined %>%
filter(!is.na(referendum)) %>%
group_by(referendum, partit) %>%
tally
cols <- RColorBrewer::brewer.pal(n = 5, name = 'Spectral')
cols <- rev(cols)
cols[3] <- 'darkgrey'
cols <- c(cols, rep('darkgrey', 2))
if(keep_simple){
pd <- pd %>%
filter(!referendum %in% c(levs[c(3,6,7)],
levs_en[c(3,6,7)]))
cols <- cols[!(1:length(cols) %in% c(3,6,7))]
}
pd <- pd %>%
group_by(partit) %>%
mutate(p = n / sum(n) * 100)
if(ca){
the_labs <- labs(x = '',
y = 'Percentatge',
title = "'Catalunya té dret a celebrar\nun referèndum d'autodeterminació'",
subtitle = "Grau d'acord amb la frase, per partit",
caption = paste0('Gràfic de Joe Brew | @joethebrew | www.vilaweb.cat. Dades del CEO.\n',
'Frase exacte varia per data del qüestionari, detalls complets a:\n',
self_cite(),
'\nMida de mostra: ',
numberfy(sum(pd$n)),
' residents de Catalunya amb ciutadania espanyola, 2018-2019.\n'))
pd$referendum <- factor(pd$referendum,
levels = levs,
labels = gsub("Ni d'acord ni en desacord",
"Ni d'acord ni\nen desacord", levs))
} else {
the_labs <- labs(x = '',
y = 'Percentage',
title = "'Catalonia has the right to hold\na self-determination referendum'",
subtitle = 'Extent of agreement with phrase, by party',
caption = paste0('Chart by Joe Brew | @joethebrew | www.vilaweb.cat. Raw data from the CEO.\n',
'Actual phrase varied by questionnaire date, full details at:\n',
self_cite(),
'\nSample size: ',
numberfy(sum(pd$n)),
' residents of Catalonia with Spanish citenship, 2018-2019.\n'))
pd$referendum <- factor(pd$referendum,
levels = levs,
labels = levs_en)
pd$partit <- gsub('Cap o altre partit', 'Other or no party', pd$partit)
}
g <- ggplot(data = pd,
aes(x = referendum,
y = p)) +
geom_bar(stat = 'identity',
aes(fill = referendum)) +
facet_wrap(~partit, ncol = 4, scales = 'free_x') +
scale_fill_manual(name = '',
values = cols) +
theme_vilaweb() +
the_labs +
theme(plot.caption = element_text(size = 9)) +
geom_text(aes(label = round(p, digits = 1)),
nudge_y = 5,
size = 2.7,
alpha = 0.6)
if(keep_simple){
g <- g +
theme(axis.text.x = element_text(size = 0))
} else {
g <- g +
theme(legend.position = 'none') +
theme(axis.text.x = element_text(angle = 90,
hjust = 1,
vjust = 0.5,
size = 6))
}
return(g)
}
time_plot <- function(ca = FALSE){
  # Share of agree vs disagree responses over time, excluding neutral and
  # non-responses. NOTE(review): the `ca` (Catalan-labels) argument is
  # accepted but never used in this function, unlike simple_plot().
  pd <- combined %>%
    filter(!is.na(referendum)) %>%
    filter(!referendum %in% c("Ni d'acord ni en desacord",
                              "No ho sap",
                              "No contesta")) %>%
    # Collapse the four remaining levels to agree ("D'acord") / disagree.
    mutate(referendum = ifelse(grepl("d'acord|D'acord", referendum), "D'acord", "Desacord")) %>%
    group_by(date, referendum) %>%
    tally %>%
    # tally() drops the last grouping level, so p is a within-date percentage.
    mutate(p = n / sum(n) * 100)
  ggplot(data = pd,
         aes(x = date,
             y = p)) +
    geom_bar(stat = 'identity',
             aes(fill = referendum))
}
44a2f7be356bc4a117b17ed820942e0bd359eacd | fd0622e97276bba2c04d3c2fcba902cdfb65e214 | /packages/nimble/R/types_symbolTable.R | b8997d10de19bfc75cb2a25217564c555fe75310 | [
"GPL-2.0-or-later",
"BSD-3-Clause",
"CC-BY-4.0",
"GPL-2.0-only",
"GPL-1.0-or-later",
"MPL-2.0"
] | permissive | nimble-dev/nimble | 7942cccd73815611e348d4c674a73b2bc113967d | 29f46eb3e7c7091f49b104277502d5c40ce98bf1 | refs/heads/devel | 2023-09-01T06:54:39.252714 | 2023-08-21T00:51:40 | 2023-08-21T00:51:40 | 20,771,527 | 147 | 31 | BSD-3-Clause | 2023-08-12T13:04:54 | 2014-06-12T14:58:42 | C++ | UTF-8 | R | false | false | 41,951 | r | types_symbolTable.R | ## Actual symbolTable class is at the end
## Build a symbolTable from parallel vectors of variable names and types.
## `size` is either a single size vector shared by every variable, or a list
## with one size vector per variable (its length gives that variable's nDim).
buildSymbolTable <- function(vars, type, size){
    symTab <- symbolTable()
    ## seq_along() rather than 1:length(vars): an empty `vars` now adds
    ## nothing, whereas 1:0 would iterate over c(1, 0) and index out of range.
    for(i in seq_along(vars)) {
        ## pick the per-variable size vector, or the shared one
        thisSize <- if(is.list(size)) size[[i]] else size
        symTab$addSymbol(symbolBasic(name = vars[i], type = type[i],
                                     nDim = length(thisSize), size = thisSize))
    }
    return(symTab)
}
nimbleTypeList2argTypeList <- function(NTL) {
    ## Convert a list of nimbleType objects into a named list of type
    ## declarations suitable for use as the `formals` of a function.
    perTypeDeclarations <- lapply(NTL, nimbleType2argType)
    do.call('c', perTypeDeclarations)
}
nimbleType2argType <- function(NT) {
    ## Convert one nimbleType object into a single-element named list whose
    ## element is a type-declaration call such as `double(2)`.
    ## Only basic types ('double', 'integer', 'logical') are accepted.
    declaredType <- NT$type
    if(!(declaredType %in% c('double','integer','logical')))
        stop(paste0('Invalid type \"',declaredType,'\"'))
    declarationCall <- substitute(TYPE(NDIM),
                                  list(TYPE = as.name(declaredType),
                                       NDIM = as.numeric(NT$dim)))
    ans <- list(declarationCall)
    names(ans) <- NT[['name']]
    ans
}
nimbleTypeList2symbolTable <- function(NTL) {
    ## Lightweight analog to argTypeList2symbolTable that takes a list of
    ## nimbleType objects instead, allowing programmatic construction.
    ## Only the basic types ('double', 'integer', 'logical') with an nDim
    ## are currently supported. Returns a symbolTable.
    symTab <- symbolTable()
    for(NT in NTL) {
        symTab$addSymbol(nimbleType2symbol(NT))
    }
    symTab
}
## Convert one nimbleType object to a symbol object.
## Only basic types are supported.
nimbleType2symbol <- function(NT) {
    ## Convert one nimbleType object into a symbolBasic whose sizes are all
    ## unknown (NA). Only basic types are supported.
    if(!(NT$type %in% c('double','integer','logical')))
        stop(paste0('Invalid type \"',NT$type,'\"'))
    ## one NA per dimension marks every extent as not-yet-known
    unknownSizes <- as.numeric(rep(NA, NT$dim))
    symbolBasic(name = NT$name, type = NT$type, nDim = NT$dim, size = unknownSizes)
}
## Build a symbolTable from a named list of unevaluated argument-type
## expressions.  origNames carries the user-facing argument names used for
## error reporting in argType2symbol.
argTypeList2symbolTable <- function(ATL, neededTypes, origNames) {
    ## ATL is the argument-type list from run-time args to a nimble function
    ## This function creates a symbolTable from the argument-type list.
    symTab <- symbolTable()
    for(i in seq_along(ATL)) {
        symTab$addSymbol(argType2symbol(ATL[[i]], neededTypes, names(ATL)[i], origNames[i]))
    }
    symTab
}
## This takes an unevaluated "argument type" expression as input (AT), such as
## double(), double(2), or double(2, c(5,5))
## The first argument is the number of dimensions, defaulting to 0 (indicated scalar)
## The second argument is a vector of the sizes, defaulting to rep(NA, nDim)
## If only some sizes are known, something like double(2, c(5, NA)) should be valid, but we'll have to check later handling to be sure.
##
## Wraps argType2symbolInternal so that any parse failure is reported as one
## clean error naming the user-facing argument (origName).
argType2symbol <- function(AT, neededTypes, name = character(), origName = "") {
    ## silent = TRUE keeps try() from printing the raw internal error, which
    ## would otherwise appear in addition to the clean message below.
    ans <- try(argType2symbolInternal(AT, neededTypes, name), silent = TRUE)
    ## Fixed doubled word in the message (was "Invalid type type declaration").
    if(inherits(ans, 'try-error')) stop(paste0("Invalid type declaration for ",origName,"."), call.=FALSE)
    ans
}
## Workhorse for argType2symbol: parse one unevaluated type expression.
## AT[[1]] is the type name; AT[[2]] (optional) is the number of dimensions;
## AT[[3]] (optional) is an expression evaluating to a size vector.
## Returns a symbol object; unrecognized type names become symbolUnknown for
## later resolution.
argType2symbolInternal <- function(AT, neededTypes, name = character()) {
    if(!is.null(AT$default)) AT$default <- NULL ## remove the 'default=' argument, if it's present
    type <- deparse(AT[[1]])
    if(type == "internalType") {
        return(symbolInternalType(name = name, type = "internal", argList = as.list(AT[-1]))) ## save all other contents for any custom needs later
    }
    if(type %in% c('double', 'integer', 'character', 'logical', 'void', 'constDouble')){
        ## nDim defaults to 0 (scalar) when no dimension argument is given.
        nDim <- if(length(AT)==1) 0 else AT[[2]]
        if(!is.numeric(nDim) || nDim %% 1 != 0)
            stop("argType2symbol: unexpected dimension, '", AT[[2]], "', found in argument '", deparse(AT), "'. Dimension should be integer-valued.")
        ## Unknown sizes are recorded as NA, one per dimension; scalars get size 1.
        size <- if(nDim == 0) 1 else {
            if(length(AT) < 3)
                as.numeric(rep(NA, nDim))
            else
                eval(AT[[3]])
        }
        if(type == "character") {
            ## Character data above one dimension is not supported: collapse to
            ## a vector whose length is the product of the requested sizes.
            if(nDim > 1) {
                warning(paste("character argument",name," with nDim > 1 will be treated as a vector"))
                nDim <- 1
                size <- if(any(is.na(size))) as.numeric(NA) else prod(size)
            }
            return(symbolString(name = name, type = "character", nDim = nDim, size = size))
        }
        if(type == "constDouble"){
            ## constDouble is represented as a const-flagged double symbol.
            type <- 'double'
            return(symbolConstDouble(name = name, type = type, nDim = nDim, size = size, const = TRUE))
        } else {
            return(symbolBasic(name = name, type = type, nDim = nDim, size = size))
        }
    }
    ## Anything else (e.g. a nimbleList class name) is resolved later by
    ## resolveOneUnknownType.
    return(symbolUnknown(name = name, argType = AT))
}
## Resolve a single symbolUnknown into a concrete symbol.
## Returns list(newSymbol, newNeededTypes): newSymbol may be the input
## unchanged (if it was not a symbolUnknown), and newNeededTypes is a
## (possibly empty) list of newly discovered types keyed by C++ class name.
resolveOneUnknownType <- function(unknownSym, neededTypes = NULL, nimbleProject) {
    ## return a list of the new symbol (could be same as the old symbol) and newNeededType
    newNeededType <- list()
    if(!inherits(unknownSym, 'symbolUnknown')) return(list(unknownSym, newNeededType))
    ## We need to resolve the type:
    AT <- unknownSym$argType
    type <- deparse(AT[[1]])
    name <- unknownSym$name
    ## first see if it is a type already in neededTypes
    ## This would occur if the type appeared in setup code
    existingNeededTypeNames <- unlist(lapply(neededTypes, `[[`, 'name'))
    isANeededType <- existingNeededTypeNames == type
    if(any(isANeededType)) {
        ## Shallow-copy the existing entry and rebind it under this symbol's name.
        listST <- neededTypes[[which(isANeededType)[1]]]$copy(shallow = TRUE)
        listST$name <- name
        return(list(listST, newNeededType))
        ##symTab$addSymbol(listST, allowReplace = TRUE)
    } else { ## either create the type, or in the case of 'return', search recursively into neededTypes
        possibleTypeName <- type
        ## look for a valid nlGenerator in the global environment
        if(exists(possibleTypeName, envir = globalenv())) {
            possibleNLgenerator <- get(possibleTypeName, envir = globalenv())
            if(is.nlGenerator(possibleNLgenerator)) {
                className <- nl.getListDef(possibleNLgenerator)$className
                ## see if it is a different name for an existing neededType by matching on the internal className
                isANeededType <- className == names(neededTypes)
                if( any(isANeededType) ) {
                    listST <- neededTypes[[which(isANeededType)[1]]]$copy(shallow = TRUE)
                    listST$name <- name
                    return(list(listST, newNeededType))
                    ## symTab$addSymbol(listST, allowReplace = TRUE)
                }
                ## for the case of 'return' only, see if it matches a type nested within a neededType
                if(name == 'return') {
                    listST <- recurseGetListST(className, neededTypes)
                    if(!is.null(listST)) {
                        listST$name <- name
                        return(list(listST, newNeededType))
                        ## symTab$addSymbol(listST, allowReplace = TRUE)
                    }
                }
                ## can't find it anywhere, so create it and add to newNeededTypes
                ## Need access to the nimbleProject here!
                nlGen <- possibleNLgenerator
                nlp <- nimbleProject$compileNimbleList(nlGen, initialTypeInferenceOnly = TRUE)
                className <- nl.getListDef(nlGen)$className
                newSym <- symbolNimbleList(name = name, nlProc = nlp)
                newNeededType[[className]] <- newSym
                returnSym <- symbolNimbleList(name = name, nlProc = nlp)
                return(list(returnSym, newNeededType))
            }
            ## NOTE(review): if the object exists but is not an nlGenerator,
            ## control falls through here and the function returns NULL
            ## invisibly rather than stopping -- confirm this is intended.
        } else {
            stop(paste0("Can't figure out what ", possibleTypeName, " is."))
        }
    }
}
## Resolve every symbolUnknown entry in `symTab`, modifying the table in
## place, and return the list of newly needed types (keyed by C++ class name)
## that must also be compiled.
resolveUnknownTypes <- function(symTab, neededTypes = NULL, nimbleProject) {
    ## modify the symTab in place.
    ## return new neededTypes
    newNeededTypes <- list()
    ## (Removed an unused local that duplicated work done inside
    ## resolveOneUnknownType.)
    for(name in symTab$getSymbolNames()) {
        unknownSym <- symTab$getSymbolObject(name)
        ## result[[1]] is the (possibly unchanged) symbol; result[[2]] holds
        ## any new needed types discovered while resolving it.
        result <- resolveOneUnknownType(unknownSym, neededTypes, nimbleProject)
        if(!identical(result[[1]], unknownSym)) symTab$addSymbol(result[[1]], allowReplace = TRUE)
        newNeededTypes <- c(newNeededTypes, result[[2]])
    }
    newNeededTypes
}
## Depth-first search through neededTypes (and each entry's nested
## nlProc$neededTypes) for a symbol whose C++ class name matches `className`.
## Returns a shallow copy of the first match, or NULL if none is found.
recurseGetListST <- function(className, neededTypes){
    listST <- NULL
    for(NT in neededTypes){
        if(NT$type %in% c('nimbleList', 'nimbleListGenerator')){
            if(!inherits(NT$nlProc$neededTypes, 'uninitializedField')){
                if(className %in% names(NT$nlProc$neededTypes)){
                    isANeededType <- (className == names(NT$nlProc$neededTypes))
                    ## which(isANeededType == 1) relies on logical-to-numeric
                    ## coercion; equivalent to which(isANeededType).
                    listST <- NT$nlProc$neededTypes[[which(isANeededType == 1)[1]]]$copy(shallow = TRUE)
                    return(listST)
                }
                ## NOTE(review): a non-NULL result from this recursion can be
                ## overwritten by a later iteration that returns NULL -- confirm
                ## whether an early return was intended here.
                else listST <- recurseGetListST(className, NT$nlProc$neededTypes)
            }
        }
    }
    return(listST)
}
## Translate an R-side symbolTable into a C++-side symbolTable by calling each
## symbol's genCppVar() method.  `arguments` names the symbols that are
## function inputs (so they can be generated as references); `include`
## restricts which symbols are processed; symbols typed 'Ronly' exist only on
## the R side and are skipped.
symbolTable2cppVars <- function(symTab, arguments = character(), include, parentST = NULL) {
    newSymTab <- symbolTable(parentST = parentST)
    if(missing(include)) include <- symTab$getSymbolNames()
    for(s in include) {
        inputArg <- s %in% arguments
        sObj <- symTab$getSymbolObject(s)
        if(inherits(sObj$type, 'uninitializedField')) stop(paste('Error in symbolTable2cppVars for ', symTab, '. type field is not set.'), call. = FALSE)
        ## Guards against a zero-length type field, which would make the
        ## following if() condition have length zero and error.
        if(length(sObj$type == 'Ronly') == 0) stop(paste('Error in symbolTable2cppVars for ', symTab, ', length(sObj$type == "Ronly") == 0'), call. = FALSE)
        if(sObj$type == 'Ronly') next
        ## genCppVar may return either a single cppVar or a list of them
        ## (e.g. symbolNimPtrList generates a vector plus its setter).
        newSymOrList <- symTab$getSymbolObject(s)$genCppVar(inputArg)
        if(is.list(newSymOrList)) {
            for(i in seq_along(newSymOrList)) {
                newSymTab$addSymbol(newSymOrList[[i]])
            }
        } else {
            newSymTab$addSymbol(newSymOrList)
        }
    }
    newSymTab
}
## Base class for all symbol-table entries.  Every symbol has a name and a
## type string; type == 'Ronly' marks symbols that exist only on the R side
## and generate no C++ variable.
symbolBase <-
    setRefClass(Class = 'symbolBase',
                fields = list(name = 'ANY', #'character',
                    type = 'ANY' ), #'character'),
                methods = list(
                    ## Default code-generation "use" of a symbol is just its name.
                    generateUse = function(...) name
                    )
                )
## Placeholder symbol for a type that could not be identified at parse time
## (e.g. a nimbleList class name); resolved later by resolveOneUnknownType.
symbolUnknown <- setRefClass(Class = 'symbolUnknown',
                             contains = 'symbolBase',
                             fields = list(argType = 'ANY'),
                             methods = list(
                                 ## One-line description used by show() and error messages.
                                 showMsg = function() {
                                     ## Fixed class-name typo: was 'uninitializeField', which never
                                     ## matched, making the "no type declaration" branch unreachable.
                                     if(inherits(argType, 'uninitializedField'))
                                         paste0('symbolUnknown with no type declaration')
                                     else
                                         paste0('symbolUnknown with type declaration ', deparse(argType))
                                 },
                                 show = function() writeLines(showMsg()),
                                 ## An unresolved symbol can never become a C++ variable.
                                 genCppVar = function() {
                                     stop(paste0("Can't generate a C++ variable type from ", showMsg()))
                                 }
                             )
                             )
## nDim and size are redundant for convenience with one exception:
## nDim = 0 must have size = 1 and means it is a true scalar -- NOT sure this is correct anymore...
## nDim = 1 with size = 1 means it is a 1D vector that happens to be length 1
## Core symbol class for double/integer/logical values of known dimensionality.
symbolBasic <-
    setRefClass(Class = 'symbolBasic',
                contains = 'symbolBase',
                fields = list(nDim = 'ANY', #'numeric',
                    size = 'ANY'), #'numeric'),
                methods = list(
                    show = function() {
                        if(inherits(size, 'uninitializedField')) {
                            writeLines(paste0(name,': ', type, ' sizes = (uninitializedField), nDim = ', nDim))
                        } else {
                            writeLines(paste0(name,': ', type, ' sizes = (', paste(size, collapse = ', '), '), nDim = ', nDim))
                        }
                    },
                    ## Map this symbol to a C++ variable declaration: scalars
                    ## become plain C types; arrays become NimArr objects,
                    ## passed by reference when used as a function argument.
                    genCppVar = function(functionArg = FALSE) {
                        if(type == 'void') return(cppVoid())
                        else if(type == 'integer') cType <- 'int'
                        else if(type == 'double') cType <- 'double'
                        else if(type == 'logical') cType <- 'bool'
                        ## NOTE(review): an unrecognized type only warns here,
                        ## leaving cType undefined and erroring below -- confirm
                        ## that is the intended failure mode.
                        else warning(paste("in genCppVar method for",name,"in symbolBasic class, type", type,"unrecognized\n"), FALSE)
                        if(nDim == 0) {
                            ## The name "pi" gets a constructor initializing it
                            ## to M_PI so the built-in constant works in C++.
                            return(if(name != "pi")
                                       cppVar(baseType = cType,
                                              name = name,
                                              ptr = 0,
                                              ref = FALSE)
                                   else
                                       cppVarFull(baseType = cType,
                                                  name = name,
                                                  ptr = 0,
                                                  ref = FALSE,
                                                  constructor = "(M_PI)")
                                   )
                        }
                        if(functionArg) {
                            return(cppNimArr(name = name,
                                             nDim = nDim,
                                             type = cType,
                                             ref = TRUE))
                        } else {
                            return(cppNimArr(name = name,
                                             nDim = nDim,
                                             type = cType))
                        }
                    })
                )
symbolConstDouble <- setRefClass(
Class = "symbolConstDouble",
contains = "symbolBasic",
fields = list(const = 'ANY'),
methods = list(
show = function() writeLines(paste('symbolConstDouble', name)),
genCppVar = function(functionArg = FALSE) {
cppNimArr(name = name,
nDim = nDim,
type = 'double',
ref = functionArg,
const = TRUE)
})
)
## NOTE(review): this definition is shadowed by a second symbolSEXP definition
## later in this file (which inherits from symbolBasic instead), so this
## symbolBase-based version is effectively dead after the file loads.
symbolSEXP <- setRefClass(
    Class = "symbolSEXP",
    contains = "symbolBase",
    methods = list(
        show = function() writeLines(paste('symbolSEXP', name)),
        genCppVar = function(...) {
            cppVar(name = name,
                   baseType = "SEXP")
        })
)
## Symbol generating a raw C++ pointer (int*, double*, or bool*).
symbolPtr <- setRefClass(
    Class = "symbolPtr",
    contains = "symbolBase",
    methods = list(
        show = function() writeLines(paste('symbolPtr', name)),
        genCppVar = function(...) {
            ## else-if chain with an explicit stop: previously an unrecognized
            ## type fell through all three ifs and produced an opaque
            ## "object 'cType' not found" error.
            if(type == 'integer') cType <- 'int'
            else if(type == 'double') cType <- 'double'
            else if(type == 'logical') cType <- 'bool'
            else stop(paste0("symbolPtr '", name, "': unrecognized type '", type, "'"))
            cppVar(name = name,
                   ptr = 1,
                   baseType = cType)
        })
)
symbolSEXP <- setRefClass(
Class = "symbolSEXP",
contains = "symbolBasic", ## inheriting from symbolBasic instead of symbolBase make initSizes work smoothly
methods = list(
show = function() writeLines(paste('symbolSEXP', name)),
genCppVar = function(...) {
cppVar(name = name,
ptr = 0,
baseType = "SEXP")
})
)
## symbolADinfo <- setRefClass(
## Class = "symbolADinfo",
## contains = "symbolBase",
## methods = list(
## initialize = function(...) {
## callSuper(...)
## type <<- 'Ronly'
## },
## show = function() writeLines(paste("symbolADinfo", name)),
## genCppVar = function(...) {
## cppVar(name = name,
## ptr = 0,
## baseType = "nimbleCppADinfoClass")
## })
## )
symbolString <- setRefClass(
Class = "symbolString",
contains = "symbolBasic", ## inheriting from symbolBasic instead of symbolBase make initSizes work smoothly
methods = list(
show = function() writeLines(paste('symbolString', name)),
genCppVar = function(...) {
if(nDim == 0) {
cppVar(name = name, baseType = "std::string")
} else {
cppVarFull(name = name, baseType = "vector", templateArgs = list(cppVar(baseType = "std::string")))
}
})
)
symbolNimbleTimer <- setRefClass(
Class = "symbolNimbleTimer",
contains = "symbolBase",
methods = list(
show = function() writeLines(paste('symbolNimbleTimer', name)),
genCppVar = function(...) {
cppVar(name = name, baseType = "nimbleTimerClass_")
}))
symbolNimArrDoublePtr <-
setRefClass(Class = 'symbolNimArrDoublePtr',
contains = 'symbolBasic',
methods = list(
show = function() writeLines(paste('symbolNimArrDoublePtr', name)),
genCppVar = function(...){
if(type == 'integer') cType <- 'int'
else if(type == 'double') cType <- 'double'
else if(type == 'logical') cType <- 'bool'
else cat(paste0("Warning: in genCppVar method in symbolBasic class, type unrecognized: ", type, '\n'))
return(cppNimArrPtr(name = name, ## this is self-dereferencing
nDim = nDim,
ptr = 2,
type = cType))}
)
)
## Symbol generating a self-dereferencing pointer to a vector of NimArr objects.
symbolVecNimArrPtr <-
    setRefClass(Class = 'symbolVecNimArrPtr',
                contains = 'symbolBase', ## Important that this is not symbolBase or it will be thought to be directly numeric
                ## NOTE(review): the comment above contradicts the code, which
                ## does inherit from symbolBase -- presumably it meant "not
                ## symbolBasic"; confirm.
                fields = list(
                    nDim = 'ANY', #'numeric',
                    size = 'ANY'), #'numeric'),
                methods = list(
                    show = function() writeLines(paste('symbolVecNimArrPtr', name)),
                    genCppVar = function(...){
                        if(type == 'integer') cType <- 'int'
                        else if(type == 'double') cType <- 'double'
                        else if(type == 'logical') cType <- 'bool'
                        ## An unrecognized type leaves cType undefined and errors below.
                        else cat(paste0("Warning: in genCppVar method in symbolBasic class, type unrecognized: ", type, '\n'))
                        return(cppVecNimArrPtr(name = name, selfDereference = TRUE,
                                               nDim = nDim,
                                               ptr = 1,
                                               type = cType))}
                    )
                )
symbolNodeFunctionVector <-
setRefClass(Class = 'symbolNodeFunctionVector',
contains = 'symbolBase',
methods = list(
initialize = function(...) {
callSuper(...)
type <<- 'symbolNodeFunctionVector'
},
show = function() writeLines(paste('symbolNodeFunctionVector', name)),
genCppVar = function(...) {
return(cppNodeFunctionVector(name = name))
}
)
)
symbolNodeFunctionVector_nimDerivs <-
setRefClass(Class = 'symbolNodeFunctionVector_nimDerivs',
contains = 'symbolBase',
methods = list(
initialize = function(...) {
callSuper(...)
type <<- 'symbolNodeFunctionVector_nimDerivs'
},
show = function() writeLines(paste('symbolNodeFunctionVector_nimDerivs', name)),
genCppVar = function(...) {
return(cppNodeFunctionVector(name = name))
}
)
)
symbolModel <-
setRefClass(Class = 'symbolModel',
contains = 'symbolBase',
fields = list(className = 'ANY'),
methods = list(
initialize = function(...) {
callSuper(...)
## type == 'local' means it is defined in setupCode and so will need to have an object and be built
## type == 'Ronly' means it is a setupArg and may be a different type for different nimbleFunction specializations
## and it will be like a model in C++ code: not there except by extracted pointers inside of it
},
show = function() writeLines(paste('symbolModel', name)),
genCppVar = function(...) {
return(cppVar(name = name, baseType = "Ronly", ptr = 1))
}
)
)
symbolModelValues <-
setRefClass(Class = 'symbolModelValues',
contains = 'symbolBase',
fields = list(mvConf = 'ANY'),
methods = list(
initialize = function(...) {
callSuper(...)
## type == 'local' means it is defined in setupCode and so will need to have an object and be built
## type == 'Ronly' means it is a setupArg and may be a different type for different nimbleFunction specializations
## and it will be like a model in C++ code: not there except by extracted pointers inside of it
},
show = function() writeLines(paste('symbolModelValues', name)),
genCppVar = function(...) {
return(cppVar(name = name, baseType = "Values", ptr = 1))
}
)
)
symbolMemberFunction <-
setRefClass(Class = 'symbolMemberFunction',
contains = 'symbolBase',
fields = list(nfMethodRCobj = 'ANY',
RCfunProc = 'ANY'), ## added so that we can access returnType and argument types (origLocalSymbolTable)
methods = list(
initialize = function(...) {callSuper(...); type <<- 'Ronly'},
show = function() writeLines(paste('symbolMemberFunction', name)),
genCppVar = function(...) {
stop(paste('Error, you should not be generating a cppVar for symbolMemberFunction', name))
} ))
symbolNimbleListGenerator <-
setRefClass(Class = 'symbolNimbleListGenerator',
contains = 'symbolBase',
fields = list(nlProc = 'ANY'),
methods = list(
initialize = function(...){callSuper(...); type <<- 'Ronly'},
show = function() writeLines(paste('symbolNimbleListGenerator', name)),
genCppVar = function(...) {
return( cppVarFull(name = name,
baseType = 'Ronly',
templateArgs = nlProc$name) )
}
))
symbolNimbleList <-
setRefClass(Class = 'symbolNimbleList',
contains = 'symbolBase',
fields = list(nlProc = 'ANY'),
methods = list(
initialize = function(...){callSuper(...); type <<- 'nimbleList'},
show = function() writeLines(paste('symbolNimbleList', name)),
genCppVar = function(...) {
pointeeType <- nlProc$name
if(is.null(pointeeType)) stop(paste('Internal error: nlProc is missing name:', name))
return( cppVarFull(name = name,
baseType = 'nimSmartPtr',
templateArgs = pointeeType) )
}
))
symbolNimbleFunction <-
setRefClass(Class = 'symbolNimbleFunction',
contains = 'symbolBase',
fields = list(nfProc = 'ANY'),
methods = list(
initialize = function(...) {callSuper(...)},
show = function() writeLines(paste('symbolNimbleFunction', name)),
genCppVar = function(...) {
cppName <- if(name == ".self") "this" else name
return(cppVarFull(name = cppName, baseType = environment(nfProc$nfGenerator)$name, ptr = 1))
}
))
symbolNimbleFunctionSelf <-
setRefClass(Class = 'symbolNimbleFunctionSelf',
contains = 'symbolBase',
fields = list(baseType = 'ANY'),
methods = list(
initialize = function(name, nfProc) {
callSuper(name = name, type = "Ronly");
baseType <<- environment(nfProc$nfGenerator)$name
},
show = function() writeLines(paste('symbolNimbleFunctionSelf', name)),
genCppVar = function(...) {
stop("Should not be creating a cppVar from a symbolNimbleFunctionSelf")
}
))
symbolVoidPtr <- setRefClass(Class = 'symbolVoidPtr',
contains = 'symbolBase',
methods = list(
initialize = function(...) callSuper(...),
show = function() writeLines(paste('symbolVoidPtr', name)),
genCppVar = function(...) {
return(cppVarFull(name = name, baseType = 'void', ptr = 1))
}))
symbolModelVariableAccessorVector <-
setRefClass(Class = 'symbolModelVariableAccessorVector',
contains = 'symbolBase',
fields = list(lengthName = 'ANY'), #'character'),
methods = list(
initialize = function(...) {
callSuper(...)
type <<- 'symbolModelVariableAccessorVector'
},
show = function() writeLines(paste('symbolModelVariableAccessorVector', name)),
genCppVar = function(...) {
return(cppModelVariableMapAccessorVector(name = name))
}
)
)
symbolModelValuesAccessorVector <-
setRefClass(Class = 'symbolModelValuesAccessorVector',
contains = 'symbolBase',
methods = list(
initialize = function(...) {
callSuper(...)
type <<- 'symbolModelValuesAccessorVector'
},
show = function() writeLines(paste('symbolModelValuesAccessorVector', name)),
genCppVar = function(...) {
return(cppModelValuesMapAccessorVector(name = name))
}
)
)
symbolGetParamInfo <-
setRefClass(Class = 'symbolGetParamInfo',
contains = 'symbolBase',
fields = list(paramInfo = 'ANY'), ## getParam_info, i.e. simple list
methods = list(
initialize = function(paramInfo, ...) {
callSuper(...)
paramInfo <<- paramInfo
type <<- 'Ronly'
},
show = function() writeLines(paste('symbolGetParamInfo', name)),
genCppVar = function(...) {
stop(paste('Error, you should not be generating a cppVar for symbolGetParamInfo', name))
} ))
symbolGetBoundInfo <-
setRefClass(Class = 'symbolGetBoundInfo',
contains = 'symbolBase',
fields = list(boundInfo = 'ANY'), ## getBound_info, i.e. simple list
methods = list(
initialize = function(boundInfo, ...) {
callSuper(...)
boundInfo <<- boundInfo
type <<- 'Ronly'
},
show = function() writeLines(paste('symbolGetBoundInfo', name)),
genCppVar = function(...) {
stop(paste('Error, you should not be generating a cppVar for symbolGetBoundInfo', name))
} ))
## Symbol for a numeric-list object (a list of NimArr values of one class).
symbolNumericList <-
    setRefClass(Class = 'symbolNumericList',
                contains = 'symbolBase',
                fields = list( className = 'ANY', #'character',
                    nDim ='ANY'), # 'numeric'),
                methods = list(
                    initialize = function(...) {
                        callSuper(...)
                        type <<- 'symbolNumericList'
                    },
                    show = function() writeLines(paste('symbolNumericList', name)),
                    ## NOTE(review): initialize() forces type to
                    ## 'symbolNumericList', so none of the branches below can
                    ## ever match and cType would be undefined if this method
                    ## runs -- presumably genCppVar is unused for this class or
                    ## type is reassigned elsewhere; confirm.
                    genCppVar = function(...){
                        if(type == 'integer') cType <- 'int'
                        else if(type == 'double') cType <- 'double'
                        else if(type == 'logical') cType <- 'bool'
                        else cat(paste0("Warning: in genCppVar method in symbolBasic class, type unrecognized: ", type, '\n'))
                        return(cppVecNimArrPtr(name = name,
                                               nDim = nDim,
                                               ptr = 0,
                                               type = cType))}
                    )
                )
symbolNimPtrList <-
setRefClass(Class = 'symbolNimPtrList',
contains = 'symbolBase',
fields = list(baseClass = 'ANY'),
methods = list(
initialize = function(...) callSuper(...),
show = function() writeLines(paste('symbolNimPtrList', name)),
genCppVar = function(...) {
componentCclassName <- environment(baseClass)$name
return(list(
cppVarFull(name = name,
baseType = 'vector',
templateArgs = list(cppVar(ptr = 1, baseType = componentCclassName) )
),
cppVarFull(name = paste0(name,'_setter'),
baseType = 'vectorOfPtrsAccess',
templateArgs = list(cppVar(baseType = componentCclassName) )
)
))
}
))
symbolCopierVector <-
setRefClass(Class = 'symbolCopierVector',
contains = 'symbolBase',
methods = list(
initialize = function(...) callSuper(...),
show = function() writeLines(paste('symbolCopierVector', name)),
genCppVar = function(...) {
cppVar(name = name, baseType = 'copierVectorClass')
}
))
symbolNimbleFunctionList <-
setRefClass(Class = 'symbolNimbleFunctionList',
contains = 'symbolNimPtrList',
fields = list(nfProc = 'ANY'))
symbolEigenMap <- setRefClass(Class = 'symbolEigenMap',
contains = 'symbolBase',
fields = list(
eigMatrix = 'ANY', #'logical', ## or array
strides ='ANY' # 'numeric' ## NA for Dynamic. length(0) means no strides
),
methods = list(
show = function() {
writeLines(paste0(name,': Eigen ',if(eigMatrix) 'Matrix' else 'Array', ' Map of ', type, if(length(strides) > 0) paste0(' with strides ', paste(strides, collapse = ', ')) else character()))
},
genCppVar = function(functionArg = FALSE) {
if(functionArg) stop('Error: cannot take Eigen Map as a function argument (without more work).')
if(length(strides)==2 & eigMatrix) {
if(all(is.na(strides))) {
baseType <- paste0('EigenMapStr', if(type == 'double') 'd' else if(type == 'integer') 'i' else 'b' )
return(cppVarFull(name = name,
baseType = baseType,
constructor = '(0,0,0, EigStrDyn(0, 0))',
ptr = 0,
static = FALSE))
}
}
cppEigenMap(name = name,
type = type,
strides = strides,
eigMatrix = eigMatrix)
}
)
)
symbolIndexedNodeInfoTable <-
setRefClass(Class = "symbolIndexedNodeInfoTable",
contains = "symbolBase",
methods = list(
initialize = function(...) callSuper(...),
show = function() writeLines(paste('symbolIndexedNodeInfoTable', name)),
## We need this to be copied, but it must be copied to a variable already declared in the nodeFun base class,
## so we don't want any genCppVar.
genCppVar = function(...) {
cppVarFull(name = name, silent = TRUE) ## this symbol exists to get a base class member data copied, so it shouldn't be declared
##stop(paste('Error, you should not be generating a cppVar for symbolIndexedNodeInfoTable', name))
## looks like if a copy type is created in makeNimbleFxnCppCopyTypes (based on the symbolXXXclass then it will be copied
## and if the type is Ronly then it will not be turned into a cppVar. So that bit of design worked out well
## it's in the nodeFun base class as vector<indexedNodeInfo>
}))
symbolInternalType <-
setRefClass(Class = "symbolInternalType",
contains = "symbolBase",
fields = list(argList = 'ANY'),
methods = list(
initialize = function(...) callSuper(...),
show = function() writeLines(paste('symbolInternalType', name)),
genCppVar = function(functionArg = FALSE) {
if(length(argList) == 0) stop(paste('No information for outputting C++ type of', name))
if(argList[[1]] == 'indexedNodeInfoClass'){
if(functionArg) return(cppVarFull(name = name, baseType = "indexedNodeInfo", const = TRUE, ref = TRUE))
return(cppVar(name = name, baseType = "indexedNodeInfo"))
}
})
)
## nDim is set to length(size) unless provided, which is how scalar (nDim = 0) must be set
## Convenience constructor for a symbolBasic of type 'double'.  The only
## allowed nDim/size mismatch is the true-scalar case nDim == 0 with size == 1.
symbolDouble <- function(name, size = numeric(), nDim = length(size)) {
    if(is.logical(size)) size <- as.numeric(size)
    if(nDim != length(size)) {
        ## || rather than |: both operands are length-1 and this is a scalar
        ## condition, so short-circuiting is appropriate.
        if(nDim != 0 || !identical(size, 1)) stop('Error in symbolDouble, nDim must be length(size) unless nDim == 0 and size == 1')
    }
    symbolBasic(name = name, type = 'double', nDim = nDim, size = size)
}
## Convenience constructor for a symbolBasic of type 'int'.  The only allowed
## nDim/size mismatch is the true-scalar case nDim == 0 with size == 1.
## NOTE(review): the type string 'int' differs from the 'integer' string that
## symbolBasic$genCppVar recognizes -- confirm downstream handling before
## relying on C++ generation from this constructor.
symbolInt <- function(name, size = numeric(), nDim = length(size)) {
    if(is.logical(size)) size <- as.numeric(size)
    if(nDim != length(size)) {
        ## || rather than |: scalar condition, short-circuits.
        if(nDim != 0 || !identical(size, 1)) stop('Error in symbolInt, nDim must be length(size) unless nDim == 0 and size == 1')
    }
    symbolBasic(name = name, type = 'int', nDim = nDim, size = size)
}
## Container mapping symbol names to symbol objects, with an optional parent
## table for scoped lookup and a lazily built cache of per-symbol
## list(size, nDim) entries for fast dimension queries.
symbolTable <-
    setRefClass(Class = 'symbolTable',
                fields = list(symbols = 'ANY', #'list',
                    parentST = 'ANY',
                    dimAndSizeList = 'ANY',
                    dimAndSizeListMade = 'ANY'),
                methods = list(
                    initialize = function(parentST = NULL, ...) {
                        symbols <<- list()
                        dots <- list(...)
                        ## An initial symbols= list may be supplied; every entry
                        ## must carry a usable name to serve as its key.
                        if('symbols' %in% names(dots)) {
                            if(is.list(dots$symbols)) {
                                for(s in dots$symbols) {
                                    if(inherits(s$name, 'uninitializedField')) stop(paste0('Error: all symbols in list must have meaningful name fields'))
                                    if(identical(s$name, character())) stop(paste0('Error: all symbols in list must have meaningful name fields'))
                                    symbols[[s$name]] <<- s
                                }
                            } else stop('Error: symbols provided must be a list')
                        }
                        parentST <<- parentST
                        dimAndSizeListMade <<- FALSE
                    },
                    ## add a symbol RC object to this symbolTable; checks for valid symbolRC object, and duplicate symbol names
                    addSymbol = function(symbolRCobject, allowReplace = FALSE) {
                       ## if(!is(symbolRCobject, 'symbolBase')) stop('adding non-symbol object to symbolTable')
                        name <- symbolRCobject$name
                        if(!allowReplace) if(name %in% getSymbolNames()) warning(paste0('duplicate symbol name: ', name))
                        symbols[[name]] <<- symbolRCobject
                        ## Keep the cached dim-and-size list in sync if built;
                        ## symbols lacking size/nDim fields cache NULL.
                        if(dimAndSizeListMade) {
                            dimAndSizeList[[name]] <<- {ans <- try(list(symbolRCobject$size, symbolRCobject$nDim)); if(inherits(ans, 'try-error')) NULL else ans}
                        }
                    },
                    ## remove a symbol RC object from this symbolTable; gives warning if symbol isn't in table
                    removeSymbol = function(name) {
                        if(!(name %in% getSymbolNames())) warning(paste0('removing non-existant symbol name: ', name))
                        symbols[[name]] <<- NULL },
                    ## symbol accessor functions
                    getLength = function() return(length(symbols)),
                    getSymbolObjects = function() return(symbols),
                    getSymbolNames = function() if(is.null(names(symbols))) return(character(0)) else return(names(symbols)),
                    ## Look up a symbol by name; inherits = TRUE recurses into
                    ## parent tables when not found locally.
                    getSymbolObject = function(name, inherits = FALSE) {
                        ans <- symbols[[name]]
                        if(is.null(ans)) if(inherits) if(!is.null(parentST)) ans <- parentST$getSymbolObject(name, TRUE)
                        return(ans)
                    },
                    symbolExists = function(name, inherits = FALSE) {
                        return(!is.null(getSymbolObject(name, inherits)))
                    },
                    ## Build the list(size, nDim) cache for every symbol.
                    initDimAndSizeList = function() {
                        dimAndSizeList <<- lapply(symbols, function(x) {
                            ans <- try(list(x$size, x$nDim))
                            if(inherits(ans, 'try-error')) NULL else ans
                        })
                        dimAndSizeListMade <<- TRUE
                    },
                    ## Return cached dim-and-size entries for the given names,
                    ## building the cache on first use.
                    makeDimAndSizeList = function(names) {
                        if(!dimAndSizeListMade) initDimAndSizeList()
                        dimAndSizeList[names]
                    },
                    getSymbolType = function(name) return(symbols[[name]]$type),
                    getSymbolField = function(name, field) return(symbols[[name]][[field]]),
                    setSymbolField = function(name, field, value) symbols[[name]][[field]] <<- value,
                    ## parentST accessor functions
                    getParentST = function() return(parentST),
                    setParentST = function(ST) parentST <<- ST,
                    show = function() {
                        writeLines('symbol table:')
                        for(i in seq_along(symbols)) symbols[[i]]$show()
                        if(!is.null(parentST)) {
                            writeLines('parent symbol table:')
                            parentST$show()
                        }
                    }
                    )
                )
## Compare two model-values symbol tables for positional equality of names,
## types, classes, and (for symbolBasic entries) dimensions and sizes.
## Returns TRUE only if every symbol matches.
areMVSymTabsEqual <- function(symTab1, symTab2) {
    vN1 <- symTab1$getSymbolNames()
    vN2 <- symTab2$getSymbolNames()
    if(length(vN1) != length(vN2))
        return(FALSE)
    for(i in seq_along(vN1)) {
        if(vN1[i] != vN2[i])
            return(FALSE)
        if(symTab1$symbols[[i]]$type != symTab2$symbols[[i]]$type)
            return(FALSE)
        ## identical() avoids the recycling warning (and possible false match)
        ## that all(a == b) gives when the two class vectors differ in length.
        if(!identical(is(symTab1$symbols[[i]]), is(symTab2$symbols[[i]])))
            return(FALSE)
        if(inherits(symTab1$symbols[[i]], "symbolBasic")) {
            if(symTab1$symbols[[i]]$nDim != symTab2$symbols[[i]]$nDim)
                return(FALSE)
            size1 <- symTab1$symbols[[i]]$size
            size2 <- symTab2$symbols[[i]]$size
            ## isTRUE() guards against NA (unknown) sizes: any(NA != NA) is NA,
            ## which previously crashed the if().  Unknown sizes compare equal.
            if(isTRUE(any(size1 != size2)))
                return(FALSE)
        }
    }
    return(TRUE)
}
|
d5ae0ee86b5a8f6a9d03746101d4968cd7a15934 | 094bacd8cf2e5908b711fdb17c1b3dc085351dad | /R/make-iris-combined.R | 15c9b473c67b62d5eef7eaa5cddb5b3a18442599 | [
"MIT"
] | permissive | tiernanmartin/drakepkg | 8cb1d8d152dedb539fd2d960508ff1d64963e7a4 | f6ce672e19328678939a926e38ad8f9191d266e8 | refs/heads/master | 2020-03-24T01:21:12.150866 | 2020-03-06T19:52:08 | 2020-03-06T19:52:08 | 142,332,694 | 29 | 12 | NOASSERTION | 2020-03-06T19:52:10 | 2018-07-25T17:29:56 | R | UTF-8 | R | false | false | 696 | r | make-iris-combined.R | #' @title Make Two Versions of Iris
#' @description Combine the true \code{iris} dataset with a resampled copy
#'   into one table, labelling each row with its source.
#' @param iris_int_raw the \code{iris} dataset stored within the \code{link{drakepkg}} package
#' @param iris_ext_raw the \code{iris} dataset downloaded from the web
#' @return a \code{tibble} object
#' @export
make_iris_combined <- function(iris_int_raw, iris_ext_raw) {
  ## Stack the two inputs; .id = "group" records the source of each row
  ## ("1" = first argument / internal, "2" = second argument / external).
  stacked <- dplyr::bind_rows(iris_int_raw, iris_ext_raw, .id = "group")
  ## Relabel the source flag and set Species levels in order of appearance.
  labelled <- dplyr::mutate(
    stacked,
    group = factor(dplyr::if_else(group %in% "1", "accurate (internal)", "fake (external)")),
    Species = forcats::fct_inorder(Species)
  )
  return(labelled)
}
|
6d312e94afe5ca75c2284407b4b91da665dc4516 | 68906be46b2043e45ea3fc11769f8dbc60aeba6d | /profile_R_version.R | e3c997f37d9ae9af42ac7b4c7ea90224f3ce5ada | [] | no_license | mcasl/AMORE | 87808d7cc2e4ea0d6b036bbb2d08c8121e7ea541 | e71d417f76a033c54478d526aa0254735ba73281 | refs/heads/master | 2023-04-05T13:36:10.777574 | 2023-03-27T17:31:00 | 2023-03-27T17:31:00 | 56,980,333 | 1 | 0 | null | 2023-03-27T17:31:03 | 2016-04-24T15:54:00 | HTML | UTF-8 | R | false | false | 357 | r | profile_R_version.R | library(AMORE)
## Benchmark: time the pure-R AMORE training loop on a simple regression.
## Network: 1 input, 50 hidden units (tansig), 1 output (purelin); adaptive
## gradient descent ('ADAPTgd') with the LMS error criterion.
newff(c(1,50,1), learning.rate.global=0.01, momentum.global=0.01, error.criterium='LMS', hidden.layer='tansig', output.layer='purelin', method='ADAPTgd') -> net
## Training data: learn y = x^2 from 1000 uniform random inputs.
## NOTE(review): no set.seed(), so timings and final errors vary run to run.
P <- runif(1000)
T <- P ^2
## Train and time; n.shows=10 with show.step=100 presumably means 10 progress
## reports of 100 steps each -- confirm against the AMORE documentation.
system.time( train(net, P, T,n.shows=10,show.step=100) )
# index.show: 10 LMS 5.41332627268925e-06
# user system elapsed
# 2.852 0.000 2.853
|
1f526f4bcd002eeff6e1312b435dd6a67b17051e | 66a60bd87e025448b488ee37a4727c017da27404 | /ui.R | 16ded98a2ac7e804f613d20f491dd7542d497db8 | [] | no_license | rosagradilla19/uberNYC | da229783d61a480dd8db17dd4f9ae48dbafae513 | 07018e78ea8bc3862e2414a1343aea9b986406e0 | refs/heads/master | 2021-03-08T10:09:50.629961 | 2020-10-02T18:51:55 | 2020-10-02T18:51:55 | 246,339,617 | 0 | 1 | null | 2020-04-08T16:16:47 | 2020-03-10T15:34:33 | Jupyter Notebook | UTF-8 | R | false | false | 4,899 | r | ui.R | library(shiny)
library(plotly)
library(lubridate)
library(dplyr)

## UI definition for the NYC Uber dashboard: an "About" page plus three
## analysis tabs.  Each analysis tab pairs a radio-button selector with one
## plotly output whose content the server switches on the selected choice.
## NOTE(review): "Burrow" (sic, for "borough") is kept as-is in tab titles and
## radio choices because the server matches these exact strings.
shinyUI(
  navbarPage(
    title = "NYC Uber",

    ## Landing page: project background, team photos, and guiding questions.
    tabPanel("About",
             div(img(src="https://raw.githubusercontent.com/jmhobbs29/uberNYC/master/skyline2.png", height = "25%", width="50%"),
                 style="text-align: center;"),
             column(12,
                    align='center',
                    strong(p("City of New York", style = "font-family: 'arial'; font-size: 48px"))),
             div(column(3, img(src="Jeeda.jpg", height="60%", width="60%")),style="text-align: center;"),
             div(column(3, img(src="Rosa.jpg", height="60%", width="60%")),style="text-align: center;"),
             div(column(3, img(src="jamie.jpg", height="60%", width="60%")),style="text-align: center;"),
             div(column(3, img(src="Olivia.jpg", height="60%", width="60%")),style="text-align: center;"),
             column(3, align='center', p("Jeeda AbuKhader", style = "font-family: 'arial'; font-size:16px")),
             column(3, align='center', p("Rosa Gradilla", style = "font-family: 'arial'; font-size:16px")),
             column(3, align='center', p("Jamie Hobbs", style = "font-family: 'arial'; font-size:16px")),
             column(3, align='center', p("Olivia Roberts", style = "font-family: 'arial'; font-size:16px")),
             p("Purpose:", style = "font-family: 'arial'; font-size:24px"),
             ## Text fix: "gather, analysis and present" -> "gather, analyze, and present".
             p("As consultants, we have been hired by the City of New York to gather, analyze, and present findings on ride share services, such as Uber. This analysis is intended to influence regulation and planning boards alike by understanding patterns of activity within the 2014 historical data of Uber pick-up locations. ", style = "font-family: 'arial'; font-size:18px"),
             p("Focus Areas:", style = "font-family: 'arial'; font-size:24px"),
             ## Text fix: "consulation" -> "consultation".
             p("In our scope of work on the consultation, we were given the following questions to answer from our research:", style = "font-family: 'arial'; font-size:18px"),
             column(12, align="center", p("1. How does the frequency of rides change throughout the year? Per burrow?", style = "font-family: 'arial'; font-size:18px")),
             column(12, align="center", p("2. What does a given holiday look like for the ride share company?", style = "font-family: 'arial'; font-size:18px")),
             column(12, align="center", p("3. What does an 'Average' Day look like in NYC in regards to the ride share company and taxi services?", style = "font-family: 'arial'; font-size:18px"))),

    ## Borough-level views; input$uberburrow selects which plot is rendered.
    tabPanel("Uber by Burrow",
             sidebarLayout(
               sidebarPanel(
                 radioButtons("uberburrow", "Select Visualization", choices =c("Burrow Bar Chart", "Top Locations Map", "Burrow by Day", "Burrow by Hour", "Top Locations Table"))
               ),
               mainPanel(
                 plotlyOutput('burrow')
               )
             )),

    ## Holiday views; input$graph selects the visualization.
    tabPanel("Holidays in NYC",
             sidebarLayout(
               sidebarPanel(
                 radioButtons("graph", "Select Visualization", choices = c("Holiday Line Graph", "Holiday Pick-up Map", "Independence Day Pick-ups", "Pride Day Pick-ups"), selected = "Holiday Line Graph")),
               mainPanel(
                 plotlyOutput("WhichPlot")
               )
             )
    ),

    ## Taxi-vs-Uber comparison; input$taxi selects the visualization.
    tabPanel("Taxi vs Uber",
             sidebarLayout(
               sidebarPanel(
                 radioButtons("taxi", "Select Visualization", choices = c("Ride Distribution Chart", "Top Locations Map", "Top Uber Table", "Top Taxi Table"))
               ),
               mainPanel(
                 plotlyOutput("taxiUber")
               )
             )
    )
  )
)
|
2aef88e66129c8ccb565d1f058bbd9a3b9d06677 | 179c53ca385f8918ad3daa587d1a080eff73e31d | /using textual and binary.R | 349be1b878baa27087a61313e294afe4e41c0eca | [] | no_license | pujoseno/-Menggunakan-Format-Tekstual-dan-Biner-untuk-Menyimpan-Data-Pada-R | 03f5301c492b13deafc774f5abc781848a9bb2c0 | 2298be20d029020c24854e5d63a49fe285cce527 | refs/heads/master | 2020-03-22T08:45:18.416172 | 2018-07-05T03:14:34 | 2018-07-05T03:14:34 | 139,788,493 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 902 | r | using textual and binary.R | setwd('F:/blogs/otw upload/using textual and binary/')
### 1. Textual formats: dput() and dump()
y <- data.frame(a = 1, b = "a")
y
## Print 'dput' output to console
dput(y)
## Send 'dput' output to a file
dput(y, file = "y.R")
## Read in 'dput' output from a file
new.y <- dget("y.R")
new.y
x <- "foo"
x
y <- data.frame(a = 1L, b = "a")
y
## dump() serialises several named objects into one sourceable R script.
dump(c("x", "y"), file = "data.R")
rm(x, y)
## The next two lines error on purpose: x and y were just removed.
x
y
## source() re-creates x and y from the dumped script.
source("data.R")
y
str(y)
x
#### 2. Binary formats: save(), load(), save.image(), serialize()
a <- data.frame(x = rnorm(100), y = runif(100))
head(a)
b <- c(3, 4.4, 1 / 3)
b
## Save 'a' and 'b' to a file
save(a, b, file = "mydata.rda")
rm(a,b)
## Errors on purpose: 'a' no longer exists until load() restores it below.
a
# Load 'a' and 'b' into your workspace
load("mydata.rda")
tail(a)
## Save everything to a file
save.image(file = "mydata.RData")
rm(y,a,b)
## Errors on purpose until the full workspace image is reloaded.
tail(a)
## load all objects in this file
load("mydata.RData")
tail(a)
## serialize() returns the raw-byte representation of any R object.
x <- list(1, 2, 3)
x
serialize(x, NULL)
|
523c77ca4b0280d991774a038f5e6e8e06348140 | 82bc1bf05918e8e7f30e7f3725bae5f4587c193e | /clustering/millennial.R | 82cd7c14366a1fc66553f58c51b4c773dc86b7ea | [] | no_license | jack1981/millennial | f99bf0c1c020d8bcef4634c5764685a2fe04b7c5 | 19050779bc6dfb161c9387fa7cbdccc67565d729 | refs/heads/master | 2021-05-03T21:17:21.324083 | 2018-02-18T06:57:40 | 2018-02-18T06:57:40 | 120,380,266 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 4,954 | r | millennial.R | #install.packages("factoextra")
library(factoextra) # clustering algorithms & visualization
# ---- Read in the data ----
## Feature matrix plus a separate one-column-per-row file holding the
## column names in its V2 column.
data <- read.table('C:/Data/millennial')
col <- read.table('C:/Data/millennial_column')
colnames(data) <- col$V2
# ---- Sample/separate data ----
n <- dim(data)[1]
p <- dim(data)[2]
## Even-numbered columns are taken as visit counts, odd-numbered ones
## (minus the first, presumably a user ID) as spend.
## (':' binds tighter than '%%', so 1:p %% 2 is (1:p) %% 2 as intended.)
## TODO confirm the alternating layout against millennial_column.
visit <- which( 1:p %% 2 == 0 )
spend <- which( 1:p %% 2 == 1 )[-1]
# visit
data_v <- data[,visit]
# spend
data_s <- data[,spend]
# visit & spend
data_vs <- data
# part of columns to identify the quality of cluster
visit_vars<-c("google_visit","apple_visit","jcrew_visit","itunes_visit")
spend_vars<-c("google_aveSpend","apple_aveSpend","jcrew_aveSpend","itunes_aveSpend")
visit_spend_vars<-c("google_visit","apple_visit","jcrew_visit","itunes_visit","google_aveSpend","apple_aveSpend","jcrew_aveSpend","itunes_aveSpend")
names(data_v)
names(data_s)
names(data_vs)
# scale the data sets
## NOTE(review): data_v and data_s already exclude original column 1 by
## construction, so the [,-1] below drops their *first feature* column,
## not an ID (only data_vs still carries the ID) — confirm this is intended.
scaleDataV=scale(data_v[,-1])
scaleDataS=scale(data_s[,-1])
scaleDataVS=scale(data_vs[,-1])
#View(scaleDataV)
# Elbow plot of total within-cluster sum of squares for k = 1..nc, used to
# pick the number of clusters for kmeans().
#
#   data: numeric matrix / data frame, rows = observations.
#   nc:   largest number of clusters to evaluate (must be >= 1).
#   seed: RNG seed set before *each* kmeans() call so runs are reproducible.
#
# Side effect: draws the plot on the active device; returns plot()'s value
# (NULL), invisibly meaningful only for its side effect.
wssplot <- function(data, nc=15, seed=1234){
  stopifnot(nc >= 1)
  # k = 1: total SS about the column means, i.e. (n - 1) * sum of variances.
  wss <- (nrow(data)-1)*sum(apply(data,2,var))
  if (nc >= 2) {
    # seq(2, nc) instead of 2:nc — the latter counts *down* when nc < 2 and
    # would overwrite wss[1] before plot() fails on mismatched lengths.
    for (i in seq(2, nc)) {
      set.seed(seed)
      wss[i] <- sum(kmeans(data, centers=i)$withinss)
    }
  }
  plot(1:nc, wss, type="b", xlab="Number of Clusters",
       ylab="Within groups sum of squares")
}
# Run k-means on an (already scaled) data set. Thin wrapper that fixes this
# project's defaults: 10 clusters, up to 500 iterations, 15 random restarts.
# Returns the "kmeans" fit object unchanged.
doCluster <- function(scaleData, bestk = 10, iterMax = 500, nstart = 15) {
  kmeans(scaleData, centers = bestk, iter.max = iterMax, nstart = nstart)
}
# Cluster the scaled data, treat the single largest cluster as the
# "background" group, and return the IDs of everything outside it.
#
#   rawData:   original data; column 1 is assumed to hold the record ID,
#              with rows aligned to scaleData.
#   scaleData: scaled numeric matrix actually fed to k-means.
#   bestk/iterMax/nstart: forwarded to doCluster().
#
# Returns a list:
#   k_out - the full kmeans() fit,
#   d_out - column-1 IDs of rows *not* in the biggest cluster,
#   neg   - index of the biggest ("negative") cluster.
#
# (The original also computed table()/tapply() summaries whose results were
# discarded; that dead exploratory code has been removed — no behavior change.)
findBiggestCluster <- function(rawData, scaleData, bestk = 10, iterMax = 500, nstart = 15) {
  k_out <- doCluster(scaleData, bestk, iterMax, nstart)
  neg <- which.max(k_out$size)
  keep <- is.element(k_out$cluster, seq_len(bestk)[-neg])
  list(k_out = k_out, d_out = rawData[keep, 1], neg = neg)
}
# === use visit ===
## Stage 1: cluster everyone on visit features; the dominant cluster is the
## "non-millennial" background. Stage 2 re-clusters that background on its
## non-empty columns, and the final IDs are those outside stage 2's
## dominant cluster.
# find the best k from SSE
wssv1 <- wssplot(scaleDataV)
#best_v
## k chosen by eye from the elbow in the plot above.
best_v1 <- 12
millennial_v1 <- findBiggestCluster(data,scaleDataV,best_v1)
#View(millennial_v1$d_out)
# further break down the biggest one
## NOTE(review): 'row' and 'col' shadow base::row()/col(); harmless in this
## script but worth renaming.
row <- is.element(millennial_v1$k_out$cluster, millennial_v1$neg)
col <- which(apply(data_v[row, ], 2, sum) != 0)
data_v2 <- data_v[row, col]
scaleDataV2 <- scale(data_v2[,-1])
#View(data2_v)
wssv2 <- wssplot(scaleDataV2)
#best2_v
best_v2 <- 10
## NOTE(review): 'data' (all rows) is passed as rawData while scaleDataV2
## only has the stage-1 background rows, so this call's d_out is unreliable
## (the shorter logical index recycles when subsetting rawData). The script
## instead derives the final IDs itself via user_v2 below.
millennial_v2 <- findBiggestCluster(data,scaleDataV2,best_v2)
user_v2 = data[row, 1]
millennial_v_final <- user_v2[ is.element(millennial_v2$k_out$cluster, (1:best_v2)[-millennial_v2$neg])]
#View(millennial_v_final)
#visual the clusters, but use few columns
data_par_v <- subset(scaleDataV2,select = visit_vars)
#View(data_par_v)
k_v_par <- doCluster(data_par_v,best_v2)
fviz_cluster(k_v_par, data = data_par_v)
# save the final result to csv
write.csv(millennial_v_final, "C:/Data/millennial_v_final.csv")
# === use spend ===
## Same two-stage procedure, on the average-spend columns.
# find the best k from SSE
wsss1 <- wssplot(scaleDataS)
#best_s
best_s1 <- 11
millennial_s1 <- findBiggestCluster(data,scaleDataS,best_s1)
# further break down the biggest one
row <- is.element(millennial_s1$k_out$cluster, millennial_s1$neg)
col <- which(apply(data_s[row, ], 2, sum) != 0)
data_s2 <- data_s[row, col]
scaleDataS2 <- scale(data_s2[,-1])
wsss2 <- wssplot(scaleDataS2)
#best_s2
best_s2 <- 12
millennial_s2 <- findBiggestCluster(data,scaleDataS2,best_s2)
user_s2 = data[row, 1]
millennial_s_final <- user_s2[ is.element(millennial_s2$k_out$cluster, (1:best_s2)[-millennial_s2$neg])]
#View(millennial_s_final)
#visual the clusters, but use few columns
data_par_s <- subset(scaleDataS2,select = spend_vars)
#View(data_par_s)
k_s_par <- doCluster(data_par_s,best_s2)
fviz_cluster(k_s_par, data = data_par_s)
# save the final result to csv
write.csv(millennial_s_final, "C:/Data/millennial_s_final.csv")
# === use visit+spend ===
## Same two-stage procedure, on the combined visit + spend columns.
# find the best k from SSE
wssvs1 <- wssplot(scaleDataVS)
#best_vs
best_vs1 <- 10
millennial_vs1 <- findBiggestCluster(data,scaleDataVS,best_vs1)
# further break down the biggest one
row <- is.element(millennial_vs1$k_out$cluster, millennial_vs1$neg)
col <- which(apply(data_vs[row, ], 2, sum) != 0)
data_vs2 <- data_vs[row, col]
scaleDataVS2 <- scale(data_vs2[,-1])
wssvs2 <- wssplot(scaleDataVS2)
#best_vs2
best_vs2 <- 8
millennial_vs2 <- findBiggestCluster(data,scaleDataVS2,best_vs2)
user_vs2 = data[row, 1]
millennial_vs_final <- user_vs2[ is.element(millennial_vs2$k_out$cluster, (1:best_vs2)[-millennial_vs2$neg])]
#View(millennial_s_final)
#visual the clusters, but use few columns
data_par_vs <- subset(scaleDataVS2,select = visit_spend_vars)
#View(data_par_vs)
k_vs_par <- doCluster(data_par_vs,best_vs2)
fviz_cluster(k_vs_par, data = data_par_vs)
# save the final result to csv
write.csv(millennial_vs_final, "C:/Data/millennial_vs_final.csv")
|
a8a639dd21376c0a1f86995d657cb60894b0b3bb | 324858f2e5b71d81667889e53a0eb0b90b04f91e | /Week 6/create_dataset_predict_gname.R | bd4b144ef2207aa3d02a103165980450b6bfe213 | [] | no_license | paulafortuna/tum-data-mining-lab | bb48e7abdca9598e24da681ed475580058ca2f0a | f9b79a15c8bc808a2dba5f064ec5930b39bc16c1 | refs/heads/master | 2020-03-18T19:23:37.274506 | 2017-01-31T19:26:21 | 2017-01-31T19:26:21 | null | 0 | 0 | null | null | null | null | ISO-8859-1 | R | false | false | 3,967 | r | create_dataset_predict_gname.R | library(dplyr)
# Have the 5 groups with more incidents for each area
df <- read.csv("../terrorism.csv", stringsAsFactors = FALSE)
## Per region: drop the catch-all "Other" label, count incidents per group,
## and keep each region's 5 most frequent groups.
top_names <- df %>%
  filter(gname != "Other") %>%
  group_by(region_txt, gname) %>%
  summarise(freq = n()) %>%
  arrange(region_txt, desc(freq)) %>%
  group_by(region_txt) %>%
  filter(row_number() <= 5L) %>%
  ungroup()
# generate a new dataset with all the names that aren't the 5 groups marked as other
# if gname %in% top_names[region_txt], keep gname, otherwise "Other"
#new_df <- df %>%
#  inner_join(select(top_names, region_txt, gname), by="region_txt") %>%
#  mutate(gname = ifelse(gname.x == gname.y, gname.x, "Other"))
#mutate(gname = ifelse(gname %in% (filter(top_names, region_txt = )))
## Named list: region -> character vector of its top-5 group names.
top_names2 <- lapply(unique(top_names$region_txt), function(x) top_names %>% filter(region_txt == x) %>% .$gname %>% as.character)
names(top_names2) <- unique(top_names$region_txt)
# Label the groups according to the top we did
## NOTE(review): row-by-row loop over the whole data set; correct, but a
## vectorised mapply/ifelse over (gname, region_txt) would be much faster.
for(i in 1:nrow(df))
{
  df[i, "gname"] <- ifelse(df[i, "gname"] %in% top_names2[[df[i, "region_txt"]]], df[i, "gname"], "Other")
}
# Clean the variables we want (with median)
## nperps/nperpcap: treat NA and the -99 sentinel as missing; replace with
## the median of the valid (!= -99) values.
df[is.na(df$nperps) | df$nperps == -99, "nperps"] <- df %>% filter(nperps != -99) %>% .$nperps %>% median(na.rm = TRUE)
df[is.na(df$nperpcap) | df$nperpcap == -99, "nperpcap"] <- df %>% filter(nperpcap != -99) %>% .$nperpcap %>% median(na.rm = TRUE)
df[is.na(df$nkill), "nkill"] <- median(df$nkill, na.rm = TRUE)
df[is.na(df$nwound), "nwound"] <- median(df$nwound, na.rm = TRUE)
## NOTE(review): for the four variables below, the first line sets every NA
## to 0, so the is.na() mask on the second line is already empty and the
## median replacement never fires (dead code). Also, unlike nperps above,
## -99 sentinel values are left untouched; the second line was presumably
## meant to target == -99 — confirm intent before changing behavior.
df[is.na(df$nhostkid), "nhostkid"] <- 0
df[is.na(df$nhostkid), "nhostkid"] <- df %>% filter(nhostkid != -99) %>% .$nhostkid %>% median(na.rm = TRUE)
df[is.na(df$ransomamt), "ransomamt"] <- 0
df[is.na(df$ransomamt), "ransomamt"] <- df %>% filter(ransomamt != -99) %>% .$ransomamt %>% median(na.rm = TRUE)
df[is.na(df$ransompaid), "ransompaid"] <- 0
df[is.na(df$ransompaid), "ransompaid"] <- df %>% filter(ransompaid != -99) %>% .$ransompaid %>% median(na.rm = TRUE)
df[is.na(df$nreleased), "nreleased"] <- 0
df[is.na(df$nreleased), "nreleased"] <- df %>% filter(nreleased != -99) %>% .$nreleased %>% median(na.rm = TRUE)
### Attack Types
### Target Types
# Nationalities:
df[df$natlty1_txt == ".", "natlty1_txt"] <- "Unknown"
## Weapon types
## Shorten the very long vehicle-bomb label and normalise "." to Unknown.
df[df$weaptype1_txt == "Vehicle (not to include vehicle-borne explosives, i.e., car or truck bombs)", "weaptype1_txt"] <- "Vehicle"
df[df$weapsubtype1_txt == ".", "weapsubtype1_txt"] <- "Unknown"
## Write out only the modelling columns; the commented-out names were
## deliberately excluded from the feature set.
write.csv(df
          %>% select(iyear,
                     #imonth,
                     #iday,
                     multiple,
                     success,
                     suicide,
                     nperps,
                     nperpcap,
                     nkill,
                     nwound,
                     #property,
                     #ishostkid,
                     nhostkid,
                     #ransom,
                     ransomamt,
                     ransompaid,
                     nreleased,
                     # new categorical variables:
                     attacktype1_txt,
                     targtype1_txt,
                     natlty1_txt,
                     weaptype1_txt,
                     weapsubtype1_txt,
                     region_txt,
                     gname), "terrorism_cleaned_group.csv", fileEncoding = "UTF-8", row.names=FALSE)
# df %>% inner_join(select(top_names, region_txt, gname), by = "region_txt") %>% View
######
#df <- df %>% mutate(yes = gname %in% top_names2[region_txt])
#dfs <- lapply(unique(df$region_txt), function(region) df %>% filter(region_txt == region))
#names(dfs) <- unique(df$region_txt)
#for(region in regions)
#{
# df[df$region_txt == region & df$gname %in% top_names2[df$region_txt], "gname"] <- ifelse()
#}
###
#apply(df, 1, function(x)
# {
# if (!(x["gname"] %in% top_names2[x["region_txt"]]))
# x["gname"] <- "Other"
# })
## |
f59bcd3edb0102ca1db68df946e16de11ea9fa0f | 78e46929f37c1292ac7a6dd1f75a62ee86562dd6 | /R/sources_oceanographic.R | 75598443a4936f0af1e6d2275673eb6417dff0e4 | [
"LicenseRef-scancode-public-domain"
] | permissive | whigg/blueant | e1a5ec25d404e5e25c7dcb7d36ca1dc5c77f0045 | 67a535a316520a68472727149467430e6eef5fc7 | refs/heads/master | 2021-05-21T16:55:29.097980 | 2020-03-26T04:15:59 | 2020-03-26T04:15:59 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 18,497 | r | sources_oceanographic.R | #' Oceanographic data sources
#'
#' Data sources providing oceanographic data.
#'
#' \itemize{
#' \item "CSIRO Atlas of Regional Seas 2009": CARS is a digital climatology, or atlas of seasonal ocean water properties
#' \item "World Ocean Atlas 2009": World Ocean Atlas 2009 is included here for convenience but has been superseded by the World Ocean Atlas 2013 V2
#' \item "World Ocean Atlas 2013 V2": World Ocean Atlas 2013 version 2 (WOA13 V2) is a set of objectively analyzed (1 degree grid) climatological fields of in situ temperature, salinity, dissolved oxygen, Apparent Oxygen Utilization (AOU), percent oxygen saturation, phosphate, silicate, and nitrate at standard depth levels for annual, seasonal, and monthly compositing periods for the World Ocean. It also includes associated statistical fields of observed oceanographic profile data interpolated to standard depth levels on 5 degree, 1 degree, and 0.25 degree grids
#' \item "Argo ocean basin data (USGODAE)": Argo float data from the Global Data Access Centre in Monterey, USA (US Global Ocean Data Assimilation Experiment). These are multi-profile netcdf files divided by ocean basin. Accepts \code{region} parameter values of "pacific" (default), "atlantic", and/or "indian". Also accepts \code{years} parameter: an optional vector of years to download data for
#' \item "Argo profile data": Argo profile data, by default from the Global Data Access Centre in Monterey, USA (US Global Ocean Data Assimilation Experiment). The DAC can be changed by specifying a \code{dac_url} parameter (see example below). Also see \code{\link{bb_handler_argo}} for a description of the other parameters that this source accepts.
#' }
#'
#' The returned tibble contains more information about each source.
#'
#' @param name character vector: only return data sources with name or id matching these values
#' @param formats character: for some sources, the format can be specified. See the list of sources above for details
#' @param time_resolutions character: for some sources, the time resolution can be specified. See the list of sources above for details
#' @param ... : additional source-specific parameters. See the list of sources above for details
#'
#' @references See the \code{doc_url} and \code{citation} field in each row of the returned tibble for references associated with these particular data sources
#'
#' @seealso \code{\link{sources_altimetry}}, \code{\link{sources_biological}}, \code{\link{sources_meteorological}}, \code{\link{sources_ocean_colour}}, \code{\link{sources_reanalysis}}, \code{\link{sources_sdm}}, \code{\link{sources_seaice}}, \code{\link{sources_sst}}, \code{\link{sources_topography}}
#'
#' @return a tibble with columns as specified by \code{\link{bb_source}}
#'
#' @examples
#' \dontrun{
#' ## define a configuration and add the Atlas of Regional Seas data to it
#' cf <- bb_config("/my/file/root")
#' src <- sources_oceanographic("CSIRO Atlas of Regional Seas 2009")
#' cf <- bb_add(cf,src)
#'
#' ## Argo data, Pacific ocean basin only, all years
#' src <- sources_oceanographic("Argo ocean basin data (USGODAE)", region="pacific")
#'
#' ## Argo data, Pacific ocean basin for 2018 only
#' src <- sources_oceanographic("Argo ocean basin data (USGODAE)",
#' region="pacific", years=2018)
#'
#' ## Argo data, all ocean basins and for 2017 and 2018 only
#' src <- sources_oceanographic("Argo ocean basin data (USGODAE)",
#' region=c("pacific", "indian", "atlantic"), years=c(2017, 2018))
#'
#' ## Argo merge profile data, from the French GDAC (ftp://ftp.ifremer.fr/ifremer/argo/)
#' ## Only download profiles from institutions "CS" or "IN", south of 30S,
#' ## with parameter "NITRATE" or "CHLA"
#' src <- sources_oceanographic("Argo profile data", profile_type = "merge",
#' dac_url = "ftp://ftp.ifremer.fr/ifremer/argo/",
#' institutions = c("CS", "IN"),
#' latitude_filter = function(z) z < -30,
#' parameters = c("CHLA", "NITRATE"))
#' }
#' @export
sources_oceanographic <- function(name,formats,time_resolutions, ...) {
    ## Normalise 'name' to lower case so the branch matching below is
    ## case-insensitive; NULL means "return every oceanographic source".
    if (!missing(name) && !is.null(name)) {
        assert_that(is.character(name))
        name <- tolower(name)
    } else {
        name <- NULL
    }
    ## Source-specific options (region, years, dac_url, profile_type, ...)
    ## arrive via '...'; each branch pulls out what it understands.
    ## NOTE(review): 'formats' and 'time_resolutions' are accepted but never
    ## used by any branch of this function.
    dots <- list(...)
    out <- tibble()
    ## ---- CSIRO Atlas of Regional Seas 2009 ----
    if (is.null(name) || any(name %in% tolower(c("CSIRO Atlas of Regional Seas 2009", "cars2009")))) {
        out <- rbind(out,
                     bb_source(
                         name = "CSIRO Atlas of Regional Seas 2009",
                         id = "cars2009",
                         description = "CARS is a digital climatology, or atlas of seasonal ocean water properties.",
                         doc_url = "http://www.marine.csiro.au/~dunn/cars2009/",
                         citation = "Ridgway K.R., J.R. Dunn, and J.L. Wilkin, Ocean interpolation by four-dimensional least squares - Application to the waters around Australia, J. Atmos. Ocean. Tech., Vol 19, No 9, 1357-1375, 2002",
                         source_url = "http://www.marine.csiro.au/atlas/",
                         license = "Please cite",
                         ##method=list("bb_handler_wget",accept_regex=".*2009.*.nc.gz",robots_off=TRUE),
                         method = list("bb_handler_rget", level = 1),
                         postprocess = list("bb_gunzip"),
                         collection_size = 2.8,
                         data_group = "Oceanographic"))
    }
    ## ---- World Ocean Atlas 2009 (superseded by WOA13 V2, kept for convenience) ----
    if (is.null(name) || any(name %in% tolower(c("World Ocean Atlas 2009", "WOA09")))) {
        out <- rbind(out,
                     bb_source(
                         name = "World Ocean Atlas 2009",
                         id = "WOA09",
                         description = "World Ocean Atlas 2009 (WOA09) is a set of objectively analyzed (1 degree grid) climatological fields of in situ temperature, salinity, dissolved oxygen, Apparent Oxygen Utilization (AOU), percent oxygen saturation, phosphate, silicate, and nitrate at standard depth levels for annual, seasonal, and monthly compositing periods for the World Ocean. It also includes associated statistical fields of observed oceanographic profile data interpolated to standard depth levels on both 1 degree and 5 degree grids",
                         doc_url = "http://www.nodc.noaa.gov/OC5/WOA09/pr_woa09.html",
                         citation = "Citation for WOA09 Temperature: Locarnini, R. A., A. V. Mishonov, J. I. Antonov, T. P. Boyer, and H. E. Garcia, 2010. World Ocean Atlas 2009, Volume 1: Temperature. S. Levitus, Ed. NOAA Atlas NESDIS 68, U.S. Government Printing Office, Washington, D.C., 184 pp.\nCitation for WOA09 Salinity: Antonov, J. I., D. Seidov, T. P. Boyer, R. A. Locarnini, A. V. Mishonov, and H. E. Garcia, 2010. World Ocean Atlas 2009, Volume 2: Salinity. S. Levitus, Ed. NOAA Atlas NESDIS 69, U.S. Government Printing Office, Washington, D.C., 184 pp.\nCitation for WOA09 Oxygen: Garcia, H. E., R. A. Locarnini, T. P. Boyer, and J. I. Antonov, 2010. World Ocean Atlas 2009, Volume 3: Dissolved Oxygen, Apparent Oxygen Utilization, and Oxygen Saturation. S. Levitus, Ed. NOAA Atlas NESDIS 70, U.S. Government Printing Office, Washington, D.C., 344 pp.\nCitation for WOA09 Nutrients: Garcia, H. E., R. A. Locarnini, T. P. Boyer, and J. I. Antonov, 2010. World Ocean Atlas 2009, Volume 4: Nutrients (phosphate, nitrate, silicate). S. Levitus, Ed. NOAA Atlas NESDIS 71, U.S. Government Printing Office, Washington, D.C., 398 pp.",
                         license = "Please cite",
                         source_url = "https://data.nodc.noaa.gov/woa/WOA09/NetCDFdata/",
                         ##method=list("bb_handler_wget",robots_off=TRUE,reject="index.html*"), ## --recursive --no-parent
                         method = list("bb_handler_rget", level = 1),
                         postprocess = NULL,
                         collection_size = 6.0,
                         data_group = "Oceanographic"))
    }
    ## ---- World Ocean Atlas 2013 V2 ----
    if (is.null(name) || any(name %in% tolower(c("World Ocean Atlas 2013 V2", "WOA13V2")))) {
        out <- rbind(out,
                     bb_source(
                         name = "World Ocean Atlas 2013 V2",
                         id = "WOA13V2",
                         description = "World Ocean Atlas 2013 version 2 (WOA13 V2) is a set of objectively analyzed (1 degree grid) climatological fields of in situ temperature, salinity, dissolved oxygen, Apparent Oxygen Utilization (AOU), percent oxygen saturation, phosphate, silicate, and nitrate at standard depth levels for annual, seasonal, and monthly compositing periods for the World Ocean. It also includes associated statistical fields of observed oceanographic profile data interpolated to standard depth levels on 5 degree, 1 degree, and 0.25 degree grids",
                         doc_url = "https://www.nodc.noaa.gov/OC5/woa13/",
                         citation = "Citation for WOA13 Temperature:\nLocarnini, R. A., A. V. Mishonov, J. I. Antonov, T. P. Boyer, H. E. Garcia, O. K. Baranova, M. M. Zweng, C. R. Paver, J. R. Reagan, D. R. Johnson, M. Hamilton, and D. Seidov, 2013. World Ocean Atlas 2013, Volume 1: Temperature. S. Levitus, Ed., A. Mishonov Technical Ed.; NOAA Atlas NESDIS 73, 40 pp.\nCitation for WOA13 Salinity:\nZweng, M.M, J.R. Reagan, J.I. Antonov, R.A. Locarnini, A.V. Mishonov, T.P. Boyer, H.E. Garcia, O.K. Baranova, D.R. Johnson, D.Seidov, M.M. Biddle, 2013. World Ocean Atlas 2013, Volume 2: Salinity. S. Levitus, Ed., A. Mishonov Technical Ed.; NOAA Atlas NESDIS 74, 39 pp.\nCitation for WOA13 Oxygen:\nGarcia, H. E., R. A. Locarnini, T. P. Boyer, J. I. Antonov, O.K. Baranova, M.M. Zweng, J.R. Reagan, D.R. Johnson, 2014. World Ocean Atlas 2013, Volume 3: Dissolved Oxygen, Apparent Oxygen Utilization, and Oxygen Saturation. S. Levitus, Ed., A. Mishonov Technical Ed.; NOAA Atlas NESDIS 75, 27 pp.\nCitation for WOA13 Nutrients:\nGarcia, H. E., R. A. Locarnini, T. P. Boyer, J. I. Antonov, O.K. Baranova, M.M. Zweng, J.R. Reagan, D.R. Johnson, 2014. World Ocean Atlas 2013, Volume 4: Dissolved Inorganic Nutrients (phosphate, nitrate, silicate). S. Levitus, Ed., A. Mishonov Technical Ed.; NOAA Atlas NESDIS 76, 25 pp.",
                         license = "Please cite",
                         source_url = "https://data.nodc.noaa.gov/woa/WOA13/DATAv2/",
                         ##method = list("bb_handler_wget",level=5,robots_off=TRUE,reject="index.html*",reject_regex="/(ascii|csv|shape|5564|6574|7584|8594|95A4|A5B2)/"),
                         method = list("bb_handler_rget", level = 6, reject_follow = "/(ascii|csv|shape|5564|6574|7584|8594|95A4|A5B2)/"),
                         comment = "Only the long-term (not per-decade) netcdf files are retrieved here: adjust the method reject_download parameter if you want ascii, csv, or shapefiles, or per-decade files.",
                         postprocess = NULL,
                         collection_size = 57,
                         data_group = "Oceanographic"))
    }
    ## ---- Argo multi-profile files by ocean basin (USGODAE) ----
    ## Accepts 'region' (pacific/atlantic/indian, default pacific) and an
    ## optional 'years' vector via '...'.
    if (is.null(name) || any(name %in% tolower(c("Argo ocean basin data (USGODAE)", "10.17882/42182")))) {
        if ("region" %in% names(dots)) {
            region <- tolower(dots[["region"]])
            if (!all(region %in% c("pacific", "atlantic", "indian")))
                stop("the region parameter should be one or more of \"pacific\", \"atlantic\", or \"indian\"")
        } else {
            region <- "pacific" ## default
        }
        if ("years" %in% names(dots)) {
            years <- dots[["years"]]
            assert_that(is.numeric(years) || is.character(years))
        } else {
            years <- ""
        }
        ## all source_url combinations of region and years
        ## (expand.grid gives the full region x years cross product; when
        ## years is "" the trailing slash is doubled and fixed by sub below)
        temp <- paste0(apply(expand.grid(paste0("http://www.usgodae.org/ftp/outgoing/argo/geo/", region, "_ocean"), years), 1, paste0, collapse = "/"), "/")
        temp <- sub("/+$", "/", temp) ## remove any double trailing slashes
        out <- rbind(out,
                     bb_source(
                         name = "Argo ocean basin data (USGODAE)",
                         id = "10.17882/42182",
                         description = "Argo float data from the Global Data Access Centre in Monterey, USA (US Global Ocean Data Assimilation Experiment). These are multi-profile netcdf files divided by ocean basin.",
                         doc_url = "http://www.argodatamgt.org/Documentation",
                         citation = "To properly acknowledge Argo data usage, please use the following sentence: \"These data were collected and made freely available by the International Argo Program and the national programs that contribute to it (http://www.argo.ucsd.edu, http://argo.jcommops.org). The Argo Program is part of the Global Ocean Observing System. http://doi.org/10.17882/42182\"",
                         license = "Please cite",
                         source_url = temp,
                         ##method = list("bb_handler_wget", level=3, robots_off=TRUE),
                         method = list("bb_handler_rget", level = 3, accept_follow = "[[:digit:]]+/$"),
                         ## NOTE(review): this comment string looks stale now
                         ## that 'years' is configurable — confirm and update.
                         comment = "Only the 2018 data is so far included here",
                         postprocess = NULL,
                         collection_size = NA, ## unknown yet
                         data_group = "Oceanographic"))
    }
    ## general Argo profile source, that accepts dac_url parameter but defaults to the USGODAE DAC
    ## Other '...' options (profile_type, institutions, parameters,
    ## latitude_filter, longitude_filter) are forwarded to bb_handler_argo.
    if (is.null(name) || any(name %in% tolower(c("Argo profile data", "10.17882/42182 profile")))) {
        if ("profile_type" %in% names(dots)) {
            profile_type <- dots[["profile_type"]]
        } else {
            profile_type <- "merge"
        }
        if ("dac_url" %in% names(dots)) {
            dac_url <- dots[["dac_url"]]
        } else {
            dac_url <- "https://www.usgodae.org/ftp/outgoing/argo/"
            ## can also use this, but is slower: dac_url = "ftp://ftp.ifremer.fr/ifremer/argo/",
        }
        if ("institutions" %in% names(dots)) {
            institutions <- dots[["institutions"]]
        } else {
            institutions <- NULL
        }
        if ("parameters" %in% names(dots)) {
            parameters <- dots[["parameters"]]
        } else {
            parameters <- NULL
        }
        if ("latitude_filter" %in% names(dots)) {
            latitude_filter <- dots[["latitude_filter"]]
        } else {
            latitude_filter <- function(z) rep(TRUE, length(z))
        }
        if ("longitude_filter" %in% names(dots)) {
            longitude_filter <- dots[["longitude_filter"]]
        } else {
            longitude_filter <- function(z) rep(TRUE, length(z))
        }
        out <- rbind(out,
                     bb_source(
                         name = "Argo profile data",
                         id = "10.17882/42182 profile",
                         description = paste0("Argo profile data from ", dac_url, "."),
                         doc_url = "http://www.argodatamgt.org/Documentation",
                         citation = "To properly acknowledge Argo data usage, please use the following sentence: \"These data were collected and made freely available by the International Argo Program and the national programs that contribute to it (http://www.argo.ucsd.edu, http://argo.jcommops.org). The Argo Program is part of the Global Ocean Observing System. http://doi.org/10.17882/42182\"",
                         license = "Please cite",
                         source_url = dac_url,
                         method = list("bb_handler_argo", profile_type = profile_type, institutions = institutions, parameters = parameters, latitude_filter = latitude_filter, longitude_filter = longitude_filter),
                         postprocess = NULL,
                         collection_size = NA, ## unknown yet
                         data_group = "Oceanographic"))
    }
    ## for backwards-compatibility, a source with "USGODAE" in the name, that does not accept a dac_url parameter
    ## NOTE(review): this branch reuses the same id ("10.17882/42182 profile")
    ## as the general branch above; requesting both by id yields both rows.
    if (is.null(name) || any(name %in% tolower(c("Argo profile data (USGODAE)")))) {
        if ("profile_type" %in% names(dots)) {
            profile_type <- dots[["profile_type"]]
        } else {
            profile_type <- "merge"
        }
        if ("institutions" %in% names(dots)) {
            institutions <- dots[["institutions"]]
        } else {
            institutions <- NULL
        }
        if ("parameters" %in% names(dots)) {
            parameters <- dots[["parameters"]]
        } else {
            parameters <- NULL
        }
        if ("latitude_filter" %in% names(dots)) {
            latitude_filter <- dots[["latitude_filter"]]
        } else {
            latitude_filter <- function(z) rep(TRUE, length(z))
        }
        if ("longitude_filter" %in% names(dots)) {
            longitude_filter <- dots[["longitude_filter"]]
        } else {
            longitude_filter <- function(z) rep(TRUE, length(z))
        }
        out <- rbind(out,
                     bb_source(
                         name = "Argo profile data (USGODAE)",
                         id = "10.17882/42182 profile",
                         description = "Argo profile data from the Global Data Access Centre in Monterey, USA (US Global Ocean Data Assimilation Experiment).",
                         doc_url = "http://www.argodatamgt.org/Documentation",
                         citation = "To properly acknowledge Argo data usage, please use the following sentence: \"These data were collected and made freely available by the International Argo Program and the national programs that contribute to it (http://www.argo.ucsd.edu, http://argo.jcommops.org). The Argo Program is part of the Global Ocean Observing System. http://doi.org/10.17882/42182\"",
                         license = "Please cite",
                         source_url = "https://www.usgodae.org/ftp/outgoing/argo/",
                         ## can also use this, but is slower: source_url = "ftp://ftp.ifremer.fr/ifremer/argo/",
                         method = list("bb_handler_argo", profile_type = profile_type, institutions = institutions, parameters = parameters, latitude_filter = latitude_filter, longitude_filter = longitude_filter),
                         postprocess = NULL,
                         collection_size = NA, ## unknown yet
                         data_group = "Oceanographic"))
    }
    out
}
|
30579869972715dbf977da34dc740e0843720cd3 | 5b5cf88112f5239e19302d8cfebff60ca3e2199a | /scripts/univariate/univariate-cond.R | fdf148e95762e47ba62b8900b8759b373791e1b9 | [] | no_license | eth-mds/bmi | e32ffc57717151c6b79954fb9c64b5c78dc5473e | c45a80e6e4295636364b2d8a4c7860b5cf531465 | refs/heads/main | 2023-08-12T06:33:26.860001 | 2021-10-01T14:47:42 | 2021-10-01T14:47:42 | 371,434,398 | 1 | 0 | null | null | null | null | UTF-8 | R | false | false | 2,563 | r | univariate-cond.R | library(ricu)
library(ggplot2)
library(cowplot)
library(gridExtra)
library(grid)
library(stringr)
library(magrittr)
library(officer)
library(assertthat)
library(boot)
library(data.table)
library(icd)
root <- rprojroot::find_root(rprojroot::has_file(".gitignore"))
r_dir <- file.path(root, "r")
## Source every helper in <root>/r (defines CI_dat(), CI_plot(), ...).
invisible(lapply(list.files(r_dir, full.names = TRUE), source))
## Point ricu at the project-local data-source dictionary.
Sys.setenv("RICU_CONFIG_PATH" = file.path(root, "config", "dict"))
## One entry per conditioning variable. Fields:
##   src           - data sources to pool (passed to CI_dat)
##   z             - conditioning concept name
##   z_binning     - maps raw z values to group indices
##   titles        - plot-title suffix
##   z_cond_name   - legend title; z_cond_labels - one label per bin
##   figname       - output file stem under <root>/figures
cndl <- list(
  diabetes = list(
    src = c("mimic", "eicu"),
    z = "DM",
    z_binning = function(x) .bincode(x, c(-Inf, 0.5, Inf)),
    titles = "by diabetes status",
    z_cond_name = "Diabetes",
    z_cond_labels = c("No", "Yes"),
    figname = "Figure3.0"
  ),
  hba1c = list(
    src = "mimic",
    z = "hba1c",
    z_binning = function(x) .bincode(x, c(-Inf, 6.1, 6.5, 7, Inf)),
    titles = "by HbA1c level",
    z_cond_name = "HbA1c",
    z_cond_labels = c("<6.1%", "6.1-6.5%", "6.6-7%", ">7%"),
    figname = "Figure3.5"
  )#,
  # admission = list(
  #   src = c("aumc", "mimic", "eicu"),
  #   z = "adm",
  #   z_binning = function(x) ifelse(x == "med", 1, ifelse(x == "surg", 2, NA)),
  #   titles = "by admission type",
  #   z_cond_name = "Admission",
  #   z_cond_labels = c("Medical", "Surgical", "Other"),
  #   figname = "Figure2.5"
  # )
)
## The four outcomes become panels a)-d) of each figure, with matching
## panel titles and y-axis labels.
out <- c("tw_avg_glu", "death", "hypo", "gv_cv")
ttl <- c("Time-weighted average glucose",
         "Mortality", "Hypoglycemia", "Glucose variability")
ylabs <- c("Time-weighted glucose average (mg/dL)",
           "Mortality (proportion)", "Hypoglycemia (proportion)",
           "Blood glucose coefficient of variation (%)")
## One 2x2 figure per conditioning variable.
for (i in seq_along(cndl)) {
  plt <- list()
  for (j in seq_along(out)) {
    res <- CI_dat(cndl[[i]][["src"]], y = out[j], z = cndl[[i]][["z"]],
                  z_binning = cndl[[i]][["z_binning"]])
    plt[[j]] <- CI_plot(
      res, title = paste(ttl[j], cndl[[i]][["titles"]]),
      y_label = ylabs[j], z_cond = cndl[[i]][["z"]],
      z_cond_name = cndl[[i]][["z_cond_name"]],
      z_cond_labels = cndl[[i]][["z_cond_labels"]]
    )
    ## Keep one shared legend (taken from the last panel; all panels use
    ## the same z grouping) and strip legends from the panels themselves.
    leg <- get_legend(plt[[j]] + theme(legend.position = "bottom"))
    plt[[j]] <- plt[[j]] + theme(legend.position = "none")
    ## Console progress marker.
    cat("\n\n", i, "and ", out[j], "\n\n")
  }
  fig <- plot_grid(plotlist = plt, ncol = 2L, labels = c("a)", "b)", "c)", "d)"))
  fig <- plot_grid(fig, leg, ncol = 1L, rel_heights = c(1, 0.05))
  ggsave(file.path(root, "figures", paste0(cndl[[i]][["figname"]], ".tiff")),
         plot = fig, width = 18, height = 12, compression = "lzw", type = "cairo")
}
|
6f5ee92459cbd29a51a2c7b458d163864e0ad020 | 0c8e62fd2f465d30440d2da7f8028fe238b82b38 | /plot1.R | 707480059841c74443a91e8ec9b609de8bbeb0aa | [] | no_license | horrorkumani/ExData_Plotting1 | 61034d1caa370da021e960f126767b81c7867331 | 74f7888dd6d75c1f00a9fd75b11f8f4558fdf39a | refs/heads/master | 2020-04-06T14:33:48.822639 | 2015-11-09T14:17:57 | 2015-11-09T14:17:57 | 42,268,282 | 0 | 0 | null | 2015-09-10T20:13:09 | 2015-09-10T20:13:09 | null | UTF-8 | R | false | false | 526 | r | plot1.R | data <- read.table("C:/Users/HP/Documents/GitHub/Huong/household_power_consumption.txt", header = TRUE, sep = ";", dec = ".")
## Plot 1: histogram of Global Active Power on 2007-02-01 and 2007-02-02.
data$Date <- as.Date(as.character(data$Date), format = "%d/%m/%Y")
d1 <- as.Date("01/02/2007", format = "%d/%m/%Y")
d2 <- as.Date("02/02/2007", format = "%d/%m/%Y")
echantillon <- data[data$Date == d1 | data$Date == d2, ]
## Convert via as.character() first: the raw file codes missing values as
## "?", so without na.strings the column may be read as a factor, and
## as.numeric() on a factor returns level codes, not kilowatt values.
## suppressWarnings() hides the expected "NAs introduced" message for "?".
echantillon$Global_active_power <- suppressWarnings(as.numeric(as.character(echantillon$Global_active_power)))
## The values are already in kilowatts; the previous "/500" was a workaround
## for the factor level-code bug above and produced a wrong x-axis.
## hist() drops non-finite values, so the "?" rows are simply ignored.
hist(echantillon$Global_active_power, main = "Global Active Power", col = "red",
     xlab = "Global Active Power (kilowatts)")
|
92a488b45481c257dfc99f6fa0ed0feeafed1c9f | b3a191d2b4c7e3011375135b87f3dd876cf05111 | /R/binSum.R | 15a5daff814ea71fa9f7e7c5b58194151b35f2b5 | [] | no_license | tdhock/PeakSegJoint | 3d712782cf601f4e0478653ebab7e4eea5684d04 | c80a250b00f6e6a60a0c4dcb3d02e98c9d95cebd | refs/heads/master | 2023-04-27T05:14:53.542606 | 2023-04-24T23:53:31 | 2023-04-24T23:53:31 | 33,544,447 | 6 | 3 | null | 2020-02-14T19:26:32 | 2015-04-07T13:22:43 | R | UTF-8 | R | false | false | 2,773 | r | binSum.R | binSum <- structure(function
### Compute sum of compressed coverage profile in bins, using fast C
### code.
(compressed,
### data.frame with integer columns chromStart, chromEnd, count.
 bin.chromStart=0L,
### Base before first bin.
 bin.size=1L,
### Bin size.
 n.bins=2000L,
### Number of bins.
 empty.as.zero=FALSE
### Sometimes the last few bins do not have any overlapping data in
### compressed. If TRUE, set these counts to 0. If FALSE, ignore these
### bins (returning a data.frame with fewer than n.bins rows).
 ){
  ## Validate inputs up front: the C routine expects integer coverage
  ## vectors and scalar (length-1) integer bin parameters.
  stopifnot(is.integer(compressed$chromStart))
  stopifnot(is.integer(compressed$chromEnd))
  stopifnot(is.integer(compressed$count))
  stopifnot(is.integer(bin.chromStart))
  stopifnot(length(bin.chromStart) == 1)
  stopifnot(is.integer(bin.size))
  stopifnot(length(bin.size) == 1)
  stopifnot(is.integer(n.bins))
  stopifnot(length(n.bins) == 1)
  bin.total <- integer(n.bins)
  ## Delegate the binning to compiled code in PeakSegJoint; per-bin sums
  ## come back in the bin.total slot of the returned list.
  result <-
    .C("binSum_interface",
       profile.chromStart=as.integer(compressed$chromStart),
       profile.chromEnd=as.integer(compressed$chromEnd),
       profile.coverage=as.integer(compressed$count),
       n.profiles=as.integer(nrow(compressed)),
       bin.total=as.double(bin.total),
       bin.size=as.integer(bin.size),
       n.bins=as.integer(n.bins),
       bin.chromStart=as.integer(bin.chromStart),
       PACKAGE="PeakSegJoint")
  total <- result$bin.total
  ## The C code marks bins with no overlapping data as -1; optionally
  ## recode those as zero coverage instead of dropping them below.
  if(empty.as.zero){
    total[total == -1] <- 0L
  }
  chromStart <- seq(bin.chromStart, by=bin.size, l=n.bins)
  chromEnd <- chromStart + bin.size
  ## Bins still carrying the -1 sentinel are removed by the final subset,
  ## so fewer than n.bins rows may be returned when empty.as.zero=FALSE.
  data.frame(chromStart, chromEnd, count=total,
             mean=total/bin.size)[total >= 0, ]
### data.frame with n.bins rows and columns chromStart, chromEnd,
### count, mean.
}, ex=function(){
  ## bins of size 3bp.
  ## -1- -3- -5-
  ##   -2- -4-
  ## 123456789012345 base index.
  ## --2---
  ##       --1-
  ##           --0-------
  ## Coverage profile.
  profile <- data.frame(chromStart=as.integer(c(0, 6, 10)),
                        chromEnd=as.integer(c(6, 10, 10000)),
                        count=as.integer(c(2, 1, 0)))
  library(PeakSegJoint)
  bins <- binSum(profile,
                 bin.chromStart=0L,
                 bin.size=3L,
                 n.bins=2000L)
  library(ggplot2)
  bases <- data.frame(position=1:15, base="N")
  ggplot()+
    ylab("")+
    geom_text(aes(position, 0, label=base),
              data=bases)+
    geom_step(aes(chromStart+0.5, count, color=what),
              data=data.frame(profile, what="profile"),
              size=2)+
    geom_step(aes(chromStart+0.5, count, color=what),
              data=data.frame(bins, what="bin total"))+
    geom_step(aes(chromStart+0.5, mean, color=what),
              data=data.frame(bins, what="bin mean"))+
    coord_cartesian(xlim=c(0, max(bases$position)))
})
|
bc2d71d0c746ff63225f06bb00e01beeff9590a9 | fdfca978d2343d3c8eb2b9459e073008678d83be | /R/change data/change data 4-3.R | 2aacdf81c48dfb2dbcfb53a0f739308fd7862e01 | [] | no_license | kaiyu33/getData | dd37d7e28ecac1335f4b06cf1d3402fd91c8a5b5 | d5f93122c5926b22e30e242d7b0ddf8c7f5bb8e7 | refs/heads/master | 2021-01-10T09:37:52.736265 | 2016-04-27T12:08:56 | 2016-04-27T12:08:56 | 54,831,347 | 1 | 1 | null | null | null | null | UTF-8 | R | false | false | 1,034 | r | change data 4-3.R | library(dplyr)
# Batch-convert the stock CSVs in F:/000: the date column uses the Taiwanese
# (ROC) calendar, so Gregorian year = ROC year + 1911. For each input file a
# "newnew<name>.CSV" is written with the converted dates prepended.
a<-dir("F:/000","csv")
length.a<-length(a)
for (m in 1:length.a) {
  x1_path<-paste("F:/000/",a[m],sep = "")
  csvv<-read.csv(x1_path,header = FALSE,stringsAsFactors = FALSE)
  colnames(csvv)<-c("date","vol_S","vol_P","P_S","P_H","P_L","P_E","P_D","exc")
  # Row count (dplyr::count() on a data frame returns n, extracted via "[").
  numdata<-sapply(count(csvv),"[",1)
  finddata<-colnames(csvv) == "date"
  if(sum(finddata)==1){
    for (n in 1:numdata) {
      # Split "yy/mm/dd" and shift the ROC year to Gregorian (+1911).
      b<-sapply(csvv[finddata],"[", n)
      bb<-strsplit(b, "/")
      c<-as.numeric(sapply(bb,"[",1))+1911
      # NOTE(review): `date` is only reset to NULL at the END of the loop
      # body; on the very first file it still refers to base R's date()
      # function, so this c() call yields a list rather than a character
      # vector -- initialise date <- NULL before the loop to be safe.
      date<-c(date,paste(c,sapply(bb,"[",2),sapply(bb,"[",3),sep = "/"))
    }
    # for (n in 1:numdata) {
    #  date2<-as.data.frame(date)
    #}
    # Round-trip through a temp CSV, then bind the converted dates (rows 3+)
    # next to the original columns before writing the output file.
    date2<-as.matrix(date)
    write.csv(date2, file = "F:/000/tmp.CSV")
    Xdate<-read.csv("F:/000/tmp.CSV",stringsAsFactors = FALSE)
    A<-select(Xdate,2)
    numdate2<-sapply(count(A),"[",1)
    B<-slice(A,3:numdate2)
    fincsvv<-cbind(B,csvv)
    new_path<-paste("F:/000/newnew",a[m],".CSV",sep = "")
    write.csv(fincsvv, file = new_path)
    date<-NULL
  }
}
|
8c91e1e5024af015da0833d5d504c1192a88244c | 8700bdbb18994d172d76b75e41f7097137c547b9 | /man/date_yw.Rd | 8312f4016df8495a9886b7dae7131105ccdf89f2 | [] | no_license | cran/dint | 1b598cd4fd01b18f31d00d6354d2481ab5e41bad | 0245ef537511db22996f4283cae5f388d75636ea | refs/heads/master | 2022-11-01T23:44:08.177571 | 2022-10-17T05:52:38 | 2022-10-17T05:52:38 | 145,910,880 | 0 | 0 | null | null | null | null | UTF-8 | R | false | true | 1,231 | rd | date_yw.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/date_yw.R
\name{date_yw}
\alias{date_yw}
\alias{is_date_yw}
\alias{as_date_yw}
\title{A Simple S3-Class for Year-Isoweek Dates}
\usage{
date_yw(y, w)
is_date_yw(x)
as_date_yw(x)
}
\arguments{
\item{y}{year}
\item{w}{week (optional)}
\item{x}{any R object}
}
\value{
\code{date_yw} returns an object of type \code{date_yw}
\code{is_date_yw} returns \code{TRUE} or \code{FALSE} depending on whether its
argument is of type \code{date_yw} or not.
\code{as_date_yw} attempts to coerce its argument to \code{date_yw}
}
\description{
A simple data type for storing year-isoweek dates in a human readable integer
format, e.g.: the 52nd isoweek of 2012 is stored as 201252. Supports simple
arithmetic operations such as \code{+} and \code{-} as well formatting.
}
\examples{
date_yw(2013, 12)
as_date_yw(201612)
}
\seealso{
\code{\link[=format.date_yw]{format.date_yw()}}, \code{\link[=seq.date_yw]{seq.date_yw()}}, \code{\link[=date_xx_arithmetic]{date_xx_arithmetic()}}
Other date_xx subclasses:
\code{\link{date_ym}()},
\code{\link{date_yq}()},
\code{\link{date_y}()}
}
\concept{date_xx subclasses}
|
4271ae2e255eaecca12809a4c6c2df7d3895a2c8 | e78ae3fdd458fc4b8bee318025614d5d6fd2af8f | /R/cdtDownloadRFE_tamsat.R | 827585123c7b2903dfe96f519d70b6fa67b5b4e9 | [] | no_license | rijaf-iri/CDT | b42fd670adfad59c08dcf990a95f9f1ebea9a9e4 | e1bb6deac6e814656bf5ed13b8d4af4d09475c9d | refs/heads/master | 2023-07-27T09:16:15.407835 | 2023-07-21T02:16:42 | 2023-07-21T02:16:42 | 136,133,394 | 10 | 12 | null | 2018-09-28T03:56:51 | 2018-06-05T06:53:11 | R | UTF-8 | R | false | false | 7,365 | r | cdtDownloadRFE_tamsat.R |
## toexport
tamsat3.0.download.iridl <- function(GalParams, nbfile = 3, GUI = TRUE, verbose = TRUE){
    # Download TAMSAT v3.0 rainfall estimates (one netCDF per time step) from
    # the IRI Data Library, subset server-side to the requested bounding box.
    # GalParams: list with tstep, bbox (minlon/maxlon/minlat/maxlat),
    #            date.range and dir2save. nbfile: number of parallel downloads.
    # Returns whatever cdt.download.data() returns (download status).
    dlpath <- "https://iridl.ldeo.columbia.edu/SOURCES/.Reading/.Meteorology/.TAMSAT/.TARCAT/.v3p0"
    vartime <- paste0(".", GalParams$tstep, "/.rfe")
    # Spatial subset expressed in the IRIDL "X/lo/hi/RANGE" URL grammar.
    rlon <- unlist(GalParams$bbox[c('minlon', 'maxlon')])
    rlon <- paste(c('X', rlon, 'RANGE'), collapse = "/")
    rlat <- unlist(GalParams$bbox[c('minlat', 'maxlat')])
    rlat <- paste(c('Y', rlat, 'RANGE'), collapse = "/")
    # One URL per requested date: a URL-encoded "(date)" used as a T/VALUE selector.
    rdate <- iridl.format.date(GalParams$tstep, GalParams$date.range)
    urls <- urltools::url_encode(paste0("(", rdate$dates, ")"))
    urls <- paste0("T", "/", urls, "/", "VALUE")
    urls <- paste(dlpath, vartime, rlon, rlat, urls, 'data.nc', sep = "/")
    #########
    data.name <- paste0("TAMSATv3_", GalParams$tstep)
    outdir <- file.path(GalParams$dir2save, data.name)
    dir.create(outdir, showWarnings = FALSE, recursive = TRUE)
    destfiles <- file.path(outdir, paste0("tamsatv3_", rdate$out, ".nc"))
    ret <- cdt.download.data(urls, destfiles, destfiles, nbfile, GUI,
                             verbose, data.name, iridl.download.data)
    return(ret)
}
## toexport
tamsat3.1.download.iridl <- function(GalParams, nbfile = 3, GUI = TRUE, verbose = TRUE){
    # Download TAMSAT v3.1 rainfall estimates from the IRI Data Library.
    # Same flow as tamsat3.0.download.iridl(), but v3.1 exposes the gap-filled
    # variable ".rfe_filled" under per-timestep sub-datasets.
    dlpath <- "https://iridl.ldeo.columbia.edu/SOURCES/.Reading/.Meteorology/.TAMSAT/.TARCAT/.v3p1"
    vartime <- switch(GalParams$tstep,
                      "daily" = ".daily/.rfe_filled",
                      "dekadal" = ".dekadal/.rfe_filled",
                      "monthly" = ".monthly/.rfe_filled"
                     )
    # Spatial subset expressed in the IRIDL "X/lo/hi/RANGE" URL grammar.
    rlon <- unlist(GalParams$bbox[c('minlon', 'maxlon')])
    rlon <- paste(c('X', rlon, 'RANGE'), collapse = "/")
    rlat <- unlist(GalParams$bbox[c('minlat', 'maxlat')])
    rlat <- paste(c('Y', rlat, 'RANGE'), collapse = "/")
    # One URL per requested date: a URL-encoded "(date)" used as a T/VALUE selector.
    rdate <- iridl.format.date(GalParams$tstep, GalParams$date.range)
    urls <- urltools::url_encode(paste0("(", rdate$dates, ")"))
    urls <- paste("T", urls, "VALUE", sep = "/")
    urls <- paste(dlpath, vartime, rlon, rlat, urls, 'data.nc', sep = "/")
    #########
    data.name <- paste0("TAMSATv3.1_", GalParams$tstep)
    outdir <- file.path(GalParams$dir2save, data.name)
    dir.create(outdir, showWarnings = FALSE, recursive = TRUE)
    destfiles <- file.path(outdir, paste0("tamsatv3.1_", rdate$out, ".nc"))
    ret <- cdt.download.data(urls, destfiles, destfiles, nbfile, GUI,
                             verbose, data.name, iridl.download.data)
    return(ret)
}
## toexport
tamsatv3.1.download.reading <- function(GalParams, nbfile = 3, GUI = TRUE, verbose = TRUE){
    # Download TAMSAT v3.1 Africa-wide netCDF files from the University of
    # Reading server; each downloaded file is then cropped to GalParams$bbox
    # by tamsat.download.data()/tamsat.extract.data().
    # baseurl <- "https://www.tamsat.org.uk/public_data/data/v3.1"
    baseurl <- "http://www.tamsat.org.uk/public_data/data/v3.1"
    # File-name template per time step; %s slots: year, month, day/pentad/dekad.
    fileformat <- switch(GalParams$tstep,
                         "daily" = "rfe%s_%s_%s.v3.1.nc",
                         "pentad" = "rfe%s_%s-pt%s.v3.1.nc",
                         "dekadal" = "rfe%s_%s-dk%s.v3.1.nc",
                         "monthly" = "rfe%s_%s.v3.1.nc"
                        )
    # The server's directory name is "pentadal", not "pentad".
    timestep <- switch(GalParams$tstep, "pentad" = "pentadal", GalParams$tstep)
    rdate <- table.format.date.time(GalParams$tstep, GalParams$date.range)
    ncfiles0 <- sprintf(fileformat, rdate[, 1], rdate[, 2], rdate[, 3])
    urls <- file.path(baseurl, timestep, rdate[, 1], rdate[, 2], ncfiles0)
    #########
    data.name <- paste0("TAMSATv3.1_", GalParams$tstep)
    outdir <- file.path(GalParams$dir2save, data.name)
    # "Extracted" holds the bbox-cropped files; "Data_Africa" the raw downloads.
    extrdir <- file.path(outdir, "Extracted")
    dir.create(extrdir, showWarnings = FALSE, recursive = TRUE)
    origdir <- file.path(outdir, "Data_Africa")
    dir.create(origdir, showWarnings = FALSE, recursive = TRUE)
    destfiles <- file.path(origdir, ncfiles0)
    ncfiles <- file.path(extrdir, ncfiles0)
    ret <- cdt.download.data(urls, destfiles, ncfiles, nbfile, GUI, verbose,
                             data.name, tamsat.download.data,
                             bbox = GalParams$bbox, version = "3.1")
    return(ret)
}
tamsatv3.0.download.reading <- function(GalParams, nbfile = 3, GUI = TRUE, verbose = TRUE){
    # Download TAMSAT v3.0 Africa-wide netCDF files from the University of
    # Reading server and crop each to GalParams$bbox. Mirrors
    # tamsatv3.1.download.reading() but with the v3 path/file-name scheme
    # (and no "pentadal" directory level in the URL).
    # baseurl <- "https://www.tamsat.org.uk/public_data/TAMSAT3"
    baseurl <- "http://www.tamsat.org.uk/public_data/TAMSAT3"
    # File-name template per time step; %s slots: year, month, day/pentad/dekad.
    fileformat <- switch(GalParams$tstep,
                         "daily" = "rfe%s_%s_%s.v3.nc",
                         "pentad" = "rfe%s_%s-pt%s.v3.nc",
                         "dekadal" = "rfe%s_%s-dk%s.v3.nc",
                         "monthly" = "rfe%s_%s.v3.nc"
                        )
    rdate <- table.format.date.time(GalParams$tstep, GalParams$date.range)
    ncfiles0 <- sprintf(fileformat, rdate[, 1], rdate[, 2], rdate[, 3])
    urls <- file.path(baseurl, rdate[, 1], rdate[, 2], ncfiles0)
    #########
    data.name <- paste0("TAMSATv3_", GalParams$tstep)
    outdir <- file.path(GalParams$dir2save, data.name)
    # "Extracted" holds the bbox-cropped files; "Data_Africa" the raw downloads.
    extrdir <- file.path(outdir, "Extracted")
    dir.create(extrdir, showWarnings = FALSE, recursive = TRUE)
    origdir <- file.path(outdir, "Data_Africa")
    dir.create(origdir, showWarnings = FALSE, recursive = TRUE)
    destfiles <- file.path(origdir, ncfiles0)
    ncfiles <- file.path(extrdir, ncfiles0)
    ret <- cdt.download.data(urls, destfiles, ncfiles, nbfile, GUI, verbose,
                             data.name, tamsat.download.data,
                             bbox = GalParams$bbox, version = "3.0")
    return(ret)
}
#################################################################################
tamsat.download.data <- function(lnk, dest, ncfl, bbox, version){
    # Download one TAMSAT netCDF file (`lnk` -> `dest`) and crop it to `bbox`
    # (written to `ncfl`). Returns NULL on success, or basename(dest) on
    # failure -- the caller collects returned names as the failed-file list.
    xx <- basename(dest)
    # Probe the URL first; an unreadable link counts as a failure.
    link.exist <- try(readLines(lnk, 1), silent = TRUE)
    if(inherits(link.exist, "try-error")) return(xx)
    dc <- try(curl::curl_download(lnk, dest), silent = TRUE)
    if(!inherits(dc, "try-error")){
        ret <- tamsat.extract.data(dest, ncfl, bbox, version)
        if(ret == 0){
            xx <- NULL
        }else{
            # Extraction failed: discard the (possibly corrupt) download.
            unlink(dest)
        }
    }
    return(xx)
}
tamsat.extract.data <- function(dest, ncfl, bbox, version){
    # Crop the Africa-wide TAMSAT netCDF file `dest` to `bbox` and write the
    # subset to `ncfl`. Returns 0 on success, 1 on any failure.
    # For version "3.1", an all-NA 'rfe' field falls back to 'rfe_filled'.
    nc.in <- try(ncdf4::nc_open(dest), silent = TRUE)
    if(inherits(nc.in, "try-error")) return(1)
    # Wrap all reads in try() so the input handle is always released
    # (the previous version leaked the handle if any read errored).
    extr <- try({
        lon <- nc.in$var[['rfe']]$dim[[1]]$vals
        lat <- nc.in$var[['rfe']]$dim[[2]]$vals
        ix <- lon >= bbox$minlon & lon <= bbox$maxlon
        iy <- lat >= bbox$minlat & lat <= bbox$maxlat
        # Hyperslab start/count covering the contiguous bbox rows/columns.
        start <- c(which(ix)[1], which(iy)[1], 1)
        count <- c(diff(range(which(ix))) + 1, diff(range(which(iy))) + 1, 1)
        val <- ncdf4::ncvar_get(nc.in, "rfe", start, count)
        if(version == "3.1" && all(is.na(val)))
            val <- ncdf4::ncvar_get(nc.in, "rfe_filled", start, count)
        list(lon = lon[ix], lat = lat[iy], val = val)
    }, silent = TRUE)
    ncdf4::nc_close(nc.in)
    if(inherits(extr, "try-error")) return(1)
    # Reorder so latitudes are strictly increasing in the output grid.
    oy <- order(extr$lat)
    lat <- extr$lat[oy]
    val <- extr$val[, oy]
    # Write the cropped grid, encoding NA as the declared missing value.
    dx <- ncdf4::ncdim_def("lon", "degreeE", extr$lon, longname = "Longitude")
    dy <- ncdf4::ncdim_def("lat", "degreeN", lat, longname = "Latitude")
    missval <- -99
    ncgrd <- ncdf4::ncvar_def("rfe", "mm", list(dx, dy), missval,
                              "Rainfall Estimate", "float",
                              compression = 9)
    val[is.na(val)] <- missval
    nc.out <- ncdf4::nc_create(ncfl, ncgrd)
    ncdf4::ncvar_put(nc.out, ncgrd, val)
    ncdf4::nc_close(nc.out)
    return(0)
}
|
31d3cc3e9ac1669b79fb9e6e9fb35af1c49e574e | bd4b57d4f1677d6789513b52f1db752f756936ca | /man/rtnorm.Rd | 311fec77cad076afaa753307f9c9ecf64d3058ef | [] | no_license | cran/MCMCglmm | dd45441b7dac39b065e186e5960f68f8650c7133 | fdd8d46550344b9be1ed429ac8fea24ad0a40761 | refs/heads/master | 2023-07-06T00:24:58.456332 | 2023-06-30T20:00:02 | 2023-06-30T20:00:02 | 17,680,516 | 4 | 16 | null | null | null | null | UTF-8 | R | false | false | 682 | rd | rtnorm.Rd | \name{rtnorm}
\alias{rtnorm}
\title{Random Generation from a Truncated Normal Distribution}
\description{Samples from the Truncated Normal Distribution}
\usage{
rtnorm(n = 1, mean = 0, sd = 1, lower = -Inf, upper = Inf)
}
\arguments{
\item{n}{integer: number of samples to be drawn}
\item{mean}{vector of means}
\item{sd}{vector of standard deviations}
\item{lower}{left truncation point}
\item{upper}{right truncation point}
}
\value{
vector
}
\author{Jarrod Hadfield \email{j.hadfield@ed.ac.uk}}
\references{Robert, C.P. (1995) Statistics & Computing 5 121-125}
\seealso{\code{\link[msm]{rtnorm}}}
\examples{
hist(rtnorm(100, lower=-1, upper=1))
}
\keyword{distribution}
|
ee0700b05639d33967ce983247bc1b758d98cf62 | 08c60f2455bb51281fbf438b8872ee7909b92038 | /R/getNAcores.R | 11d7879de14d2262c51158e78cdaa7265bdeb2e7 | [] | no_license | PalEON-Project/stepps-baconizing | c7d8bf75a500e136b3bf5fbae5a6d263bcf16afc | 859686630f80423dde11470cfef594ada7bc14f6 | refs/heads/master | 2020-05-21T22:24:27.828697 | 2019-11-15T06:56:31 | 2019-11-15T06:56:31 | 25,702,499 | 0 | 1 | null | null | null | null | UTF-8 | R | false | false | 1,541 | r | getNAcores.R | #' @title Pull all North American Pollen sites from Neotoma
#' @param version The version number of the run.
#' @description This code takes the current version number of the paper, and then searches for all sites with pollen data within North America, returning the \code{download} objects associated with those sites.
#' @return A long \code{download_list} for all pollen sites in North America.
#'
north_american_cores <- function(version) {
  # One get_dataset() query per country (Canada, US, Mexico), keyed by the
  # GeoPoliticalID looked up in Neotoma's GeoPoliticalUnits table.
  gpid_table <- neotoma::get_table("GeoPoliticalUnits") %>%
    filter(GeoPoliticalName %in% c('Canada', 'United States', 'Mexico')) %>%
    dplyr::select(GeoPoliticalID) %>%
    unlist %>%
    map(~ neotoma::get_dataset(gpid = .x, datasettype = 'pollen'))
  # Combine the three per-country dataset lists into one dataset_list.
  na_datasets <- neotoma::bind(neotoma::bind(gpid_table[[1]], gpid_table[[2]]), gpid_table[[3]])
  # Cache the (slow) downloads under data/output, keyed by paper version.
  if(paste0('all_downloads_v', version, '.rds') %in% list.files('data/output')){
    all_downloads <- readRDS(paste0('data/output/all_downloads_v', version, '.rds'))
  } else{
    all_downloads <- get_download(na_datasets, verbose = FALSE)
    saveRDS(all_downloads, file = paste0('data/output/all_downloads_v', version, '.rds'))
  }
  # Geochronology is fetched and cached for its side effect only: note that
  # all_geochron is written to the rds cache but NOT returned.
  if(paste0('all_geochron_v', version, '.rds') %in% list.files('data/output')){
    all_geochron <- readRDS(paste0('data/output/all_geochron_v', version, '.rds'))
  } else{
    all_sites <- all_downloads %>% get_site
    all_geochron <- get_geochron(all_sites, verbose = TRUE)
    saveRDS(all_geochron, file = paste0('data/output/all_geochron_v', version, '.rds'))
  }
  return(all_downloads)
}
|
436b27341d543fe65eaa77bf5676d7318aa3b22f | 1754113fcf2b24c711ceb1d4b43513cb908cfd32 | /MakeEpisode.R | 63636cc1ad92447d1c573fcb4bf722c630b11d39 | [] | no_license | wuandtan/userInteractivity | 86bc8e26ce9bbe36f2bf95c12e69540b325c3bac | a22b8ba7fcd497942941279050e20614f776c4be | refs/heads/master | 2016-09-06T18:56:13.961695 | 2015-02-26T10:46:30 | 2015-02-26T10:46:30 | 31,362,664 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 2,005 | r | MakeEpisode.R | #merge the client info with the server info. Look for matching episodes from client info for server info
MakeEpisode <- function(server_episode, client_episode)
{
  # Merge server-side episodes with their matching client-side episodes.
  # Two episodes "match" when their start timestamps differ by <= 10 seconds;
  # a matched pair is combined into one data frame tagged by a `type` column
  # ("server"/"client") and sorted by time, segidx, c.starttime, -x.duration.
  # Unmatched server episodes are left as-is; the last episode is dropped.
  client_name <- names(client_episode[[1]])
  server_name <- names(server_episode[[1]])
  episode <- server_episode
  for (i in (1:length(server_episode)))
  {
    # Scan client episodes starting at index i (the two lists are assumed
    # to be roughly aligned in time).
    for (j in (i:length(client_episode)))
    {
      if(i> length(client_episode))
        break
      # Signed gap (seconds) between server and client episode start times.
      t <- as.numeric(as.duration(new_interval(server_episode[[i]]$time[1],client_episode[[j]]$client_time[1])))
      if(abs(t)<=10)
      {
        # Give both sides the union of columns (missing side filled with NA).
        # NOTE(review): this loop assigns ALL client columns on every pass --
        # the index k is unused, so a single assignment would suffice.
        tmp_server_episode <- server_episode[[i]]
        for (k in (1:length(client_name)))
        {
          tmp_server_episode[client_name] <- NA
        }
        tmp_server_episode$type <- c("server")
        tmp_client_episode <- client_episode[[j]]
        for (k in (1:length(server_name)))
        {
          tmp_client_episode[server_name[k]] <- NA
        }
        # Client rows adopt the common `time` column for interleaved sorting.
        tmp_client_episode$time <- tmp_client_episode$client_time
        tmp_client_episode$type <- c("client")
        keeps <- colnames(tmp_server_episode);
        tmp_client_episode <- tmp_client_episode[keeps] # make the same order
        episode[[i]] <- rbind(tmp_server_episode, tmp_client_episode)
        #episode[[i]] <- t[do.call(order,t),]
        episode[[i]] <- subset(episode[[i]], select = -c(client_time, bandwidthAvg,Idxepisode))
        # Coerce sort keys to numeric so arrange() orders them correctly.
        episode[[i]]$x.duration <- as.numeric(episode[[i]]$x.duration)
        episode[[i]]$c.starttime <- as.numeric(episode[[i]]$c.starttime)
        episode[[i]]$c.buffercount <- as.numeric(episode[[i]]$c.buffercount)
        t <- episode[[i]]
        episode[[i]] <- arrange(t,time,segidx, c.starttime,-x.duration) #t[order(t$time,t$segidx,-t$x.duration), ]
        break
      }
    }
  }
  #throw away the last episode, because it's across two files (two days). To be convenient, just throw it away.
  episode[[length(episode)]] <- NULL
  #return
  episode
}
|
ecaa779ada8f2b4e022387351c271de3595382b2 | df02e774e44afcaa5ba88d6475f54ee179395f08 | /R/globals.R | 8230f49a1d873e6af3f78557caf6bc9cef812707 | [] | no_license | digitmix/shiny.reglog | 616c6a5bd3dd3020bac8995b9474ef1ed6f13453 | f9f156848734f82497b7c5e2fd2d8f3783ccd2a5 | refs/heads/master | 2023-07-12T16:56:52.034787 | 2021-08-15T12:29:38 | 2021-08-15T12:29:38 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 137 | r | globals.R | # to remove "no visible binding for global variable" with dplyr functions
utils::globalVariables(c("timestamp", "user_id", "user_mail")) |
1677cb3859620885607657b0a6cf10110ef598ed | f6cd6c90a922fa8e1e5bd49b7b044f5a30bb2a47 | /数据汇总plyr包.R | 97ca123048d694264bcc26b6ad1b98d0ca6027d2 | [] | no_license | pan060757/RScripts | f5a1a2ac707a81457e1f670e7503f957042f7daa | 9a6e75b25a7abc04065eaabd873a998d610420a5 | refs/heads/master | 2021-05-06T12:26:52.425237 | 2018-03-16T13:27:18 | 2018-03-16T13:27:18 | 113,050,722 | 0 | 0 | null | null | null | null | GB18030 | R | false | false | 647 | r | 数据汇总plyr包.R | library(plyr)
# Data summarisation with the plyr package (tips and iris examples).
head(tips)
# Mean tip by sex: base aggregate() vs plyr::ddply().
aggregate(x=tips$tip,by=list(tips$sex),FUN=mean)
ddply(.data=tips,.variables='sex',.fun=function(x){mean(x$tip)})
# Ratio of total tips to total bill within a group.
ratio_fun <- function(x){
  sum(x$tip)/sum(x$total_bill)
}
ddply(tips,.(sex),function(x){mean(x$tip)})
ddply(tips,.(sex),ratio_fun)
iris[,-5]
data <- as.matrix(iris[,-5])
result4 <- adply(
  .data=data,
  .margins=2, # 1 = split by rows, 2 = split by columns
  .fun=function(x){
    max <- max(x)
    min <- min(x)
    median <- median(x)   # median (fixed: trailing CJK text was not commented,
    sd <- round(sd(x),2)  # standard deviation -- which made the script unparseable)
    return (c(max,min,median,sd))
  }
)
result4
# Column-wise mean over the numeric columns only.
colwise(mean,is.numeric)(iris)
data()
|
2e02201a1baa489d0787714a261c99c311e2d317 | 09af8d471405b60e75d12a3350cfb26ca4ef3104 | /R/print-method.R | 847312cbe52f7a9bcc4fb2a05daf1799cc727e88 | [] | no_license | justinjm/googleCloudAutoMLTablesR | 20faad2f77856af31e6b44dd6807435863b235e4 | 5477eb53b6a11c01de763b85048feaf44dca87be | refs/heads/master | 2022-12-23T21:42:59.441080 | 2022-12-10T14:56:18 | 2022-12-10T14:56:18 | 181,203,745 | 5 | 2 | null | null | null | null | UTF-8 | R | false | false | 2,116 | r | print-method.R | #' @export
print.gcat_location <- function(x,...){
  # S3 print method for a gcat_location object (AutoML Tables location).
  # cat0 is a package-internal print helper defined elsewhere in the package.
  cat("==Google Cloud AutoML Tables Location==\n")
  cat0("name: ", x$name)
  cat0("locationId: ", x$locationId)
}
#' Print method for gcat_dataset objects
#'
#' Shows the display name, row count, creation time and the key
#' tablesDatasetMetadata fields of an AutoML Tables dataset.
#' @export
print.gcat_dataset <- function(x,...){
  cat("==Google Cloud AutoML Tables Dataset==\n")
  cat0("displayName: ", x$displayName)
  cat0("exampleCount: ", x$exampleCount)
  cat0("createTime: ", as.character(timestamp_to_r(x$createTime)))
  cat0("primaryTableSpecId: ", x[["tablesDatasetMetadata"]][["primaryTableSpecId"]])
  cat0("targetColumnSpecId: ", x[["tablesDatasetMetadata"]][["targetColumnSpecId"]])
  cat0("statsUpdateTime: ", as.character(timestamp_to_r(x[["tablesDatasetMetadata"]][["statsUpdateTime"]])))
}
#' Print method for gcat_table_specs objects
#'
#' Shows the column/row counts and eTag of an AutoML Tables table spec.
#' @export
print.gcat_table_specs <- function(x,...){
  cat("==Google Cloud AutoML Tables Table Spec==\n")
  cat0("columnCount: ", x$columnCount)
  cat0("rowCount: ", x$rowCount)
  cat0("validRowCount: ", x$validRowCount)
  cat0("eTag: ", x$etag)
}
#' Print method for gcat_column_spec objects
#'
#' Shows the display name, declared data type, distinct-value count and
#' eTag of an AutoML Tables column spec.
#' @export
print.gcat_column_spec <- function(x,...){
  cat("==Google Cloud AutoML Tables Column Spec==\n")
  cat0("displayName: ", x$displayName)
  cat0("dataType: ", x[["dataType"]][["typeCode"]])
  cat0("distinctValueCount: ", x[["dataStats"]][["distinctValueCount"]])
  cat0("eTag: ", x$etag)
}
#' Print method for gcat_operation objects
#'
#' Shows the operation name plus its create/update timestamps
#' (converted to R time via timestamp_to_r).
#' @export
print.gcat_operation <- function(x,...){
  cat("==Google Cloud AutoML Tables Operation==\n")
  cat0("name: ", x$name)
  cat0("createTime: ", as.character(timestamp_to_r(x[["metadata"]][["createTime"]])))
  cat0("updateTime: ", as.character(timestamp_to_r(x[["metadata"]][["updateTime"]])))
}
#' Print method for gcat_model objects
#'
#' Shows name, display name, deployment state and timestamps of an
#' AutoML Tables model.
#' @export
print.gcat_model <- function(x,...){
  cat("==Google Cloud AutoML Tables Model==\n")
  cat0("name: ", x$name)
  cat0("displayName: ", x$displayName)
  cat0("createTime: ", as.character(timestamp_to_r(x[["createTime"]])))
  cat0("deploymentState: ", x$deploymentState)
  cat0("updateTime: ", as.character(timestamp_to_r(x[["updateTime"]])))
}
|
2f367318ba4e36ab5356ba53a6d76e1796a95cce | 9a022eb9368eb9c68f22f469cba5473a5fa135bc | /Creating Cluster Charts.r | c548086d5306c841debceb66a49b0a8f2887261f | [] | no_license | Trapman/Intro-R | 1a4ba3fab7402abdc4e87fe657cbc18d8a4522cc | b4458941f370a90fa5865049e4d3917a4776e909 | refs/heads/master | 2020-12-22T10:26:57.696325 | 2020-02-02T21:40:12 | 2020-02-02T21:40:12 | 236,751,370 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,107 | r | Creating Cluster Charts.r | # Help dispay the CONNECTIONS between data in your dataset
# INSTALL AND LOAD PACKAGES #############################

pacman::p_load(pacman, rio, tidyverse)
# pacman: for loading/unloading packages
# rio: for importing data
# tidyverse: for so many reasons

# LOAD AND PREPARE DATA #############################

# Save data to "df"
# Rename outcome as "y"
# Specify outcome with df$y
df <- import("data/StateData.xlsx") %>%   # fixed: "\S" in "data\StateData.xlsx" is an invalid escape
  as_tibble() %>%                         # as.tibble() is deprecated; use as_tibble()
  select(state_code,
    psychRegions,
    instagram:modernDance) %>%            # fixed: "::" is namespace access; ":" selects the column range
  mutate(psychRegions = as.factor(psychRegions)) %>%
  rename(y = psychRegions) %>%            # fixed typo: "pyschRegions"
  print()

# ANALYZE DATA #######################################

# Calculate Clusters
hc <- df %>%  # Get data
  dist %>%    # Compute distance/dissimilarity matrix
  hclust      # Compute hierarchical clusters

# Plot dendrogram (this is just the plot of the clusters)
hc %>% plot(labels = df$state_code)       # fixed: "dfstate_code" was missing "$"

# Draw boxes around clusters (fixed: rect.hcluster -> stats::rect.hclust)
hc %>% rect.hclust(k = 2, border = "gray80")  # 2 boxes
hc %>% rect.hclust(k = 3, border = "gray80")  # 3 boxes
hc %>% rect.hclust(k = 4, border = "gray80")  # 4 boxes
|
be93a44fd75afb0b0ad09d7aa6f90c7e601ba4ac | e63df753a1819766d0b40dbf035ebaaa2c0d1526 | /tests/testthat/test41Variofaces.r | 1f9e203ff01e8334bf61adf733db4aca47c894a5 | [] | no_license | cran/asremlPlus | cf74d404854a1f2b37e5acefe7fae86132b01a3e | 7a5b7b2124a5b736fc49b99f91eba4b6a41b5fb3 | refs/heads/master | 2023-08-31T16:36:03.614939 | 2023-08-24T07:30:12 | 2023-08-24T09:31:08 | 36,970,354 | 1 | 0 | null | null | null | null | UTF-8 | R | false | false | 3,404 | r | test41Variofaces.r | #devtools::test("asremlPlus")
context("model_selection")
# Local library path holding the licensed ASReml-R 4.1 installation.
asr41.lib <- "D:\\Analyses\\R ASReml4.1"

cat("#### Test variofaces using Atieno with asreml41\n")
test_that("Variofaces_asreml41", {
  skip_if_not_installed("asreml")
  skip_on_cran()
  library(dae)
  library(asreml, lib.loc = asr41.lib)
  library(asremlPlus)
  # Chickpea data set used throughout this test: 1056 plots x 18 variables.
  data(chkpeadat)
  testthat::expect_equal(nrow(chkpeadat), 1056)
  testthat::expect_equal(ncol(chkpeadat), 18)
  # Fit a model and calculate the Wald table
  current.asr <- asreml(fixed = Biomass.plant ~ Lines * TRT + Smarthouse/(vLanes + vPos),
                        random = ~Smarthouse:Zone + Smarthouse:spl(vLanes),
                        residual = ~dsum(~ar1(Lane):ar1(Position) | Smarthouse),
                        data = chkpeadat, trace = FALSE)
  summary(current.asr)$varcomp
  current.asrt <- as.asrtests(current.asr, denDF = "numeric")
  current.asrt$wald.tab
  recalcWaldTab(current.asrt, denDF="numeric", dDF.na = "maximum")
  # An asrtests object has 3 components; this Wald table has 7 terms and no
  # tests have been recorded yet.
  testthat::expect_equal(length(current.asrt), 3)
  testthat::expect_equal(nrow(current.asrt$wald.tab), 7)
  testthat::expect_equal(nrow(current.asrt$test.summary), 0)
  # Fit initial model
  current.asr <- asreml(Biomass.plant ~ Smarthouse + Lines*TRT ,
                        random = ~ Smarthouse:(Lane + Position),
                        residual = ~ dsum(~ar1(Lane):ar1(Position) | Smarthouse),
                        data=chkpeadat)
  summary(current.asr)$varcomp
  # Load current fit into an asrtests object
  current.asrt <- as.asrtests(current.asr, NULL, NULL)
  # Check for and remove any boundary terms
  current.asrt <- rmboundary(current.asrt)
  print(current.asrt)
  # Test Lanes autocorrelation
  current.asrt <- testresidual(current.asrt, "~ dsum(~Lane:ar1(Position) | Smarthouse)",
                               label="Lane autocorrelation", simpler=TRUE)
  # Test Pos autocorrelation (depends on whether Lane autocorrelation retained)
  k <- match("Lane autocorrelation", current.asrt$test.summary$terms)
  p <- current.asrt$test.summary$p
  {if (p[k] <= 0.05)
    current.asrt <- testresidual(current.asrt, "~ dsum(~ar1(Lane):Position | Smarthouse)",
                                 label="Pos autocorrelation", simpler=TRUE,
                                 update=FALSE)
    else
      current.asrt <- testresidual(current.asrt, "~ dsum(~Lane:Position | Smarthouse)",
                                   label="Pos autocorrelation", simpler=TRUE,
                                   update=FALSE)
  }
  # After model selection: 5 fixed terms remain; 3 tests were recorded.
  testthat::expect_equal(length(current.asrt), 3)
  testthat::expect_equal(nrow(current.asrt$wald.tab), 5)
  testthat::expect_equal(nrow(current.asrt$test.summary), 3)
  print(current.asrt)
  # Get current fitted asreml object
  current.asr <- current.asrt$asreml.obj
  # Refit with aom=TRUE so the residual quantities used by variofaces()
  # are stored on the object.
  current.asr <- update(current.asr, aom=TRUE)
  #Produce variogram faces plot (Stefanaova et al, 2009)
  faces <- variofaces(current.asr, nsim = 50, maxit = 20, seed = 14522)
  testthat::expect_equal(nrow(faces$face1), 48)
  testthat::expect_equal(ncol(faces$face1), 6)
  testthat::expect_equal(nrow(faces$face2), 44)
  testthat::expect_equal(ncol(faces$face2), 6)
  #Get Variety predictions
  Var.pv <- predict(current.asr, classify="Lines")$pvals
  testthat::expect_equal(nrow(Var.pv), 247)
  testthat::expect_equal(ncol(Var.pv), 4)
})
|
286d24171bdc853c3fce31ffe076d4dba01b317a | 634bd18366c38657340d42ffa7556bed0dd148e9 | /CDM/Dash_kor/1_CDM Mapping.R | bd55a265126573daeaff1d455afe3820e348d26a | [] | no_license | hong-sj/Digital_Health | 993fbc714bd42f2e60dc98be05f7b24184e539a3 | 0f53c0477078e40dcfab13c0d452c15a14f4d0e3 | refs/heads/main | 2023-08-26T07:06:51.642223 | 2021-11-01T07:07:30 | 2021-11-01T07:07:30 | 340,215,320 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 9,600 | r | 1_CDM Mapping.R |
## 외부 검증용 구현 코드
# update: 2021.04.21
# 패키지 호출
pacman::p_load(tidyverse, lubridate, readxl)
# 경로 설정
mypath <- "directory/"
# 샘플데이터 호출: 접수취소, 진료외방문 등 제외된 데이터
er <- read.csv(paste0(mypath, '/sample.csv'))
sapply(er, class) # class 확인
# 유형 변경
er$id <- as.character(er$id) # 환자번호
er$in_time <- as.POSIXct(er$in_time) # 내원일시
er$out_time <- as.POSIXct(er$out_time) # 퇴실일시
er$birth_Date <- as.POSIXct(er$birth_Date) # 생년월일
er$sex <- as.character(er$sex) # 성별
er$ktas_time <- as.POSIXct(er$ktas_time) # KTAS 기록 시간
er$ktas <- as.numeric(er$ktas) # KTAS 레벨
er$disease_type <- as.character(er$disease_type) # 질병 구분
er$visit_result <- as.numeric(er$visit_result) # 퇴실 결과(번호)
er$visit_result_text <- as.character(er$visit_result_text) # 퇴실 결과(한글)
# ===========================
# mapping : OMOP CDM v5.3.1
# ===========================
#--- Prepare ID ---
## Generate ID
# 1. u_id: 환자 총 기록수 (Record 기준)
er <- er %>% mutate(u_id = as.character(1:n())) # 153
#--- PERSON ---
#cdw data
person <- er %>% select(c(id, sex, birth_Date)) %>%
rename(PT_ID = id,
GENDER_SOURCE_VALUE = sex) %>%
distinct() # 100
#cdm data
join_table_gender <- read_xlsx(paste0(mypath,"/join_table/join_table_gender.xlsx"))
#mapping data
PERSON <- person %>%
mutate(person_id = 1:n(),
# gender_concept_id = NA, # Exist join_table
year_of_birth = year(birth_Date),
month_of_birth = month(birth_Date),
day_of_birth = day(birth_Date),
birth_datetime = NA,
race_concept_id = NA,
ethnicity_concept_id = NA,
location_id = NA,
provider_id = NA,
care_site_id = 1005, # SMC care site value
person_source_value = PT_ID,
gender_source_value = GENDER_SOURCE_VALUE,
# gender_source_concept_id = NA, # Exist join_table
race_source_value = NA,
race_source_concept_id = NA,
ethnicity_source_value = NA,
ethnicity_source_concept_id = NA) %>%
left_join(join_table_gender, by='gender_source_value') %>%
select(person_id, gender_concept_id, year_of_birth, month_of_birth, day_of_birth, birth_datetime,
race_concept_id, ethnicity_concept_id, location_id, provider_id,
care_site_id, person_source_value, gender_source_value, gender_source_concept_id,
race_source_value, race_source_concept_id, ethnicity_source_value, ethnicity_source_concept_id)
# data type 변경: date(as.Date), datetype(as.POSIXct)
type_person <- read.csv(paste0(mypath, "/type/type_person.csv"))
for(i in 1:length(type_person)){type_person[,i] <- as.character(type_person[,i])}
table(type_person$Type)
dt <- type_person[type_person$Type == "datetime",]$Field
i <- type_person[type_person$Type == "integer",]$Field
c50 <- type_person[type_person$Type == "varchar(50)",]$Field
PERSON[,dt] <- as.POSIXct(PERSON[,dt])
PERSON[,i] <- lapply(PERSON[,i], as.integer)
PERSON[,c50] <- lapply(PERSON[,c50], as.character)
rm(dt,i,c50, join_table_gender, type_person)
# person ID와 CDM source value 맞춘 table
personID_sourceValue_map <- PERSON %>%
select(person_id, person_source_value)
head(personID_sourceValue_map)
#--- VISIT_OCCURRENCE------
#cdw data
visit_occurrence <- er %>%
select(id, u_id, in_time, out_time, visit_result) %>%
mutate(VISIT_SOURCE_VALUE = paste0(id,'_',in_time)) %>% # 접수번호 대용 (환자번호 + 내원일시))
rename(VISIT_OCCURRENCE_ID = u_id,
VISIT_START_DATETIME = in_time,
VISIT_END_DATETIME = out_time,
DISCHARGE_TO_SOURCE_VALUE = visit_result)
#join table
join_table_visit_discharge <- read_xlsx(paste0(mypath,"/join_table/join_table_visit_discharge.xlsx"))
#mapping data
VISIT_OCCURRENCE <- visit_occurrence %>%
left_join(personID_sourceValue_map, by = c("id" = "person_source_value")) %>%
mutate(visit_occurrence_id = VISIT_OCCURRENCE_ID,
visit_concept_id = 9203, # Emergence Room Visit
visit_start_date = date(VISIT_START_DATETIME),
visit_start_datetime = VISIT_START_DATETIME,
visit_end_date = date(VISIT_END_DATETIME),
visit_end_datetime = VISIT_END_DATETIME,
visit_type_concept_id = NA,
provider_id = NA,
care_site_id = 1005, # SMC care site value
visit_source_value = VISIT_SOURCE_VALUE,
visit_source_concept_id = NA,
admitting_source_concept_id = NA,
admitting_source_value = NA,
# discharge_to_concept_id = NA, # Exist join table
discharge_to_source_value = DISCHARGE_TO_SOURCE_VALUE,
preceding_visit_occurrence_id = NA) %>%
left_join(join_table_visit_discharge, by = "discharge_to_source_value") %>%
select(visit_occurrence_id, person_id, visit_concept_id, visit_start_date,
visit_start_datetime, visit_end_date, visit_end_datetime, visit_type_concept_id,
provider_id, care_site_id, visit_source_value, visit_source_concept_id,
admitting_source_concept_id, admitting_source_value, discharge_to_concept_id,
discharge_to_source_value, preceding_visit_occurrence_id)
# data type 변경: date(as.Date), datetype(as.POSIXct)
type_visit <- read.csv(paste0(mypath, "/type/type_visit.csv"))
for(i in 1:length(type_visit)){type_visit[,i] <- as.character(type_visit[,i])}
table(type_visit$Type)
d <- type_visit[type_visit$Type == "date",]$Field
dt <- type_visit[type_visit$Type == "datetime",]$Field
i <- type_visit[type_visit$Type == "integer",]$Field
I <- type_visit[type_visit$Type == "Integer",]$Field
c50 <- type_visit[type_visit$Type == "varchar(50)",]$Field
VISIT_OCCURRENCE[,d] <- lapply(VISIT_OCCURRENCE[,d], as.Date)
VISIT_OCCURRENCE[,dt] <- lapply(VISIT_OCCURRENCE[,dt], as.POSIXct)
VISIT_OCCURRENCE[,i] <- lapply(VISIT_OCCURRENCE[,i], as.integer)
VISIT_OCCURRENCE[,I] <- as.integer(VISIT_OCCURRENCE[,I])
VISIT_OCCURRENCE[,c50] <- lapply(VISIT_OCCURRENCE[,c50], as.character)
rm(d,dt,i,I,c50,join_table_visit_discharge)
#--- measurement------
# Triage index : uses KTAS (Korean Triage and Acuity Scale)
#CDW data: the KTAS score (ktas) and its timestamp (ktas_time) per row of `er`.
measurement <- er %>%
  select(id, u_id, ktas_time, ktas) %>%
  mutate(MEASUREMENT_SOURCE_VALUE = 'KTAS',
         MEASUREMENT_ID = u_id,
         VISIT_OCCURRENCE_ID = u_id) %>%
  rename(MEASUREMENT_DATE = ktas_time,
         VALUE_AS_NUMBER = ktas)
#join table
# Maps measurement_source_value ("KTAS") to its measurement_concept_id.
join_table_measurement <- read_xlsx(paste0(mypath,"/join_table/join_table_measurement.xlsx"))
#mapping data
# Build the OMOP MEASUREMENT table: look up person_id from the source id,
# fill every column of the CDM layout (fields with no source data are NA),
# attach the concept id via the join table, then order columns per the spec.
MEASUREMENT <- measurement %>%
  left_join(personID_sourceValue_map, by = c("id" = "person_source_value")) %>%
  mutate(measurement_id = MEASUREMENT_ID,
         # measurement_concept_id = NA, # Exist join_table
         measurement_date = date(MEASUREMENT_DATE),
         measurement_datetime = MEASUREMENT_DATE,
         measurement_time = NA,
         measurement_type_concept_id = NA,
         operator_concept_id = NA,
         value_as_number = VALUE_AS_NUMBER,
         value_as_concept_id = NA,
         unit_concept_id = NA,
         range_low = NA,
         range_high = NA,
         provider_id = NA,
         visit_occurrence_id = VISIT_OCCURRENCE_ID,
         visit_detail_id = NA,
         measurement_source_value = MEASUREMENT_SOURCE_VALUE,
         measurement_source_concept_id = NA,
         unit_source_value = NA,
         value_source_value = NA) %>%
  left_join(join_table_measurement, by = "measurement_source_value") %>%
  select(measurement_id,person_id,measurement_concept_id,measurement_date,measurement_datetime,
         measurement_time,measurement_type_concept_id,operator_concept_id,value_as_number,
         value_as_concept_id,unit_concept_id,range_low,range_high,provider_id,visit_occurrence_id,
         visit_detail_id,measurement_source_value,measurement_source_concept_id,unit_source_value,value_source_value)
# Convert MEASUREMENT columns to the types declared in type_measure.csv:
# date -> as.Date, datetime -> as.POSIXct, float -> double,
# integer -> integer, varchar(10)/varchar(50) -> character.
type_measure <- read.csv(paste0(mypath, "/type/type_measure.csv"))
# read.csv may return factors; force every column to character before matching.
for(i in seq_along(type_measure)){type_measure[,i] <- as.character(type_measure[,i])}
table(type_measure$Type)  # quick overview of the declared types
d <- type_measure[type_measure$Type == "date",]$Field
dt <- type_measure[type_measure$Type == "datetime",]$Field
f <- type_measure[type_measure$Type == "float",]$Field
i <- type_measure[type_measure$Type == "integer",]$Field
c10 <- type_measure[type_measure$Type == "varchar(10)",]$Field
c50 <- type_measure[type_measure$Type == "varchar(50)",]$Field
# Single-bracket indexing (df[cols]) always keeps a data.frame, so lapply()
# converts each listed column safely regardless of how many fields fall in a
# type group. The original as.Date(df[,d]) / as.POSIXct(df[,dt]) /
# as.character(df[,c10]) forms only worked when a group had exactly one field.
MEASUREMENT[d] <- lapply(MEASUREMENT[d], as.Date)
MEASUREMENT[dt] <- lapply(MEASUREMENT[dt], as.POSIXct)
MEASUREMENT[f] <- lapply(MEASUREMENT[f], as.double)
MEASUREMENT[i] <- lapply(MEASUREMENT[i], as.integer)
MEASUREMENT[c10] <- lapply(MEASUREMENT[c10], as.character)
MEASUREMENT[c50] <- lapply(MEASUREMENT[c50], as.character)
rm(d,dt,f,i,c10,c50, join_table_measurement)
# Clinical Data Tables
# data merge
# Join the three CDM tables on their shared columns; the trailing "# 153"
# comments presumably record the expected row/column count -- TODO confirm.
df <- left_join(PERSON, VISIT_OCCURRENCE,by = intersect(names(PERSON),names(VISIT_OCCURRENCE))) # 153
df <- left_join(df, MEASUREMENT, by = intersect(names(df),names(MEASUREMENT)))# 153
# remove
# Drop everything except the data path, the raw source table, and the result.
rm(list = setdiff(ls(), c('mypath','er','df')))
# save file
save.image(paste0(mypath, '/er_cdm.rdata'))
|
0a105bc33be6188c595d762bbf93f71f57ec83cf | 590be74a6cf2274ef988d789680f5b9fb13fd10d | /Lycopene_data_transformation.R | c1ee0606a03621c54da242f209245b123cf0a6da | [] | no_license | arielhecht/cell-metrics | 13ab5de8d5a0583a93a13c90f785a4d78cc646c7 | 37974a400626d8bc795c0cd4aaa2a880e8b08916 | refs/heads/master | 2020-03-09T20:57:07.378072 | 2018-04-10T23:45:06 | 2018-04-10T23:45:06 | 128,997,141 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 3,333 | r | Lycopene_data_transformation.R |
#Cell metrics project, lycopene data, analysis notebook
#This notebook reads in the data from the lycopene cell metrics experiments (e26-28, e30-32, e37-38, e44-48),
#and outputs the summary tab file csv file, which is the tidy data frame with all factors in columns and all 256 observations in rows
#original file name "180213 cm e26-48 lycopene analysis.R"
# NOTE(review): hard-coded absolute path -- the script only runs on the
# author's machine unless this is edited to point at the local data folder.
dirnm<-"/Users/ahh1/Documents/My SugarSync/Cell Metrics/Experimental Data/R analysis files/R files for upload"
setwd(dirnm)
library(tidyverse)
library(readxl)
# Helper functions defined in sibling scripts (must be present in dirnm).
source("Lycopene_analysis.R")
source("DCM_analysis.R")
#source("Lycopene_background_correction.R")
source("Main_effects_calculator.R")
#=====================
#READ IN DATA:
#=====================
# Workbook sheets: 1 = OD reads, 2 = lycopene absorbance reads,
# 4 = plate-map key (identifies sample vs blank wells).
OD_raw<-read_excel("Supplementary Data File S4.xlsx",sheet = 1, col_names = T)
lycopene_raw<-read_excel("Supplementary Data File S4.xlsx",sheet = 2, col_names = T)
key<-read_excel("Supplementary Data File S4.xlsx",sheet = 4, col_names = T, na="NA")
#=====================
#CALIBRATION VALUES:
#=====================
#Dry cell calibration curve values. x-axis: OD700, y-axis: Dry cell mass per volume of culture (g/L)
#Calibration values from cm-e42, averaging the runs in cm-e10 and cm-e41
DCM_OD700_slope <- 1.343289
DCM_OD700_intercept <- (-0.016317)
dcm_calibration<-c(DCM_OD700_slope,DCM_OD700_intercept)
#Lycopene calibration curve values. x-axis: Lycopene concentration (mg/L), y-axis: Absorbance (AU)
#Calibration values from cm-e41, averaging the runs in cm-e18, and cm-e41
Lyc_449_slope <- 0.08366689
Lyc_449_intercept <- (-0.008434176)
Lyc_475_slope <- 0.121680
Lyc_475_intercept <- 0.002021
Lyc_507_slope <- 0.108991
Lyc_507_intercept <- 0.001693
# Packed as (slope, intercept) pairs for the 449/475/507 nm wavelengths, in
# the order Lycopene_analysis() expects.
lycopene_calibration<-c(Lyc_449_slope,Lyc_449_intercept,
                        Lyc_475_slope,Lyc_475_intercept,
                        Lyc_507_slope,Lyc_507_intercept)
#=====================
#DATA PROCESSING STEPS
#=====================
#Convert the raw lycopene absorbance to lycopene concentration. Need the raw lycopene absorbance,
#the master key (to know which wells are samples and which are blanks), and the calibration values.
lycopene_analyzed<-Lycopene_analysis(lycopene_raw,key,lycopene_calibration)
#Do the same for the OD values.
od_analyzed<-DCM_analysis(OD_raw,key,dcm_calibration)
#Merge the calculated lycopene concentration with dry cell mass.
#Calculate yield, the mass of lycopene produced per biomass. Result has units of, mg/g. (titer in mg/L, dcm in g/L).
data_merge<-merge(lycopene_analyzed,select(od_analyzed,Well_id,dcm),by="Well_id")%>%
  mutate(yield = titer/dcm)%>%
  filter(Category!="Blank")
#============================================
#Create a summary table, summarizing key metrics by well and by type.
#============================================
#Summary_tab is the full experimental design table. Columns are factor levels and responses, rows are runs
#Recode
# Average replicate measurements per well, attach the design factors from
# the key, sort, rename the MTP_* columns, and write the tidy CSV.
summary_tab<-data_merge%>%
  group_by(Well_id)%>%
  summarize_at(c('titer','dcm','yield'), mean, na.rm=T)%>%
  merge(key, by="Well_id")%>%
  arrange(desc(Category),Exp_number,Run_number)%>%
  rename(Well_bottom=MTP_bottom,
         Well_cover=MTP_cover,
         Well_fill_volume=MTP_fill_volume,
         Well_volume=MTP_volume,
         Shake_speed=MTP_shake_speed)%>%
  write_excel_csv("Lycopene experimental data table.csv")
|
a608aa188302c0cc1b9b641153d7b92dcb2a05bb | 89dd9d04de821baff8080ee3d49c458eb2796631 | /TapsProblem#3.R | b43cec2467c29b29ce166aa39d68d867538743d4 | [] | no_license | joneldominic/R-Analyzer | 08f122fba26555b91d786d159ecc4bcb8ef732f3 | b8ad260ff8c50c820d1756756379c94c710ef672 | refs/heads/master | 2020-03-22T01:42:58.126025 | 2018-07-01T11:52:47 | 2018-07-01T11:52:47 | 139,324,976 | 0 | 0 | null | null | null | null | WINDOWS-1252 | R | false | false | 802 | r | TapsProblem#3.R | #Taps
#Problem #3
# NOTE(review): everything below is a pasted R console TRANSCRIPT (the "> "
# prompts and printed output are included verbatim). This file is NOT
# runnable source; strip the prompts and output lines to re-run it.
# Analysis: two-way ANOVA of typing Speed on keyboard Size and Feel,
# including the Size:Feel interaction, followed by an interaction plot.
> library(rJava)
> library(xlsxjars)
> library(xlsx)
> dataKey <- read.xlsx("StatExer_Taps.xlsx", "keyboard")
> head(dataKey)
Size Feel Speed
1 Small Mushy 33
2 Medium Mushy 36
3 Large Mushy 37
4 Small Mushy 31
5 Medium Mushy 35
6 Large Mushy 34
> anovaKey <- (aov(Speed~Size+Feel+Size:Feel, data=dataKey))
> summary(anovaKey)
Df Sum Sq Mean Sq F value Pr(>F)
Size 2 12.33 6.17 1.762 0.213
Feel 1 117.56 117.56 33.587 8.53e-05 ***
Size:Feel 2 10.11 5.06 1.444 0.274
Residuals 12 42.00 3.50
---
Signif. codes: 0 ‘***’ 0.001 ‘**’ 0.01 ‘*’ 0.05 ‘.’ 0.1 ‘ ’ 1
> interaction.plot(dataKey$Size, dataKey$Feel, dataKey$Speed)
|
c0fa303be9f9e9cf84fd535427314484c24152eb | a48797beca55474d7b39676389f77f8f1af76875 | /man/get_leafnames.Rd | 7d1835d92d5c6efd9c9cbc3c96a5d603dea3cb68 | [] | no_license | uqrmaie1/admixtools | 1efd48d8ad431f4a325a4ac5b160b2eea9411829 | 26759d87349a3b14495a7ef4ef3a593ee4d0e670 | refs/heads/master | 2023-09-04T02:56:48.052802 | 2023-08-21T21:15:27 | 2023-08-21T21:15:27 | 229,330,187 | 62 | 11 | null | 2023-01-23T12:19:57 | 2019-12-20T20:15:32 | R | UTF-8 | R | false | true | 335 | rd | get_leafnames.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/toposearch.R
\name{get_leafnames}
\alias{get_leafnames}
\title{Get the population names of a graph}
\usage{
get_leafnames(graph)
}
\arguments{
\item{graph}{An admixture graph}
}
\value{
Population names
}
\description{
Get the population names of a graph
}
|
a646708d82ca7c4751685df7ceb027c098efccac | 97b3532a7a44e9085c2231daa1d0e45a8fc507c3 | /old_source/man/read_sql.Rd | 8a54ccca20f1d39845e0cb9d3fee91062fa04554 | [] | no_license | kamapu/gisrepos | 577757744d6e1ca3f15b39c875eb1e72e997ccc2 | 17c9da9bd96745ef424a27fe7e92b1f14ced4407 | refs/heads/master | 2021-02-11T06:28:37.043588 | 2020-10-27T11:35:37 | 2020-10-27T11:35:37 | 244,464,383 | 0 | 1 | null | null | null | null | UTF-8 | R | false | true | 836 | rd | read_sql.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/read_sql.R
\name{read_sql}
\alias{read_sql}
\title{Read SQL scripts and split them into SQL statements}
\usage{
read_sql(file, end = ";", comment = "--", ...)
}
\arguments{
\item{file}{A character value indicating the path to the script that have to
be read.}
\item{end}{Symbol set at the end of a statement (semicolon by default).}
\item{comment}{Symbol used to start a comment in script (two dashes by
default).}
\item{...}{Further arguments passed to \code{\link[=readLines]{readLines()}}.}
}
\description{
Read SQL scripts and split them into SQL statements
}
\details{
SQL scripts may mix SQL statements with comments. This function extracts the
respective statements and split them into a list, where each element is a
statement.
}
\author{
Miguel Alvarez
}
|
2ba83c772e1a39c0dabaf6e9754aa9c6d09f69e3 | ea620ed454ef2c94844222f3cf88a5a7329c189e | /CONEW.R | 9c15c3ab03981c4427561a6eafb8bd8633923dfe | [] | no_license | JadynWumanqing/Statistical-Learning | a3b343452611945522b8c2fea41c4fa4a20252e9 | cdc1a1a946a752d69af82b4a018e5d2cb8c8b5f5 | refs/heads/master | 2022-07-26T21:20:39.021046 | 2020-05-20T23:17:11 | 2020-05-20T23:17:11 | 265,615,895 | 0 | 0 | null | 2020-05-20T16:08:36 | 2020-05-20T15:58:20 | null | UTF-8 | R | false | false | 15,528 | r | CONEW.R | rm(list=ls())
cat("\014")   # clear the console (RStudio)
library(randomForest)
library(ggplot2)
library(gridExtra)
library(glmnet)
#Data Source: https://data.world/usafacts/coronavirus-in-the-us
#Background: The dataset contains confirmed Covid-19 cases by U.S. counties from January 22nd, 2020 to May 18th, 2020.
setwd(getwd())
co.ori = na.omit(read.csv("covid_confirmed_usafacts.csv",header=TRUE))
#View(co.ori)
co = co.ori[-1,-c(1,2,3,4)] #deleting row 1: not a data point
#excluding column 1 through 4: geographical information
set.seed(1)
n = dim(co)[1] #3194
p = dim(co)[2]
# Convert cumulative case counts into daily NEW cases per county
# (the first day has no predecessor and becomes 0).
for (i in seq_len(n)) {
  co[i,] = c(0,diff(as.numeric(co[i,])))
}
y = co[,p]                 # response: new cases on the final day
X = data.matrix(co[,-p])   # predictors: all earlier days, 3194*117
p = dim(X)[2]
col.sum = as.vector(apply(X, 2, 'sum'))
# Dates on which every county reported 0 new cases have sd = 0 and cannot be
# standardized. A logical mask is used instead of negative indexing with
# which(): X[i, -integer(0)] silently selects ZERO columns when no such
# dates exist, whereas X[i, keep] is always correct. (The locals are also
# renamed so base::sum / base::sd are no longer shadowed.)
keep = col.sum != 0
mu = as.vector(apply(X, 2, 'mean'))
col.sd = as.vector(apply(X, 2, 'sd'))
X.orig = X   # keep an unstandardized copy
# Standardize (z-score) the non-degenerate columns.
for (i in seq_len(n)){
  X[i, keep] = (X[i, keep] - mu[keep])/(col.sd[keep])
}
# apply(X, 2, 'mean')
# apply(X, 2, 'sd')
#X=X.orig
n.train = floor(0.8*n)   # 80/20 train/test split sizes
n.test = n-n.train
M=100                    # number of random splits evaluated below
Rsq.test.rf = rep(0,M) # rf= randomForest
Rsq.train.rf = rep(0,M)
Rsq.test.en = rep(0,M) # en = elastic net
Rsq.train.en = rep(0,M)
Rsq.test.rid = rep(0,M) # rid = Ridge
Rsq.train.rid = rep(0,M)
Rsq.test.lasso = rep(0,M) # lasso
Rsq.train.lasso = rep(0,M)
# Evaluate the four models over M random 80/20 train/test splits. For each
# split: tune lambda by 10-fold CV (MAE criterion) for ridge / elastic net /
# lasso, fit a random forest, and record train and test R-squared.
for (m in seq_len(M)) {
  shuffled_indexes = sample(n)
  train = shuffled_indexes[1:n.train]
  test = shuffled_indexes[(1+n.train):n]
  X.train = X[train, ]
  y.train = y[train]
  X.test = X[test, ]
  y.test = y[test]
  # ridge alpha = 0  (the unnamed lambda in predict() fills its `s` argument)
  cv.fit.rid = cv.glmnet(X.train, y.train, alpha = 0,family = "gaussian",nfolds=10,type.measure = "mae")
  fit.rid = glmnet(X.train, y.train, alpha = 0, family = "gaussian", lambda = cv.fit.rid$lambda.min)
  y.train.hat = predict(fit.rid, newx = X.train, type = "response",cv.fit.rid$lambda.min) # y.train.hat=X.train %*% fit$beta + fit$a0
  y.test.hat = predict(fit.rid, newx = X.test, type = "response",cv.fit.rid$lambda.min) # y.test.hat=X.test %*% fit$beta + fit$a0
  # R-squared denominators use the variance of the FULL response y, so the
  # train and test scores share a common scale across splits.
  Rsq.test.rid[m] = 1-mean((y.test - y.test.hat)^2)/mean((y - mean(y))^2) #1-mean((y.test - y.test.hat)^2)/mean((y.test - mean(y.test))^2)
  Rsq.train.rid[m] = 1-mean((y.train - y.train.hat)^2)/mean((y - mean(y))^2)
  # elastic-net alpha = 0.5
  cv.fit.en = cv.glmnet(X.train, y.train, family = "gaussian", alpha = 0.5, nfolds = 10,type.measure = "mae")
  fit.en = glmnet(X.train, y.train,alpha = 0.5, lambda = cv.fit.en$lambda.min,family = "gaussian")
  y.train.hat = predict(fit.en, newx = X.train, type = "response",cv.fit.en$lambda.min) # y.train.hat=X.train %*% fit$beta + fit$a0
  y.test.hat = predict(fit.en, newx = X.test, type = "response",cv.fit.en$lambda.min) # y.test.hat=X.test %*% fit$beta + fit$a0
  Rsq.test.en[m] = 1-mean((y.test - y.test.hat)^2)/mean((y - mean(y))^2)
  Rsq.train.en[m] = 1-mean((y.train - y.train.hat)^2)/mean((y - mean(y))^2)
  # lasso alpha = 1
  cv.fit.lasso = cv.glmnet(X.train, y.train, family = "gaussian", alpha = 1, nfolds = 10,type.measure = "mae")
  fit.lasso = glmnet(X.train, y.train,alpha = 1, lambda = cv.fit.lasso$lambda.min,family = "gaussian")
  y.train.hat = predict(fit.lasso, newx = X.train, type = "response",cv.fit.lasso$lambda.min) # y.train.hat=X.train %*% fit$beta + fit$a0
  y.test.hat = predict(fit.lasso, newx = X.test, type = "response",cv.fit.lasso$lambda.min) # y.test.hat=X.test %*% fit$beta + fit$a0
  Rsq.test.lasso[m] = 1-mean((y.test - y.test.hat)^2)/mean((y - mean(y))^2)
  Rsq.train.lasso[m] = 1-mean((y.train - y.train.hat)^2)/mean((y - mean(y))^2)
  # fit RF and calculate and record the train and test R squares
  rf = randomForest(X.train, y.train, mtry = round(sqrt(p)), importance = TRUE)
  y.test.hat = predict(rf, X.test)
  y.train.hat = predict(rf, X.train)
  Rsq.test.rf[m] = 1-mean((y.test - y.test.hat)^2)/mean((y - mean(y))^2)
  Rsq.train.rf[m] = 1-mean((y.train - y.train.hat)^2)/mean((y - mean(y))^2)
  cat(sprintf("m=%3.f| Rsq.test.rf=%.2f, Rsq.test.en=%.2f| Rsq.train.rf=%.2f, Rsq.train.en=%.2f| \n", m, Rsq.test.rf[m], Rsq.test.en[m], Rsq.train.rf[m], Rsq.train.en[m]))
  cat(sprintf("m=%3.f| Rsq.test.rid=%.2f, Rsq.test.lasso=%.2f| Rsq.train.rid=%.2f, Rsq.train.lasso=%.2f| \n", m, Rsq.test.rid[m], Rsq.test.lasso[m], Rsq.train.rid[m], Rsq.train.lasso[m]))
}
#Rsq Plot ----
# Long-format data frames: one (method, Rsq) row per split and model, then
# side-by-side boxplots of the train and test R-squared distributions.
method_labels <- rep(c("rf", "EN", "ridge", "lasso"), each = M)
Rsq.train <- data.frame(method = method_labels,
                        Rsq = c(Rsq.train.rf, Rsq.train.en, Rsq.train.rid, Rsq.train.lasso))
Rsq.test <- data.frame(method = method_labels,
                       Rsq = c(Rsq.test.rf, Rsq.test.en, Rsq.test.rid, Rsq.test.lasso))
# Shared panel builder so the train and test plots stay visually identical
# (Courier fonts, 45-degree tick labels, common y-range).
rsq_boxplot <- function(scores, panel_title) {
  ggplot(scores) + aes(x = method, y = Rsq, fill = method) + geom_boxplot() +
    xlab(expression("method")) + ylab(expression("R^2")) +
    theme(legend.text = element_text(colour = "black", size = 10, face = "bold", family = "Courier")) +
    ggtitle(panel_title) +
    theme(axis.title.x = element_text(size = 16, face = "bold", family = "Courier"),
          plot.title = element_text(size = 20, family = "Courier"),
          axis.title.y = element_text(size = 16, face = "bold", family = "Courier"),
          axis.text.x = element_text(angle = 45, hjust = 1, size = 10, face = "bold", family = "Courier"),
          axis.text.y = element_text(angle = 45, vjust = 0.7, size = 10, face = "bold", family = "Courier")) +
    ylim(0.25, 1)
}
p1 <- rsq_boxplot(Rsq.train, "train")
p2 <- rsq_boxplot(Rsq.test, "test")
grid.arrange(p1, p2, ncol = 2)
#CV plots and Residual plots
# NOTE(review): these refits reuse X.train/X.test/y.train/y.test from the
# LAST iteration of the loop above, so each residual boxplot describes a
# single random split. "residual" here is computed as fitted - observed.
cv.fit.rid = cv.glmnet(X.train, y.train, alpha = 0,family = "gaussian",nfolds=10,type.measure = "mae")
fit.rid = glmnet(X.train, y.train, alpha = 0, family = "gaussian", lambda = cv.fit.rid$lambda.min)
y.train.hat = predict(fit.rid, newx = X.train, type = "response",cv.fit.rid$lambda.min) # y.train.hat=X.train %*% fit$beta + fit$a0
y.test.hat = predict(fit.rid, newx = X.test, type = "response",cv.fit.rid$lambda.min) # y.test.hat=X.test %*% fit$beta + fit$a0
res.rid = data.frame(c(rep("train", n.train),rep("test", n.test)), c(1:n), c(y.train.hat - y.train, y.test.hat - y.test))
colnames(res.rid) = c("Ridge", "time", "residual")
res.rid.barplot = ggplot(res.rid, aes(x=Ridge, y=residual)) + geom_boxplot(outlier.size = 0.5)
res.rid.barplot   # train vs test residual boxplots (ridge)
plot(cv.fit.rid)  # CV error curve over the lambda path (ridge)
# Same three steps for the elastic net (alpha = 0.5).
cv.fit.en = cv.glmnet(X.train, y.train, family = "gaussian", alpha = 0.5, nfolds = 10,type.measure = "mae")
fit.en = glmnet(X.train, y.train,alpha = 0.5, lambda = cv.fit.en$lambda.min,family = "gaussian")
y.train.hat = predict(fit.en, newx = X.train, type = "response",cv.fit.en$lambda.min) # y.train.hat=X.train %*% fit$beta + fit$a0
y.test.hat = predict(fit.en, newx = X.test, type = "response",cv.fit.en$lambda.min) # y.test.hat=X.test %*% fit$beta + fit$a0
res.en = data.frame(c(rep("train", n.train),rep("test", n.test)), c(1:n), c(y.train.hat - y.train, y.test.hat - y.test))
colnames(res.en) = c("EN", "time", "residual")
res.en.barplot = ggplot(res.en, aes(x=EN, y=residual)) + geom_boxplot(outlier.size = 0.5)
res.en.barplot
plot(cv.fit.en)
# Same for the lasso (alpha = 1).
cv.fit.lasso = cv.glmnet(X.train, y.train, family = "gaussian", alpha = 1, nfolds = 10,type.measure = "mae")
fit.lasso = glmnet(X.train, y.train,alpha = 1, lambda = cv.fit.lasso$lambda.min,family = "gaussian")
y.train.hat = predict(fit.lasso, newx = X.train, type = "response",cv.fit.lasso$lambda.min) # y.train.hat=X.train %*% fit$beta + fit$a0
y.test.hat = predict(fit.lasso, newx = X.test, type = "response",cv.fit.lasso$lambda.min) # y.test.hat=X.test %*% fit$beta + fit$a0
res.lasso = data.frame(c(rep("train", n.train),rep("test", n.test)), c(1:n), c(y.train.hat - y.train, y.test.hat - y.test))
colnames(res.lasso) = c("lasso", "time", "residual")
res.lasso.barplot = ggplot(res.lasso, aes(x=lasso, y=residual)) + geom_boxplot(outlier.size = 0.5)
res.lasso.barplot
plot(cv.fit.lasso)
# Random forest residuals (no lambda path, so there is no CV curve to plot).
rf = randomForest(X.train, y.train, mtry = round(sqrt(p)), importance = TRUE)
y.test.hat = predict(rf, X.test)
y.train.hat = predict(rf, X.train)
res.rf = data.frame(c(rep("train", n.train),rep("test", n.test)), c(1:n),c(y.train.hat - y.train, y.test.hat - y.test))
colnames(res.rf) = c("rf", "time", "residual")
res.rf.barplot = ggplot(res.rf, aes(x=rf, y=residual)) + geom_boxplot(outlier.size = 0.5)
res.rf.barplot
#Bootstrap
# Refit every model on bootstrap resamples of the full data to estimate the
# sampling variability of the glmnet coefficients / rf importances.
bootstrapSamples = 100
beta.rf.bs = matrix(0, nrow = p, ncol = bootstrapSamples)
beta.en.bs = matrix(0, nrow = p, ncol = bootstrapSamples)
beta.rid.bs = matrix(0, nrow = p, ncol = bootstrapSamples)
beta.lasso.bs = matrix(0, nrow = p, ncol = bootstrapSamples)
for (m in seq_len(bootstrapSamples)){
  # Resample rows with replacement (TRUE spelled out: `T` can be reassigned).
  bs_indexes = sample(n, replace=TRUE)
  X.bs = X[bs_indexes, ]
  y.bs = y[bs_indexes]
  # fit bs rf; store the first importance column
  rf = randomForest(X.bs, y.bs, mtry = round(sqrt(p)), importance = TRUE)
  beta.rf.bs[,m] = as.vector(rf$importance[,1])
  # fit bs en
  cv.fit.en = cv.glmnet(X.bs, y.bs, alpha = 0.5, nfolds = 10, family = "gaussian", type.measure = "mae")
  fit.en = glmnet(X.bs, y.bs, alpha = 0.5, lambda = cv.fit.en$lambda.min, family = "gaussian")
  beta.en.bs[,m] = as.vector(fit.en$beta)
  # fit bs ridge
  cv.fit.rid = cv.glmnet(X.bs, y.bs, alpha = 0, nfolds = 10, family = "gaussian", type.measure = "mae")
  fit.rid = glmnet(X.bs, y.bs, alpha = 0, lambda = cv.fit.rid$lambda.min, family = "gaussian")
  beta.rid.bs[,m] = as.vector(fit.rid$beta)
  # fit bs lasso
  cv.fit.lasso = cv.glmnet(X.bs, y.bs, alpha = 1, nfolds = 10, family = "gaussian", type.measure = "mae")
  fit.lasso = glmnet(X.bs, y.bs, alpha = 1, lambda = cv.fit.lasso$lambda.min, family = "gaussian")
  beta.lasso.bs[,m]= as.vector(fit.lasso$beta)
  cat(sprintf("Bootstrap Sample %3.f \n", m))
}
# calculate bootstrapped standard errors / alternatively you could use quantiles to find upper and lower bounds
rf.bs.sd = apply(beta.rf.bs, 1, "sd")
en.bs.sd = apply(beta.en.bs, 1, "sd")
rid.bs.sd = apply(beta.rid.bs, 1, "sd")
lasso.bs.sd = apply(beta.lasso.bs, 1, "sd")
# fit rf to the whole data
rf = randomForest(X, y, mtry = round(sqrt(p)), importance = TRUE)
# fit en to the whole data
cv.fit.en = cv.glmnet(X, y, alpha = 0.5, nfolds = 10,family = "gaussian", type.measure = "mae")
fit.en = glmnet(X, y, alpha = 0.5, lambda = cv.fit.en$lambda.min,family = "gaussian")
# fit ridge to the whole data
cv.fit.rid = cv.glmnet(X, y, alpha = 0, nfolds = 10, family = "gaussian", type.measure = "mae")
fit.rid = glmnet(X, y, alpha = 0, lambda = cv.fit.rid$lambda.min, family = "gaussian")
# fit lasso to the whole data
cv.fit.lasso = cv.glmnet(X, y, alpha = 1, nfolds = 10,family = "gaussian", type.measure = "mae")
fit.lasso = glmnet(X, y, alpha = 1, lambda = cv.fit.lasso$lambda.min, family = "gaussian")
# Feature tables: point estimate per predictor plus +/- 2 bootstrap SDs
# (an approximate 95% band) from the bootstrap matrices built above.
betaS.rf = data.frame(c(1:p), as.vector(rf$importance[,1]), 2*rf.bs.sd)
colnames(betaS.rf) = c( "RFfeature", "value", "err")
betaS.en = data.frame(c(1:p), as.vector(fit.en$beta), 2*en.bs.sd)
colnames(betaS.en) = c( "ENfeature", "value", "err")
betaS.rid = data.frame(c(1:p), as.vector(fit.rid$beta), 2*rid.bs.sd)
colnames(betaS.rid) = c( "RIDGEfeature", "value", "err")
betaS.lasso = data.frame(c(1:p), as.vector(fit.lasso$beta), 2*lasso.bs.sd)
colnames(betaS.lasso) = c( "LASSOfeature", "value", "err")
# Bar charts with error bars, stacked (rf, en, ridge, lasso), features in
# their original column order.
rfPlot = ggplot(betaS.rf, aes(x=RFfeature, y=value)) +
  geom_bar(stat = "identity", fill="white", colour="black") +
  geom_errorbar(aes(ymin=value-err, ymax=value+err), width=.2)
enPlot = ggplot(betaS.en, aes(x=ENfeature, y=value)) +
  geom_bar(stat = "identity", fill="white", colour="black") +
  geom_errorbar(aes(ymin=value-err, ymax=value+err), width=.2)
ridPlot = ggplot(betaS.rid, aes(x=RIDGEfeature, y=value)) +
  geom_bar(stat = "identity", fill="white", colour="black") +
  geom_errorbar(aes(ymin=value-err, ymax=value+err), width=.2)
lassoPlot = ggplot(betaS.lasso, aes(x=LASSOfeature, y=value)) +
  geom_bar(stat = "identity", fill="white", colour="black") +
  geom_errorbar(aes(ymin=value-err, ymax=value+err), width=.2)
grid.arrange(rfPlot, enPlot, ridPlot, lassoPlot,nrow = 4)
# we need to change the order of factor levels by specifying the order explicitly.
# All four panels are re-leveled by the RANDOM FOREST importance ranking so
# the methods can be compared feature-by-feature on a common x-axis order.
betaS.rf$RFfeature = factor(betaS.rf$RFfeature, levels = betaS.rf$RFfeature[order(betaS.rf$value, decreasing = TRUE)])
betaS.en$ENfeature = factor(betaS.en$ENfeature, levels = betaS.rf$RFfeature[order(betaS.rf$value, decreasing = TRUE)])
betaS.rid$RIDGEfeature = factor(betaS.rid$RIDGEfeature, levels = betaS.rf$RFfeature[order(betaS.rf$value, decreasing = TRUE)])
betaS.lasso$LASSOfeature = factor(betaS.lasso$LASSOfeature, levels = betaS.rf$RFfeature[order(betaS.rf$value, decreasing = TRUE)])
rfPlot = ggplot(betaS.rf, aes(x=RFfeature, y=value)) +
  geom_bar(stat = "identity", fill="white", colour="black") +
  geom_errorbar(aes(ymin=value-err, ymax=value+err), width=.2)
enPlot = ggplot(betaS.en, aes(x=ENfeature, y=value)) +
  geom_bar(stat = "identity", fill="white", colour="black") +
  geom_errorbar(aes(ymin=value-err, ymax=value+err), width=.2)
ridPlot = ggplot(betaS.rid, aes(x=RIDGEfeature, y=value)) +
  geom_bar(stat = "identity", fill="white", colour="black") +
  geom_errorbar(aes(ymin=value-err, ymax=value+err), width=.2)
lassoPlot = ggplot(betaS.lasso, aes(x=LASSOfeature, y=value)) +
  geom_bar(stat = "identity", fill="white", colour="black") +
  geom_errorbar(aes(ymin=value-err, ymax=value+err), width=.2)
grid.arrange(rfPlot, enPlot,ridPlot, lassoPlot,nrow = 4)
|
1f696f6cf28191572a29a922dce696838ae93dc0 | 2d34708b03cdf802018f17d0ba150df6772b6897 | /googlecustomsearchv1.auto/man/Context.Rd | 4566f53046447b0c1c49c45642474168f1529f66 | [
"MIT"
] | permissive | GVersteeg/autoGoogleAPI | 8b3dda19fae2f012e11b3a18a330a4d0da474921 | f4850822230ef2f5552c9a5f42e397d9ae027a18 | refs/heads/master | 2020-09-28T20:20:58.023495 | 2017-03-05T19:50:39 | 2017-03-05T19:50:39 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | true | 773 | rd | Context.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/customsearch_objects.R
\name{Context}
\alias{Context}
\title{CustomSearch API Objects
Lets you search over a website or collection of websites}
\usage{
Context(facets = NULL, title = NULL)
}
\arguments{
\item{facets}{No description}
\item{title}{No description}
}
\value{
Context object
}
\description{
Auto-generated code by googleAuthR::gar_create_api_objects
at 2017-03-05 19:41:21
filename: /Users/mark/dev/R/autoGoogleAPI/googlecustomsearchv1.auto/R/customsearch_objects.R
api_json: api_json
}
\details{
Objects for use by the functions created by googleAuthR::gar_create_api_skeleton
Context Object
Autogenerated via \code{\link[googleAuthR]{gar_create_api_objects}}
No description
}
|
a62c870f3e5c5e42dc724dcf254fbf2e266287ae | 9647dfddfea006d498c389dc50bf97204e8134fb | /DataPlotting.R | acab314fbf03b27beb35871f359858677faa0aee | [] | no_license | Libardo1/Walmart-Forecasting-NN | 9e39a0e5e1ba6554ede4c061f1772b7d4b41a2d7 | ccf241410c428b70b45f50d8de8517a35c7006e1 | refs/heads/master | 2020-03-20T18:28:26.116221 | 2014-12-01T21:47:38 | 2014-12-01T21:47:38 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 4,350 | r | DataPlotting.R | # Title: DataPlotting.R
# Author: Ssurey Moon
#
# Reference:
# https://www.kaggle.com/c/walmart-recruiting-store-sales-forecasting
#
# Plotting data of Walmart store 1 department 1
#rm(list=ls())
setwd(".")
## @knitr check_na
stores = read.csv("stores.csv", header=TRUE)
train = read.csv("train.csv", header=TRUE)
test = read.csv("test.csv", header = TRUE)
features = read.csv("features.csv", header = TRUE)
sample = read.csv("sampleSubmission.csv", header=TRUE)
# Count missing values per input table. Bug fix: the "store"/"test" labels
# were swapped in the original (the "store data" line summed is.na(test)
# and vice versa); each message now matches the table it summarizes.
sprintf("%d NAs in train data", sum(is.na(train)))
sprintf("%d NAs in test data", sum(is.na(test)))
sprintf("%d NAs in store data", sum(is.na(stores)))
sprintf("%d NAs in feature data", sum(is.na(features)))
# Per-column NA counts for the features table. seq_along() is safe even for
# a zero-column data frame, where 1:length() would yield c(1, 0).
for(i in seq_along(features)){
  cat(sprintf("%d NAs in %s attribute",
              sum(is.na(features[,i])), colnames(features)[i]), "\n")
}
#Let's take a look at data which CPI or Unemployment is NA
CPI_NA = features[which(is.na(features$CPI)),]
# Message typo fixed: "Unployement rate date" -> "Unemployment rate data".
sprintf("We do not have any CPI and Unemployment rate data from %s to %s", min(as.Date(CPI_NA$Date)), max(as.Date(CPI_NA$Date)))
feat_date_from = min(as.Date(CPI_NA$Date))
feat_date_to = max(as.Date(CPI_NA$Date))
#The period which has at least one Markdown (columns 5-9 are the MarkDown fields)
non_NA_date = as.Date(features$Date[which(features$Store == 1 & !(is.na(features[,5]) &
                                  is.na(features[,6]) & is.na(features[,7]) &
                                  is.na(features[,8]) & is.na(features[,9])))])
sprintf("The period which has at least one Markdown is from %s to %s", min(non_NA_date), max(non_NA_date))
# Weekly sales series for store 1, department 1; type="n" draws an empty
# canvas so the overlays below can be layered in order.
x_axis = as.Date(train$Date[which(train$Store==1 & train$Dept==1)])
y_axis = train$Weekly_Sales[which(train$Store==1 & train$Dept==1)]
plot(x_axis, y_axis, ylab="sales", xlab="date", xlim=range(min(as.Date(train$Date)):feat_date_to), type="n")
lines(x_axis, y_axis) #Weekly sales of the store1 departments 1
polygon(c(min(non_NA_date), min(non_NA_date), max(non_NA_date), max(non_NA_date)),
        c(0,70000,70000,0), col='orange', density=50) #Area where markdowns are available
polygon(c(feat_date_from, feat_date_from, feat_date_to, feat_date_to),
        c(0,50000,50000,0), col='grey30', density=70) #Area where CPI and Unemployment are missing
test_date_from= min(as.Date(test$Date))
test_date_to= max(as.Date(test$Date))
polygon(c(test_date_from, test_date_from, test_date_to, test_date_to),
        c(0,35000,35000,0), col='blue', density=50) #Area we need to predict the weekly sales
lines(x_axis, y_axis)
#Plots about relations bwtween sales and holiday
# Fresh empty canvas (type="n"); the series and overlays are added below.
plot(x_axis, y_axis, ylab="sales", xlab="date", xlim=range(min(as.Date(train$Date)):feat_date_to), type="n")
# One date per year (2010-2013) for each holiday week, in dd-mm-yy format.
Super_Bowl = as.Date(c("12-02-10", "11-02-11", "10-02-12", "08-02-13"), "%d-%m-%y")
Labor_Day = as.Date(c("10-09-10", "09-09-11", "07-09-12", "06-09-13"), "%d-%m-%y")
Thanksgiving = as.Date(c("26-11-10", "25-11-11", "23-11-12", "29-11-13"), "%d-%m-%y")
Christmas = as.Date(c("31-12-10", "30-12-11", "28-12-12", "27-12-13"), "%d-%m-%y")
Holidays <- t(data.frame(Super_Bowl, Labor_Day, Thanksgiving, Christmas))
# Dates flagged IsHoliday for store 1 / dept 1 (computed but currently unused).
isholiday = as.Date((train$Date[which(train$Store==1 & train$Dept==1 & train$IsHoliday==TRUE)]))
print(Holidays)
polygon(c(min(non_NA_date), min(non_NA_date), max(non_NA_date), max(non_NA_date)),
c(0,70000,70000,0), col='orange', density=50)
polygon(c(feat_date_from, feat_date_from, feat_date_to, feat_date_to),
c(0,50000,50000,0), col='grey30', density=70)
polygon(c(test_date_from, test_date_from, test_date_to, test_date_to),
c(0,35000,35000,0), col='blue', density=50)
#Plot holidays
for(i in 1:4){
lines(rep(Super_Bowl[i], times=4), seq(from=0,to=70000,length=4), col='blue')
lines(rep(Labor_Day[i], times=4), seq(from=0,to=70000,length=4), col='red')
lines(rep(Thanksgiving[i], times=4), seq(from=0,to=70000,length=4), col='green')
lines(rep(Christmas[i], times=4), seq(from=0,to=70000,length=4), col='purple')
}
lines(x_axis, y_axis)
lines(x_axis, y_axis)
polygon(as.Date(c("2010-04-02", "2010-04-02", "2010-04-16", "2010-04-16")),
c(0,70000,70000,0), col='grey5', density=40)
polygon(as.Date(c("2010-10-22", "2010-10-22", "2010-11-05", "2010-11-05")),
c(0,70000,70000,0), col='grey5', density=40)
polygon(as.Date(c("2011-04-22", "2011-04-22", "2011-05-06", "2011-05-06")),
c(0,70000,70000,0), col='grey5', density=40)
|
7bc5af35d0061a74ec623b4a21ea0917af208c10 | 0851201c22e6dbd907445c7b6fbd234cd007951c | /r-demo.r | 0831d8c52fa61e4afc4df9561fa786bc7e1961fd | [
"MIT"
] | permissive | namoshi/r-lecture | 1a369ba4896a7a48c48342f62df1c5ee19b98e9b | 9b2647782c1d65e0fcf267700f19feb089b53ae8 | refs/heads/master | 2023-05-26T12:36:34.447284 | 2021-06-02T05:23:32 | 2021-06-02T05:23:32 | 289,783,517 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 17 | r | r-demo.r | demo(graphics)
|
a76bc5e99659efd614708e540238b968d5d6d34e | d912942f4cb3663c676b07c8c67e08be1610fc49 | /tests/testthat.R | 3654309afde09cd63185fd2b2749726c9c131152 | [
"Apache-2.0"
] | permissive | kristenpeach/metajam | d934b39a111efea048f333a7781b62053122cfd9 | baf56e44be685fcf51a40f7ebd1cabbabf5476fc | refs/heads/master | 2023-06-29T02:45:06.489680 | 2021-07-27T18:51:15 | 2021-07-27T18:51:15 | 254,719,583 | 0 | 0 | Apache-2.0 | 2020-04-10T19:32:54 | 2020-04-10T19:32:53 | null | UTF-8 | R | false | false | 58 | r | testthat.R | library(testthat)
# Standard testthat entry point: load the package under test, then discover
# and run every test file under tests/testthat/ (testthat is loaded earlier).
library(metajam)
test_check("metajam")
|
8799cc1bd0c60ad629760cf54f48101b92925fd4 | 0ae72d31457a1da58d0447de51f2f48f31faa48b | /code/plot.R | 04bcffc0707e358bacab6a5193eca08fc17a2407 | [] | no_license | luiscape/data-for-frog | 047878dada7f88f00cbc7066d287dd12af86b817 | fc6292498bf92b98d80039b2668595c0b299df2b | refs/heads/master | 2016-09-06T04:50:19.357528 | 2014-07-08T05:15:27 | 2014-07-08T05:15:27 | 19,984,704 | 1 | 0 | null | 2014-06-24T16:30:41 | 2014-05-20T14:42:49 | R | UTF-8 | R | false | false | 1,256 | r | plot.R | # script for plotting metrics from
# the indicators data
print("loading from CSV...")
ind <- read.csv('frog_data/csv/denorm_data.csv')
print("loaded")
# de-factor numbers & date
# Robust against `value` arriving as a factor (the pre-R-4.0 read.csv
# default): as.numeric(as.character(.)) parses the printed value, whereas
# as.numeric() on a factor silently returns the internal level codes.
ind$indValue <- as.numeric(as.character(ind$value))
# NOTE(review): format="%Y" assumes `period` holds bare years -- confirm
# against the CSV that period really is year-only.
ind$indDate <- as.Date(ind$period, format="%Y")
# filter for numeric indicators
ind <- ind[ind$is_number==1,]
# plot
# Plot one indicator's time series for a region, to screen or to a PNG file.
#
# region : region code used to filter rows (default "KEN")
# indID  : indicator id to plot (default "CH080")
# toFile : if TRUE, write the plot to plots/<region>_<indID>.png instead of
#          the active graphics device
# data   : data frame of indicator observations; defaults to the global
#          `ind` loaded at the top of this script (parameterized so the
#          function no longer hard-depends on the global)
plotIndicator <- function (region="KEN", indID="CH080", toFile=FALSE, data=ind) {
  pdata <- data[data$region==region,]
  pdata <- pdata[pdata$indID==indID,]
  pdata <- pdata[with(pdata,order(indDate)),]
  # background should be white
  par(bg = "white")
  # name of plot output, e.g. "plots/KEN_CH080.png"
  plotfn <- paste0("plots/", region, "_", indID, ".png")
  # open a PNG device when writing to file (e.g. from an external Rscript)
  if (toFile) png(plotfn)
  # generate plot
  plot(pdata$indDate, pdata$indValue, type="b", main=unique(pdata$indicator_name), ylab=unique(pdata$units), xlab="year")
  # close the device so the file is flushed to disk
  if (toFile) {
    # dev.copy(png,plotfn)
    dev.off()
  }
}
# test
#plotIndicator("KEN","CH080",TRUE)
# plot all indicators
# Render every indicator available for the given region to its own PNG file
# via plotIndicator(). Relies on the global data frame `ind`.
#   region - region/country code; defaults to "KEN"
# BUG FIX: the original call hard-coded plotIndicator("KEN", i, TRUE), so the
# `region` argument was used to pick the indicator list but the plots were
# always drawn for Kenya; `region` is now passed through.
plotAllIndicators <- function (region="KEN") {
  # local copy restricted to the requested region (the global is untouched)
  ind <- ind[ind$region == region, ]
  for (i in unique(ind$indID)) {
    print(paste('creating plot', i))
    plotIndicator(region, i, TRUE)
  }
}
plotAllIndicators()
|
d8582b599b8fb349e5b48b6a393d2d21b4b2ddb1 | ffdea92d4315e4363dd4ae673a1a6adf82a761b5 | /data/genthat_extracted_code/clusterGenomics/examples/exData1.rd.R | 2bf832567f79eb0b1bab8c719920cc98a0bb51f7 | [] | no_license | surayaaramli/typeRrh | d257ac8905c49123f4ccd4e377ee3dfc84d1636c | 66e6996f31961bc8b9aafe1a6a6098327b66bf71 | refs/heads/master | 2023-05-05T04:05:31.617869 | 2019-04-25T22:10:06 | 2019-04-25T22:10:06 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 164 | r | exData1.rd.R | library(clusterGenomics)
### Name: exData1
### Title: A simulated data set with 5 true clusters
### Aliases: exData1
### ** Examples
#Get data
data(exData1)
|
fbb1646cbb7926ddf6f2ce89f1d573fab19e35e2 | f6123e538f3a530126c4673be2c4679e63f505bd | /R/cal.int.R | 7f3383912f6e14696fdfd7aefa5f9cafec25873b | [] | no_license | cran/pencopula | 8fe1124a1447f28b05a72648b4934c87336f75af | dc7411113e6a15bc9c15ebebb17e3348fb2cb30b | refs/heads/master | 2021-01-01T17:17:14.765551 | 2018-08-31T16:40:06 | 2018-08-31T16:40:06 | 17,698,457 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 577 | r | cal.int.R | cal.int <- function(len.k,len.b,q,help.env,knots.val) {
# Integrate each stored polynomial piece over its knot span and cache the
# results (plus per-column normalising constants) in `help.env`.
# Presumably these are spline basis pieces -- confirm against package docs.
#   len.k     - number of knots
#   len.b     - number of basis columns in INT
#   q         - number of overlapping pieces per knot span
#   help.env  - environment holding coefficient vectors named "coef<i>.<j>"
#   knots.val - list with component `val`: the knot positions
INT <- matrix(0,q,len.b)
# NOTE(review): INT.help is assigned but never used below.
INT.help <- rep(0,len.b)
for(i in 1:(len.k-(q-1))) {
count <- 0
for(j in 1:q) {
# integration limits for knot span i
y2 <- knots.val$val[i+1]
y1 <- knots.val$val[i]
coef <- get(paste("coef",i,".",j,sep=""),envir=help.env)
# antiderivative terms at each limit: coefficient k multiplies x^(k)/k,
# i.e. coef[k] is the x^(k-1) polynomial coefficient
y2 <- 1/(1:(q+1))*y2^(1:(q+1))
y1 <- 1/(1:(q+1))*y1^(1:(q+1))
# definite integral of piece j over span i, stored in column i+j-1
INT[j,i+count] <- sum(coef*y2)-sum(coef*y1)
assign(paste("INT",i,".",j,sep=""),INT[j,i+count],envir=help.env)
count <- count+1
}
}
# reciprocal column sums serve as standardisation constants
assign("stand.num",1/colSums(INT),help.env)
}
|
49e90a866c60b8cf1b0e84d11e2d642909e89c17 | 967686b34b44d8cecf94643563007a4624a3991e | /plot1.R | 76c554861742c441ea9782f17944f52261c818dd | [] | no_license | Ad-007/ExData_Plotting1 | 84f83f0d7c21c78bffcb76d2c48cb5a8b84a7ea3 | 8b09a05733adecd969ba47b0aad2f4c6fc540fc2 | refs/heads/master | 2022-12-05T03:13:49.203396 | 2020-08-12T19:48:46 | 2020-08-12T19:48:46 | 286,930,216 | 0 | 0 | null | 2020-08-12T06:02:31 | 2020-08-12T06:02:30 | null | UTF-8 | R | false | false | 364 | r | plot1.R | plot1<-function(){
# Histogram of Global Active Power for 2007-02-01 and 2007-02-02, written
# to plot1.png. Reads household_power_consumption.txt (semicolon-separated)
# from the working directory.
png("plot1.png")
model<-read.csv("household_power_consumption.txt",sep = ";")
# keep only the two target dates (Date is stored as d/m/yyyy text)
df1<-subset(model,Date=="1/2/2007")
df2<-subset(model,Date=="2/2/2007")
df<-rbind(df1,df2)
# drop rows with any missing field
df<-df[complete.cases(df),]
# NOTE(review): as.numeric is needed because the column is not numeric on
# import -- presumably missing values are marked with "?" in the raw file;
# confirm those rows are actually removed by the complete.cases() step above.
hist(as.numeric(df$Global_active_power),col="red",xlab = "Global Active Power (kilowatts)",main = "Global Active Power")
dev.off()
} |
3ccd45dad8bd4b7fffde3b75dbe5a91ecac0e894 | bd8cd68974cbe33217bf1ec07942dc38403f9aaa | /models/functions/read_fset.R | 125ef30dbd9c9f651805f227724ed179f34140ec | [
"BSD-3-Clause"
] | permissive | nzanl/pilot_clientprofielen_wijkverpleging_2020 | 4c67a1d9d1c6baf61a51bafe4e77574bcedea640 | d70349fd6af7700475a1cf4a839966b08702e875 | refs/heads/master | 2023-01-02T14:38:28.088643 | 2020-10-16T09:08:41 | 2020-10-16T09:08:41 | 304,569,754 | 2 | 0 | null | null | null | null | UTF-8 | R | false | false | 676 | r | read_fset.R |
# LET OP heeft dir_pilot als global var nodig
# LET OP heeft ds_id als global var nodig
# Read a saved feature-set RDS file for the current pilot run.
# Depends on globals: `dir_pilot` (project root) and `ds_id` (dataset id).
#   fs_id     - feature-set id, zero-padded into the file name
#   aanb_type - "nanda", "omaha", or anything else for the combined set
# Returns the data frame stored in the matching RDS file under <dir_pilot>/Output.
read_fset <- function(fs_id, aanb_type = "all") {
  fs_tag <- sprintf("%02d", fs_id)
  ds_tag <- sprintf("%02d", ds_id)
  # map the requested provider type onto the suffix used in the file names
  suffix <- if (aanb_type == "nanda") {
    "aanb_nd"
  } else if (aanb_type == "omaha") {
    "aanb_om"
  } else {
    "all"
  }
  pattern <- paste0("^FSID_", fs_tag, "_DSID_", ds_tag,
                    ".*\\_features_", suffix, ".rds$")
  out_dir <- paste0(dir_pilot, "/Output")
  rds_file <- paste0(out_dir, "/", list.files(path = out_dir, pattern = pattern))
  readRDS(rds_file)
}
|
59338e07ba2ab799ef8b5de7c01c75ab69a5a18c | 49113c060add5d1fc558b88b0cf47364d07cd7fd | /R Algos/main.R | 872d506d418986ad279f95dcc8a385a785b9606f | [] | no_license | datainvestor/Optimization-Algorithms | 1396b547db603997a478e33346b2a6b51ab1b23f | 452434d7250c10da780dcf55df9e40dc952f7a34 | refs/heads/master | 2020-03-22T05:47:13.153326 | 2018-07-03T14:31:55 | 2018-07-03T14:31:55 | 139,590,023 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,646 | r | main.R | rm(list=ls())
# Set WD
#setwd('')
# Load functions
# Each source() call defines one of the optimisers compared below
# (steepestDescent, simulatedAnnealing, geneticAlgorithm) plus the numeric
# gradient helper they rely on.
source('numGradient.R')
source('steepestDescent.R')
source('simulatedAnnealing.R')
source('geneticAlgorithm.R')
# Objective functions:
# Ellipsoid:
# Ellipsoid objective: f(x) = x1^2 + 10 * x2^2, minimised at the origin.
#   x - numeric vector of length 2
myFun1 <- function(x) {
  x[1]^2 + 10 * x[2]^2
}
# Schaffer function:
# Schaffer-style objective in two variables (the classic form shifted so the
# constant term is 0.6 rather than 0.5).
#   x - numeric vector of length 2
myFun <- function(x) {
  sq1 <- x[1]^2
  sq2 <- x[2]^2
  numer <- sin(sq1 - sq2)^2 - 0.5
  denom <- (1 + 0.001 * (sq1 + sq2))^2
  0.6 + numer / denom
}
# Set Params
# Shared starting point and iteration budget for all three optimisers.
xSeed <- c(3, 4)
n_grid <- 100
ub_iter <- 10000
#set.seed(123)
# Run algorithms
# NOTE(review): `sd` shadows stats::sd for the rest of the script.
sd <- steepestDescent(myFun, xSeed, 0.01, 10^-13, ub_iter)
sa <- simulatedAnnealing(myFun, xSeed, 0.99, 10000, 0.2, ub_iter)
ga <- geneticAlgorithm(myFun, c(-20, -20), c(20, 20), cel=50, popSize = 30, maxIter = ub_iter, pMut = 0.05)
# Plot convergence of f
plot(sa$f_hist[1:ub_iter], col = 'blue', type = 'l', lwd=1)
lines(sd$f_hist[1:ub_iter], col = 'red', type = 'l', lwd=1)
lines(ga$f_hist[1:ub_iter], col = 'green', type = 'l', lwd=1)
# Plot optimization paths
# Evaluate the objective on an n_grid x n_grid lattice for the contour map.
# NOTE(review): `length` relies on partial matching of `length.out` in seq().
x_seq <- seq(-20, 20, length = n_grid)
matrVal <- matrix(0, nrow = n_grid, ncol = n_grid)
for(iRow in 1 : n_grid){
for(iCol in 1 : n_grid){
matrVal[iRow, iCol] <- myFun(c(x_seq[iRow], x_seq[iCol]))
}
}
contour(x_seq, x_seq, matrVal)
# overlay each optimiser's visited points on the contour plot
lines(ga$x_hist, col = 'green', type = 'p', lwd=3)
lines(sd$x_hist, col = 'red', type = 'l', lwd=5)
lines(sa$x_hist, col = 'blue', type = 'l', lwd=2)
# Name the best method
algoNames <- c("Steepest Descent", "Simulated Annealing", "Genetic Algorithm")
cat("The best solution was found by: ", algoNames[which.min(c(sd$f_opt, sa$f_opt, ga$f_opt))])
# Plot convergence of GA
plot(ga$f_mean, type = 'l', col='blue')
lines(ga$f_hist, type = 'l', col = 'red')
|
9c695f17d06eac4ad6ef271e3f8fe55916e1bd3b | 58691c7e1dd0c241de7ec2898ea66b5d2e5f5f4a | /man/wine.Rd | 880ef7cb9c660db4f80c1c7f13ca4b1fb02d9771 | [] | no_license | PSegaert/mrfDepth | b7fefd1bc3e897c9b095fac0e0f4c2cf9b3ad45a | a1118ddeef1997c72aedbc65dc83b48deda807e3 | refs/heads/master | 2021-01-18T22:23:13.435472 | 2018-10-12T08:55:47 | 2018-10-12T08:55:47 | 87,052,317 | 3 | 2 | null | null | null | null | UTF-8 | R | false | false | 1,392 | rd | wine.Rd | \name{wine}
\alias{wine}
\docType{data}
\title{Proton Nuclear Magnetic Resonance spectra of 40 different wine samples}
\description{
The original data set consists of Proton Nuclear Magnetic Resonance (NMR) spectra of 40 different wine samples in the spectral region from 6.00 to 0.50. This data set corresponds to the region between wavelengths 5.62 and 5.37 only for which \eqn{t = 397} measurements are available for each curve. The data has been analyzed in Hubert et al. (2015), see below.
}
\usage{data("wine")}
\format{
A three dimensional \eqn{t = 397} by \eqn{n = 40} by \eqn{p = 1} array,
with \eqn{t} the number of observed time points,
\eqn{n} the number of functional observations
and \eqn{p} the number of measurements
for every functional observation at every wavelength.
}
\details{
When using this data set, please cite both of the references below.
}
\source{
Larsen F, van den Berg F, Engelsen S (2006) An exploratory chemometric study of NMR spectra of table
wines. \emph{Journal of Chemometrics}, \bold{20} - (5), 198-208}
\references{
Hubert M., Rousseeuw P.J., Segaert P. (2015). Multivariate functional outlier detection (with rejoinder). \emph{Statistical Methods & Applications}, \bold{24}, 177--202.
}
\examples{
data(wine)
matplot(wine[,,1], type="l", lty=1, col = "black")
}
\keyword{datasets}
|
968c3d6dc878f2757928466ceaa3a41049942afe | a2aa9a8801f53ac1149fdbb3334d78ac0fe1782f | /cachematrix.R | b762e697cd240db567b999b136504cf761fc6f1e | [] | no_license | creativeCJ/ProgrammingAssignment2 | 1072a339c72710573ab772b5362653e1d892248d | 4bbcc4ff44c112430655aa2ae81ce73c6ab728fb | refs/heads/master | 2020-12-25T11:05:09.319023 | 2015-03-19T01:12:27 | 2015-03-19T01:12:27 | 32,361,426 | 0 | 0 | null | 2015-03-17T00:27:29 | 2015-03-17T00:27:28 | null | UTF-8 | R | false | false | 2,312 | r | cachematrix.R | # Example
# Caching the Mean of a Vector (Given: Assignment Notes)
# Build a vector "object": a list of closures sharing a private vector `x`
# and a cached mean, which is invalidated whenever the vector is replaced.
#   x - initial numeric vector (defaults to empty)
# Returns a list with accessors: set, get, setmean, getmean.
makeVector <- function(x = numeric()) {
  cached <- NULL
  list(
    set = function(y) {
      x <<- y
      cached <<- NULL  # new data invalidates the cached mean
    },
    get = function() x,
    setmean = function(mean) cached <<- mean,
    getmean = function() cached
  )
}
# Return the mean of the data held in a makeVector() object, computing it
# only on a cache miss and storing the result back via setmean().
#   x   - list produced by makeVector()
#   ... - extra arguments forwarded to mean()
cachemean <- function(x, ...) {
  hit <- x$getmean()
  if (!is.null(hit)) {
    message("getting cached data")
    return(hit)
  }
  result <- mean(x$get(), ...)
  x$setmean(result)
  result
}
# Exercise
# makeCacheMatrix
# (Given: Assignment Notes) makeCacheMatrix: This function creates a special "matrix" object that can cache its inverse.
# Build a matrix "object" that can cache its inverse alongside the data.
#   x - initial matrix (defaults to a 1x1 NA matrix)
# Returns a list with accessors: set/get for the matrix, setInverse/getInverse
# for the cached inverse, which is reset to NULL whenever the matrix changes.
makeCacheMatrix <- function(x = matrix()) {
  inv_cache <- NULL
  list(
    set = function(y) {
      x <<- y
      inv_cache <<- NULL  # changing the matrix invalidates the cache
    },
    get = function() x,
    # parameter kept as `solve` for interface compatibility, even though it
    # shadows base::solve inside this closure
    setInverse = function(solve) inv_cache <<- solve,
    getInverse = function() inv_cache
  )
}
# cacheSolve
# (Given: Assignment Notes) cacheSolve: This function computes the inverse of the special "matrix" returned by makeCacheMatrix above.
# If the inverse has already been calculated (and the matrix has not changed), then the cachesolve should retrieve the inverse from the cache.
# Return the inverse of the matrix held in a makeCacheMatrix() object.
# On a cache hit the stored inverse is returned; otherwise the inverse is
# computed with solve(), stored via setInverse(), and returned.
#   x   - list produced by makeCacheMatrix()
#   ... - extra arguments forwarded to solve()
cacheSolve <- function(x, ...) {
  Ix <- x$getInverse()
  # BUG FIX: the original tested is.null(I) -- `I` is base::I, a function
  # that is never NULL -- so the cached branch always ran and the inverse
  # was never computed. Test the cached value `Ix` instead.
  if (!is.null(Ix)) {
    message("getting cached data")
    return(Ix)
  }
  # If the cached value is NULL, retrieve the matrix and invert it
  message("newly calculating data")
  data <- x$get()
  Ix <- solve(data, ...)
  x$setInverse(Ix)  # store for future calls
  Ix
}
|
ca5a73028c69933702a988ee6930f4cd2ccaf7f9 | 942ea2badaa89456ec24e020dea8a52a2eaebea9 | /server.R | 7ce39851525d961df51b270ef04089ed39e362ba | [] | no_license | marrakesh1400/DevelopDataProducts | 242a8324037010ab4c3451b3adc2a6ee652da4d3 | 1392ad82bef27d88d07a00142e36be54ec54a0d8 | refs/heads/master | 2021-01-10T07:18:51.360274 | 2015-09-27T16:43:39 | 2015-09-27T16:43:39 | 43,254,746 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,088 | r | server.R | # server.R
library(sp)
library(rgdal)
require(maps)
# function to convert longitude to UTM zone
# Convert a longitude in degrees to its UTM zone number (1-60); zones are
# 6 degrees wide starting at longitude -180. Vectorised over `x`.
UTMzone <- function(x) {
  ceiling((x + 180) / 6)
}
# function to convert long/latitude to UTM coordinates
# Convert longitude/latitude (WGS84) coordinates to UTM easting/northing.
#   x, y - numeric vectors of longitudes and latitudes (same length)
#   zone - UTM zone number to project into (see UTMzone())
# Returns a data.frame with columns ID, X (easting) and Y (northing).
# NOTE(review): depends on sp/rgdal; rgdal is retired -- consider migrating
# to sf::st_transform.
LongLatToUTM <- function(x, y, zone) {
  xy <- data.frame(ID = seq_along(x), X = x, Y = y)
  coordinates(xy) <- c("X", "Y")
  proj4string(xy) <- CRS("+proj=longlat +datum=WGS84")
  # BUG FIX: the original built "... ellps=WGS84" without the leading "+";
  # PROJ only recognises tokens that start with "+", so the ellipsoid was
  # silently ignored. "+ellps=WGS84" is the correct form.
  res <- spTransform(xy, CRS(paste("+proj=utm +zone=", zone, " +ellps=WGS84", sep = '')))
  resdf <- as.data.frame(res)
  return(resdf)
}
# configure shiny server
# Shiny server: reactively report the UTM zone / easting / northing for the
# longitude and latitude chosen in the UI, and mark the point on a world map.
shinyServer(
function(input, output) {
# zone number derived from the longitude input
output$zone <- renderText({UTMzone(input$lon)})
# LongLatToUTM() returns columns ID, X, Y; column 2 is easting, 3 northing
output$easting <- renderText({unlist(LongLatToUTM(input$lon, input$lat, UTMzone(input$lon))[2])})
output$northing <- renderText({unlist(LongLatToUTM(input$lon, input$lat, UTMzone(input$lon))[3])})
output$sweetMap <- renderPlot({
# world basemap with the selected location highlighted in red
map("world", fill=TRUE, col="white", bg="lightblue", ylim=c(-90, 90), mar=c(0,0,0,0))
points(input$lon, input$lat, col = "red", pch = 16, cex = 2)
})
}
)
|
e0d3ce572f4a735e9021e395eef2dfd423ae0d7e | 72f2bbe0b5ee226f514c8e317a7c6f999d3938f6 | /man/speed_set_vocabulary.Rd | e22588f13f44ecc2e6b223bee0b2db5c254b3a75 | [] | no_license | bethanyleap/SpeedReader | 4853f6fa2dda451b24b795e11063e843d8a0f9a2 | 8e3bcd38d03dabc3efa7f4f7981ce002aa79a5e8 | refs/heads/master | 2022-04-10T20:36:35.103520 | 2020-03-18T02:07:36 | 2020-03-18T02:07:36 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | true | 914 | rd | speed_set_vocabulary.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/speed_set_vocabulary.R
\name{speed_set_vocabulary}
\alias{speed_set_vocabulary}
\title{A function that reorganizes vocabulary to speed up document term matrix formation using a string stem dictionary.}
\usage{
speed_set_vocabulary(vocab, term_frequency_threshold = 0, cores = 1)
}
\arguments{
\item{vocab}{A vocabulary list object returned by the count_words() function.}
\item{term_frequency_threshold}{A threshold below which all words appearing fewer than that many times in the corpus will be removed. Defaults to 0 in which case no words will be removed.}
\item{cores}{The number of cores we wish to use for parallelization to speed up computation. Defaults to 1.}
}
\value{
A vocabulary list object.
}
\description{
A function that reorganizes vocabulary to speed up document term matrix formation using a string stem dictionary.
}
|
ba4f51c469bcb7d2f58647ed1c6aaba4686b9fdc | b3441bbadf7f3fcf2346dd1cf0a20f181b2072df | /man/RspProduct.Rd | 859c67148188dbe6fb02fbf93e0a6f89d17ede9a | [] | no_license | HenrikBengtsson/R.rsp | 9bf203326c4c7baa1d5de37dcf9a499bfc9e3e18 | d3ab968460c7c130abcdbf1ea4252de1b5d9da2d | refs/heads/develop | 2023-04-13T00:43:33.830793 | 2023-03-18T04:37:29 | 2023-03-18T04:37:29 | 19,396,072 | 28 | 8 | null | 2022-03-08T03:34:46 | 2014-05-03T05:15:52 | R | UTF-8 | R | false | false | 2,112 | rd | RspProduct.Rd | %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% Do not modify this file since it was automatically generated from:
%
% RspProduct.R
%
% by the Rdoc compiler part of the R.oo package.
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\name{RspProduct}
\docType{class}
\alias{RspProduct}
\title{The RspProduct class}
\description{
Package: R.rsp \cr
\bold{Class RspProduct}\cr
\code{logical}\cr
\code{~~|}\cr
\code{~~+--}\code{\link[R.rsp]{RspObject}}\cr
\code{~~~~~~~|}\cr
\code{~~~~~~~+--}\emph{\code{RspProduct}}\cr
\bold{Directly known subclasses:}\cr
\link[R.rsp]{RspFileProduct}, \emph{\link[R.rsp]{RspRSourceCode}}, \emph{\link[R.rsp]{RspShSourceCode}}, \emph{\link[R.rsp]{RspSourceCode}}, \emph{\link[R.rsp]{RspStringProduct}}\cr
public abstract class \bold{RspProduct}\cr
extends \link[R.rsp]{RspObject}\cr
An RspProduct object represents an RSP product generated by processing
an RSP document.
}
\usage{
RspProduct(object=NA, ...)
}
\arguments{
\item{object}{The RSP product.}
\item{...}{Arguments passed to \code{\link{RspObject}}.}
}
\section{Fields and Methods}{
\bold{Methods:}\cr
\tabular{rll}{
\tab \code{!} \tab -\cr
\tab \code{\link[R.rsp:findProcessor.RspProduct]{findProcessor}} \tab Locates a processor for an RSP product.\cr
\tab \code{\link[R.rsp:getType.RspProduct]{getType}} \tab Gets the type of an RSP product.\cr
\tab \code{\link[R.rsp:hasProcessor.RspProduct]{hasProcessor}} \tab Checks whether a processor exist or not for an RSP product.\cr
\tab \code{print} \tab -\cr
\tab \code{\link[R.rsp:process.RspProduct]{process}} \tab Processes an RSP file product.\cr
\tab \code{\link[R.rsp:view.RspProduct]{view}} \tab Views the RSP product.\cr
}
\bold{Methods inherited from RspObject}:\cr
print
\bold{Methods inherited from logical}:\cr
Ops,nonStructure,vector-method, Ops,structure,vector-method, Ops,vector,nonStructure-method, Ops,vector,structure-method, as.data.frame, as.raster, coerce,ANY,logical-method, whichVector
}
\author{Henrik Bengtsson}
\keyword{classes}
\keyword{internal}
|
bae1163894ea38e153fc9a7a4e0f5c1cbce7b9b6 | d42e16b46cfd11593c6f0ccef5bcc9b485c7f321 | /DPLYR_Missing_Values.R | e1548f2277ca93d811406a342c13e613077a7602 | [] | no_license | JBalloonist/Capstone | 7ec53442cbe21a70904fb53552d38d0ded17edf3 | c06b353c31ab84e9698bf5ee91e98ce832ae233f | refs/heads/master | 2020-04-23T08:28:18.215591 | 2019-03-03T17:47:47 | 2019-03-03T17:47:47 | 171,038,059 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 5,082 | r | DPLYR_Missing_Values.R | ##--------------------------------------------------------------##
## Universal Options -----------------------------------------##
##-------------------------------------------------------------##
options(java.parameters = "-Xmx8g")
options("stringsAsFactors" = FALSE)
options(pillar.subtle_num = FALSE)
options(scipen=999)
library(dplyr)
library(mlr)
library(readr)
library(tidyr)
##--------------------------------------------------------------##
## Read In Data --------------------##
##--------------------------------------------------------------##
data <- read.csv('~/Dropbox/MSAn Capstone - Fall 2018/Data/3. 100K Sampled Data/FINAL2_SAMPLED100.csv',
na.strings=c("NA","NaN", " ", ""))
##--------------------------------------------------------------##
## Intro to DPLYR --------------------##
##--------------------------------------------------------------##
#---SELECT certain variables in a data frame -------------------------------------------
data.1 <- data %>%
select(RESP, days_since_last_activity, V90, Upmarket )
#---DROP (-SELECT) certain variables in a data frame -------------------------------------------
data.2 <- data.1 %>%
select(-V90, -Upmarket)
#---FILTER data frame based on conditions -------------------------------------------
data.RESP <- data %>%
filter(RESP==1)
#---CREATE NEW VARIABLES -------------------------------------------
data.3 <- data.1 %>%
mutate(recent_active = ifelse(days_since_last_activity<365, 1, 0))
#---Create SUMMARY METRICS by GROUP --------------------------------------------------
data.4 <- data.1 %>%
group_by(Upmarket) %>%
summarise(avg_RESP = mean(RESP, na.rm=T))
print(data.4)
##--------------------------------------------------------------##
## Missing Imputation --------------------##
##--------------------------------------------------------------##
#---Explore MISSING VARIABLES -------------------------------------------------------
# missing_values <- data %>%
# summarise_all(funs(sum(is.na(.))/n())) %>%
# gather(key="feature", value="missing_pct")
#
# missing_values %>%
# ggplot(aes(x=reorder(feature,-missing_pct),y=missing_pct)) +
# geom_bar(stat="identity",fill="steelblue")+
# coord_flip()+
# theme_few()
#---Remove variables with X% missing (constant)--------------------------------
data.5 <- removeConstantFeatures(data, perc=0.01) #99% the same value
#----Explore the 'Class' Types in your Data---------------------------------------
#Get Class by Variable DF
keep = c('V41', 'V42', 'V47', 'V48')
table(sapply(data.5[, keep], class))
shawn = c('V160', 'V159', 'V161')
table(sapply(data.5[, shawn], class))
keep_variables <- c('RESP', 'V57', 'V90', 'V48', 'V41', 'V42', 'V138', 'V85', 'V47', 'V86',
'V160', 'V161', 'V159', 'V189', 'V260', 'V61', 'V122', 'V124', 'V126',
'V127', 'V128', 'V129', 'V162', 'days_since_last_activity', 'State', 'Upmarket', "MATCH")
data_keep <- data.5[, keep_variables]
#----Impute Missings w/Mean & Mode-----------------------------------------------
imp <- mlr::impute(data_keep, classes = list(numeric=imputeMedian(),integer = imputeMedian()))
# , dummy.classes = c("character", "numeric", "integer"), dummy.type = "numeric") #You can include this or not
data.noMiss <- imp$data
###------ Create Dummy Variables-----------###
# install.packages("fastDummies")
library(fastDummies)
to_dummy <- c('V57', 'V90', 'V48', 'V41', 'V42', 'V138', 'V85', 'V47', 'V86',
'V160', 'V161', 'V159')
# convert to dummy variables
data_dummies <- fastDummies::dummy_cols(data.noMiss, select_columns = to_dummy)
head(data_dummies)
tail(data_dummies)
# get rid of the original columns that were made into dummy variables
list_to_keep <- setdiff(names(data_dummies), to_dummy)
data_dummies <- data_dummies[ , list_to_keep]
tail(data_dummies)
library(knitr)
#Keep just numeric/integer
# Restrict to numeric columns and to matched records only.
df<-data_dummies %>%
select_if(is.numeric) %>%
filter(MATCH == 1)
#Create variable list (excluding dep var)
VAR_LIST <- names(df)[ - which(names(df) == "RESP")]
#Set-up df
# Pre-allocate one result row per candidate variable.
# NOTE(review): `list` here shadows base::list for the rest of the script.
col_names <- c("Variable", "Corr", "P.Value", "abs.Corr")
list <- data.frame(matrix(nrow = length(VAR_LIST), ncol = length(col_names)))
names(list) <- col_names
n <- 1
#Run corr for each var
# Pearson correlation of every candidate variable against the response RESP.
for (i in 1:(length(VAR_LIST))) {
p <- cor.test(df[[VAR_LIST[[i]]]], df$RESP)
list[n,"Variable"] <- VAR_LIST[[i]]
list[n,"Corr"] <- p$estimate
list[n,"P.Value"] <- p$p.value
list[n,"abs.Corr"] <- abs(p$estimate)
n <- n + 1
}
#Summarise & Print
# Sort by absolute correlation (strongest first) and render as a table.
list <- list[order(-list$abs.Corr),]
kable(list, caption = "Pearson Correlation Coefficients w/RESP ")
# library(GGally)
#
# top_10 <- c("RESP", "days_since_last_activity", "Upmarket", "V90_1", "V138_1", "V61", "V162", "V138_7", "V86_K",
# "V161_E1", "V138_2")
#
# library(corrplot)
# ggpairs(df[, top_10])
# # corrplot(df[, top_10])
# test <- df[,top_10]
# ggpairs(test)
# corrplot(test, method = 'circle')
|
425f796d30775f54d8216751498350fb048b1265 | 416550c21c0e3f49ae34ef843b4c352910c3c2f9 | /man/peaks-MsChromList-method.Rd | 1d485248d4d2bff83eb1034c2e193624dc8212ec | [] | no_license | thomasp85/MSsary | 34dc8e93fd13a33ba6f78598626bb134d6cb151c | bf182b67b072256c4ff16b8c72678109f899ecc5 | refs/heads/master | 2021-01-22T12:12:39.641522 | 2015-01-26T11:44:40 | 2015-01-26T11:44:40 | 25,297,627 | 4 | 2 | null | null | null | null | UTF-8 | R | false | false | 333 | rd | peaks-MsChromList-method.Rd | % Generated by roxygen2 (4.1.0): do not edit by hand
% Please edit documentation in R/MsChromList.R
\docType{methods}
\name{peaks,MsChromList-method}
\alias{peaks,MsChromList-method}
\title{Extract peaks from the EIC}
\usage{
\S4method{peaks}{MsChromList}(object, overlaps = FALSE, ...)
}
\description{
Extract peaks from the EIC
}
|
28d0f032e9e216bf3b4bf477e29563e40ff1f06e | e462744bcbcf755cc2425a0cc49e46f4081bc565 | /server.R | 02fdd40f0c39fbe02e5df6bedb660425d20e3549 | [] | no_license | epahang/a8-building-applications | c6d2aebbe563eddb8c624ed495f3ea6cb9bc32db | cda4ed0cf5b8ffdf3270c72efe0141629948d0c4 | refs/heads/master | 2016-08-11T07:39:02.474531 | 2016-02-29T19:15:22 | 2016-02-29T19:15:22 | 52,762,660 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 883 | r | server.R | # Assignment 8 - Building Applications
# Set working directory
# setwd("~/Documents/INFO498F/a8-building-applications")
# Load packages
library(shiny)
library(dplyr)
library(plotly)
# Load iris data
data("iris")
# Shiny server: build a plotly scatter of the iris measurements for the
# species picked in the UI; the plot is re-rendered only when the "render"
# action button fires (eventReactive on input$render).
shinyServer(function(input, output) {
createPlot <- eventReactive(input$render, {
# NOTE(review): runif() on a logical does not gate execution -- this looks
# like it was meant to be a req()/validate() guard on the selection; confirm.
runif(input$flower_species != "Choose a species...")
title_plot <- paste0("Petal Length vs. Sepal Length (and Sepal Width) for ", input$flower_species, " Flowers")
dataframe <- filter(iris, Species == input$flower_species)
# position encodes sepal/petal length; colour = sepal width, size = petal width
p <- plot_ly(data = dataframe, x = Sepal.Length, y = Petal.Length, mode = "markers",
color = Sepal.Width, size = Petal.Width) %>%
layout(title = title_plot, width = 825, height = 525)
return (p)
})
# Create a "plot" variable on output
output$plot <- renderPlotly({
createPlot()
})
}) |
2df29be44a6dc8b62038b7963a35db958ebf8043 | 094bb4b008075c6c89d437f6a753e553ee01b03e | /man/to_bin.Rd | 8fa1c580b370fec004fda8047a951907249f4785 | [] | no_license | TommasoCanc/biomonitoR | 025264f6a6d69fae0e896a9f0a7874da7b184003 | 26136a8becdeb052adc5f8ff08d585024d50d223 | refs/heads/master | 2022-02-03T18:31:19.541891 | 2022-01-24T13:19:42 | 2022-01-24T13:19:42 | 141,422,404 | 0 | 0 | null | 2018-07-18T10:57:02 | 2018-07-18T10:57:02 | null | UTF-8 | R | false | true | 406 | rd | to_bin.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/to_bin.R
\name{to_bin}
\alias{to_bin}
\title{Abundance to presence-absence}
\usage{
to_bin(x)
}
\arguments{
\item{x}{a \code{data.frame}}
}
\description{
This function transforms abundance to presence-absence.
}
\details{
\code{to_bin} will transform values greater than 0 to 1.
}
\examples{
data(macro_ex)
to_bin(macro_ex)
}
|
6c7d89b5a13b2c69ca3f6bbdec643d0e7e165feb | 36ca299bbc7c2d429e417a01490275f35ec84346 | /Regression/Polynomial Regression/Polynomial Regression Model.R | 6961919e413bb6fd4797867f37f495accd6c6a8c | [] | no_license | jpedroanascimento123/Data-Science-Projects | 5a5abc470bb5115a88ac7f3cba283420005b3d18 | c027443879aa23dd95b063cc119c17c9c8608ce7 | refs/heads/master | 2021-07-16T22:30:11.120189 | 2020-05-20T17:40:57 | 2020-05-20T17:40:57 | 161,149,527 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,744 | r | Polynomial Regression Model.R | # Import the dataset
dataset = read.csv('Position_Salaries.csv')
# Dealing with categorical data (transforming text into numerical) - since the Level column is the encoding of the Position column, we will use the Level and Salary columns only
# Need to subset the dataset to just have those two columns
dataset = dataset[2:3]
# Since it's a dataset with just 10 rows, it doesn't make sense to split into a training and test set.
# We don't need the feature scaling as well because this is a polynomial regression, which means we will need the "real values" to grab the exponents.
# A Polynomial regression Model is more suited for this problem because there isn't a linear relationship between our independent variable (Level) and our dependent variable (Salary)
dataset$Level2 = dataset$Level^2
dataset$Level3 = dataset$Level^3
dataset$Level4 = dataset$Level^4
dataset$Level5 = dataset$Level^5
regressor = lm(formula = Salary ~ .,
data = dataset)
# Visualising the Polynomial Regression Results
install.packages('ggplot2')
ggplot() +
geom_point(aes(x = dataset$Level, y = dataset$Salary),
colour = "red") +
geom_line(aes(x = dataset$Level, y = predict(regressor, newdata = dataset)),
colour = "blue") +
ggtitle('Salary vs Level') +
xlab('Level') +
ylab('Salary')
# Predicting a new result with Polynomial Regression
predictor = predict(regressor, data.frame(Level = 6.5,
Level2 = 6.5^2,
Level3 = 6.5^3,
Level4 = 6.5^4,
Level5 = 6.5^5))
|
6bed1239ccf2561874e241eef5cb870b8dbf72d4 | 8130e4802356a44450750d6f006c780fde71c64a | /10+ Guidelines for Better Tables in R.R | c057f5ad97b158a43ab37a906144fdd872083dcf | [] | no_license | chenbingshun98/bsdzp | ba40ab069a4a9abb06abdd8b619b153a87bd132f | e5404f88fd93d6bb752f237429e896207f6074e3 | refs/heads/master | 2021-10-12T05:41:59.539543 | 2021-10-04T06:02:29 | 2021-10-04T06:02:29 | 246,439,915 | 1 | 0 | null | null | null | null | UTF-8 | R | false | false | 12,849 | r | 10+ Guidelines for Better Tables in R.R | library(tidyverse)
library(gt)
library(gtsummary)
library(tidytuesdayR)
# This works!
#gt(yield_data_wide)
# pipe also works!
tuesdata <- tidytuesdayR::tt_load(2020, 36)
key_crop_yields <- tuesdata$key_crop_yields
country_sel <- c("China", "India", "United States", "Indonesia", "Mexico", "Pakistan")
yield_data <- tuesdata$key_crop_yields %>%
janitor::clean_names() %>%
rename_with(~str_remove(., "_tonnes_per_hectare")) %>%
select(entity:beans, -code) %>%
pivot_longer(cols = wheat:beans, names_to = "crop", values_to = "yield") %>%
rename(Country = entity)
yield_data %>%
gt()
#Add Groups
key_crop_yields %>%
head() %>%
group_by(Entity) %>% # respects grouping from dplyr
gt(rowname_col = "crop")
key_crop_yields %>%
head() %>%
gt(
groupname_col = "crop",
rowname_col = 'Country'
)
#Adjust appearance
yield_data_wide %>%
head() %>%
gt(
groupname_col = "crop",
rowname_col = "Country"
) %>%
tab_header(
title = "Crop Yields between 2014 and 2016",
subtitle = "Countries limited to Asia"
) %>%
tab_options(
heading.subtitle.font.size = 12,
heading.align = "left",
table.border.top.color = "black",
column_labels.border.bottom.color = "black",
column_labels.border.bottom.width= px(3),
)
#
yield_data_wide %>%
mutate(crop = str_to_title(crop)) %>%
group_by(crop) %>%
gt(
rowname_col = "Country"
) %>%
fmt_number(
columns = 2:5, # reference cols by position
decimals = 2 # decrease decimal places
) %>%
summary_rows(
groups = TRUE,
columns = vars(`2014`, `2015`, `2016`), # reference cols by name
fns = list(
avg = ~mean(.), # add as many summary stats as you want!
sd = ~sd(.)
)
)
#Add spanners
yield_data_wide %>%
head() %>%
gt(
groupname_col = "crop",
rowname_col = "Country"
) %>%
tab_spanner(
label = "Yield in Tonnes/Hectare",
columns = 2:5
)
#Add notes and titles
yield_data_wide %>%
head() %>%
gt(
groupname_col = "crop",
rowname_col = "Country"
) %>%
tab_footnote(
footnote = "Yield in Tonnes/Hectare",
locations = cells_column_labels(
columns = 1:3 # note
)
)
yield_data_wide %>%
head() %>%
gt(
groupname_col = "crop",
rowname_col = "Country"
) %>%
tab_footnote(
footnote = "Yield in Tonnes/Hectare",
locations = cells_column_labels(
columns = 1:3 # note
)
) %>%
tab_source_note(source_note = "Data: OurWorldInData")
yield_data_wide %>%
head() %>%
gt(
groupname_col = "crop",
rowname_col = "Country"
) %>%
tab_header(
title = md("**Crop Yields between 2014 and 2016**"),
subtitle = html("<em>Countries limited to Asia</em>")
)
# Adjust appearance
yield_data_wide %>%
head() %>%
gt(
groupname_col = "crop",
rowname_col = "Country"
) %>%
tab_header(
title = "Crop Yields between 2014 and 2016",
subtitle = "Countries limited to Asia"
) %>%
tab_options(
heading.subtitle.font.size = 12,
heading.align = "left",
table.border.top.color = "black",
column_labels.border.bottom.color = "black",
column_labels.border.bottom.width= px(3),
)
# Shared gt theme: smaller left-aligned subtitle plus black rules framing the
# column labels. Apply as the last step of a gt() pipeline.
#   data - a gt table object
# Returns the themed gt table.
# BUG FIX: the original tab_options() call ended with a trailing comma, which
# passes an empty (missing) argument -- R rejects empty call arguments
# ("argument N is empty") -- so the comma is removed.
my_theme <- function(data) {
  tab_options(
    data = data,
    heading.subtitle.font.size = 12,
    heading.align = "left",
    table.border.top.color = "black",
    column_labels.border.bottom.color = "black",
    column_labels.border.bottom.width = px(3)
  )
}
yield_data_wide %>%
head() %>%
gt(
groupname_col = "crop",
rowname_col = "Country"
) %>%
tab_header(
title = "Crop Yields between 2014 and 2016",
subtitle = "Countries limited to Asia"
) %>%
my_theme()
yield_data_wide %>%
head() %>%
gt() %>%
tab_style(
style = list(
cell_text(weight = "bold")
),
locations = cells_column_labels(everything())
) %>%
tab_style(
style = list(
cell_fill(color = "black", alpha = 0.2),
cell_borders(
side = c("left", "right"),
color = "black",
weight = px(2)
)
),
locations = cells_body(
columns = vars(crop)
)
) %>%
tab_style(
style = list(
cell_text(color = "red", style = "italic")
),
locations = cells_body(
columns = 3:5,
rows = Country == "China"
)
)
yield_data_wide %>%
head() %>%
gt(
groupname_col = "crop",
rowname_col = "Country"
) %>%
data_color(
columns = vars(`2014`, `2015`, `2016`),
colors = scales::col_numeric(
paletteer::paletteer_d(
palette = "ggsci::red_material") %>% as.character(),
domain = NULL
)
)
##
tuesdata <- tidytuesdayR::tt_load(2020, "36")
country_sel <- c("China", "India", "United States", "Indonesia", "Mexico", "Pakistan")
yield_data <- tuesdata$key_crop_yields %>%
janitor::clean_names() %>%
rename_with(~str_remove(., "_tonnes_per_hectare")) %>%
select(entity:beans, -code) %>%
pivot_longer(cols = wheat:beans, names_to = "crop", values_to = "yield") %>%
rename(Country = entity)
#Rule 1: Offset the Heads from the Body
#The goal here is to clearly separate your column titles from the body of the table.
# data prep
potato_data <- yield_data %>%
filter(Country %in% country_sel, crop == "potatoes", year %in% c(2013:2016)) %>%
filter(crop == "potatoes") %>%
pivot_wider(names_from = year, values_from = "yield")
potato_data
# Poor Example
potato_tb <- potato_data %>%
gt() %>%
cols_hide(vars(crop)) %>%
opt_table_lines(extent = "none") %>%
fmt_number(
columns = 3:6,
decimals = 2
)
potato_tb
#improved
rule1_good <- potato_tb %>%
tab_style(
style = list(
cell_text(weight = "bold")
),
locations = cells_column_labels(everything())
) %>%
opt_table_lines(extent = "default") %>%
tab_options(
column_labels.border.top.color = "white",
column_labels.border.top.width = px(3),
column_labels.border.bottom.color = "black",
table_body.hlines.color = "white",
table.border.bottom.color = "white",
table.border.bottom.width = px(2),
# table_body.border.bottom.color = "white",
# table_body.border.bottom.width = px(6)
) %>%
tab_source_note(md("**Table**:@thomas_mock | **Data**: OurWorldInData.org<br>**Inspiration**: @jschwabish"))
rule1_good
#Rule 2: Use Subtle Dividers Rather Than Heavy Gridlines
# data prep: wide table of yearly potato yields plus two period averages.
rule2_data <- yield_data %>%
  filter(Country %in% country_sel, crop == "potatoes", year %in% c(2007:2016)) %>%
  filter(crop == "potatoes") %>%
  select(-crop) %>%
  pivot_wider(names_from = year, values_from = "yield") %>%
  rowwise() %>%
  mutate(
    # FIX: the original `mean(`2007`:`2011`)` built an integer sequence
    # running from the 2007 cell value to the 2011 cell value and averaged
    # that; c_across() averages the actual 2007-2011 columns row-wise.
    avg_07_11 = mean(c_across(`2007`:`2011`)),
    .before = `2012`
  ) %>%
  mutate(
    avg_12_16 = mean(c_across(`2012`:`2016`))
  ) %>%
  ungroup()
#Poor Example
# Deliberately over-gridded table: a solid grey border around every body
# cell and column label, plus a grand-summary "Average" row.
rule2_tab1 <- rule2_data %>%
  gt(
    rowname_col = "Country"
  ) %>%
  # Both period-average columns share the short label "Avg.".
  cols_label(
    avg_07_11 = "Avg.",
    avg_12_16 = "Avg."
  ) %>%
  cols_width(
    1 ~ px(125)
  ) %>%
  fmt_number(
    columns = 2:last_col()
  ) %>%
  # Heavy gridlines -- the "anti-pattern" this example demonstrates.
  tab_style(
    style = cell_borders(
      sides = "all",
      color = "grey",
      weight = px(1),
      style = "solid"
    ),
    locations = list(
      cells_body(
        everything()
      ),
      cells_column_labels(
        everything()
      )
    )
  ) %>%
  grand_summary_rows(
    columns = 2:last_col(),
    fns = list(
      "Average" = ~mean(.)
    ),
    formatter = fmt_number
  )
rule2_tab1
# Improved version: an explicit "Average" row appended to the data, subtle
# fills on the average columns, and a single heavy border above the
# summary row instead of full gridlines.
rule2_tab2 <- rule2_data %>%
  # NOTE(review): add_row() expects name-value pairs; passing a summarised
  # one-row data frame positionally relies on tibble splicing -- confirm it
  # produces the intended row rather than a packed data-frame column.
  add_row(
    rule2_data %>%
      summarise(
        across(where(is.double),
               list(Average = mean),
               .names = "{col}")
      ) %>%
      mutate(Country = "Average")
  ) %>%
  gt() %>%
  cols_label(
    avg_07_11 = "Avg.",
    avg_12_16 = "Avg."
  ) %>%
  fmt_number(
    columns = 2:last_col()
  ) %>%
  # Light grey fill to set the two average columns apart.
  tab_style(
    style = cell_fill(
      color = "lightgrey"
    ),
    locations = list(
      cells_body(
        columns = vars(avg_07_11,avg_12_16)
      ),
      cells_column_labels(
        columns = vars(avg_07_11, avg_12_16)
      )
    )
  ) %>%
  # Single divider above the appended "Average" row.
  tab_style(
    style = cell_borders(
      sides = "top",
      color = "black",
      weight = px(2)
    ),
    locations = cells_body(
      columns = everything(),
      rows = Country == "Average"
    )
  ) %>%
  tab_style(
    style = list(
      cell_text(weight = "bold")
    ),
    locations = cells_column_labels(everything())
  ) %>%
  tab_options(
    column_labels.border.top.color = "black",
    column_labels.border.top.width = px(3),
    column_labels.border.bottom.color = "black"
  ) %>%
  tab_source_note(md("**Table**: @thomas_mock | **Data**: OurWorldInData.org<br>**Inspiration**: @jschwabish"))
rule2_tab2
#Rule 3: Right-Align Numbers and Heads
# One row per crop: US yields in 2016, crop names title-cased.
rule3_data <- yield_data %>%
  filter(Country == "United States", year %in% c(2016)) %>%
  mutate(crop = str_to_title(crop)) %>%
  pivot_wider(
    names_from = year,
    values_from = "yield"
  ) %>%
  arrange(crop) %>%
  select(-Country, Crop = crop)
rule3_data
#Comparison of alignment
# Duplicate the 2016 column so the same numbers can be shown with three
# different alignments side by side.
rule3_align <- rule3_data %>%
  mutate(`Center align` = `2016`,
         `Right align` = `2016`) %>%
  rename(`Left align` = 2) %>%
  gt() %>%
  tab_style(
    style = list(
      cell_text(weight = "bold")
    ),
    locations = cells_column_labels(everything())
  ) %>%
  fmt_number(
    columns = 2:4
  ) %>%
  cols_align(align = "left",
             columns = 2) %>%
  cols_align(align = "center",
             columns = 3) %>%
  cols_align(align = "right",
             columns = 4) %>%
  tab_options(
    column_labels.border.top.color = "white",
    column_labels.border.top.width = px(3),
    column_labels.border.bottom.color = "black",
    table_body.hlines.color = "white",
    table.border.bottom.color = "white",
    table.border.bottom.width = px(3)
  ) %>%
  tab_source_note(md("**Table**: @thomas_mock | **Data**: OurWorldInData.org<br>**Inspiration**: @jschwabish"))
rule3_align
#Addendums to alignment
# Per-year flag marking the highest-yield crop among five-letter crops.
# NOTE(review): Country filter is "Africa" (an aggregate entity in the
# OurWorldInData table, not a country) -- confirm this is intentional.
rule3_data_addendum <- yield_data %>%
  filter(
    Country %in% c("Africa"),
    year >= 2015,
    str_length(crop) == 5
  ) %>%
  group_by(year) %>%
  mutate(
    crop = str_to_title(crop),
    max_yield = max(yield),
    `Top Crop` = if_else(yield == max_yield, "Y", "N")
  ) %>%
  select(Year = year, Crop = crop, `Top Crop`, Yield = yield) %>%
  ungroup()
# Plain table for comparison.
rule3_data_addendum %>%
  gt()
# Same table with short text columns centred.
rule3_data_addendum %>%
  gt() %>%
  gt::cols_align(
    align = "center",
    columns = vars(`Top Crop`, Crop)
  )
# Wide layout: one column per year.
rule3_data_addendum %>%
  pivot_wider(
    names_from = Year,
    values_from = Yield
  ) %>%
  gt() %>%
  gt::cols_align(
    align = "center",
    columns = vars(`Top Crop`)
  )
#Choose fonts carefully
# Duplicate the 2016 column once per font so each copy can be styled with
# a different typeface for comparison.
rule3_text <- rule3_data %>%
  mutate(Karla = `2016`,
         Cabin = `2016`,
         Georgia = `2016`,
         `Fira Mono` = `2016`) %>%
  rename(Default = 2) %>%
  gt() %>%
tab_style(
style = list(
cell_text(font = "Default",
decorate = "underline"),
locations = list(
cells_column_labels(
vars(Default)
),
cells_body(
vars(Default)
)
)
)
)%>%
tab_style(
style = list(
cell_text(
font = "Karla",
decorate = "underline"
),
locations = list(
cells_column_labels(
vars(Karla)
),
cells_body(
vars(Karla)
)
)
)
) %>%
  # Underline each remaining column in its own typeface.
  tab_style(
    style = list(
      cell_text(font = "Cabin", decorate = "underline")
    ),
    locations = list(
      cells_column_labels(
        vars(Cabin)
      ),
      cells_body(vars(Cabin))
    )
  ) %>%
  tab_style(
    style = list(
      cell_text(font = "Georgia",
                decorate = "underline")
    ),
    locations = list(
      cells_column_labels(
        vars(Georgia)
      ),
      cells_body(
        vars(Georgia)
      )
    )
  ) %>%
  tab_style(
    style = list(
      cell_text(font = "Fira Mono", decorate = "underline")
    ),
    locations = list(
      cells_column_labels(
        vars(`Fira Mono`)
      ),
      cells_body(
        vars(`Fira Mono`)
      )
    )
  ) %>%
  fmt_number(columns = 2:6) %>%
  # Spanner labels group the columns into "Good" and "Bad" font choices.
  tab_spanner(
    label = "Good",
    columns = c(2, 6)
  ) %>%
  tab_spanner(
    "Bad",
    3:5
  ) %>%
  tab_options(
    column_labels.border.top.color = "white",
    column_labels.border.top.width = px(3),
    column_labels.border.bottom.color = "black",
    table_body.hlines.color = "white",
    table.border.bottom.color = "white",
    table.border.bottom.width = px(3)
  )
|
41ca083de5b77898885b7a9e35814165f629a9ce | 572a321e0a898eb67cbf815789f8e880dd344cc5 | /Catchment/ColibanCatchment/app.R | f46c49abf253fdfd043d44a345f2aca79037682c | [] | no_license | Aggarch/hydroinformatics | 010a567a18136306f4c351fd566b7a60d4636243 | c32498154b8b437a56dd22a002ab8ab700548064 | refs/heads/master | 2022-04-21T17:01:45.146015 | 2020-03-29T21:17:59 | 2020-03-29T21:17:59 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 10,726 | r | app.R | ##
## Coliban Water - Reservoir Levels
##
## Shiny dashboard showing rainfall, streamflow and reservoir data for the
## Coliban Water catchment (data sourced from DELWP).
library(shiny)
library(readr)
library(shinydashboard)
library(stringr)
library(tidyverse)
library(DT)
library(leaflet)
# NOTE(review): paths are relative to the repository root, not to this
# app.R's own folder -- confirm the working directory before deploying
# (Shiny normally serves an app from its own directory).
rain <- read_csv("Catchment/ColibanCatchment/rain.csv")
streamflows <- read_csv("Catchment/ColibanCatchment/streamflows.csv")
reservoirs <- read_csv("Catchment/ColibanCatchment/reservoirs.csv")
stations <- read.csv("Catchment/ColibanCatchment/stations.csv")
## User Interface
## Dashboard layout: sidebar with four tabs plus a global date-range
## filter; the body renders one tabItem per menu entry.
ui <- dashboardPage(
  dashboardHeader(title = "Coliban Water Catchment Data Dashboard"),
  dashboardSidebar(
    sidebarMenu(
      menuItem("Overview", tabName = "overview", icon = icon("chart-line")),
      menuItem("Meteorology", tabName = "meteo", icon = icon("cloud")),
      menuItem("Flows", tabName = "flows", icon = icon("stream")),
      menuItem("Reservoirs", tabName = "res", icon = icon("bath"))
    ),
    # Global reporting period; defaults to the most recent four weeks of
    # reservoir data (the second positional argument is the range end).
    dateRangeInput('daterange',
                   label = "Date Range",
                   start = max(reservoirs$Date) - 28, max(reservoirs$Date),
                   max = max(reservoirs$Date)
    )
  ),
  dashboardBody(
    tags$head(tags$style(".sidebar-menu li { margin-bottom: 20px; }")),
    tabItems(
      tabItem("overview",
              h2("Overview"),
              # FIX: corrected typos in the user-facing text
              # ("webiste" -> "website", "bu" -> "by").
              tags$div(p("This website presents data about rainfall, inflows and reservoir levels in the Coliban Water catchment."),
                       p("Click on the menu items on the left for more detailed information."),
                       p("Use the date selector on the left to change the reporting period."),
                       p("You can download data by clicking the button below each graph."),
                       p("Data source: Department of Environment, Land, Water & Planning (DELWP)"),
                       tags$a(href = "http://data.water.vic.gov.au/", "data.water.vic.gov.au"),
                       p()),
              valueBoxOutput("lauriston_today"),
              valueBoxOutput("ucoliban_today"),
              valueBoxOutput("malmsbury_today"),
              # FIX: "measruement" -> "measurement".
              p("Click on the dot to show the name of the measurement station."),
              leafletOutput("map")
      ),
      tabItem("meteo",
              h2("Meteorology"),
              # FIX: the original spelled the argument `choiceValue`, which
              # only worked via partial matching; the real name is
              # `choiceValues` (also fixed in the two inputs below).
              checkboxGroupInput("rain_select",
                                 label = "Select locations to Visualise",
                                 choiceNames = unique(rain$Reservoir),
                                 choiceValues = unique(rain$Reservoir),
                                 selected = rain$Reservoir,
                                 inline = TRUE),
              radioButtons("rain_cum", "Daily or cumulative rainfall",
                           choices = c("Daily", "Cumulative"),
                           selected = "Daily"),
              plotOutput("rainfall_graph"),
              downloadButton("download_rain", label = "Download data"),
              dataTableOutput("tbl_rain")
      ),
      tabItem("flows",
              h2("Stream Flows"),
              checkboxGroupInput("flow_select",
                                 label = "Select locations to Visualise",
                                 choiceNames = unique(streamflows$CWName),
                                 choiceValues = unique(streamflows$CWName),
                                 selected = streamflows$CWName,
                                 inline = TRUE),
              radioButtons("flow_cum", "Daily or cumulative streamflows",
                           choices = c("Daily", "Cumulative"),
                           selected = "Cumulative"),
              plotOutput("flow_graph"),
              downloadButton("download_flow", label = "Download data"),
              dataTableOutput("tbl_flow")
      ),
      tabItem("res",
              h2("Reservoir Levels and Volumes"),
              # NOTE(review): names come from CWName but values from
              # StationFID -- confirm the two vectors line up row-for-row.
              checkboxGroupInput("reservoir_select",
                                 label = "Select Reservoir(s) to Visualise",
                                 choiceNames = unique(reservoirs$CWName),
                                 choiceValues = unique(reservoirs$StationFID),
                                 selected = "406222A",
                                 inline = TRUE),
              radioButtons("vol_lev", "Volumes or levels?",
                           choices = c("Volumes", "Levels"),
                           selected = "Levels"),
              plotOutput("reservoir_level_graph"),
              downloadButton("download_res", label = "Download data"),
              dataTableOutput("tbl_res")
      )
    )
  )
)
server <- function(input, output) {
  # MAP
  # Station map: one dot per measurement station, coloured by site type;
  # clicking a marker pops up the station name.
  # NOTE(review): "Lattitude" mirrors the column name in stations.csv.
  output$map <- renderLeaflet({
    pal <- colorFactor(c("darkgreen", "blue"), domain = c("StreamFlow", "Reservoir"))
    leaflet(stations) %>%
      addTiles() %>%
      addCircleMarkers(~Longitude, ~Lattitude,
                       color = NA,
                       fillColor = ~pal(SiteType),
                       fillOpacity = 0.8,
                       popup = ~CWName)
  })
  # RAINFALL
  # Reactive subset of the rainfall data restricted to the selected
  # locations and date range; adds a per-location cumulative column when
  # "Cumulative" is chosen.
  rain_select <- reactive({
    dates <- input$daterange
    df <- filter(rain, Reservoir %in% input$rain_select) %>%
      arrange(Date) %>%
      filter(Date >= dates[1] & Date <= dates[2])
    if(input$rain_cum == "Cumulative") {
      df <- group_by(df, Reservoir) %>%
        mutate(Cumulative_Rainfall = cumsum(Rainfall)) %>%
        ungroup()
    }
    return(df)
  })
  # Latest rainfall reading per location.
  # NOTE(review): this reactive appears unused anywhere in this file.
  current_rain <- reactive({
    rain %>%
      group_by(Reservoir, Date) %>%
      summarise(Rainfall = sum(Rainfall)) %>%
      filter(Date == max(Date))
  })
  # Line chart for cumulative rainfall, dodged bars for daily rainfall.
  output$rainfall_graph <- renderPlot({
    df <- rain_select()
    if(input$rain_cum == "Cumulative"){
      ggplot(df, aes(Date, Cumulative_Rainfall, col = Reservoir)) +
        geom_line(size = 1)
    } else {
      ggplot(df, aes(Date, Rainfall, fill = Reservoir)) +
        geom_col(position = "dodge")
    }
  })
  output$tbl_rain <- renderDataTable(rownames= FALSE, {
    rain_select()
  })
  # CSV export of the currently selected rainfall subset.
  # NOTE(review): joining with "_" yields names like
  # "rainfall_data_2020-01-01_.csv" -- presumably unintended, but cosmetic.
  output$download_rain <- downloadHandler(
    filename = function() {
      paste('rainfall_data', Sys.Date(), '.csv', sep = "_")
    },
    content = function(con) {
      df <- rain_select()
      write.csv(df, con, row.names = FALSE)
    }
  )
# STREAMFLOWS
flow_select <- reactive({
dates <- input$daterange
df <- filter(streamflows, CWName %in% input$flow_select) %>%
arrange(Date) %>%
filter(Date >= dates[1] & Date <= dates[2])
if(input$flow_cum == "Cumulative") {
df <- group_by(df, CWName) %>%
mutate(Cumulative_Flow = cumsum(Flow)) %>%
ungroup()
}
return(df)
})
output$flow_graph <- renderPlot({
df <- flow_select()
if(input$flow_cum == "Cumulative"){
ggplot(df, aes(Date, Cumulative_Flow, col = CWName)) +
geom_line(size = 1)
} else {
ggplot(df, aes(Date, Flow, fill = CWName)) +
geom_col(position = "dodge")
}
})
output$tbl_flow <- renderDataTable(rownames= FALSE, {
flow_select()
})
output$download_flow <- downloadHandler(
filename = function() {
paste('streamflow_data', Sys.Date(), '.csv', sep = "_")
},
content = function(con) {
df <- rain_select()
write.csv(df, con, row.names = FALSE)
}
)
  # RESERVOIRS
  # Selected reservoirs within the date range. "Levels" keeps one row per
  # reservoir per day; "Volumes" sums the volume across the selection.
  reservoir_select <- reactive({
    dates <- input$daterange
    if (input$vol_lev == "Levels") {
      filter(reservoirs, StationFID %in% input$reservoir_select) %>%
        arrange(Date) %>%
        filter(Date >= dates[1] & Date <= dates[2])
    } else {
      filter(reservoirs, StationFID %in% input$reservoir_select) %>%
        group_by(Date) %>%
        summarise(Volume = sum(Volume, na.rm = TRUE)) %>%
        arrange(desc(Date)) %>%
        filter(Date >= dates[1] & Date <= dates[2])
    }
  })
  # Most recent observation per reservoir (feeds the overview value boxes).
  current_res <- reactive({
    reservoirs %>%
      group_by(CWName) %>%
      filter(Date == max(Date))
  })
  output$reservoir_level_graph <- renderPlot({
    df <- reservoir_select()
    if (input$vol_lev == "Levels") {
      # Dashed line marks each reservoir's full supply level (FSL).
      ggplot(df, aes(Date, Level, col = CWName)) +
        geom_line(size = 1) +
        geom_line(aes(Date, FSL, col = CWName), linetype = "dashed")
    } else {
      ggplot(df, aes(Date, Volume)) +
        geom_line(size = 1)
    }
  })
  output$tbl_res <- renderDataTable(rownames= FALSE, {
    reservoir_select()
  })
  # CSV export of the currently selected reservoir subset.
  output$download_res <- downloadHandler(
    filename = function() {
      paste('reservoir_data', Sys.Date(), '.csv', sep = "_")
    },
    content = function(con) {
      df <- reservoir_select()
      write.csv(df, con, row.names = FALSE)
    }
  )
  # Overview value boxes: current storage as a (truncated) percentage of
  # full supply volume for the three main reservoirs.
  output$lauriston_today <- renderValueBox({
    df <- current_res()
    df <- filter(df, CWName == "Lauriston")
    valueBox(trunc(df$Volume/df$FSLVolume * 100),
             "Lauriston Reservoir",
             icon = icon("percent"), href = "#res")
  })
  output$ucoliban_today <- renderValueBox({
    df <- current_res()
    df <- filter(df, CWName == "Upper Coliban")
    valueBox(trunc(df$Volume/df$FSLVolume * 100),
             "Upper Coliban Reservoir",
             icon = icon("percent"), href = "#res")
  })
  output$malmsbury_today <- renderValueBox({
    df <- current_res()
    df <- filter(df, CWName == "Malmsbury")
    valueBox(trunc(df$Volume/df$FSLVolume * 100),
             "Malmsbury Reservoir",
             icon = icon("percent"), href = "#res")
  })
}
# NOTE(review): pinning the browser is a developer convenience; it has no
# effect on deployed apps and fails silently on machines without firefox.
options(browser = "firefox")
shinyApp(ui, server)
|
de91ac32af97684cf144dce86200972c7f5bd4ac | f7105536a44be844d652f28e5c5b5bab0db66aa8 | /R/Lagutin/4.R | 0851c8c4c6823a2dad935e8a91715680d55f0d3f | [] | no_license | DmitryZheglov/code | e6f143c21287a250c01c639659b672fdef089bbe | fdec951c8dcf3145109076bc78f0646217b1b822 | refs/heads/master | 2022-12-13T11:43:00.962046 | 2019-08-12T18:58:55 | 2019-08-12T18:58:55 | 93,278,292 | 1 | 0 | null | 2022-12-07T23:49:07 | 2017-06-03T23:00:52 | Jupyter Notebook | UTF-8 | R | false | false | 1,525 | r | 4.R | x=read.table("C:/Users/user/Desktop/proga/R/Lagutin/9/1.txt")
x35=read.table("C:/Users/user/Desktop/proga/R/Lagutin/9/35.txt")
x=unlist(x)
x35=unlist(x35)
for( i in 1:100){
if (x[i]<=0.2){
x[i]=0
}
}
for( i in 1:100){
if (x35[i]<=0){
x35[i]=0
}
}
i1=-log(1-x35)
i2=-(log(1-x)-0.2)
# Two-sample Cramer-von Mises statistic (Rosenblatt form).
# x, y: numeric samples. Returns the statistic S; larger values indicate a
# bigger discrepancy between the two empirical distributions.
ros.test=function(x,y) {
  n=length(x)
  m=length(y)
  # Sorted ranks of each sample within the pooled sample.
  r=sort(rank(c(x,y))[1:n])
  rr=seq_len(n)
  s=sort(rank(c(y,x))[1:m])
  ss=seq_len(m)
  S=n*m/(n+m)*(1/(n*m)*(1/6+1/m*sum((r-rr)^2)+1/n*sum((s-ss)^2))-2/3)
  # FIX: dropped the dead `p.value=1-pCvM(S)` line; its result was never
  # used and pCvM() (package goftest) need not be attached.
  return(S)
}
# Two-sample comparison of a transformed sample against an alternative.
# NOTE(review): sample sizes are hard-coded as 100/50/99/198 in the test
# statistics below -- confirm they match the files being read.
y=t(read.table("sasha.txt"))
x=sqrt(y)
alt=t(read.table("35.txt"))
plot(seq(0.01,1,0.01),y,"l")
# Mann-Whitney test: normal approximation of the rank-sum statistic.
((sum(rank(c(alt,x))[1:100])-50*101)-50*100)/sqrt(100*100*201/12)
# Student's t test: two-sample statistic with pooled variance, compared
# against the qt(0.975, 198) quantile printed below.
sqrt(50)*(mean(x)-mean(alt))/sqrt((99*sd(alt)^2+99*sd(x)^2)/198)
qt(0.975,198)
# NOTE(review): 2 - 2*pnorm(...) hard-codes a previously computed value.
2-2*pnorm(0.7892146)
ks.test(alt,x,alternative = "g")
# Overlay the two empirical CDFs for a visual comparison.
plot(Vectorize(ecdf(x)))
curve(ecdf(alt)(x),0,1,add=TRUE)
# NOTE(review): KriteriySmirnov() is defined elsewhere, not in this file.
KriteriySmirnov(alt,x)
################################11.4
# Hodges-Lehmann estimator: the median of all pairwise Walsh averages
# (x[i]+x[j])/2 with i <= j, after a logit transform of the data.
y=t(read.table("my.txt"))
x=log(y/(1-y))
mean(x)
median(x)
# FIX: the original grew `vec` with c() inside a double loop (quadratic
# copying) and hard-coded the sample size 100; this builds the same set of
# Walsh averages in one vectorised step for any sample size.
pairsums <- outer(as.vector(x), as.vector(x), "+")
vec <- pairsums[upper.tri(pairsums, diag = TRUE)] / 2
median(vec)
################################12.5
# Chi-square goodness-of-fit: bin y into 7 equal-width bins on (0, 1] and
# test against the uniform distribution.
y=t(read.table("my.txt"))
delta=seq(0,1,1/7)
num=vector("numeric",7)
for(j in 1:7) {
  # Left-open bins (delta[j], delta[j+1]]; a value of exactly 0 would be
  # dropped -- presumably y is strictly positive. TODO confirm.
  num[j]=sum((y>delta[j])*(y<=delta[j+1]))
}
chisq.test(num,p=rep(1/7,7))
###############################11.5
# Symmetry check: centre the transformed sample at its median and measure
# the discrepancy between F(x) and 1 - F(-x) using the empirical CDF.
y=t(read.table("sasha.txt"))
x=log(1/(1-y))
x=x-median(x)
f=function(z) return(ecdf(x)(z))
R=sum((1-f(x)-f(-x))^2)
curve(f(x))
32eaea0a698b5bcb4836c4af29d5c38cd854cf2e | 56b87d81fe06642a77955c0f79e6236bc01bc44c | /R/mixIndependK.R | 03d3ce344c3655891841241a020a536fe1bdf0c9 | [] | no_license | ice4prince/mixIndependR | ab8940b768f08ef5409ebacc082a884306033ab9 | 21041d4b857e0dc92ba00a5869c818304777e20e | refs/heads/main | 2023-03-23T07:08:49.742812 | 2021-03-22T08:44:12 | 2021-03-22T08:44:12 | 315,141,861 | 3 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,513 | r | mixIndependK.R | #'Quick pvalue of total number of heterozygous loci
#'@details This function is a summary of pipeline for number of heterozygous loci (K), and generates the p-value of K for the target dataset.
#'@usage mixIndependK(x,sep,t,B)
#'@param x a dataset of alleles. Each row denotes each individual.One allele in one cell.In the (2r-1)th column, there is the same locus with the 2r-th column; noted: no column for ID, make row.names=1 when importing.
#'@param sep allele separator in the imported genotype data. Note: when using the special character like "|", remember to protect it as "\\|".
#'@param t times of simulation in "Simulate_DistK" and "Simulate_DistX".
#'@param B times of bootstrapping in Chi Squares Test.
#'@return pvalue (1-cumulative probabilities) for the number of heterozygous loci(K)
#'@export
#'@examples
#'x <- data.frame(SNP1=c("A|A","T|T","A|T","A|T"),
#' STR1=c("12|12","13|14","13|13","14|15"))
#'mixIndependK(x,sep ="\\|",10,10)
mixIndependK<-function(x,sep="\\|",t,B){
  # Pipeline: compare the observed distribution of the number of
  # heterozygous loci (K) with its expectation, then derive a p-value from
  # a simulated null distribution of the chi-square statistic.
  n_indiv <- nrow(x)
  allele_freq <- AlleleFreq(x, sep)
  hetero <- Heterozygous(x, sep)
  hetero_prob <- RxpHetero(hetero, allele_freq, HWE = F)
  obs_dist <- FreqHetero(hetero)
  exp_dist <- DistHetero(hetero_prob)
  expected <- exp_dist$Density
  observed <- obs_dist$Freq
  # Simulate K under independence to obtain the null chi-square values.
  sims <- Simulate_DistK(hetero_prob, n_indiv, t)
  chisq_null <- Dist_SimuChisq(sims, exp_dist$Density, B)
  # Trim leading/trailing empty classes before running the chi-square test.
  nonzero <- which(!observed == 0)
  lo <- min(nonzero)
  hi <- max(nonzero)
  test_res <- chisq.test(observed[lo:hi],
                         p = expected[lo:hi] / sum(expected[lo:hi]),
                         simulate.p.value = T, B = B)
  # p-value: 1 minus the empirical CDF of the simulated statistics
  # evaluated at the observed statistic.
  null_cdf <- ecdf(chisq_null)
  return(1 - null_cdf(test_res$statistic))
}
|
c4216f8143f617c85dd0d9765bf5a11b76bf0fb6 | de716bfe81c712e0404ea799096c53bdecd39437 | /app.R | b6ee127357e003f0212a59b8cc56ec1c29cbfbbf | [] | no_license | usfviz/elisesong-hw3 | 041c7579e04681bc603db3232b32a545fa882fe8 | d793a0bcd3c08b99ded2f3d9a17c92b7059b2b19 | refs/heads/master | 2021-01-19T22:59:19.493862 | 2017-05-08T17:49:11 | 2017-05-08T17:49:11 | 88,901,260 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,399 | r | app.R | rm(list = ls())
cat("\014")
if (!require(GGally))
{
install.packages("GGally")
}
if (!require(plotly))
{
install.packages("plotly")
}
library("shiny")
library("ggplot2")
library("GGally")
library("plotly")
#setwd("/Users/Elise/DataViz/HW3")
df <- read.csv("dataset_Facebook.csv", sep=";", header=T)
# UI: a single main panel with one tab per visualization.
ui <- fluidPage(
  headerPanel("Facebook Visualization"),
  mainPanel(
    tabsetPanel(
      tabPanel(title = "Bubble Plot", plotlyOutput("bubble", width= 900, height=600)),
      tabPanel(title = "Scatterplot Matrix", plotlyOutput("scatter", width= 800, height=500)),
      tabPanel(title = "Parallel Coordinates Plot", plotlyOutput("parallel", width= 800, height=500))
    )
  )
)
server <- function(input, output) {
  # Bubble plot: impressions vs likes, sized by total interactions.
  output$bubble <- renderPlotly({
    ggplot(df, aes(like, Lifetime.Post.Total.Impressions)) +
      geom_point(aes(size = Total.Interactions, colour = Type))
  })
  # Scatterplot matrix over the four interaction metrics, coloured by post
  # type; the legend is suppressed since colour is shown in every panel.
  output$scatter <- renderPlotly({
    scatter_data <- subset(df, select = c("comment", "like", "share", "Total.Interactions", "Type"))
    ggplotly(ggpairs(data = scatter_data, columns = 1:4, columnLabels = colnames(scatter_data)[1:4],mapping = ggplot2::aes(alpha = 0.6, color=Type))+
               theme(legend.position = "none"))
  })
  # Parallel coordinates over columns 16-19, centred per variable.
  # NOTE(review): positional column indices are brittle -- confirm they
  # still point at the intended engagement columns of the CSV.
  output$parallel <- renderPlotly({
    ggparcoord(data = df, columns = 16:19, groupColumn = 'Type', scale = 'center')
  })
}
shinyApp(ui = ui, server = server)
|
c329a977e0c49b73ecaca876d3ac8081c6c1d862 | 2e0043e367ea1b79809990ac65b9a4e2e347a57c | /Keras/mango1.R | 8773d1d024666721095b257ce53cabda1eb57aa5 | [] | no_license | anhnguyendepocen/DataScienceWithR | 506e4a689071c913b219307a685949ff4377dc7f | 3579210d701b7bcd15ba1e9c64ef89bb1e3dd9ed | refs/heads/master | 2021-05-26T03:12:13.515462 | 2019-12-17T10:31:13 | 2019-12-17T10:31:13 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 712 | r | mango1.R | #Erum Day 1
#Erum Day 1
# Workshop scratch script: split the iris data and build preprocessing
# recipes for a keras model.
library(keras)
library(caret)
library(recipes)
library(rsample)
# NOTE(review): the seed is set twice with different values; only the
# second call is effective.
use_session_with_seed(06061979)
use_session_with_seed(6061979)
###########
y <- iris[,5]
# NOTE(review): this takes the first 4 ROWS; presumably the first 4
# feature COLUMNS (iris[, 1:4]) were intended -- X is unused below anyway.
X <- iris[1:4,]
# NOTE(review): `iris` is shadowed by the split object from here on.
iris <- initial_split(iris,prop=0.8, strata = "Species")
fullData <-list(train = training(iris),test = testing(iris) )
# First recipe (one-hot outcome, prepped and baked); immediately
# overwritten by the second, centred/scaled recipe below.
irisRecipe <- recipe(Species ~ . , fullData$train) %>%
  step_dummy(all_outcomes(), one_hot = TRUE, role = "outcome") %>%
  prep(fullData$train) %>%
  bake(newdata = fullData$train)
irisRecipe <- recipe(Species ~ . , fullData$train) %>%
  step_dummy(all_outcomes(), one_hot = TRUE, role = "outcome") %>%
  step_center(all_predictors()) %>%
  step_scale(all_predictors()) %>%
  prep(fullData$train)
d2e54a2fe6a8511eb18bdbc1250146db825f86dc | 3cd0cfd6944b5083ab48dc7a48d04d07d9bc6d2a | /cachematrix.R | 3a6ffee7e2aa4a57b4472ff7e11bd42de3269184 | [] | no_license | erikvh/ProgrammingAssignment2 | 56886fc93af32163d81ee8d26c197698eee7b0a6 | 30f742c25e47699624661f26ec3f969ff582699f | refs/heads/master | 2020-12-29T00:41:44.001084 | 2015-05-23T12:15:17 | 2015-05-23T12:15:17 | 36,015,291 | 0 | 0 | null | 2015-05-21T13:47:46 | 2015-05-21T13:47:42 | R | UTF-8 | R | false | false | 2,039 | r | cachematrix.R | # To reduce computation cost of repeatedly computing the inverse of a matrix,
# you can take advantage of caching the result.
# By using the following 2 functions you can do so: Cache the inverse of a Matrix.
# The first function makeCacheMatrix create a list of functions to:
# 1. Set the value of the matrix
# 2. Get the value of the matrix
# 3. Set the value of the inverse of the matrix
# 4. Get the value of the inverse of the matrix
# Build a cache-aware matrix: a list of closures that share the matrix `x`
# and a memo slot for its inverse (invalidated whenever `x` is replaced).
makeCacheMatrix <- function(x = matrix()) {
  cached_inv <- NULL
  # Replace the stored matrix and drop any stale cached inverse.
  set <- function(y) {
    x <<- y
    cached_inv <<- NULL
  }
  # Current matrix.
  get <- function() {
    x
  }
  # Store a freshly computed inverse in the cache.
  setInverse <- function(solve) {
    cached_inv <<- solve
  }
  # Cached inverse, or NULL when it has not been computed yet.
  getInverse <- function() {
    cached_inv
  }
  list(set = set, get = get, setInverse = setInverse, getInverse = getInverse)
}
# The function cacheSolve returns the inverse of the matrix
# It test if the inverse of the matrix is already calculated.
# If it has been calculated this result is returned,
# with as message that this result is cached.
# If no previus result has been calculated, the function calculates the inverse of the matrix
# and use the setInverse function to add the result to the cache
# and finaly returns the in inverse of the matrix.
# Return the inverse of the special "matrix" created by makeCacheMatrix(),
# computing it with solve() only on the first call and serving the cached
# copy (with a message) on every subsequent call.
cacheSolve <- function(x, ...) {
  cached <- x$getInverse()
  if (!is.null(cached)) {
    message("getting cached data")
    return(cached)
  }
  inv <- solve(x$get(), ...)
  x$setInverse(inv)
  inv
}
|
800e49d46d429e9eb4ac60f9f74fb085266ba290 | 021498dd1ed1eb755575e7dfbc8b8f9fae927831 | /man/GMLAbstractGeneralOperationParameter.Rd | 0c9c29d7cc18fe1b8e0c59969f65671bd27e084d | [] | no_license | 65MO/geometa | f75fb2903a4f3633a5fcdd4259fd99f903189459 | c49579eb5b2b994c234d19c3a30c5dad9bb25303 | refs/heads/master | 2020-04-08T12:22:44.690962 | 2018-11-22T22:51:57 | 2018-11-22T22:51:57 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | true | 1,322 | rd | GMLAbstractGeneralOperationParameter.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/GMLAbstractGeneralOperationParameter.R
\docType{class}
\name{GMLAbstractGeneralOperationParameter}
\alias{GMLAbstractGeneralOperationParameter}
\title{GMLAbstractGeneralOperationParameter}
\format{\code{\link{R6Class}} object.}
\usage{
GMLAbstractGeneralOperationParameter
}
\value{
Object of \code{\link{R6Class}} for modelling an GMLAbstractGeneralOperationParameter
}
\description{
GMLAbstractGeneralOperationParameter
}
\section{Fields}{
\describe{
\item{\code{minimumOccurs}}{}
}}
\section{Inherited methods}{
\describe{
from \code{GMLDefinition}
}
}
\section{Methods}{
\describe{
\item{\code{new(xml, defaults, id)}}{
This method is used to instantiate a GML AbstractGeneralOperationParameter
}
\item{\code{setMinimumOccurs(minimumOccurs)}}{
Sets the minimum occurs, object of class \code{integer}
}
}
}
\references{
ISO 19136:2007 Geographic Information -- Geographic Markup Language.
http://www.iso.org/iso/iso_catalogue/catalogue_tc/catalogue_detail.htm?csnumber=32554
OGC Geography Markup Language. http://www.opengeospatial.org/standards/gml
}
\author{
Emmanuel Blondel <emmanuel.blondel1@gmail.com>
}
\keyword{GML}
\keyword{ISO}
\keyword{abstract}
\keyword{general}
\keyword{operation}
\keyword{parameter}
|
c88e8109733be64d86967d08f11bc64d61a44ae9 | ffdea92d4315e4363dd4ae673a1a6adf82a761b5 | /data/genthat_extracted_code/merTools/examples/REsdExtract.Rd.R | 35545c988b2da447b53ae2e0d1b20d43b0515074 | [] | no_license | surayaaramli/typeRrh | d257ac8905c49123f4ccd4e377ee3dfc84d1636c | 66e6996f31961bc8b9aafe1a6a6098327b66bf71 | refs/heads/master | 2023-05-05T04:05:31.617869 | 2019-04-25T22:10:06 | 2019-04-25T22:10:06 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 256 | r | REsdExtract.Rd.R | library(merTools)
### Name: REsdExtract
### Title: Extract the standard deviation of the random effects from a
### merMod object
### Aliases: REsdExtract
### ** Examples
fm1 <- lmer(Reaction ~ Days + (Days | Subject), sleepstudy)
REsdExtract(fm1)
|
5419ffec30a4f4746e41e050ed01de58ceb84da8 | 09f85541cd4c7247678046ffca87252c3f68b018 | /R/boxfun.R | 5157b7fb8bf300dae1004dc1d7ae0c2ff99a8cfb | [] | no_license | cran/prim | 00e281bb8d680ad3609384491f9ee7a6e1e64d0b | 1db1e4613b6c30c53242eb8adc6147a0e5805c7c | refs/heads/master | 2023-01-13T02:57:18.395228 | 2023-01-06T17:50:35 | 2023-01-06T17:50:35 | 17,698,723 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 5,825 | r | boxfun.R |
####################################################################
### Find points of x that are in a single box
###
### Parameters
### x - data matrix
### ranges - matrix of min and max values which define a box
### d - dimension of data
###
### Returns
### Data points which lie within the box
####################################################################
in.box <- function(x, box, d, boolean=FALSE)
{
x.box.ind <- rep(TRUE, nrow(x))
for (i in 1:d)
x.box.ind <- x.box.ind & (box[1,i] <= x[,i]) & (x[,i] <= box[2,i])
if (boolean)
return(x.box.ind)
else
return(x[x.box.ind,])
}
###############################################################################
## in.box.seq: allocate the rows of x to a sequence of boxes
##
## x       - data matrix
## y       - optional response values (one per row of x)
## box.seq - list with $num.class and $box (e.g. prim output)
##
## Each point is assigned to the first box (in sequence order) containing
## it; later boxes only see the points left over.
##
## Returns a list with per-box fields:
##   $x    - points in the box
##   $y, $y.mean - responses and their mean (only when y is supplied)
##   $box  - box limits
##   $mass - fraction of all n points captured by the box
###############################################################################
in.box.seq <- function(x, y, box.seq)
{
  n.boxes <- box.seq$num.class
  d <- ncol(x)
  n <- nrow(x)
  unassigned <- rep(TRUE, n)
  out <- list()
  for (k in seq_len(n.boxes))
  {
    limits <- box.seq$box[[k]]
    members <- unassigned
    for (j in seq_len(d))
      members <- members & (x[,j] >= limits[1,j]) & (x[,j] <= limits[2,j])
    out$x[[k]] <- x[members,]
    if (!missing(y))
    {
      y.k <- y[members]
      out$y[[k]] <- y.k
      out$y.mean[[k]] <- mean(y.k)
    }
    out$box[[k]] <- limits
    out$mass[[k]] <- sum(members)/n
    ## points captured here are no longer available to later boxes
    unassigned <- unassigned & !members
  }
  return (out)
}
###############################################################################
## prim.which.box: box number that each data point falls into
##
## x       - data matrix (a single point may be given as a plain vector)
## box.seq - list with $num.class and $box (e.g. prim output)
##
## Returns a numeric vector with one entry per row of x: the index of the
## first box in the sequence containing the point, or 0 when none does.
###############################################################################
prim.which.box <- function(x, box.seq)
{
  if (is.vector(x)) x <- t(as.matrix(x))
  ## FIX: removed the unused local `ind <- rep(0, nrow(x))` from the
  ## original; it was assigned and never read.
  m <- box.seq$num.class
  d <- ncol(x)
  n <- nrow(x)
  x.ind <- rep(TRUE, n)
  x.which.box <- rep(0,n)
  for (k in seq_len(m))
  {
    x.ind.curr <- x.ind
    box.curr <- box.seq$box[[k]]
    for (j in seq_len(d))
      x.ind.curr <- x.ind.curr & (x[,j]>= box.curr[1,j]) & (x[,j] <= box.curr[2,j])
    x.which.box[x.ind.curr & x.ind] <- k
    ## exclude those in current box (x.ind.curr) for the next iteration
    x.ind <- x.ind & !x.ind.curr
  }
  return (x.which.box)
}
###############################################################################
## counts.box: number of data points falling into each box of a sequence
##
## x       - data matrix
## box.seq - box sequence (prim object)
##
## Returns a numeric vector; entry k is the count for box k.
###############################################################################
counts.box <- function(x, box.seq)
{
  box.of <- prim.which.box(x, box.seq)
  tallies <- rep(0, box.seq$num.class)
  for (k in seq_along(tallies))
    tallies[k] <- sum(box.of == k)
  return(tallies)
}
###############################################################################
## vol.box: hypervolume of a hyperbox
##
## box - 2 x d matrix of lower/upper limits (rows in either order)
##
## Returns the product of the absolute side lengths.
###############################################################################
vol.box <- function(box)
{
  side.lengths <- abs(box[2,] - box[1,])
  return(prod(side.lengths))
}
####################################################################
## overlap.box.seq: pairwise overlap between two box sequences
##
## box.seq1, box.seq2 - box sequences (prim objects, $num.hdr.class)
## rel.tol            - relative shrink tolerance for overlap.box()
##
## Returns an M1 x M2 logical matrix; entry (i, j) is TRUE when box i
## of the first sequence overlaps box j of the second.
####################################################################
overlap.box.seq <-function(box.seq1, box.seq2, rel.tol=0.01)
{
  M1 <- box.seq1$num.hdr.class
  M2 <- box.seq2$num.hdr.class
  ## FIX: removed the unused local `d <- ncol(box.seq1$box[[1]])` from
  ## the original; it was assigned and never read.
  overlap.mat <- matrix(FALSE, nrow=M1, ncol=M2)
  for (i in seq_len(M1))
  {
    box1 <- box.seq1$box[[i]]
    for (j in seq_len(M2))
    {
      box2 <- box.seq2$box[[j]]
      overlap.mat[i,j] <- overlap.box(box1, box2, rel.tol=rel.tol)
    }
  }
  return(overlap.mat)
}
####################################################################
## overlap.box: decide whether two boxes overlap
##
## box1, box2 - 2 x d matrices of lower/upper limits
## rel.tol    - each box is shrunk inward by this fraction of its
##              side length before testing, so near-touching boxes
##              do not count as overlapping
##
## Returns TRUE when the shrunken boxes intersect in every dimension.
####################################################################
overlap.box <-function(box1, box2, rel.tol=0.01)
{
  d <- ncol(box1)
  ## shrink a box inward by rel.tol of its side lengths
  shrink <- function(b) {
    span <- abs(apply(b, 2, diff))
    b[1,] <- b[1,] + rel.tol*span
    b[2,] <- b[2,] - rel.tol*span
    b
  }
  a <- shrink(box1)
  b <- shrink(box2)
  hit <- TRUE
  for (k in seq_len(d))
    hit <- hit & (((a[1,k] <= b[1,k]) & (b[1,k] <= a[2,k]))
                  | ((a[1,k] <= b[2,k]) & (b[2,k] <= a[2,k]))
                  | ((b[1,k] <= a[1,k]) & (a[1,k] <= b[2,k]))
                  | ((b[1,k] <= a[2,k]) & (a[2,k] <= b[2,k])))
  return(hit)
}
|
8cd6f63f4fafba51fd69a2fff3a876bb4745d4b7 | b774f1b284654480f4daa64c13d5d1801c63da5d | /man/create_design.Rd | 8263ad58bb6ab370da97b6c2200b6e9eed2cf46a | [] | no_license | cran/casen | 80f04e7edf5824de774011527dca33045a2b764a | 58f3386a618b4ab4446f84a446d1696776f613dc | refs/heads/master | 2022-09-30T08:52:41.563159 | 2022-09-04T22:20:05 | 2022-09-04T22:20:05 | 249,201,245 | 0 | 0 | null | null | null | null | UTF-8 | R | false | true | 1,117 | rd | create_design.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/internals.R
\name{create_design}
\alias{create_design}
\title{Crea un objeto de disenio complejo a partir de un data frame}
\usage{
create_design(d, variable, agrupacion, peso, conglomerado, estrato)
}
\arguments{
\item{d}{un data.frame o tibble con la encuesta CASEN (o un subconjunto acotado a una region, etc)}
\item{variable}{una columna de tipo numerico, por ejemplo ytotcorh que es la opcion por defecto}
\item{agrupacion}{una columna de tipo texto/factor, por ejemplo region que es la opcion por defecto}
\item{peso}{una columna de tipo numerico, por defecto es expr que corresponde al factor de expansion regional de acuerdo al manual CASEN 2017}
\item{conglomerado}{una columna de tipo numerico, por defecto es varunit de acuerdo al manual CASEN 2017}
\item{estrato}{una columna de tipo numerico, por defecto es varunit de acuerdo al manual CASEN 2017}
}
\value{
Un objeto de disenio a partir del objeto data.frame de entrada.
}
\description{
Crea un objeto de disenio complejo a partir de un data frame
}
\keyword{internal}
|
81c453285db28d8b0a1dad68abff03b432855bc8 | f530a6debb078426b7d2f07462bc090ad164ea24 | /nest_predation_MA.R | 062d5e77e29e80c48f50327376b00ae065221db0 | [
"MIT"
] | permissive | balint-cyber/nest_predation_MA | fc38a0ffaa5c86d9dc484fe0a2e942dffde9ae87 | 30f8a7cdfa78a85335cb8e01bfd679a7f42f8d83 | refs/heads/master | 2021-06-13T12:12:10.944725 | 2017-04-02T12:05:49 | 2017-04-02T12:05:49 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 84,667 | r | nest_predation_MA.R | # R script by Malgorzata Lagisz
# for "Does urbanization affect predation of bird nests? A meta-analysis"
# by Ernő Vincze, Gábor Seress, Malgorzata Lagisz, Shinichi Nakagawa, Niels Dingemanse, Philipp Sprau
# Accepted for journal: "Frontiers in Ecology and Evolution", 2017
# LICENSE
# The MIT License (MIT)
#
# Copyright (c) 2017 Malgorzata Lagisz
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
# Final dataset in the file: nest_predation_MA_data.csv
# #Variables:
# Paper_ID - Identification number of the paper - P + 3 digits (i.e. P001, P035)
# Group_ID - Identification number of the research group (NOTE: this will overlap with Species!)
# Journal_ID - Six-character identification code for the journals (for analysing publication bias - see full list on a separate sheet)
# Pub_year - Year of publication (for analysing publication bias)
# rho - Positive value means higher survival in more urbanized habitat; negative value means higher mortality in more urbanized habitat
# total_nests - The total number of nests used in calculating rho (sample size)
# variance - Variance calculated from sample size (1 / (total_nests - 3))
# source - The values rho is calculated from. "numbers" = the exact numbers of predated and non-predated nests is given in the text; "percentage" = the numbers are calculated from percentages of predation/survival given in the text or table; "figure" = the numbers are calculated from measuring plotted data points on a figure; "DPR" = the numbers are converted from daily predation rates; other sources?
# death_cause - 0 = predation and other sources of mortality (weather, abandonment) not differentiated; 1 = only nests that died from predation are included
# partial_predation - How nests that were partially predated are treated. "pred" = nests where at least one egg/offspring died are considered predated, only completely intact nests count as survived; "alive" = nests where at least one egg/offspring survided are considered survived, only nests where no offspring survived count as predated; none = there was only 1 egg/offspring per nest
# Nest_type - Natural nest population (nat) or artificial nests (art)
# Egg_number - Number of eggs in a nest (including real and fake eggs)
# Fake eggs - how many fake eggs per nest
# Nest_position - 1 = on the ground; 2 = elevated (shrubs or trees); 1,5 = both positions alternating, without separate effect sizes given for the two
# Nest_openness - 1 = open cup-nest; 2 = closed nest in a box; 1,5 = orb-like nest on tree
# Study_days - The number of days the eggs were exposed to predation
# Study_year - The year(s) belonging to the predation rate
# Species - The species the nest / eggs belong to
# --- Session setup ---
# Record the R environment used for the published analysis.
sessionInfo()
#R version 3.2.4 (2016-03-10)
#Platform: x86_64-apple-darwin13.4.0 (64-bit)
#Running under: OS X 10.9.5 (Mavericks)
options(scipen=100) # suppress scientific notation in printed output
rm(list=ls()) # NOTE(review): clearing the global workspace in a script is discouraged; prefer running in a fresh R session
library(metafor) # multilevel meta-analytic models (rma, rma.mv, transf.ztor)
library(ape) # phylogenetic tree handling (read.tree, vcv, drop.tip, ...)
# ################## Custom Functions ##################
# ## Function to convert r (rho) to Zr
# r.to.Zr <- function(r){
# Zr <- 0.5*(log(1+r)-log(1-r))
# print(Zr)}
# ## Function to convert Zr to r
# Zr.to.r <- function(Zr){
# r <- (exp(2*Zr)-1)/(exp(2*Zr)+1)
# print(r)}
# ######################################################
# ### Calculating effect sizes - SKIP, as this was already done and is included in the dataset:
# ## Calculate Zr using rho (original urbanisation levels accoording to the study authors, adjusted to 0-1 scale)
# Data$Zr <- r.to.Zr(Data$rho)
# ## Calculate Zr (VZr will be the same) using rho2 (uniformized urbanisation level scores, 1-5 scale)
# Data$Zr2 <- r.to.Zr(Data$rho2)
# ## Calculate variance for Zr, basing on sample sizes (total number of nests)
# Data$VZr <- 1/(Data$total_nests-3)
# #write.csv(Data, file="nest_predation_MA_data.csv", row.names = FALSE)
###########################################################
# DATA SUMMARY
###########################################################
## Load the full data set (one row per effect size; 117 ES, 29 columns,
## 51 papers, 45 research groups, 32 species).
Data <- read.csv(file="nest_predation_MA_data.csv", stringsAsFactors = TRUE)
dim(Data)
str(Data) # expect 117 ES, 29 columns, 51 papers, 45 groups, 32 species
head(Data)
## Missing-data audit
table(is.na(Data)) # overall count of NA cells
vapply(Data, function(x) any(is.na(x)), logical(1)) # which columns contain any NA
vapply(Data, function(x) sum(is.na(x)), integer(1)) # NA count per column (Study_days & Species ~50% NA, Nest_height 31 missing)
## Quick distribution checks on key variables
hist(Data$Pub_year)
range(Data$Pub_year)
table(Data$Study_year) # contains year ranges, so not usable directly - analyses use Median_year instead
hist(Data$Median_year) # looks fine
hist(Data$rho)
hist(Data$rho2)
plot(Data$rho, Data$rho2) # original vs. uniformised urbanisation scores: only a few points differ
par(mfrow=c(1,1))
plot(Data$Median_year, Data$Pub_year) # publication lags median data-collection year, as expected
cor.test(Data$Median_year, Data$Pub_year) # strongly correlated (cor=0.79, t=13.822, p<0.0001)
###########################################################
# ANALYSES on rho2 (uniformized urbanisation scores)
###########################################################
###########################################################
#### Simple meta-analysis - all data
## MA without random effects
# Baseline model (no multilevel structure) used as a heterogeneity check.
MA_0 <- rma(yi=Zr2, vi=VZr, method="REML", data=Data)
summary(MA_0) #I^2 (total heterogeneity / total variability): 91.1%
#funnel plot (precision = 1/SE of Zr; points jittered to reduce overplotting)
plot(jitter(Data$Zr2), jitter(sqrt(1/Data$VZr)), cex=0.75, xlim=c(-1.1,1.1), xlab="Zr", ylab="Precision (1/SE.Zr)", main="all data")
abline(v=0, lwd=0.5)
## MA with random effects only
# Multilevel model: random intercepts for paper identity and effect-size identity.
MA_all <- rma.mv(yi=Zr2, V=VZr, random=list(~1|Paper_ID,~1|ES_ID), method="REML", data=Data)
summary(MA_all) #estimate ns Zr2 -0.0034 CI -0.0806 0.0738
funnel(MA_all)
#forest(MA_all)
# calculate I2 values: variance components relative to total (incl. typical sampling variance)
s2m <- sum(1/Data$VZr) * (MA_all$k-1) / (sum(1/Data$VZr)^2 - sum((1/Data$VZr)^2)) # typical sampling error
s2t <- sum(MA_all$sigma2)+s2m # total sampling error
I2t <- sum(MA_all$sigma2)/s2t
I2t*100 # 92.7% - total heterogeneity
I2s <- MA_all$sigma2[1]/s2t
I2s*100 # 81.7% variance due to Study
I2e <- MA_all$sigma2[2]/s2t
I2e*100 # 11.0% - residuals against sampling error
# Back-transform Zr estimates to the correlation scale (r) for reporting.
transf.ztor(MA_all$b[1,]) # intercept as r = -0.003
transf.ztor(MA_all$ci.lb[1]) # CI.lb for the intercept as r = -0.080
transf.ztor(MA_all$ci.ub[1]) # CI.ub for the intercept as r = 0.074
res <- data.frame(Model="Meta-analytic mean, all data", M=MA_all$b, CI.lb=MA_all$ci.lb, CI.ub=MA_all$ci.ub, pch=18) #harvest results (Zr); 'res' is extended by rbind below, so order matters
#### Meta-regression on all data with nest_type as moderator
# '-1' drops the intercept so each nest type gets its own mean estimate.
MR_nest_intc <- rma.mv(yi=Zr2, V=VZr, random=list(~1|Paper_ID,~1|ES_ID), mod=~Nest_type-1, method="REML", data=Data)
summary(MR_nest_intc) # in artificial nests signif negative Zr -0.1163 (more predation with incr urbanization);
# in natural nests ns positive Zr 0.0811 (less predation with incr urbanization)
funnel(MR_nest_intc) # 1 outlier ES < -1 (same point as in the full data set)
forest(MR_nest_intc)
summ <- summary(MR_nest_intc)
forest(x=summ$b, ci.lb=summ$ci.lb, ci.ub=summ$ci.ub, slab=dimnames(summ$b)[[1]], xlab="Zr2")
# calculate I2 values
s2m <- sum(1/Data$VZr) * (MR_nest_intc$k-1) / (sum(1/Data$VZr)^2 - sum((1/Data$VZr)^2)) # typical sampling error
s2t <- sum(MR_nest_intc$sigma2)+s2m # total sampling error
I2t <- sum(MR_nest_intc$sigma2)/s2t
I2t*100 # 91.9% - total heterogeneity
I2s <- MR_nest_intc$sigma2[1]/s2t
I2s*100 # 79.9% variance due to Study
I2e <- MR_nest_intc$sigma2[2]/s2t
I2e*100 # 12.0% - residuals against sampling error
transf.ztor(MR_nest_intc$b[1,]) #-0.116
transf.ztor(MR_nest_intc$ci.lb[1]) #-0.224
transf.ztor(MR_nest_intc$ci.ub[1]) #-0.005
transf.ztor(MR_nest_intc$b[2,]) #0.081
transf.ztor(MR_nest_intc$ci.lb[2]) #-0.015
transf.ztor(MR_nest_intc$ci.ub[2]) #0.176
res <- rbind(res, data.frame(Model=c("Artificial vs. Natural nests:"," Artificial nests *"," Natural nests"), M=c(NA,MR_nest_intc$b), CI.lb=c(NA,MR_nest_intc$ci.lb), CI.ub=c(NA,MR_nest_intc$ci.ub), pch=c(20,20,20))) #harvest results (Zr)
# Same model WITH intercept: the Nest_type coefficient is then the natural - artificial difference.
MR_nest_diff <- rma.mv(yi=Zr2, V=VZr, random=list(~1|Paper_ID,~1|ES_ID), mod=~Nest_type, method="REML", data=Data)
summary(MR_nest_diff) # diff between artificial and natural nests signif Zr 0.197 CI 0.050 0.345
transf.ztor(MR_nest_diff$b[2,]) #0.195
transf.ztor(MR_nest_diff$ci.lb[2]) #0.050
transf.ztor(MR_nest_diff$ci.ub[2]) #0.332
res <- rbind(res, data.frame(Model=" Difference: Natural - Artificial nests *", M=MR_nest_diff$b[2], CI.lb=MR_nest_diff$ci.lb[2], CI.ub=MR_nest_diff$ci.ub[2], pch=20)) #harvest results (Zr)
#tidy up extracted results table: back-transform Zr estimates to r and round
res$M <- round(transf.ztor(res$M),3)
res$CI.lb <- round(transf.ztor(res$CI.lb),3)
res$CI.ub <- round(transf.ztor(res$CI.ub),3)
write.csv(res,"MA_MR_alldata_main_res_rho2.csv")
revres <- res[rev(rownames(res)),] #reverse for plotting (from bottom to top)
### PLOT - MA and MR models on all data (forest-style plot written to PDF)
#opar <- par() # make a copy of current settings
#par(opar) # restore original settings
pdf(file="Fig_MA_MR_alldata_rho2.pdf",width=4,height=3,pointsize=8)
par(mfrow=c(1,1))
par(mar=c(5,14,1,1)) # wide left margin for model labels
plot(revres$M, 1:length(revres$M), ylab=NA, yaxt="n", bty="n", xlim=c(-0.5,0.5), ylim=c(0.25, length(revres$M)+.5), xlab="effect size [r]", pch=revres$pch, cex=1, cex.axis=.9)
abline(v=0,lty=3)
mtext(revres$Model, 2, 13, at=1:length(revres$M), las=2, cex=1, font=1, adj=0)
segments(revres$CI.lb, 1:length(revres$M), revres$CI.ub, 1:length(revres$M), lwd=1.25)
dev.off()
#################### ARTIFICIAL NESTS ################
## Subset: artificial-nest studies only (Nest_type == "art").
DataA <- Data[Data$Nest_type == "art", ]
dim(DataA) # 59 31
DataA <- droplevels(DataA)
unlist(lapply(DataA, function(x) sum(is.na(x)))) #how many NA per column: Study_days 18, Nest_height 1
#### MA
#funnel plot
plot(jitter(DataA$Zr2), jitter(sqrt(1/DataA$VZr)), cex=0.75, xlim=c(-1.1,1.1), xlab="Zr2", ylab="Precision (1/SE.Zr)", main="all data")
abline(v=0, lwd=0.5)
## MA with random effects only (paper identity + effect-size identity)
MA_A <- rma.mv(yi=Zr2, V=VZr, random=list(~1|Paper_ID,~1|ES_ID), method="REML", data=DataA)
summary(MA_A) #estimate ns Zr -0.1183 CI -0.2423 0.0056
funnel(MA_A)
# calculate I2 values
s2m <- sum(1/DataA$VZr) * (MA_A$k-1) / (sum(1/DataA$VZr)^2 - sum((1/DataA$VZr)^2)) # typical sampling error
s2t <- sum(MA_A$sigma2)+s2m # total sampling error
I2t <- sum(MA_A$sigma2)/s2t
I2t*100 # 93.2% - total heterogeneity
I2s <- MA_A$sigma2[1]/s2t
I2s*100 # 83.8% variance due to Study
I2e <- MA_A$sigma2[2]/s2t
I2e*100 # 9.4% - residuals against sampling error
transf.ztor(MA_A$b[1,]) #-0.118
transf.ztor(MA_A$ci.lb[1]) #-0.2376
transf.ztor(MA_A$ci.ub[1]) #0.0056
resA <- data.frame(Model="Meta-analytic mean", M=MA_A$b, CI.lb=MA_A$ci.lb, CI.ub=MA_A$ci.ub, pch=18) #harvest results (Zr); extended by rbind below, order matters
#### MR - Meta-Regressions (univariate)
# NOTE: MR_A is deliberately overwritten for each moderator; results are
# harvested into resA immediately after each fit.
# univariate meta-regression with Nest_openness
MR_A <- rma.mv(yi=Zr2, V=VZr, random=list(~1|Paper_ID,~1|ES_ID), mod=~as.factor(Nest_openness)-1, method="REML", data=DataA)
summary(MR_A) # cup ns Zr2 -0.1196 -0.2435 -0.0042 (NOTE(review): this CI excludes 0, so the "ns" label may be a typo - confirm)
sum(MR_A$sigma2)/(sum(MR_A$sigma2)+(sum(1/DataA$VZr)*(MR_A$k-1)/(sum(1/DataA$VZr)^2-sum((1/DataA$VZr)^2))))*100 # total heterogeneity 93.2%
plot(DataA$Zr2 ~ DataA$Nest_openness)
resA <- rbind(resA, data.frame(Model=c("Nest openness:"," Cup"," Hole"), M=c(NA,MR_A$b), CI.lb=c(NA,MR_A$ci.lb), CI.ub=c(NA,MR_A$ci.ub), pch=c(20,20,20))) #harvest results (Zr)
# univariate meta-regression with Nest_openness Hole-Cup difference (model with intercept)
MR_A <- rma.mv(yi=Zr2, V=VZr, random=list(~1|Paper_ID,~1|ES_ID), mod=~as.factor(Nest_openness), method="REML", data=DataA)
summary(MR_A) # diff ns 0.0834 -0.0510 0.2179
# univariate meta-regression with Nest_position
MR_A <- rma.mv(yi=Zr2, V=VZr, random=list(~1|Paper_ID,~1|ES_ID), mod=~as.factor(Nest_position)-1, method="REML", data=DataA)
summary(MR_A) # all ns
sum(MR_A$sigma2)/(sum(MR_A$sigma2)+(sum(1/DataA$VZr)*(MR_A$k-1)/(sum(1/DataA$VZr)^2-sum((1/DataA$VZr)^2))))*100 # total heterogeneity 93.0%
plot(DataA$Zr2 ~ DataA$Nest_position)
resA <- rbind(resA, data.frame(Model=c("Nest position:"," Elevated"," Ground"," Mix"), M=c(NA,MR_A$b), CI.lb=c(NA,MR_A$ci.lb), CI.ub=c(NA,MR_A$ci.ub), pch=c(20,20,20,20))) #harvest results (Zr)
# univariate meta-regression with Egg_number (continuous, standardised via scale())
MR_A <- rma.mv(yi=Zr2, V=VZr, random=list(~1|Paper_ID,~1|ES_ID), mod=~scale(DataA$Egg_number), method="REML", data=DataA)
summary(MR_A) # all ns
sum(MR_A$sigma2)/(sum(MR_A$sigma2)+(sum(1/DataA$VZr)*(MR_A$k-1)/(sum(1/DataA$VZr)^2-sum((1/DataA$VZr)^2))))*100 # total heterogeneity 93.5%
plot(DataA$Zr2 ~ DataA$Egg_number)
resA <- rbind(resA, data.frame(Model=c("Egg number (slope)"), M=c(MR_A$b[2]), CI.lb=c(MR_A$ci.lb[2]), CI.ub=c(MR_A$ci.ub[2]), pch=c(20))) #harvest results (Zr)
# univariate meta-regression with Study_days (NOTE: 18 NA)
MR_A <- rma.mv(yi=Zr2, V=VZr, random=list(~1|Paper_ID,~1|ES_ID), mod=~scale(Study_days), method="REML", data=DataA)
summary(MR_A) # all ns
sum(MR_A$sigma2)/(sum(MR_A$sigma2)+(sum(1/DataA$VZr)*(MR_A$k-1)/(sum(1/DataA$VZr)^2-sum((1/DataA$VZr)^2))))*100 # total heterogeneity 95.6%
plot(DataA$Zr2 ~ DataA$Study_days)
resA <- rbind(resA, data.frame(Model=c("Study duration (slope)"), M=c(MR_A$b[2]), CI.lb=c(MR_A$ci.lb[2]), CI.ub=c(MR_A$ci.ub[2]), pch=c(20))) #harvest results (Zr)
# univariate meta-regression with Median_year
MR_A <- rma.mv(yi=Zr2, V=VZr, random=list(~1|Paper_ID,~1|ES_ID), mod=~scale(Median_year), method="REML", data=DataA)
summary(MR_A) # all ns
sum(MR_A$sigma2)/(sum(MR_A$sigma2)+(sum(1/DataA$VZr)*(MR_A$k-1)/(sum(1/DataA$VZr)^2-sum((1/DataA$VZr)^2))))*100 # total heterogeneity 93.3%
plot(DataA$Zr2 ~ DataA$Median_year)
resA <- rbind(resA, data.frame(Model=c("Median study year (slope)"), M=c(MR_A$b[2]), CI.lb=c(MR_A$ci.lb[2]), CI.ub=c(MR_A$ci.ub[2]), pch=c(20))) #harvest results (Zr)
# univariate meta-regression with Pub_year (publication-bias check)
MR_A <- rma.mv(yi=Zr2, V=VZr, random=list(~1|Paper_ID,~1|ES_ID), mod=~scale(DataA$Pub_year), method="REML", data=DataA)
summary(MR_A) # all ns
sum(MR_A$sigma2)/(sum(MR_A$sigma2)+(sum(1/DataA$VZr)*(MR_A$k-1)/(sum(1/DataA$VZr)^2-sum((1/DataA$VZr)^2))))*100 # total heterogeneity 93.5%
plot(DataA$Zr2 ~ DataA$Pub_year)
resA <- rbind(resA, data.frame(Model=c("Publication year (slope)"), M=c(MR_A$b[2]), CI.lb=c(MR_A$ci.lb[2]), CI.ub=c(MR_A$ci.ub[2]), pch=c(20))) #harvest results (Zr)
# univariate meta-regression with urbmin as a factor
MR_A <- rma.mv(yi=Zr2, V=VZr, random=list(~1|Paper_ID,~1|ES_ID), mod=~as.factor(urbmin)-1, method="REML", data=DataA)
summary(MR_A) # all ns
sum(MR_A$sigma2)/(sum(MR_A$sigma2)+(sum(1/DataA$VZr)*(MR_A$k-1)/(sum(1/DataA$VZr)^2-sum((1/DataA$VZr)^2))))*100 # total heterogeneity 94.3
plot(DataA$Zr2 ~ DataA$urbmin)
resA <- rbind(resA, data.frame(Model=c("Min urbanisation score:"," 1"," 2"," 3"," 4"), M=c(NA,MR_A$b), CI.lb=c(NA,MR_A$ci.lb), CI.ub=c(NA,MR_A$ci.ub), pch=c(20,20,20,20,20))) #harvest results (Zr)
# univariate meta-regression with urbmin as continuous - not used because the relationship is driven by just 2 data points at 4
# univariate meta-regression with urbmax as a factor
MR_A <- rma.mv(yi=Zr2, V=VZr, random=list(~1|Paper_ID,~1|ES_ID), mod=~as.factor(urbmax)-1, method="REML", data=DataA)
summary(MR_A) # urbmax=4 signif Zr2 -0.3232 -0.6290 -0.0174, but driven by only 4 data points! (most are at urbanmax=3)
sum(MR_A$sigma2)/(sum(MR_A$sigma2)+(sum(1/DataA$VZr)*(MR_A$k-1)/(sum(1/DataA$VZr)^2-sum((1/DataA$VZr)^2))))*100 # total heterogeneity 93.2%
plot(DataA$Zr2 ~ DataA$urbmax)
resA <- rbind(resA, data.frame(Model=c("Max urbanisation score:"," 3"," 4 *"," 5"), M=c(NA,MR_A$b), CI.lb=c(NA,MR_A$ci.lb), CI.ub=c(NA,MR_A$ci.ub), pch=c(20,20,20,20))) #harvest results (Zr)
# meta-regression with urbmin_scaled*urbmax_scaled interaction (as continuous predictors)
MR_A <- rma.mv(yi=Zr2, V=VZr, random=list(~1|Paper_ID,~1|ES_ID), mod=~scale(urbmin) * scale(urbmax), method="REML", data=DataA)
summary(MR_A) # all ns
# meta-regression with Nest_openness*Nest_position interaction
MR_A <- rma.mv(yi=Zr2, V=VZr, random=list(~1|Paper_ID,~1|ES_ID), mod=~as.factor(Nest_openness) * as.factor(Nest_position), method="REML", data=DataA)
summary(MR_A) # ns
table(DataA$Nest_openness, DataA$Nest_position) #not enough data for estimating interaction: 0 hole-like nests at ground and mix levels
boxplot(DataA$Zr2 ~ DataA$Nest_position + DataA$Nest_openness)
# meta-regression with Nest_openness + Nest_position without interaction
MR_A1 <- rma.mv(yi=Zr2, V=VZr, random=list(~1|Paper_ID,~1|ES_ID), mod=~as.factor(Nest_position) + as.factor(Nest_openness) -1, method="REML", data=DataA)
summary(MR_A1) # all ns
#resA <- rbind(resA, data.frame(Model=c("Multivariate:"," Cup - elevated"," Cup - ground"," Cup - mix"," Hole - elevated"), M=c(NA,MR_A1$b), CI.lb=c(NA,MR_A1$ci.lb), CI.ub=c(NA,MR_A1$ci.ub), pch=c(20,20,20,20,20))) #harvest results (Zr)
resA <- rbind(resA, data.frame(Model=c("Multivariate:"," Cup - elevated"," Cup - ground"," Cup - mix"), M=c(NA,MR_A1$b[1:3]), CI.lb=c(NA,MR_A1$ci.lb[1:3]), CI.ub=c(NA,MR_A1$ci.ub[1:3]), pch=c(20,20,20,20))) #harvest results (Zr)
# Refit with 'hole' as the reference level to get the Hole-elevated estimate directly.
MR_A2 <- rma.mv(yi=Zr2, V=VZr, random=list(~1|Paper_ID,~1|ES_ID), mod=~as.factor(Nest_position) + relevel(as.factor(Nest_openness), ref="hole") -1, method="REML", data=DataA)
summary(MR_A2) # all ns
resA <- rbind(resA, data.frame(Model=" Hole - elevated", M=c(MR_A2$b[1]), CI.lb=c(MR_A2$ci.lb[1]), CI.ub=c(MR_A2$ci.ub[1]), pch=c(20))) #harvest results (Zr)
# tidy up extracted results table: back-transform Zr estimates to r and round
resA$M <- round(transf.ztor(resA$M),3)
resA$CI.lb <- round(transf.ztor(resA$CI.lb),3)
resA$CI.ub <- round(transf.ztor(resA$CI.ub),3)
write.csv(resA,"MA_MR_dataA_resA_rho2.csv")
revresA <- resA[rev(rownames(resA)),] #reverse for plotting (from bottom to top)
### PLOT - MA and MR models on all data (artificial-nest subset, written to PDF)
pdf(file="Fig_MA_MR_dataA_rho2.pdf",width=4,height=6,pointsize=10)
par(mfrow=c(1,1))
par(mar=c(4,10,2,0)) # wide left margin for model labels
plot(revresA$M, 1:length(revresA$M), ylab=NA, yaxt="n", bty="n", xlim=c(-0.6,0.6), ylim=c(0.25, length(revresA$M)+.5), xlab="Zr", pch=revresA$pch, cex=1.1, cex.axis=.9)
abline(v=0,lty=3)
mtext(revresA$Model, 2, 9, at=1:length(revresA$M), las=2, cex=.8, font=1, adj=0)
segments(revresA$CI.lb, 1:length(revresA$M), revresA$CI.ub, 1:length(revresA$M), lwd=1.25)
dev.off()
#################### NATURAL NESTS ################
## Subset: natural-nest studies only (Nest_type == "nat").
DataN <- Data[Data$Nest_type == "nat", ]
dim(DataN) # 58 31
DataN <- droplevels(DataN)
unlist(lapply(DataN, function(x) sum(is.na(x)))) #how many NA per column: Study_days 31 NA, Nest_height 30 NA, Egg_number 16 NA
#### MA
#funnel plot
plot(jitter(DataN$Zr2), jitter(sqrt(1/DataN$VZr)), cex=0.75, xlim=c(-1.1,1.1), xlab="Zr2", ylab="Precision (1/SE.Zr)", main="all data")
abline(v=0, lwd=0.5)
## MA with 2 random effects (paper identity + effect-size identity)
MA_N <- rma.mv(yi=Zr2, V=VZr, random=list(~1|Paper_ID,~1|ES_ID), method="REML", data=DataN)
summary(MA_N) #estimate Zr ns 0.0794 CI -0.0072 0.1664
# overall ns positive relationship between urbanisation and survival - less predation with increasing urbanization
funnel(MA_N)
forest(MA_N)
# calculate I2 values
s2m <- sum(1/DataN$VZr) * (MA_N$k-1) / (sum(1/DataN$VZr)^2 - sum((1/DataN$VZr)^2)) # typical sampling error
s2t <- sum(MA_N$sigma2)+s2m # total sampling error
I2t <- sum(MA_N$sigma2)/s2t
I2t*100 # 89.92% - total heterogeneity
I2s <- MA_N$sigma2[1]/s2t
I2s*100 # 73.19% variance due to Study
I2e <- MA_N$sigma2[2]/s2t
I2e*100 # 16.73% - residuals against sampling error
transf.ztor(MA_N$b[1,]) #0.0793
transf.ztor(MA_N$ci.lb[1]) #-0.0071
transf.ztor(MA_N$ci.ub[1]) #0.1646
resN <- data.frame(Model="Meta-analytic mean", M=MA_N$b, CI.lb=MA_N$ci.lb, CI.ub=MA_N$ci.ub, pch=18) #harvest results (Zr); extended by rbind later, order matters
## MA with 3 random effects, including Species identity
# NOTE: MA_N is deliberately overwritten here; the 2-random-effects results
# above were already harvested into resN.
MA_N <- rma.mv(yi=Zr2, V=VZr, random=list(~1|Paper_ID,~1|Species,~1|ES_ID), method="REML", data=DataN)
summary(MA_N) #estimate Zr ns 0.0657 CI -0.0139 0.1452
# overall ns positive relationship between urbanisation and survival - less predation with increasing urbanization
funnel(MA_N)
forest(MA_N)
# calculate I2 values
s2m <- sum(1/DataN$VZr) * (MA_N$k-1) / (sum(1/DataN$VZr)^2 - sum((1/DataN$VZr)^2)) # typical sampling error
s2t <- sum(MA_N$sigma2)+s2m # total sampling error
I2t <- sum(MA_N$sigma2)/s2t
I2t*100 # 88.53% - total heterogeneity
I2s <- MA_N$sigma2[1]/s2t
I2s*100 # 0% variance due to Study
I2e <- MA_N$sigma2[2]/s2t
I2e*100 # 74.75% variance due to Species identity - equivalent to using just Study before, usually 1 Species per study, and 1 study per species
#table(DataN$Species,DataN$Paper_ID)
I2e <- MA_N$sigma2[3]/s2t
I2e*100 # 13.77% - residuals against sampling error
## Phylogeny
## Build the 32-species phylogenetic tree used for the phylogenetic meta-analysis:
## load the Ericson-backbone supertree, reconcile one synonym, prune to our
## species list, and save both a PDF figure and a Newick file for later reuse.
birds <- unique(DataN$Species_latin) #get list of unique bird species latin names
birds_stree <- read.tree("Ericson.tre") # load birds supertree from a file with Ericson backbone
# BUG FIX: the collapsed tree was previously assigned to a misspelled variable
# ('birds_steer') and silently discarded. Assign it back so singleton (degree-2)
# nodes are actually removed before pruning; the final pruned 32-tip tree is
# unchanged because drop.tip() also collapses singleton nodes.
birds_stree <- collapse.singles(birds_stree)
bird_tree_species <- as.character(birds_stree$tip.label) # extract list of species from the tree
setdiff(birds, bird_tree_species) #Cyanistes_caeruleus not matching
birds_stree$tip.label <- sub("Parus_caeruleus","Cyanistes_caeruleus",birds_stree$tip.label) #replace the tree's synonym Parus_caeruleus
bird_tree_species <- as.character(birds_stree$tip.label) # refresh the species list after renaming
intersect(bird_tree_species, birds) #32 species matching, ok
tree <- drop.tip(birds_stree, birds_stree$tip.label[-match(birds, birds_stree$tip.label)]) # prune the supertree to the taxa in our list
is.binary.tree(tree) #TRUE (NOTE: deprecated in newer ape versions; is.binary() is the modern equivalent)
is.ultrametric(tree) #TRUE
plot(tree, cex=0.8) #plot with branch lengths
tree2 <- rotate(tree, 37) #rotate some branches for display (node 37 of the pruned 32-tip tree)
pdf("tree.pdf",width = 4, height = 5, pointsize=8)
par(mfcol=c(1,1),mar=c(0,0,0,0),oma=c(0,0,0,0))
plot(tree2)
dev.off()
write.tree(tree2, file = "birds_32sp_tree.tre", append = FALSE, digits = 10, tree.names = FALSE)
### phylogenetic meta-analysis (does not converge if Species ID added)
tree <- read.tree("birds_32sp_tree.tre")# upload cleaned-up and preprocessed phylogenetic tree file
CorMatrix <- vcv(tree, corr=TRUE) # make a phylogenetic correlation matrix
# Multilevel model with phylogenetic relatedness supplied via R= for Species_latin.
MA_N_phylo <- rma.mv(yi=Zr2, V=VZr, random=list(~1|Paper_ID,~1|Species_latin,~1|ES_ID), R=list(Species_latin=CorMatrix), data=DataN, method="REML") #with phylogeny
summary(MA_N_phylo) #intc = 0.0337 ns, CI -0.1645 0.2319
sum(MA_N_phylo$sigma2)/(sum(MA_N_phylo$sigma2)+(sum(1/DataN$VZr)*(MA_N_phylo$k-1)/(sum(1/DataN$VZr)^2-sum((1/DataN$VZr)^2))))*100 # total heterogeneity 92.8%
# Split total heterogeneity into the three variance components.
s2t <- sum(MA_N_phylo$sigma2) + sum(1/MA_N_phylo$vi) * (MA_N_phylo$k-1) / (sum(1/MA_N_phylo$vi)^2 - sum((1/MA_N_phylo$vi)^2))
MA_N_phylo$sigma2[1]/s2t*100 # I^2 between study (id) = 3.67%
MA_N_phylo$sigma2[2]/s2t*100 # I^2 phylogeny = 78.46%
MA_N_phylo$sigma2[3]/s2t*100 # I^2 within study or residual = 10.63%
resN <- rbind(resN, data.frame(Model=c("Phylogenetic meta-analytic mean"), M=c(MA_N_phylo$b), CI.lb=c(MA_N_phylo$ci.lb), CI.ub=c(MA_N_phylo$ci.ub), pch=c(18))) #harvest results (Zr)
# plots
Names <- paste(DataN$Paper, DataN$Species, sep="_")
forest(MA_N_phylo, slab=Names)
funnel(MA_N_phylo, yaxis="seinv")
#### Meta-regression
# species meta-regression: per-species mean effects ('-1' drops the intercept)
MA_N_species <- rma.mv(yi=Zr2, V=VZr, mod=~Species_latin-1, random=list(~1|Paper_ID,~1|ES_ID), data=DataN, method="REML") #without phylogeny
summary(MA_N_species) # some species very signif + or -
sum(MA_N_species$sigma2)/(sum(MA_N_species$sigma2)+(sum(1/DataN$VZr)*(MA_N_species$k-1)/(sum(1/DataN$VZr)^2-sum((1/DataN$VZr)^2))))*100 # total heterogeneity 57.2%
s2t <- sum(MA_N_species$sigma2) + sum(1/MA_N_species$vi) * (MA_N_species$k-1) / (sum(1/MA_N_species$vi)^2 - sum((1/MA_N_species$vi)^2))
MA_N_species$sigma2[1]/s2t # I^2 between study (id) = 0.00% (studies are almost equivalent to the species)
MA_N_species$sigma2[2]/s2t # I^2 within study or residual = 57.2%
# plots
# NOTE(review): these plots reference MA_N_phylo, not MA_N_species - possibly a
# copy-paste from the previous section; confirm which model was intended.
Names <- paste(DataN$Paper, DataN$Species, sep="_")
forest(MA_N_phylo, slab=Names)
funnel(MA_N_phylo, yaxis="seinv")
#plot(tree)
# extract point estimates and their CI for each species
# substr(..., 14, 36) strips the "Species_latin" prefix from coefficient names.
ssp <- summary(MA_N_species)
sp_df <- data.frame(sp = substr(attr(ssp$b,"dimnames")[[1]], 14, 36), M = ssp$b, CI.lb = ssp$ci.lb, CI.ub = ssp$ci.ub)
sp_df <- sp_df[match(tree$tip.label, sp_df$sp),] #reorder dataframe to match order of the tip labels on the tree
# tidy up extracted results table: back-transform Zr estimates to r and round
sp_df$M <- round(transf.ztor(sp_df$M),3)
sp_df$CI.lb <- round(transf.ztor(sp_df$CI.lb),3)
sp_df$CI.ub <- round(transf.ztor(sp_df$CI.ub),3)
sp_df$Signif <- ifelse(sp_df$CI.lb>0 | sp_df$CI.ub<0, "*", "") #add column with stars for estimates that significantly differ from zero
write.csv(sp_df,"MA_MR_dataN_sp_df_rho2.csv")
revres_sp <- sp_df[rev(rownames(sp_df)),] #reverse for plotting (from bottom to top); NOTE(review): the figure below plots sp_df directly, so revres_sp appears unused - confirm
### PLOT - species phylogeny and forest plot (side by side, written to PDF)
pdf(file="Fig_species_rho2.pdf",width=6,height=4,pointsize=9)
par(mfrow=c(1,2))
par(mar=c(4.5,2,1.35,0))
plot(tree, font=1, cex=0.8, x.lim=90, show.tip.label = FALSE)
par(mar=c(4,6,1,0))
plot(sp_df$M, 1:length(sp_df$M), ylab=NA, yaxt="n", bty="n", xlim=c(-1.5,1.5), ylim=c(0.25, length(sp_df$M)+.5), xlab="effect size [r]", pch=16, cex=0.8, cex.axis=.9)
abline(v=0,lty=3)
mtext(sp_df$sp, 2, -1, at=1:length(sp_df$M), las=2, cex=.8, font=3)
segments(sp_df$CI.lb, 1:length(sp_df$M), sp_df$CI.ub, 1:length(sp_df$M), lwd=1.25)
for (i in 1:length(rownames(sp_df))) mtext(sp_df$Signif[i], 2, -1.5, at=i, las=2, cex=.7) #add stars
dev.off()
# Phylogenetic meta-regression with death_cause
MR_N_phylo <- rma.mv(yi=Zr2, V=VZr, mod= ~as.factor(death_cause)-1, random=list(~1|Paper_ID,~1|Species_latin,~1|ES_ID), R=list(Species_latin=CorMatrix), data=DataN, method="REML") #with phylogeny
summary(MR_N_phylo) # all ns
sum(MR_N_phylo$sigma2)/(sum(MR_N_phylo$sigma2)+(sum(1/DataN$VZr)*(MR_N_phylo$k-1)/(sum(1/DataN$VZr)^2-sum((1/DataN$VZr)^2))))*100 # total heterogeneity 88.4
plot(DataN$Zr2 ~ DataN$death_cause) # positive values (less predation in urbanised areas) when "yes" = only nests that died from predation are included
resN <- rbind(resN, data.frame(Model=c("Predation as only source of mortality:"," No"," Yes"), M=c(NA,MR_N_phylo$b), CI.lb=c(NA,MR_N_phylo$ci.lb), CI.ub=c(NA,MR_N_phylo$ci.ub), pch=c(20,20,20))) #harvest results (Zr)
MR_N_phylo <- rma.mv(yi=Zr2, V=VZr, mod= ~as.factor(death_cause), random=list(~1|Paper_ID,~1|Species_latin,~1|ES_ID), R=list(Species_latin=CorMatrix), data=DataN, method="REML") #with phylogeny
summary(MR_N_phylo) # ns diff
#MR_N <- rma.mv(yi=Zr2, V=VZr, mod= ~as.factor(death_cause), random=list(~1|Paper_ID,~1|ES_ID), data=DataN, method="REML") #without phylogeny
#summary(MR_N) # signif diff 0.1712 CI 0.0096 0.3328 * - result without phylogeny
# Phylogenetic meta-regression with Nest_openness
MR_N_phylo <- rma.mv(yi=Zr2, V=VZr, mod= ~as.factor(Nest_openness)-1, random=list(~1|Paper_ID,~1|Species_latin,~1|ES_ID), R=list(Species_latin=CorMatrix), data=DataN, method="REML") #with phylogeny
summary(MR_N_phylo) # all ns
#MR_N <- rma.mv(yi=Zr2, V=VZr, mod= ~as.factor(Nest_openness)-1, random=list(~1|Paper_ID,~1|ES_ID), data=DataN, method="REML") #without phylogeny
#summary(MR_N) # hole signif 0.2285 CI 0.0406 0.4163 * - result without phylogeny
## --- Phylogenetic meta-regressions: natural nests (DataN), effect size Zr2 ---
## Repeated pattern per moderator: fit rma.mv with random effects for paper,
## species (phylogenetic correlation matrix CorMatrix) and effect-size ID,
## report total heterogeneity (variance components over components + typical
## sampling variance), plot raw effect sizes, and append estimates to `resN`
## for the forest plot produced further below.
sum(MR_N_phylo$sigma2)/(sum(MR_N_phylo$sigma2)+(sum(1/DataN$VZr)*(MR_N_phylo$k-1)/(sum(1/DataN$VZr)^2-sum((1/DataN$VZr)^2))))*100 # total heterogeneity 91.9
plot(DataN$Zr2 ~ DataN$Nest_openness)
resN <- rbind(resN, data.frame(Model=c("Nest openness:"," Cup"," Hole"," Orb"), M=c(NA,MR_N_phylo$b), CI.lb=c(NA,MR_N_phylo$ci.lb), CI.ub=c(NA,MR_N_phylo$ci.ub), pch=c(20,20,20,20))) #harvest results (Zr)
# Phylogenetic meta-regression with Nest_position
MR_N_phylo <- rma.mv(yi=Zr2, V=VZr, mod= ~as.factor(Nest_position)-1, random=list(~1|Paper_ID,~1|Species_latin,~1|ES_ID), R=list(Species_latin=CorMatrix), data=DataN, method="REML") #with phylogeny
summary(MR_N_phylo) # all ns
sum(MR_N_phylo$sigma2)/(sum(MR_N_phylo$sigma2)+(sum(1/DataN$VZr)*(MR_N_phylo$k-1)/(sum(1/DataN$VZr)^2-sum((1/DataN$VZr)^2))))*100 # total heterogeneity 92.8
plot(DataN$Zr2 ~ DataN$Nest_position) # more positive values in ground-located nests
resN <- rbind(resN, data.frame(Model=c("Nest position:"," Elevated"," Ground"," Mix"), M=c(NA,MR_N_phylo$b), CI.lb=c(NA,MR_N_phylo$ci.lb), CI.ub=c(NA,MR_N_phylo$ci.ub), pch=c(20,20,20,20))) #harvest results (Zr)
# Phylogenetic meta-regression with log(Nest_height+1)
# (log(x+1) handles nests at height 0; scale() standardises the slope)
MR_N_phylo <- rma.mv(yi=Zr2, V=VZr, mod= ~scale(log(Nest_height+1)), random=list(~1|Paper_ID,~1|Species_latin,~1|ES_ID), R=list(Species_latin=CorMatrix), data=DataN, method="REML") #with phylogeny
summary(MR_N_phylo) # similar ns trend as above -0.0909 -0.1999 0.0182
plot(DataN$Zr2 ~ log(DataN$Nest_height+1))
resN <- rbind(resN, data.frame(Model="Nest height above ground (slope)", M=MR_N_phylo$b[2], CI.lb=MR_N_phylo$ci.lb[2], CI.ub=MR_N_phylo$ci.ub[2], pch=20)) #harvest results (Zr)
# Check whether nest height is confounded with how mortality was scored (death_cause)
hist(DataN$Nest_height[DataN$death_cause=="yes"], col="blue", xlim=c(0,25))
hist(DataN$Nest_height[DataN$death_cause=="no"], col=rgb(0, 0, 0, 0.5), add=TRUE)
table(DataN$death_cause, DataN$Nest_height) # measure predation = "no" in nest height > 10
t.test(DataN$Nest_height ~ DataN$death_cause, var.equal = TRUE) #t = 3.3704, df = 26, p-value = 0.002354
#high nests usually report overall mortality, not by predation
table(is.na(DataN$Nest_height), DataN$Nest_position) # 30 missing height values are from elevated nests, i.e. only a subset of heights known
# Phylogenetic meta-regression with Egg_number (NOTE: 16 NA)
MR_N_phylo <- rma.mv(yi=Zr2, V=VZr, mod= ~scale(DataN$Egg_number), random=list(~1|Paper_ID,~1|Species_latin,~1|ES_ID), R=list(Species_latin=CorMatrix), data=DataN, method="REML") #with phylogeny
summary(MR_N_phylo) # all ns
sum(MR_N_phylo$sigma2)/(sum(MR_N_phylo$sigma2)+(sum(1/DataN$VZr)*(MR_N_phylo$k-1)/(sum(1/DataN$VZr)^2-sum((1/DataN$VZr)^2))))*100 # total heterogeneity 94.4
plot(DataN$Zr2 ~ DataN$Egg_number)
resN <- rbind(resN, data.frame(Model="Egg number (slope)", M=MR_N_phylo$b[2], CI.lb=MR_N_phylo$ci.lb[2], CI.ub=MR_N_phylo$ci.ub[2], pch=20)) #harvest results (Zr)
# --- Study-level moderators (same rma.mv pattern as above) ---
# Phylogenetic meta-regression with Study_days (NOTE: 28 NA)
MR_N_phylo <- rma.mv(yi=Zr2, V=VZr, mod= ~scale(DataN$Study_days), random=list(~1|Paper_ID,~1|Species_latin,~1|ES_ID), R=list(Species_latin=CorMatrix), data=DataN, method="REML") #with phylogeny
summary(MR_N_phylo) # all ns
sum(MR_N_phylo$sigma2)/(sum(MR_N_phylo$sigma2)+(sum(1/DataN$VZr)*(MR_N_phylo$k-1)/(sum(1/DataN$VZr)^2-sum((1/DataN$VZr)^2))))*100 # total heterogeneity 98.3
plot(DataN$Zr2 ~ DataN$Study_days)
resN <- rbind(resN, data.frame(Model="Study duration (slope)", M=MR_N_phylo$b[2], CI.lb=MR_N_phylo$ci.lb[2], CI.ub=MR_N_phylo$ci.ub[2], pch=20)) #harvest results (Zr)
# Phylogenetic meta-regression with Median_year as moderator
MR_N_phylo <- rma.mv(yi=Zr2, V=VZr, mod= ~scale(DataN$Median_year), random=list(~1|Paper_ID,~1|Species_latin,~1|ES_ID), R=list(Species_latin=CorMatrix), data=DataN, method="REML") #with phylogeny
summary(MR_N_phylo) # all ns
sum(MR_N_phylo$sigma2)/(sum(MR_N_phylo$sigma2)+(sum(1/DataN$VZr)*(MR_N_phylo$k-1)/(sum(1/DataN$VZr)^2-sum((1/DataN$VZr)^2))))*100 # total heterogeneity 92.7
plot(DataN$Zr2 ~ DataN$Median_year) # all ns
resN <- rbind(resN, data.frame(Model="Median study year (slope)", M=MR_N_phylo$b[2], CI.lb=MR_N_phylo$ci.lb[2], CI.ub=MR_N_phylo$ci.ub[2], pch=20)) #harvest results (Zr)
# Check whether study year is confounded with how mortality was scored (death_cause)
hist(DataN$Median_year[DataN$death_cause=="yes"], col="blue")
hist(DataN$Median_year[DataN$death_cause=="no"], col=rgb(0, 0, 0, 0.5), add=TRUE)
table(DataN$death_cause, DataN$Median_year) # recent studies more likely to only measure predation = "yes"
t.test(DataN$Median_year ~ DataN$death_cause, var.equal = FALSE) #t = -2.3125, df = 26.758, p-value = 0.02868
# Phylogenetic meta-regression with Pub_year as moderator
MR_N_phylo <- rma.mv(yi=Zr2, V=VZr, mod= ~scale(DataN$Pub_year), random=list(~1|Paper_ID,~1|Species_latin,~1|ES_ID), R=list(Species_latin=CorMatrix), data=DataN, method="REML") #with phylogeny
summary(MR_N_phylo) # all ns
sum(MR_N_phylo$sigma2)/(sum(MR_N_phylo$sigma2)+(sum(1/DataN$VZr)*(MR_N_phylo$k-1)/(sum(1/DataN$VZr)^2-sum((1/DataN$VZr)^2))))*100 # total heterogeneity 92.8
plot(DataN$Zr2 ~ DataN$Pub_year)
resN <- rbind(resN, data.frame(Model="Publication year (slope)", M=MR_N_phylo$b[2], CI.lb=MR_N_phylo$ci.lb[2], CI.ub=MR_N_phylo$ci.ub[2], pch=20)) #harvest results (Zr)
# Phylogenetic meta-regression with urbmin (minimum urbanisation score) as a factor
MR_N_phylo <- rma.mv(yi=Zr2, V=VZr, mod= ~as.factor(urbmin)-1, random=list(~1|Paper_ID,~1|Species_latin,~1|ES_ID), R=list(Species_latin=CorMatrix), data=DataN, method="REML") #with phylogeny
summary(MR_N_phylo) # all ns
sum(MR_N_phylo$sigma2)/(sum(MR_N_phylo$sigma2)+(sum(1/DataN$VZr)*(MR_N_phylo$k-1)/(sum(1/DataN$VZr)^2-sum((1/DataN$VZr)^2))))*100 # total heterogeneity 93.8
plot(DataN$Zr2 ~ DataN$urbmin) # only 1 data point at urbmin=4, and 2 at 3
resN <- rbind(resN, data.frame(Model=c("Min urbanisation score:"," 1"," 2"," 3"," 4"), M=c(NA,MR_N_phylo$b), CI.lb=c(NA,MR_N_phylo$ci.lb), CI.ub=c(NA,MR_N_phylo$ci.ub), pch=c(20,20,20,20,20))) #harvest results (Zr)
# Phylogenetic meta-regression with urbmax (maximum urbanisation score) as a factor
MR_N_phylo <- rma.mv(yi=Zr2, V=VZr, mod= ~as.factor(urbmax)-1, random=list(~1|Paper_ID,~1|Species_latin,~1|ES_ID), R=list(Species_latin=CorMatrix), data=DataN, method="REML") #with phylogeny
summary(MR_N_phylo) # all ns
sum(MR_N_phylo$sigma2)/(sum(MR_N_phylo$sigma2)+(sum(1/DataN$VZr)*(MR_N_phylo$k-1)/(sum(1/DataN$VZr)^2-sum((1/DataN$VZr)^2))))*100 # total heterogeneity 93.6
plot(DataN$Zr2 ~ DataN$urbmax)
# BUG FIX: label previously read "Min urbanisation score:" (copy-paste from the
# urbmin section above); this model uses urbmax, so the forest-plot row label
# must say "Max" (matching the artificial-nest analysis further below)
resN <- rbind(resN, data.frame(Model=c("Max urbanisation score:"," 3"," 4"," 5"), M=c(NA,MR_N_phylo$b), CI.lb=c(NA,MR_N_phylo$ci.lb), CI.ub=c(NA,MR_N_phylo$ci.ub), pch=c(20,20,20,20))) #harvest results (Zr)
# --- Interaction models and the natural-nest forest plot ---
# meta-regression with urbmin_scaled*urbmax_scaled interaction (as continuous predictors)
MR_N_phylo <- rma.mv(yi=Zr2, V=VZr, random=list(~1|Paper_ID,~1|ES_ID), mod=~scale(urbmin) * scale(urbmax), method="REML", data=DataN)
summary(MR_N_phylo) # ns interaction
# meta-regression with Death_cause and Nest_openness, without interaction
MR_N_phylo <- rma.mv(yi=Zr2, V=VZr, mod= ~as.factor(death_cause) + as.factor(Nest_openness) -1, random=list(~1|Paper_ID,~1|Species_latin,~1|ES_ID), R=list(Species_latin=CorMatrix), data=DataN, method="REML") #with phylogeny
summary(MR_N_phylo) # (death_cause)yes signif 0.1668 0.0528 0.2808 **
# meta-regression with Death_cause*Nest_openness interaction
MR_N_phylo <- rma.mv(yi=Zr2, V=VZr, mod= ~as.factor(death_cause) * as.factor(Nest_openness), random=list(~1|Paper_ID,~1|Species_latin,~1|ES_ID), R=list(Species_latin=CorMatrix), data=DataN, method="REML") #with phylogeny
summary(MR_N_phylo) # signif interaction
# Refit without intercept (-1) to get cell means; then refit with relevelled
# Nest_openness (ref="hole", ref="orb") so each level's estimate can be harvested
MR_N_phylo1 <- rma.mv(yi=Zr2, V=VZr, mod= ~as.factor(death_cause) * as.factor(Nest_openness) - 1, random=list(~1|Paper_ID,~1|Species_latin,~1|ES_ID), R=list(Species_latin=CorMatrix), data=DataN, method="REML") #with phylogeny
summary(MR_N_phylo1) # signif interaction
resN <- rbind(resN, data.frame(Model=c("Multivariate meta-regression:"," Cup - No"," Cup - Yes *"), M=c(NA,MR_N_phylo1$b[1:2]), CI.lb=c(NA,MR_N_phylo1$ci.lb[1:2]), CI.ub=c(NA,MR_N_phylo1$ci.ub[1:2]), pch=c(20,20,20))) #harvest results (Zr)
MR_N_phylo2 <- rma.mv(yi=Zr2, V=VZr, mod= ~as.factor(death_cause) * as.factor(relevel(Nest_openness, ref="hole")) - 1, random=list(~1|Paper_ID,~1|Species_latin,~1|ES_ID), R=list(Species_latin=CorMatrix), data=DataN, method="REML") #with phylogeny
summary(MR_N_phylo2) # signif interaction
resN <- rbind(resN, data.frame(Model=c(" Hole - No"," Hole - Yes *"), M=c(MR_N_phylo2$b[1:2]), CI.lb=c(MR_N_phylo2$ci.lb[1:2]), CI.ub=c(MR_N_phylo2$ci.ub[1:2]), pch=c(20,20))) #harvest results (Zr)
MR_N_phylo3 <- rma.mv(yi=Zr2, V=VZr, mod= ~as.factor(death_cause) * as.factor(relevel(Nest_openness, ref="orb")) - 1, random=list(~1|Paper_ID,~1|Species_latin,~1|ES_ID), R=list(Species_latin=CorMatrix), data=DataN, method="REML") #with phylogeny
summary(MR_N_phylo3) # signif interaction
resN <- rbind(resN, data.frame(Model=c(" Orb - Yes"), M=c(MR_N_phylo3$b[2]), CI.lb=c(MR_N_phylo3$ci.lb[2]), CI.ub=c(MR_N_phylo3$ci.ub[2]), pch=c(20))) #harvest results (Zr)
resN <- rbind(resN, data.frame(Model=c(" Nest openness - Death cause interaction *"), M=c(MR_N_phylo3$b[5]), CI.lb=c(MR_N_phylo3$ci.lb[5]), CI.ub=c(MR_N_phylo3$ci.ub[5]), pch=c(20))) #harvest results (Zr)
table(DataN$death_cause, DataN$Nest_openness) # only 5 data points are hole/yes, 2 are orb/yes, 0 are orb/no
boxplot(DataN$Zr2 ~ DataN$death_cause * DataN$Nest_openness, varwidth=TRUE) #most positive values in hole/yes subset - less predation with increasing urbanisation
# tidy up extracted results table
# back-transform Fisher's z (Zr) estimates and CIs to correlation r for plotting
resN$M <- round(transf.ztor(resN$M),3)
resN$CI.lb <- round(transf.ztor(resN$CI.lb),3)
resN$CI.ub <- round(transf.ztor(resN$CI.ub),3)
write.csv(resN,"MA_MR_dataN_resN_rho2.csv")
revresN <- resN[rev(rownames(resN)),] #reverse for plotting (from bottom to top)
### PLOT - MA and MR models on all data
opar <- par() # make a copy of current settings
#par(opar) # restore original settings
pdf(file="Fig_MA_MR_dataN_rho2.pdf",width=4,height=6,pointsize=10)
par(mfrow=c(1,1))
par(mar=c(4,10,2,0))
plot(revresN$M, 1:length(revresN$M), ylab=NA, yaxt="n", bty="n", xlim=c(-0.8,0.8), ylim=c(0.25, length(revresN$M)+.5), xlab="effect size [r]", pch=revresN$pch, cex=1.1, cex.axis=.9)
abline(v=0,lty=3)
mtext(revresN$Model, 2, 9, at=1:length(revresN$M), las=2, cex=.8, font=1, adj=0)
segments(revresN$CI.lb, 1:length(revresN$M), revresN$CI.ub, 1:length(revresN$M), lwd=1.25)
dev.off()
## OVERALL for natural nests: tendency for less predation with increasing urbanisation (positive Zr) - small overall effect size,
# this effect is more pronounced when only nest loss due to predation is included
# and not much effect of urbanisation when other causes of mortality (confounding) are potentially present.
# Tendency for more negative values in nests higher above ground - more mortality in urbanised areas,
# in lower nests more positive values more likely - less predation in urbanised areas,
# but this result is likely to be related to the death_cause variable.
### PLOT - bubble plots for natural nests: r-death_cause, r-nest_height
# Bubble size is proportional to precision (sqrt(1/VZr)): bigger circle = more precise estimate
plot(DataN$Zr2 ~ DataN$death_cause) # positive values (less predation in urbanised areas) when "yes" = only nests that died from predation are included
plot(DataN$Zr2 ~ DataN$Nest_height) # K=23
pdf(file="Fig_bubble2_dataN_rho2.pdf",width=6,height=4,pointsize=10)
par(mfrow=c(1,2))
par(mar=c(4,4,4,2))
#A
symbols(DataN$death_cause, DataN$rho2, circles=sqrt(1/DataN$VZr),inches=0.4, xlab="Predation as only source of mortality",ylab="effect size [r]",main="A",xlim=c(0.5,2.5),xaxp=c(1,2,1), xaxt="n")
axis(1, c(1,2), labels=c("No","Yes"))
abline(h=0,lty=3)
#B
symbols(DataN$Nest_openness, DataN$rho2, circles=sqrt(1/DataN$VZr),inches=0.4,xlab="Nest openness",ylab="effect size [r]",main="B",xlim=c(0,3.5),xaxp=c(1,2,1), xaxt="n")
axis(1, c(1,2,3), labels=c("Cup","Hole","Orb"))
abline(h=0,lty=3)
dev.off()
# Second version: bubble plots split by death_cause subset (yes = predation only)
pdf(file="Fig_bubble2_dataN_v2_rho2.pdf",width=6,height=4,pointsize=10)
par(mfrow=c(1,2))
par(mar=c(4,4,4,2))
DataNy <- subset(DataN, DataN$death_cause=="yes", select=c(Nest_openness, rho2, VZr))
DataNn <- subset(DataN, DataN$death_cause=="no", select=c(Nest_openness, rho2, VZr))
#A
symbols(DataNy$Nest_openness, DataNy$rho2, circles=sqrt(1/DataNy$VZr),inches=0.4, xlab="Nest openness",ylab="effect size [r]",main="A. Only mortality from predation",ylim=c(-1,1),xlim=c(0,3.5),xaxp=c(1,2,1), xaxt="n")
axis(1, c(1,2,3), labels=c("Cup","Hole","Orb"))
abline(h=0,lty=3)
#B
symbols(DataNn$Nest_openness, DataNn$rho2, circles=sqrt(1/DataNn$VZr),inches=0.4,xlab="Nest openness",ylab="effect size [r]",main="B. Mortality from all sources",ylim=c(-1,1),xlim=c(0,3.5),xaxp=c(1,2,1), xaxt="n")
axis(1, c(1,2,3), labels=c("Cup","Hole","Orb"))
abline(h=0,lty=3)
dev.off()
# Four-panel version combining the two figures above
pdf(file="Fig_bubble4_dataN_rho2.pdf",width=6,height=6,pointsize=10)
par(mfrow=c(2,2))
par(mar=c(4,4,4,2))
#A
symbols(DataN$death_cause, DataN$rho2, circles=sqrt(1/DataN$VZr),inches=0.4, xlab="Predation as only source of mortality",ylab="effect size [r]",main="A. Mortality sources",xlim=c(0.5,2.5),xaxp=c(1,2,1), xaxt="n")
axis(1, c(1,2), labels=c("No","Yes"))
abline(h=0,lty=3)
#B
symbols(DataN$Nest_openness, DataN$rho2, circles=sqrt(1/DataN$VZr),inches=0.4,xlab="Nest openness",ylab="effect size [r]",main="B. Nest openness",xlim=c(0,3.5),xaxp=c(1,2,1), xaxt="n")
axis(1, c(1,2,3), labels=c("Cup","Hole","Orb"))
abline(h=0,lty=3)
#C
DataNy <- subset(DataN, DataN$death_cause=="yes", select=c(Nest_openness, rho2, VZr))
symbols(DataNy$Nest_openness, DataNy$rho2, circles=sqrt(1/DataNy$VZr),inches=0.4, xlab="Nest openness",ylab="effect size [r]",main="C. Only mortality from predation",ylim=c(-1,1),xlim=c(0,3.5),xaxp=c(1,2,1), xaxt="n")
axis(1, c(1,2,3), labels=c("Cup","Hole","Orb"))
abline(h=0,lty=3)
#D
DataNn <- subset(DataN, DataN$death_cause=="no", select=c(Nest_openness, rho2, VZr))
symbols(DataNn$Nest_openness, DataNn$rho2, circles=sqrt(1/DataNn$VZr),inches=0.4,xlab="Nest openness",ylab="effect size [r]",main="D. Mortality from all sources",ylim=c(-1,1),xlim=c(0,3.5),xaxp=c(1,2,1), xaxt="n")
axis(1, c(1,2,3), labels=c("Cup","Hole","Orb"))
abline(h=0,lty=3)
dev.off()
### PLOT - Fig_MA_MR_dataA and Fig_MA_MR_dataN subsets in one figure (skip min and max scores)
# Relies on `revresA` (artificial nests) and `revresN` (natural nests) built earlier;
# row offsets 15/17 drop the urbanisation-score rows from each table
pdf(file="Fig_MA_MR_dataA_dataN_rho2.pdf",width=9,height=5,pointsize=10)
par(mfcol=c(1,2))
par(mar=c(4,10,2,0))
revresAA <- revresA[c(15:length(revresA$M)), ]
plot(revresAA$M, 1:length(revresAA$M), ylab=NA, yaxt="n", bty="n", xlim=c(-0.6,0.6), ylim=c(0.25, length(revresAA$M)+.5), xlab="effect size [r]", pch=revresAA$pch, cex=1.1, cex.axis=.9, main="A. Artificial nests")
abline(v=0,lty=3)
mtext(revresAA$Model, 2, 9, at=1:length(revresAA$M), las=2, cex=.8, font=1, adj=0)
segments(revresAA$CI.lb, 1:length(revresAA$M), revresAA$CI.ub, 1:length(revresAA$M), lwd=1.25)
revresNN <- revresN[c(17:length(revresN$M)), ]
plot(revresNN$M, 1:length(revresNN$M), ylab=NA, yaxt="n", bty="n", xlim=c(-0.6,0.6), ylim=c(0.25, length(revresNN$M)+.5), xlab="effect size [r]", pch=revresNN$pch, cex=1.1, cex.axis=.9, main="B. Natural nests")
abline(v=0,lty=3)
mtext(revresNN$Model, 2, 9, at=1:length(revresNN$M), las=2, cex=.8, font=1, adj=0)
segments(revresNN$CI.lb, 1:length(revresNN$M), revresNN$CI.ub, 1:length(revresNN$M), lwd=1.25)
dev.off()
#mtext("a)",side=2,line=7,at=14,las=2)
###### PUBLICATION BIAS
# Funnel-plot asymmetry tests on the residuals of the nest-type meta-regression
# (residuals remove the artificial/natural difference before testing for bias)
MR_nest_intc <- rma.mv(yi=Zr2, V=VZr, random=list(~1|Paper_ID,~1|ES_ID), mod=~Nest_type-1, method="REML", data=Data)
summary(MR_nest_intc) # in artificial nests signif negative r (more predation with incr urbanization)
# for natural nests ns positive r (less predation with incr urbanization)
Residuals <- residuals(MR_nest_intc)
Precision <- sqrt(1/MR_nest_intc$vi)
plot(Residuals,Precision, xlim=c(-1,1), xlab="Residuals", ylab="Precision [1/SE]")
abline(v=0,lty=3)
# Simple random-effects meta-analysis of the residuals for bias tests
model <- rma(yi=Residuals,sei=1/Precision)
summary(model) # ns est -0.0087 CI -0.0564 0.0390
funnel(model,yaxi="seinv")
#Trim and fill
TF <- trimfill(model)
TF #Estimated number of missing studies on the right side: 0 (SE = 6.2815)
funnel(TF)
#Egger's regression test
regtest(model,model="lm") #test for funnel plot asymmetry: t = 0.7158, df = 115, p = 0.4756
ranktest(model) #Kendall's tau = -0.0540, p = 0.3897 (warning about ties, do not use)
### PLOT - funnel plots
pdf(file="Fig_funnels2_alldata_rho2.pdf",width=6,height=4,pointsize=10)
par(mfcol=c(1,2))
par(mar=c(4,4,2,1))
plot(Data$rho2, sqrt(1/Data$variance), xlim=c(-1.2,1.2), xlab = "Effect size [r]", ylab="Precision [1/SE]", main="A")
abline(v=0,lty=3)
plot(Residuals,Precision, xlim=c(-1.2,1.2), xlab = "Residuals", ylab="Precision [1/SE]", main="B")
abline(v=0,lty=3)
dev.off()
###########################################################
# ANALYSES on rho (original urbanisation scores)
###########################################################
###########################################################
# Same analysis pipeline as above, but on effect size Zr (rho-based) instead of Zr2
#### Simple meta-analysis - all data
## MA without random effects
MA_0 <- rma(yi=Zr, vi=VZr, method="REML", data=Data)
summary(MA_0) #I^2 (total heterogeneity / total variability): 91.0%
#funnel plot
plot(jitter(Data$Zr), jitter(sqrt(1/Data$VZr)), cex=0.75, xlim=c(-1.1,1.1), xlab="Zr", ylab="Precision (1/SE.Zr)", main="all data")
abline(v=0, lwd=0.5)
## MA with random effects only
MA_all <- rma.mv(yi=Zr, V=VZr, random=list(~1|Paper_ID,~1|ES_ID), method="REML", data=Data)
summary(MA_all) #estimate ns Zr0.0063 CI -0.0723 0.0849
funnel(MA_all) # 1 outlier with large error
#forest(MA_all)
# calculate I2 values (multilevel I^2: variance components relative to
# components + typical sampling variance)
s2m <- sum(1/Data$VZr) * (MA_all$k-1) / (sum(1/Data$VZr)^2 - sum((1/Data$VZr)^2)) # typical sampling variance
s2t <- sum(MA_all$sigma2)+s2m # total variance (heterogeneity + sampling)
I2t <- sum(MA_all$sigma2)/s2t
I2t*100 # 92.8% - total heterogeneity
I2s <- MA_all$sigma2[1]/s2t
I2s*100 # 84.5% variance due to Study
I2e <- MA_all$sigma2[2]/s2t
I2e*100 # 8.3% - residuals against sampling error
transf.ztor(MA_all$b[1,]) # intercept as r = -0.007
transf.ztor(MA_all$ci.lb[1]) # CI.lb for the intercept as r = -0.085
transf.ztor(MA_all$ci.ub[1]) # CI.ub for the intercept as r = 0.071
res <- data.frame(Model="Meta-analytic mean, all data", M=MA_all$b, CI.lb=MA_all$ci.lb, CI.ub=MA_all$ci.ub, pch=18) #harvest results (Zr)
#### Meta-regression on all data with nest_type as moderator
# -1 removes the intercept so each nest type gets its own mean estimate
MR_nest_intc <- rma.mv(yi=Zr, V=VZr, random=list(~1|Paper_ID,~1|ES_ID), mod=~Nest_type-1, method="REML", data=Data)
summary(MR_nest_intc) # in artificial nests signif negative Zr -0.1238 (more predation with incr urbanization);
# in natural nests ns positive Zr 0.0802 (less predation with incr urbanization)
funnel(MR_nest_intc) # 1 outlier ES < -1 (same point as in the full data set)
forest(MR_nest_intc)
summ <- summary(MR_nest_intc)
forest(x=summ$b, ci.lb=summ$ci.lb, ci.ub=summ$ci.ub, slab=dimnames(summ$b)[[1]], xlab="Zr")
# calculate I2 values
s2m <- sum(1/Data$VZr) * (MR_nest_intc$k-1) / (sum(1/Data$VZr)^2 - sum((1/Data$VZr)^2)) # typical sampling variance
s2t <- sum(MR_nest_intc$sigma2)+s2m # total variance (heterogeneity + sampling)
I2t <- sum(MR_nest_intc$sigma2)/s2t
I2t*100 # 91.9% - total heterogeneity
I2s <- MR_nest_intc$sigma2[1]/s2t
I2s*100 # 82.7% variance due to Study
I2e <- MR_nest_intc$sigma2[2]/s2t
I2e*100 # 9.1% - residuals against sampling error
# back-transform estimates and CIs from Fisher's z to r
transf.ztor(MR_nest_intc$b[1,]) #-0.123
transf.ztor(MR_nest_intc$ci.lb[1]) #-0.232
transf.ztor(MR_nest_intc$ci.ub[1]) #-0.012
transf.ztor(MR_nest_intc$b[2,]) #0.080
transf.ztor(MR_nest_intc$ci.lb[2]) #-0.017
transf.ztor(MR_nest_intc$ci.ub[2]) #0.175
res <- rbind(res, data.frame(Model=c("Artificial vs. Natural nests:"," Artificial nests *"," Natural nests"), M=c(NA,MR_nest_intc$b), CI.lb=c(NA,MR_nest_intc$ci.lb), CI.ub=c(NA,MR_nest_intc$ci.ub), pch=c(20,20,20))) #harvest results (Zr)
# Refit WITH intercept so the second coefficient is the natural-artificial contrast
MR_nest_diff <- rma.mv(yi=Zr, V=VZr, random=list(~1|Paper_ID,~1|ES_ID), mod=~Nest_type, method="REML", data=Data)
summary(MR_nest_diff) # diff between artificial and natural nests signif Zr 0.203 CI 0.056 0.352
transf.ztor(MR_nest_diff$b[2,]) #0.201
transf.ztor(MR_nest_diff$ci.lb[2]) #0.056
transf.ztor(MR_nest_diff$ci.ub[2]) #0.338
res <- rbind(res, data.frame(Model=" Difference: Natural - Artificial nests *", M=MR_nest_diff$b[2], CI.lb=MR_nest_diff$ci.lb[2], CI.ub=MR_nest_diff$ci.ub[2], pch=20)) #harvest results (Zr)
#tidy up extracted results table (back-transform Zr to r for plotting)
res$M <- round(transf.ztor(res$M),3)
res$CI.lb <- round(transf.ztor(res$CI.lb),3)
res$CI.ub <- round(transf.ztor(res$CI.ub),3)
write.csv(res,"MA_MR_alldata_main_res.csv")
revres <- res[rev(rownames(res)),] #reverse for plotting (from bottom to top)
### PLOT - MA and MR models on all data
#opar <- par() # make a copy of current settings
#par(opar) # restore original settings
pdf(file="Fig_MA_MR_alldata.pdf",width=4,height=3,pointsize=8)
par(mfrow=c(1,1))
par(mar=c(5,14,1,1))
plot(revres$M, 1:length(revres$M), ylab=NA, yaxt="n", bty="n", xlim=c(-0.5,0.5), ylim=c(0.25, length(revres$M)+.5), xlab="effect size [r]", pch=revres$pch, cex=1, cex.axis=.9)
abline(v=0,lty=3)
mtext(revres$Model, 2, 13, at=1:length(revres$M), las=2, cex=1, font=1, adj=0)
segments(revres$CI.lb, 1:length(revres$M), revres$CI.ub, 1:length(revres$M), lwd=1.25)
dev.off()
#################### ARTIFICIAL NESTS ################
# Subset to artificial-nest studies and run the same MA pipeline on Zr
DataA <- Data[Data$Nest_type == "art", ]
dim(DataA) # 59 31
DataA <- droplevels(DataA)
unlist(lapply(DataA, function(x) sum(is.na(x)))) #how many NA per column: Study_days 18, Nest_height 1
#### MA
#funnel plot
plot(jitter(DataA$Zr), jitter(sqrt(1/DataA$VZr)), cex=0.75, xlim=c(-1.1,1.1), xlab="Zr", ylab="Precision (1/SE.Zr)", main="all data")
abline(v=0, lwd=0.5)
## MA with random effects only
MA_A <- rma.mv(yi=Zr, V=VZr, random=list(~1|Paper_ID,~1|ES_ID), method="REML", data=DataA)
summary(MA_A) #estimate signif Zr -0.1256 CI -0.2498 -0.0015
funnel(MA_A)
# calculate I2 values
s2m <- sum(1/DataA$VZr) * (MA_A$k-1) / (sum(1/DataA$VZr)^2 - sum((1/DataA$VZr)^2)) # typical sampling variance
s2t <- sum(MA_A$sigma2)+s2m # total variance (heterogeneity + sampling)
I2t <- sum(MA_A$sigma2)/s2t
I2t*100 # 93.1% - total heterogeneity
I2s <- MA_A$sigma2[1]/s2t
I2s*100 # 86.0% variance due to Study
I2e <- MA_A$sigma2[2]/s2t
I2e*100 # 7.1% - residuals against sampling error
transf.ztor(MA_A$b[1,]) #-0.1250
transf.ztor(MA_A$ci.lb[1]) #-0.2447
transf.ztor(MA_A$ci.ub[1]) #-0.0015
#sum(MA_A$sigma2)/(sum(MA_A$sigma2)+(sum(1/DataA$VZr)*(MA_A$k-1)/(sum(1/DataA$VZr)^2-sum((1/DataA$VZr)^2))))*100 # total heterogeneity
resA <- data.frame(Model="Meta-analytic mean*", M=MA_A$b, CI.lb=MA_A$ci.lb, CI.ub=MA_A$ci.ub, pch=18) #harvest results (Zr)
#### MR - Meta-Regressions (univariate)
# Same pattern as the natural-nest section: fit, summarise, total heterogeneity,
# raw plot, append to `resA` for the forest plot below. No phylogeny here
# (artificial nests have no focal species).
# univariate meta-regression with Nest_openness
MR_A <- rma.mv(yi=Zr, V=VZr, random=list(~1|Paper_ID,~1|ES_ID), mod=~as.factor(Nest_openness)-1, method="REML", data=DataA)
summary(MR_A) # cup signif Zr -0.1269 -0.2509 -0.0029
sum(MR_A$sigma2)/(sum(MR_A$sigma2)+(sum(1/DataA$VZr)*(MR_A$k-1)/(sum(1/DataA$VZr)^2-sum((1/DataA$VZr)^2))))*100 # total heterogeneity 93.1%
plot(DataA$Zr ~ DataA$Nest_openness)
resA <- rbind(resA, data.frame(Model=c("Nest openness:"," Cup *"," Hole"), M=c(NA,MR_A$b), CI.lb=c(NA,MR_A$ci.lb), CI.ub=c(NA,MR_A$ci.ub), pch=c(20,20,20))) #harvest results (Zr)
# univariate meta-regression with Nest_openness Hole-Cup difference
MR_A <- rma.mv(yi=Zr, V=VZr, random=list(~1|Paper_ID,~1|ES_ID), mod=~as.factor(Nest_openness), method="REML", data=DataA)
summary(MR_A) # diff Zr ns 0.0805 -0.0459 0.2070
# univariate meta-regression with Nest_position
MR_A <- rma.mv(yi=Zr, V=VZr, random=list(~1|Paper_ID,~1|ES_ID), mod=~as.factor(Nest_position)-1, method="REML", data=DataA)
summary(MR_A) # only ground Zr signif -0.1376 -0.2697 -0.0054
sum(MR_A$sigma2)/(sum(MR_A$sigma2)+(sum(1/DataA$VZr)*(MR_A$k-1)/(sum(1/DataA$VZr)^2-sum((1/DataA$VZr)^2))))*100 # total heterogeneity 93.0%
plot(DataA$Zr ~ DataA$Nest_position)
resA <- rbind(resA, data.frame(Model=c("Nest position:"," Elevated"," Ground *"," Mix"), M=c(NA,MR_A$b), CI.lb=c(NA,MR_A$ci.lb), CI.ub=c(NA,MR_A$ci.ub), pch=c(20,20,20,20))) #harvest results (Zr)
# univariate meta-regression with Egg_number
MR_A <- rma.mv(yi=Zr, V=VZr, random=list(~1|Paper_ID,~1|ES_ID), mod=~scale(DataA$Egg_number), method="REML", data=DataA)
summary(MR_A) # all ns
sum(MR_A$sigma2)/(sum(MR_A$sigma2)+(sum(1/DataA$VZr)*(MR_A$k-1)/(sum(1/DataA$VZr)^2-sum((1/DataA$VZr)^2))))*100 # total heterogeneity 93.5%
plot(DataA$Zr ~ DataA$Egg_number)
resA <- rbind(resA, data.frame(Model=c("Egg number (slope)"), M=c(MR_A$b[2]), CI.lb=c(MR_A$ci.lb[2]), CI.ub=c(MR_A$ci.ub[2]), pch=c(20))) #harvest results (Zr)
# univariate meta-regression with Study_days (NOTE: 18 NA)
MR_A <- rma.mv(yi=Zr, V=VZr, random=list(~1|Paper_ID,~1|ES_ID), mod=~scale(Study_days), method="REML", data=DataA)
summary(MR_A) # all ns
sum(MR_A$sigma2)/(sum(MR_A$sigma2)+(sum(1/DataA$VZr)*(MR_A$k-1)/(sum(1/DataA$VZr)^2-sum((1/DataA$VZr)^2))))*100 # total heterogeneity 95.4%
plot(DataA$Zr ~ DataA$Study_days)
resA <- rbind(resA, data.frame(Model=c("Study duration (slope)"), M=c(MR_A$b[2]), CI.lb=c(MR_A$ci.lb[2]), CI.ub=c(MR_A$ci.ub[2]), pch=c(20))) #harvest results (Zr)
# univariate meta-regression with Median_year
MR_A <- rma.mv(yi=Zr, V=VZr, random=list(~1|Paper_ID,~1|ES_ID), mod=~scale(Median_year), method="REML", data=DataA)
summary(MR_A) # all ns
sum(MR_A$sigma2)/(sum(MR_A$sigma2)+(sum(1/DataA$VZr)*(MR_A$k-1)/(sum(1/DataA$VZr)^2-sum((1/DataA$VZr)^2))))*100 # total heterogeneity 93.2%
plot(DataA$Zr ~ DataA$Median_year)
resA <- rbind(resA, data.frame(Model=c("Median study year (slope)"), M=c(MR_A$b[2]), CI.lb=c(MR_A$ci.lb[2]), CI.ub=c(MR_A$ci.ub[2]), pch=c(20))) #harvest results (Zr)
# univariate meta-regression with Pub_year
MR_A <- rma.mv(yi=Zr, V=VZr, random=list(~1|Paper_ID,~1|ES_ID), mod=~scale(DataA$Pub_year), method="REML", data=DataA)
summary(MR_A) # all ns
sum(MR_A$sigma2)/(sum(MR_A$sigma2)+(sum(1/DataA$VZr)*(MR_A$k-1)/(sum(1/DataA$VZr)^2-sum((1/DataA$VZr)^2))))*100 # total heterogeneity 93.5%
plot(DataA$Zr ~ DataA$Pub_year)
resA <- rbind(resA, data.frame(Model=c("Publication year (slope)"), M=c(MR_A$b[2]), CI.lb=c(MR_A$ci.lb[2]), CI.ub=c(MR_A$ci.ub[2]), pch=c(20))) #harvest results (Zr)
# univariate meta-regression with urbmin as a factor
MR_A <- rma.mv(yi=Zr, V=VZr, random=list(~1|Paper_ID,~1|ES_ID), mod=~as.factor(urbmin)-1, method="REML", data=DataA)
summary(MR_A) # all ns
sum(MR_A$sigma2)/(sum(MR_A$sigma2)+(sum(1/DataA$VZr)*(MR_A$k-1)/(sum(1/DataA$VZr)^2-sum((1/DataA$VZr)^2))))*100 # total heterogeneity 94.2
plot(DataA$Zr ~ DataA$urbmin)
resA <- rbind(resA, data.frame(Model=c("Min urbanisation score:"," 1"," 2"," 3"," 4"), M=c(NA,MR_A$b), CI.lb=c(NA,MR_A$ci.lb), CI.ub=c(NA,MR_A$ci.ub), pch=c(20,20,20,20,20))) #harvest results (Zr)
# univariate meta-regression with urbmin as continuous - not used because the relationship is driven by just 2 data points at 4
# univariate meta-regression with urbmax as a factor
MR_A <- rma.mv(yi=Zr, V=VZr, random=list(~1|Paper_ID,~1|ES_ID), mod=~as.factor(urbmax)-1, method="REML", data=DataA)
summary(MR_A) # urbmax=4 signif Zr -0.3363 -0.6413 -0.0314, but driven by only 4 data points! (most are at urbanmax=3)
sum(MR_A$sigma2)/(sum(MR_A$sigma2)+(sum(1/DataA$VZr)*(MR_A$k-1)/(sum(1/DataA$VZr)^2-sum((1/DataA$VZr)^2))))*100 # total heterogeneity 93.2%
plot(DataA$Zr ~ DataA$urbmax)
resA <- rbind(resA, data.frame(Model=c("Max urbanisation score:"," 3"," 4 *"," 5"), M=c(NA,MR_A$b), CI.lb=c(NA,MR_A$ci.lb), CI.ub=c(NA,MR_A$ci.ub), pch=c(20,20,20,20))) #harvest results (Zr)
# meta-regression with urbmin_scaled*urbmax_scaled interaction (as continuous predictors)
MR_A <- rma.mv(yi=Zr, V=VZr, random=list(~1|Paper_ID,~1|ES_ID), mod=~scale(urbmin) * scale(urbmax), method="REML", data=DataA)
summary(MR_A) # ns
# meta-regression with Nest_openness*Nest_position interaction
MR_A <- rma.mv(yi=Zr, V=VZr, random=list(~1|Paper_ID,~1|ES_ID), mod=~as.factor(Nest_openness) * as.factor(Nest_position), method="REML", data=DataA)
summary(MR_A) # ns
table(DataA$Nest_openness, DataA$Nest_position) #not enough data for estimating interaction: 0 hole-like nests at ground and mix levels
boxplot(DataA$Zr ~ DataA$Nest_position + DataA$Nest_openness)
# meta-regression with Nest_openness + Nest_position without interaction
MR_A1 <- rma.mv(yi=Zr, V=VZr, random=list(~1|Paper_ID,~1|ES_ID), mod=~as.factor(Nest_position) + as.factor(Nest_openness) -1, method="REML", data=DataA)
summary(MR_A1) # all ns
#resA <- rbind(resA, data.frame(Model=c("Multivariate:"," Cup - elevated"," Cup - ground"," Cup - mix"," Hole - elevated"), M=c(NA,MR_A1$b), CI.lb=c(NA,MR_A1$ci.lb), CI.ub=c(NA,MR_A1$ci.ub), pch=c(20,20,20,20,20))) #harvest results (Zr)
resA <- rbind(resA, data.frame(Model=c("Multivariate:"," Cup - elevated"," Cup - ground"," Cup - mix"), M=c(NA,MR_A1$b[1:3]), CI.lb=c(NA,MR_A1$ci.lb[1:3]), CI.ub=c(NA,MR_A1$ci.ub[1:3]), pch=c(20,20,20,20))) #harvest results (Zr)
# Relevel to "hole" as the reference to harvest the Hole-elevated estimate
MR_A2 <- rma.mv(yi=Zr, V=VZr, random=list(~1|Paper_ID,~1|ES_ID), mod=~as.factor(Nest_position) + relevel(as.factor(Nest_openness), ref="hole") -1, method="REML", data=DataA)
summary(MR_A2) # all ns
resA <- rbind(resA, data.frame(Model=" Hole - elevated", M=c(MR_A2$b[1]), CI.lb=c(MR_A2$ci.lb[1]), CI.ub=c(MR_A2$ci.ub[1]), pch=c(20))) #harvest results (Zr)
# tidy up extracted results table (back-transform Fisher's z to r for plotting)
resA$M <- round(transf.ztor(resA$M),3)
resA$CI.lb <- round(transf.ztor(resA$CI.lb),3)
resA$CI.ub <- round(transf.ztor(resA$CI.ub),3)
write.csv(resA,"MA_MR_dataA_resA.csv")
revresA <- resA[rev(rownames(resA)),] #reverse for plotting (from bottom to top)
### PLOT - MA and MR models on all data
opar <- par() # make a copy of current settings
#par(opar) # restore original settings
pdf(file="Fig_MA_MR_dataA.pdf",width=4,height=6,pointsize=10)
par(mfrow=c(1,1))
par(mar=c(4,10,2,0))
plot(revresA$M, 1:length(revresA$M), ylab=NA, yaxt="n", bty="n", xlim=c(-0.6,0.6), ylim=c(0.25, length(revresA$M)+.5), xlab="Zr", pch=revresA$pch, cex=1.1, cex.axis=.9)
abline(v=0,lty=3)
mtext(revresA$Model, 2, 9, at=1:length(revresA$M), las=2, cex=.8, font=1, adj=0)
segments(revresA$CI.lb, 1:length(revresA$M), revresA$CI.ub, 1:length(revresA$M), lwd=1.25)
dev.off()
#################### NATURAL NESTS ################
# Subset to natural-nest studies; compare MA with 2 vs. 3 random effects
DataN <- Data[Data$Nest_type == "nat", ]
dim(DataN) # 58 31
DataN <- droplevels(DataN)
unlist(lapply(DataN, function(x) sum(is.na(x)))) #how many NA per column: Study_days 31 NA, Nest_height 30 NA, Egg_number 16 NA
#### MA
#funnel plot
plot(jitter(DataN$Zr), jitter(sqrt(1/DataN$VZr)), cex=0.75, xlim=c(-1.1,1.1), xlab="Zr", ylab="Precision (1/SE.Zr)", main="all data")
abline(v=0, lwd=0.5)
## MA with 2 random effects
MA_N <- rma.mv(yi=Zr, V=VZr, random=list(~1|Paper_ID,~1|ES_ID), method="REML", data=DataN)
summary(MA_N) #estimate Zr ns 0.0790 CI -0.0083 0.1664
# overall ns positive relationship between urbanisation and survival - less predation with increasing urbanization
funnel(MA_N)
forest(MA_N)
# calculate I2 values
s2m <- sum(1/DataN$VZr) * (MA_N$k-1) / (sum(1/DataN$VZr)^2 - sum((1/DataN$VZr)^2)) # typical sampling variance
s2t <- sum(MA_N$sigma2)+s2m # total variance (heterogeneity + sampling)
I2t <- sum(MA_N$sigma2)/s2t
I2t*100 # 89.97% - total heterogeneity
I2s <- MA_N$sigma2[1]/s2t
I2s*100 # 77.43% variance due to Study
I2e <- MA_N$sigma2[2]/s2t
I2e*100 # 12.54% - residuals against sampling error
transf.ztor(MA_N$b[1,]) #0.0788
transf.ztor(MA_N$ci.lb[1]) #-0.0083
transf.ztor(MA_N$ci.ub[1]) #0.1648
resN <- data.frame(Model="Meta-analytic mean", M=MA_N$b, CI.lb=MA_N$ci.lb, CI.ub=MA_N$ci.ub, pch=18) #harvest results (Zr)
## MA with 3 random effects, including Species identity
MA_N <- rma.mv(yi=Zr, V=VZr, random=list(~1|Paper_ID,~1|Species,~1|ES_ID), method="REML", data=DataN)
summary(MA_N) #estimate Zr ns 0.0671 CI -0.0118 0.1461
# overall ns positive relationship between urbanisation and survival - less predation with increasing urbanization
funnel(MA_N)
forest(MA_N)
# calculate I2 values
s2m <- sum(1/DataN$VZr) * (MA_N$k-1) / (sum(1/DataN$VZr)^2 - sum((1/DataN$VZr)^2)) # typical sampling variance
s2t <- sum(MA_N$sigma2)+s2m # total variance (heterogeneity + sampling)
I2t <- sum(MA_N$sigma2)/s2t
I2t*100 # 88.36% - total heterogeneity
I2s <- MA_N$sigma2[1]/s2t
I2s*100 # 0% variance due to Study
I2e <- MA_N$sigma2[2]/s2t
I2e*100 # 74.0% variance due to Species identity - equivalent to using just Study before, usually 1 Species per study, and 1 study per species
I2e <- MA_N$sigma2[3]/s2t
I2e*100 # 14.36% - residuals against sampling error
table(DataN$Species,DataN$Paper_ID)
## Phylogeny: prune the avian supertree down to the 32 species in DataN
birds <- unique(DataN$Species_latin) #get list of unique bird species latin names
str(birds)
birds_stree <- read.tree("Ericson.tre") # load birds supertree from a file
birds_stree #9993 tips = species
str(birds_stree) #has edge (branch) lengths
# BUG FIX: the collapse.singles() result was previously assigned to the
# misspelled, never-used name `birds_steer`, so singleton nodes were never
# removed; assign back to `birds_stree` as intended
birds_stree <- collapse.singles(birds_stree)
bird_tree_species <- as.character(birds_stree$tip.label) # extract list of species from the tree
intersect(bird_tree_species, birds) #31 matching
setdiff(birds, bird_tree_species) #Cyanistes_caeruleus not matching
birds_stree$tip.label <- sub("Parus_caeruleus","Cyanistes_caeruleus",birds_stree$tip.label) #replace with synonym Parus_caeruleus
bird_tree_species <- as.character(birds_stree$tip.label) # extract list of species from the tree
intersect(bird_tree_species, birds) #32 matching, ok
tree <- drop.tip(birds_stree, birds_stree$tip.label[-match(birds, birds_stree$tip.label)]) # prune the supertree tree to a list of taxa from our list
is.binary.tree(tree) #TRUE
is.ultrametric(tree) #TRUE
plot(tree, cex=0.8) #plot with branch lengths
nodelabels()
tree2 <- rotate(tree, 37) #rotate some branches (cosmetic, for the published figure)
pdf("tree.pdf",width = 4, height = 5, pointsize=8)
par(mfcol=c(1,1),mar=c(0,0,0,0),oma=c(0,0,0,0))
plot(tree2)
dev.off()
write.tree(tree2, file = "birds_32sp_tree.tre", append = FALSE, digits = 10, tree.names = FALSE)
### phylogenetic meta-analysis (does not converge if Species ID added)
tree <- read.tree("birds_32sp_tree.tre")# upload cleaned-up and preprocessed phylogenetic tree file
CorMatrix <- vcv(tree, corr=TRUE) # make a phylogenetic correlation matrix (1s on the diagonal)
# Multilevel random-effects model: paper, species (phylogenetically correlated via R) and effect-size ID
MA_N_phylo <- rma.mv(yi=Zr, V=VZr, random=list(~1|Paper_ID,~1|Species_latin,~1|ES_ID), R=list(Species_latin=CorMatrix), data=DataN, method="REML") #with phylogeny
summary(MA_N_phylo) #intc = 0.0446 ns, CI -0.1264 0.2155
# total I^2: sum of variance components over (components + typical sampling variance)
sum(MA_N_phylo$sigma2)/(sum(MA_N_phylo$sigma2)+(sum(1/DataN$VZr)*(MA_N_phylo$k-1)/(sum(1/DataN$VZr)^2-sum((1/DataN$VZr)^2))))*100 # total heterogeneity 91.5%
# s2t = total variance: random components + typical within-study sampling variance
s2t <- sum(MA_N_phylo$sigma2) + sum(1/MA_N_phylo$vi) * (MA_N_phylo$k-1) / (sum(1/MA_N_phylo$vi)^2 - sum((1/MA_N_phylo$vi)^2))
MA_N_phylo$sigma2[1]/s2t*100 # I^2 between study (id) = 23.9%
MA_N_phylo$sigma2[2]/s2t*100 # I^2 phylogeny = 56.9%
MA_N_phylo$sigma2[3]/s2t*100 # I^2 within study or residual = 10.67%
resN <- rbind(resN, data.frame(Model=c("Phylogenetic meta-analytic mean"), M=c(MA_N_phylo$b), CI.lb=c(MA_N_phylo$ci.lb), CI.ub=c(MA_N_phylo$ci.ub), pch=c(18))) #harvest results (Zr)
# plots
Names <- paste(DataN$Paper, DataN$Species, sep="_") # plot labels: paper + species
forest(MA_N_phylo, slab=Names)
funnel(MA_N_phylo, yaxis="seinv") # funnel with precision (1/SE) on the y axis
#### Meta-regression
# species meta-regression (no intercept: one mean effect per species)
MA_N_species <- rma.mv(yi=Zr, V=VZr, mod=~Species_latin-1, random=list(~1|Paper_ID,~1|ES_ID), data=DataN, method="REML") #without phylogeny
summary(MA_N_species) # some species very signif + or -
sum(MA_N_species$sigma2)/(sum(MA_N_species$sigma2)+(sum(1/DataN$VZr)*(MA_N_species$k-1)/(sum(1/DataN$VZr)^2-sum((1/DataN$VZr)^2))))*100 # total heterogeneity 58.1%
s2t <- sum(MA_N_species$sigma2) + sum(1/MA_N_species$vi) * (MA_N_species$k-1) / (sum(1/MA_N_species$vi)^2 - sum((1/MA_N_species$vi)^2))
# FIX: multiplied by 100 (as for the phylogenetic model above) so the printed
# values actually match the percentages quoted in the comments
MA_N_species$sigma2[1]/s2t*100 # I^2 between study (id) = 0.00% (studies are almost equivalent to the species)
MA_N_species$sigma2[2]/s2t*100 # I^2 within study or residual = 58.1%
# plots
Names <- paste(DataN$Paper, DataN$Species, sep="_")
# NOTE(review): these re-plot MA_N_phylo from above, not MA_N_species -- confirm intended
forest(MA_N_phylo, slab=Names)
funnel(MA_N_phylo, yaxis="seinv")
plot(tree)
# extract point estimates and their CI for each species
ssp <- summary(MA_N_species)
# substr(..., 14, 36) strips the leading "Species_latin" (13 characters) from the coefficient names
sp_df <- data.frame(sp = substr(attr(ssp$b,"dimnames")[[1]], 14, 36), M = ssp$b, CI.lb = ssp$ci.lb, CI.ub = ssp$ci.ub)
sp_df <- sp_df[match(tree$tip.label, sp_df$sp),] #reorder dataframe to match order of the tip labels on the tree
# tidy up extracted results table: back-transform Fisher's z to r
sp_df$M <- round(transf.ztor(sp_df$M),3)
sp_df$CI.lb <- round(transf.ztor(sp_df$CI.lb),3)
sp_df$CI.ub <- round(transf.ztor(sp_df$CI.ub),3)
sp_df$Signif <- ifelse(sp_df$CI.lb>0 | sp_df$CI.ub<0, "*", "") #add column with stars for estimates whose CI excludes zero
write.csv(sp_df,"MA_MR_dataN_sp_df.csv")
revres_sp <- sp_df[rev(rownames(sp_df)),] #reverse for plotting (from bottom to top); NOTE(review): apparently unused below -- the plot uses sp_df directly
opar <- par() # make a copy of current settings
par(opar) # restore original settings
### PLOT - species phylogeny and forest plot (two panels side by side)
pdf(file="Fig_species.pdf",width=6,height=4,pointsize=9)
par(mfrow=c(1,2))
par(mar=c(4.5,2,1.35,0))
plot(tree, font=1, cex=0.8, x.lim=90, show.tip.label = FALSE) # left panel: tree without tip labels (names go on the forest plot)
par(mar=c(4,6,1,0))
plot(sp_df$M, 1:length(sp_df$M), ylab=NA, yaxt="n", bty="n", xlim=c(-1.5,1.5), ylim=c(0.25, length(sp_df$M)+.5), xlab="effect size [r]", pch=16, cex=0.8, cex.axis=.9) # right panel: per-species point estimates, ordered like the tree tips
abline(v=0,lty=3) # zero-effect reference line
mtext(sp_df$sp, 2, -1, at=1:length(sp_df$M), las=2, cex=.8, font=3) # species names in italics along the y axis
segments(sp_df$CI.lb, 1:length(sp_df$M), sp_df$CI.ub, 1:length(sp_df$M), lwd=1.25) # 95% CI bars
for (i in 1:length(rownames(sp_df))) mtext(sp_df$Signif[i], 2, -1.5, at=i, las=2, cex=.7) #add stars for estimates significantly different from zero
dev.off()
# Phylogenetic meta-regression with death_cause (-1 drops the intercept so each level gets its own mean)
MR_N_phylo <- rma.mv(yi=Zr, V=VZr, mod= ~as.factor(death_cause)-1, random=list(~1|Paper_ID,~1|Species_latin,~1|ES_ID), R=list(Species_latin=CorMatrix), data=DataN, method="REML") #with phylogeny
summary(MR_N_phylo) # yes signif 0.1615 CI 0.0501 0.2729
sum(MR_N_phylo$sigma2)/(sum(MR_N_phylo$sigma2)+(sum(1/DataN$VZr)*(MR_N_phylo$k-1)/(sum(1/DataN$VZr)^2-sum((1/DataN$VZr)^2))))*100 # total heterogeneity 88.4
plot(DataN$Zr ~ DataN$death_cause) # positive values (less predation in urbanised areas) when "yes" = only nests that died from predation are included
resN <- rbind(resN, data.frame(Model=c("Predation as only source of mortality:"," No"," Yes *"), M=c(NA,MR_N_phylo$b), CI.lb=c(NA,MR_N_phylo$ci.lb), CI.ub=c(NA,MR_N_phylo$ci.ub), pch=c(20,20,20))) #harvest results (Zr)
# same model with intercept: coefficient tests the difference between the two levels
MR_N_phylo <- rma.mv(yi=Zr, V=VZr, mod= ~as.factor(death_cause), random=list(~1|Paper_ID,~1|Species_latin,~1|ES_ID), R=list(Species_latin=CorMatrix), data=DataN, method="REML") #with phylogeny
summary(MR_N_phylo) # signif diff 0.1780 CI 0.0155 0.3404 *
#MR_N <- rma.mv(yi=Zr, V=VZr, mod= ~as.factor(death_cause), random=list(~1|Paper_ID,~1|ES_ID), data=DataN, method="REML") #without phylogeny
#summary(MR_N) # signif diff 0.1780 CI 0.0155 0.3404 * - same result without phylogeny
# Phylogenetic meta-regression with Nest_openness
MR_N_phylo <- rma.mv(yi=Zr, V=VZr, mod= ~as.factor(Nest_openness)-1, random=list(~1|Paper_ID,~1|Species_latin,~1|ES_ID), R=list(Species_latin=CorMatrix), data=DataN, method="REML") #with phylogeny
summary(MR_N_phylo) # hole signif 0.2227 CI 0.0320 0.4133
#MR_N <- rma.mv(yi=Zr, V=VZr, mod= ~as.factor(Nest_openness)-1, random=list(~1|Paper_ID,~1|ES_ID), data=DataN, method="REML") #without phylogeny
#summary(MR_N) # hole signif 0.2227 CI 0.0320 0.4133 - same result without phylogeny
sum(MR_N_phylo$sigma2)/(sum(MR_N_phylo$sigma2)+(sum(1/DataN$VZr)*(MR_N_phylo$k-1)/(sum(1/DataN$VZr)^2-sum((1/DataN$VZr)^2))))*100 # total heterogeneity 89.6
plot(DataN$Zr ~ DataN$Nest_openness)
resN <- rbind(resN, data.frame(Model=c("Nest openness:"," Cup"," Hole *"," Orb"), M=c(NA,MR_N_phylo$b), CI.lb=c(NA,MR_N_phylo$ci.lb), CI.ub=c(NA,MR_N_phylo$ci.ub), pch=c(20,20,20,20))) #harvest results (Zr)
# Phylogenetic meta-regression with Nest_position
MR_N_phylo <- rma.mv(yi=Zr, V=VZr, mod= ~as.factor(Nest_position)-1, random=list(~1|Paper_ID,~1|Species_latin,~1|ES_ID), R=list(Species_latin=CorMatrix), data=DataN, method="REML") #with phylogeny
summary(MR_N_phylo) # all ns
sum(MR_N_phylo$sigma2)/(sum(MR_N_phylo$sigma2)+(sum(1/DataN$VZr)*(MR_N_phylo$k-1)/(sum(1/DataN$VZr)^2-sum((1/DataN$VZr)^2))))*100 # total heterogeneity 90.6
plot(DataN$Zr ~ DataN$Nest_position) # more positive values in ground-located nests
resN <- rbind(resN, data.frame(Model=c("Nest position:"," Elevated"," Ground"," Mix"), M=c(NA,MR_N_phylo$b), CI.lb=c(NA,MR_N_phylo$ci.lb), CI.ub=c(NA,MR_N_phylo$ci.ub), pch=c(20,20,20,20))) #harvest results (Zr)
# Phylogenetic meta-regression with Nest_height (NOTE: 31 NA, 23 present)
# MR_N_phylo <- rma.mv(yi=Zr, V=VZr, mod= ~scale(Nest_height), random=list(~1|Paper_ID,~1|Species_latin,~1|ES_ID), R=list(Species_latin=CorMatrix), data=DataN, method="REML") #with phylogeny
# summary(MR_N_phylo) # scale(Nest_height) signif slope -0.1347 CI -0.2677 -0.0018 - in low-placed nests tendency for positive Zr, in high nests negative Zr
# sum(MR_N_phylo$sigma2)/(sum(MR_N_phylo$sigma2)+(sum(1/DataN$VZr)*(MR_N_phylo$k-1)/(sum(1/DataN$VZr)^2-sum((1/DataN$VZr)^2))))*100 # total heterogeneity 91.92
# plot(DataN$Zr ~ DataN$Nest_height) # positive values more likely in lower nests, negative values in nests higher than 6m
# Phylogenetic meta-regression with log(Nest_height+1) (log reduces influence of the few very tall nests)
MR_N_phylo <- rma.mv(yi=Zr, V=VZr, mod= ~scale(log(Nest_height+1)), random=list(~1|Paper_ID,~1|Species_latin,~1|ES_ID), R=list(Species_latin=CorMatrix), data=DataN, method="REML") #with phylogeny
summary(MR_N_phylo) # similar ns trend as above -0.0991 -0.2099 0.0117
plot(DataN$Zr ~ log(DataN$Nest_height+1))
resN <- rbind(resN, data.frame(Model="Nest height above ground (slope)", M=MR_N_phylo$b[2], CI.lb=MR_N_phylo$ci.lb[2], CI.ub=MR_N_phylo$ci.ub[2], pch=20)) #harvest results (Zr)
hist(DataN$Nest_height[DataN$death_cause=="yes"], col="blue", xlim=c(0,25))
hist(DataN$Nest_height[DataN$death_cause=="no"], col=rgb(0, 0, 0, 0.5), add=TRUE)
table(DataN$death_cause, DataN$Nest_height) # measure predation = "no" in nest height > 10
t.test(DataN$Nest_height ~ DataN$death_cause, var.equal = TRUE) #t = 3.3704, df = 26, p-value = 0.002354
# high nests usually report overall mortality, not mortality by predation
table(is.na(DataN$Nest_height), DataN$Nest_position) # 30 missing height values are from elevated nests, i.e. only a subset of heights known
# Phylogenetic meta-regression with Egg_number (NOTE: 16 NA)
MR_N_phylo <- rma.mv(yi=Zr, V=VZr, mod= ~scale(DataN$Egg_number), random=list(~1|Paper_ID,~1|Species_latin,~1|ES_ID), R=list(Species_latin=CorMatrix), data=DataN, method="REML") #with phylogeny
summary(MR_N_phylo) # all ns
sum(MR_N_phylo$sigma2)/(sum(MR_N_phylo$sigma2)+(sum(1/DataN$VZr)*(MR_N_phylo$k-1)/(sum(1/DataN$VZr)^2-sum((1/DataN$VZr)^2))))*100 # total heterogeneity 94.4
plot(DataN$Zr ~ DataN$Egg_number)
resN <- rbind(resN, data.frame(Model="Egg number (slope)", M=MR_N_phylo$b[2], CI.lb=MR_N_phylo$ci.lb[2], CI.ub=MR_N_phylo$ci.ub[2], pch=20)) #harvest results (Zr)
# Phylogenetic meta-regression with Study_days (NOTE: 28 NA)
MR_N_phylo <- rma.mv(yi=Zr, V=VZr, mod= ~scale(DataN$Study_days), random=list(~1|Paper_ID,~1|Species_latin,~1|ES_ID), R=list(Species_latin=CorMatrix), data=DataN, method="REML") #with phylogeny
summary(MR_N_phylo) # all ns
sum(MR_N_phylo$sigma2)/(sum(MR_N_phylo$sigma2)+(sum(1/DataN$VZr)*(MR_N_phylo$k-1)/(sum(1/DataN$VZr)^2-sum((1/DataN$VZr)^2))))*100 # total heterogeneity 98.2
plot(DataN$Zr ~ DataN$Study_days)
resN <- rbind(resN, data.frame(Model="Study duration (slope)", M=MR_N_phylo$b[2], CI.lb=MR_N_phylo$ci.lb[2], CI.ub=MR_N_phylo$ci.ub[2], pch=20)) #harvest results (Zr)
# Phylogenetic meta-regression with Median_year as moderator
MR_N_phylo <- rma.mv(yi=Zr, V=VZr, mod= ~scale(DataN$Median_year), random=list(~1|Paper_ID,~1|Species_latin,~1|ES_ID), R=list(Species_latin=CorMatrix), data=DataN, method="REML") #with phylogeny
summary(MR_N_phylo) # all ns
sum(MR_N_phylo$sigma2)/(sum(MR_N_phylo$sigma2)+(sum(1/DataN$VZr)*(MR_N_phylo$k-1)/(sum(1/DataN$VZr)^2-sum((1/DataN$VZr)^2))))*100 # total heterogeneity 90.6
plot(DataN$Zr ~ DataN$Median_year) # all ns
resN <- rbind(resN, data.frame(Model="Median study year (slope)", M=MR_N_phylo$b[2], CI.lb=MR_N_phylo$ci.lb[2], CI.ub=MR_N_phylo$ci.ub[2], pch=20)) #harvest results (Zr)
hist(DataN$Median_year[DataN$death_cause=="yes"], col="blue")
hist(DataN$Median_year[DataN$death_cause=="no"], col=rgb(0, 0, 0, 0.5), add=TRUE)
table(DataN$death_cause, DataN$Median_year) # recent studies more likely to only measure predation = "yes"
t.test(DataN$Median_year ~ DataN$death_cause, var.equal = FALSE) #t = -2.3125, df = 26.758, p-value = 0.02868
# Phylogenetic meta-regression with Pub_year as moderator
MR_N_phylo <- rma.mv(yi=Zr, V=VZr, mod= ~scale(DataN$Pub_year), random=list(~1|Paper_ID,~1|Species_latin,~1|ES_ID), R=list(Species_latin=CorMatrix), data=DataN, method="REML") #with phylogeny
summary(MR_N_phylo) # all ns
sum(MR_N_phylo$sigma2)/(sum(MR_N_phylo$sigma2)+(sum(1/DataN$VZr)*(MR_N_phylo$k-1)/(sum(1/DataN$VZr)^2-sum((1/DataN$VZr)^2))))*100 # total heterogeneity 90.2
plot(DataN$Zr ~ DataN$Pub_year)
resN <- rbind(resN, data.frame(Model="Publication year (slope)", M=MR_N_phylo$b[2], CI.lb=MR_N_phylo$ci.lb[2], CI.ub=MR_N_phylo$ci.ub[2], pch=20)) #harvest results (Zr)
# Phylogenetic meta-regression with urbmin as a factor
MR_N_phylo <- rma.mv(yi=Zr, V=VZr, mod= ~as.factor(urbmin)-1, random=list(~1|Paper_ID,~1|Species_latin,~1|ES_ID), R=list(Species_latin=CorMatrix), data=DataN, method="REML") #with phylogeny
summary(MR_N_phylo) # all ns
sum(MR_N_phylo$sigma2)/(sum(MR_N_phylo$sigma2)+(sum(1/DataN$VZr)*(MR_N_phylo$k-1)/(sum(1/DataN$VZr)^2-sum((1/DataN$VZr)^2))))*100 # total heterogeneity 94.2
plot(DataN$Zr ~ DataN$urbmin) # only one data point at urbmin=4
resN <- rbind(resN, data.frame(Model=c("Min urbanisation score:"," 1"," 2"," 3"," 4"), M=c(NA,MR_N_phylo$b), CI.lb=c(NA,MR_N_phylo$ci.lb), CI.ub=c(NA,MR_N_phylo$ci.ub), pch=c(20,20,20,20,20))) #harvest results (Zr)
# Phylogenetic meta-regression with urbmax as a factor
MR_N_phylo <- rma.mv(yi=Zr, V=VZr, mod= ~as.factor(urbmax)-1, random=list(~1|Paper_ID,~1|Species_latin,~1|ES_ID), R=list(Species_latin=CorMatrix), data=DataN, method="REML") #with phylogeny
summary(MR_N_phylo) # all ns
sum(MR_N_phylo$sigma2)/(sum(MR_N_phylo$sigma2)+(sum(1/DataN$VZr)*(MR_N_phylo$k-1)/(sum(1/DataN$VZr)^2-sum((1/DataN$VZr)^2))))*100 # total heterogeneity 90.6
plot(DataN$Zr ~ DataN$urbmax)
# FIX: results-table label said "Min urbanisation score:" for the urbmax model
# (copy-paste from the urbmin block above); corrected to "Max"
resN <- rbind(resN, data.frame(Model=c("Max urbanisation score:"," 3"," 4"," 5"), M=c(NA,MR_N_phylo$b), CI.lb=c(NA,MR_N_phylo$ci.lb), CI.ub=c(NA,MR_N_phylo$ci.ub), pch=c(20,20,20,20))) #harvest results (Zr)
# meta-regression with urbmin_scaled*urbmax_scaled interaction (as continuous predictors)
MR_N_phylo <- rma.mv(yi=Zr, V=VZr, random=list(~1|Paper_ID,~1|ES_ID), mod=~scale(urbmin) * scale(urbmax), method="REML", data=DataN)
summary(MR_N_phylo) # ns interaction
# meta-regression with Death_cause and Nest_openness, without interaction
MR_N_phylo <- rma.mv(yi=Zr, V=VZr, mod= ~as.factor(death_cause) + as.factor(Nest_openness) -1, random=list(~1|Paper_ID,~1|Species_latin,~1|ES_ID), R=list(Species_latin=CorMatrix), data=DataN, method="REML") #with phylogeny
summary(MR_N_phylo) # (death_cause)yes signif 0.1711 0.0570 0.2851
# meta-regression with Death_cause*Nest_openness interaction
MR_N_phylo <- rma.mv(yi=Zr, V=VZr, mod= ~as.factor(death_cause) * as.factor(Nest_openness), random=list(~1|Paper_ID,~1|Species_latin,~1|ES_ID), R=list(Species_latin=CorMatrix), data=DataN, method="REML") #with phylogeny
summary(MR_N_phylo) # signif interaction
# refit without intercept to extract the per-combination means for the results table
MR_N_phylo1 <- rma.mv(yi=Zr, V=VZr, mod= ~as.factor(death_cause) * as.factor(Nest_openness) - 1, random=list(~1|Paper_ID,~1|Species_latin,~1|ES_ID), R=list(Species_latin=CorMatrix), data=DataN, method="REML") #with phylogeny
summary(MR_N_phylo1) # signif interaction
resN <- rbind(resN, data.frame(Model=c("Multivariate meta-regression:"," Cup - No"," Cup - Yes *"), M=c(NA,MR_N_phylo1$b[1:2]), CI.lb=c(NA,MR_N_phylo1$ci.lb[1:2]), CI.ub=c(NA,MR_N_phylo1$ci.ub[1:2]), pch=c(20,20,20))) #harvest results (Zr)
# relevel to "hole" so its combinations come first in the coefficient vector
MR_N_phylo2 <- rma.mv(yi=Zr, V=VZr, mod= ~as.factor(death_cause) * as.factor(relevel(Nest_openness, ref="hole")) - 1, random=list(~1|Paper_ID,~1|Species_latin,~1|ES_ID), R=list(Species_latin=CorMatrix), data=DataN, method="REML") #with phylogeny
summary(MR_N_phylo2) # signif interaction
resN <- rbind(resN, data.frame(Model=c(" Hole - No"," Hole - Yes *"), M=c(MR_N_phylo2$b[1:2]), CI.lb=c(MR_N_phylo2$ci.lb[1:2]), CI.ub=c(MR_N_phylo2$ci.ub[1:2]), pch=c(20,20))) #harvest results (Zr)
# relevel to "orb" for the remaining combination
MR_N_phylo3 <- rma.mv(yi=Zr, V=VZr, mod= ~as.factor(death_cause) * as.factor(relevel(Nest_openness, ref="orb")) - 1, random=list(~1|Paper_ID,~1|Species_latin,~1|ES_ID), R=list(Species_latin=CorMatrix), data=DataN, method="REML") #with phylogeny
summary(MR_N_phylo3) # signif interaction
resN <- rbind(resN, data.frame(Model=c(" Orb - Yes"), M=c(MR_N_phylo3$b[2]), CI.lb=c(MR_N_phylo3$ci.lb[2]), CI.ub=c(MR_N_phylo3$ci.ub[2]), pch=c(20))) #harvest results (Zr)
resN <- rbind(resN, data.frame(Model=c(" Nest openness - Death cause interaction *"), M=c(MR_N_phylo3$b[5]), CI.lb=c(MR_N_phylo3$ci.lb[5]), CI.ub=c(MR_N_phylo3$ci.ub[5]), pch=c(20))) #harvest results (Zr)
table(DataN$death_cause, DataN$Nest_openness) # only 5 data points are hole/yes, 2 are orb/yes, 0 are orb/no
boxplot(DataN$Zr ~ DataN$death_cause * DataN$Nest_openness, varwidth=TRUE) #most positive values in hole/yes subset - less predation with increasing urbanisation
# tidy up extracted results table: back-transform Fisher's z to r
resN$M <- round(transf.ztor(resN$M),3)
resN$CI.lb <- round(transf.ztor(resN$CI.lb),3)
resN$CI.ub <- round(transf.ztor(resN$CI.ub),3)
write.csv(resN,"MA_MR_dataN_resN.csv")
revresN <- resN[rev(rownames(resN)),] #reverse for plotting (from bottom to top)
### PLOT - MA and MR models on all data (forest-style summary of resN)
opar <- par() # make a copy of current settings
#par(opar) # restore original settings
pdf(file="Fig_MA_MR_dataN.pdf",width=4,height=6,pointsize=10)
par(mfrow=c(1,1))
par(mar=c(4,10,2,0)) # wide left margin for model labels
plot(revresN$M, 1:length(revresN$M), ylab=NA, yaxt="n", bty="n", xlim=c(-0.8,0.8), ylim=c(0.25, length(revresN$M)+.5), xlab="effect size [r]", pch=revresN$pch, cex=1.1, cex.axis=.9)
abline(v=0,lty=3) # zero-effect reference line
mtext(revresN$Model, 2, 9, at=1:length(revresN$M), las=2, cex=.8, font=1, adj=0) # model names as row labels
segments(revresN$CI.lb, 1:length(revresN$M), revresN$CI.ub, 1:length(revresN$M), lwd=1.25) # 95% CI bars
dev.off()
## OVERALL for natural nests: tendency for less predation with increasing urbanisation (positive Zr) - small overall effect size,
# this effect is more pronounced when only nests lost to predation are included
# and not much effect of urbanisation when other causes of mortality (confounding) are potentially present.
# Tendency for more negative values in nests higher above ground - more mortality in urbanised areas,
# in lower nests more positive values more likely - less predation in urbanised areas,
# but this result is likely to be related to the death_cause variable.
### PLOT - bubble plots for natural nests: r-death_cause, r-nest_height
# (symbol area scaled by precision: circle radius = sqrt(1/VZr))
plot(DataN$Zr ~ DataN$death_cause) # positive values (less predation in urbanised areas) when "yes" = only nests that died from predation are included
plot(DataN$Zr ~ DataN$Nest_height) # K=23
pdf(file="Fig_bubble2_dataN.pdf",width=6,height=4,pointsize=10)
par(mfrow=c(1,2))
par(mar=c(4,4,4,2))
#A
symbols(DataN$death_cause, DataN$rho, circles=sqrt(1/DataN$VZr),inches=0.4, xlab="Predation as only source of mortality",ylab="effect size [r]",main="A",xlim=c(0.5,2.5),xaxp=c(1,2,1), xaxt="n")
axis(1, c(1,2), labels=c("No","Yes")) # manual x axis for the factor levels
abline(h=0,lty=3)
#B
symbols(DataN$Nest_openness, DataN$rho, circles=sqrt(1/DataN$VZr),inches=0.4,xlab="Nest openness",ylab="effect size [r]",main="B",xlim=c(0,3.5),xaxp=c(1,2,1), xaxt="n")
axis(1, c(1,2,3), labels=c("Cup","Hole","Orb"))
abline(h=0,lty=3)
dev.off()
# version 2: split by death_cause instead of pooling
pdf(file="Fig_bubble2_dataN_v2.pdf",width=6,height=4,pointsize=10)
par(mfrow=c(1,2))
par(mar=c(4,4,4,2))
DataNy <- subset(DataN, DataN$death_cause=="yes", select=c(Nest_openness, rho, VZr))
DataNn <- subset(DataN, DataN$death_cause=="no", select=c(Nest_openness, rho, VZr))
#A
symbols(DataNy$Nest_openness, DataNy$rho, circles=sqrt(1/DataNy$VZr),inches=0.4, xlab="Nest openness",ylab="effect size [r]",main="A. Only mortality from predation",ylim=c(-1,1),xlim=c(0,3.5),xaxp=c(1,2,1), xaxt="n")
axis(1, c(1,2,3), labels=c("Cup","Hole","Orb"))
abline(h=0,lty=3)
#B
symbols(DataNn$Nest_openness, DataNn$rho, circles=sqrt(1/DataNn$VZr),inches=0.4,xlab="Nest openness",ylab="effect size [r]",main="B. Mortality from all sources",ylim=c(-1,1),xlim=c(0,3.5),xaxp=c(1,2,1), xaxt="n")
axis(1, c(1,2,3), labels=c("Cup","Hole","Orb"))
abline(h=0,lty=3)
dev.off()
# four-panel version combining both layouts above
pdf(file="Fig_bubble4_dataN.pdf",width=6,height=6,pointsize=10)
par(mfrow=c(2,2))
par(mar=c(4,4,4,2))
#A
symbols(DataN$death_cause, DataN$rho, circles=sqrt(1/DataN$VZr),inches=0.4, xlab="Predation as only source of mortality",ylab="effect size [r]",main="A. Mortality sources",xlim=c(0.5,2.5),xaxp=c(1,2,1), xaxt="n")
axis(1, c(1,2), labels=c("No","Yes"))
abline(h=0,lty=3)
#B
symbols(DataN$Nest_openness, DataN$rho, circles=sqrt(1/DataN$VZr),inches=0.4,xlab="Nest openness",ylab="effect size [r]",main="B. Nest openness",xlim=c(0,3.5),xaxp=c(1,2,1), xaxt="n")
axis(1, c(1,2,3), labels=c("Cup","Hole","Orb"))
abline(h=0,lty=3)
#C
DataNy <- subset(DataN, DataN$death_cause=="yes", select=c(Nest_openness, rho, VZr))
symbols(DataNy$Nest_openness, DataNy$rho, circles=sqrt(1/DataNy$VZr),inches=0.4, xlab="Nest openness",ylab="effect size [r]",main="C. Only mortality from predation",ylim=c(-1,1),xlim=c(0,3.5),xaxp=c(1,2,1), xaxt="n")
axis(1, c(1,2,3), labels=c("Cup","Hole","Orb"))
abline(h=0,lty=3)
#D
DataNn <- subset(DataN, DataN$death_cause=="no", select=c(Nest_openness, rho, VZr))
symbols(DataNn$Nest_openness, DataNn$rho, circles=sqrt(1/DataNn$VZr),inches=0.4,xlab="Nest openness",ylab="effect size [r]",main="D. Mortality from all sources",ylim=c(-1,1),xlim=c(0,3.5),xaxp=c(1,2,1), xaxt="n")
axis(1, c(1,2,3), labels=c("Cup","Hole","Orb"))
abline(h=0,lty=3)
dev.off()
### PLOT - Fig_MA_MR_dataA and Fig_MA_MR_dataN subsets in one figure (skip min and max scores)
pdf(file="Fig_MA_MR_dataA_dataN.pdf",width=9,height=5,pointsize=10)
par(mfcol=c(1,2))
par(mar=c(4,10,2,0))
# NOTE(review): revresA comes from the artificial-nest analysis earlier in the
# script; rows before 15 (the urbanisation-score entries) are dropped here
revresAA <- revresA[c(15:length(revresA$M)), ]
plot(revresAA$M, 1:length(revresAA$M), ylab=NA, yaxt="n", bty="n", xlim=c(-0.6,0.6), ylim=c(0.25, length(revresAA$M)+.5), xlab="effect size [r]", pch=revresAA$pch, cex=1.1, cex.axis=.9, main="A. Artificial nests")
abline(v=0,lty=3)
mtext(revresAA$Model, 2, 9, at=1:length(revresAA$M), las=2, cex=.8, font=1, adj=0)
segments(revresAA$CI.lb, 1:length(revresAA$M), revresAA$CI.ub, 1:length(revresAA$M), lwd=1.25)
revresNN <- revresN[c(17:length(revresN$M)), ]
plot(revresNN$M, 1:length(revresNN$M), ylab=NA, yaxt="n", bty="n", xlim=c(-0.6,0.6), ylim=c(0.25, length(revresNN$M)+.5), xlab="effect size [r]", pch=revresNN$pch, cex=1.1, cex.axis=.9, main="B. Natural nests")
abline(v=0,lty=3)
mtext(revresNN$Model, 2, 9, at=1:length(revresNN$M), las=2, cex=.8, font=1, adj=0)
segments(revresNN$CI.lb, 1:length(revresNN$M), revresNN$CI.ub, 1:length(revresNN$M), lwd=1.25)
dev.off()
#mtext("a)",side=2,line=7,at=14,las=2)
###### PUBLICATION BIAS
# nest-type meta-regression on the full data set; bias tests run on its residuals
MR_nest_intc <- rma.mv(yi=Zr, V=VZr, random=list(~1|Paper_ID,~1|ES_ID), mod=~Nest_type-1, method="REML", data=Data)
summary(MR_nest_intc) # in artificial nests signif negative r (more predation with incr urbanization)
# for natural nests ns positive r (less predation with incr urbanization)
Residuals <- residuals(MR_nest_intc)
Precision <- sqrt(1/MR_nest_intc$vi)
plot(Residuals,Precision, xlim=c(-1,1), xlab="Residuals", ylab="Precision [1/SE]")
abline(v=0,lty=3)
# meta-analysis of the residuals for trim-and-fill / Egger's test
model <- rma(yi=Residuals,sei=1/Precision)
summary(model) # ns est -0.0074 CI -0.0547 0.0400
# FIX: argument spelled out (was 'yaxi=', which only worked via partial matching)
funnel(model,yaxis="seinv")
#Trim and fill
TF <- trimfill(model)
TF #Estimated number of missing studies on the right side: 0 (SE = 6.2815)
funnel(TF)
#Egger's regression test
regtest(model,model="lm") #test for funnel plot asymmetry: t = 0.6458, df = 115, p = 0.5197
ranktest(model) #Kendall's tau = -0.0540, p = 0.3897 (warning about ties, do not use)
### PLOT - funnel plots
pdf(file="Fig_funnels2_alldata.pdf",width=6,height=4,pointsize=10)
par(mfcol=c(1,2))
par(mar=c(4,4,2,1))
plot(Data$rho, sqrt(1/Data$variance), xlim=c(-1.2,1.2), xlab = "Effect size [r]", ylab="Precision [1/SE]", main="A")
abline(v=0,lty=3)
plot(Residuals,Precision, xlim=c(-1.2,1.2), xlab = "Residuals", ylab="Precision [1/SE]", main="B")
abline(v=0,lty=3)
dev.off()
|
73b1830db362af2287866f3ee263cbf5791b1451 | 3648369d01c8a80107c3a05c6a79ff8c2bd9f033 | /R-scripts/2_Modeling_richness.R | 7eb31c77a30526104ff6baa546438d1f2f1a2b12 | [] | no_license | KIT-IfGG/SpeciesRichness-GLMvsRF-LiDAR | 86fe025dea9fb9a639c0798fb5d8068841b266ad | db8f06190ca51b3c3f90bdedab4faba49c2488bb | refs/heads/master | 2021-01-18T15:41:56.669178 | 2017-07-19T11:48:01 | 2017-07-19T11:48:01 | 63,490,766 | 0 | 1 | null | 2017-07-19T11:48:02 | 2016-07-16T16:02:06 | Jupyter Notebook | UTF-8 | R | false | false | 11,581 | r | 2_Modeling_richness.R | ## R-Script - modeling species richness
## author: Javier Lopatin
## mail:javierlopatin@gmail.com
## Manuscript: Comparing Generalized Linear Models and random forest to model vascular plant species richness using LiDAR data in a natural forest in central Chile
## last changes: 12/11/2015
library(lme4)
library(hier.part)
library(splines)
library(MASS)
library(randomForest)
##### set working directory
setwd("direction/to/your/folder")
#### Load data
dat <- read.table("Richness_model.csv", header=T, sep=",", dec=".")
attach(dat)
summary(dat)
###############################
## Prepare Bootstrap samples
###############################
set.seed(550)
N <- length(dat[,1]) # number of observations
B <- 500             # number of bootstrap iterations
# For each iteration draw N row indices with replacement: the drawn rows form
# the training set, the out-of-bag rows form the validation set.  One sample()
# call per iteration, so the RNG stream matches the original loop exactly.
boot_splits <- lapply(seq_len(B), function(b) {
  in_bag <- sample(1:N, N, replace=TRUE)
  list(train = dat[in_bag,], validation = dat[-in_bag,])
})
train <- lapply(boot_splits, `[[`, "train")
validation <- lapply(boot_splits, `[[`, "validation")
#################################################
## start regression modelling with Random Forest
#################################################
# create empty lists in which the models accuracies can be stored
# Obs = observed variable
# Pred = predicted values (one vector per bootstrap iteration)
# r2 = squared Pearson's correlation
# Nrmse = normalized root mean squared error
# imp = variable importance
# bias = bias of the model (1 - slope of Pred ~ Obs through the origin)
# Total
Obs.rf<-list()
Pred.rf<-list()
r2.rf<-list()
rmse.rf<-list()
Nrmse.rf<-list()
imp.rf<-list()
bias.rf<-list()
# Tree
Obs.rf.A<-list()
Pred.rf.A<-list()
r2.rf.A<-list()
rmse.rf.A<-list()
Nrmse.rf.A<-list()
imp.rf.A<-list()
bias.rf.A <- list()
# Shrub
Obs.rf.AR<-list()
Pred.rf.AR<-list()
r2.rf.AR<-list()
rmse.rf.AR<-list()
Nrmse.rf.AR<-list()
imp.rf.AR<-list()
bias.rf.AR <- list()
# Herbs
Obs.rf.H<-list()
Pred.rf.H<-list()
r2.rf.H<-list()
rmse.rf.H<-list()
Nrmse.rf.H<-list()
imp.rf.H<-list()
bias.rf.H<-list()
# Run RF: per bootstrap iteration, fit one random forest per richness response
# (total / tree / shrub / herb), predict the out-of-bag rows and harvest
# accuracy metrics plus permutation variable importance.
for(i in seq_len(B)){
  TRAIN <- train[[i]]
  VALIDATION <- validation[[i]]
  len <- length(VALIDATION[,1]) # number of out-of-bag observations
  # store and select the observations (column 2 = total richness)
  obs <- VALIDATION[,2]
  Obs.rf[[i]] <- obs
  # select the predictors for train and validation; na.roughfix imputes NAs by column medians
  variables <- na.roughfix(TRAIN[,6:17])
  variables.t <- na.roughfix(VALIDATION[,6:17])
  # run the RF model using ntree=500 (selected by bibliography) and mtry=7 (selected after initial tuning procedure)
  # FIX 1: argument is 'ntree', not 'ntrees' (the misspelled name was silently
  #        swallowed by '...'; the default happens to be 500, so results match)
  # FIX 2: importance=TRUE (was F) -- importance(..., type=1) below requires the
  #        permutation importance computed at fit time, as the other three fits do
  RF_total <- randomForest(TRAIN[,2] ~ ., data=variables, ntree=500, na.action=na.roughfix, importance=TRUE, do.trace=100, mtry=7)
  # predict richness for the out-of-bag rows
  Pred <- predict(RF_total, variables.t)
  # store the model accuracies
  Pred <- Pred[1:len]
  Pred.rf[[i]] <- Pred
  r2.rf[[i]] <- (cor(Pred, obs, method="pearson"))^2
  s1 <- sqrt(mean((obs-Pred)^2))                # RMSE
  rmse.rf[[i]] <- s1
  Nrmse.rf[[i]] <- (s1/(max(obs)-min(obs)))*100 # RMSE normalised by observed range, in %
  imp.rf[[i]] <- importance(RF_total, type=1)   # permutation importance (mean decrease in accuracy)
  fit <- lm(Pred ~ obs-1)                       # regression through the origin; slope 1 = unbiased
  bias.rf[[i]] <- 1-coef(fit)
  # starting here this process is repeated for tree, shrub and herb richness
  # tree
  obs <- VALIDATION[,3]
  Obs.rf.A[[i]] <- obs
  RF_tree <- randomForest(TRAIN$Tree_richness ~ ., data=variables, ntree=500, na.action=na.roughfix, importance=TRUE, do.trace=100, mtry=12)
  Pred <- predict(RF_tree, variables.t)
  Pred <- Pred[1:len]
  Pred.rf.A[[i]] <- Pred
  r2.rf.A[[i]] <- (cor(Pred, obs, method="pearson"))^2
  s1 <- sqrt(mean((obs-Pred)^2))
  rmse.rf.A[[i]] <- s1
  Nrmse.rf.A[[i]] <- (s1/(max(obs)-min(obs)))*100
  imp.rf.A[[i]] <- importance(RF_tree, type=1)
  fit <- lm(Pred ~ obs-1)
  bias.rf.A[[i]] <- 1-coef(fit)
  # shrub
  obs <- VALIDATION[,4]
  Obs.rf.AR[[i]] <- obs
  RF_shrub <- randomForest(TRAIN$Shrub_richness ~ ., data=variables, ntree=500, na.action=na.roughfix, importance=TRUE, do.trace=100, mtry=7)
  Pred <- predict(RF_shrub, variables.t)
  Pred <- Pred[1:len]
  Pred.rf.AR[[i]] <- Pred
  r2.rf.AR[[i]] <- (cor(Pred, obs, method="pearson"))^2
  s1 <- sqrt(mean((obs-Pred)^2))
  rmse.rf.AR[[i]] <- s1
  Nrmse.rf.AR[[i]] <- (s1/(max(obs)-min(obs)))*100
  imp.rf.AR[[i]] <- importance(RF_shrub, type=1)
  fit <- lm(Pred ~ obs-1)
  bias.rf.AR[[i]] <- 1-coef(fit)
  # herb
  obs <- VALIDATION[,5]
  Obs.rf.H[[i]] <- obs
  RF_herb <- randomForest(TRAIN$Herb_richness ~ ., data=variables, ntree=500, na.action=na.roughfix, importance=TRUE, do.trace=100, mtry=12)
  Pred <- predict(RF_herb, variables.t)
  Pred <- Pred[1:len]
  Pred.rf.H[[i]] <- Pred
  r2.rf.H[[i]] <- (cor(Pred, obs, method="pearson"))^2
  s1 <- sqrt(mean((obs-Pred)^2))
  rmse.rf.H[[i]] <- s1
  Nrmse.rf.H[[i]] <- (s1/(max(obs)-min(obs)))*100
  imp.rf.H[[i]] <- importance(RF_herb, type=1)
  fit <- lm(Pred ~ obs-1)
  bias.rf.H[[i]] <- 1-coef(fit)
}
######################################
## start regression modelling with GLM
######################################
# create empty lists in which the models accuracies can be stored
# (ID = plot IDs of the out-of-bag rows; Obs/Pred = observed/predicted richness;
#  r2, rmse, Nrmse, bias as in the RF section; imp = hier.part importance)
# Total
ID.nb<-list()
Obs.nb<-list()
Pred.nb<-list()
r2.nb<-list()
rmse.nb<-list()
Nrmse.nb<-list()
imp.nb<-list()
bias.nb <- list()
# Tree
ID.nb.A<-list()
Obs.nb.A<-list()
Pred.nb.A<-list()
r2.nb.A<-list()
rmse.nb.A<-list()
Nrmse.nb.A<-list()
imp.nb.A<-list()
bias.nb.A <- list()
# Shrub
ID.nb.AR<-list()
Obs.nb.AR<-list()
Pred.nb.AR<-list()
r2.nb.AR<-list()
rmse.nb.AR<-list()
Nrmse.nb.AR<-list()
imp.nb.AR<-list()
bias.nb.AR <- list()
# Herb
ID.nb.H<-list()
Obs.nb.H<-list()
Pred.nb.H<-list()
r2.nb.H<-list()
rmse.nb.H<-list()
Nrmse.nb.H<-list()
imp.nb.H<-list()
bias.nb.H <- list()
# Run GLM: per bootstrap iteration, fit one negative-binomial GLM per richness
# response, predict the out-of-bag rows, and harvest accuracy metrics plus
# hierarchical-partitioning variable importance.
for(i in seq_len(B)){
  TRAIN <- train[[i]]
  VALIDATION <- validation[[i]]
  len <- length(VALIDATION[,1]) # number of out-of-bag observations
  # plot IDs of the out-of-bag rows (identical for all four responses)
  ID <- VALIDATION$ID
  ID.nb[[i]] <- ID
  obs <- VALIDATION[,2] # observed total richness
  Obs.nb[[i]] <- obs
  # run the GLM using the Negative Binomial family; the three predictors were selected in a previous tuning procedure
  GLM_total <- glm(Total_richness ~ one_mean + DTM_1_mean + slope_1m_std, data=TRAIN, family=negative.binomial(theta=1 , link="log"))
  # predict richness on the response scale (stats:: is enough; predict is exported)
  Pred <- stats::predict(GLM_total, newdata=VALIDATION, type="response")
  # store the model accuracies
  Pred.nb[[i]] <- Pred
  r2.nb[[i]] <- (cor(Pred, obs, method="pearson"))^2
  s1 <- sqrt(mean((obs-Pred)^2))                # RMSE
  rmse.nb[[i]] <- s1
  Nrmse.nb[[i]] <- (s1/(max(obs)-min(obs)))*100 # RMSE normalised by observed range, in %
  # variable importance: independent effects (%) from hierarchical partitioning
  hp <- hier.part(VALIDATION$Total_richness, VALIDATION[,c("DTM_1_mean", "slope_1m_std", "norm_H_1_mean", "Asp_1m", "TWI_1m", "one_mean", "one_std", "homogeneity_1", "contrast_1", "dissimilarity_1", "entropy_1", "second_moment_1" )], family=negative.binomial(theta=1 , link="log"))
  imp.nb[[i]] <- hp$I.perc
  fit <- lm(Pred ~ obs-1)                       # regression through the origin; slope 1 = unbiased
  bias.nb[[i]] <- 1-coef(fit)
  # the same process is repeated for tree, shrub and herb richness
  # tree
  ID.nb.A[[i]] <- ID
  obs <- VALIDATION[,3]
  Obs.nb.A[[i]] <- obs
  GLM_tree <- glm(Tree_richness ~ one_mean + DTM_1_mean + slope_1m_std, data=TRAIN, family=negative.binomial(theta=1 , link="log"))
  Pred <- stats::predict(GLM_tree, newdata=VALIDATION, type="response")
  Pred.nb.A[[i]] <- Pred
  r2.nb.A[[i]] <- (cor(Pred, obs, method="pearson"))^2
  s1 <- sqrt(mean((obs-Pred)^2))
  rmse.nb.A[[i]] <- s1
  Nrmse.nb.A[[i]] <- (s1/(max(obs)-min(obs)))*100
  # FIX: importance was computed against Total_richness (copy-paste); use the
  # tree-richness response this section actually models
  hp <- hier.part(VALIDATION$Tree_richness, VALIDATION[,c("DTM_1_mean", "slope_1m_std", "norm_H_1_mean", "Asp_1m", "TWI_1m", "one_mean", "one_std", "homogeneity_1", "contrast_1", "dissimilarity_1", "entropy_1", "second_moment_1" )], family=negative.binomial(theta=1 , link="log"))
  imp.nb.A[[i]] <- hp$I.perc
  fit <- lm(Pred ~ obs-1)
  bias.nb.A[[i]] <- 1-coef(fit)
  # shrub
  ID.nb.AR[[i]] <- ID
  obs <- VALIDATION[,4]
  Obs.nb.AR[[i]] <- obs
  GLM_shrub <- glm(Shrub_richness ~ one_mean + DTM_1_mean + TWI_1m, data=TRAIN, family=negative.binomial(theta=1 , link="log"))
  Pred <- stats::predict(GLM_shrub, newdata=VALIDATION, type="response")
  Pred.nb.AR[[i]] <- Pred
  r2.nb.AR[[i]] <- (cor(Pred, obs, method="pearson"))^2
  s1 <- sqrt(mean((obs-Pred)^2))
  rmse.nb.AR[[i]] <- s1
  Nrmse.nb.AR[[i]] <- (s1/(max(obs)-min(obs)))*100
  # FIX: same copy-paste -- use the shrub-richness response
  hp <- hier.part(VALIDATION$Shrub_richness, VALIDATION[,c("DTM_1_mean", "slope_1m_std", "norm_H_1_mean", "Asp_1m", "TWI_1m", "one_mean", "one_std", "homogeneity_1", "contrast_1", "dissimilarity_1", "entropy_1", "second_moment_1" )], family=negative.binomial(theta=1 , link="log"))
  imp.nb.AR[[i]] <- hp$I.perc
  fit <- lm(Pred ~ obs-1)
  bias.nb.AR[[i]] <- 1-coef(fit)
  # herb
  ID.nb.H[[i]] <- ID
  obs <- VALIDATION[,5]
  Obs.nb.H[[i]] <- obs
  GLM_herb <- glm(Herb_richness ~ one_mean + DTM_1_mean + slope_1m_std, data=TRAIN, family=negative.binomial(theta=1 , link="log"))
  Pred <- stats::predict(GLM_herb, newdata=VALIDATION, type="response")
  Pred.nb.H[[i]] <- Pred
  r2.nb.H[[i]] <- (cor(Pred, obs, method="pearson"))^2
  s1 <- sqrt(mean((obs-Pred)^2))
  rmse.nb.H[[i]] <- s1
  Nrmse.nb.H[[i]] <- (s1/(max(obs)-min(obs)))*100
  # FIX: same copy-paste -- use the herb-richness response
  hp <- hier.part(VALIDATION$Herb_richness, VALIDATION[,c("DTM_1_mean", "slope_1m_std", "norm_H_1_mean", "Asp_1m", "TWI_1m", "one_mean", "one_std", "homogeneity_1", "contrast_1", "dissimilarity_1", "entropy_1", "second_moment_1" )], family=negative.binomial(theta=1 , link="log"))
  imp.nb.H[[i]] <- hp$I.perc
  fit <- lm(Pred ~ obs-1)
  bias.nb.H[[i]] <- 1-coef(fit)
}
# export the variable importances (one file per model family and layer)
# RF
write.table(imp.rf, file="importancia.rf.csv")
write.table(imp.rf.A, file="importancia.rf.A.csv")
write.table(imp.rf.AR, file="importancia.rf.AR.csv")
write.table(imp.rf.H, file="importancia.rf.H.csv")
# GLM
write.table(imp.nb, file="importancia.nb.csv")
write.table(imp.nb.A, file="importancia.nb.A.csv")
write.table(imp.nb.AR, file="importancia.nb.AR.csv")
write.table(imp.nb.H, file="importancia.nb.H.csv")
# merge all accuracies together (500 bootstrap values per column)
BOOTS_ACC <- data.frame(unlist(r2.rf), unlist(r2.rf.A), unlist(r2.rf.AR), unlist(r2.rf.H),
                        unlist(r2.nb), unlist(r2.nb.A), unlist(r2.nb.AR), unlist(r2.nb.H),
                        unlist(Nrmse.rf), unlist(Nrmse.rf.A), unlist(Nrmse.rf.AR), unlist(Nrmse.rf.H),
                        unlist(Nrmse.nb), unlist(Nrmse.nb.A), unlist(Nrmse.nb.AR), unlist(Nrmse.nb.H),
                        unlist(bias.rf), unlist(bias.rf.A), unlist(bias.rf.AR), unlist(bias.rf.H),
                        unlist(bias.nb), unlist(bias.nb.A), unlist(bias.nb.AR), unlist(bias.nb.H))
colnames(BOOTS_ACC) <- c("r2.rf.Total","r2.rf.Tree", "r2.rf.Shrub", "r2.rf.Herb",
                         "r2.nb.Total","r2.nb.Tree", "r2.nb.Shrub", "r2.nb.Herb",
                         "Nrmse.rf.Total","Nrmse.rf.Tree", "Nrmse.rf.Shrub", "Nrmse.rf.Herb",
                         "Nrmse.nb.Total","Nrmse.nb.Tree", "Nrmse.nb.Shrub", "Nrmse.nb.Herb",
                         "bias.rf.Total","bias.rf.Tree", "bias.rf.Shrub", "bias.rf.Herb",
                         "bias.nb.Total","bias.nb.Tree", "bias.nb.Shrub", "bias.nb.Herb")
# export the results
write.table(BOOTS_ACC, file="BOOTS_ACC.csv")
## Residuals (predicted - observed, pooled over all bootstrap iterations)
# RF
res.rf<- unlist(Pred.rf)-unlist(Obs.rf)
res.rf.A<- unlist(Pred.rf.A)-unlist(Obs.rf.A)
res.rf.AR<- unlist(Pred.rf.AR)-unlist(Obs.rf.AR)
res.rf.H<- unlist(Pred.rf.H)-unlist(Obs.rf.H)
#GLM
res.nb<- unlist(Pred.nb)-unlist(Obs.nb)
res.nb.A<- unlist(Pred.nb.A)-unlist(Obs.nb.A)
res.nb.AR<- unlist(Pred.nb.AR)-unlist(Obs.nb.AR)
res.nb.H<- unlist(Pred.nb.H)-unlist(Obs.nb.H)
# median accuracies of the models
MED <- list()
medians <- for (i in 1:length(BOOTS_ACC[1,])){
a <- median(BOOTS_ACC[,i])
a <- round(a, 2)
MED[[i]] <- a
}
MED <- as.data.frame(unlist(MED), names(BOOTS_ACC))
MED
save.image("Richness.RData")
|
23fc0657f3f753043b53540e394eed00e89f63da | 70ab8814c14fa2350f1b69ba5254eb7936712c52 | /Pol initial condition.R | 5eb0183d134f731de12627d9e9d6ab19f9973656 | [] | no_license | MikawaFumika/Lattice_KMC_Polymerase-DNA-movement-simulation | b3e95e19d725a60ee067f02b68754064d7e71be8 | bef5b6bfbd73e26bc7610113fd795781615fb587 | refs/heads/main | 2023-03-22T17:58:25.238203 | 2021-03-18T11:17:52 | 2021-03-18T11:17:52 | 331,918,951 | 2 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,961 | r | Pol initial condition.R | #simulation for
# Lattice-KMC polymerase/DNA simulation: initial-condition setup.
# Each cell of a size x size lattice is randomly seeded with a polymerase
# (state 4) with probability `polprob`, otherwise left empty (state 5).
size=30           # lattice edge length (cells per side)
kbt=1             # thermal energy k_B*T; NOTE(review): unused in this excerpt
kp=1              # rate constant; presumably consumed by the KMC step elsewhere -- TODO confirm
polprob=0.1       # per-cell probability of seeding a polymerase
totalpol=0        # total number of polymerases placed
indexpoli=c()     # row indices of placed polymerases
indexpolj=c()     # column indices of placed polymerases
indexpol=0        # placement counter; NOTE(review): always equals totalpol below
# lattice_1 is the working state lattice; lattice_2 starts as a copy of it.
lattice_1=matrix(nrow=size, ncol=size)
lattice_2=matrix(nrow=size, ncol=size)
# Visit-count accumulator, explicitly zeroed cell by cell below.
heatmaplattice=matrix(nrow=size, ncol=size)
for(i in 1:size)
{
for(j in 1:size)
{
heatmaplattice[i,j]=0
}
}
# Pairwise interaction coefficients between the five cell states (rows and
# columns index states 1-5). NOTE(review): not referenced in this excerpt --
# presumably used by the KMC rate calculation elsewhere.
coefficientmatrix=matrix(0,nrow=5,ncol=5)
coefficientmatrix[1,1]=-0.2
coefficientmatrix[2,4]=-0.5
coefficientmatrix[4,2]=-0.5
coefficientmatrix[3,4]=0.5
coefficientmatrix[4,3]=0.5
coefficientmatrix[4,4]=-0.25
coefficientmatrix[4,5]=0
coefficientmatrix[5,4]=0
# Random seeding: one uniform draw per cell (row-major order). Coordinates
# of every placed polymerase are recorded for later moves.
for(i in 1:size)
{
for(j in 1:size)
{
if(runif(1)<polprob)
{
lattice_1[i,j]=4
indexpol=indexpol+1
indexpoli=c(indexpoli,i)
indexpolj=c(indexpolj,j)
totalpol=totalpol+1
}
else
lattice_1[i,j]=5
}
}
# Second lattice starts identical to the seeded one.
lattice_2=lattice_1
# ---- Visualise the initial lattice ----
par(mar=c(1,1,1,1))
# Blank canvas spanning the lattice; axes, ticks and box suppressed so the
# polygons tile the whole plotting region.
plot(1,1,type="p",tck=0.03,cex=0.5,las=1,xlab="",col='white',pch=19, ylab="", main="",xlim=c(0,size),ylim=c(0,size),xaxt="n",yaxt="n",bty="n")
# Draw one unit square per cell: red = polymerase (state 4), white = empty
# (state 5). Matrix row i is drawn at y = size - i, so row 1 appears at the
# top of the plot.
for(i in 1:size)
{
for(j in 1:size)
{
par(new=T)
if(lattice_1[i,j]==4)
polygon(c(j-1,j,j,j-1),c(size-i,size-i,size-i+1,size-i+1), density = NULL, border = F, col = 'red')
if(lattice_1[i,j]==5)
polygon(c(j-1,j,j,j-1),c(size-i,size-i,size-i+1,size-i+1), density = NULL, border = F, col = 'white')
}
}
# Overlay smaller inset squares for the states of a second lattice
# (states 1/2/3 drawn black/blue/grey).
# NOTE(review): `lattice_cho1` is never defined in this script -- it is
# presumably created by a companion script; this loop errors if the file
# is run standalone.
for(i in 1:size)
{
for(j in 1:size)
{
par(new=T)
if(lattice_cho1[i,j]==1)
polygon(c(j-1+0.2,j-0.2,j-0.2,j-1+0.2),c(size-i+0.2,size-i+0.2,size-i+1-0.2,size-i+1-0.2), density = NULL, border = F, col = 'black')
if(lattice_cho1[i,j]==2)
polygon(c(j-1+0.2,j-0.2,j-0.2,j-1+0.2),c(size-i+0.2,size-i+0.2,size-i+1-0.2,size-i+1-0.2), density = NULL, border = F, col = 'blue')
if(lattice_cho1[i,j]==3)
polygon(c(j-1+0.2,j-0.2,j-0.2,j-1+0.2),c(size-i+0.2,size-i+0.2,size-i+1-0.2,size-i+1-0.2), density = NULL, border = F, col = 'grey')
}
}
|
20465640b5dc84473722ac1909661aceb17b0a58 | 28c1da5514d52311051520abbb3babc8b775a881 | /RnBeads/r-packages/RnBeads/man/rnb.plot.ct.heatmap.Rd | 2da160678f8d74a01607627f749c8901c531e177 | [
"MIT"
] | permissive | relshire/epiGBS | d60bcca37f58139c5e2a2542f5170b0d713e4517 | 78e495a642a8b86c08041d69cd854a7be2b263c8 | refs/heads/master | 2020-12-30T21:50:44.842966 | 2019-09-04T00:24:28 | 2019-09-04T00:24:28 | 59,452,648 | 0 | 0 | MIT | 2019-09-04T00:24:29 | 2016-05-23T04:46:20 | R | UTF-8 | R | false | false | 819 | rd | rnb.plot.ct.heatmap.Rd | % Generated by roxygen2 (4.0.2): do not edit by hand
\name{rnb.plot.ct.heatmap}
\alias{rnb.plot.ct.heatmap}
\title{rnb.plot.ct.heatmap}
\usage{
rnb.plot.ct.heatmap(ct.obj, type = "nonnegative", writeToFile = FALSE, ...)
}
\arguments{
\item{ct.obj}{Object of class \code{CellTypeInferenceResult} as returned by \link{rnb.execute.ct.estimation}.}
\item{type}{Type of cell type contributions to plot.}
\item{writeToFile}{If \code{TRUE}, the plot will be written to a file.}
\item{...}{Other arguments passed to \code{\link{createReportPlot}}.}
}
\value{
if \code{writeToFile=TRUE} an object of class \code{\linkS4class{ReportPlot}},
or the plotted matrix otherwise
}
\description{
Plot contributions of the cell types
}
\details{
The cell type contributions are visualized as a heatmap
}
\author{
Pavlo Lutsik
}
|
7162f503505bf1ed1b8af7345fb76f1925546a6a | 2faa4567a89b3cbf3ee2fba8578942876c38417b | /Visualisation.R | 850742d757cf9cab587feab99ed8f60246df5608 | [] | no_license | SufyanIqbal1/RTraining | 53cc9302df80636d9fbd69fa6bd9a80607eff082 | 016ce28b558b79758aa595ff6f252df74df8fd6e | refs/heads/master | 2021-01-17T18:13:10.869428 | 2017-06-27T09:58:49 | 2017-06-27T09:58:49 | 95,544,176 | 1 | 0 | null | null | null | null | UTF-8 | R | false | false | 204 | r | Visualisation.R | require(graphics)
pairs(OrchardSprays, main = "OrchardSprays data")
#print (max(OrchardSprays["decrease"]))
s<- rbind(OrchardSprays)
print(s[which.max(s[,1]),])
#View(OrchardSprays)
|
8faf2d47406de37dc8edd4cd84a5a987de0f273a | b022572938addca6df3c51709ac2654855c238db | /oneTimeDataPreparation.R | 3bc702fbcab33d61beffe51a51bcd290a5a48e4a | [
"MIT"
] | permissive | vojtechhuser/Athenian | 9d50f7f358c53af7f2d50f2fc33231d8d8cc4d94 | 4d750ed5d57ecda4d82800cd16a7dd4f48949829 | refs/heads/master | 2021-04-30T04:31:30.089290 | 2020-05-12T23:09:25 | 2020-05-12T23:09:25 | 121,537,235 | 0 | 1 | null | null | null | null | UTF-8 | R | false | false | 1,337 | r | oneTimeDataPreparation.R |
# One-time preparation: parse the Athena (OMOP vocabulary) CSV exports and
# cache the whole set of tables as a single .rda image for fast loading.

# athena_folder is expected to contain the parsed Athena CSV files.
athena_folder='n:/athena'

library(tidyverse)
library(magrittr)

# All Athena exports share the same format: tab-delimited, no quote
# character, strings kept as character (as.is = TRUE). Factoring the call
# out removes the fourfold-duplicated argument list of the original.
read_athena <- function(file_name) {
  read.delim(file.path(athena_folder, file_name), as.is = TRUE, quote = "")
}

concept <- read_athena('concept.csv')
#save(concept,file = file.path(athena_folder,'concept.rda'))
vocabulary <- read_athena('vocabulary.csv')
relationship <- read_athena('relationship.csv')
# Large files
concept_relationship <- read_athena('concept_relationship.csv')
concept_ancestor <- read_athena('concept_ancestor.csv')

# Save the entire workspace and report how long the write took.
started <- Sys.time()
save.image(file = file.path(athena_folder,'athena.rda'))
print(Sys.time() - started)

# ignore code below------------
# Make a smaller image without the bulky relationship/ancestor tables.
#rm(concept_ancestor)
rm(relationship);rm(vocabulary);rm(concept_ancestor);rm(concept_relationship)
# a<-Sys.time()
# save.image(file = file.path(athena_folder,'concept.rda'))
# save.image(file = file.path(athena_folder,'athena-medium.rda'))
# print(Sys.time()-a)
#rm(athena_folder)

# # version of vocab
# print(dplyr::filter(vocabulary,VOCABULARY_ID=='None'))
38da956093acc3b8cb96112430903bd99a673a86 | 34aacdad79d993e55eca4da3c1cc40423dd86738 | /scenarios/prep_types_paris.R | 10916b91d37f682b6b8ad1a819a1faa12dc342fe | [] | no_license | bfrggit/R-sc | 1d5cb37915bed8f325c0ad299f2c77dbb7b4ce96 | 0ca0a9c1c3c27226ab383df96911836758898779 | refs/heads/master | 2020-04-20T22:31:15.788936 | 2019-01-17T00:06:46 | 2019-01-17T00:06:46 | 169,142,522 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 734 | r | prep_types_paris.R | #!/usr/bin/env Rscript
#
# prep_types_paris.R
#
# Created: 2019-01-03
# Updated: 2019-01-08
# Author: Charles Zhu
#
# as always, run this script from the project root
rm(list = ls())
NUM_TYPES = 8L
lockBinding("NUM_TYPES", globalenv())
sensor_type_period <- c(28L, 123L, 61L, 56L, 45L, 42L, 31L, 31L)
sensor_type_cali_t <- c(150L, 15L, 900L, 60L, 60L, 90L, 90L, 90L)
names(sensor_type_period) <- names(sensor_type_cali_t) <-
c(
"type_1",
"noise",
"air_quality",
paste("type", as.character(4L:8L), sep = "_")
)
st_specs <- list()
st_specs$st_period <- sensor_type_period
st_specs$st_cali_t <- sensor_type_cali_t
save(
NUM_TYPES,
st_specs,
file = "scenarios/types_paris.RData"
)
|
28b1faf49a274b0fa6bc32fc91b7af6c761c033c | 34e1f0cdf5f2abf3416233705ac67dfb0d0b2a73 | /code/01a_enrichment_RAP.R | 181c9f63805ec4269d1c749382fddc768fc15f3a | [] | no_license | munschauerlab/SCoV2-proteome-atlas | fc9d82302f4d0700302aa96e4c899394c567d566 | c2f4b234e920159c0b5760d31493db8d7780f72d | refs/heads/master | 2023-06-23T02:50:05.119503 | 2023-06-20T17:23:41 | 2023-06-20T17:23:41 | 273,319,036 | 4 | 2 | null | null | null | null | UTF-8 | R | false | false | 1,458 | r | 01a_enrichment_RAP.R | library(data.table)
library(dplyr)
library(Matrix)
library(BuenColors)
library(network)
# Import database
if(!exists("string_v11")){
source('00_string_v11.R')
}
# Import gene names
rap <- fread("../data/SCoV2_RAPms.txt")
rap_filt <- rap%>%
filter(adj.P.Val.SCoV2.over.RMRP < 0.2 & species == "HOMO SAPIENS" & logFC.SCoV2.over.RMRP > 0)
rap_genes <- rap_filt %>% pull(geneSymbol)
# Make string network
bulk <- fread("../data/SCoV2_bulkProteome.txt") %>% arrange(log(P.Value.SCoV.over.Mock)) %>%
filter(species == "HOMO SAPIENS")
all_ms_genes <- unique(bulk$geneSymbol, rap_genes)
observed <- pull_string_v11(min_score = 550, genes_keep = rap_genes) %>%
filter(node1 != node2)
permuted_connections <- sapply(1:1000, function(i){
set.seed(i)
perm <- pull_string_v11(min_score = 550, genes_keep = sample(all_ms_genes, length(rap_genes))) %>%
filter(node1 != node2)
dim(perm)[1]
})
(dim(observed)[1]-mean(permuted_connections))/sd(permuted_connections)
mean(permuted_connections)
p1 <- ggplot(data.frame(connections = permuted_connections), aes(x = connections)) +
geom_histogram(fill = "lightgrey", color = "black") + scale_x_log10() +
geom_vline(xintercept = 1534, color = "firebrick") +
pretty_plot(fontsize = 8 ) + L_border() + scale_y_continuous(expand = c(0,0))+
labs(x = "Total connections (log10 scale)", y = "Count")
cowplot::ggsave2(p1, file = "../output/permuted_RAP_string_network.pdf", width = 2.0, height = 1.8)
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.