blob_id
stringlengths 40
40
| directory_id
stringlengths 40
40
| path
stringlengths 2
327
| content_id
stringlengths 40
40
| detected_licenses
listlengths 0
91
| license_type
stringclasses 2
values | repo_name
stringlengths 5
134
| snapshot_id
stringlengths 40
40
| revision_id
stringlengths 40
40
| branch_name
stringclasses 46
values | visit_date
timestamp[us]date 2016-08-02 22:44:29
2023-09-06 08:39:28
| revision_date
timestamp[us]date 1977-08-08 00:00:00
2023-09-05 12:13:49
| committer_date
timestamp[us]date 1977-08-08 00:00:00
2023-09-05 12:13:49
| github_id
int64 19.4k
671M
⌀ | star_events_count
int64 0
40k
| fork_events_count
int64 0
32.4k
| gha_license_id
stringclasses 14
values | gha_event_created_at
timestamp[us]date 2012-06-21 16:39:19
2023-09-14 21:52:42
⌀ | gha_created_at
timestamp[us]date 2008-05-25 01:21:32
2023-06-28 13:19:12
⌀ | gha_language
stringclasses 60
values | src_encoding
stringclasses 24
values | language
stringclasses 1
value | is_vendor
bool 2
classes | is_generated
bool 2
classes | length_bytes
int64 7
9.18M
| extension
stringclasses 20
values | filename
stringlengths 1
141
| content
stringlengths 7
9.18M
|
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
a6f3d77e485762402b15e407814750d7d24e2ad5
|
36b14b336e0efdda255fa3f163af65127e88105f
|
/man/Problem5.29.Rd
|
a26ecfb75958bb0574e3fa6c5696cfeef69cd3d1
|
[] |
no_license
|
ehassler/MontgomeryDAE
|
31fcc5b46ae165255446e13beee9540ab51d98b3
|
43a750f092410208b6d1694367633a104726bc83
|
refs/heads/master
| 2021-06-24T13:46:19.817322
| 2021-03-11T16:36:37
| 2021-03-11T17:13:18
| 199,803,056
| 8
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 479
|
rd
|
Problem5.29.Rd
|
\name{Problem5.29}
\alias{Problem5.29}
\docType{data}
\title{Exercise 5.29}
\usage{data("Problem5.29")}
\format{A data frame with 36 observations on the following variable(s).\describe{
\item{\code{Frequency}}{a character vector}
\item{\code{Environment}}{a character vector}
\item{\code{CrackGrowth}}{a numeric vector}
}}
\references{Montgomery, D.C.(2017, 10th ed.) \emph{Design and Analysis of Experiments}, Wiley, New York.}
\examples{data(Problem5.29)}
\keyword{datasets}
|
3d8534540a46b20eb54786d7fad2cc0137e5489f
|
838a8e946b58b415ae51fc4d6e67260d2990c7fb
|
/man/list_to_string.Rd
|
a1f83be7ef55b7e9e5de73830ddf2cae51190d05
|
[] |
no_license
|
fennerm/fen.R.util
|
7bf7c8f8593d7760939b3e5106ecdf996ecce72f
|
c9aeb26617d2e78ee1046626334ee6cc7cbc7682
|
refs/heads/master
| 2021-01-20T02:53:13.563011
| 2018-05-17T18:30:26
| 2018-05-17T18:30:26
| 101,340,018
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 320
|
rd
|
list_to_string.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/io.R
\name{list_to_string}
\alias{list_to_string}
\title{Convert a list to a space separated string}
\usage{
list_to_string(lst)
}
\arguments{
\item{lst}{A list}
}
\value{
A string
}
\description{
Convert a list to a space separated string
}
|
85bde80c46295ded81149382edeb2f52a7b7bc81
|
5038e6f203a7f29c0c21986f5caf61ba11f081c5
|
/R/inc03_impactsRLEpubs.R
|
1aff5b046811948a6811e964797595074272e7d6
|
[] |
no_license
|
jrfep/CEBA.LEE
|
2c5b1c916ef40f41042f418b78d57dba35719f61
|
51c9953ca5cfa235bb8e0d14b918b1eed2f89920
|
refs/heads/master
| 2021-01-20T03:54:00.510319
| 2019-03-24T19:21:39
| 2019-03-24T19:21:39
| 89,601,859
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 11,705
|
r
|
inc03_impactsRLEpubs.R
|
## Exploratory analysis of the citation / altmetric impact of Red List of
## Ecosystems (RLE) publications vs. related IUCN Red List (RLTS) and Key
## Biodiversity Area (KBA) publications.
##https://github.com/ropensci/rcrossref
##~/.Renviron with the line crossref_email= "jose.ferrer@provitaonline.org"
##altmetrics api...
##R --vanilla
require(textcat)
require(rAltmetric)
require(RJSONIO)
require(rcrossref)
require(xml2)
##require(dplyr)
require(tidyverse)
require(gdata)
## Altmetric API key (left empty here; fill in before running the altmetric
## queries further below).
mi.key <- ""
##require(ROpenOffice)
##RLEpubs <- read.ods("~/Descargas/RLE Publications - 2018 List.ods")
##RLEpubs <- read.csv("~/Descargas/RLE_Publications.csv")
##
## https://www.iucnredlist.org/resources/list
## http://www.keybiodiversityareas.org/publications
## Publication list with a DOI column plus logical columns RLE / RLTS / KBA
## flagging which list(s) each publication belongs to.
RLEpubs <- read.csv("~/mi.git/CEBA.LEE/data/IUCN_KP_Publications.csv")
## Unique, non-empty DOIs and per-group membership masks over those DOIs.
mis.dois <- unique(as.character(RLEpubs$DOI))
mis.dois <- mis.dois[mis.dois!=""]
slc.RLE <- mis.dois %in% subset(RLEpubs,RLE)$DOI
slc.RLTS <- mis.dois %in% subset(RLEpubs,RLTS)$DOI
slc.KBA <- mis.dois %in% subset(RLEpubs,KBA)$DOI
## Fetch CrossRef metadata for every DOI.
ref.info <- cr_works(dois = mis.dois,.progress="text")
## Intro at https://poldham.github.io/abs/crossref.html
ref.info$data %>% count(subject, sort = TRUE)
## Split comma-separated multi-subject strings and tally individual subjects.
ref.info$data %>% separate_rows(subject, sep = ",") %>%
count(subject=trim(subject), sort = TRUE) -> subjects # output subjects
library(ggplot2)
## Horizontal bar chart of subject frequencies.
subjects %>%
ggplot2::ggplot(aes(subject, n, fill = subject)) +
geom_bar(stat = "identity", show.legend = FALSE) +
coord_flip()
library(stringr)
library(ggplot2)
library(plotly)
## Publication year = first four characters of the CrossRef 'issued' date.
ref.info$data$year <- as.numeric(substr(ref.info$data$issued,0,4))
## Publications per year as a line chart.
ref.info$data %>%
count(year) %>%
ggplot(aes(x = year, y = n, group = 1)) +
geom_line() -> out # data out
out
library(tidyr)
# Remove null, unnest list column, create full_name
list.authors <- ref.info$data[ref.info$data$author != "NULL", ] %>%
tidyr::unnest(., author) %>%
tidyr::unite(auth_full_name, c(family, given), sep = " ", remove = FALSE) %>%
rename("auth_family" = family, "auth_given" = given, "auth_aff" = affiliation.name)
# get initials: first letter of each word of the given name, lower-cased.
list.authors$initials <- tolower(sapply(list.authors$auth_given,function(x) {
y <- strsplit(x," ")[[1]]
paste(substr(y,1,1),collapse="")
}))
## Normalised author key: last name + initials, lower-cased and trimmed.
list.authors$valid_name <- trim(tolower(paste(list.authors$auth_family,list.authors$initials,sep=" ")))
library(dplyr)
library(stringr)
length(unique(list.authors$valid_name))
length(unique(list.authors$auth_full_name))
list.authors %>% count(valid_name, sort = TRUE)
## Alternative name columns, to be collapsed by edit distance below
## (names within <2, <3 and <4 character edits respectively).
list.authors$auth_alt2 <- list.authors$valid_name
list.authors$auth_alt3 <- list.authors$valid_name
list.authors$auth_alt4 <- list.authors$valid_name
## Pairwise edit distances between normalised author names.
mtz.names <- adist(list.authors$valid_name)
## Collapse near-identical names: for each name, relabel every name within
## edit distance 1 / 2 / 3 with the current name in auth_alt2 / 3 / 4.
for (k in unique(list.authors$valid_name)) {
idx <- first(match(k,list.authors$auth_alt2))
if (length(idx)==1) {
list.authors$auth_alt2[which(mtz.names[idx,]<2)] <- k
list.authors$auth_alt3[which(mtz.names[idx,]<3)] <- k
list.authors$auth_alt4[which(mtz.names[idx,]<4)] <- k
}
}
list.authors %>% count(auth_alt2, sort = TRUE) ## too little collapsing
list.authors %>% count(auth_alt3, sort = TRUE) ## better
subset(list.authors,auth_alt3 %in% "butchart s")$valid_name
## error: Ma Keeping and Ma were merged...
subset(list.authors,auth_alt3 %in% "ma z")$valid_name
## two different authors
subset(list.authors,auth_alt3 %in% "lei g")$valid_name
subset(list.authors,auth_alt3 %in% "keith da")$valid_name
## Undo the collapsing for names that were wrongly merged: restore valid_name.
for (bsc in c("ma z","lei g","keith da","murray nj","regan tj","wilson al")) {
list.authors[list.authors$auth_alt3 %in% bsc,"auth_alt3"] <- list.authors[list.authors$auth_alt3 %in% bsc,"valid_name"]
}
list.authors %>% count(auth_alt4, sort = TRUE) ## too much collapsing
## Drop entries with no usable author name.
list.authors <- subset(list.authors,!valid_name %in% "na na")
length(unique(list.authors$auth_alt3))
length(unique(list.authors$valid_name))
## DOI x author incidence matrices: RLE papers vs. the other (RLTS/KBA) papers.
mtz.LRE <- as.matrix(with(subset(list.authors,doi %in% subset(RLEpubs,RLE)$DOI),table(doi,auth_alt3)))
mtz.OTR <- as.matrix(with(subset(list.authors,doi %in% subset(RLEpubs,RLTS | KBA)$DOI),table(doi,auth_alt3)))
##save(file="~/mi.git/CEBA.LEE/Rdata/20190306_pubsLRE.rda",mtz.LRE,mtz.OTR)
save(file="~/mi.git/CEBA.LEE/Rdata/20190324_pubsLRE.rda",mtz.LRE,mtz.OTR,ref.info)
## Author co-occurrence (coauthorship) matrices: author x author counts.
## NOTE(review): 'mtz' and 'mtz2' are never assigned in this script — these
## lines presumably meant mtz.LRE and mtz.OTR (created just above); confirm.
co_occurrence1 <- t(mtz) %*% mtz
co_occurrence2 <- t(mtz2) %*% mtz2
##mtz <- mtz[,colSums(mtz)>1]
##mtz <- mtz[rowSums(mtz)>1,]
##mtz <- mtz[,colSums(mtz)>1]
##http://kateto.net/netscix2016
require(igraph)
## Plot layout: two network plots on top, two degree histograms below.
layout(matrix(c(1,1,3,3,
1,1,3,3,
2,2,4,4),ncol=4,byrow=T))
par(mar=c(0,0,1,0))
## Coauthor network for the RLE publication set.
g1 <- graph.adjacency(co_occurrence1,
weighted=TRUE,
mode="undirected",
diag=FALSE)
deg <- degree(g1, mode="all")
## Top-10 authors by degree / closeness / betweenness.
md <- rev(sort(degree(g1)))[1:10]
mc <- rev(sort(closeness(g1)))[1:10]
mb <- rev(sort(betweenness(g1)))[1:10]
l1 <- layout_with_kk(g1)
n <- V(g1)$name
plot(g1, layout=l1,edge.arrow.size=.15, vertex.color=rgb(.2,.3,.4,.5), vertex.size=sqrt(deg)/2,vertex.label=NA) ## vertex.label=ifelse(n %in% c(names(md),names(mc),names(mb)),n,NA)
par(mar=c(4,4,0,0))
## NOTE(review): vcount(g2) is referenced here but g2 is only created below —
## run-order bug when executed top-to-bottom; vcount(g1) was likely intended.
hist(deg, breaks=seq(0,vcount(g2),length=20),
col=rgb(.2,.3,.4,.5),main="Histogram of node degree")
## Community detection on the RLE network.
cfg <- cluster_fast_greedy(g1)
membership(cfg)
sizes(cfg)
par(mar=c(0,0,1,0))
## Coauthor network for the other (RLTS/KBA) publication set.
g2 <- graph.adjacency(co_occurrence2,
weighted=TRUE,
mode="undirected",
diag=FALSE)
deg <- degree(g2, mode="all")
md <- rev(sort(degree(g2)))[1:10]
mc <- rev(sort(closeness(g2)))[1:10]
mb <- rev(sort(betweenness(g2)))[1:10]
l2 <- layout_with_kk(g2)
n <- V(g2)$name
plot(g2, layout=l2,edge.arrow.size=.15, vertex.color=rgb(.4,.3,.2,.5), vertex.size=sqrt(deg)/2,vertex.label=NA)
##vertex.label=ifelse(n %in% c(names(md),names(mc),names(mb)),n,NA))
par(mar=c(4,4,0,0))
hist(deg, breaks=seq(0,vcount(g2),length=20), main="Histogram of node degree")
## Whole-network statistics, compared between the two graphs.
centr_degree(g1, mode="all", normalized=T)
centr_degree(g2, mode="all", normalized=T)
centr_eigen(g1, directed=T, normalized=T)$centralization
centr_eigen(g2, directed=T, normalized=T)$centralization
centr_betw(g1, directed=T, normalized=T)$centralization
centr_betw(g2, directed=T, normalized=T)$centralization
## not well defined for disconnected graphs
##centr_clo(g1, mode="all", normalized=T)
## number of clusters
clusters(g1)$no
clusters(g2)$no
graph.density(g1)
graph.density(g2)
mean_distance(g1)
mean_distance(g2)
transitivity(g1, type="average")
transitivity(g2, type="average")
################
##
## Scratch plotting section.
## NOTE(review): 'g' is never defined in this script — presumably g1 or g2.
l <- layout_in_circle(g)
l <- layout_on_sphere(g)
l <- layout_with_fr(g)
l <- layout_with_kk(g)
coords <- layout_(g, nicely())
plot(g, edge.arrow.size=.5, vertex.color="gold", vertex.size=15,
vertex.frame.color="gray", vertex.label.color="black",
vertex.label.cex=0.8, vertex.label.dist=2, edge.curved=0.2)
plot(g, layout=coords,
main='Coauthor networks', vertex.label.dist=0.15, vertex.frame.color='maroon', vertex.label.color='black', vertex.label.font=2, vertex.label=V(g)$name, vertex.label.cex=.53)
## Cross-tabulate group membership (RLTS-or-KBA vs. RLE).
table(slc.RLTS | slc.KBA, slc.RLE)
## Query Altmetric for every DOI; raw responses go in lst.qry, and a tidy
## per-article summary row is appended to art.info for each successful query.
lst.qry <- vector("list",length(mis.dois))
art.info <- data.frame()
k <- 0
for (midoi in mis.dois) {
k <- k+1
qry <- try(altmetrics(doi=midoi,apikey=mi.key))
if (any(class(qry) %in% "try-error")) {
lst.qry[[k]] <- "error"
} else {
if (is.null(qry)) {
lst.qry[[k]] <- NA
} else {
lst.qry[[k]] <- qry
## One row per article: CrossRef citation count plus Altmetric counters;
## counters absent from the response are recorded as NA. 'date' converts
## the epoch-seconds 'published_on' to a decimal year (365-day years).
art.info <- rbind(art.info,
data.frame(doi=midoi,
cr_cites=cr_citation_count(doi = midoi),
altscore=qry$score,
date=ifelse(is.null(qry$published_on),NA,1970+(qry$published_on/(365*60*60*24))),
feeds=ifelse(is.null(qry$cited_by_feeds_count),NA,qry$cited_by_feeds_count),
gplus=ifelse(is.null(qry$cited_by_gplus_count),NA,qry$cited_by_gplus_count),
msm=ifelse(is.null(qry$cited_by_msm_count),NA,qry$cited_by_msm_count),
policies=ifelse(is.null(qry$cited_by_policies_count),NA,qry$cited_by_policies_count),
posts=ifelse(is.null(qry$cited_by_posts_count),NA,qry$cited_by_posts_count),
rh=ifelse(is.null(qry$cited_by_rh_count),NA,qry$cited_by_rh_count),
tweeters=ifelse(is.null(qry$cited_by_tweeters_count),NA,qry$cited_by_tweeters_count),
wikipedia=ifelse(is.null(qry$cited_by_wikipedia_count),NA,qry$cited_by_wikipedia_count),
fbwalls=ifelse(is.null(qry$cited_by_fbwalls_count),NA,qry$cited_by_fbwalls_count),
accounts=ifelse(is.null(qry$cited_by_accounts_count),NA,qry$cited_by_accounts_count),
connotea=ifelse(is.null(qry$readers.connotea),NA,qry$readers.connotea),
citeulike=ifelse(is.null(qry$readers.citeulike),NA,qry$readers.citeulike),
mendeley=ifelse(is.null(qry$readers.mendeley),NA,qry$readers.mendeley)))
}
}
}
## Flag each article row with its group membership.
art.info$RLE <- art.info$doi %in% subset(RLEpubs,RLE)$DOI
art.info$RLTS <- art.info$doi %in% subset(RLEpubs,RLTS)$DOI
art.info$KBA <- art.info$doi %in% subset(RLEpubs,KBA)$DOI
## Exploratory comparisons: RLE (red) vs. non-RLE (black).
plot(cr_cites~date,art.info,log="y",col=1+(RLE))
boxplot(sqrt(altscore)~RLE,art.info,notch=T)
plot(altscore~date,art.info,log="y",col=1+(RLE))
with(art.info,aggregate(msm,list(RLE),sum,na.rm=T))
dim(art.info)
## NOTE(review): 'ccs' is never defined in this script (here and below) —
## presumably a vector of citation counts per DOI; confirm before running.
boxplot(ccs~slc.RLE)
table(art.info$RLE)
with(art.info,aggregate((2020-date),list(RLE),sum,na.rm=T))
with(art.info,aggregate(data.frame(date),list(RLE),mean,na.rm=T))
with(art.info,aggregate(data.frame(cr_cites,altscore,msm,policies,posts,accounts,wikipedia,
readers=as.numeric(as.character(citeulike))+as.numeric(as.character(connotea))+as.numeric(as.character(mendeley))),list(RLE),sum,na.rm=T))
## one could build something like a coauthor network... to see the size and
## the most important interconnections... maybe
## rh is research highlights
##unique(unlist(lapply(lst.qry,function(x) grep("cited_by",names(x),value=T))))
## Totals per group (RLE / RLTS / KBA) for each Altmetric counter and each
## reader source, summed over the raw query responses.
counts <- c("feeds","gplus","msm","policies","posts","rh","tweeters","wikipedia","fbwalls","accounts")
readers <- c("mendeley","connotea","citeulike")
ttls <- matrix(nrow=3,ncol=length(counts)+length(readers))
colnames(ttls) <- c(counts,readers)
rownames(ttls) <- c("RLE","RLTS","KBA")
for (k in counts) {
ttls["RLE",k] <- sum(as.numeric(unlist(as.vector(sapply(lst.qry[slc.RLE],function(x) x[sprintf("cited_by_%s_count",k)])))),na.rm=T)
ttls["RLTS",k] <- sum(as.numeric(unlist(as.vector(sapply(lst.qry[slc.RLTS],function(x) x[sprintf("cited_by_%s_count",k)])))),na.rm=T)
ttls["KBA",k] <- sum(as.numeric(unlist(as.vector(sapply(lst.qry[slc.KBA],function(x) x[sprintf("cited_by_%s_count",k)])))),na.rm=T)
}
for (k in readers) {
ttls["RLE",k] <- sum(as.numeric(unlist(as.vector(sapply(lst.qry[slc.RLE],function(x) x[sprintf("readers.%s",k)])))),na.rm=T)
ttls["RLTS",k] <- sum(as.numeric(unlist(as.vector(sapply(lst.qry[slc.RLTS],function(x) x[sprintf("readers.%s",k)])))),na.rm=T)
ttls["KBA",k] <- sum(as.numeric(unlist(as.vector(sapply(lst.qry[slc.KBA],function(x) x[sprintf("readers.%s",k)])))),na.rm=T)
}
## NOTE(review): 'ccs' undefined — see note above.
aggregate(ccs,list(slc.RLE),median,na.rm=T)
aggregate(ccs,list(slc.RLE),mad,na.rm=T)
hist(ccs[slc.RLE],breaks=seq(0,2000,by=100))
hist(ccs[!slc.RLE],breaks=seq(0,2000,by=100))
## Altmetric score per DOI (NA for failed/empty queries).
asc <- unlist(lapply(lst.qry,function(x) ifelse(class(x) %in% "altmetric",x$score,NA)))
|
c80174b79b2b5f3621efa307bae5c671fcf7d6b2
|
5899da0c526ba2bdd9756abb2ab9fdaed72d922a
|
/man/my.copy.fct.Rd
|
ce70b0d43be9672e6cedf8f6873d78365f5c2ee4
|
[] |
no_license
|
msperlin/GetDFPData
|
4daef2ec30d453de75a74934e6798375e24affae
|
b734310176f5b70dc9730a21d7e30c201c610976
|
refs/heads/master
| 2021-05-07T05:45:01.434240
| 2021-04-01T13:05:18
| 2021-04-01T13:05:18
| 111,594,746
| 38
| 9
| null | 2018-11-26T19:52:02
| 2017-11-21T19:54:33
|
R
|
UTF-8
|
R
| false
| true
| 950
|
rd
|
my.copy.fct.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/gdfpd_export_DFP_Data.R
\name{my.copy.fct}
\alias{my.copy.fct}
\title{Copies data to external file}
\usage{
my.copy.fct(
df.in,
name.df,
base.file.name,
type.export = "xlsx",
csv.dir = tempdir()
)
}
\arguments{
\item{df.in}{Dataframe to be copied}
\item{name.df}{Name of dataframe to be copied}
\item{base.file.name}{The basename of excel file (make sure you dont include the file extension)}
\item{type.export}{The extension of the desired format: 'xlsx' (default) or 'csv'}
\item{csv.dir}{Location where to save csv files prior to zipping (default = tempdir())}
}
\value{
TRUE (invisible), if successful
}
\description{
Copies data to external file
}
\examples{
test.data <- data.frame(test.data = runif(100))
name.df <- 'TestData'
base.file.name <- 'TestData'
type.export <- 'csv'
my.copy.fct(df.in = test.data, name.df, base.file.name, type.export)
}
|
440ef5b5ae0fd34c06a46067ec86091156bbc4c7
|
ddb525b0a9d9c45161f28140cc786e66af722d57
|
/man/signalSeries.Rd
|
a64ff8bc73bc3eb2137b493d7b7ff53c8bdba72a
|
[] |
no_license
|
cran/QRMlib
|
ebd393ae06d6770c906cbd72a4174794db273365
|
914a855242662effc9bd6f9e60ef45119bfdd882
|
refs/heads/master
| 2020-06-04T09:13:15.520228
| 2010-02-22T00:00:00
| 2010-02-22T00:00:00
| 17,717,887
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,777
|
rd
|
signalSeries.Rd
|
\name{signalSeries}
\alias{signalSeries}
\title{signalSeries object}
\description{
Structured after the S-Plus signalSeries object. It contains a
data slot of any type and a NUMERIC positions slot rather than
the date slot of a timeSeries. In other words, each data value
has a numeric value associated with its position in the overall list
}
\usage{
signalSeries(data, positions., units, units.position, from = 1, by = 1)
}
%- maybe also 'usage' for other objects documented here.
\arguments{
\item{data}{a component which is typically a dataframe}
\item{positions.}{a numeric component describing the positions of the data values }
\item{units}{ character vector describing the type of units used in the data structure }
\item{units.position}{character vector describing the type of units used for the positions }
\item{from}{ starting value of positions }
\item{by}{ amount to skip between positions }
}
\details{
If no arguments are supplied, the default (empty) signalSeries object is returned. Otherwise,
a signalSeries object is created with the given positions and data, and units if they are
supplied. As an alternative to supplying the positions directly, they can be supplied by giving
from and by, in which case the positions are generated as a numeric sequence with the right length
to match the data
}
\value{
a signalSeries object with the given data and positions
}
\seealso{
\code{\link{aggregateSignalSeries}}
}
\examples{
signalSeries(); #default object with no data or positions
#Create matrix of simulated values from multivariate-t distribution
m <- 90; n <- 3000;
dataSim <- rmt(m*n,df=3,rho=0.5,d=2);
dataSimSS <- signalSeries(dataSim);
}
\keyword{ts}
\keyword{classes}
|
a20ff4adce85fafc3ef590d7ba5b49bb91f09e23
|
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
|
/data/genthat_extracted_code/qvcalc/examples/worstErrors.Rd.R
|
7f24407502939c4ebf395fa987c36a27b95b056c
|
[] |
no_license
|
surayaaramli/typeRrh
|
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
|
66e6996f31961bc8b9aafe1a6a6098327b66bf71
|
refs/heads/master
| 2023-05-05T04:05:31.617869
| 2019-04-25T22:10:06
| 2019-04-25T22:10:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 641
|
r
|
worstErrors.Rd.R
|
## Extracted example for qvcalc::worstErrors — accuracy of the quasi-variance
## approximation for a factor's contrasts.
library(qvcalc)
### Name: worstErrors
### Title: Accuracy of a Quasi-variance Approximation
### Aliases: worstErrors
### Keywords: regression models
### ** Examples
## Overdispersed Poisson loglinear model for ship damage data
## from McCullagh and Nelder (1989), Sec 6.3.2
library(MASS)
data(ships)
ships$year <- as.factor(ships$year)
ships$period <- as.factor(ships$period)
## Quasi-Poisson GLM restricted to ships with service time > 0,
## with aggregate months of service as the exposure offset.
shipmodel <- glm(formula = incidents ~ type + year + period,
family = quasipoisson,
data = ships, subset = (service > 0), offset = log(service))
## Quasi-variances for the 'type' factor, then their worst-case errors.
shiptype.qvs <- qvcalc(shipmodel, "type")
summary(shiptype.qvs, digits = 4)
worstErrors(shiptype.qvs)
|
3eef7968d8ac4397b774bc593026a05e1c899734
|
465a6edd74b0fccfe7307cb11527ff1cb88a238f
|
/ExperimentalResults/ratio_to_optimal.R
|
f919e160a2b53cd99ede5269a8dc91dd0ffcc235
|
[] |
no_license
|
surakuma/communication-scheuling
|
64bbda63bdb8f1151c3ca410c8424b65d16f40c1
|
96691dcd5c5684d90091a39007f46b9d58c73353
|
refs/heads/master
| 2020-04-20T18:54:59.167718
| 2019-08-08T14:33:28
| 2019-08-08T14:33:28
| 169,034,930
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 883
|
r
|
ratio_to_optimal.R
|
#!/usr/bin/env Rscript

# Boxplot of each heuristic's schedule length relative to the optimal,
# faceted by capacity.
#
# file_name: whitespace-delimited table with a header. Column 1 is
#   'capacity'; column 6 holds the optimal value used as the denominator;
#   columns 2-6 are dropped after normalisation, leaving one column per
#   heuristic.
# Returns a ggplot object (not yet rendered).
ratio_to_optimal_func <- function(file_name)
{
  library(ggplot2)
  library(reshape2)

  results <- read.table(file_name, header = TRUE)
  # Normalise every value column by the optimal (column 6), then drop the
  # columns that are no longer needed.
  results[, 2:ncol(results)] <- results[, 2:ncol(results)] / results[, 6]
  results[, 2:6] <- NULL

  # Long format: one row per (capacity, heuristic) observation.
  melted <- melt(results, "capacity")
  names(melted) <- c("capacity", "Heuristic", "ratio_to_optimal")

  plt <- ggplot(melted, aes(x = Heuristic, y = ratio_to_optimal, color = Heuristic)) +
    geom_boxplot(outlier.color = "black") +
    facet_wrap(~capacity) +
    theme(axis.ticks = element_blank(), axis.text.x = element_blank()) +
    xlab("")
  plt <- plt +
    theme(plot.title = element_text(hjust = 0.5)) +
    ggtitle("Ratio to Optimal")
  return(plt)
}
## Render and save the ratio-to-optimal boxplots for both result sets.
p<-ratio_to_optimal_func('all_results_hf.txt')
ggsave(file="ratio_to_optimal_hf.pdf", p, width=23.6, height=13.4)
p<-ratio_to_optimal_func('all_results_ccsd.txt')
ggsave(file="ratio_to_optimal_ccsd.pdf", p, width=23.6, height=13.4)
|
25c01d65e9941326463ae9e769bb7d8d8f7cbcb4
|
d121f587f7e0678030d33a4c5428e594c5978dad
|
/man/normaliseSE_quantile.Rd
|
1de0b3604eb95a813eada921f23d8cc723f11092
|
[
"Apache-2.0"
] |
permissive
|
kauralasoo/eQTLUtils
|
fcf0907721b3a8f19fe68e611cecb4f16d7a0c9d
|
26242562a4e244334fd9691d03bc1ef4d2d6c1d9
|
refs/heads/master
| 2023-03-05T19:10:45.247191
| 2023-03-03T13:33:08
| 2023-03-03T13:33:08
| 149,779,618
| 4
| 2
| null | null | null | null |
UTF-8
|
R
| false
| true
| 555
|
rd
|
normaliseSE_quantile.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/SummarizedExperiment_helpers.R
\name{normaliseSE_quantile}
\alias{normaliseSE_quantile}
\title{Quantile normalise SummarizedExperiment by rows}
\usage{
normaliseSE_quantile(se, assay_name = "usage")
}
\arguments{
\item{se}{SummarizedExperiment object}
\item{assay_name}{Name of the assay to be normalised in the se object}
}
\value{
SummarizedExperiment object with quantile-normalised data in the qnorm assay
}
\description{
Quantile normalise SummarizedExperiment by rows
}
|
e6b29a125fcbc6114806f9690f88187438b3148c
|
a0937f7d085ec1bf789d07d3154eb842f71b315c
|
/Project 2/cleaned_data2.R
|
5af1be65ba5951ca7976e050731bac89144fcdf8
|
[] |
no_license
|
EnyingGao/BIOS611Git
|
d13f29ee2ce2dda9947103492580cddc559292d6
|
a151468cfb2c1eba20fafb82990a4920b4017099
|
refs/heads/master
| 2020-03-27T10:48:57.574080
| 2019-08-22T01:34:19
| 2019-08-22T01:34:19
| 146,446,531
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 526
|
r
|
cleaned_data2.R
|
# Clean the PM source-apportionment database: rename the source columns to
# readable snake_case names, keep only the columns of interest, and save.
library(tidyverse)
# NOTE(review): input CSV is expected in the working directory.
source_pm <- read.csv('Database_Source_apport_studies.csv')
new_data2 <-
  # as.tibble() is deprecated in the tibble package; as_tibble() is the
  # supported spelling and behaves identically here.
  as_tibble(source_pm) %>%
  rename(Year = Reference.year, sea_salt = SEA.SALT.,
         dust = DUST., traffic = TRAFFIC.,
         industry = INDUSTRY., biomass_burn_residual = BIOM..BURN..RES..,
         other_unspecified_human_origin = OTHER..unspecified.human.origin..) %>%
  select(Year, sea_salt, dust, traffic, industry,
         biomass_burn_residual, other_unspecified_human_origin)
write_csv(new_data2, "./newdata2.csv")
|
59d4cc1e35abd192fa2fe62db6553211faa8840c
|
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
|
/data/genthat_extracted_code/TSrepr/examples/repr_windowing.Rd.R
|
88c11f19aaec32d9a783605f43126c601c29817b
|
[] |
no_license
|
surayaaramli/typeRrh
|
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
|
66e6996f31961bc8b9aafe1a6a6098327b66bf71
|
refs/heads/master
| 2023-05-05T04:05:31.617869
| 2019-04-25T22:10:06
| 2019-04-25T22:10:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 352
|
r
|
repr_windowing.Rd.R
|
## Extracted example for TSrepr::repr_windowing — apply a representation
## function over non-overlapping windows of a time series.
library(TSrepr)
### Name: repr_windowing
### Title: Windowing of time series
### Aliases: repr_windowing
### ** Examples
# func without arguments
repr_windowing(rnorm(48), win_size = 24, func = repr_feaclip)
# func with arguments
repr_windowing(rnorm(48), win_size = 24, func = repr_featrend,
args = list(func = maxC, order = 2, pieces = 2))
|
1621641378588e551851f24c119198b09b8790ff
|
23e04234732f477daef084260536b92aa66d8a59
|
/R/2017-09-08-Challenges_week2.R
|
1836d440c97b71e3ba161958d24fc46a3ff96d86
|
[] |
no_license
|
chrischizinski/SNR_R_Group
|
6bbf1dd467b437ae82807622e5c840de43b9979d
|
d02c91fcd6c947bbd99dba095b88e33320270844
|
refs/heads/master
| 2020-05-21T22:09:39.883698
| 2017-12-08T13:34:49
| 2017-12-08T13:34:49
| 65,237,322
| 1
| 2
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,425
|
r
|
2017-09-08-Challenges_week2.R
|
#data explanation
# http://www.datacarpentry.org/R-ecology-lesson/02-starting-with-data.html
## Week-2 tidyverse challenges on the Portal rodent-survey dataset.
library(tidyverse)
library(lubridate)
mydata <- read_csv("https://ndownloader.figshare.com/files/2292169")
glimpse(mydata)
###EMAIL to michael.whitby@huskers.unl.edu if taking for credit.
###FYI: I just send a list of who completed the assignment to Chris - refer any questions about the class to him
# IN CLASS
#1.put data into long form
mydata %>%
gather(attribute,value,sex:weight) %>%
glimpse()
#2.create new columns in one pipe -
##a.Species (Genus + specific epithet)
mydata<-mydata %>%
unite(UID,genus,species, sep=' ')
# NOTE(review): 'mydata_n' is not defined anywhere — head(mydata) was
# presumably intended.
head(mydata_n)
##b. Date column
mydata<-mydata %>%
unite(Date,year,month,day, sep='-', remove=FALSE) %>%
mutate(Date=ymd(Date))
head(mydata)
# all together
# NOTE(review): this re-unites genus/species, but the unite() in 2a already
# consumed those columns (remove defaults to TRUE) — this step only succeeds
# on a freshly loaded mydata, not after running 2a/2b above.
mydata<-mydata %>%
unite(spp,genus,species, sep=' ', remove=FALSE) %>%
unite(Date,year,month,day, sep='-', remove=FALSE) %>%
mutate(Date=ymd(Date))
head(mydata)
#3.Average weight of Male and Female rodents - in one table!
mydata %>%
filter(taxa=="Rodent") %>%
group_by(sex) %>%
summarise(mean_weight=mean(weight,na.rm=TRUE))
#############################################################################
######################TAKE home challenges###################################
#############################################################################
#1. How many animals caught in each plot type?
mydata %>%
group_by(plot_type) %>%
summarize(count=n())
# Pasted output from an earlier run (the '×' was mangled to '?' on paste):
# A tibble: 5 × 2
#plot_type count
#<chr> <int>
# 1 Control 15611
#2 Long-term Krat Exclosure 5118
#3 Rodent Exclosure 4233
#4 Short-term Krat Exclosure 5906
#5 Spectab exclosure 3918
#2. How many species are in each taxa?
mydata %>%
group_by(taxa) %>%
summarize(count=n_distinct(spp))
# A tibble: 4 × 2
#taxa count
#<chr> <int>
# 1 Bird 11
#2 Rabbit 1
#3 Reptile 7
#4 Rodent 29
#3. Average hindfoot length of each Rodent species?
mydata %>%
filter(taxa=="Rodent") %>%
group_by(spp) %>%
summarize(mean_length=mean(hindfoot_length,na.rm=TRUE))
# A tibble: 29 × 2
# NOTE(review): output header says 'UID' but the code groups by 'spp' —
# this output came from an earlier version of the pipeline.
#UID mean_length
#<chr> <dbl>
# 1 Ammospermophilus harrisi 33.00000
#2 Baiomys taylori 13.00000
#3 Chaetodipus baileyi 26.11592
#4 Chaetodipus intermedius 22.22222
#5 Chaetodipus penicillatus 21.75157
#6 Chaetodipus sp. 19.50000
#7 Dipodomys merriami 35.98235
#8 Dipodomys ordii 35.60755
#9 Dipodomys sp. NaN
#10 Dipodomys spectabilis 49.94887
# ... with 19 more rows
#4. Average weight of all rodents during of each season
#- (Spring = MARCH-MAY, SUMMER= JUNE-AUGUST, FALL=SEPT-NOV, WINTER = DEC-FEB) <- USE MUTATE and case_when to create a season column
mydata %>%
filter(taxa=="Rodent") %>%
mutate(Season=case_when(.$month %in% c(3,4,5)~"Spring",
.$month %in% c(6,7,8)~"Summer",
.$month %in% c(9,10,11)~"Fall",
.$month %in% c(12,1,2)~"Winter")) %>%
group_by(spp,Season) %>%
summarize(mean_weight=mean(weight,na.rm=TRUE))
# NOTE(review): code groups by (spp, Season) but the pasted output below has
# one row per Season only — output from an earlier group_by(Season) run.
# A tibble: 4 × 2
#Season mean_weight
#<chr> <dbl>
# 1 Fall 43.57334
#2 Spring 44.80459
#3 Summer 41.02269
#4 Winter 40.84584
|
552ebb9f960719c8ea6b5f00797d0078c152c916
|
c7cde3e623021ef9df1a3a0f3c6f8a5c1bd21bf3
|
/code/tests/compute_ess_mcse.r
|
3e22409cdc768f96d433f42b3ac7da87ccc9a1d3
|
[] |
no_license
|
NaitongChen/STAT520A_Project
|
422eb6c3960d56729e855b4c3e5da067b8f3b596
|
b1cdc75e2a54b7e7dfa044816dcda49be83b1680
|
refs/heads/master
| 2023-04-11T12:13:43.583163
| 2021-04-21T03:52:19
| 2021-04-21T03:52:19
| 341,904,667
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,262
|
r
|
compute_ess_mcse.r
|
## Compute effective sample size (ESS) and Monte Carlo standard error (MCSE)
## for posterior samples of change-point locations, comparing Gibbs and
## Metropolis-within-Gibbs (MWG) chains across five dataset configurations.
library(mcmcse)
setwd("../../data/posterior_samples")
file_names = list.files(pattern="4_diffind2_locs.csv")
## Column of the burn-in matrices to use (one column per seed).
seed=5
## One row per chain, one (ess, mcse) column pair per change point.
to_store1 = data.frame(matrix(NA, nrow = length(file_names), ncol = 6))
colnames(to_store1) = c("ess first cp", "mcse first cp",
"ess second cp", "mcse second cp",
"ess third cp", "mcse third cp")
## NOTE(review): assumes list.files() returned exactly 10 files, ordered as
## 5 Gibbs chains followed by their 5 matching MWG chains — confirm.
rownames(to_store1) = c("20000c1 Gibbs", "20000c1 MWG",
"50c1 Gibbs", "50c1 MWG",
"100c2 Gibbs", "100c2 MWG",
"100c3 Gibbs", "100c3 MWG",
"60c3 Gibbs", "60c3 MWG")
## Burn-in iterations to discard, laid out as 5 datasets x 5 seeds.
burnin_gibbs = c(0,0,0,0,0,
0,0,0,0,0,
0,5000,100,100,100,
20,20,20,20,20,
4000,1000,2000,500,500)
bgs = matrix(burnin_gibbs, nrow=5, byrow=T)
## Inf marks MWG chains that are excluded from the analysis entirely.
burnin_mwg = c(1000,1000,1000,1000,1000, # 20000 1
0,0,0,0,0, # 50 1
200000,100000,1000,1000,1000, # 100 2
1000,1000,Inf,1000,1000, # 100 3
250000,Inf,Inf,400000,300000) # 60 3
bms = matrix(burnin_mwg, nrow=5, byrow=T)
## For each dataset pair: file i is the Gibbs chain, file i+5 its MWG twin.
for (i in 1:(length(file_names)/2)) {
dat1 = read.csv(file_names[i])
dat2 = read.csv(file_names[i+5])
m = dim(dat1)[2]
## Discard burn-in rows (a 0 burn-in yields index 0, which R silently drops,
## so the full chain is kept).
dat1 = dat1[c( bgs[i, seed] :dim(dat1)[1]),]
if (bms[i,seed] < Inf) {
dat2 = dat2[c( bms[i,seed] :dim(dat2)[1]),]
}
## Rows 2*i-1 (Gibbs) and 2*i (MWG); columns 2*j-1 (ess) and 2*j (mcse) for
## change point j. Trimming to a single column returns a vector, hence the
## m>1 / m==1 split below.
for (j in 1:m) {
if (m>1){
if (bms[i,seed] < Inf) {
to_store1[2*i-1, 2*j - 1] = ess(dat1[,j], method='obm', size='cuberoot') # 1,3,5
to_store1[2*i-1, 2*j] = mcse(dat1[,j], method='obm', size='cuberoot')$se # 2,4,6
}
to_store1[2*i, 2*j - 1] = ess(dat2[,j], method='obm', size='cuberoot')
to_store1[2*i, 2*j] = mcse(dat2[,j], method='obm', size='cuberoot')$se
} else {
if (bms[i,seed] < Inf) {
to_store1[2*i-1, 2*j - 1] = ess(dat1, method='obm', size='cuberoot') # 1,3,5
to_store1[2*i-1, 2*j] = mcse(dat1, method='obm', size='cuberoot')$se # 2,4,6
}
to_store1[2*i, 2*j - 1] = ess(dat2, method='obm', size='cuberoot')
to_store1[2*i, 2*j] = mcse(dat2, method='obm', size='cuberoot')$se
}
}
}
write.csv(to_store1, "4_ess_se.csv", row.names = TRUE)
|
94b1720a396ca767d7618499c1a983f7d9ad7518
|
0af65a43909f9bd8fd8e3ec61f2ca53524f3f7b1
|
/corr.R
|
cdc44ebe4c0607a14d168531fbe134cb0f018c90
|
[] |
no_license
|
w00lf/datasciencecoursera
|
29522850b0e9c050a10c8e6e9a71e9582c30a0e8
|
7ed6c723e0c9743075ac4ceabf4930aa08906eda
|
refs/heads/master
| 2021-01-20T14:40:39.699861
| 2017-09-08T08:32:59
| 2017-09-08T08:32:59
| 90,645,740
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,267
|
r
|
corr.R
|
# Data
#
# The zip file containing the data can be downloaded here:
#
# specdata.zip [2.4MB]
# The zip file contains 332 comma-separated-value (CSV) files containing pollution monitoring data for fine particulate matter (PM) air pollution at 332 locations in the United States. Each file contains data from a single monitor and the ID number for each monitor is contained in the file name. For example, data for monitor 200 is contained in the file "200.csv". Each file contains three variables:
#
# Date: the date of the observation in YYYY-MM-DD format (year-month-day)
# sulfate: the level of sulfate PM in the air on that date (measured in micrograms per cubic meter)
# nitrate: the level of nitrate PM in the air on that date (measured in micrograms per cubic meter)
# For this programming assignment you will need to unzip this file and create the directory 'specdata'. Once you have unzipped the zip file, do not make any modifications to the files in the 'specdata' directory. In each file you'll notice that there are many days where either sulfate or nitrate (or both) are missing (coded as NA). This is common with air pollution monitoring data in the United States.
#
# Part 3
#
# Write a function that takes a directory of data files and a threshold for complete cases and calculates the correlation between sulfate and nitrate for monitor locations where the number of completely observed cases (on all variables) is greater than the threshold. The function should return a vector of correlations for the monitors that meet the threshold requirement. If no monitors meet the threshold requirement, then the function should return a numeric vector of length 0. A prototype of this function follows
corr <- function(directory, threshold = 0, ids = 1:332) {
  # Correlation between sulfate and nitrate for each monitor whose number of
  # completely observed cases is strictly greater than 'threshold'.
  #
  # directory: path containing the monitor CSV files ("001.csv" ... "332.csv"),
  #            each with columns Date, sulfate, nitrate, ID.
  # threshold: minimum number of complete cases required (exclusive).
  # ids:       monitor IDs to scan (default 1:332 preserves the original
  #            hard-coded behavior; new callers may pass a subset).
  #
  # Returns a numeric vector of correlations; a numeric vector of length 0
  # when no monitor meets the threshold (the original returned the scalar 0,
  # contradicting the spec above).
  result <- numeric(0)
  for (i in ids) {
    csvFile <- file.path(directory, sprintf('%03d.csv', i))
    csvData <- read.csv(csvFile)
    # Keep only rows observed on all variables. The original tested
    # !is.na('ID') — a string literal, which is never NA — so the ID column
    # was effectively unchecked; complete.cases() covers every column.
    existing <- csvData[complete.cases(csvData), ]
    # Spec: "greater than the threshold", so equality is excluded (the
    # original used '< threshold', which kept the equal case).
    if (nrow(existing) <= threshold) next
    result <- c(result, cor(existing[, 'sulfate'], existing[, 'nitrate']))
  }
  result
}
|
7b033ae21aab45f6e54a0d7331c132beec1aaaf2
|
1d1066efb5fa6e0e69b1855e8d0d41f9387ee9d0
|
/Rscript/EdgeR_glm_5per2FC_organs_190109.R
|
fc8e43dfa6cf0ace99e6536c9c6c20083309d5f1
|
[] |
no_license
|
isst001/eggshell_color_gene
|
a7b35d1dc031de7e64fcf0bc0dde51ec4881be07
|
264ace9e2fad4f2427fa8eedb3f05552c3ba0ca4
|
refs/heads/master
| 2021-10-18T19:33:53.258293
| 2019-01-18T12:17:28
| 2019-01-18T12:17:28
| 164,637,609
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 9,227
|
r
|
EdgeR_glm_5per2FC_organs_190109.R
|
## EdgeR GLM differential-expression analysis across chicken organs/tissues,
## with a 5% FDR and 2-fold-change cutoff.
inf="raw_count_data_all_organs_tissues_190108.txt"
x <- read.table(inf, header=T, sep="\t", row.names=1, quote="")
## Drop duplicated gene symbols (keeps the first occurrence) and persist the
## de-duplicated table.
x <- x[!duplicated(x$gene_short_name),]
write.table(x, "raw_count_data_all_organs_tissues_unique_190108.txt", row.names=T,col.names = T, sep = "\t", quote=FALSE)
inf2="raw_count_data_all_organs_tissues_unique_190108.txt"
x2 <- read.table(inf2, header=T, sep="\t", row.names=1, quote="")
## Integer counts: columns 3-56 hold the 54 samples (9 tissues x 6 reps);
## genes with zero counts everywhere are removed.
y <- round(x2[,3:56],0)
y2 <- y[rowSums(y) > 0,]
## Tissue group per column, in column order.
group <- factor(c("A","A","A","A","A","A", ##Uterus
"B","B","B","B","B","B", ##Isthmus
"C","C","C","C","C","C", ##Brain
"D","D","D","D","D","D", ##Heart
"E","E","E","E","E","E", ##Intestine
"F","F","F","F","F","F", ##Kidney
"G","G","G","G","G","G", ##Liver
"H","H","H","H","H","H", ##Lung
"I","I","I","I","I","I" ##Muscle
))
design <- model.matrix(~ group)
count <- y2
library("edgeR")
## Standard edgeR GLM pipeline: TMM normalisation, then common / trended /
## tagwise dispersion estimates, then the fitted GLM used for all contrasts.
d <- DGEList(counts = count, group = group)
d <- calcNormFactors(d)
d <- estimateGLMCommonDisp(d, design)
d <- estimateGLMTrendedDisp(d, design)
d <- estimateGLMTagwiseDisp(d, design)
fit <- glmFit(d, design)
## Cutoffs: FDR < 0.05 and |log2 fold change| > 1 (i.e. 2-fold).
param_fdr <- 0.05
foldchange <- log(2,2)
# Pairwise DEG tests: Uterus (baseline, group A) vs. each other organ/tissue.
# The original file repeated this section eight times; one loop replaces it
# (and fixes the mislabeled "#AvsF (Uterus vs. Intestine)" comment, which
# was actually the Kidney comparison). For each GLM coefficient 2..9:
#   * run a likelihood-ratio test against the Uterus baseline,
#   * flip the sign of logFC so positive values mean "higher in Uterus",
#   * flag DEGs at FDR < param_fdr and |logFC| > foldchange (2-fold),
#   * encode direction: 1 = up in Uterus, 2 = unchanged, 3 = down in Uterus,
#   * write the per-comparison table and keep it as table1..table8 for the
#     merge step that follows this section.
organs <- c("Isthmus", "Brain", "Heart", "Intestine",
            "Kidney", "Liver", "Lung", "Muscle")
for (i in seq_along(organs)) {
  lrt <- glmLRT(fit, coef = i + 1)
  print(topTags(lrt))  # mirror the console output of the original script
  tab <- as.data.frame(topTags(lrt, n = nrow(count)))
  # Reverse logFC: coefficients are "organ vs. Uterus", we report the inverse.
  tab$logFCrev <- tab$logFC * -1
  FDR <- tab$FDR
  logFC <- tab$logFCrev
  # 1 = differentially expressed, 0 = not (numeric, as in the original).
  estimatedDEG <- as.numeric(FDR < param_fdr & abs(logFC) > foldchange)
  tab <- cbind(tab, estimatedDEG)
  # 1 = DEG up in Uterus, 3 = DEG down in Uterus, 2 = not a DEG.
  direction <- ifelse(estimatedDEG == 1, ifelse(logFC > 0, 1, 3), 2)
  tab <- cbind(tab, direction)
  print(head(tab))
  write.table(tab,
              file = paste0("DEGanalysis_result_Uterus_vs_", organs[i], ".txt"),
              col.names = TRUE, row.names = TRUE, sep = "\t", quote = FALSE)
  # Keep table1..table8 in the workspace for the downstream merge section.
  assign(paste0("table", i), tab)
}
# ---- Merge the eight per-comparison DEG tables into one wide table ----
# Each table gets an explicit gene-ID column "X" (its row names) so that the
# full outer merges keep one row per gene even when a gene is absent from
# some comparison.
tables <- lapply(mget(paste0("table", 1:8)), function(tab) {
  tab$X <- rownames(tab)
  tab
})
# Sequential merge(x, y, by = "X", all = TRUE), expressed as a fold.
# Column order — and therefore the fixed direction-column positions used
# below — is identical to the original chain of pairwise merges.
temp1 <- Reduce(function(a, b) merge(a, b, by = "X", sort = TRUE, all = TRUE),
                tables)
# high(1), even(2), low(3) in uterus compared to other organs or tissues.
# The direction flag is the 8th column contributed by each table, i.e.
# columns 9, 17, ..., 65 of the merged frame.
direction <- temp1[, c(9, 17, 25, 33, 41, 49, 57, 65)]
head(direction)
# Concatenate the eight flags into one pattern string per gene
# (e.g. "11111111" = higher in uterus than every other organ/tissue).
concate <- do.call(paste0, unname(as.list(direction)))
temp2 <- cbind(temp1, concate)
write.table(temp2, file = "DEGanalysis_result_all_tissues_organs_merge.txt", col.names = TRUE, sep = "\t", quote = FALSE)
# ---- Re-attach gene annotation to the merged DEG table ----
inf <- "raw_count_data_all_organs_tissues_unique_190108.txt"
inf2 <- "DEGanalysis_result_all_tissues_organs_merge.txt"
x <- read.table(inf, header = TRUE, sep = "\t", quote = "")
y <- read.table(inf2, header = TRUE, sep = "\t", quote = "")
x$X <- row.names(x)
temp2 <- merge(x, y, by = "X", sort = TRUE, all = TRUE)
head(temp2)
write.table(temp2, file = "DEGanalysis_result_all_tissues_organs_merge_info.txt", col.names = TRUE, row.names = FALSE, sep = "\t", quote = FALSE)
|
963a0b8e82fef94b61f46f4f9dee828520f0d768
|
a6d935931bf7804801747b276960713375c13295
|
/LogisticRegressionHeart.R
|
5da0f8517ac636524a1c4b5119298220c7303840
|
[] |
no_license
|
amitnke/Regression
|
53b9f4d53292c2031806843ceed56f29dacac5ba
|
a95680d7a0857459eafd328ea1d473d8ff8008f4
|
refs/heads/master
| 2021-01-10T19:13:22.708692
| 2015-10-09T02:42:35
| 2015-10-09T02:42:35
| 41,615,382
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 191
|
r
|
LogisticRegressionHeart.R
|
# Logistic regression of coronary heart disease (chd) on the heart data set.
heart = read.table("/Users/amitkumar/R/data/heart.data", sep = ",", header = T)
names(heart)
# Drop columns 1, 5 and 7 before fitting.
# NOTE(review): which predictors these are depends on the file's header —
# verify against the names(heart) output above.
heart = heart[-c(1, 5, 7)]
# Binomial GLM: chd against all remaining columns as predictors.
heartfit = glm(chd~., data = heart, family=binomial)
summary(heartfit)
|
ad3ce9e292dc9d05869c5ea7aead2e0146cd2e00
|
68f70521040e85b89aacd8e81536a030da5d43d0
|
/R/methods/plots.R
|
401863d48aaea0c480132155e9c963e8e4f0ac26
|
[] |
no_license
|
FAU-Paleo/macrocology_2020-06-16_biogeography
|
847224da1cff2283a8c48f1e695625bec865bb9a
|
f783718f1d8c8ff3dd56a66650312666c1a9a8d6
|
refs/heads/master
| 2022-11-01T23:26:04.670394
| 2020-06-17T11:03:33
| 2020-06-17T11:03:33
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,030
|
r
|
plots.R
|
#' Plotting biogeographic membership
#' Adam T. Kocsis (Erlangen, 2020-06-17)
#' CC-BY 4.0
#' @param mem Membership vector (names are face identifiers of the grid).
#' @param cols color vector.
#' @param bg Spatial object, background.
#' @param alpha alpha values of region colors
#' @param labels should the labels be plotted
#' @param gri icosa grid used for plotting
biogeoplot <- function(mem, cols=allHex, bg=land, alpha="99", labels=TRUE, gri=gr){
	# One colour slot per grid face; NA marks faces without membership data.
	faceCol <- rep(NA, nrow(gri@faces))
	names(faceCol) <- rownames(gri@faces)
	# Translate memberships to colours, append the alpha channel and place
	# the result on the faces named by the membership vector.
	faceCol[names(mem)] <- paste0(cols[mem], alpha)
	# Grey background map first, coloured regions on top.
	plot(bg, col="gray70")
	plot(gri, col=faceCol, add=TRUE)
	if(labels){
		# Face centroids, reordered to match the membership vector.
		cent <- centers(gri)[names(mem),]
		# Print each membership value at its face centroid.
		text(x=cent[,1], y=cent[,2], label=mem, cex=0.6)
	}
}
|
c76d0cdec2494d0b7349d2ebeadf58704191d7e6
|
4b9303d6c78fc023626f9267997ca23634479af6
|
/man/count_unique.Rd
|
8412432f4083c5e45d06ed11f8dde493164e5a5c
|
[
"MIT"
] |
permissive
|
aassumpcao/ColombiaDashboard
|
e28ee968631317f95e4b9cfdad99a46fe7439bc3
|
4919e17e466858ce25160da642ae65846bc4f597
|
refs/heads/master
| 2023-04-08T18:44:55.409457
| 2021-04-17T01:28:25
| 2021-04-17T01:28:25
| 346,501,519
| 0
| 1
| null | null | null | null |
UTF-8
|
R
| false
| true
| 522
|
rd
|
count_unique.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/utils_helpers.R
\name{count_unique}
\alias{count_unique}
\title{helper function}
\usage{
count_unique(data, question_analysis, group = NULL, sort = TRUE)
}
\arguments{
\item{data}{a dataset}
\item{question_analysis}{the main variable in the analysis}
\item{group}{the variable to use as the grouping variable}
\item{sort}{logical; whether to sort the counted values (defaults to \code{TRUE})}
}
\description{
function to count the values of single-choice questions
}
|
a75af82a8653f938fd3e69967195797404752e05
|
1fa14a9f2be3b3704416e5084ce1c61009426eb7
|
/Fitbit_HMM_part1.R
|
20591717e90ba598f545ca3233c914cff2dbf046
|
[] |
no_license
|
idaocolorado/IDAO_HMM_Part1
|
e69346c06fa72763640c5f833c4a82fcf221a0fc
|
40398d9d91b0a371a8a8b3ce3e5e0044eddf61d2
|
refs/heads/master
| 2021-06-24T19:17:14.556725
| 2017-09-14T13:27:25
| 2017-09-14T13:27:25
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 6,890
|
r
|
Fitbit_HMM_part1.R
|
## Hidden Markov Models for time series
# Added to Github
# First load the requisite packages
rm(list=ls()) # Clear memory
graphics.off() # Clears graphics
library(forecast) # Needed to run forecast and auto.arima functions
library(astsa) # To run acf
# Load data
fitbit <- read.csv("~/Syncplicity/Science_files/Ongoing_active_projects/Individualized_Data_Analysis_Organization/IDAO_Correlation_Continuous/Mike_Fitbit_data.csv", as.is = TRUE, header = TRUE)
fitbit$Date <- as.Date(fitbit$Date)
# Remove technical outliers where not wearing (either make = mean or zero)
# Create indicator based on time
# Total minutes accounted for in a day = the four activity states + time in bed.
fitbit$timetotal <- fitbit$Minutes.Sedentary + fitbit$Minutes.Lightly.Active + fitbit$Minutes.Fairly.Active + fitbit$Minutes.Very.Active + fitbit$Time.in.Bed
# Fraction of the best-covered day that each day records.
fitbit$perc.recorded <- fitbit$timetotal/(max(fitbit$timetotal, na.rm=TRUE))
#Remove fitbit recordings with less than 70% of time captured
fitbit <- subset(fitbit, fitbit$perc.recorded >= 0.7)
# Create variable for quality of sleep
fitbit$sleepquality <- fitbit$Minutes.Asleep/fitbit$Time.in.Bed
# NOTE(review): attach() is generally discouraged; columns are referenced
# bare (e.g. Number.of.Awakenings) throughout the rest of the script.
attach(fitbit)
##########################################################################
# Examine number of awakenings data
# Plot histogram
filename1 <- "./HMM_number_of_awakenings_example_histogram.jpeg"
jpeg(filename = filename1, width = 400, height = 300, quality = 90)
hist(Number.of.Awakenings, 15)
dev.off()
# Plot of data
filename2 <- "./HMM_number_of_awakenings_example_tsplot.jpeg"
jpeg(filename = filename2, width = 400, height = 300, quality = 90)
plot(Date, Number.of.Awakenings, type = "l", main = "Number of Awakenings")
dev.off()
## HMM Analysis using 2-4 state Poisson models
# Use HMM to fit multiple state models to number of awakenings
source("./HMM_functions.R") # To get functions needed for variable transformations
# Fit 2-state HMM with poisson distributions
x <- Number.of.Awakenings
d <- Date
m<-2 # Number of states
lambda0<-c(5,15) # Initial guess for state means
gamma0<-matrix(
c(
0.9,0.1,
0.1,0.9
),m,m,byrow=TRUE) # Initial guess for state transition matrix
# Fit stationary model
mod2s<-pois.HMM.mle(x,m,lambda0,gamma0,stationary=TRUE)
delta0<-c(1,1)/2
# Fit nonstationary model (delta is initial state)
mod2h<-pois.HMM.mle(x,m,lambda0,gamma0,delta=delta0,stationary=FALSE)
mod2s; mod2h
# Fit 3 state models (similar to above)
x <- Number.of.Awakenings
d <- Date
m<-3
lambda0<-c(5,10, 15)
gamma0<-matrix(
c(
0.8,0.1,0.1,
0.1,0.8,0.1,
0.1,0.1,0.8
),m,m,byrow=TRUE)
mod3s<-pois.HMM.mle(x,m,lambda0,gamma0,stationary=TRUE)
delta0 <- c(1,1,1)/3
mod3h<-pois.HMM.mle(x,m,lambda0,gamma0,delta=delta0,stationary=FALSE)
mod3s; mod3h
# Fit 4 state models (similar to above)
x <- Number.of.Awakenings
d <- Date
m<-4
lambda0<-c(1,5,10,15)
gamma0<-matrix(
c(
0.85,0.05,0.05,0.05,
0.05,0.85,0.05,0.05,
0.05,0.05,0.85,0.05,
0.05,0.05,0.05,0.85
),m,m,byrow=TRUE)
mod4s<-pois.HMM.mle(x,m,lambda0,gamma0,stationary=TRUE)
delta0<-c(1,1,1,1)/4
mod4h<-pois.HMM.mle(x,m,lambda0,gamma0,delta=delta0,stationary=FALSE)
mod4s; mod4h
# Compare BIC
mod2s$BIC
mod2h$BIC
mod3s$BIC
mod3h$BIC
mod4s$BIC
mod4h$BIC
# Compare AIC
mod2s$AIC
mod2h$AIC
mod3s$AIC
mod3h$AIC
mod4s$AIC
mod4h$AIC
# Both select 3 state stationary as best model
mod3s$lambda
mod3s$delta
round(mod3s$gamma, 2)
# Local decoding of results
### A.1.13 Local decoding
localdecode <- pois.HMM.local_decoding(x,mod3s)
# Global decoding
globaldecode <- pois.HMM.viterbi(x, mod3s)
# Assign state to state mean
states <- mod3s$lambda[localdecode]
# Assign for global decoding
statesglobal <- mod3s$lambda[globaldecode]
# Plot states and values of local and global decoding
# par(mfrow=c(1,2)) # To compare (here look the same)
filename3 <- "./HMM_number_of_awakenings_example_localdecode.jpeg"
jpeg(filename = filename3, width = 400, height = 300, quality = 90)
plot(Date, Number.of.Awakenings, type = "l",
main = "Number of Awakenings:\n 3 State HMM with Poisson Distributon \n Local Decoding")
lines(Date, states, col = "red")
legend("topleft",
c("Number of Awakenings", paste0("State Mean (=", round(mod3s$lambda[1], 1), ", ", round(mod3s$lambda[2], 1), ", ", round(mod3s$lambda[3], 1), " per night)")), #Text
col= c("black", "red"), #Line colors
lty=c("solid","solid"), #Line types
lwd=c(2.0, 2.0), #Line thickness
bty= "n", #No border ("o" if border)
cex=0.9, #Text size
y.intersp=0.9
)#Spacing between text/lines
dev.off()
# Plot states and values of global decode (drawn to screen, not a jpeg)
plot(Date, Number.of.Awakenings, type = "l",
main = "Number of Awakenings:\n 3 State HMM with Poisson Distributon \n Global Decoding")
lines(Date, statesglobal, col = "red")
legend("topleft",
c("Number of Awakenings", paste0("State Mean (=", round(mod3s$lambda[1], 1), ", ", round(mod3s$lambda[2], 1), ", ", round(mod3s$lambda[3], 1), " per night)")), #Text
col= c("black", "red"), #Line colors
lty=c("solid","solid"), #Line types
lwd=c(2.0, 2.0), #Line thickness
bty= "n", #No border ("o" if border)
cex=0.9, #Text size
y.intersp=0.9
)#Spacing between text/lines
# 1 Step ahead forecast over the support 0..20 awakenings
h<-1
n <- length(d)
xf<-0:20
forecasts<-pois.HMM.forecast(xf,h,x,mod3s)
fc<-forecasts[1,]
par(mfrow=c(1,1),las=1)
filename4 <- "./HMM_number_of_awakenings_example_onestepforecast.jpeg"
jpeg(filename = filename4, width = 400, height = 300, quality = 90)
plot(xf,fc,type="h",
main=paste("Number of Awakenings\n Forecast distribution for", d[n]+1),
xlim=c(0,max(xf)),xlab="count",ylab="probability",lwd=3)
dev.off()
#=== This is also the long-term forecast (Marginal distribution, dStat).
par(mfrow=c(1,1))
m<-3
lambda<-mod3s$lambda
# delta solves the stationary-distribution equations of the 3-state chain
# (standard construction; see the sourced HMM helper functions).
delta<-solve(t(diag(m)-mod3s$gamma+1),rep(1,m))
# Marginal distribution: mixture of the three Poisson state distributions.
dstat<-numeric(length(xf))
for (j in 1:m) dstat <- dstat + delta[j]*dpois(xf,lambda[j])
plot(dstat, type = "h", main="Marginal (Long Term) Forcast", ylab = "Probability", xlab = "Number of awakenings")
#=== Compare the 30 night-ahead forecast with the long-term forecast.
h<-30
xf<-0:20
forecasts<-pois.HMM.forecast(xf,h,x,mod3s)
fc<-forecasts[h,]
par(mfrow=c(1,1),las=1)
filename5 <- "./HMM_number_of_awakenings_example_marginalvs50dayforecast.jpeg"
jpeg(filename = filename5, width = 400, height = 300, quality = 90)
plot(xf,fc,type="h",
main=paste("Forecast distribution for", d[n]+h),
xlim=c(0,max(xf)),xlab="count",ylab="probability",lwd=3, col = "black")
lines(xf,dstat,col="gray",lwd=3)
legend("topright",
c("30 Night-ahead predicted", "Marginal Prediction"), #Text
col= c("black", "gray"), #Line colors
lty=c("solid","solid"), #Line types
lwd=c(2.0, 2.0), #Line thickness
bty= "n", #No border ("o" if border)
cex=0.9, #Text size
y.intersp=0.9
)#Spacing between text/lines
dev.off()
|
2224909b1784bc6561374d98a66b00d6aaa8140d
|
8902a1139209246adc0c2a7f6c50f01318b36e99
|
/studies/simParams_iteration.R
|
21804786de45e7c774f93b69c0fc0a228c67ee60
|
[] |
no_license
|
brianconroy/dataInt
|
d948f7e476303f070b3ba061ee6329af1d077a5d
|
d4b261577b9aec781b2810d215f04f77e7176a5c
|
refs/heads/master
| 2021-05-13T11:34:23.702692
| 2019-10-31T22:36:18
| 2019-10-31T22:36:18
| 117,129,891
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,887
|
r
|
simParams_iteration.R
|
#############################
# Choose simulation parameter
# values
#############################
library(plyr)
library(mvtnorm)
library(R.utils)
sourceDirectory('Documents/research/dataInt/R/')
#### Prism Principal Components
caPr <- load_prism_pcs()
# Coarsen the raster by a factor of 8 for the discrete simulation grid.
caPr.disc <- aggregate(caPr, fact=8)
n_values(caPr.disc[[1]])
plot(caPr.disc)
#### Simulate gaussian process
# Exponential covariance over cell centroids; Theta is the range parameter
# and Phi the scale (as passed to Exponential below).
Theta <- 6
Phi <- 12
# Indices of non-NA raster cells and their coordinates.
cells.all <- c(1:ncell(caPr.disc))[!is.na(values(caPr.disc[[1]]))]
coords <- xyFromCell(caPr.disc, cell=cells.all)
d <- as.matrix(dist(coords, diag=TRUE, upper=TRUE))
Sigma <- Exponential(d, range=Theta, phi=Phi)
set.seed(40)
# One draw of the latent spatial random effect W (mean-zero GP).
W <- mvrnorm(n=1, mu=rep(0, length(cells.all)), Sigma)
N <- length(W)
#### Simulate locations
r <- caPr.disc[[1]]
locs <- simLocW(W, r, beta=0, seed=11) # 42
sum(locs$status)
hist(W)
plot(r)
points(locs$coords)
# Scenario labels plus intercepts/coefficients for the case and control
# count models.
sampling <- 'medium'
prev <- 'low'
Alpha.case <- 1
beta.case <- c(-0.5, 0.75, -0.5)
Alpha.ctrl <- -1
beta.ctrl <- c(4.5, 1, 0.5)
#### Simulate counts given locations
cov.disc <- caPr.disc
case.data <- simConditionalGp2(cov.disc, locs, beta.case, Alpha.case, W, seed=42)
ctrl.data <- simConditionalGp2(cov.disc, locs, beta.ctrl, Alpha.ctrl, W, seed=40)
# Empirical prevalence implied by the simulated counts.
sum(case.data$y)/sum(case.data$y + ctrl.data$y)
sum(case.data$y)
sum(ctrl.data$y)
print(case.data$y)
print(ctrl.data$y)
print(round(case.data$y/(case.data$y + ctrl.data$y), 3))
# Append this parameter combination to the running simulation log.
new_row <- list(
sampling=sampling,
prevalence=prev,
beta.case=paste(as.character(beta.case), collapse=' '),
beta.ctrl=paste(as.character(beta.ctrl), collapse=' '),
alpha.case=Alpha.case,
alpha.ctrl=Alpha.ctrl,
total.y.ca=sum(case.data$y),
total.y.co=sum(ctrl.data$y),
prev=round(sum(case.data$y)/sum(case.data$y + ctrl.data$y), 2)
)
write.table(data.frame(new_row),
file='/Users/brianconroy/Documents/research/dataInt/output/simParams.txt',
row.names=F,
append=T)
|
fba1e8806cffb6482279a483d832dd3b23c49c5d
|
9db131194754ed5e7d11abedc341e9f8faa58f2d
|
/R/civicr.R
|
65c11ef7aae6dd70ba8e56b29250c241cc600639
|
[
"Apache-2.0"
] |
permissive
|
agduncan94/civicr
|
00b022f015274e5ad7a1a825f1c93c10517a378e
|
4e1fde5121011cfa4bec1792d883f8ff8a2e3bc3
|
refs/heads/develop
| 2020-07-06T14:51:32.393636
| 2019-09-03T16:56:18
| 2019-09-03T16:56:18
| 203,057,121
| 0
| 0
|
Apache-2.0
| 2019-09-03T16:58:49
| 2019-08-18T21:14:17
|
R
|
UTF-8
|
R
| false
| false
| 10,608
|
r
|
civicr.R
|
# CIViC API Client for the R programming language
# Load required libraries
library(httr)
library(jsonlite)
# Set some constants
# Base URL of the public CIViC instance; all endpoints are built under /api/.
baseAPIUrl <- "https://civicdb.org/"
# Identifies this client in the User-Agent header of every request.
userAgent <- user_agent("https://github.com/agduncan94/civicr")
#' Get a list of genes
#'
#' Retrieve all genes from the CIViC DB
#' @param page the page number to retrieve
#' @param count the number of genes to retrieve
#' @return An S3 Object of type civic_api containing the content, url, and response
#' @export
#' @keywords genes
#' @examples
#' getAllGenes(count = 10)
#' getAllGenes(page = 2, count = 10)
getAllGenes <- function(page = 1, count = 25) {
  # Paginated listing of every gene record.
  .commonIndexEndpoint("genes", page, count)
}
#' Get a specific gene
#'
#' Retrieve a specific gene from the CIViC DB
#' @param id ID of the gene of interest
#' @param identifier_type Type of gene identifier (entrez_id, entrez_symbol, civic_id)
#' @return An S3 Object of type civic_api containing the content, url, and response
#' @export
#' @keywords gene
#' @examples
#' getGene(id = 1)
#' getGene(id = "ALK", identifier_type = "entrez_symbol")
#' getGene(id = 238, identifier_type = "entrez_id")
getGene <- function(id, identifier_type = "civic_id") {
  # The identifier type travels as a query parameter.
  query <- list("identifier_type" = identifier_type)
  .commonDetailEndpoint("genes", id, NULL, query)
}
#' Get a specific gene metadata
#'
#' Retrieve metadata for a specific gene from the CIViC DB
#' @param id ID of the gene of interest
#' @param identifier_type Type of gene identifier (entrez_id, entrez_symbol, civic_id)
#' @param type Type of metadata (comments, suggested_changes, revisions, variants)
#' @return An S3 Object of type civic_api containing the content, url, and response
#' @export
#' @keywords gene, metadata
#' @examples
#' getGeneMetadata(id = 1, type = "comments")
#' getGeneMetadata(id = 1, type = "variants")
getGeneMetadata <- function(id, type) {
  # Metadata lives at /api/genes/<id>/<type>.
  .commonDetailEndpoint("genes", id, type, NULL)
}
#' Get a list of variants
#'
#' Retrieve all variants from the CIViC DB
#' @param page the page number to retrieve
#' @param count the number of variants to retrieve
#' @return An S3 Object of type civic_api containing the content, url, and response
#' @export
#' @keywords variants
#' @examples
#' getAllVariants(count = 10)
#' getAllVariants(page = 2, count = 10)
getAllVariants <- function(page = 1, count = 25) {
  # Paginated listing of every variant record.
  .commonIndexEndpoint("variants", page, count)
}
#' Get a specific variant
#'
#' Retrieve a specific variant from the CIViC DB
#' @param id Internal CIViC ID of the variant of interest
#' @return An S3 Object of type civic_api containing the content, url, and response
#' @export
#' @keywords variant
#' @examples
#' getVariant(id = 1)
getVariant <- function(id) {
  # Single variant looked up by internal CIViC ID.
  .commonDetailEndpoint("variants", id)
}
#' Get a specific variant metadata information
#'
#' Retrieve metadata information for a specific variant from the CIViC DB
#' @param id Internal CIViC ID of the variant of interest
#' @param type Type of metadata (comments, suggested_changes, revisions, evidence_items)
#' @return An S3 Object of type civic_api containing the content, url, and response
#' @export
#' @keywords variant, metadata
#' @examples
#' getVariantMetadata(id = 1, type = "comments")
#' getVariantMetadata(id = 1, type = "revisions")
getVariantMetadata <- function(id, type) {
  # Metadata lives at /api/variants/<id>/<type>.
  .commonDetailEndpoint("variants", id, type)
}
#' Get a list of evidence items
#'
#' Retrieve all evidence items from the CIViC DB
#' @param page the page number to retrieve
#' @param count the number of evidence items to retrieve
#' @return An S3 Object of type civic_api containing the content, url, and response
#' @export
#' @keywords evidence items
#' @examples
#' getAllEvidenceItems(count = 10)
#' getAllEvidenceItems(page = 2, count = 10)
getAllEvidenceItems <- function(page = 1, count = 25) {
  # Paginated listing of every evidence item.
  .commonIndexEndpoint("evidence_items", page, count)
}
#' Get a specific evidence item
#'
#' Retrieve a specific evidence item from the CIViC DB
#' @param id Internal CIViC ID of the evidence item of interest
#' @return An S3 Object of type civic_api containing the content, url, and response
#' @export
#' @keywords evidence item
#' @examples
#' getEvidenceItem(id = 1)
getEvidenceItem <- function(id) {
  # Single evidence item looked up by internal CIViC ID.
  .commonDetailEndpoint("evidence_items", id)
}
#' Get a specific evidence item metadata information
#'
#' Retrieve metadata information for a specific evidence item from the CIViC DB
#' @param id Internal CIViC ID of the evidence item of interest
#' @param type Type of metadata (comments, suggested_changes, revisions)
#' @return An S3 Object of type civic_api containing the content, url, and response
#' @export
#' @keywords evidence item, metadata
#' @examples
#' getEvidenceItemMetadata(id = 1, type = "comments")
#' getEvidenceItemMetadata(id = 1, type = "revisions")
getEvidenceItemMetadata <- function(id, type) {
  # Metadata lives at /api/evidence_items/<id>/<type>.
  .commonDetailEndpoint("evidence_items", id, type)
}
#' Get a list of variant groups
#'
#' Retrieve all variant groups from the CIViC DB
#' @param page the page number to retrieve
#' @param count the number of variant groups to retrieve
#' @return An S3 Object of type civic_api containing the content, url, and response
#' @export
#' @keywords variant groups
#' @examples
#' getAllVariantGroups(count = 10)
#' getAllVariantGroups(page = 2, count = 10)
getAllVariantGroups <- function(page = 1, count = 25) {
  # Paginated listing of every variant group.
  .commonIndexEndpoint("variant_groups", page, count)
}
#' Get a specific variant group
#'
#' Retrieve a specific variant group from the CIViC DB
#' @param id Internal CIViC ID of the variant group of interest
#' @return An S3 Object of type civic_api containing the content, url, and response
#' @export
#' @keywords variant group
#' @examples
#' getVariantGroup(id = 1)
getVariantGroup <- function(id) {
  # Single variant group looked up by internal CIViC ID.
  .commonDetailEndpoint("variant_groups", id)
}
#' Get a specific variant group metadata information
#'
#' Retrieve metadata information for a specific variant group from the CIViC DB
#' @param id Internal CIViC ID of the variant group of interest
#' @param type Type of metadata (comments, suggested_changes, revisions)
#' @return An S3 Object of type civic_api containing the content, url, and response
#' @export
#' @keywords variant group, metadata
#' @examples
#' getVariantGroupMetadata(id = 1, type = "comments")
#' getVariantGroupMetadata(id = 1, type = "revisions")
getVariantGroupMetadata <- function(id, type) {
  # Metadata lives at /api/variant_groups/<id>/<type>.
  .commonDetailEndpoint("variant_groups", id, type)
}
#' Get a list of assertions
#'
#' Retrieve all assertions from the CIViC DB
#' @param page the page number to retrieve
#' @param count the number of assertions to retrieve
#' @return An S3 Object of type civic_api containing the content, url, and response
#' @export
#' @keywords assertions
#' @examples
#' getAllAssertions(count = 10)
#' getAllAssertions(page = 2, count = 10)
getAllAssertions <- function(page = 1, count = 25) {
  # Paginated listing of every assertion.
  .commonIndexEndpoint("assertions", page, count)
}
#' Get a specific assertion
#'
#' Retrieve a specific assertion from the CIViC DB
#' @param id Internal CIViC ID of the assertion of interest
#' @return An S3 Object of type civic_api containing the content, url, and response
#' @export
#' @keywords assertion
#' @examples
#' getAssertion(id = 1)
getAssertion <- function(id) {
  # Single assertion looked up by internal CIViC ID.
  .commonDetailEndpoint("assertions", id)
}
#' Get a specific assertion metadata information
#'
#' Retrieve metadata information for a specific variant group from the CIViC DB
#' @param id Internal CIViC ID of the assertion of interest
#' @param type Type of metadata (comments, suggested_changes, revisions)
#' @return An S3 Object of type civic_api containing the content, url, and response
#' @export
#' @keywords assertion, metadata
#' @examples
#' getAssertionMetadata(id = 1, type = "comments")
#' getAssertionMetadata(id = 1, type = "suggested_changes")
getAssertionMetadata <- function(id, type) {
  # Metadata lives at /api/assertions/<id>/<type>.
  .commonDetailEndpoint("assertions", id, type)
}
#' Handle common index endpoints
#'
#' Builds /api/<type>, performs a paginated GET, validates and parses the
#' response, and wraps it in a civic_api object.
#' @param type Type of index endpoint
#' @param page the page number to retrieve
#' @param count the number of records to retrieve
.commonIndexEndpoint <- function(type, page, count) {
  endpoint <- httr::modify_url(baseAPIUrl, path = paste("api", type, sep = "/"))
  pagination <- list("page" = page, "count" = count)
  response <- httr::GET(endpoint, httr::accept_json(), userAgent, query = pagination)
  .verifyJsonResponse(response)
  .handleFailure(response)
  parsed <- httr::content(response, "parsed")
  .createReturnStructure(parsed, endpoint, response)
}
#' Handle common detail endpoints
#'
#' Builds /api/<type>/<id>[/<metadataType>], performs a GET (with optional
#' query parameters), validates and parses the response, and wraps it in a
#' civic_api object.
#' @param type Type of detail endpoint (e.g. "genes", "variants")
#' @param id The internal CIViC ID
#' @param metadataType Optional metadata sub-resource (e.g. "comments")
#' @param queryParameters Optional named list of query parameters
.commonDetailEndpoint <- function(type, id, metadataType=NULL, queryParameters=NULL) {
  appendedPath <- paste("api", type, id, sep = "/")
  if (!is.null(metadataType)) {
    appendedPath <- paste(appendedPath, metadataType, sep = "/")
  }
  url <- httr::modify_url(baseAPIUrl, path = appendedPath)
  # httr::GET() treats query = NULL as "no query string", so one call
  # covers both the plain-detail and the parameterised case.
  response <- httr::GET(url, httr::accept_json(), userAgent, query = queryParameters)
  .verifyJsonResponse(response)
  .handleFailure(response)
  # Explicit httr:: namespace for consistency with the rest of the file.
  detailResponse <- httr::content(response, "parsed")
  return(.createReturnStructure(detailResponse, url, response))
}
#' Handle failure case for httr
#'
#' Raises an R error built from the HTTP status code and the API's own
#' error message when the response indicates failure; no-op otherwise.
#' @param response httr response
.handleFailure <- function(response) {
  if (httr::http_error(response)) {
    # Explicit httr:: namespace for consistency with the rest of the file.
    errorResponse <- httr::content(response, "parsed")
    stop(
      sprintf(
        "CIViC API request failed [%s]\n%s",
        httr::status_code(response),
        errorResponse$error
      ),
      call. = FALSE
    )
  }
}
#' Verify that the httr response is of type "application/json"
#'
#' Stops with an error when the server returned anything other than JSON.
#' @param response httr response
.verifyJsonResponse <- function(response) {
  isJson <- identical(httr::http_type(response), "application/json")
  if (!isJson) {
    stop("CIViC API did not return a JSON", call. = FALSE)
  }
}
#' Create an s3 return structure for exported functions
#'
#' @param content Content of the response
#' @param url URL of the request
#' @param response httr response
#' @return An S3 Object of type civic_api containing the content, url, and response
.createReturnStructure <- function(content, url, response) {
  # Bundle the three pieces and tag the list with the civic_api class.
  out <- list(
    content = content,
    url = url,
    response = response
  )
  class(out) <- "civic_api"
  out
}
|
a0dcef763e1cc82c02f05da800c1bf72d91006ab
|
11c642306822f49953e8dfa04abe54d89cadc0f8
|
/cachematrix.R
|
20622edc465fd8bb2205cf1c0e661c9492dbadc5
|
[] |
no_license
|
shetimaj/ProgrammingAssignment2
|
ed439c9944606baa734e7f3652687208f50a3a20
|
789f549cfb266e8255d0c1b5343501555f882893
|
refs/heads/master
| 2020-12-14T06:16:56.105432
| 2015-07-23T03:50:47
| 2015-07-23T03:50:47
| 39,516,583
| 0
| 0
| null | 2015-07-22T16:16:25
| 2015-07-22T16:16:24
| null |
UTF-8
|
R
| false
| false
| 1,574
|
r
|
cachematrix.R
|
## makeCacheMatrix: wrap a matrix together with a cache for its inverse.
## Returns a list of four accessor closures sharing the same environment:
##   set(y)          replace the stored matrix and invalidate the cache
##   get()           return the stored matrix
##   setinverse(...) store a computed inverse in the cache
##   getinverse()    return the cached inverse, or NULL if not yet computed
makeCacheMatrix <- function(x = matrix()) {
        inverseCache <- NULL
        set <- function(y) {
                # A new matrix invalidates any previously cached inverse.
                x <<- y
                inverseCache <<- NULL
        }
        get <- function() {
                x
        }
        setinverse <- function(solve) {
                inverseCache <<- solve
        }
        getinverse <- function() {
                inverseCache
        }
        list(set = set, get = get,
             setinverse = setinverse, getinverse = getinverse)
}
## cacheSolve: return the inverse of the matrix wrapped by makeCacheMatrix.
## A previously cached inverse is returned directly (with a message);
## otherwise the inverse is computed with solve(), stored in the cache,
## and returned.
cacheSolve <- function(x, ...) {
        ## Return a matrix that is the inverse of 'x'
        cached <- x$getinverse()
        if (is.null(cached)) {
                # No cached inverse yet: compute, cache, and return it.
                freshInverse <- solve(x$get(), ...)
                x$setinverse(freshInverse)
                return(freshInverse)
        }
        message("getting cached data")
        cached
}
|
cf53de194c5964560027490a64694bad6fdf40d8
|
5ae7baef9896050936b986eb40ef97f3dd0bd65d
|
/R/reduce.r
|
8043a3ab2a023e1dfcd41a4fbdcc8d6282409538
|
[] |
no_license
|
jdthorpe/LifeTable
|
d4064d617861f7c2324ecbe8cf069da7aab9ae3d
|
823dfcfa6d569d85bb2a5b0e077ab4920f70f446
|
refs/heads/master
| 2020-06-08T00:10:30.078615
| 2015-07-25T22:19:28
| 2015-07-25T22:19:28
| 30,566,170
| 4
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 4,561
|
r
|
reduce.r
|
# Reduce the model's parameter space before optimization by treating every
# hazard switch named in `nullSwitches` as absent: its proportional hazard is
# fixed to 1 and its name is removed from the name matrices / parameter lists.
#
# Args:
#   sharedParamList: list describing the full model (propHazMx, hazNameMX,
#       txHazNameMat, txParamList, plus memoization hashes built elsewhere).
#   nullSwitches: non-empty character vector of switch names to null out.
# Returns: a list shaped like sharedParamList with the reduced components
#   (see the return statement at the bottom for the exact fields).
#
# NOTE(review): relies on `states`, `uniqueStrings()`, `switchStatesToGateMx()`
# and `alt_pathsFromAtoB()` being defined elsewhere in the package -- confirm
# they are in scope wherever this is called.
reduce <- function(sharedParamList,nullSwitches){
	# --------------------------------------------------
	# this function reduces the problem space that will
	# be optimized (if necessary)
	# --------------------------------------------------
	# Input validation: require a non-empty character vector.
	stopifnot(all(!is.null(nullSwitches)))
	stopifnot(length(nullSwitches)>0)
	stopifnot(mode(nullSwitches)=='character')
	# --------------------------------------------------
	# reduce the proportional hazard matrix
	# (hazard ratio 1 == the switch has no effect)
	# --------------------------------------------------
	reduced_propHazMx <- sharedParamList$propHazMx
	for(name in nullSwitches)
		reduced_propHazMx[sharedParamList$hazNameMX == name] <- 1
	# --------------------------------------------------
	# txHazList
	# --------------------------------------------------
	# (calculated identically to the original variable
	# but using the reduced propHazMx and txHazNameMat)
	# NOTE(review): both loop bounds use nrow(); presumably propHazMx is
	# square (states x states) -- confirm.
	reduced_txHazList <- list() # (tx == transition)
	for(i in 1:nrow(reduced_propHazMx))
	for(j in 1:nrow(reduced_propHazMx)){
		if(is.na(reduced_propHazMx[i,j]) || reduced_propHazMx[i,j] == 1)
			next
		# Record the remaining (from, to, hazard-ratio) triples.
		reduced_txHazList <- c(reduced_txHazList,
							   list(c(i,j,reduced_propHazMx[i,j])))
	}
	# --------------------------------------------------
	# reduce the hazard name matrix: nulled switches lose their name
	# --------------------------------------------------
	reduced_hazNameMX <- sharedParamList$hazNameMX
	for(name in nullSwitches)
		reduced_hazNameMX[reduced_hazNameMX == name] <- NA
	# --------------------------------------------------
	# txParamNames
	# --------------------------------------------------
	reduced_txParamNames <- uniqueStrings(reduced_hazNameMX)
	# --------------------------------------------------
	# txHazNameMat
	# --------------------------------------------------
	# NOTE: the outer parentheses make this assignment auto-print
	# (presumably leftover debugging output).
	(reduced_txHazNameMat <- sharedParamList$txHazNameMat)
	reduced_txHazNameMat[sharedParamList$hazNameMX %in% nullSwitches] <- NA
	# --------------------------------------------------
	# exit switches
	# NOTE(review): "retuced" is a typo for "reduced"; the value is returned
	# under the name 'ES' below, so the typo is internal-only.
	# --------------------------------------------------
	retuced_ES <- apply(rbind(reduced_txHazNameMat, reduced_hazNameMX ), 2,uniqueStrings)
	# --------------------------------------------------
	# txParamList - just remove the unneeded entries...
	# --------------------------------------------------
	reduced_txParamList <- sharedParamList$txParamList
	for(name in nullSwitches)
		reduced_txParamList[[name]] <- NULL
	# ------------------------------------------------------------
	# reduced_switchNames
	# ------------------------------------------------------------
	reduced_switchNames <- c(reduced_txParamNames,
							 if(length(reduced_txHazList))
								 uniqueStrings(reduced_txHazNameMat))
	# ------------------------------------------------------------
	# allGatesMX
	# ------------------------------------------------------------
	# (calculated identically to the original variable
	# but using the reduced propHazMx and txHazNameMat)
	reduced_switchStates <- rep(1,length(reduced_switchNames))
	names(reduced_switchStates) <- reduced_switchNames
	reduced_allGatesMX <- allGatesMX_alt <- switchStatesToGateMx(
							reduced_switchStates,
							reduced_txParamList,
							reduced_txHazList,
							reduced_txParamNames)
	# ------------------------------------------------------------
	# allFromToMX: reachability matrix between states under the
	# reduced gate matrix.
	# NOTE(review): `F` is used for FALSE and `states` is a free variable
	# (global or package-level) -- confirm it is defined at call time.
	# ------------------------------------------------------------
	reduced_allFromToMX <- matrix(F,length(states),length(states))
	for(from in 1:length(states))
	for(to in 1:length(states))
		if(length(alt_pathsFromAtoB(from,to,reduced_allGatesMX)))
			reduced_allFromToMX[from,to] <- TRUE
	# Leftover debugging statements, intentionally commented out:
	#-- print('allFromToMX')
	#-- print('allFromToMX')
	#-- print('allFromToMX')
	#-- print('allFromToMX')
	#-- while(TRUE)browser()
	# ------------------------------------------------------------
	# return the reduced parameters
	# ------------------------------------------------------------
	return(list('ES'=retuced_ES,
				'txParamNames'=reduced_txParamNames,#txParamNames,
				'allGatesMX'=reduced_allGatesMX,#allGatesMX,
				'txHazList'=reduced_txHazList,#txHazList,
				'txParamList'=reduced_txParamList,#txParamList,
				'txHazNameMat'=reduced_txHazNameMat,
				'allFromToMX'=reduced_allFromToMX,#allFromToMX,
				# parameters
				'propHazMx'=reduced_propHazMx,
				'hazNameMX'=reduced_hazNameMX,
				# these are unchanged...
				'hashSwitchNames'=sharedParamList$hashSwitchNames,
				'hasExitHash'=sharedParamList$hasExitHash,
				'AB_prob_cond_on_switches_hash'=sharedParamList$AB_prob_cond_on_switches_hash,
				# don't clear the hashes
				'clear'=function(){ # pass
				}))
}
|
149152e393fa7f822105dafb67e4d395ce0350e2
|
29ce175254b27a361714074b99fe5644ca2c3158
|
/man/mutate.Rd
|
16e8fa2f8ff4cd088d038deee0fe2cddaa258a88
|
[] |
no_license
|
raz1/Momocs
|
148903d3f05428e72d7cef8ede46c0bba7f80f04
|
09a817bb0720d87969d48dd9e0f16516e042e13e
|
refs/heads/master
| 2021-01-17T18:09:55.724182
| 2014-08-15T14:49:17
| 2014-08-15T14:49:17
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 974
|
rd
|
mutate.Rd
|
% Generated by roxygen2 (4.0.0): do not edit by hand
\name{mutate}
\alias{mutate}
\alias{mutate.Coe}
\alias{mutate.default}
\title{Mutate Coe (and others) objects}
\usage{
mutate(x, ...)
\method{mutate}{default}(x, margin = 2, size = dim(x)[ifelse(margin == 1, 2,
1)], ...)
\method{mutate}{Coe}(x, size = nrow(x$coe), ...)
}
\arguments{
\item{x}{the object to permute}
\item{margin}{numeric whether 1 or 2 (rows or columns)}
\item{size}{numeric the required size for the final object}
\item{...}{useless here}
}
\description{
This method applies column-wise on the \code{coe} of any
\link{Coe} object but relies on a function that can be used on any matrix. It
simply uses \link{rnorm} with the mean and sd calculated for every column (or row).
}
\examples{
m <- matrix(1:12, nrow=3)
mutate(m, margin=2, size=4)
mutate(m, margin=1, size=10)
data(bot)
bot.f <- eFourier(bot, 12)
bot.m <- mutate(bot.f, 80)
bot.m
panel(bot.m)
}
\seealso{
\link{perm}
}
\keyword{Coe}
|
cb06dffab1c48e8b0946728da6ead006bdf366e7
|
be62417876def276106d632e5ec81823eb85bca6
|
/compositeScenes.r
|
27550145aa0c852b03ff2c32ad52960fcbe08c66
|
[] |
no_license
|
AlecNelson/MekongScripts
|
b7d6f2e6f3aa6a7a41777913425669df346bedac
|
43dc1304b1dcafb38e50f57cb5f96b86aa51c98f
|
refs/heads/master
| 2021-01-23T05:34:41.967743
| 2014-10-16T15:14:59
| 2014-10-16T15:14:59
| 25,307,955
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,003
|
r
|
compositeScenes.r
|
# compositeScenes.r
#
# Composite two co-registered scenes (e.g. terra and aqua) pixel-wise by
# taking the greater or lesser value of the two scenes.
# NA pixels are filled from the other scene where possible; pixels that are
# NA in both scenes remain NA.
# input: two RasterLayer objects on the same grid, and method = "max"/"min"
# output: a composited RasterLayer carrying scene1's georeference
compositeScenes <- function(scene1, scene2, method)
{
	require(raster)
	# Fail early with a clear message on an unsupported method; previously an
	# unrecognized method left `composite` undefined and triggered an obscure
	# "object 'composite' not found" error further down.
	method <- match.arg(method, c("max", "min"))
	scene1_mat <- as.matrix(scene1)
	scene2_mat <- as.matrix(scene2)
	if(method=="max"){
		# take the max values
		# NAs are not taken if another value is available;
		# pixels that are NA in both scenes stay NA
		composite <- pmax(scene1_mat, scene2_mat, na.rm=TRUE)
	}else{
		composite <- pmin(scene1_mat, scene2_mat, na.rm=TRUE)
	}
	# ready for output: rebuild a raster and copy scene1's georeferencing
	composite_raster <- raster(composite)
	projection(composite_raster) <- CRS(scene1@crs@projargs)
	extent(composite_raster) <- extent(scene1)
	return(composite_raster)
}
|
16f8815c512c5ac0b8c5914744bfbb640a1eec5a
|
b23e2dc0052aceeec4c0fd1818e7e44f87604178
|
/TFM 2. arima.R
|
78c231f5e22a00263268527a1730849a488899bb
|
[] |
no_license
|
JuanferMG/TFM
|
091be99283c05547adce6c12d9984c97b9a642e0
|
7952e87f0c0a0a9de4e7232a601f72f953316962
|
refs/heads/master
| 2023-01-24T12:10:13.076667
| 2020-11-30T13:09:39
| 2020-11-30T13:09:39
| 277,876,666
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 7,880
|
r
|
TFM 2. arima.R
|
# ***** DATA *****
# Download the neighbourhood data sets from my GitHub account.
# NOTE(review): GET/write_disk come from httr and read_excel from readxl --
# presumably loaded earlier in the project; confirm.
url<-"https://raw.githubusercontent.com/JuanferMG/TFM/master"
# Demographic data for 2004-2019
url1<-paste0(url,"/DatosBarrioDemografia.xlsx")
GET(url1, write_disk(datos <- tempfile(fileext = ".xlsx")))
DatosBA <- as.data.frame(read_excel(datos))
# Income data for 2015-2017
url2<-paste0(url,"/DatosBarrioRenta.rda")
GET(url2, write_disk(datos2 <- tempfile(fileext = ".rda")))
load(datos2)
# Shapefile with the polygons used to draw the maps
url3<-paste0(url,"/barrios.rda")
GET(url3, write_disk(barrios <- tempfile(fileext = ".rda")))
load(barrios)
# ***** PROJECTIONS VIA ARIMA MODELS *****
# Define a function that fits an ARIMA model per neighbourhood and projects
# the given indicator three years ahead (to 2021).
# NOTE(review): reads the global data frame `DatosBA`, and uses
# auto.arima/arimaorder/forecast (package forecast) and rmse (package
# Metrics) -- presumably loaded elsewhere; confirm.
sibila<-function(variable){
  # The 2019 values of the indicator are used to measure goodness of fit
  valor.observado<-DatosBA[which(DatosBA$Anyo==2019),variable]
  # The 2004-2018 values of the indicator serve as predictors
  # (one row per year, one column per neighbourhood)
  predictores<-c()
  for(i in 2004:2018){
    predictores<-rbind(predictores,DatosBA[which(DatosBA$Anyo==i),variable])
  }
  barrio=unique(DatosBA$BA)
  colnames(predictores)<-paste0("BA",barrio)
  # Initialize the variables used during the prediction
  evolucion.st<-c()
  auto<-c()
  modelo<-c()
  modelo.res<-c()
  shapiro<-c()
  pvalor<-c()
  dependencia.res<-c()
  nivel.acf<-c()
  modelo.prediccion<-c()
  valor.estimado<-c()
  # For each neighbourhood, use the ARIMA model that best fits its series
  for(k in 1:length(barrio)){
    # Build the neighbourhood's yearly time series
    evolucion<-data.frame(fecha=2004:2018, valor=predictores[,paste0("BA",barrio[k])])
    evolucion.st[[k]] <- ts(evolucion$valor, start = 2004, deltat = 1)
    # Calibrate the (p,d,q) orders that best fit the series
    auto[[k]]<-auto.arima(y = evolucion.st[[k]])
    # Fit the ARIMA model associated with the series
    modelo[[k]] <- arima(evolucion.st[[k]], order = arimaorder(auto[[k]]))
    # Compute the residuals
    modelo.res[[k]] <- residuals(modelo[[k]])
    # Check whether the residuals are normally distributed
    shapiro[[k]]<-shapiro.test(modelo.res[[k]])
    pvalor[[k]]<-shapiro[[k]]$p.value
    # Compute the autocorrelation function of the residuals
    dependencia.res[[k]]<-acf(modelo.res[[k]], plot = F, na.action = na.pass)$acf
    # Forecast the short-term values for the next 3 years
    modelo.prediccion[[k]] <- forecast(object = modelo[[k]], h = 3)
    valor.estimado<-cbind(valor.estimado,modelo.prediccion[[k]]$mean)
  }
  colnames(valor.estimado)<-NULL
  # Check that the acf coefficients are not significant, i.e. that the
  # residuals are independent of each other
  # (sqrt(15): 15 observations, years 2004-2018)
  nivel.acf<- qnorm((1 + 0.95)/2)/sqrt(15)
  signficativos.acf<-lapply(dependencia.res,
                            function(x) sum(abs(x) >= nivel.acf) - 1 )
  residuos.acf<-table(unlist(signficativos.acf))
  # Root mean squared error of the first forecast year vs. observed 2019
  RMSE<-rmse(valor.observado,valor.estimado[1,])
  # Return the 2019 values, the projected values, the error, the
  # Shapiro-test p-values, and the counts of significant acf coefficients
  lista<-list(Valor2019=valor.observado,
              Prediccion=valor.estimado,
              Error2019=RMSE,
              P.Valores=pvalor,
              ACF=residuos.acf)
  return(lista)
}
# Project the time series of the 30 computed indicators to 2021
Ind01<-sibila("Crecimiento_Vegetativo")
Ind02<-sibila("Saldo_Migratorio")
Ind03<-sibila("Saldo_Movimientos_Intraurbanos")
Ind04<-sibila("Tasa_Natalidad")
Ind05<-sibila("Tasa_General_Fecundidad")
Ind06<-sibila("Tasa_MortalidadT")
Ind07<-sibila("Tasa_Inmigracion")
Ind08<-sibila("Tasa_Emigracion")
Ind09<-sibila("Tasa_Llegadas_Cambio_Domicilio")
Ind10<-sibila("Tasa_Salidas_Cambio_Domicilio")
Ind11<-sibila("Relacion_Maculinidad_Nacimiento")
Ind12<-sibila("Indice_Sundbarg")
Ind13<-sibila("Indice_Friz")
Ind14<-sibila("Indice_Burgdofer")
Ind15<-sibila("Indice_Generacional_Ancianos")
Ind16<-sibila("Indice_Envejecimiento")
Ind17<-sibila("Indice_Sobreenvejecimiento")
Ind18<-sibila("Indice_Demografico_Dependencia")
Ind19<-sibila("Indice_Estructura_Poblacion_Activa")
Ind20<-sibila("Indice_Reemplazamiento_Poblacion_Activa")
Ind21<-sibila("Indice_Carga_Preescolar")
Ind22<-sibila("Razon_Progresividad_Demografica")
Ind23<-sibila("Relacion_Maculinidad")
Ind24<-sibila("Porcentaje_PobTExtr")
Ind25<-sibila("Porcentaje_PobT65_mas")
Ind26<-sibila("Porcentaje_PobT0_15")
Ind27<-sibila("Porcentaje_PobT_Nacidos_Valencia")
Ind28<-sibila("Porcentaje_Hojas_Fam_Solo80_mas")
Ind29<-sibila("Porcentaje_Hojas_Fam_Menores0")
Ind30<-sibila("Media_Personas_Hojas_Fam")
# Save the projections in an RDA file (acts as a checkpoint so the
# expensive ARIMA fits do not need to be rerun)
save(Ind01, Ind02, Ind03, Ind04, Ind05, Ind06, Ind07, Ind08, Ind09, Ind10,
     Ind11, Ind12, Ind13, Ind14, Ind15, Ind16, Ind17, Ind18, Ind19, Ind20,
     Ind21, Ind22, Ind23, Ind24, Ind25, Ind26, Ind27, Ind28, Ind29, Ind30,
     file = "Predicciones.RDA")
load("Predicciones.RDA")
# Build the data.frame with the indicators we will work with
f=3 # The third row holds the estimates for year 2021
DataSet<-data.frame(Ind01$Prediccion[f,], Ind02$Prediccion[f,], Ind03$Prediccion[f,],
                    Ind04$Prediccion[f,], Ind05$Prediccion[f,], Ind06$Prediccion[f,],
                    Ind07$Prediccion[f,], Ind08$Prediccion[f,], Ind09$Prediccion[f,],
                    Ind10$Prediccion[f,], Ind11$Prediccion[f,], Ind12$Prediccion[f,],
                    Ind13$Prediccion[f,], Ind14$Prediccion[f,], Ind15$Prediccion[f,],
                    Ind16$Prediccion[f,], Ind17$Prediccion[f,], Ind18$Prediccion[f,],
                    Ind19$Prediccion[f,], Ind20$Prediccion[f,], Ind21$Prediccion[f,],
                    Ind22$Prediccion[f,], Ind23$Prediccion[f,], Ind24$Prediccion[f,],
                    Ind25$Prediccion[f,], Ind26$Prediccion[f,], Ind27$Prediccion[f,],
                    Ind28$Prediccion[f,], Ind29$Prediccion[f,], Ind30$Prediccion[f,])
# Define the chosen income variables.
# We take the mean of each variable weighted by the population aged 16+
# (total population minus population aged 0-15) of 2016-2018.
Pond1<-(DatosBA$PobT[which(DatosBA$Anyo==2016)])-
  (DatosBA$PobT0_15[which(DatosBA$Anyo==2016)])
Pond2<-(DatosBA$PobT[which(DatosBA$Anyo==2017)])-(
  DatosBA$PobT0_15[which(DatosBA$Anyo==2017)])
Pond3<-(DatosBA$PobT[which(DatosBA$Anyo==2018)])-
  (DatosBA$PobT0_15[which(DatosBA$Anyo==2018)])
# Indicator 31: mean income per person
Ind31<-(Pond1*DatosBA_RENTA$Indicador1[which(DatosBA_RENTA$Anyo==2015)]+
          Pond2*DatosBA_RENTA$Indicador1[which(DatosBA_RENTA$Anyo==2016)]+
          Pond3*DatosBA_RENTA$Indicador1[which(DatosBA_RENTA$Anyo==2017)])
Ind31<-Ind31/(Pond1+Pond2+Pond3)
# Indicator 32: mean income per household
Ind32<-(Pond1*DatosBA_RENTA$Indicador2[which(DatosBA_RENTA$Anyo==2015)]+
          Pond2*DatosBA_RENTA$Indicador2[which(DatosBA_RENTA$Anyo==2016)]+
          Pond3*DatosBA_RENTA$Indicador2[which(DatosBA_RENTA$Anyo==2017)])
Ind32<-Ind32/(Pond1+Pond2+Pond3)
# Indicator 33: population with income per consumption unit below 60 % of
# the median
Ind33<-(Pond1*DatosBA_RENTA$Indicador64[which(DatosBA_RENTA$Anyo==2015)]+
          Pond2*DatosBA_RENTA$Indicador64[which(DatosBA_RENTA$Anyo==2016)]+
          Pond3*DatosBA_RENTA$Indicador64[which(DatosBA_RENTA$Anyo==2017)])
Ind33<-Ind33/(Pond1+Pond2+Pond3)
# Append the income variables to the demographic indicators
DataSet<-cbind(DataSet, Ind31, Ind32, Ind33)
# Normalize the data set by standardizing with scale()
DataSet<-as.data.frame(apply(DataSet, 2, scale))
colnames(DataSet)<-c(paste0("Ind0",1:9),paste0("Ind",10:33))
|
1beb6a60657d53c4cb2b74b28eb1723643054dee
|
2a7e77565c33e6b5d92ce6702b4a5fd96f80d7d0
|
/fuzzedpackages/texmex/R/ggplot.boot.R
|
55fcb96d03ce28d0ffe1713354007e8f49d62285
|
[] |
no_license
|
akhikolla/testpackages
|
62ccaeed866e2194652b65e7360987b3b20df7e7
|
01259c3543febc89955ea5b79f3a08d3afe57e95
|
refs/heads/master
| 2023-02-18T03:50:28.288006
| 2021-01-18T13:23:32
| 2021-01-18T13:23:32
| 329,981,898
| 7
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,870
|
r
|
ggplot.boot.R
|
# Build one density + histogram diagnostic panel per bootstrap parameter.
#
# Args:
#   x: object with a `replicates` matrix (one column per parameter) and a
#      `map` component whose coefficients mark the point estimates.
#   denscol: fill/line colour for the density.
#   histcol: fill colour for the histogram.
#   linecol: colour for the vertical point-estimate line.
# Returns: a list of ggplot objects, one per column of x$replicates.
ggbootdensplots <- function(x, denscol, histcol, linecol){
  v <- colnames(x$replicates)
  n <- length(v)
  p <- vector("list", length=n)
  # seq_len(n) (rather than 1:n) degrades gracefully to an empty list when
  # there are no replicate columns; 1:0 would iterate over c(1, 0).
  for (i in seq_len(n)){
    d <- data.frame(x$replicates[, i])
    names(d) <- "x"
    p[[i]] <- ggplot(data=d, aes(x=x)) +
       geom_density(fill=denscol,colour=denscol) +
       geom_histogram(aes(y=..density..),fill=histcol,bins=20,alpha=0.5) +
       scale_x_continuous(v[i]) +
       scale_y_continuous("") +
       # Mark the MAP point estimate for this parameter.
       geom_vline(xintercept=coef(x$map)[i], col=linecol)
  }
  p
}
#' Diagnostic plots for the replicate estimated parameter values in an evmBoot object
#' @param data An object of class 'evmBoot'.
#' @param denscol Colour for the densities. Defaults to 'light blue'.
#' @param histcol Colour for the histograms. Defaults to 'dark blue'.
#' @param linecol Colour for the point estimate lines. Defaults to 'orange'.
#' @param plot.it Whether or not to actually print the plots. Defaults
#'        to \code{plot.it=TRUE}. If \code{plot.it=FALSE}, you might
#'        want to control the layout. Do this with
#'        \code{do.call("grid.arrange", c(plots, ncol=2))}, for example,
#'        where \code{plots} is the object returned by
#'        \code{ggplot.evmBoot}.
#' @param mapping,environment ignored
#' @param ... Additional arguments to \code{ggplot}, currently unused.
#' @aliases ggbootdensplots
#' @keywords hplot
#' @method ggplot evmBoot
#' @export
ggplot.evmBoot <- function(data=NULL, mapping, denscol="light blue", histcol="dark blue", linecol="orange",
                           plot.it=TRUE,
                           ..., environment){
    # One density/histogram panel per bootstrapped parameter.
    panels <- ggbootdensplots(data, denscol=denscol, histcol=histcol, linecol=linecol)
    if (plot.it) {
        # Lay the panels out with one column per parameter.
        arrangeArgs <- c(panels, ncol = ncol(data$replicates))
        do.call("grid.arrange", arrangeArgs)
    }
    invisible(panels)
}
|
8322b553d515168134298d324e400ef8c9e5c4a8
|
ece76676ce0e36438b626e0a9394766e9d658ad2
|
/R/utils.R
|
f33374752aa80aef5db1564ab68dd1da0f49e869
|
[
"LicenseRef-scancode-public-domain",
"LicenseRef-scancode-unknown-license-reference",
"CC-BY-4.0",
"MIT"
] |
permissive
|
rstudio/fontawesome
|
8a1b65937626b5228ff6c978a65ae502faca59f9
|
b056ecaa9169b98e9d01eacbb66dd43e8b4e7cd7
|
refs/heads/main
| 2023-09-01T02:24:26.547607
| 2023-08-24T00:32:47
| 2023-08-24T00:32:47
| 135,504,089
| 277
| 45
|
NOASSERTION
| 2023-08-23T21:58:29
| 2018-05-30T22:31:54
|
R
|
UTF-8
|
R
| false
| false
| 181
|
r
|
utils.R
|
# @staticimports pkg:staticimports
# s3_register
# nocov start
# Null-coalescing operator: yield x unless it is NULL, in which case yield y.
`%||%` <- function(x, y) {
  if (!is.null(x)) x else y
}
# TRUE only for a single scalar NA; FALSE for vectors, NULL, and non-NA values.
is_na <- function(x) {
  na_status <- is.na(x)
  isTRUE(na_status)
}
# nocov end
|
b3d16750c7df8ae46b0e4e235abe88eea182e960
|
504ddda4aa27f155f197cd7bac505eb686a86cae
|
/dcsr2BE.R
|
6c98beb0df06f22aa2972c02bc7924e8b890b8f0
|
[] |
no_license
|
NKweiwang/dscrOmega
|
00635a370364fe6f3cbfb48b3d2a9471f1549d44
|
26a7e83bb6ed6e455bf2cfa80a27533a90dd6603
|
refs/heads/master
| 2021-01-18T18:22:52.401133
| 2016-06-10T20:57:55
| 2016-06-10T20:57:55
| 30,993,049
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 784
|
r
|
dcsr2BE.R
|
library("dscr")
library("BatchExperiments")
source("scenarios.R")
source("methods.R")
source("score.R")
reset_dsc(scenarios,methods, force=TRUE)
res=run_dsc(scenarios,methods,score)
dsc = list(scenarios=scenarios,methods=methods,scorefn=score)
system("rm -r one_sample_location-files")
reg=dsc2BE(dsc,"one_sample_location")
summarizeExperiments(reg)
id1 <- findExperiments(reg, algo.pattern="mean")[1]
testJob(reg,id1)
chunked <- chunk(getJobIds(reg), n.chunks = 10, shuffle = TRUE)
timetaken=system.time(submitJobs(reg, chunked))
res2=reduceResultsExperiments(reg, ids=findDone(reg))
aggregate(squared_error~algo+prob,data=res2,mean)
aggregate(squared_error~method+scenario,res,mean)
aggregate(abs_error~algo+prob,data=res2,mean)
aggregate(abs_error~method+scenario,res,mean)
|
ca62bafa2252c2c55a0e3ca981366231aefd7b80
|
0ade666a7d09e202307d49480f09f584a80682d7
|
/3. getting and cleaning data/1. read xml file.R
|
1def1f44676a8c0c86bf134fd95c0380e356b43d
|
[] |
no_license
|
richarddeng88/Data_science_specialization_JHU
|
a77e0d7b3ca6cdec11550fd35d038008eba7d968
|
80c5471f0bab5b2fb7f99e00080a8f03a635c6d1
|
refs/heads/master
| 2021-01-10T05:10:12.461571
| 2016-09-24T16:21:04
| 2016-09-24T16:21:04
| 54,853,856
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,409
|
r
|
1. read xml file.R
|
# Walkthrough of parsing XML with the XML package: inspect a remote document,
# then two local resume/department files, and convert nodes to data frames.
library(XML)
url <- "http://www.w3schools.com/xml/simple.xml"
xmlFile <- xmlTreeParse(url, useInternalNodes = T)
# xmlParse() is the shortcut for xmlTreeParse(useInternalNodes = TRUE);
# this overwrites the previous parse with an equivalent one.
xmlFile <- xmlParse(url)
# print root size, root name, child name
root <- xmlRoot(xmlFile) #gives content of root
class(root)
xmlName(root) #give name of node, PubmedArticleSet
xmlSize(root) #how many children in node, 5
names(root) #name of root's children
# have a look at the content of the first child entry
root[[1]]
# have a look at the content of the 2nd child entry
root[[2]]
xmlSize(root[[1]]) #number of nodes in each child
# Apply extractors across the first child's nodes
xmlApply(root[[1]], xmlValue)
xmlApply(root[[1]], xmlName)
xmlApply(root[[1]], xmlAttrs)
xmlApply(root[[1]], xmlSize)
# NOTE(review): the next two lines reference `df`, which is not defined
# until further below -- they look pasted from another example and will
# error if this script is run top to bottom.
price <- xpathSApply(df, "//li[@class='price']",xmlValue)
# XML TO DATAFRAME
data <- xmlToDataFrame(df)
########################################################################################
# Local-file example: a resume document with an XSL stylesheet
library(XML)
library(methods)
library(plyr)
# df <- xmlTreeParse("data/XML/resume_w_xsl.xml", useInternalNodes = T)
df <- xmlParse(file = "data/XML/resume_w_xsl.xml")
root <- xmlRoot(df)
xmlName(root)
xmlSize(root)
xmlSize(root[[1]])
xmlApply(root[[1]], xmlName)
root[[1]][[10]]
xmlSize(root[[1]][[10]])
xmlApply(root[[1]][[10]], xmlName)
a <- xmlToDataFrame(root[[1]])
#### test on a departments database file
df <- xmlParse(file = "data/XML/departments.xml")
root <- xmlRoot(df)
xmlName(root)
xmlSize(root)
xmlSize(root[[1]])
xmlApply(root[[1]], xmlName)
|
d14059edc9cce2338fac24f23307219157cfa21c
|
6596b98a42c2604e5dc4708b15eaa29206ddcde9
|
/man/cd03-3-justClusters.Rd
|
6b07d8480a63690b666c38d92dfcd90a5f6e5985
|
[] |
no_license
|
cran/ClassDiscovery
|
15d61d3d1a22cf7b9ba966892295a5e1e2950cc7
|
343881a42a93d88f259698d8244a12be4c6c621b
|
refs/heads/master
| 2021-07-25T12:48:28.313719
| 2021-07-16T14:30:02
| 2021-07-16T14:30:02
| 96,978,813
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,490
|
rd
|
cd03-3-justClusters.Rd
|
\name{justClusters}
\alias{cutHclust}
\alias{cutPam}
\alias{cutKmeans}
\alias{repeatedKmeans}
\alias{cutRepeatedKmeans}
\title{Get the List of Classes From A Clustering Algorithm }
\description{
Unsupervised clustering algorithms, such as partitioning around medoids
(\code{\link[cluster]{pam}}), K-means (\code{\link{kmeans}}), or
hierarchical clustering (\code{\link{hclust}}) after cutting the tree,
produce a list of class assignments along with other structure. To
simplify the interface for the \code{\link{BootstrapClusterTest}} and
\code{\link{PerturbationClusterTest}}, we have written these routines
that simply extract these cluster assignments.
}
\usage{
cutHclust(data, k, method = "average", metric = "pearson")
cutPam(data, k)
cutKmeans(data, k)
cutRepeatedKmeans(data, k, nTimes)
repeatedKmeans(data, k, nTimes)
}
\arguments{
\item{data}{A numerical data matrix}
\item{k}{The number of classes desired from the algorithm}
\item{method}{Any valid linkage method that can be passed to the
\code{\link{hclust}} function}
\item{metric}{Any valid distance metric that can be passed to the
\code{\link{distanceMatrix}} function}
\item{nTimes}{An integer; the number of times to repeat the K-means
algorithm with a different random starting point}
}
\details{
Each of the clustering routines used here has a different
structure for storing cluster assignments. The \code{\link{kmeans}}
function stores the assignments in a \sQuote{cluster} attribute. The
\code{\link[cluster]{pam}} function uses a \sQuote{clustering} attribute. For
\code{\link{hclust}}, the assignments are produced by a call to the
\code{\link{cutree}} function.
It has been observed that the K-means algorithm can converge to
different solutions depending on the starting values of the group
centers. We also include a routine (\code{repeatedKmeans}) that runs
the K-means algorithm repeatedly, using different randomly generated
  starting points each time, saving the best results.
}
\value{
Each of the \code{cut...} functions returns a vector of integer values
representing the cluster assignments found by the algorithm.
The \code{repeatedKmeans} function returns a list \code{x} with three
components. The component \code{x$kmeans} is the result of the call
to the \code{kmeans} function that produced the best fit to the
data. The component \code{x$centers} is a matrix containing the list
of group centers that were used in the best call to \code{kmeans}.
The component \code{x$withinss} contains the sum of the within-group
sums of squares, which is used as the measure of fitness.
}
\author{
Kevin R. Coombes \email{krc@silicovore.com}
}
\seealso{
\code{\link{cutree}},
\code{\link{hclust}},
\code{\link{kmeans}},
\code{\link[cluster]{pam}}
}
\examples{
# simulate data from three different groups
d1 <- matrix(rnorm(100*10, rnorm(100, 0.5)), nrow=100, ncol=10, byrow=FALSE)
d2 <- matrix(rnorm(100*10, rnorm(100, 0.5)), nrow=100, ncol=10, byrow=FALSE)
d3 <- matrix(rnorm(100*10, rnorm(100, 0.5)), nrow=100, ncol=10, byrow=FALSE)
dd <- cbind(d1, d2, d3)
cutKmeans(dd, k=3)
cutKmeans(dd, k=4)
cutHclust(dd, k=3)
cutHclust(dd, k=4)
cutPam(dd, k=3)
cutPam(dd, k=4)
cutRepeatedKmeans(dd, k=3, nTimes=10)
cutRepeatedKmeans(dd, k=4, nTimes=10)
# cleanup
rm(d1, d2, d3, dd)
}
\keyword{cluster}
\keyword{multivariate}
|
a7d61c87c575502ef2df8bd794ff16282d161824
|
8405666329221b7012692a4a61fd6ebdf1df22c5
|
/app.R
|
c52994d32a9d9c47651bf7dd3fc92cc6dae0e2ad
|
[] |
no_license
|
UBC-MDS/mds532_viz_group25-R
|
225a3b2601f11e64fad62025956dc49bb3eb134d
|
3bb5ae8a37ab42fcd59d63c786a355088c6ccf5e
|
refs/heads/main
| 2023-02-25T07:19:09.877556
| 2021-01-31T06:40:49
| 2021-01-31T06:40:49
| 334,164,921
| 0
| 3
| null | 2021-01-31T06:37:09
| 2021-01-29T14:12:54
|
R
|
UTF-8
|
R
| false
| false
| 5,609
|
r
|
app.R
|
library(readr)
library(dash)
library(dashCoreComponents)
library(dashHtmlComponents)
library(dashBootstrapComponents)
library(ggplot2)
library(plotly)
library(tidyverse)
# Initialise the Dash app with the Bootstrap theme
app <- Dash$new(external_stylesheets = dbcThemes$BOOTSTRAP)
# Pre-processed crime data; the first file column holds row names
df <- read.csv("data/processed/clean_data.csv",row.names = 1 )
# Page layout: a narrow control column (state/city/year dropdowns), a bar
# chart column, and a line chart column with a year-range slider.
app$layout(
  dbcContainer(
    list(
      htmlH2('U.S. City Crime Visualization'),
      htmlBr(),
      dbcRow(
        list (
          # Control column
          dbcCol(
            list(
              htmlDiv(id = 'input-area'),
              htmlH4('State:'),
              dccDropdown(
                id="state",
                value="California",
                options= df %>% pull(state_name) %>% unique() %>%  purrr::map(function(col) list(label = col, value = col)),
              ),
              htmlH4('City:'),
              # Initial city options are for California; a callback
              # repopulates them whenever the state changes.
              dccDropdown(
                id="city",
                value="Los Angeles",
                options= df %>%filter(state_name == "California") %>% select(city_name) %>% pull %>% unique() %>%  purrr::map(function(col) list(label = col, value = col)),
              ),
              htmlH4('Year:'),
              dccDropdown(
                id="year",
                value=2015,
                options= df %>% pull(year) %>% unique() %>%  purrr::map(function(col) list(label = col, value = col)),
              )
            )
            ,md = 2),
          # Bar chart column (crime types for one year)
          dbcCol(
            list(
              htmlDiv(id = 'output-area'),
              dccGraph(id='plot-area')
            )
            ,md = 5),
          # Line chart column (crime rates over a year range)
          dbcCol(
            list(
              htmlDiv(id = 'output-area2'),
              dccGraph(id='plot-area2'),
              htmlH4('Year Range'),
              dccRangeSlider(
                id="yrange",
                min = 1975,
                max = 2015,
                marks = list(
                  '1975' = '1975',
                  '1980' = '1980',
                  '1985' = '1985',
                  '1990' = '1990',
                  '1995' = '1995',
                  '2000' = '2000',
                  '2005' = '2005',
                  '2010' = '2010',
                  '2015' = '2015'
                ),
                value= list(1975, 2015), # REQUIRED to show the plot on the first page load
              )
            )
          )
        )
      )
    )
  )
)
# Callback: when the state selection changes, repopulate the city dropdown
# with the cities belonging to that state.
app$callback(
  output("city", "options"),
  list(input("state", "value")),
  function(state){
    state_cities <- df %>%
      filter(state_name == state) %>%
      pull(city_name) %>%
      unique()
    purrr::map(state_cities, function(col) list(label = col, value = col))
  }
)
# Callback: bar chart of the four violent-crime rates for the selected
# city/state in the selected year.
app$callback(
  output('plot-area','figure'),
  list(input("state", "value"),
       input("city", "value"),
       input("year", "value")),
  function(state,city,year){
    # BUG FIX: inside filter(), `year == year` compared the data column with
    # itself (always TRUE except for NA), so rows from *every* year were kept
    # and their rates were stacked in the "single year" bar chart. Capture
    # the input under a name that cannot collide with a column.
    selected_year <- year
    df_select <- df %>% filter(city_name == city, year == selected_year, state_name == state)
    # Reshape the four per-100k rates to long form with readable labels.
    # NOTE(review): "Robery" is a typo for "Robbery" in the displayed label
    # (kept as-is here; it also appears in the line-chart callback).
    data_bar_plot <- df_select %>% select(homs_per_100k:agg_ass_per_100k) %>%
      pivot_longer(homs_per_100k:agg_ass_per_100k,names_to = "type",values_to = "value") %>%
      mutate (type = str_replace(type,"homs_per_100k","Homicide")) %>%
      mutate (type = str_replace(type,"rape_per_100k","Rape")) %>%
      mutate (type = str_replace(type,"rob_per_100k","Robery")) %>%
      mutate (type = str_replace(type,"agg_ass_per_100k","Aggravated Assault"))
    bar <- data_bar_plot %>% ggplot(aes(x = type,y = value, fill = type)) +
      geom_bar(stat = "identity") +
      labs(y = "Crime per 100K", x = "Violent Crime") +
      ggtitle("City violent crimes at year of interest") +
      theme(
        plot.title = element_text(size = 18),
        axis.text = element_text(size = 16),
        axis.title = element_text(size = 16),
        legend.position = "none"
      )
    bar <- bar+theme_classic()+ggthemes::scale_color_tableau()
    # Convert to plotly and move the legend below the plot
    ggplotly(bar) %>%
      layout(legend = list(
        orientation = "h",x = 0.4, y = -0.2
      )
      )
  }
)
# Callback: line chart of violent-crime rates over the selected year range
# for the chosen city/state.
app$callback(
  output('plot-area2','figure'),
  list(input("state", "value"),
       input("city", "value"),
       input("year", "value"),
       input("yrange","value")),
  # `year` is unused by this plot but kept in the signature because the
  # registered input list above supplies it positionally.
  function(state,city,year,yrange){
    # Removed an unused `df_select` assignment (it also carried a latent
    # `year == year` self-comparison bug); this plot depends only on the
    # year range.
    data_line_plot <- df %>% filter(city_name == city, state_name == state, year >= yrange[1], year<=yrange[2])
    # Reshape the per-100k rates (including the total) to long form with
    # readable labels
    data_line_plot <- data_line_plot %>% select(year,violent_per_100k:agg_ass_per_100k) %>%
      pivot_longer(violent_per_100k:agg_ass_per_100k,names_to = "type",values_to = "value") %>%
      mutate (type = str_replace(type,"violent_per_100k","Total")) %>%
      mutate (type = str_replace(type,"homs_per_100k","Homicide")) %>%
      mutate (type = str_replace(type,"rape_per_100k","Rape")) %>%
      mutate (type = str_replace(type,"rob_per_100k","Robery")) %>%
      mutate (type = str_replace(type,"agg_ass_per_100k","Aggravated Assault"))
    line <- data_line_plot %>% ggplot(aes(x = year,y = value, color = type)) +
      geom_line(size = 1) +
      labs(y = "Crime per 100K", x = "Year", color = "Crime types") +
      ggtitle("Violent crimes by Years") +
      scale_x_continuous (breaks = c(seq(1975,2015,5)))+
      theme(
        plot.title = element_text(size = 18),
        axis.text = element_text(size = 16),
        axis.title = element_text(size = 16),
        legend.title = element_text(size = 16),
        legend.text = element_text(size = 14),
      )
    line <-line + theme_classic()+ggthemes::scale_color_tableau()
    # Convert to plotly and move the legend below the plot
    ggplotly(line)%>%
      layout(legend = list(
        orientation = "h",x = 0.4, y = -0.2
      )
      )
  }
)
# Bind to all interfaces so the app is reachable from outside a container.
app$run_server(host = '0.0.0.0')
#app$run_server(debug = T)
|
5c259bcd9629c8b5defa8d903d162a8eb87143a4
|
af89a6dc5a65e482a7e846e5bf679a262deb0d35
|
/imageSynth.R
|
4d27ae2687f868103c6f7ae00a4e71dbc774044a
|
[] |
no_license
|
kamenoseiji/imageSynth
|
12d360625fa65b22dfb5b70ab6ffc29afee7475a
|
9764166e41a84afc199a6e9a7615d5b06631925c
|
refs/heads/master
| 2021-01-19T01:13:49.316532
| 2018-08-13T02:51:04
| 2018-08-13T02:51:04
| 20,640,604
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 8,143
|
r
|
imageSynth.R
|
# Usage: type in command line: Rscript imageSynth.R [filenames of images]
#
Arguments <- commandArgs(trailingOnly = T)
setwd('.')
library(png)
library(jpeg)
library(tiff)
#-------- Parse arguments
# Parse command-line arguments into option values plus image file names.
# Recognised flags: -X<n>/-Y<n> comet-tracking drift, -F<deg> field of view,
# -D<i,j,...> dark-frame indices, -L<i,j,...> luminance (L-channel) indices,
# -C calibration flag, -T contrast transfer-function flag.  Remaining
# arguments are treated as image file names (assumed to follow the flags).
parseArg <- function( args ){
	# Defaults.  BUG FIX: Dark must be initialised here — the original only
	# assigned it inside the -D branch, so the return list below failed with
	# "object 'Dark' not found" whenever -D was not supplied.
	Xtr <- 0; Ytr <- 0; FoV <- 0; calFlag <- FALSE; LCH <- c(); tfnFlag <- FALSE; Dark <- c()
	argNum <- length(args)
	fileNum <- argNum
	for( index in seq_len(argNum) ){
		if(substr(args[index], 1,2) == "-X"){ Xtr <- as.numeric(substring(args[index], 3)); fileNum <- fileNum - 1}
		if(substr(args[index], 1,2) == "-Y"){ Ytr <- as.numeric(substring(args[index], 3)); fileNum <- fileNum - 1}
		if(substr(args[index], 1,2) == "-F"){ FoV <- as.numeric(substring(args[index], 3)); fileNum <- fileNum - 1}
		if(substr(args[index], 1,2) == "-D"){ Dark <- as.integer(unlist(strsplit(substring(args[index], 3),','))); fileNum <- fileNum - 1}# Dark frames
		if(substr(args[index], 1,2) == "-L"){ LCH <- as.integer(unlist(strsplit(substring(args[index], 3),','))); fileNum <- fileNum - 1} # LRGB L channel frames
		if(substr(args[index], 1,2) == "-C"){ calFlag <- TRUE; fileNum <- fileNum - 1} # comet tracking calibration
		if(substr(args[index], 1,2) == "-T"){ tfnFlag <- TRUE; fileNum <- fileNum - 1} # transfer function for contrast
	}
	return( list(calFlag = calFlag, tfnFlag = tfnFlag, Xtr = Xtr, Ytr = Ytr, Dark = Dark, LCH = LCH, FoV = FoV, fname = args[(argNum - fileNum + 1):argNum]))
}
#-------- Identify image file format and read
imageType <- function(fname){
	# Sniff the file's leading bytes and decode it with the matching reader.
	# PNG/TIFF reads keep only the first three channels (any alpha dropped).
	# Returns the image array, or -1 (after printing a message) when the
	# format is not recognised — callers must check for this sentinel.
	header <- readBin(fname, what='raw', n=8)
	if( isJPEG(header) ){ return(readJPEG(fname))}
	if( isPNG(header) ){ return(readPNG(fname)[,,1:3])}
	if( isTIFF(header) ){ return(readTIFF(fname)[,,1:3])}
	cat("Format is other than JPEG, PNG, or TIFF\n"); return(-1)
}
#-------- Identify image format (JPEG?)
isJPEG <- function(header){
	# A JPEG stream opens with the SOI marker bytes 0xFF 0xD8.
	magic <- as.raw(c(0xff, 0xd8))
	all(header[seq_along(magic)] == magic)
}
#-------- Identify image format (PNG?)
isPNG <- function(header){
	# Standard 8-byte PNG file signature.
	magic <- as.raw(c(0x89, 0x50, 0x4E, 0x47, 0x0D, 0x0A, 0x1A, 0x0A))
	all(header[seq_along(magic)] == magic)
}
#-------- Identify image format (TIFF?)
isTIFF <- function(header){
	# A TIFF file starts with a byte-order mark followed by magic number 42:
	#   little-endian: 0x49 0x49 0x2A 0x00 ("II*\0")
	#   big-endian:    0x4D 0x4D 0x00 0x2A ("MM\0*")
	# BUG FIX: the original only compared bytes 3-4 against 0x00 0x2A, which
	# rejected every little-endian TIFF (the common variant) and never
	# inspected the byte-order mark at all.
	leMagic <- as.raw(c(0x49, 0x49, 0x2a, 0x00))
	beMagic <- as.raw(c(0x4d, 0x4d, 0x00, 0x2a))
	head4 <- header[1:4]
	return( all(head4 == leMagic) || all(head4 == beMagic) )
}
#-------- Extract image profile to register multiple images
XYprofile <- function(intensity){
	# Registration profiles for frame alignment: for each column, the row
	# index of its brightest pixel (X); for each row, the column index of
	# its brightest pixel (Y).
	list(
		X = apply(intensity, 2, which.max),
		Y = apply(intensity, 1, which.max))
}
#-------- Determine image shifts using cross correlation functions
crossCorr <- function( X, Y ){
	# Lag (in pixels) that best aligns profile Y with profile X, taken from
	# the peak of the FFT-based circular cross-correlation.  The DC bin of
	# each spectrum is zeroed so constant offsets do not bias the peak.
	pixNum <- length(X)
	halfNum <- floor(pixNum/2)
	XF <- fft(X, inverse=F); XF[1] <- 0.0
	YF <- fft(Y, inverse=F); YF[1] <- 0.0
	XYF <- XF * Conj(YF)
	CF <- fft( XYF, inverse=T)
	# Map FFT bin positions to signed lags: 0..halfNum, then the negative
	# lags wrapped around the end of the transform.
	index <- c( 0:halfNum, (halfNum-pixNum+1):(-1))
	# plot(index, Mod(CF), type='s')
	return( index[which.max(Mod(CF))] )
}
#-------- Indexing image pointer for shifting
shiftRange <- function( dimension, shift=0 ){
	# Index vector that rotates 1:dimension by `shift` positions with
	# wrap-around (positive shift moves the start of the range forward).
	if (shift == 0) {
		1:dimension
	} else if (shift > 0) {
		c((shift + 1):dimension, 1:shift)
	} else {
		c((dimension + shift + 1):dimension, 1:(dimension + shift))
	}
}
#-------- Shift image in X and Y direction
imXYshift <- function(image, shiftX=0, shiftY=0){
	# Circularly shift a 2-D image by (shiftX, shiftY) pixels using the
	# rotated index vectors from shiftRange(); pixels wrap around the edge,
	# no padding is introduced.
	Xrange <- shiftRange( nrow(image), shiftX)
	Yrange <- shiftRange( ncol(image), shiftY)
	return( image[Xrange, Yrange] )
}
#-------- Image Flatter
imFlat <- function(image, FoV){
	# Flat-field style correction: subtract the image minimum, then multiply
	# each pixel by 1/cos(theta)^2 where theta is its angular distance from
	# the image centre, with FoV (degrees) spanning the image diagonal.
	# NOTE(review): the cos^-2 law is what the code implements; the intended
	# optical model (e.g. vignetting compensation) is not stated here.
	NX <- nrow(image); NY <- ncol(image)
	pitch <- pi* FoV / sqrt(NX^2 + NY^2) / 180.0 # radian per pixel
	Correction <- 1.0 / cos(pitch* sqrt(outer( ((-NX/2 + 0.5):(NX/2 - 0.5))^2, rep(1.0, NY)) + outer( rep(1.0, NX), ((-NY/2 + 0.5):(NY/2 - 0.5))^2)))^2
	cat(sprintf('Correction[max, min] = %f %f\n', max(Correction), min(Correction)))
	return(Correction* (image - min(image)))
}
#-------- Image Scale : set (0, 1]
imScale <- function(image){
	# Linearly rescale values so the minimum maps to 0 and the maximum to 1.
	lo <- min(image)
	hi <- max(image)
	(image - lo) / (hi - lo)
}
#-------- Image Contrast
imContrast <- function(image){
	# Sigmoid-like contrast stretch: atan of each pixel's z-score, then a
	# linear rescale of the result onto [0, 1].
	z <- (image - mean(image)) / sd(image)
	stretched <- atan(z)
	(stretched - min(stretched)) / diff(range(stretched))
}
#-------- Procedure: dark-subtract, register, and stack frames into one image
argList <- parseArg(Arguments)
fileNum <- length(argList$fname)
FoV <- argList$FoV
#-------- Classify Dark, L, and RGB files
DarkList <- argList$Dark; numDark <- length(DarkList)
LCHList <- argList$LCH; numLCH <- length(LCHList)
RGBList <- setdiff(seq(fileNum), c(DarkList, LCHList))
numRGB <- length(RGBList)
#-------- Reference Image
refRGB <- imageType(argList$fname[RGBList[1]])	# Read reference image
refProfile <- XYprofile(refRGB[,,1] + refRGB[,,2] + refRGB[,,3])	# Registration profile from the channel-summed intensity
accumRGB <- refRGB	# Image buffer to accumulate
if( argList$calFlag ){ accumRGB[,,1] <- 0.5*accumRGB[,,1]; accumRGB[,,2] <- 0.7*accumRGB[,,2]}
outFname <- sprintf("%s_synth.tiff", argList$fname[RGBList[1]])
cat(sprintf('Stack %d frames and save as %s.\n', numRGB + numLCH, outFname))
#-------- Dark Loop: average all dark frames into a single master dark
accumDark <- 0.0* accumRGB
if(numDark > 0){
	for(index in 1:numDark){
		cat(sprintf('Reading %s as a dark frame\n', argList$fname[DarkList[index]]))
		accumDark <- accumDark + imageType(argList$fname[DarkList[index]])
	}
	accumDark <- accumDark* (1.0/numDark)
}
cat(sprintf('Dark mean=%f sd=%f\n', mean(accumDark), sd(accumDark)))
accumRGB <- accumRGB - accumDark
#-------- RGB Loop for non-reference images
# BUG FIX: the original looped over 2:numRGB unconditionally, which expands
# to c(2, 1) and fails when there is only one RGB frame.
if(numRGB >= 2){
	for(index in 2:numRGB){
		currentRGB <- imageType(argList$fname[RGBList[index]]) - accumDark	# Read image
		if( argList$calFlag ){ currentRGB[,,3] <- 0.5* currentRGB[,,3]; currentRGB[,,2] <- 0.7* currentRGB[,,2]}
		currentProfile <- XYprofile( currentRGB[,,1] + currentRGB[,,2] + currentRGB[,,3] )	# Channel-summed profile for image shift
		Xlag <- crossCorr( refProfile$X, currentProfile$X)	# Image shift in X axis
		Ylag <- crossCorr( refProfile$Y, currentProfile$Y)	# Image shift in Y axis
		#---- Comet Tracker: compensate a linear drift across the sequence
		Xlag <- Xlag - floor( (RGBList[index] - 1)* argList$Xtr / (fileNum - 1))
		Ylag <- Ylag + floor( (RGBList[index] - 1)* argList$Ytr / (fileNum - 1))
		cat(sprintf("RGB[%d/%d] %s: Shift (%d, %d) pixels in (X, Y).\n", index, numRGB, argList$fname[RGBList[index]], Xlag, Ylag))
		#-------- Shift and Accumulate image
		for(colIndex in 1:3){
			accumRGB[,,colIndex] <- accumRGB[,,colIndex] + imXYshift(currentRGB[,,colIndex], -Ylag, -Xlag)
		}
	}
}
#-------- LCH Loop: align and stack luminance frames (first channel only)
if(numLCH > 0){
	accumLCH <- 0.0* accumRGB[,,1]
	for(index in 1:numLCH){
		currentLCH <- imageType(argList$fname[LCHList[index]]) - accumDark	# Read image
		currentProfile <- XYprofile( currentLCH[,,1] + currentLCH[,,2] + currentLCH[,,3] )	# Channel-summed profile for image shift
		Xlag <- crossCorr( refProfile$X, currentProfile$X)	# Image shift in X axis
		Ylag <- crossCorr( refProfile$Y, currentProfile$Y)	# Image shift in Y axis
		cat(sprintf("LCH[%d/%d] %s: Shift (%d, %d) pixels in (X, Y).\n", index, numLCH, argList$fname[LCHList[index]], Xlag, Ylag))
		accumLCH <- accumLCH + imXYshift(currentLCH[,,1], -Ylag, -Xlag)
	}
}
#------- Image Flattener (field-of-view based correction, see imFlat)
if( FoV > 0.0 ){
	cat('Image flattening...\n')
	for(color_index in 1:3){
		accumRGB[,,color_index] <- imFlat( accumRGB[,,color_index], FoV )
	}
	if(numLCH > 0){ accumLCH <- imFlat(accumLCH, FoV) }
}
#------- Image normalization: plain rescale, or contrast stretch with -T
transferFn <- imScale
if(argList$tfnFlag){ transferFn <- imContrast }
scaleRGB <- transferFn(accumRGB)
#------- LRGB composite: blend the L channel with the normalised RGB sum
if(numLCH > 0){
	sumRGB <- (accumRGB[,,1] + accumRGB[,,2] + accumRGB[,,3])
	scaleLCH <- transferFn(accumLCH)
	scaleSUM <- transferFn(sumRGB)
	LCH <- 0.5*(scaleLCH + scaleSUM)
	for(color_index in 1:3){
		accumRGB[,,color_index] <- LCH* scaleRGB[,,color_index]
	}
} else { accumRGB <- scaleRGB }
if(argList$tfnFlag){ accumRGB <- accumRGB^0.6 }	# gamma lift after contrast stretch
#-------- Save to 16-bit TIFF
#writePNG(accumRGB, outFname)
# BUG FIX: outFname already ends in "_synth.tiff"; the original wrapped it in
# sprintf("%s_synth.tiff", outFname) and wrote "<name>_synth.tiff_synth.tiff".
writeTIFF(accumRGB, outFname, bits.per.sample=16 )
|
6257ae6f1881e6be95bccfdb9e294bd603916fec
|
7695d9fa40a26df410954287b48400dc5377abc7
|
/man/vipermat_brca.Rd
|
4c5f53eb2e9f06788bfd21c753fbbe3ee688a135
|
[] |
no_license
|
andrewholding/ZMIZ1
|
e95049b8590bebfec1851f9f144ffaa4a4d83b43
|
898ed2ffbd18339663ebdd0d3555a10d6263ae06
|
refs/heads/master
| 2023-08-18T01:04:23.432511
| 2023-08-03T17:14:48
| 2023-08-03T17:14:48
| 177,729,716
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 402
|
rd
|
vipermat_brca.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/datasets.R
\docType{data}
\name{vipermat_brca}
\alias{vipermat_brca}
\title{VIPER Activity Matrix for TCGA BRCA patient samples.}
\format{
An object of class \code{matrix} (inherits from \code{array}) with 4386 rows and 1037 columns.
}
\usage{
vipermat_brca
}
\description{
VIPER-inferred protein activity matrix for TCGA breast cancer (BRCA) patient
samples: 4386 regulatory proteins (rows) by 1037 samples (columns).
}
\examples{
vipermat_brca
}
\keyword{datasets}
|
c1e73373610b593d466321dc9454a85a0c768f9e
|
0eac6f72fc988546ee57127b5741e3d12e2379a5
|
/man/fitHigherOrder.Rd
|
77f9fec7fb096f6181df289c45bbf7b2ec676955
|
[
"MIT"
] |
permissive
|
spedygiorgio/markovchain
|
4e70064a749f55d52bcdfffb7559e7027b161cc1
|
4eb1ec1b67f9231c129db5da3cc2ba51bd5f4121
|
refs/heads/master
| 2023-06-09T15:48:30.895373
| 2023-05-16T21:25:26
| 2023-05-16T21:25:26
| 31,481,152
| 111
| 58
|
NOASSERTION
| 2023-05-18T22:00:52
| 2015-02-28T23:54:38
|
R
|
UTF-8
|
R
| false
| true
| 1,127
|
rd
|
fitHigherOrder.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/fitHigherOrder.R
\name{fitHigherOrder}
\alias{fitHigherOrder}
\alias{seq2freqProb}
\alias{seq2matHigh}
\title{Functions to fit a higher order Markov chain}
\usage{
fitHigherOrder(sequence, order = 2)
seq2freqProb(sequence)
seq2matHigh(sequence, order)
}
\arguments{
\item{sequence}{A character vector of observed states.}
\item{order}{Markov chain order}
}
\value{
A list containing lambda, Q, and X.
}
\description{
Given a sequence of states arising from a stationary state, it
fits the underlying Markov chain distribution with higher order.
}
\examples{
sequence<-c("a", "a", "b", "b", "a", "c", "b", "a", "b", "c", "a", "b",
"c", "a", "b", "c", "a", "b", "a", "b")
fitHigherOrder(sequence)
}
\references{
Ching, W. K., Huang, X., Ng, M. K., & Siu, T. K. (2013). Higher-order markov
chains. In Markov Chains (pp. 141-176). Springer US.
Ching, W. K., Ng, M. K., & Fung, E. S. (2008). Higher-order multivariate
Markov chains and their applications. Linear Algebra and its Applications,
428(2), 492-507.
}
\author{
Giorgio Spedicato, Tae Seung Kang
}
|
f1a80acaf98d800183bb32c2666bf12fab02b2c6
|
0a906cf8b1b7da2aea87de958e3662870df49727
|
/ggforce/inst/testfiles/enclose_points/libFuzzer_enclose_points/enclose_points_valgrind_files/1609956127-test.R
|
61e6a684e343644214cd3e2f09929f3a3654e13e
|
[] |
no_license
|
akhikolla/updated-only-Issues
|
a85c887f0e1aae8a8dc358717d55b21678d04660
|
7d74489dfc7ddfec3955ae7891f15e920cad2e0c
|
refs/heads/master
| 2023-04-13T08:22:15.699449
| 2021-04-21T16:25:35
| 2021-04-21T16:25:35
| 360,232,775
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 320
|
r
|
1609956127-test.R
|
# Fuzzer-captured regression input (libFuzzer/valgrind) replayed against the
# internal ggforce:::enclose_points routine: empty id/x vectors with a
# 13-element y vector of extreme doubles.  str() prints the result structure;
# this reproduces a crash/UB case rather than asserting correctness.
testlist <- list(id = integer(0), x = numeric(0), y = c(1.55923433229464e-47, 1.51979061388169e-47, 1.51979061388169e-47, 1.75532729598899e-317, -3.38043295900995e+221, 7.28756190143209e-304, -1.557374211109e-207, 7.36157812303457e-322, 0, 0, 0, 0, 0))
result <- do.call(ggforce:::enclose_points,testlist)
str(result)
|
3b3c9f55e078f54e1c40119bc4e099d364be1e25
|
f18da959a5b0818a2b64236d7fd57d15738beea1
|
/R/PRcalc.R
|
dec324eda7c064926a85063dd9c712b9cd41a6aa
|
[
"MIT"
] |
permissive
|
JaehyunSong/PRcalc
|
9c80f01058d51b6ddc0e213beeb6d623d844e819
|
37d27f02812f480485d472b6f0a6f4e57a52626d
|
refs/heads/master
| 2022-04-26T10:34:21.928644
| 2022-03-11T02:16:53
| 2022-03-11T02:16:53
| 34,711,565
| 4
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 7,894
|
r
|
PRcalc.R
|
# Generated by fusen: do not edit by hand
#' 比例代表の議席割当計算
#'
#' @param x \code{data.frame}、または\code{tibble}オブジェクト
#' @param seats 議席数; 長さ1以上の\code{numeric}型ベクトル
#' @param method 議席割当方式;
#' @param threshold 阻止条項 (0 ~ 1); 既定値は0
#'
#' @details
#' 各引数の詳細は以下の通りです。
#' \describe{
#' \item{\code{x}}{2列以上の\code{data.frame}、または\code{tibble}オブジェクトです。1列目は政党名、2列目以降は各ブロックにおける当該政党の得票数が入ります。}
#' \item{\code{seats}}{総議席数。全国区で50議席の場合は\code{seats = 50}、3ブロックで議席数がそれぞれ10、20、30の場合は\code{seats = c(10, 20, 30)}のように指定します。}
#' \item{\code{method}}{
#' 議席割当方式
#' \itemize{
#' \item{\code{"dt"}: ドント方式 (d’Hondt / Jefferson)}
#' \item{\code{"sl"}: サン=ラゲ方式 (Sainte-Laguë / Webster)}
#' \item{\code{"msl"}: 修正サン=ラゲ方式 (Modified Sainte-Laguë)}
#' \item{\code{"denmark"}: デンマーク方式 (Danish)}
#' \item{\code{"adams"}: アダムス方式 (Adams)}
#' \item{\code{"imperiali"}: インペリアリ方式 (Imperiali)}
#' \item{\code{"hh"}: ハンチントン=ヒール方式 (Huntington-Hill)}
#' \item{\code{"dean"}: ディーン方式 (Dean)}
#' \item{\code{"hare"}: ヘア=ニーマイヤー方式 (Hare-Niemeyer)}
#' \item{\code{"droop"}: ドループ方式 (Droop)}
#' \item{\code{"imperialiQ"}: インペリアリ・クオタ―方式 (Imperiali Quota)}
#' }
#' }
#' \item{\code{threshold}}{阻止条項。既定値は0ですが、例えば得票率が2%未満の政党に議席を割り当てない場合は\code{threshold = 0.02}のように指定します。}
#' }
#'
#' @importFrom dplyr case_when
#' @importFrom purrr map_df
#' @importFrom stringr str_replace
#'
#' @return
#' \code{PRcalc}型オブジェクト
#' \describe{
#' \item{\code{Vote}}{得票数}
#' \item{\code{Seat}}{獲得議席数}
#' \item{\code{VoteShare}}{得票率}
#' \item{\code{SeatShare}}{獲得議席率}
#' \item{\code{N_Seat}}{各ブロックの議席数および総議席数}
#' \item{\code{N_Block}}{ブロック数}
#' \item{\code{threshold}}{阻止条項の閾値}
#' \item{\code{method}}{議席割当方式}
#' }
#'
#' @seealso
#' \code{\link{print.PRcalc}}, \code{\link{summary.PRcalc}}, \code{\link{index.PRcalc}}, \code{\link{compare.PRcalc}}, \code{\link{plot.PRcalc}}
#'
#' @export
#'
#' @examples
#' \donttest{
#' data(jp_upper_2019)
#' PRcalc(jp_upper_2019, seats = 50, method = "dt")
#'
#' data(jp_lower_2021)
#' japan_2021 <- PRcalc(jp_lower_2021, method = "hare",
#' seats = c(8, 14, 20, 21, 17, 11, 21, 30, 11, 6, 21))
#' print(japan_2021)
#' print(japan_2021, prop = TRUE, digits = 1)
#' summary(japan_2021)
#' plot(japan_2021)
#' }
PRcalc <- function (x,
                    seats,
                    method,
                    threshold = 0) {
  # Apportionment-method keys understood by prcalc_block().
  method_list <- c("hare", "droop", "imperialiQ", "dt",
                   "sl", "msl", "adams", "danish", "hh",
                   "dean", "imperiali")
  if (!(method %in% method_list)) {
    stop("Invalid allocation method. Please see ?PRcalc")
  }
  if (threshold < 0 | threshold >= 1) {
    stop("Invalid threshold (0 > threshold >= 0)")
  }
  seat_vec <- seats
  temp_df <- x
  # Core seat allocation, one column of results per electoral block.
  PRcalc_Result <- prcalc_block(votes = temp_df, seat = seat_vec,
                                method = method, threshold = threshold)
  VS_df <- PRcalc_Result$Vote
  SS_df <- PRcalc_Result$Seat
  # With more than one block, append a Total column summing across blocks.
  if (ncol(VS_df) > 2 & ncol(SS_df) > 2) {
    PRcalc_Result$Vote$Total <- rowSums(PRcalc_Result$Vote[, -1])
    PRcalc_Result$Seat$Total <- rowSums(PRcalc_Result$Seat[, -1])
    VS_df$Total <- rowSums(VS_df[, -1])
    SS_df$Total <- rowSums(SS_df[, -1])
  }
  # Convert counts to percentage shares (column-wise for multi-block data).
  if (ncol(VS_df) == 2 & ncol(SS_df) == 2) {
    VS_df[, -1] <- VS_df[, -1] / sum(VS_df[, -1]) * 100
    SS_df[, -1] <- SS_df[, -1] / sum(SS_df[, -1]) * 100
  } else if (ncol(VS_df) > 2 & ncol(SS_df) > 2) {
    VS_df[, -1] <- as.data.frame(map_df(VS_df[, -1], ~(.x / sum(.x) * 100)))
    SS_df[, -1] <- as.data.frame(map_df(SS_df[, -1], ~(.x / sum(.x) * 100)))
  }
  # Strip the ":Vote"/":Seat" suffixes carried over from prcalc_block().
  names(PRcalc_Result$Vote) <- str_replace(names(PRcalc_Result$Vote), ":Vote", "")
  names(PRcalc_Result$Seat) <- str_replace(names(PRcalc_Result$Seat), ":Seat", "")
  names(VS_df) <- str_replace(names(VS_df), ":Vote", "")
  names(SS_df) <- str_replace(names(SS_df), ":Seat", "")
  # Human-readable method label used by the print method.
  Method_full_name <- case_when(method == "hare" ~ "Hare\u2013Niemeyer",
                                method == "droop" ~ "Droop",
                                method == "imperialiQ" ~ "Imperiali Quota",
                                method == "dt" ~ "D\'Hondt (Jefferson)",
                                method == "sl" ~ "Sainte\u2013Lagu\u00eb (Webster)",
                                method == "msl" ~ "Modified Sainte\u2013Lagu\u00eb",
                                method == "hh" ~ "Huntington\u2013Hill",
                                method == "danish" ~ "Danish",
                                method == "adams" ~ "Adams\'s",
                                method == "imperiali" ~ "Imperiali",
                                method == "dean" ~ "Dean")
  PRcalc_Result <- list(Vote = PRcalc_Result$Vote,
                        Seat = PRcalc_Result$Seat,
                        VoteShare = VS_df,
                        SeatShare = SS_df,
                        N_Seat = c(seats, sum(seats)),
                        N_Block = length(seats),
                        threshold = threshold,
                        Method = Method_full_name)
  # FIX: the class was previously assigned twice (via `class<-` and then again
  # inside structure()); a single structure() call is sufficient.
  structure(PRcalc_Result, class = c("PRcalc", "list"))
}
#' Print \code{PRclac} Object
#'
#' @method print PRcalc
#'
#' @param x \code{PRcalc} object
#' @param prop a logical value
#' @param digits a integer
#' @param ... Ingnored
#'
#' @importFrom dplyr left_join
#' @importFrom dplyr mutate
#' @importFrom dplyr across
#'
#' @return
#' none
#'
#' @export
#'
print.PRcalc <- function (x, prop = FALSE, digits = 3, ...) {
  # Placeholder binding to satisfy R CMD check (NSE use of `Party`).
  Party <- NULL
  if (prop == FALSE) {
    # Raw vote/seat counts, joined party-wise with _V/_S suffixes.
    result <- left_join(x$Vote, x$Seat, by = "Party",
                        suffix = c("_V", "_S"))
  } else if (prop == TRUE) {
    # Percentage shares, formatted to `digits` decimal places.
    temp_vote <- mutate(x$VoteShare,
                        across(.cols = -Party,
                               ~sprintf(paste0("%.", digits, "f"), .x)))
    temp_seat <- mutate(x$SeatShare,
                        across(.cols = -Party,
                               ~sprintf(paste0("%.", digits, "f"), .x)))
    result <- left_join(temp_vote, temp_seat, by = "Party",
                        suffix = c("_V", "_S"))
  }
  print(result)
  # S3 print methods conventionally return their argument invisibly so that
  # the object keeps flowing through pipes; the original returned the joined
  # data frame instead.
  invisible(x)
}
#' Summarise \code{PRclac} Object
#'
#' @method summary PRcalc
#'
#' @param object \code{PRcalc} object
#' @param prop a logical value
#' @param digits a integer
#' @param ... Ingnored
#'
#' @importFrom dplyr left_join
#' @importFrom dplyr mutate
#' @importFrom dplyr last_col
#' @importFrom dplyr across
#'
#' @return
#' A data.frame
#'
#' @export
#'
summary.PRcalc <- function (object, prop = FALSE, digits = 3, ...) {
  # Placeholder bindings to satisfy R CMD check (NSE column references).
  result <- vote <- seat <- NULL
  if (prop) {
    # Percentage shares, formatted to `digits` decimal places.
    vote <- select(object$VoteShare, 1, Total_Vote = last_col())
    seat <- select(object$SeatShare, 1, Total_Seat = last_col())
    result <- left_join(vote, seat, by = "Party")
    result <- mutate(result,
                     across(.cols = -1,
                            ~sprintf(paste0("%.", digits, "f"), .x)))
  } else {
    # Raw totals taken from the last column of the Vote/Seat tables.
    vote <- select(object$Vote, 1, Total_Vote = last_col())
    seat <- select(object$Seat, 1, Total_Seat = last_col())
    result <- left_join(vote, seat, by = "Party")
  }
  print(result)
}
|
7367df1acd9c3cd0d356c1851a042bf9b342e57f
|
66a10b4451297210ab6dbdb31703b20bdd703c7e
|
/ui.r
|
7613883b5167c55abbac41fd26fbbfba9e4cf084
|
[] |
no_license
|
eakmail/DevDataProds_CourseProject
|
7ccecbc0aae258863c9c4324d1ef52318e93f405
|
0b401f912e5be497d11c1e628c4329ad6c5f9814
|
refs/heads/master
| 2016-09-09T20:24:08.690528
| 2015-02-12T19:21:02
| 2015-02-12T19:21:02
| 30,719,081
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,658
|
r
|
ui.r
|
library(shiny)

# Choices shared by both axis selectors: labels are human-readable mtcars
# column descriptions, values are 1-based column indices.  (The original
# duplicated this 11-entry vector verbatim in each selectInput.)
mtcars_choices <- c("Miles/(US) gallon" = 1,
                    "Number of cylinders" = 2,
                    "Displacement (cu.in.)" = 3,
                    "Gross horsepower" = 4,
                    "Rear axle ratio" = 5,
                    "Weight (lb/1000)" = 6,
                    "1/4 mile time" = 7,
                    "V/S" = 8,
                    "Transmission (0 = automatic, 1 = manual)" = 9,
                    "Number of forward gears" = 10,
                    "Number of carburetors" = 11)

shinyUI(fluidPage(
  # Application title
  titlePanel("Simple MtCars DataSet Graphical Explorer"),
  sidebarLayout(
    # Sidebar with the two axis selectors and a regression-line toggle
    sidebarPanel(
      p("Choose which variables you want to see on the graph:"),
      selectInput(inputId = "varY", "Y Axis:", mtcars_choices, selected = 1),
      selectInput(inputId = "varX", "X Axis:", mtcars_choices, selected = 2),
      checkboxInput("showfit", "Check box to show simple regression line", TRUE),
      p("On the right side you see simple 2D graph on chosen variables of standard 'mtcars' dataset.")
    ),
    # Show a plot of the generated distribution
    mainPanel(
      plotOutput("plot")
    )
  )
))
|
45ed2fb7daf28cb47ffb54bc356b723d983559e2
|
15a8d92018494b0924ce4dd55cbd032f5d88a671
|
/Supervised/SVM/loan.R
|
b47d90f1920edeb9a4f421faf32f8480f4869f7b
|
[] |
no_license
|
Raunaq98/ML
|
def6ec86c47a6b609bddeaca436274b89656095b
|
8e3aef02f3833351535e7c76c8e8ecc09fe1a150
|
refs/heads/master
| 2022-12-05T07:55:02.739050
| 2020-08-22T11:01:35
| 2020-08-22T11:01:35
| 287,127,468
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,820
|
r
|
loan.R
|
# SVM workflow on loan_data.csv: classify and predict whether a borrower
# paid back their loan in full (label column: not.fully.paid).
dir<- getwd()
path1<- paste0(dir,"/Supervised/SVM/loan_data.csv")
loan<- read.csv(path1,header = TRUE, stringsAsFactors = TRUE)

#### converts categorical columns to factor types
loan$credit.policy<- factor(loan$credit.policy)
loan$inq.last.6mths<- factor(loan$inq.last.6mths)
loan$delinq.2yrs<- factor(loan$delinq.2yrs)
loan$pub.rec<- factor(loan$pub.rec)
loan$not.fully.paid<- factor(loan$not.fully.paid)

#### EDA
library(ggplot2)
library(dplyr)

# histogram of fico scores coloured by not.fully.paid
ggplot(loan,aes(x=fico)) +
  geom_histogram(aes(fill=not.fully.paid),color="black")+
  theme_bw() +
  labs(title = "Fico scores histogram with not paid colour")
# as fico scores get higher, people tend to pay their loans more

# barplot of purpose
ggplot(loan,aes(x=purpose)) +
  geom_bar(aes(fill=not.fully.paid), color="black")+
  theme_bw()+
  labs(title = "Purpose barplot with fill=not.fully.paid")   # typo "bot.fully.paid" fixed

# fico vs interest rate
ggplot(loan,aes(x=fico,y=int.rate)) +
  geom_point(position = "jitter",aes(color=not.fully.paid),alpha=0.4)+
  theme_bw() +
  labs(title = "Interest rate vs Fico Scores")
# if fico score goes down then interest rate increases

#### TRAIN TEST SPLIT
library(caTools)
sample<- sample.split(loan$not.fully.paid,SplitRatio = 0.7)
train_loan<- subset(loan,sample==TRUE)
test_loan<- subset(loan,sample==FALSE)   # typo "test_laon" fixed throughout

#### BUILDING MODEL
library(e1071)
loan_svm<- svm(not.fully.paid ~., data = train_loan)
summary(loan_svm)
## cost = 1 & gamma = 0.25

#### PREDICTIONS
predicted_values <- predict(loan_svm, test_loan[1:13]) # removing label from test
table(predicted_values,test_loan$not.fully.paid)
#predicted_values    0    1
#               0 2413  460
#               1    0    0
# this is very wrong and happens because of wrong C and Gamma

#### TUNING THE MODEL
tuned_loan_svm <- tune(svm,
                       train.x = not.fully.paid~.,
                       data=train_loan ,
                       kernel = "radial",
                       ranges = list(cost=c(50,100,200), gamma=c(0.1,0.75)))
summary(tuned_loan_svm)
#best parameters:
#  cost gamma
#   50  0.75
# best performance: 0.1907534 ie. error of 19%

tuned_final<- svm(not.fully.paid~. ,
                  data = train_loan,
                  cost=50,
                  gamma=0.75)
predicted_values<- predict(tuned_final,
                           test_loan[1:13])
tb<-table(predicted_values,test_loan$not.fully.paid)
#predicted_values    0    1
#               0 2265  439
#               1  148   21
accuracy<- sum(diag(tb))/sum(tb)
# [1] 0.795684
## for better results, use a large range of c and gamma for hyper parameter tuning
|
8858cf8b9d39462526b309c6d7160b57006d0f01
|
ffad87e6940136b8c01e99214b2a7ed6da7da63e
|
/ui.R
|
ad6262ed6d99e2b67ab0e850b6a41850cf5205e9
|
[] |
no_license
|
JulieRas/MidtermINT
|
691474b51fe4bada9ee246d3a8e1b4a611097cef
|
4d9acf40db7515108eafbfcf4f0ab4a87b2ee89a
|
refs/heads/master
| 2021-09-03T08:18:26.788951
| 2018-01-07T13:01:17
| 2018-01-07T13:01:17
| 116,565,697
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 455
|
r
|
ui.R
|
library(shiny)

# UI definition: a slider choosing the number of events plus three show/hide
# checkboxes; the server renders the matching chart into output$plot1.
shinyUI(fluidPage(
  titlePanel("Event Study of Thailand"),
  sidebarLayout(
    sidebarPanel(
      # NOTE(review): the input id "sliderMPG" looks copied from an mtcars
      # example; confirm against the server code before renaming.
      sliderInput("sliderMPG", "Events", 10, 35, value = 20),
      checkboxInput("showEvent1", "Show/Hide Event 1", value = TRUE),
      checkboxInput("showEvent2", "Show/Hide Event 2", value = TRUE),
      checkboxInput("showEvent3", "Show/Hide Event 3", value = TRUE)
    ),
    mainPanel(
      plotOutput("plot1")
    )
  )
))
|
88af31896fb51285fcf0a03061d2872db60a8fec
|
022ac7769fbe27d472d84bb5faccbf3ada7242b7
|
/data-preprocessing-and-visualization/data-handling/purrr.R
|
cd9db02409ebcc26249084c248f9e7e651d3293e
|
[
"MIT"
] |
permissive
|
fisproject/R-Study
|
63f354b03d8b1f50677f795eb237bfe16a29999f
|
924a34fd7bc3094646dbb45d3242fe1e8a3f2234
|
refs/heads/master
| 2020-05-21T22:20:52.622002
| 2019-04-03T14:00:02
| 2019-04-03T14:00:02
| 24,105,553
| 13
| 2
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,608
|
r
|
purrr.R
|
library(dplyr)
library(purrr)

# Demo: purrr map()/keep() on a nested list of theme-park attractions.
# The list contents are Japanese string literals (kept verbatim), and the
# commented "# ..." runs below each call are captured console output.

# nested list
attractions <- list(
  list(name = 'カリブの海賊', time_required = 15, tags = c('アドベンチャーランド', 'ファストパス')),
  list(name = 'ビッグサンダー・マウンテン', time_required = 4, tags = c('ウエスタンランド', 'ファストパス', '人気')),
  list(name = 'スプラッシュ・マウンテン', time_required = 10, tags = c('クリッターカントリー', 'ファストパス', '人気'))
)

# Pretty-print the nested structure.
Hmisc::list.tree(attractions)
# attractions = list 3 (2560 bytes)
# .  [[1]] = list 3
# .  .  name = character 1= カリブの海賊
# .  .  time_required = double 1= 15
# .  .  tags = character 2= アドベンチャーランド ファストパス
# .  [[2]] = list 3
# .  .  name = character 1= ビッグサンダー・マウンテン
# .  .  time_required = double 1= 4
# .  .  tags = character 3= ウエスタンランド ファストパス 人気
# .  [[3]] = list 3
# .  .  name = character 1= スプラッシュ・マウンテン
# .  .  time_required = double 1= 10
# .  .  tags = character 3= クリッターカントリー ファストパス 人気

# Extract the `name` element from every sub-list.
attractions %>% map('name')
# [[1]]
# [1] "カリブの海賊"
#
# [[2]]
# [1] "ビッグサンダー・マウンテン"
#
# [[3]]
# [1] "スプラッシュ・マウンテン"

# Formula-style anonymous function: add 5 to each time_required.
attractions %>% map(~ .$time_required + 5)
# [[1]]
# [1] 20
#
# [[2]]
# [1] 9
#
# [[3]]
# [1] 15

# Build a new record per attraction; `popular` flags whether the tags
# contain '人気' ("popular").
new_list <- attractions %>%
  map(~ list(name = .$name, park = 'ディズニーランド', popular = '人気' %in% .$tags))

Hmisc::list.tree(new_list)
# new_list = list 3 (2168 bytes)
# .  [[1]] = list 3
# .  .  name = character 1= カリブの海賊
# .  .  park = character 1= ディズニーランド
# .  .  popular = logical 1= FALSE
# .  [[2]] = list 3
# .  .  name = character 1= ビッグサンダー・マウンテン
# .  .  park = character 1= ディズニーランド
# .  .  popular = logical 1= TRUE
# .  [[3]] = list 3
# .  .  name = character 1= スプラッシュ・マウンテン
# .  .  park = character 1= ディズニーランド
# .  .  popular = logical 1= TRUE

# keep(): retain only attractions whose time_required is at least 10.
over_ten_min <- attractions %>% keep(~ .$time_required >= 10)

Hmisc::list.tree(over_ten_min)
# over_ten_min = list 2 (1688 bytes)
# .  [[1]] = list 3
# .  .  name = character 1= カリブの海賊
# .  .  time_required = double 1= 15
# .  .  tags = character 2= アドベンチャーランド ファストパス
# .  [[2]] = list 3
# .  .  name = character 1= スプラッシュ・マウンテン
# .  .  time_required = double 1= 10
# .  .  tags = character 3= クリッターカントリー ファストパス 人気
|
cbae065afc40e2da2150b7d34e4d8faff2b08427
|
eaa87197fffdd4898067a1294a38319fefb90797
|
/man/add_global_md_clustering.Rd
|
31c68faae42b5f8c6cd88713477c3c255a4b229d
|
[
"MIT"
] |
permissive
|
aertslab/SCopeLoomR
|
44c91fe5428a24ab28e491ea22b0dde7f7bba2d3
|
20f4e0af5ecdbb748a088d52400e24b42ed30a24
|
refs/heads/master
| 2022-05-01T17:44:07.220814
| 2022-04-08T13:26:13
| 2022-04-08T13:26:13
| 122,311,843
| 28
| 14
|
MIT
| 2021-04-07T09:27:48
| 2018-02-21T08:47:21
|
R
|
UTF-8
|
R
| false
| true
| 639
|
rd
|
add_global_md_clustering.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/loom.R
\name{add_global_md_clustering}
\alias{add_global_md_clustering}
\title{add_global_md_clustering}
\usage{
add_global_md_clustering(loom, id, group, name, clusters, annotation = NULL)
}
\arguments{
\item{loom}{The loom file handler.}

\item{id}{The identifier of the clustering.}

\item{group}{The name of the group of clusterings.}

\item{name}{The name given to the given clustering.}

\item{clusters}{A list of the cluster id for each cell, in the same order as the columns of the gene expression matrix.}

\item{annotation}{Optional annotation for the clusters; defaults to \code{NULL}.}
}
\description{
Add the clustering annotation to the global MetaData attribute.
}
|
2fd9154f6bcccfbd8ddb6ce012494c0300242a06
|
f65fb425744cb0e367438b2e69019b6645048cdb
|
/R/unfolding.R
|
b484f92ee0a0e6c7540fc351b6d31001a2b02e24
|
[] |
no_license
|
cran/smacof
|
2dc3946ee615eef6846212c3e0fbb6bbd3d94e9a
|
c89277ac6d174f1c8d191994fc2fe0ee849f6a25
|
refs/heads/master
| 2022-05-27T05:52:02.937125
| 2022-05-06T06:04:47
| 2022-05-06T06:04:47
| 17,699,709
| 6
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 47
|
r
|
unfolding.R
|
# Aliases: both unfolding() and prefscal() dispatch to smacofRect()
# (rectangular SMACOF, i.e. multidimensional unfolding).
unfolding <- smacofRect
prefscal <- smacofRect
|
7ea7b7be62af51449e1c0aeeccea9d177794ba7a
|
b6d41be1821b5fe4eaa145c85128da485ba38a3f
|
/day2/day2_practice.R
|
500399e620155131fa958241b48cd1895afbda48
|
[] |
no_license
|
shoestringpsycholing/r_programming_bsos
|
a5d32f6edeeed7949fb1fd51c41df64ce8f8f631
|
9052a06d5ae3aefda584eb4505487c4e2b087577
|
refs/heads/master
| 2016-09-09T18:30:21.737753
| 2015-01-20T16:43:22
| 2015-01-20T16:43:22
| 28,814,001
| 2
| 3
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,820
|
r
|
day2_practice.R
|
# Day 2 practice worksheet: t-tests, subsetting, the formula interface,
# and saving results.  Numbered comments are the exercise prompts; the
# uncommented lines are worked answers.
# 1. Run a t-test with the sleep data set
#    a. save the values as objects/variables first,
#       then supply as arguments to t.test()
#    b. try skipping that step and referring to the values directly
group1 <- sleep$extra[1:10]
group2 <- sleep$extra[11:20]
group1 <- sleep$extra[sleep$group == 1]
group2 <- sleep$extra[sleep$group == 2]
t.test(group1, group2)

# 2. Do the same thing with the "iris" data set
#    - compare sepal width
#    - all three pairwise comparisons

# 3. Do the same (run a t-test) thing again on "women" data
#    - compare the two columns
#    - (yes, this doesn't make much sense as a comparison,
#      it's just a programming exercise)
# Four equivalent ways of extracting the two columns:
t.test(women[1], women[2])
t.test(women[[1]], women[[2]])
t.test(women[["height"]], women[["weight"]])
t.test(women$height, women$weight)

# 4. Re-do "sleep" t-test (#1 above) as paired

# 5. Run a true "Student's" t.test for #1

# 6. Try the "formula" method for t.test()
#    a. Read the help
#    b. Look at the "formula" argument
#    c. Try to use that to replicate the results from #1 above
t.test(extra ~ group, data = sleep)

# 7. Run a Wilcoxon Signed Rank test for the comparisons
#    in #1 through #3
#    - use help.search to find a function to do this

# 8. Run a correlation test on the "women" data
#    a. Pearson
#    b. Spearman

# 9. save all results (and only those results!) in an
#    .RData object
#    - try to think of (at least) two ways to do this
sleep.test <- t.test(extra ~ group, data = sleep)
save(sleep.test, file = "results.RData")
# save(sleep.test, otherthing1, otherthing2, file = "bunchofresults.RData")

# 10. clean, comment, and save this script with your answers

# 11. Look at the format of your Homework data set
#     - find a function to read in your data
#     - needed for tonight's Homework
|
8c4648e5a130b08333a5a1e7a00a37c78a2878fa
|
ca548044ec6410ad6bef7210bb08e22c9f8a43c8
|
/man/getSampleNames.Rd
|
2b74de427e822a6472851ef87c3c488f8215e92f
|
[] |
no_license
|
bentobioinformatics/yamp
|
a0ecb3b33e5734763f94584b0e85aa7885e99ca5
|
6fe927a3d7c7478191eabb61574a03acf5f1cb8e
|
refs/heads/master
| 2021-01-11T05:15:13.327840
| 2016-11-24T11:36:51
| 2016-11-24T11:36:51
| 69,193,008
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 510
|
rd
|
getSampleNames.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/getSampleNames.R
\name{getSampleNames}
\alias{getSampleNames}
\title{getSampleNames}
\usage{
getSampleNames(file)
}
\arguments{
\item{file}{the name of the OTU table file which the data are to be read from. The OTU table must have derived from biom.}
}
\description{
This function retrieves sample names from an OTU table.
}
\examples{
sampleNames = getSampleNames("otu_table.txt")
}
\keyword{OTU}
\keyword{names}
\keyword{sample}
|
0975045a2d0670c0f27493a748d41bf3553c268e
|
3819c5c65f13b185b8fb714d7349abfecb793a72
|
/R/checkInputs.R
|
07826fb6018836ebde1ac5c1ce6783bfce64e8ac
|
[] |
no_license
|
cran/DynTxRegime
|
ed877579c6ffc6156fb6c84298a58d1db5940dff
|
9ecb35dfd9abf9617e0179d3d4d552dce22314e5
|
refs/heads/master
| 2023-06-25T03:37:01.776586
| 2023-04-25T13:50:11
| 2023-04-25T13:50:11
| 37,244,072
| 1
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 4,356
|
r
|
checkInputs.R
|
.checkModelObjOrListModelObjSubset <- function(object, nm) {
  # Normalise the modeling-object input: accept NULL, a single modelObj, a
  # single ModelObjSubset, or a non-empty list of ModelObjSubset objects.
  # NOTE(review): `nm` is accepted but never used in this function body.
  if (is.null(x = object)) return(object)

  # Promote a lone subset object to a one-element list.
  if (is(object = object, class2 = "ModelObjSubset")) {
    object <- list(object)
  }

  if (is(object = object, class2 = "modelObj")) return(object)

  if (!is.list(x = object) || length(x = object) == 0L) {
    stop("single modelObj or a list of ModelObjSubset objects expected")
  }

  # A length-one list holding a plain (non-subset) modelObj unwraps to the
  # modelObj itself.
  if (length(x = object) == 1L &&
      is(object = object[[ 1L ]], class2 = "modelObj") &&
      !is(object = object[[ 1L ]], class2 = "ModelObjSubset")) {
    return( object[[ 1L ]] )
  }

  # Otherwise every element must be a ModelObjSubset.
  for (i in 1L:length(x = object)) {
    if (!is(object = object[[ i ]], class2 = "ModelObjSubset")) {
      stop("single modelObj or a list of ModelObjSubset objects expected")
    }
  }

  # Convert list into recognized internal class.
  return( .newModelObjSubset(object = object) )
}
.checkTxData <- function(txName, data) {
# txName must be an object of class character
if (!is.character(x = txName)) stop("txName must be a character")
# txName can only be a single name
if (length(x = txName) != 1L) {
stop("txName must be of length 1 for this method")
}
# test to see if tx is in data set provided.
txVec <- tryCatch(expr = data[,txName],
condition = function(x) {
cat(x$message, "\n")
stop(paste0(txName, " not found in data"))
})
# if tx is not a factor or an integer, attempt to coerce to integer
if (!is.factor(x = txVec) && !is.integer(x = txVec)) {
if (is.character(x = txVec)) {
data[,txName] <- factor(x = txVec)
} else {
if (!isTRUE(x = all.equal(target = txVec, current = round(x = txVec)))) {
stop("treatment variable must be a factor or an integer")
}
data[,txName] <- as.integer(x = round(x = data[,txName]))
}
}
return( data )
}
.checkBinaryTx <- function(txName, data) {
txVec <- numeric(length = nrow(x = data))
# identify the levels of treatment
if (is.factor(x = data[,txName])) {
levs <- levels(x = data[,txName])
} else {
levs <- unique(x = data[,txName])
levs <- levs[!is.na(x = levs)]
levs <- sort(x = levs)
}
# if more than 2 tx options throw error
if (length(x = levs) > 2L ) {
stop("only binary tx options can be used in this method")
}
# Create treatment vector cast as +/- 1 where -1 = base level
txVec[data[,txName] == levs[1L]] <- -1.0
txVec[data[,txName] == levs[2L]] <- 1.0
return( txVec )
}
.checkModelObjOrListModelObjSubsetOrList <- function(object, nm) {
# if object is null, return object unchanged
if (is.null(x = object)) return(object)
# if object is a single modelObjSubset object, convert to a list
if (is(object = object, class2 = "ModelObjSubset")) object <- list(object)
# if object is a single modelObj, return object unchanged
if (is(object = object, class2 = "modelObj")) return(object)
# if object is now not a list or has zero length, stop with error
if (!is.list(x = object) || length(x = object) == 0L) {
stop("single modelObj, a list of modelObj, or a list of ModelObjSubset objects expected")
}
# if only one object is in the list and it is a modelObj, return object as
# a modelObj
if (length(x = object) == 1L &&
is(object = object[[ 1L ]], class2 = "modelObj") &&
!is(object = object[[ 1L ]], class2 = "ModelObjSubset")) {
return( object[[ 1L ]] )
}
# ensure that all elements of the list are modelObj or that all elements
# of the list are ModelObjSubset
firstClass <- class(x = object[[ 1L ]])
if (!is(object = object[[ 1L ]], class2 = "modelObj") &&
!is(object = object[[ 1L ]], class2 = "ModelObjSubset")) {
stop("single modelObj, a list of modelObj, or a list of ModelObjSubset objects expected")
}
for (i in 1L:length(x = object)) {
if (!is(object = object[[ i ]], class2 = firstClass)) {
stop("single modelObj, a list of modelObj, or a list of ModelObjSubset objects expected")
}
}
# if the list contains only modelObj, return as a ModelObj_DecisionPointList
if (!is(object = object[[ 1L ]], class2 = "ModelObjSubset")) {
return( new("ModelObj_DecisionPointList", object) )
}
# Convert list into recognized internal class.
return( .newModelObjSubset(object = object) )
}
|
22f998e9ddcc4c0b3092984ca39ec5e7af2282f6
|
0d4a48b47deed948603cc03bad032d3236cd3be7
|
/homework/hw3_inc/hw3_inc_v2.R
|
7a15d385d365b2a82f8d874ce70ec32115475abe
|
[] |
no_license
|
UCB-Epi-R/phw250fg2019
|
e67c1d3cd5ee292c4e63abe3704f3f74ad1001b5
|
7b9eefa1cb52d9fa439cffd0e8dfeba08a6b24ff
|
refs/heads/master
| 2020-06-18T09:13:28.748513
| 2019-12-10T00:16:28
| 2019-12-10T00:16:28
| 196,247,267
| 0
| 0
| null | 2019-09-18T16:41:47
| 2019-07-10T17:22:38
|
R
|
UTF-8
|
R
| false
| false
| 4,353
|
r
|
hw3_inc_v2.R
|
#################################################
# R-for-Epi
# Epidemiologic Methods II (PHW250F, PHW250G)
# created by Jade Benjamin-Chung
# Homework 3: Incidence
#################################################
# Don't change these lines, just run them!
# Load okR autograder
tryCatch({source('setup/autograder-setup/hw3_inc/hw3_inc.ok.R')},
warning = function(e){print("Error: did you remember to load the phw250fg2019.Rproj file?")})
AutograderInit()
#################################################
# Read in the data and view the data
#################################################
# Load the dplyr package
library(dplyr)
# In this problem set we will calculate incidence
# using different formulas.
# Read in the data for this assignment.
# The data is saved as a .RData file, so you need
# to use the load command to load the data.
# Fill the R data file name in the quotes below
# without the data directory information.
load(paste0("data/hw3_incidence.RData"))
#################################################
# Section 1: Use the Kaplan-Meier method
# to calculate the incidence in the data
# frame called "km"
#################################################
# First, display the data.frame "km"
km
#-----------------------------------------------
# Problem 1: calculate conditional risk as a new
# column called "cond_risk" in the km dataframe
#-----------------------------------------------
km = "<<<<<<<<<<<<< YOUR CODE HERE >>>>>>>>>>>>>>>"
# Check your answer
CheckProblem1()
#-----------------------------------------------
# Problem 2: calculate conditional survival as a new
# column in the km dataframe called cond_surv
#-----------------------------------------------
km = "<<<<<<<<<<<<< YOUR CODE HERE >>>>>>>>>>>>>>>"
# Check your answer
CheckProblem2()
#-----------------------------------------------
# Problem 3: calculate cumulative risk and save it
# as a scalar named km_cum_risk
#-----------------------------------------------
km_cum_risk = "<<<<<<<<<<<<< YOUR CODE HERE >>>>>>>>>>>>>>>"
# Check your answer
CheckProblem3()
#-----------------------------------------------
# Problem 4: calculate cumulative survival and save it
# as a scalar named km_cum_surv
#-----------------------------------------------
km_cum_surv = "<<<<<<<<<<<<< YOUR CODE HERE >>>>>>>>>>>>>>>"
# Check your answer
CheckProblem4()
#################################################
# Section 2: Use the Density method
# to calculate the incidence in the data
# frame called "dm"
#################################################
# display the data.frame "dm"
dm
#-----------------------------------------------
# Problem 5: calculate person-time. Add a new
# column named PT.
#-----------------------------------------------
delta_t=2
dm = "<<<<<<<<<<<<< YOUR CODE HERE >>>>>>>>>>>>>>>"
# Check your answer
CheckProblem5()
#-----------------------------------------------
# Problem 6: calculate the conditional risk as a new
# column in the dm dataframe. Add a new column
# named ID.
#-----------------------------------------------
dm = "<<<<<<<<<<<<< YOUR CODE HERE >>>>>>>>>>>>>>>"
# Check your answer
CheckProblem6()
#-----------------------------------------------
# Problem 7: calculate cumulative risk and save it
# as a scalar named dm_cum_risk
#-----------------------------------------------
dm_cum_risk = "<<<<<<<<<<<<< YOUR CODE HERE >>>>>>>>>>>>>>>"
# Check your answer
CheckProblem7()
#-----------------------------------------------
# Problem 8: calculate cumulative survival and save it
# as a scalar named dm_cum_surv
#-----------------------------------------------
dm_cum_surv = "<<<<<<<<<<<<< YOUR CODE HERE >>>>>>>>>>>>>>>"
# Check your answer
CheckProblem8()
# --------------------------------------------
# Check your total score
MyTotalScore()
# --------------------------------------------
# Follow the instructions on bCourses to submit your work.
######################################
# ALTERNATE SUBMISSION INSTRUCTIONS
# 1. Click on the "Terminal" tab in the panel below
# 2. Copy and paste the following line of code without the #
# and press "enter":
# cd; cd phw250fg2019/homework/hw3_inc; python3 ok --submit;
# 3. Follow the prompts in the terminal
# Video tutorial: https://www.youtube.com/watch?v=NYNDi_zJRGE
######################################
|
04d11b0a82cc0eb4336d28ca8746bdce27fbcef7
|
77157987168fc6a0827df2ecdd55104813be77b1
|
/MGDrivE/inst/testfiles/calcCos/libFuzzer_calcCos/calcCos_valgrind_files/1612728105-test.R
|
4b67177666c2aad13062cfd5c2dc8a54c4842810
|
[] |
no_license
|
akhikolla/updatedatatype-list2
|
e8758b374f9a18fd3ef07664f1150e14a2e4c3d8
|
a3a519440e02d89640c75207c73c1456cf86487d
|
refs/heads/master
| 2023-03-21T13:17:13.762823
| 2021-03-20T15:46:49
| 2021-03-20T15:46:49
| 349,766,184
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 449
|
r
|
1612728105-test.R
|
testlist <- list(latLongs = structure(c(3.02668741796475e+267, 3.02668741796475e+267, 3.02668741796475e+267, 3.02668741796475e+267, 3.02668741796475e+267, 3.02668741796475e+267, 3.02668741796475e+267, 3.02668741796475e+267, 3.02668741796475e+267, 5.90602436456152e+223, 1.68936878664978e-104, 2.54166853232633e+117, 1.50998732907014e+158, 0, 0, 0, 0, 0, 0, 0), .Dim = c(10L, 2L)), r = 0)
result <- do.call(MGDrivE::calcCos,testlist)
str(result)
|
775262e42e80d969678e33a9743a35f093035744
|
11a910addebae08f6db43f9d5a4de616cd511fa1
|
/Samples/sample.R
|
3219c3d537b064416f6f4b520d0e6c3e8fe3020d
|
[] |
no_license
|
stefanroata/quantitative-reasoning
|
df52e600e3af404855b4825abc21e5f0ce552750
|
3866e360b61bb41a46641528f0cf69e18f863881
|
refs/heads/main
| 2023-02-20T17:02:04.987126
| 2021-01-27T17:26:08
| 2021-01-27T17:26:08
| 333,504,939
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,492
|
r
|
sample.R
|
sample(x=1:3, size=2)
sample(x=3, size=2)
mtcars[sample(x=nrow(mtcars), size=2), ]
#dice rolls
sample(1:6, size=1, replace=T)
sample(6, size=1, replace=F)
die.rolls<-sample(1:6, size=20, replace=T)
die.rolls
dice<-sample(1:6, size=3, replace=T)
mean(dice)
trials<-20
numeric(4)
res<-numeric(trials)
for(i in 1:trials){
dice<-sample(1:6, size=3, replace=T)
res[i]<-mean(dice)
}
res
table(res)
trials<-20
d<-5
res<-matrix(NA, trials, d)
for(i in 1:trials){
res[i,]<-sample(1:10, 5, replace=F)
}
res[2,]
res[,5]
#in-class code
sample(1:10, size=5, replace=T)
sum(sample(1:10, size=5, replace=T))
#sample size=5
#sample statistic=the sum
#population=the infinite set attained if we were to keep repeating the sample an infinite number of times
#population parameter= the mean(miu)
#you would want to simulate your code x number of times (100? 1000?)
sum(sample(1:6, size=2, replace=T))
res<-numeric(100)
for(i in 1:100){
res[i]<-sum(sample(1:6, size=2, replace=T))
}
hist(res, col="red", main="Histogram of the results of the simulation",
xlab="The sum of the two dice")
res<-numeric(100000)
for(i in 1:100000){
res[i]<-sum(sample(1:6, size=2, replace=T))
}
hist(res, col="red", main="Histogram of the results of the simulation",
xlab="The sum of the two dice")
barplot(table(res)/length(res), col="purple", main="Barplot of the frequencies of the sum of the dice", ylim=c(0,0.2))
grid()
mean(res)
sd(res)
pop<-read.csv("population.csv")
mean(sample(x=pop$height, size=50))
hist(sample(x=pop$height, size=50))
SRS<-pop[sample(x=pop$height, size=50),]
mean(pop$height)
mean(SRS$height)
mean(pop$height)-mean(SRS$height)
height.res<-numeric(10000)
height.sds<-numeric(10000)
for(i in 1:10000){
SRS<-pop[sample(x=nrow(pop), size=50),]
height.res[i]<-mean(SRS$height)
height.sds[i]<-sd(SRS$height)
}
hist(height.res)
hist(height.sds)
qqnorm(height.res)
qqline(height.res)
height.res<-numeric(10000)
height.sds<-numeric(10000)
for(i in 1:10000){
SRS<-pop[sample(x=nrow(pop), size=3),]
height.res[i]<-mean(SRS$height)
height.sds[i]<-sd(SRS$height)
}
hist(height.res)
hist(height.sds)
qqnorm(height.res)
qqline(height.res)
height.res<-numeric(100000)
height.sds<-numeric(100000)
for(i in 1:100000){
SRS<-pop[sample(x=nrow(pop), size=75),]
height.res[i]<-mean(SRS$height)
height.sds[i]<-sd(SRS$height)
}
hist(height.res)
hist(height.sds)
qqnorm(height.res)
qqline(height.res)
mean(height.res)
sd(height.res)
|
8b1c10cc850d89f6b093ef0215988e3990b0dd1c
|
ab1f1756758a44cdec52ce421be9f863fd9627f0
|
/Resources/Chaos/julia.r
|
ff8cddb5f8af380b9ffec88c3f28afea9df76b44
|
[] |
no_license
|
sone/LitterPower
|
c6a648421c47c8bbf547075868bdf7e477cde9aa
|
6b93433a2ce76f63d6eff57cd4ea6ed40cec037d
|
refs/heads/master
| 2021-10-24T01:19:21.694675
| 2017-07-11T13:33:12
| 2017-07-11T13:33:12
| null | 0
| 0
| null | null | null | null |
WINDOWS-1252
|
R
| false
| false
| 2,112
|
r
|
julia.r
|
/*
File: julia.r
Contains: Resources for Max external object julia.
Written by: Peter Castine.
Copyright: © 2001-2002 Peter Castine. All rights reserved.
Change History (most recent first):
<3> 9Ð1Ð04 pc Make vers information carbon/classic-savvy. Drop faux 'Vers'
resource.
<2> 29Ð11Ð2002 pc Tidy up initial check in.
<1> 29Ð11Ð2002 pc Initial Check-in.
*/
//
// Configuration values for this object
//
// These must be unique within Litter Package. The Names must match constant values used
// in the C/C++ source code (we try never to access resources by ID).
#define LPobjID 17546
#define LPobjName "lp.julie"
// Values used in the 'vers'(1) resource
#define LPobjStarter 1 // Comment out for Pro Bundles
#ifdef __LP_CARBON_VERSION__
#define LPobjMajorRev 0x01
#define LPobjMinorRev 0x00
#define LPobjStage development
#define LPobjStageBuild 0x02
#define LPobjRegion 0 // US
#define LPobjShortStr "1.0d2 (OS X)"
#define LPobjLongStr "1.0d2 (OS X), Copyright © 2001-2003 Peter Castine."
#else
#define LPobjMajorRev 0x01
#define LPobjMinorRev 0x00
#define LPobjStage development
#define LPobjStageBuild 0x02
#define LPobjRegion 0 // US
#define LPobjShortStr "1.0d2 (Classic OS)"
#define LPobjLongStr "1.0d2 (Classic OS), Copyright © 2001-2003 Peter Castine."
#endif
// The following sets up the 'mAxL' and 'vers' resources
// It relies on the values above for resource IDs and names, as
// well as concrete values for the 'vers'(1) resource.
#include "Litter Globals.r"
//
// Resource definitions
//
resource 'STR#' (LPobjID, LPobjName) {
{ /* array StringArray */
// Copies of 'vers' strings
LPobjShortStr, LPobjLongStr, LPShortStr, LPLongStr,
// Assist strings
/* Inlets */
"Bang (Generate next point), Float (z0.real)",
"Float (z0.imaginary)",
"Float (c.real)",
"Float (c.imaginary)",
/* Outlets */
"Float (Real component)",
"Float (Imaginary component)"
}
};
|
a8b6bbeb644e40614d5da0e2c707ba0e378cf9e1
|
aaa94db31fe4adc10ab85dd79ee9fc445db0f8ea
|
/server.r
|
145c9f5024a0caa4af1d80ea63eeb12e80ab5482
|
[] |
no_license
|
ShelleyStall/DataProd_ShinyApp
|
30f5e81a08e4366b1b7742ce3bdd78a454199f53
|
f5d49bfb4ebd8e329aa60b20cf99d6bd367cd35f
|
refs/heads/master
| 2021-01-19T20:21:51.801263
| 2015-04-25T19:03:18
| 2015-04-25T19:03:18
| 34,581,082
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,930
|
r
|
server.r
|
library(shiny)
suppressPackageStartupMessages(library(googleVis))
## read the school data and group by state. Assign column names.
schooldata <- read.csv("postscndryunivsrvy2013dirinfo.csv", stringsAsFactors = FALSE)
school_sum_by_state <- as.data.frame(table(schooldata$STABBR))
colnames(school_sum_by_state) <- c("state.abb", "Num_of_schools")
## Make available the state information in the R package datasets. Create a data frame of state abbreviations and names.
require(datasets)
state_and_abb <- cbind(state.abb,state.name)
## Merging the school data with the state information drops the 9 territories that don't match the 50 states
school_by_state <- merge(school_sum_by_state,state_and_abb,by="state.abb")
## Add a new colume with the latitude and longitude information needed by gvisMap.
schooldata$latlong=paste(schooldata$LATITUDE, schooldata$LONGITUD, sep=":")
## Function determineNum that takes as input the name of the state selected and provides
## the number of schools in that state.
determineNum <- function(state) school_by_state[school_by_state$state.name == state,2]
## Function determineAbb that takes as input the name of the state selected and provides
## the state abbreviation to be used on the Information tab for the state map.
determineAbb <- function(state) school_by_state[school_by_state$state.name == state,1]
shinyServer(function(input,output) {
## Used on the Information by State Tab
output$Text1 <- renderText(input$state)
output$Text2 <- renderPrint({determineNum(input$state)})
## Renders an interactive US Map. Used on the US Map Tab
output$myMap <- renderGvis({
gvisGeoChart(school_by_state, locationvar = "state.name", colorvar = "Num_of_schools", options=list(region="US", displayMode="regions", resolution="provinces",width=600, height=400))
})
## Renders the markers for a selected state. Used on the Information by State Tab.
## Items of note. The markers are custom. gvisMap has a limitation of only 400 markers.
output$myStateMap <- renderGvis({
stateabb <- determineAbb(input$state)
state_schooldata <- subset(schooldata,STABBR==stateabb)
##the max number rows is 400
if (nrow(state_schooldata) > 400) {
state_schooldata2 <- state_schooldata[1:400, c("latlong","INSTNM")]
} else {
state_schooldata2 <- state_schooldata[, c("latlong","INSTNM")]
}
gvisMap(state_schooldata2, "latlong" , "INSTNM",
options=list(showTip=TRUE, showLine=TRUE, enableScrollWheel=TRUE, mapType='terrain', useMapTypeControl=TRUE, width=400, height=400,
icons=paste0("{",
"'default': {'normal': 'http://icons.iconarchive.com/",
"icons/fatcow/farm-fresh/32/",
"bullet-blue-icon.png',\n",
"'selected': 'http://icons.iconarchive.com/",
"icons/fatcow/farm-fresh/32/",
"bullet-blue-icon.png'",
"}}")))
})
})
|
65d04ef4d061df08a393b3c5c78ef960eeeeead1
|
6b41b7034e5f9d41c2d5ffbda120c1ae04683f5c
|
/discrete-fourier-transform.r
|
ac6649fc3dab5d35ef6c28acb12f1d789dc0e0e5
|
[] |
no_license
|
joyofdata/miscellaneous
|
5c5606a7eafb88daf684ca6bbddd755e5c49c015
|
403419f8f4eb1f2db3dfb3820d389ce0486cb405
|
refs/heads/master
| 2021-01-15T13:36:51.293094
| 2014-05-15T14:07:42
| 2014-05-15T14:07:42
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,085
|
r
|
discrete-fourier-transform.r
|
I <- complex(imaginary = 1)
x <- c(1,1,0,1,1,0,1,1,0)
N <- length(x)
x.abs.max <- max(abs(max(x)), abs(min(x)))
my.fft <- function(k,x,N) x %*% exp(-I*2*pi*k*(0:(N-1))/N)
my.ifft <- function(n,X,N) (X %*% exp(I*2*pi*(0:(N-1))*n/N))/N
my.X <- sapply(0:(N-1), function(k) my.fft(k,x,N))
my.X
sapply(0:(N-1), function(n) my.ifft(n,my.X,N))
X <- fft(x)
X
fft(X, inverse=TRUE)/N
Mod(X)
Arg(X)
x.fun <- function(t) (Mod(X) %*% cos(t*(0:(N-1)) + Arg(X)))/N
x.period <- (0:(N-1))/N*2*pi
my.x <- sapply(x.period,x.fun)
plot(x.period, my.x, xlim=c(0,2*pi), ylim=c(-x.abs.max,x.abs.max))
x.fun.comp <- function(i,t) sapply(t, function(t) Mod(X)[i+1] * cos(t*i + Arg(X)[i+1]))/N
x.period.plot <- 0:999/1000*2*pi
comp <- list()
comp[[1]] <- x.fun.comp(0,x.period.plot)
comp[[2]] <- x.fun.comp(3,x.period.plot)
comp[[3]] <- x.fun.comp(6,x.period.plot)
lines(x.period.plot, comp[[1]],col="magenta")
lines(x.period.plot, comp[[2]],col="green")
lines(x.period.plot, comp[[3]],col="blue")
lines(x.period.plot, comp[[1]]+comp[[2]]+comp[[3]],col="red")
|
e5d6838ee38039774b4c3cdfc494f70decb5a583
|
c4e5bb9bcad63afd514a7d5a124d4b572ceda870
|
/plot 1.R
|
f4d52d57b4b0691050a8a6a679cc8f6324f2be2e
|
[] |
no_license
|
lucasteong/ExData_Plotting1
|
a8c76cb6055dea9d384ee6092d5f1690f734b33a
|
3c249b9c8fd28477932858c7a8be7de1094cc3b9
|
refs/heads/master
| 2020-07-19T17:59:11.180791
| 2019-09-05T09:52:26
| 2019-09-05T09:52:26
| 206,490,523
| 0
| 0
| null | 2019-09-05T06:26:32
| 2019-09-05T06:26:32
| null |
UTF-8
|
R
| false
| false
| 1,173
|
r
|
plot 1.R
|
#########################################
# 1 - Download data to working directory
#########################################
# Download zipfile if it does not exists
filename <- "exdata_data_household_power_consumption.zip"
if (!file.exists(filename)){
download.file("https://d396qusza40orc.cloudfront.net/exdata%2Fdata%2Fhousehold_power_consumption.zip", filename, method="curl")
}else{
print("Zip file already exists.")
}
# Checking if file is unzipped in working directory
if (!file.exists("household_power_consumption.txt")) {
unzip(filename)
} else{
print("File already exists.")
}
#########################################
# 2 - Read data
#########################################
household <- read.table("household_power_consumption.txt", sep =";", header = TRUE)
household <- subset(household,household$Date=="1/2/2007" | household$Date =="2/2/2007")
#########################################
# 3 - Plotting histogram
#########################################
png("plot1.png", width=480, height=480)
hist(as.numeric(as.character(household$Global_active_power)),col="red",main="Global Active Power",xlab="Global Active Power (kilowatts)")
dev.off()
|
59df2d92c270128a29dda89ae273a0c5608948d9
|
d9dfd485199c3148148cd74e66aab20536cf553f
|
/mu_x_til.R
|
2690b369f4992042993786c5376a4d6e8ac8cf1d
|
[] |
no_license
|
cardorl/AlgoritmoR_Repository
|
72de09b0a569bf1d389bd9aa7b7c6bd4e9f4555e
|
7914fc67836c71ed4d4321e3261ab5baf8c8fc05
|
refs/heads/master
| 2016-09-11T03:07:13.811181
| 2015-09-01T11:30:26
| 2015-09-01T11:30:26
| 24,561,124
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 205
|
r
|
mu_x_til.R
|
mu.x.til.f<-function(X.barra,Y.barra,beta.til,alfa.til,lambda.e){
result<-((Y.barra*beta.til)+(lambda.e*X.barra)-(alfa.til*beta.til));
result<-result/(beta.til^2)+lambda.e;
return(result);
}
|
3087854d987e02d1260d803c1d59a9689e775108
|
747d26c2c2693d51affbb3b34c36dc65f50b0fb8
|
/CrossSelection.Lasso.R
|
d51aefd9e85268607ce3f2c50ac86333d5435cc6
|
[] |
no_license
|
georgiasalanti/NMApredictionsRiskModel
|
20655668ccce72c1961e529ccd964dcabc0452d5
|
184b7bda4af1df56de8619d3ec58d2462397bdad
|
refs/heads/master
| 2020-06-05T00:31:57.761939
| 2019-06-14T10:18:24
| 2019-06-14T10:18:24
| 192,252,251
| 0
| 0
| null | 2019-06-17T01:00:39
| 2019-06-17T01:00:39
| null |
UTF-8
|
R
| false
| false
| 2,513
|
r
|
CrossSelection.Lasso.R
|
#######################################################################################################
### Selects variables via LASSO based on half RCTs ################################
### with 100 cross validations (100 different half-datasets)###########################
############# 1 YEAR RELAPSES ###################################
################################################################################################################
dataset=MSrelapse
library(glmnet)
library(Hmisc)
####################random half RCTs from studies###############################
for (i in 1:100) {
Advance<-dataset[which(dataset$STUDYID=="ADVANCE"),]
Advance.risk<-Advance[sample(nrow(Advance), nrow(Advance)/2),]
todrop<-c("STUDYID","USUBJID","RELAPSE2year")
Advance.risk<-Advance.risk[ , !(names(Advance.risk) %in% todrop)]
Define<-dataset[which(dataset$STUDYID=="DEFINE"),]
Define.risk<-Define[sample(nrow(Define), nrow(Define)/2),]
Define.risk<-Define.risk[ , !(names(Define.risk) %in% todrop)]
Confirm<-dataset[which(dataset$STUDYID=="CONFIRM"),]
Confirm.risk<-Confirm[sample(nrow(Confirm), nrow(Confirm)/2),]
Confirm.risk<-Confirm.risk[ , !(names(Confirm.risk) %in% todrop)]
Affirm<-dataset[which(dataset$STUDYID=="AFFIRM"),]
Affirm.risk<-Affirm[sample(nrow(Affirm), nrow(Affirm)/2),]
Affirm.risk<-Affirm.risk[ , !(names(Affirm.risk) %in% todrop)]
Mscrg<-dataset[which(dataset$STUDYID=="MSCRG"),]
Mscrg.risk<-Mscrg[sample(nrow(Mscrg), nrow(Mscrg)/2),]
Mscrg.risk<-Mscrg.risk[ , !(names(Mscrg.risk) %in% todrop)]
##all half studies together
mrg<-rbind(Advance.risk,Define.risk,Confirm.risk,Affirm.risk,Mscrg.risk)
#####################LASSO preparation####################
###blinded to treatment so drop variable TRT01A
todrop<-c("TRT01A")
mrg.both<-mrg[ , !(names(mrg) %in% todrop)]
### delete NA values (LASSO requierement)
mrg.both<-na.omit(mrg.both)
#### model matrix needed for LASSO
half.matrix<-model.matrix(mrg.both$RELAPSE1year~.,data=mrg.both)
half.matrix<-na.omit(half.matrix)
#################################LASSO################################
######################################################################
##10 cross validations
cv.fit.half<-cv.glmnet(x=half.matrix,y=mrg.both$RELAPSE1year,family="binomial")
### LASSO coefficients
cv.coef.half<-coef(cv.fit.half,s="lambda.1se")
####RESULTS
### non zero coefficients lead to selected variables
cv.pf.em.half<-rownames(cv.coef.half)[as.numeric(cv.coef.half)!=0]
print(cv.pf.em.half)
}
|
b2c1c9600e2d634a0af09230ab839363963fa4be
|
c13c41582b93e1ec4df4d3fd15c9b461ba926d72
|
/R/calc.pairprob.R
|
e050170abbc87d1f64cd1befa7e58b9bca35579d
|
[] |
no_license
|
pjotrp/rqtl
|
834979ea3e6453637dee4b7a53432b3c61b26f44
|
d7f377b50771d9f0862d1590bf05add06982cb35
|
refs/heads/master
| 2020-06-02T06:57:46.392621
| 2009-06-26T21:27:15
| 2009-06-26T21:27:15
| 127,591
| 3
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 6,834
|
r
|
calc.pairprob.R
|
######################################################################
#
# calc.pairprob.R
#
# copyright (c) 2001-9, Karl W Broman
# last modified Apr, 2009
# first written Nov, 2001
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License,
# version 3, as published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but without any warranty; without even the implied warranty of
# merchantability or fitness for a particular purpose. See the GNU
# General Public License, version 3, for more details.
#
# A copy of the GNU General Public License, version 3, is available
# at http://www.r-project.org/Licenses/GPL-3
#
# Part of the R/qtl package
# Contains: calc.pairprob
#
######################################################################
######################################################################
#
# calc.pairprob: calculate joint genotype probabilities for all pairs
# of putative QTLs, conditional on the observed marker
# data
#
# This is an *internal* function, not to be called by the user.
#
# The input argument cross is assumed to have just one chromosome.
#
######################################################################
calc.pairprob <-
function(cross, step=0, off.end=0, error.prob=0.0001,
map.function=c("haldane","kosambi","c-f","morgan"),
map, assumeCondIndep=FALSE)
{
# which type of cross is this?
type <- class(cross)[1]
if(assumeCondIndep) { # assume conditional independence of QTL given markers
if(!("prob" %in% names(cross$geno[[1]]))) {
cross <- calc.genoprob(subset(cross, chr=1), step=step, off.end=off.end,
error.prob=error.prob, map.function=map.function)
}
prob <- cross$geno[[1]]$prob
n.ind <- dim(prob)[1]
n.pos <- dim(prob)[2]
n.gen <- dim(prob)[3]
if(n.pos < 2) return(NULL)
z <- .C("R_calc_pairprob_condindep",
as.integer(n.ind),
as.integer(n.pos),
as.integer(n.gen),
as.double(prob),
pairprob=as.double(rep(0,n.ind*choose(n.pos, 2)*n.gen*n.gen)),
PACKAGE="qtl")
pairprob <- array(z$pairprob, dim=c(n.ind,n.pos*(n.pos-1)/2,n.gen,n.gen))
return(pairprob)
}
if(step==0 && off.end > 0) step <- off.end*2
# map function
map.function <- match.arg(map.function)
if(map.function=="kosambi") mf <- mf.k
else if(map.function=="c-f") mf <- mf.cf
else if(map.function=="morgan") mf <- mf.m
else mf <- mf.h
# don't let error.prob be exactly zero (or >1)
if(error.prob < 1e-50) error.prob <- 1e-50
if(error.prob > 1) {
error.prob <- 1-1e-50
warning("error.prob shouldn't be > 1!")
}
n.ind <- nind(cross)
n.chr <- nchr(cross)
# type of chromosome?
chrtype <- class(cross$geno[[1]])
if(chrtype=="X") xchr <- TRUE
else xchr <- FALSE
if(type == "f2") {
one.map <- TRUE
if(!xchr) { # autosome
cfunc <- "calc_pairprob_f2"
n.gen <- 3
gen.names <- getgenonames("f2", "A", cross.attr=attributes(cross))
}
else { # X chromsome
cfunc <- "calc_pairprob_bc"
n.gen <- 2
gen.names <- c("g1","g2")
}
}
else if(type == "bc") {
cfunc <- "calc_pairprob_bc"
n.gen <- 2
if(!xchr) # autosome
gen.names <- getgenonames("bc", "A", cross.attr=attributes(cross))
else gen.names <- c("g1","g2")
one.map <- TRUE
}
else if(type == "riself" || type=="risib" || type=="dh") {
cfunc <- "calc_pairprob_bc"
n.gen <- 2
gen.names <- getgenonames(type, "A", cross.attr=attributes(cross))
one.map <- TRUE
}
else if(type == "4way") {
cfunc <- "calc_pairprob_4way"
n.gen <- 4
one.map <- FALSE
gen.names <- getgenonames(type, "A", cross.attr=attributes(cross))
}
else if(type == "ri4self" || type=="ri4sib" || type=="ri8self" || type=="ri8sib") {
cfunc <- paste("calc_pairprob_", type, sep="")
n.gen <- as.numeric(substr(type, 3, 3))
one.map <- TRUE
gen.names <- LETTERS[1:n.gen]
if(xchr)
warning("calc.pairprob not working properly for the X chromosome for 4- or 8-way RIL.")
}
else
stop("calc.pairprob not available for cross type ", type, ".")
# genotype data
gen <- cross$geno[[1]]$data
gen[is.na(gen)] <- 0
# get recombination fractions
if(one.map) {
# map <- create.map(cross$geno[[1]]$map,step,off.end)
rf <- mf(diff(map))
if(type=="risib" || type=="riself")
rf <- adjust.rf.ri(rf,substr(type,3,nchar(type)),class(cross$geno[[1]]))
rf[rf < 1e-14] <- 1e-14
# new genotype matrix with pseudomarkers filled in
newgen <- matrix(ncol=length(map),nrow=nrow(gen))
colnames(newgen) <- names(map)
newgen[,colnames(gen)] <- gen
newgen[is.na(newgen)] <- 0
n.pos <- ncol(newgen)
marnames <- names(map)
}
else {
# map <- create.map(cross$geno[[1]]$map,step,off.end)
rf <- mf(diff(map[1,]))
rf[rf < 1e-14] <- 1e-14
rf2 <- mf(diff(map[2,]))
rf2[rf2 < 1e-14] <- 1e-14
# new genotype matrix with pseudomarkers filled in
newgen <- matrix(ncol=ncol(map),nrow=nrow(gen))
colnames(newgen) <- colnames(map)
newgen[,colnames(gen)] <- gen
newgen[is.na(newgen)] <- 0
n.pos <- ncol(newgen)
marnames <- colnames(map)
}
if(n.pos < 2) return(NULL)
# below: at least two positions
# call the C function
if(one.map) {
z <- .C(cfunc,
as.integer(n.ind), # number of individuals
as.integer(n.pos), # number of markers
as.integer(newgen), # genotype data
as.double(rf), # recombination fractions
as.double(error.prob), #
as.double(rep(0,n.gen*n.ind*n.pos)),
pairprob=as.double(rep(0,n.ind*n.pos*(n.pos-1)/2*n.gen^2)),
PACKAGE="qtl")
}
else {
z <- .C(cfunc,
as.integer(n.ind), # number of individuals
as.integer(n.pos), # number of markers
as.integer(newgen), # genotype data
as.double(rf), # recombination fractions
as.double(rf2), # recombination fractions
as.double(error.prob), #
as.double(rep(0,n.gen*n.ind*n.pos)),
pairprob=as.double(rep(0,n.ind*n.pos*(n.pos-1)/2*n.gen^2)),
PACKAGE="qtl")
}
pairprob <- array(z$pairprob, dim=c(n.ind,n.pos*(n.pos-1)/2,n.gen,n.gen))
# 4- and 8-way RIL: reorganize the results
if(type=="ri4self" || type=="ri4sib" || type=="ri8self" || type=="ri8sib")
pairprob <- reorgRIpairprob(cross, pairprob)
pairprob
}
# end of calc.pairprob.R
|
aa6076377eb6c1404d50e717e29bd8663644beec
|
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
|
/data/genthat_extracted_code/coneproj/examples/coneA.Rd.R
|
bd50a495055cde7688c53d8dbf10cc27ed84afdf
|
[] |
no_license
|
surayaaramli/typeRrh
|
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
|
66e6996f31961bc8b9aafe1a6a6098327b66bf71
|
refs/heads/master
| 2023-05-05T04:05:31.617869
| 2019-04-25T22:10:06
| 2019-04-25T22:10:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 973
|
r
|
coneA.Rd.R
|
library(coneproj)
### Name: coneA
### Title: Cone Projection - Polar Cone
### Aliases: coneA
### ** Examples
# generate y
set.seed(123)
n <- 50
x <- seq(-2, 2, length = 50)
y <- - x^2 + rnorm(n)
# create the constraint matrix to make the first half of y monotonically increasing
# and the second half of y monotonically decreasing
amat <- matrix(0, n - 1, n)
for(i in 1:(n/2 - 1)){
amat[i, i] <- -1; amat[i, i + 1] <- 1
}
for(i in (n/2):(n - 1)){
amat[i, i] <- 1; amat[i, i + 1] <- -1
}
# call coneA
ans1 <- coneA(y, amat)
ans2 <- coneA(y, amat, w = (1:n)/n)
# make a plot to compare the unweighted fit and the weighted fit
par(mar = c(4, 4, 1, 1))
plot(y, cex = .7, ylab = "y")
lines(fitted(ans1), col = 2, lty = 2)
lines(fitted(ans2), col = 4, lty = 2)
legend("topleft", bty = "n", c("unweighted fit", "weighted fit"), col = c(2, 4), lty = c(2, 2))
title("ConeA Example Plot")
|
e03424cc523fc048a53095ad0dd591c5db5558bd
|
52586df6b1df22e19750306185ee69a7b09abf42
|
/FastMCMC - Main/new_kernel_saem/warfarin/cow_mamyula.R
|
e7a40b60b4aa75ead780258f23f82c1bfcce7d8d
|
[] |
no_license
|
BelhalK/AccelerationTrainingAlgorithms
|
5d1390f5a5cb6f24f59f2c06073040056014aa64
|
0cc5f4405ad103f704cd7c6259762a66fb6bf37f
|
refs/heads/master
| 2023-07-25T02:28:38.095277
| 2020-10-30T09:14:28
| 2020-10-30T09:14:28
| 94,530,148
| 0
| 0
| null | 2023-07-06T21:20:14
| 2017-06-16T09:46:26
|
Jupyter Notebook
|
UTF-8
|
R
| false
| false
| 15,676
|
r
|
cow_mamyula.R
|
# setwd("/Users/karimimohammedbelhal/Desktop/variationalBayes/mcmc_R_isolate/Dir2")
# source('compute_LL.R')
# source('func_aux.R')
# source('func_cov.R')
# source('func_distcond.R')
# source('func_FIM.R')
# source('func_ggplot2.R')
# source('func_plots.R')
# source('func_simulations.R')
# source('ggplot2_global.R')
# # source('KL.R')
# #source('vi.R')
# source('global.R')
# source('main.R')
# source('mcmc_main.R')
# source('main_estep.R')
# source('main_estep_mcmc.R')
# source('main_estep_morekernels.R')
# source('main_initialiseMainAlgo.R')
# source('main_mstep.R')
# source('SaemixData.R')
# source('plots_ggplot2.R')
# source('saemix-package.R')
# source('SaemixModel.R')
# source('SaemixRes.R')
# source('SaemixObject.R')
# source('zzz.R')
setwd("/Users/karimimohammedbelhal/Documents/GitHub/saem/cmaes/Dir")
source('compute_LL.R')
source('func_aux.R')
source('func_cov.R')
source('func_distcond.R')
source('func_FIM.R')
source('func_ggplot2.R')
source('func_plots.R')
source('func_simulations.R')
source('ggplot2_global.R')
# source('KL.R')
#source('vi.R')
source('global.R')
source('main.R')
source('mcmc_main.R')
source('main_estep.R')
source('main_estep_mcmc.R')
source('main_estep_morekernels.R')
source('main_initialiseMainAlgo.R')
source('main_mstep.R')
source('SaemixData.R')
source('plots_ggplot2.R')
source('saemix-package.R')
source('SaemixModel.R')
source('SaemixRes.R')
source('SaemixObject.R')
source('zzz.R')
setwd("/Users/karimimohammedbelhal/Documents/GitHub/saem/new_kernel_saem")
source('newkernel_main.R')
source('main_new.R')
source('main_estep_new.R')
source('main_estep_new2.R')
source('main_gd.R')
source('main_estep_gd.R')
source('main_estep_newkernel.R')
source('main_gd_mix.R')
source('main_estep_gd_mix.R')
source('main_estep_mix.R')
source('main_estep_newkernel.R')
source('main_mamyula.R')
source('main_estep_mala.R')
source("mixtureFunctions.R")
library("mlxR")
library(sgd)
library(gridExtra)
library(grid)
library(ggplot2)
library(lattice)
#####################################################################################
# Theophylline
# Data - changing gender to M/F
# theo.saemix<-read.table("data/theo.saemix.tab",header=T,na=".")
# theo.saemix$Sex<-ifelse(theo.saemix$Sex==1,"M","F")
# saemix.data<-saemixData(name.data=theo.saemix,header=TRUE,sep=" ",na=NA, name.group=c("Id"),name.predictors=c("Dose","Time"),name.response=c("Concentration"),name.covariates=c("Weight","Sex"),units=list(x="hr",y="mg/L",covariates=c("kg","-")), name.X="Time")
# Doc
# data(theo.saemix)
# theo.saemix_less <- theo.saemix[1:120,]
# # theo.saemix<-read.table("data/theo.saemix.tab",header=T,na=".")
# saemix.data<-saemixData(name.data=theo.saemix_less,header=TRUE,sep=" ",na=NA, name.group=c("Id"),name.predictors=c("Dose","Time"),name.response=c("Concentration"),name.covariates=c("Weight","Sex"),units=list(x="hr",y="mg/L",covariates=c("kg","-")), name.X="Time")
# Load the cow growth dataset shipped with saemix and wrap it in a saemixData
# object: response = weight, predictor = time, grouped by cow, with birth
# covariates attached.
data(cow.saemix)
# cow.saemix <- cow.saemix[1:594,]
saemix.data<-saemixData(name.data=cow.saemix,header=TRUE,name.group=c("cow"),
name.predictors=c("time"),name.response=c("weight"),
name.covariates=c("birthyear","twin","birthrank"),
units=list(x="days",y="kg",covariates=c("yr","-","-")))
# setwd("/Users/karimimohammedbelhal/Documents/GitHub/saem/new_kernel_saem/cow")
# cow.saemix<-read.table( "cow_synth.csv",header=T,na=".",sep=",")
# cow.saemix_less <- cow.saemix[1:10,1:3]
# setwd("/Users/karimimohammedbelhal/Documents/GitHub/saem/mcmc_newkernel")
# saemix.data<-saemixData(name.data=cow.saemix,header=TRUE,
# name.group=c("id"),name.predictors=c("time"),name.response=c("y"),
# units=list(x="yr",y="cm"))
growthcow<-function(psi,id,xidep) {
  # Structural model for saemix: asymptotic exponential growth.
  #
  # Args:
  #   psi   : matrix of individual parameters, one row per subject,
  #           columns (a, b, k).
  #   xidep : data frame of dependent variables; first column is time.
  #   id    : subject index for each row of xidep.
  # Returns: numeric vector of predictions, one per row of xidep.
  time_pts <- xidep[, 1]
  asym  <- psi[id, 1]  # a: asymptotic weight
  shape <- psi[id, 2]  # b: fraction of the asymptote missing at time 0
  rate  <- psi[id, 3]  # k: growth rate constant
  asym * (1 - shape * exp(-rate * time_pts))
}
# Build the saemix model: asymptotic growth with random effects on all three
# parameters (transform.par = c(1,1,1) -> log-normal), diagonal omega, and a
# constant residual error model.
saemix.model<-saemixModel(model=growthcow,
description="Exponential growth model",
psi0=matrix(c(700,0.9,0.02,0,0,0),ncol=3,byrow=TRUE,
dimnames=list(NULL,c("A","B","k"))),transform.par=c(1,1,1),fixed.estim=c(1,1,1),
covariate.model=matrix(c(0,0,0),ncol=3,byrow=TRUE),
covariance.model=matrix(c(1,0,0,0,1,0,0,0,1),ncol=3,byrow=TRUE),
omega.init=matrix(c(1,0,0,0,1,0,0,0,1),ncol=3,byrow=TRUE),error.model="constant")
# SAEM iteration schedule: K1 exploration iterations + K2 smoothing iterations.
K1 = 100
K2 = 50
iterations = 1:(K1+K2+1)
gd_step = 0.01
end = K1+K2
seed0 = 39546
#RWM (reference run: random-walk Metropolis kernels only; see nbiter.mcmc)
options<-list(seed=39546,map=F,fim=F,ll.is=F,nb.chains = 1, nbiter.mcmc = c(2,2,2,0,0,0),displayProgress=FALSE, nbiter.saemix = c(K1,K2))
theo_ref<-data.frame(saemix_mamyula(saemix.model,saemix.data,options))
theo_ref <- cbind(iterations, theo_ref)
theo_ref[end,]
graphConvMC_twokernels(theo_ref,theo_ref, title="new kernel")
#saem with mala
options.mala<-list(seed=39546,map=F,fim=F,ll.is=F,nb.chains = 1, nbiter.mcmc = c(0,0,0,2,0,0),displayProgress=FALSE,nbiter.saemix = c(K1,K2),sigma.val = 0.01,gamma.val=0.01)
theo_mala<-data.frame(saemix_mamyula(saemix.model,saemix.data,options.mala))
theo_mala <- cbind(iterations, theo_mala)
#saem with mamyula
options.mamyula<-list(seed=39546,map=F,fim=F,ll.is=F,nb.chains = 1, nbiter.mcmc = c(0,0,0,0,2,0),displayProgress=FALSE,nbiter.saemix = c(K1,K2),sigma.val = 0.1,gamma.val=0.01,lambda.val=0.2)
theo_mamyula<-data.frame(saemix_mamyula(saemix.model,saemix.data,options.mamyula))
theo_mamyula <- cbind(iterations, theo_mamyula)
# Convergence comparison plots across the three MCMC kernels.
graphConvMC_twokernels(theo_ref,theo_mala, title="new kernel")
graphConvMC_threekernels(theo_ref,theo_mala,theo_mamyula, title="new kernel")
graphConvMC_threekernels(theo_ref,theo_mamyula,theo_mamyula, title="new kernel")
# Replicate study: run each algorithm `replicate` times with different seeds
# to compare the variability of the parameter trajectories.
replicate = 10
seed0 = 395246
#RWM
# NOTE(review): final_rwm is seeded with the scalar 0 so the first bound row
# is a dummy; downstream code strips it with [-1,]. Growing a data frame with
# rbind inside a loop is quadratic -- consider a list + one do.call(rbind, .).
final_rwm <- 0
for (j in 1:replicate){
print(j)
options<-list(seed=j*seed0,map=F,fim=F,ll.is=F,nb.chains = 1, nbiter.mcmc = c(2,0,0,0,0,0),displayProgress=FALSE, nbiter.saemix = c(K1,K2))
theo_ref<-data.frame(saemix_mamyula(saemix.model,saemix.data,options))
theo_ref <- cbind(iterations, theo_ref)
theo_ref['individual'] <- j
final_rwm <- rbind(final_rwm,theo_ref)
}
# Rename: column 1 (iteration index) -> "time", column 9 (replicate) -> "id".
names(final_rwm)[1]<-paste("time")
names(final_rwm)[9]<-paste("id")
# One (id, time, value) data frame per tracked parameter (columns 2..8).
final_rwm1 <- final_rwm[c(9,1,2)]
final_rwm2 <- final_rwm[c(9,1,3)]
final_rwm3 <- final_rwm[c(9,1,4)]
final_rwm4 <- final_rwm[c(9,1,5)]
final_rwm5 <- final_rwm[c(9,1,6)]
final_rwm6 <- final_rwm[c(9,1,7)]
final_rwm7 <- final_rwm[c(9,1,8)]
# prctilemlx(final_rwm1[-1,],band = list(number = 8, level = 80)) + ggtitle("RWM")
# final_mix <- 0
# for (j in 1:replicate){
# print(j)
# options.mala<-list(seed=j*seed0,map=F,fim=F,ll.is=F,nb.chains = 1, nbiter.mcmc = c(0,0,0,2,0,0),nbiter.saemix = c(K1,K2),sigma.val = 0.01,gamma.val=0.01)
# theo_mix<-data.frame(saemix_mamyula(saemix.model,saemix.data,options.mala))
# theo_mix <- cbind(iterations, theo_mix)
# theo_mix['individual'] <- j
# final_mix <- rbind(final_mix,theo_mix)
# }
#mix (RWM and MAP new kernel for list of saem iterations)
final_mix <- 0
for (j in 1:replicate){
print(j)
options.mamyula<-list(seed=j*seed0,map=F,fim=F,ll.is=F,nb.chains = 1, nbiter.mcmc = c(0,0,0,0,6,0),displayProgress=FALSE,nbiter.saemix = c(K1,K2),sigma.val = 0.1,gamma.val=0.01,lambda.val=0.2)
theo_mix<-data.frame(saemix_mamyula(saemix.model,saemix.data,options.mamyula))
theo_mix <- cbind(iterations, theo_mix)
theo_mix['individual'] <- j
final_mix <- rbind(final_mix,theo_mix)
}
names(final_mix)[1]<-paste("time")
names(final_mix)[9]<-paste("id")
final_mix1 <- final_mix[c(9,1,2)]
final_mix2 <- final_mix[c(9,1,3)]
final_mix3 <- final_mix[c(9,1,4)]
final_mix4 <- final_mix[c(9,1,5)]
final_mix5 <- final_mix[c(9,1,6)]
final_mix6 <- final_mix[c(9,1,7)]
final_mix7 <- final_mix[c(9,1,8)]
# prctilemlx(final_mix1[-1,1:3],band = list(number = 8, level = 80)) + ggtitle("mix")
# Build one prediction-interval plot per parameter (7 in total), comparing the
# reference (group 1, RWM) and new-kernel (group 2) runs, then arrange all
# seven on a single page. For each parameter: tag the two replicate sets with
# a group id, offset the new-kernel replicate ids so they do not collide,
# drop the dummy first row ([-1,]) and stack the two sets.
# NOTE(review): the seven near-identical sections are candidates for a loop or
# helper, but are kept as-is to preserve the global objects they create.
final_rwm1['group'] <- 1
final_mix1['group'] <- 2
final_mix1$id <- final_mix1$id +1
final1 <- rbind(final_rwm1[-1,],final_mix1[-1,])
labels <- c("ref","new")
# prctilemlx(final1[c(1,4,2,3)], band = list(number = 4, level = 80),group='group', label = labels)
# plt1 <- prctilemlx(final1, band = list(number = 4, level = 80),group='group', label = labels)
# rownames(final1) <- 1:nrow(final1)
plot.S1 <- plot.prediction.intervals(final1[c(1,4,2,3)],
labels = labels,
legend.title = "algos",
colors = c('red', 'blue'))
plot.S <- plot.S1 + ylab("ka")+ theme(legend.position=c(0.9,0.8))+ theme_bw()
# print(plot.S1)
final_rwm2['group'] <- 1
final_mix2['group'] <- 2
final_mix2$id <- final_mix2$id +1
final2 <- rbind(final_rwm2[-1,],final_mix2[-1,])
labels <- c("ref","new")
# prctilemlx(final2[c(1,4,2,3)], band = list(number = 4, level = 80),group='group', label = labels)
# plt1 <- prctilemlx(final1, band = list(number = 4, level = 80),group='group', label = labels)
# rownames(final1) <- 1:nrow(final1)
plot.S2 <- plot.prediction.intervals(final2[c(1,4,2,3)],
labels = labels,
legend.title = "algos",
colors = c('red', 'blue'))
plot.S2 <- plot.S2 + ylab("V")+ theme(legend.position=c(0.9,0.8))+ theme_bw()
final_rwm3['group'] <- 1
final_mix3['group'] <- 2
final_mix3$id <- final_mix3$id +1
final3 <- rbind(final_rwm3[-1,],final_mix3[-1,])
labels <- c("ref","new")
# prctilemlx(final3[c(1,4,2,3)], band = list(number = 4, level = 80),group='group', label = labels)
# plt1 <- prctilemlx(final1, band = list(number = 4, level = 80),group='group', label = labels)
# rownames(final1) <- 1:nrow(final1)
plot.S3 <- plot.prediction.intervals(final3[c(1,4,2,3)],
labels = labels,
legend.title = "algos",
colors = c('red', 'blue'))
plot.S3 <- plot.S3 + ylab("k")+ theme(legend.position=c(0.9,0.8))+ theme_bw()
final_rwm4['group'] <- 1
final_mix4['group'] <- 2
final_mix4$id <- final_mix4$id +1
final4 <- rbind(final_rwm4[-1,],final_mix4[-1,])
labels <- c("ref","new")
# prctilemlx(final4[c(1,4,2,3)], band = list(number = 4, level = 80),group='group', label = labels)
# plt1 <- prctilemlx(final1, band = list(number = 4, level = 80),group='group', label = labels)
# rownames(final1) <- 1:nrow(final1)
plot.S4 <- plot.prediction.intervals(final4[c(1,4,2,3)],
labels = labels,
legend.title = "algos",
colors = c('red', 'blue'))
plot.S4 <- plot.S4 + ylab("w2ka")+ theme(legend.position=c(0.9,0.8))+ theme_bw()
final_rwm5['group'] <- 1
final_mix5['group'] <- 2
final_mix5$id <- final_mix5$id +1
final5 <- rbind(final_rwm5[-1,],final_mix5[-1,])
labels <- c("ref","new")
# prctilemlx(final5[c(1,4,2,3)], band = list(number = 4, level = 80),group='group', label = labels)
# plt1 <- prctilemlx(final1, band = list(number = 4, level = 80),group='group', label = labels)
# rownames(final1) <- 1:nrow(final1)
plot.S5 <- plot.prediction.intervals(final5[c(1,4,2,3)],
labels = labels,
legend.title = "algos",
colors = c('red', 'blue'))
plot.S5 <- plot.S5 + ylab("w2V")+ theme(legend.position=c(0.9,0.8))+ theme_bw()
final_rwm6['group'] <- 1
final_mix6['group'] <- 2
final_mix6$id <- final_mix6$id +1
final6 <- rbind(final_rwm6[-1,],final_mix6[-1,])
labels <- c("ref","new")
# prctilemlx(final6[c(1,4,2,3)], band = list(number = 4, level = 80),group='group', label = labels)
# plt1 <- prctilemlx(final1, band = list(number = 4, level = 80),group='group', label = labels)
# rownames(final1) <- 1:nrow(final1)
plot.S6 <- plot.prediction.intervals(final6[c(1,4,2,3)],
labels = labels,
legend.title = "algos",
colors = c('red', 'blue'))
plot.S6 <- plot.S6 + ylab("w2k")+ theme(legend.position=c(0.9,0.8))+ theme_bw()
final_rwm7['group'] <- 1
final_mix7['group'] <- 2
final_mix7$id <- final_mix7$id +1
final7 <- rbind(final_rwm7[-1,],final_mix7[-1,])
labels <- c("ref","new")
# prctilemlx(final7[c(1,4,2,3)], band = list(number = 4, level = 80),group='group', label = labels)
# plt1 <- prctilemlx(final1, band = list(number = 4, level = 80),group='group', label = labels)
# rownames(final1) <- 1:nrow(final1)
plot.S7 <- plot.prediction.intervals(final7[c(1,4,2,3)],
labels = labels,
legend.title = "algos",
colors = c('red', 'blue'))
plot.S7 <- plot.S7 + ylab("a")+ theme(legend.position=c(0.9,0.8))+ theme_bw()
# One page with all seven parameter panels.
grid.arrange(plot.S, plot.S2,plot.S3,plot.S4, plot.S5,plot.S6,plot.S7,ncol=3)
#values table
#values table
# Monte-Carlo summaries over replicates: per-parameter sample mean over the
# smoothing phase, then variance and squared error against true_param.
# NOTE(review): the row index (j*K1):(j*(K1+K2)) does not account for the
# dummy first row nor for the per-replicate block length (K1+K2+1), so for
# j > 1 it likely straddles replicate boundaries -- verify the intended rows.
sample_mean_rwm <- 0
var_rwm <- 0
error_rwm <- 0
true_param <- c(1.5,32,0.1,0.4,0.01,0.8)
for (j in 1:replicate){
sample_mean_rwm <- sample_mean_rwm + colMeans(final_rwm[(j*K1):(j*(K1+K2)),c(2,3,4,5,6,8)])
}
sample_mean_rwm = 1/replicate*sample_mean_rwm
for (j in 1:replicate){
var_rwm <- var_rwm + (final_rwm[(j*(K1+K2)),c(2,3,4,5,6,8)]-sample_mean_rwm)^2
error_rwm <- error_rwm + (final_rwm[(j*(K1+K2)),c(2,3,4,5,6,8)]-true_param)^2
}
error_rwm = 1/replicate*error_rwm
var_rwm = 1/replicate*var_rwm
# Same summaries for the new-kernel runs.
sample_mean_mix <- 0
var_mix <- 0
error_mix <- 0
true_param <- c(1.5,32,0.1,0.4,0.01,0.8)
for (j in 1:replicate){
sample_mean_mix <- sample_mean_mix + colMeans(final_mix[(j*K1):(j*(K1+K2)),c(2,3,4,5,6,8)])
}
sample_mean_mix = 1/replicate*sample_mean_mix
for (j in 1:replicate){
var_mix <- var_mix + (final_mix[(j*(K1+K2)),c(2,3,4,5,6,8)]-sample_mean_mix)^2
error_mix <- error_mix + (final_mix[(j*(K1+K2)),c(2,3,4,5,6,8)]-true_param)^2
}
error_mix = 1/replicate*error_mix
var_mix = 1/replicate*var_mix
# Plot prediction intervals (ribbon plus optional median line) for grouped
# Monte-Carlo trajectories.
#
# Args:
#   r            data frame with id/time/value columns plus a 'group' column
#                (as assembled by the final* objects above).
#   plot.median  draw the median trajectory on top of the ribbon?
#   level        interval level forwarded to prctilemlx().
#   labels       legend labels; defaults to levels(r$group).
#   legend.title legend title; defaults to "group".
#   colors       optional vector of fill/line colours, one per group.
# Returns: a ggplot object.
plot.prediction.intervals <- function(r, plot.median=TRUE, level=1, labels=NULL,
legend.title=NULL, colors=NULL) {
P <- prctilemlx(r, number=1, level=level, plot=FALSE)  # percentile bands (mlxR)
if (is.null(labels)) labels <- levels(r$group)
if (is.null(legend.title)) legend.title <- "group"
# Standardise the band column names: lower bound, median, upper bound.
names(P$y)[2:4] <- c("p.min","p50","p.max")
pp <- ggplot(data=P$y)+ylab(NULL)+
geom_ribbon(aes(x=time,ymin=p.min, ymax=p.max,fill=group),alpha=.5)
if (plot.median)
pp <- pp + geom_line(aes(x=time,y=p50,colour=group))
# The colour scale mirrors the fill scale with its legend suppressed.
# NOTE(review): guide=FALSE is deprecated in recent ggplot2 (use guide="none").
if (is.null(colors)) {
pp <- pp + scale_fill_discrete(name=legend.title,
breaks=levels(r$group),
labels=labels)
pp <- pp + scale_colour_discrete(name=legend.title,
breaks=levels(r$group),
labels=labels,
guide=FALSE)
} else {
pp <- pp + scale_fill_manual(name=legend.title,
breaks=levels(r$group),
labels=labels,
values=colors)
pp <- pp + scale_colour_manual(name=legend.title,
breaks=levels(r$group),
labels=labels,
guide=FALSE,values=colors)
}
return(pp)
}
|
acea642194c37c360b606230ecaba053783fdaea
|
0420fa9f0a499af5edc75c7ddbe606a7ea684e6c
|
/R/zzz.R
|
58a6dc02e10f91c9159d080c3b7c902cddcb530a
|
[
"MIT"
] |
permissive
|
coolbutuseless/cairocore
|
ca8a2b82f594b086c7de38149fb3d3a5f150b455
|
5bf9f2f170f3c7ee81e0b5c709946745950115d4
|
refs/heads/master
| 2022-12-03T18:36:56.909944
| 2020-08-30T04:04:22
| 2020-08-30T04:04:22
| 288,104,420
| 12
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 171
|
r
|
zzz.R
|
# Package-private environment holding mutable package state, kept out of the
# user's workspace (parent = emptyenv() so lookups never leak upward).
.env <- new.env(parent = emptyenv())
# Initialise package state when the namespace is loaded.
.onLoad <- function(...) {
# Keep track of whether pkg has shown warning about memory-related functions
.env$warned <- FALSE
}
|
b4995a0b6d89b2de8f508148b81c75449f93d250
|
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
|
/data/genthat_extracted_code/GLMsData/examples/kstones.Rd.R
|
cbf7b60487742507b86eecc7591c45d37295b823
|
[] |
no_license
|
surayaaramli/typeRrh
|
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
|
66e6996f31961bc8b9aafe1a6a6098327b66bf71
|
refs/heads/master
| 2023-05-05T04:05:31.617869
| 2019-04-25T22:10:06
| 2019-04-25T22:10:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 168
|
r
|
kstones.Rd.R
|
library(GLMsData)
### Name: kstones
### Title: Treating kidney stones
### Aliases: kstones
### Keywords: datasets
### ** Examples
# Load the kidney-stones dataset and print a summary of its variables.
data(kstones)
summary(kstones)
|
3bd93f13f2faf07e753862e44707a3058904078a
|
b7cb283274a351a548b76ab5b7a4bca727fcd148
|
/run_analysis.R
|
3e687c9bbd9c8ca5c572b9e6cb0515cf48f1a500
|
[] |
no_license
|
bryanstephens/cleaning-data-course-project
|
2422bb592b155bcac5f26a0697c22303b3df7f5f
|
81cfb5c8e395bce0ff648682c087d6740b682899
|
refs/heads/master
| 2021-01-17T05:35:24.903314
| 2015-06-21T20:59:22
| 2015-06-21T20:59:22
| 37,818,046
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,813
|
r
|
run_analysis.R
|
library(plyr)
library(dplyr)
# Running the run_analysis() function will write a file "output.txt" that follows the following process:
# 1. Merges the training and the test sets to create one data set.
# 2. Extracts only the measurements on the mean and standard deviation for each measurement.
# 3. Uses descriptive activity names to name the activities in the data set
# 4. Appropriately labels the data set with descriptive variable names.
# 5. From the data set in step 4, creates a second, independent tidy data set with the average of each variable for each activity and each subject.
#
# The function will assume the data directory is in the root of the working directory at "data/", but dataDir
# can be specified
# Run the tidy-data pipeline on the UCI HAR dataset:
#   1. Merge the training and the test sets into one data set.
#   2. Extract only the mean and standard deviation measurements.
#   3./4. Attach descriptive activity names and variable labels.
#   5. Write a tidy data set ("output.txt") with the average of each variable
#      for each activity and each subject.
#
# Args:
#   dataDir  path to the unzipped UCI HAR data (default: "data" under the
#            working directory).
# Side effects: writes "output.txt" in the working directory.
run_analysis <- function(dataDir = "data") {
    # Human readable labels for activities and features.
    activity_labels <- read.table(file.path(dataDir, "activity_labels.txt"), sep = " ", col.names = c("activity_id", "activity_name"))
    feature_labels <- read.table(file.path(dataDir, "features.txt"), sep = " ", col.names = c("feature_id", "feature_name"))
    # Test set: subject ids, measurements (named after the features), activity ids.
    subject_test <- read.table(file.path(dataDir, "test", "subject_test.txt"), col.names = c("subject_id"))
    X_test_data <- read.table(file.path(dataDir, "test", "X_test.txt"), col.names = feature_labels$feature_name)
    activity_test <- read.table(file.path(dataDir, "test", "y_test.txt"), col.names = c("activity_id"))
    subject_test_activity_data <- cbind(subject_test, activity_test, X_test_data)
    # Training set, loaded the same way.
    subject_train <- read.table(file.path(dataDir, "train", "subject_train.txt"), col.names = c("subject_id"))
    X_train_data <- read.table(file.path(dataDir, "train", "X_train.txt"), col.names = feature_labels$feature_name)
    activity_train <- read.table(file.path(dataDir, "train", "y_train.txt"), col.names = c("activity_id"))
    subject_train_activity_data <- cbind(subject_train, activity_train, X_train_data)
    # Combine both sets and attach human readable activity names.
    all_data <- rbind(subject_test_activity_data, subject_train_activity_data)
    all_data_with_labels <- merge(all_data, activity_labels)
    # Keep subject, activity and every mean/std measurement column.
    subset_labeled_data <- select(all_data_with_labels, subject_id, activity_name, contains("mean"), contains("std"))
    # Average each measurement per (subject, activity) pair.
    tidy_data <- ddply(subset_labeled_data, .(subject_id, activity_name), function(x) colMeans(x[,3:ncol(x)]))
    # Fix: spell out row.names (the original's "row.name" only worked via
    # partial argument matching); file.path() on one component was a no-op.
    write.table(tidy_data, file = "output.txt", row.names = FALSE)
}
|
78f0f8ee65e7d3357defdbf201facd33b6f67e1a
|
fd079194e5404f91dc020d570246bfbb7e8d306c
|
/Handouts/4-Stan introduction/R-Stan code/intro_stan_R_script.R
|
993031ce3f0e8b35b5b2b03a16714b3fbe4f9f0b
|
[] |
no_license
|
LeoEgidi/GLMM-2021
|
201c4f073436489e99e8881d9fc816528d899544
|
e3fd3232152a3ef12f6e0dc848909cc7f834de14
|
refs/heads/main
| 2023-04-02T09:20:28.217417
| 2021-04-07T11:08:13
| 2021-04-07T11:08:13
| 345,648,334
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,393
|
r
|
intro_stan_R_script.R
|
##############################
## 8 schools example
##############################
library(rstan)
library(bayesplot)
# 8-schools data: J studies, observed effects y, known standard errors sigma.
schools_dat <- list(J = 8,
y = c(28, 8, -3, 7, -1, 1, 18, 12),
sigma = c(15, 10, 16, 11, 9, 11, 10, 18))
# Fit the hierarchical model defined in 8schools.stan (rstan defaults: 4 chains).
fit_8schools <- stan(file = '8schools.stan', data = schools_dat)
print(fit_8schools, pars=c("mu", "tau", "theta"))
posterior <- as.array(fit_8schools)
color_scheme_set("red")
# Plotmath labels for the parameters, used on the y axes of the interval and
# area plots below (indices 1:10 cover mu, tau and theta[1..8], reversed to
# match the plotting order).
par_names <- c()
par_names[1]<-expression(mu)
par_names[2]<-expression(tau)
par_names[3]<-expression(theta[8])
par_names[4]<-expression(theta[7])
par_names[5]<-expression(theta[6])
par_names[6]<-expression(theta[5])
par_names[7]<-expression(theta[4])
par_names[8]<-expression(theta[3])
par_names[9]<-expression(theta[2])
par_names[10]<-expression(theta[1])
par_names[11]<-expression(eta[1])
par_names[12]<-expression(eta[2])
par_names[13]<-expression(eta[3])
par_names[14]<-expression(eta[4])
par_names[15]<-expression(eta[5])
par_names[16]<-expression(eta[6])
par_names[17]<-expression(eta[7])
par_names[18]<-expression(eta[8])
# posterior intervals
pdf(file="post_int_8schools.pdf", width =9, height=8.5)
mcmc_intervals(posterior,regex_pars=c("theta","tau", "mu" ))+
scale_y_discrete(labels = rev((parse(text= par_names[1:10]))))+
xaxis_text(on =TRUE, size=rel(1.9))+
yaxis_text(on =TRUE, size=rel(1.9))+
ggtitle("Posterior intervals")
dev.off()
# posterior areas
pdf(file="post_areas_8schools.pdf", width =9, height=8.5)
mcmc_areas(posterior, pars=c( "theta[1]", "theta[2]",
"theta[3]", "theta[4]", "theta[5]", "theta[6]",
"theta[7]", "theta[8]",
"tau", "mu" ))+
scale_y_discrete(labels = rev((parse(text= par_names[1:10]))))+
xaxis_text(on =TRUE, size=rel(1.9))+
yaxis_text(on =TRUE, size=rel(1.9))+
ggtitle("Posterior areas")
dev.off()
# marginal posterior
pdf(file="marg_post_8schools.pdf", width=12, height =8)
mcmc_dens(posterior)
dev.off()
# marginal posterior overlayed
pdf(file="marg_post_8schools_4chains.pdf", width=12.4, height =8)
mcmc_dens_overlay(posterior)
dev.off()
# bivariate plots
pdf(file="pairs_post_8schools.pdf", width=10, height=8)
mcmc_pairs(posterior, pars=c("mu", "tau"))
dev.off()
# trace plots
pdf(file="trace_post_8schools_4chains.pdf", width=12.4, height =8)
mcmc_trace(posterior)
dev.off()
|
68c2b8061ba446288154b054cbf15561fca483de
|
3a4ddc27bceb2a5e17fb17d8089e05b8c8452eb3
|
/cleanData_miningTools/million_fake_data.R
|
03b441f13485d4aa93e638a7e952d7d104f2a246
|
[] |
no_license
|
Wynnlin329/R_EDA_minning
|
f61e6b05efb66e547a701fa1a84785279d7907e5
|
c7522f69c393985cc691a4ef11854adf3e731d57
|
refs/heads/master
| 2021-02-12T05:28:47.807575
| 2020-03-03T07:20:24
| 2020-03-03T07:20:24
| 244,565,199
| 0
| 0
| null | null | null | null |
BIG5
|
R
| false
| false
| 1,958
|
r
|
million_fake_data.R
|
# Generate one million fake member profiles.
Sys.setlocale(category = "LC_ALL", locale = "cht")
n_members <- 1000000
# Member ids are zero-padded to a fixed width of 7 characters. sprintf()
# replaces the original paste()/sqldf substr() round-trip, which pulled in
# an install.packages() call and the sqldf dependency just to trim a string,
# and left a nested data.frame in the id column that had to be renamed.
profile_final <- data.frame(
id = sprintf("%07d", seq_len(n_members)),
age = round(rnorm(n_members, 25, 3)),
gender = sample(c("男生", "女生"), size = n_members, replace = TRUE),
group = sample(c("誠品人", "鋼鐵仁", "香港人"), size = n_members, replace = TRUE),
zone = sample(c("台灣", "香港"), size = n_members, replace = TRUE)
)
head(profile_final, 10)
# File output: member profiles.
write.table(profile_final, file = "profile.csv", quote = FALSE, sep = ",", row.names = FALSE,
col.names = TRUE)
# Generate one year of fake purchase records for the member base.
# Each month draws a random number of transactions; monthly frames are
# collected in a list and bound once, instead of rbind-growing a data frame
# whose sentinel columns (Logicals/Characters) never matched the monthly
# frames anyway.
trans_list <- vector("list", 12)
# Fake purchase records generated per month.
for (i in 1:12) {
id <- sample(profile_final$id, size = round(runif(1, 300000, 700000)), replace = TRUE)
i_month <- sprintf("%02d", i)  # zero-padded month for the date string
trans_list[[i]] <- data.frame(
data_month = paste('2019/', i_month, '/01', sep = ''),
id = id,
product = sample(c("中文書", "外文書", "雜誌", "兒童", "CD", "DVD", "風格文具", '生活雜貨'), size =
length(id), replace = TRUE),
amt = round(runif(length(id), 50, 699))
)
}
trans_df <- do.call(rbind, trans_list)
# File output: transactions.
write.table(trans_df, file = "trans.csv", quote = FALSE, sep = ",", row.names = FALSE, col.names
=TRUE )
|
b4065384131d8c2ffde5d215741663b57ec7154c
|
b54045d3fb01b4a81f31e94c4731def4d1ee4a9e
|
/R/methods.r
|
a9b8260b0f8d6dd684b6dbaa8bfd3e300e12c6d7
|
[] |
no_license
|
orlinresearch/sdcMicro
|
f7db0fd5286a6310477039aecac50fb0acd01aa1
|
54d923bc86250cf0a75a8e95a0384bee96973a51
|
refs/heads/master
| 2021-01-17T18:20:17.180537
| 2014-07-18T12:55:35
| 2014-07-18T12:55:35
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 5,107
|
r
|
methods.r
|
#' query slots of \code{sdcMicroObj}-objects: return the slot named by
#' \code{type} (see the method below for the fall-back to the previous state)
#' @rdname get.sdcMicroObj-method
#'
#' @note internal function
#' @author Bernhard Meindl \email{bernhard.meindl@@statistik.gv.at}
setGeneric('get.sdcMicroObj', function(object, type) {standardGeneric('get.sdcMicroObj')})
#' modify \code{sdcMicroObj}-objects depending on argument \code{type}
#'
#' @param object an object of class \code{sdcMicroObj}
#' @param type a character vector of length 1 defining what to calculate|return|modify. Allowed types are:
#' \itemize{
#' \item origData: set slot 'origData' of argument \code{object}}
#' @param input a list depending on argument \code{type}.
#' \itemize{
#' \item type==dataOrig: a list containing original microdata}
#'
#' @return an object of class \code{sdcMicroObj}
#'
#' @export
#' @docType methods
#' @rdname set.sdcMicro-method
#'
#' @note internal function
#' @author Bernhard Meindl \email{bernhard.meindl@@statistik.gv.at}
setGeneric('set.sdcMicroObj', function(object, type, input) {standardGeneric('set.sdcMicroObj')})
#' undo last changes to \code{sdcMicroObj}-objects if possible
#' note that this will only work if the user makes use of the prev slot or uses the sdcMicroObj functions
#'
#' @param obj an object of class \code{sdcMicroObj}
#'
#' @return an object of class \code{sdcMicroObj}
#'
#' @export
#' @docType methods
#' @rdname set.sdcMicro-method
#'
#' @note internal function. NOTE(review): the @rdname above points at
#' set.sdcMicro-method, which looks copy-pasted from the previous block --
#' confirm the intended Rd target (the method below uses undo.sdcMicroObj-method).
#' @author Elias Rut
setGeneric('undolast', function(obj) {standardGeneric('undolast')})
############################################
### methods only for class 'sdcMicroObj' ###
########################################ä###
#' @aliases get.sdcMicroObj,sdcMicroObj,character-method
#' @rdname get.sdcMicroObj-method
# Return slot `type` of `object`; if the slot was never set on this object
# (not listed in @set) and a previous state exists, recurse into @prev.
setMethod(f='get.sdcMicroObj', signature=c('sdcMicroObj', 'character'),
definition=function(object, type) {
# Whitelist of retrievable slot names.
if ( !type %in% c('origData', 'keyVars', 'pramVars', 'numVars',
'weightVar', 'hhId', 'strataVar', 'sensibleVar',
'manipKeyVars', 'manipPramVars', 'manipNumVars','manipStrataVar',
'originalRisk','risk', 'utility', 'pram', 'localSuppression','options', 'prev', 'set', 'deletedVars') ) {
stop("get.sdcMicroObj:: argument 'type' is not valid!\n")
}
# Walk back through previous states until a state that set this slot is found.
if((!type %in% object@set) && !is.null(object@prev)) return (get.sdcMicroObj(object@prev, type))
if (!type %in% slotNames(object)) {
stop("wrong argument 'type'!\n")
}
return(slot(object, type))
}
)
#' @aliases set.sdcMicroObj,sdcMicroObj,character,listOrNULL-method
#' @rdname set.sdcMicroObj-method
# Assign input[[1]] to the slot named by `type`, record the slot name in @set
# (so get.sdcMicroObj knows it was set on this state) and validate the object.
setMethod(f='set.sdcMicroObj', signature=c('sdcMicroObj', 'character', 'listOrNULL'),
definition=function(object, type, input) {
# Whitelist of writable slot names.
if ( !type %in% c('origData','keyVars', 'pramVars', 'numVars','weightVar','hhId','strataVar',
'sensibleVar', 'manipPramVars', 'manipKeyVars','manipNumVars','manipStrataVar','risk','utility','pram','localSuppression','options','prev','set') ) {
stop("set.sdcMicroObj:: check argument 'type'!\n")
}
# One branch per slot; only the first element of `input` is used.
if ( type == 'origData' ) object@origData <- input[[1]]
if ( type == 'keyVars' ) object@keyVars <- input[[1]]
if ( type == 'pramVars' ) object@pramVars <- input[[1]]
if ( type == 'numVars' ) object@numVars <- input[[1]]
if ( type == 'weightVar' ) object@weightVar <- input[[1]]
if ( type == 'hhId' ) object@hhId <- input[[1]]
if ( type == 'strataVar' ) object@strataVar <- input[[1]]
if ( type == 'sensibleVar' ) object@sensibleVar <- input[[1]]
if ( type == 'manipKeyVars' ) object@manipKeyVars <- input[[1]]
if ( type == 'manipPramVars' ) object@manipPramVars <- input[[1]]
if ( type == 'manipNumVars' ) object@manipNumVars <- input[[1]]
if ( type == 'manipStrataVar' ) object@manipStrataVar <- input[[1]]
if ( type == 'risk' ) object@risk <- input[[1]]
if ( type == 'utility' ) object@utility <- input[[1]]
if ( type == 'pram' ) object@pram <- input[[1]]
if ( type == 'localSuppression' ) object@localSuppression <- input[[1]]
if ( type == 'options' ) object@options <- input[[1]]
if ( type == 'prev' ) object@prev <- input[[1]]
if ( type == 'set' ) object@set <- input[[1]]
# Track which slots have been set on this state (deduplicated).
if ( is.null ( object@set )) object@set <- list()
if ( length(object@set) == 0 || ! type %in% object@set ) object@set <- c(object@set, type)
validObject(object)
return(object)
}
)
# Generic for derived computations on sdcMicroObj objects.
setGeneric('calc.sdcMicroObj', function(object, type, ...) { standardGeneric('calc.sdcMicroObj')})
# Currently only type='violateKAnon': count observations violating
# k-anonymity (frequency count fk <= k, with k taken from ... or 1).
# NOTE(review): the error message says "set.sdcMicroObj" (copy-paste) and
# get.sdcMicroObj does not list "fk" among its allowed types, so this call
# presumably stops at runtime -- verify against the full package source.
setMethod(f='calc.sdcMicroObj', signature=c('sdcMicroObj', 'character'),
definition=function(object, type, ...) {
if ( !type %in% c('violateKAnon') ) {
stop("set.sdcMicroObj:: check argument 'type'!\n")
}
### how many observations violate k-Anonymity
if ( type == 'violateKAnon' ) {
fk <- get.sdcMicroObj(object, type="fk")
# Optional k supplied via ...; defaults to 1.
args <- list(...)
m <- match("k", names(args))
if ( !is.na(m)) {
k <- args[[m]]
} else {
k <- 1
}
return(length(which(fk <= k)))
}
}
)
#' @rdname undo.sdcMicroObj-method
# Revert to the previously stored state of the object, if one exists;
# otherwise emit a warning and hand back the input unchanged.
setMethod(f='undolast', signature=c('sdcMicroObj'),
definition=function(obj) {
prev_state <- obj@prev
if ( !is.null(prev_state) ) {
return(prev_state)
}
warning("Can not undo. No previous state stored. (The input object is returned).\n")
obj
}
)
|
64385d21b2f2a148aacbb82d8f599a8ce4b74a01
|
6b2d1646110a34df8d372aae7bbe56992f6e7485
|
/README.rd
|
2d359519283b7def025fd5ae1d7881b1fdb9d89c
|
[] |
no_license
|
bearachute/bearachute.github.com
|
5adbe36c0147b792b831243e5dc90d5350dc7ee6
|
279135512f3ea31b8455754159db6f093b1c48b0
|
refs/heads/master
| 2021-01-21T04:39:08.824736
| 2016-06-10T09:17:28
| 2016-06-10T09:17:28
| 607,921
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 16
|
rd
|
README.rd
|
# page of mine
|
0b726475611f2d8676576ba2a235992cc39e285c
|
8438d199bb0b209533b3d444ec26414e410bb0ca
|
/measurement_stability/plots/plot-parallel-ratios.r
|
8f631712e61342a204f33a7e41c8fc1eae3896de
|
[] |
no_license
|
janbuchar/MT
|
5144c454ff9d14799fe17851d16eb45ae39bffaa
|
4a6bcb8d47aef30c527aa1172a09541fa8ce2c34
|
refs/heads/master
| 2023-08-04T08:21:21.482313
| 2020-05-28T08:16:57
| 2020-05-28T08:16:57
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 365
|
r
|
plot-parallel-ratios.r
|
library("ggplot2")
library("tikzDevice")
source("../../plot_helpers/helpers.r")
# Read the measurement data; the CSV path comes from the command line via the
# sourced helper filename.from.args().
ratios <- read.csv(filename.from.args())
# Render the histogram to a TikZ file for LaTeX inclusion.
# NOTE(review): the tikz device is not closed with dev.off() in this view --
# confirm the output file is finalized elsewhere (e.g. on R exit).
tikz("parallel-run-ratios.tex", width=5.5, height=4)
ggplot(ratios, aes(x=parallel_ratio)) +
labs(x="Ratio of time spent with all processes running and the total time", y="Number of groups") +
scale_x_reverse() +
geom_histogram(bins=20)
|
9b8ce4f2b73d72077048d81b1674fc0131e0520b
|
38396a5ba18094b2e7e5bb53206d943f726f7bb7
|
/R/coursera/jhu-data-science/02_r-programming/lessons/week2/add2.R
|
96c92cc172f06a2313c5fd720ae62176e5904ac4
|
[] |
no_license
|
frauca/samples
|
a048d051304656757c33a4b5bc23830c52cd4db0
|
cd5ba431fe343fae2f47aa7c3b956d60eddf8a78
|
refs/heads/master
| 2023-08-16T04:31:42.747222
| 2023-08-01T14:46:51
| 2023-08-01T14:46:51
| 3,519,641
| 0
| 0
| null | 2022-12-15T23:23:16
| 2012-02-22T22:07:03
|
Python
|
UTF-8
|
R
| false
| false
| 265
|
r
|
add2.R
|
# Elementwise sum of x and y.
add2 <- function(x, y) {
  x + y
}

# Convenience wrapper: elements of x strictly greater than 10.
avobe10 <- function(x) {
  avobe(x, 10)
}

# Elements of x strictly greater than the threshold y.
# (Name kept as-is -- "avobe" -- to preserve the public interface.)
avobe <- function(x, y) {
  x[x > y]
}

# Column means of a matrix or data frame.
#
# Args:
#   x         matrix or data frame with numeric columns.
#   removeNA  drop NA values per column before averaging (default TRUE).
# Returns: numeric vector of length ncol(x) (numeric(0) for zero columns).
columnsmean <- function(x, removeNA = TRUE) {
  nc <- ncol(x)
  means <- numeric(nc)
  # Fix: seq_len(nc) handles nc == 0 safely; the original 1:nc would
  # iterate over c(1, 0) and fail on an empty input.
  for (i in seq_len(nc)) {
    means[i] <- mean(x[, i], na.rm = removeNA)
  }
  means
}
|
7023643ced00e00aa7dcd92c2655c54c3b5500da
|
288b4b6998906714ab368e0ee14c70a4059be4ab
|
/man/dat.nakagawa2015.Rd
|
2f2ae858c2327566f412989eba0721ae77150634
|
[] |
no_license
|
qsh7950/metadat
|
f6243a382c8c0e3f4c9a0e2cd657edb0ffa3e018
|
5c70fa63d7acfa1f315534fb292950513cb2281e
|
refs/heads/master
| 2021-02-26T06:42:18.937872
| 2019-10-21T21:58:33
| 2019-10-21T21:58:33
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 7,193
|
rd
|
dat.nakagawa2015.Rd
|
\name{dat.nakagawa2015}
\docType{data}
\alias{dat.nakagawa2015}
\alias{dat.nakagawa2015.1}
\alias{dat.nakagawa2015.2}
\alias{dat.nakagawa2015.3}
\alias{dat.nakagawa2015.phy1}
\alias{dat.nakagawa2015.phy2}
\title{Meta-analysis of variance: ecological and evolutionary applications and beyond.}
\description{Data from Nakagawa et al. 2015, which is a methodological paper describing effect sizes for studying the effects of a treatment/factor on variance. The study contains three different meta-analytic datasets and two phylogenies.}
\usage{dat.nakagawa2015}
\format{
The data frame dat.nakagawa2015.1 contains the following columns:
\tabular{lll}{
\bold{effect.id} \tab \code{integer} \tab a unique effect size identifier \cr
\bold{species} \tab \code{character} \tab species \cr
\bold{animal} \tab \code{character} \tab species in animal-friendly format to match the phylogeny \cr
\bold{reference} \tab \code{character} \tab reference or primary study \cr
\bold{unit} \tab \code{character} \tab unit the outcome is quantified in \cr
\bold{Trait} \tab \code{character} \tab broad categorisation of the type of trait quantified \cr
\bold{trait_details} \tab \code{character} \tab details of trait being quantified \cr
\bold{male_mean} \tab \code{numeric} \tab mean trait value for males \cr
\bold{male_SD} \tab \code{numeric} \tab sd of trait value for males \cr
\bold{male_n} \tab \code{integer} \tab sample size for males \cr
\bold{female_mean} \tab \code{numeric} \tab mean trait value for females \cr
\bold{female_SD} \tab \code{numeric} \tab sd of trait value for females \cr
\bold{female_n} \tab \code{integer} \tab sample size for females \cr
}
The data frame dat.nakagawa2015.2 contains the following columns:
\tabular{lll}{
\bold{effect.id} \tab \code{integer} \tab a unique effect size identifier \cr
\bold{species} \tab \code{character} \tab species \cr
\bold{animal} \tab \code{character} \tab species in animal-friendly format to match the phylogeny \cr
\bold{reference} \tab \code{character} \tab reference or primary study \cr
\bold{unit} \tab \code{character} \tab unit the outcome is quantified in \cr
\bold{Trait} \tab \code{character} \tab broad categoristation type of trait quantified \cr
\bold{trait_details} \tab \code{character} \tab details of trait being quantified \cr
\bold{male_mean} \tab \code{numeric} \tab mean trait value for males \cr
\bold{male_SD} \tab \code{numeric} \tab sd of trait value for males \cr
\bold{male_n} \tab \code{integer} \tab sample size for males \cr
\bold{female_mean} \tab \code{numeric} \tab mean trait value for females \cr
\bold{female_SD} \tab \code{numeric} \tab sd of trait value for females \cr
\bold{female_n} \tab \code{integer} \tab sample size for females \cr
}
The data frame dat.nakagawa2015.3 contains the following columns:
\tabular{lll}{
\bold{effect.id} \tab \code{character} \tab a unique effect size identifier \cr
\bold{Phylum} \tab \code{character} \tab phylum of parasite \cr
\bold{Class} \tab \code{character} \tab class of parasite \cr
\bold{Family} \tab \code{character} \tab family of parasite \cr
\bold{Host.species} \tab \code{character} \tab host species \cr
\bold{Host.taxon} \tab \code{character} \tab host taxa \cr
\bold{CMean} \tab \code{numeric} \tab mean trait value for control group \cr
\bold{CSD} \tab \code{numeric} \tab sd trait value for control group \cr
\bold{CN} \tab \code{integer} \tab sample size for control group \cr
\bold{EMean} \tab \code{numeric} \tab mean trait value for parasitised group \cr
\bold{ESD} \tab \code{numeric} \tab sd trait value for parasitised group \cr
\bold{EN} \tab \code{integer} \tab sample size for parasitised group \cr
\bold{Transmission.by.predation.vector.or.not} \tab \code{character} \tab is the parasite dependent upon predation for transmission \cr
\bold{spontaneous.or.stimulus.response} \tab \code{character} \tab was the response behaviour stimulated or spontaeous \cr
\bold{Behaviour.details} \tab \code{character} \tab detailed description of behaviour \cr
\bold{Year.of.publication} \tab \code{integer} \tab year published \cr
\bold{Reference} \tab \code{character} \tab reference \cr
\bold{Expected.direction.of.change} \tab \code{character} \tab whether parasitisation is expected to increase/decrease the trait mean \cr
}
The data dat.nakagawa2015.phy1 contains the topology for a phylogeny of the bird species in dat.nakagawa2015.1\cr
The data dat.nakagawa2015.phy2 contains the topology for a phylogeny of the bird species in dat.nakagawa2015.1\cr
}
\details{
Nakagawa et al. ()2015) is a methdological paper, which decribes effect sizes and models designed to study the effects of a treatment/factor on variance. The study contains three different meta-analytic datasets and two phylogenys.
dat.nakagawa2015.1 and dat.nakagawa2015.2 contain data on the mean and standard deviation (sd) in morphological traits of males and females in birds (dat.nakagawa2015.1) and mammals (dat.nakagawa2015.2). dat.nakagawa2015.phy1 and dat.nakagawa2015.phy2 are corresponding phylogenetic topologies. The sex-chromosome hypothesis predicts that the heterogametic sex (females in birds and males in mammals) will have greater variability.
dat.nakagawa2015.3 is a data set on the effects of parasitism on behavioural traits, and contains data on the mean and sd of animal behaviours of groups of animals that are and are not parasitised. The working hypothesis in the paper was that parasitism will increase variation in behaviour.
}
\source{Nakagawa, S., Poulin, R., Mengersen, K., Reinhold, K., Engqvist, L., Lagisz, M. & Senior, A.M. (2015). Meta-analysis of variance: ecological and evolutionary applications and beyond. \emph{Methods in Ecology and Evolution}, \bold{6}, 143-152}
\author{Alistair Senior, \email{alistair.senior@sydney.edu.au}}
\examples{
### copy data into 'dat' and examine data
dat <- dat.nakagawa2015.1
dat
phy <- dat.nakagawa2015.phy1
# Check a standard random effects vs a phylogenetic meta-analysis (REMA vs PMA) for the bird data
if((require(metafor) && require(ape))){
# Calculate the effect sizes - log coefficient of variance ratio
dat <- escalc(measure = "CVR", n1i = male_n, m1i = male_mean,
sd1i = male_SD, n2i = female_n,
m2i = female_mean, sd2i = female_SD,
data=dat)
# Convert phy to ultrametric format (Grafen's method)
phy<-compute.brlen(phy)
# Convert to correlation matrix
CorMatrix <- vcv.phylo(phy, cor=TRUE)
Cov <- CorMatrix[match(dat$animal, rownames(CorMatrix)),
match(dat$animal, colnames(CorMatrix))]
# Ensure the random effects are factors
dat$effect.id <- as.factor(dat$effect.id)
dat$species <- as.factor(dat$species)
dat$animal <- as.factor(dat$animal)
# Fit the models
REMA <- rma.mv(yi = yi , V = vi, random=list(~1|effect.id), data=dat)
summary(REMA)
PMA <- rma.mv(yi = yi , V = vi,
random=list(~1|animal, ~1|effect.id),
R = list(animal = Cov), data=dat)
summary(PMA)
}
}
\keyword{datasets}
\concept{ecology}
\concept{evolution}
\concept{standardized mean difference}
\concept{response ratio}
\concept{variance ratio}
|
eabdd4dcdc2b4492d469eaf702a9f5481f261c71
|
5f894b67fff7b4833f5a1ff42ee64d0f43015f5a
|
/R/utils.R
|
4d525d2b44f0d0ae2461ae98c7eed2ab7ac03d43
|
[
"MIT"
] |
permissive
|
crisprVerse/crisprScore
|
a56b885fae25da7656aa7af5a742e848caea4012
|
0bef9c4fce4e9d505bcbd847054dad1f02a0bc4f
|
refs/heads/master
| 2023-04-13T12:59:40.867354
| 2023-04-10T16:45:08
| 2023-04-10T16:45:08
| 523,799,905
| 2
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 402
|
r
|
utils.R
|
.checkSequenceInputs <- function(sequences,
return.output=TRUE
){
stopifnot(is.character(sequences))
len <- unique(nchar(sequences))
if (length(len)!=1){
stop("Provided sequences for must all have identical length.")
}
if (return.output){
return(sequences)
} else {
return(invisible(TRUE))
}
}
#' @import utils
NULL
|
efb3efe632711d4ac0deaabe4076581312d17842
|
79328b5dd4560365d89fdffc5f0d8a402ff2b6c6
|
/pkg/ternaryplot/man/ternary2xy-methods.Rd
|
f4aa9a3af1992d2a8fe7ecaa4c248c545ca99fe9
|
[] |
no_license
|
julienmoeys/ternaryplot
|
f1f97e2e409e164f8390acdaa677851a75ba1f2f
|
50c9901ce03b8e857eb1990564c43e0e5e58e36e
|
refs/heads/master
| 2021-01-21T04:47:19.042587
| 2016-06-17T16:52:13
| 2016-06-17T16:52:13
| 49,194,207
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 975
|
rd
|
ternary2xy-methods.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/aa06-ternary2xy.R
\name{ternary2xy}
\alias{ternary2xy}
\alias{ternary2xy.character}
\alias{ternary2xy.ternarySystem}
\title{Converts ternary point-data into x-y coordinates}
\usage{
ternary2xy(s, ...)
\method{ternary2xy}{character}(s, ...)
\method{ternary2xy}{ternarySystem}(s, x, ...)
}
\arguments{
\item{s}{A \code{\link[ternaryplot]{ternarySystem}} object or a
character string naming an pre-defined
\code{\link[ternaryplot]{ternarySystem-class}}.}
\item{x}{A \code{\link[base]{data.frame}} or a \code{\link[base]{matrix}}
with ternary point-data.}
\item{\dots}{Additional parameters passed to specific methods.}
}
\value{
Returns a \code{\link[base]{data.frame}} with point-data in
(columns) x and y.
}
\description{
Converts ternary point-data (bottom, left, right axis) into
x-y coordinates, according to the specification of a
\code{\link[ternaryplot]{ternarySystem-class}}
}
|
750f4c731bf4464dc83d194c39cc0a3689d8cfd7
|
2a7e77565c33e6b5d92ce6702b4a5fd96f80d7d0
|
/fuzzedpackages/bpcs/man/create_bpc_object.Rd
|
61c2c9eae7e6db124ff919401bdbb80525d65bfa
|
[
"MIT"
] |
permissive
|
akhikolla/testpackages
|
62ccaeed866e2194652b65e7360987b3b20df7e7
|
01259c3543febc89955ea5b79f3a08d3afe57e95
|
refs/heads/master
| 2023-02-18T03:50:28.288006
| 2021-01-18T13:23:32
| 2021-01-18T13:23:32
| 329,981,898
| 7
| 1
| null | null | null | null |
UTF-8
|
R
| false
| true
| 1,586
|
rd
|
create_bpc_object.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/bpc_object.R
\name{create_bpc_object}
\alias{create_bpc_object}
\title{Defines the class bpc and creates the bpc object.
To create we need to receive some defined parameters (the arguments from the bpc function), a lookup table and a the stanfit
object generated from the rstan sampling procedure}
\usage{
create_bpc_object(
stanfit,
lookup_table,
model_type,
standata,
call_arg,
cluster_lookup_table = NULL,
predictors_df = NULL,
predictors_lookup_table = NULL,
predictors_matrix = NULL
)
}
\arguments{
\item{stanfit}{Stanfit object returned by rstan::sampling}
\item{lookup_table}{lookup_table dataframe. Two columns one Index the other Names where each each index will match a string in the names}
\item{model_type}{the type of the model used to call stan (string)}
\item{standata}{a list with the data used to call the rstan::sampling procedure}
\item{call_arg}{a list with the arguments called from the bpc function}
\item{cluster_lookup_table}{a lookup table with we have random effects}
\item{predictors_df}{the data frame of the predictors for a generalized model}
\item{predictors_lookup_table}{a lookup table for generalized models}
\item{predictors_matrix}{a matrix of predictors for generalized models}
}
\value{
a bpc object
}
\description{
Defines the class bpc and creates the bpc object.
To create we need to receive some defined parameters (the arguments from the bpc function), a lookup table and a the stanfit
object generated from the rstan sampling procedure
}
|
885361f3a47494499676bf1c70f7abb449b85741
|
0a906cf8b1b7da2aea87de958e3662870df49727
|
/borrowr/inst/testfiles/matchesToCor/libFuzzer_matchesToCor/matchesToCor_valgrind_files/1609958706-test.R
|
c69bc565ad5a06cf5e00c8a67c440aba4b798c2d
|
[] |
no_license
|
akhikolla/updated-only-Issues
|
a85c887f0e1aae8a8dc358717d55b21678d04660
|
7d74489dfc7ddfec3955ae7891f15e920cad2e0c
|
refs/heads/master
| 2023-04-13T08:22:15.699449
| 2021-04-21T16:25:35
| 2021-04-21T16:25:35
| 360,232,775
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 355
|
r
|
1609958706-test.R
|
testlist <- list(x = structure(c(NaN, 2.54645670285165e+117, 4.42003930043543e-65, 1.16403863694468e+224, 5.77096118049826e+228, 9.88186273287392e-273, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0), .Dim = c(5L, 9L)))
result <- do.call(borrowr:::matchesToCor,testlist)
str(result)
|
4702e88d5ad6eb000d655061fcbf563bcc45a5eb
|
0db41c671fbfe9c5ed923af94af7e5ecb521caeb
|
/tests/testthat/test-greedy_chi_sq_periodogram.R
|
15a557fc8434a13eb878f02e67466913a61ff506
|
[] |
no_license
|
hugheylab/zeitgebr
|
f5512c339d26879b8da37ad1cd26e66ba77b0e16
|
3a8706638e76cef76d54970f6ef2fb72f9f46acc
|
refs/heads/master
| 2022-12-16T18:08:28.743363
| 2020-09-01T19:28:44
| 2020-09-01T19:28:44
| 290,217,510
| 0
| 0
| null | 2020-08-25T13:06:26
| 2020-08-25T13:06:25
| null |
UTF-8
|
R
| false
| false
| 233
|
r
|
test-greedy_chi_sq_periodogram.R
|
context("greedy_chi_sq")
test_that("greedy-chi-sq periodogram works", {
data(dams_sample)
per <- dams_sample[,
greedy_chi_sq_periodogram(activity, sampling_rate = 1/60),
by = id]
})
|
2212e8248c313a57d0e4bf0b62a569f7872451a3
|
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
|
/data/genthat_extracted_code/gcite/examples/gcite_citation_page.Rd.R
|
eff7cb8ca3b17598a8646f399f2a168722f66abb
|
[] |
no_license
|
surayaaramli/typeRrh
|
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
|
66e6996f31961bc8b9aafe1a6a6098327b66bf71
|
refs/heads/master
| 2023-05-05T04:05:31.617869
| 2019-04-25T22:10:06
| 2019-04-25T22:10:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 832
|
r
|
gcite_citation_page.Rd.R
|
library(gcite)
### Name: gcite_citation_page
### Title: Parse Google Citation Index
### Aliases: gcite_citation_page gcite_citation_page.xml_nodeset
### gcite_citation_page.xml_document gcite_citation_page.character
### gcite_citation_page.list gcite_citation_page.default
### ** Examples
if (!is_travis()) {
library(httr)
library(rvest)
url = paste0("https://scholar.google.com/citations?view_op=view_citation&",
"hl=en&oe=ASCII&user=T9eqZgMAAAAJ&pagesize=100&",
"citation_for_view=T9eqZgMAAAAJ:W7OEmFMy1HYC")
url = gcite_url(url = url, pagesize = 10, cstart = 0)
ind = gcite_citation_page(url)
doc = content(httr::GET(url))
ind = gcite_citation_page(doc)
ind_nodes = html_nodes(doc, "#gsc_vcd_table div")
ind_nodes = html_nodes(ind_nodes, xpath = '//div[@class = "gs_scl"]')
ind = gcite_citation_page(ind_nodes)
}
|
6975837d51bcc3ddc6eb2fe2be01dfa7ae9c0479
|
af3af1422679147816ef4fbb98992a86578be60a
|
/R/matchMulti-package.R
|
568d7ac49c3db279b78cbf37489f6f0fa65695dd
|
[] |
no_license
|
cran/matchMulti
|
d17dcb84d8ab549a8b6f2d877a02aa70d17ff6db
|
b3112005388feb375ab7f99116c5b06b7a0a8e29
|
refs/heads/master
| 2023-05-25T08:27:42.599051
| 2023-05-11T06:20:02
| 2023-05-11T06:20:02
| 54,420,939
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 5,601
|
r
|
matchMulti-package.R
|
#' 1980 and 1982 High School and Beyond Data
#'
#' These data are a subset of the data used in Raudenbush and Bryk (1999) for
#' multilevel modeling.
#'
#'
#' @name catholic_schools
#' @docType data
#' @format A \code{data.frame} with 1595 observations on the following
#' variables.
#'
#' school: unique school level identifier
#'
#' ses: student level socio-economic status scale ranges from approx. -3.578
#' to 2.692
#'
#' mathach: senior year mathematics test score, outcome measure
#'
#' female: student level indicator for sex
#'
#' minority: student level indicator for minority
#'
#' minority_mean: school level measure of percentage of student body that is
#' minority
#'
#' female_mean: school level measure of percentage of student body that is
#' female
#'
#' ses_mean: school level measure of average level of student socio-economic
#' status
#'
#'
#' sector: treatment indicator 1 if catholic 0 if public
#'
#' size: school level measure of total number of enrolled students
#'
#' acad: school level measure of the percentage of students on the academic
#' track
#'
#' discrm: school level measure of disciplinary climate ranges from approx.
#' -2.4 to 2.7
#'
#' size_large: school level indicator for schools with more than 1000 students
#'
#' minority_mean_large: school level indicator for schools with more than ten
#' percent minority
#'
#' @importFrom mvtnorm pmvnorm
#' @importFrom coin wilcox_test pvalue
#' @importFrom MASS ginv
#' @importFrom stats as.formula cov fisher.test mahalanobis model.matrix pnorm
#' quantile uniroot var binomial glm predict sd
#'
#' @references United States Department of Education. National Center for
#' Education Statistics. High School and Beyond, 1980: Sophomore and Senior
#' Cohort First Follow-Up (1982).
#' @source Raudenbush, S. W. and Bryk, A. (2002). \emph{Hierarchical Linear
#' Models: Applications and Data Analysis Methods}. Thousand Oaks, CA: Sage.
#' @keywords datasets
NULL
#' @title Mini-data set for illustration
#' @name minischool
#' @docType data
#' @description The Catholic schools dataset subset to a smaller number of
#' schools (with only 6 Catholic schools). See full dataset documentation for
#' more information.
#' @format A data frame with 1500 rows and 12 variables, as described in the
#' `catholic_schools` dataset.
#' @seealso catholic_schools
#' @source See documentation page for `catholic_schools` dataset.
NULL
#' matchMulti Package
#'
#' \code{matchMulti} provides and easy to use set of functions to do matching
#' with multilevel data. It is designed for use with grouped data such as
#' students in schools, where the user wishes to match a set of treated groups
#' to control groups to make the two groups more comparable.
#'
#' This package will match treated groups to control groups, but allows for
#' trimming of both units and groups to increase balance. There are also
#' functions for assessing balance after matching, estimating treatment effects
#' and performing sensitivity analysis for hidden confounders.
#'
#' @name matchMulti-package
#' @docType package
#' @author
#' c("\\Sexpr[results=rd,stage=build]{tools:::Rd_package_author(\"#1\")}",
#' "matchMulti")\Sexpr{tools:::Rd_package_author("matchMulti")}
#'
#' Maintainer:
#' c("\\Sexpr[results=rd,stage=build]{tools:::Rd_package_maintainer(\"#1\")}",
#' "matchMulti")\Sexpr{tools:::Rd_package_maintainer("matchMulti")}
#'
#' @seealso See also \code{\link{matchMulti}}, \code{\link{matchMultisens}},
#' \code{\link{balanceMulti}}, \code{\link{matchMultioutcome}},
#' \code{\link{rematchSchools}}
#' @keywords matchMulti
#' @examples
#'
#' \dontrun{
#' # Load Catholic school data
#' data(catholic_schools)
#'
#' student.cov <- c('minority','female','ses','mathach')
#'
#' # Check balance student balance before matching
#' balanceTable(catholic_schools[c(student.cov,'sector')], treatment = 'sector')
#'
#' #Match schools but not students within schools
#' match.simple <- matchMulti(catholic_schools, treatment = 'sector',
#' school.id = 'school', match.students = FALSE)
#'
#' #Check balance after matching - this checks both student and school balance
#' balanceMulti(match.simple, student.cov = student.cov)
#'
#' #Estimate treatment effect
#' output <- matchMultioutcome(match.simple, out.name = "mathach",
#' schl_id_name = "school", treat.name = "sector")
#'
#' # Perform sensitivity analysis using Rosenbaum bound -- increase Gamma to increase effect of
#' # possible hidden confounder
#'
#' matchMultisens(match.simple, out.name = "mathach",
#' schl_id_name = "school",
#' treat.name = "sector", Gamma=1.3)
#'
#' # Now match both schools and students within schools
#' match.out <- matchMulti(catholic_schools, treatment = 'sector',
#' school.id = 'school', match.students = TRUE, student.vars = student.cov)
#'
#' # Check balance again
#' bal.tab <- balanceMulti(match.out, student.cov = student.cov)
#'
#' # Now match with fine balance constraints on whether the school is large
#' # or has a high percentage of minority students
#' match.fb <- matchMulti(catholic_schools, treatment = 'sector', school.id = 'school',
#' match.students = TRUE, student.vars = student.cov,
#' school.fb = list(c('size_large'),c('size_large','minority_mean_large')))
#'
#' # Estimate treatment effects
#' matchMultioutcome(match.fb, out.name = "mathach", schl_id_name = "school", treat.name = "sector")
#'
#' #Check Balance
#' balanceMulti(match.fb, student.cov = student.cov)
#' }
#'
NULL
|
138a780479e71a19d14872e78838fb4a0bf51162
|
3d4fd9491344654eb6055930c6f407948c892fd4
|
/man/FIFO.Rd
|
815afa1ad7d9d63d6f11b866090efb4b09b2b6a9
|
[] |
no_license
|
vcerqueira/tsensembler
|
bb8542e81177e1c1da84ae53070840b1ade3d018
|
b0f1c786440ed3d09160931d970df3eeab09eb5e
|
refs/heads/master
| 2021-07-18T21:26:22.651558
| 2020-10-24T11:46:06
| 2020-10-24T11:46:06
| 82,293,256
| 34
| 14
| null | null | null | null |
UTF-8
|
R
| false
| true
| 553
|
rd
|
FIFO.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/utils.R
\name{FIFO}
\alias{FIFO}
\title{First-In First Out}
\usage{
FIFO(x, inval)
}
\arguments{
\item{x}{a vector;}
\item{inval}{new input value for vector x of the same
mode as \code{vector}}
}
\value{
A new vector \code{x}
}
\description{
First-In First Out utility function inserts a
new value \code{inval} into a given sequential vector
\code{x}, dropping the last value of the sequence
}
\examples{
FIFO(1:10, 11)
FIFO(LETTERS[1:10], letters[1])
}
\keyword{internal}
|
75f510003175811a09cb9f95060a0c6e69e0dd7c
|
285eaa3841ae93ed1636089c86002cd0f60e494a
|
/ML_TOOL/ui.R
|
da5d61a8ba4debf5a51f1529ac157d496a6ed8d2
|
[] |
no_license
|
Pratapdangeti/ML_Tool
|
527901980a07ca506450fd5451399efcc35e8269
|
e5b679cd8c7bd3b4083b6ea4cdd37ff4327c9864
|
refs/heads/main
| 2023-01-21T01:13:06.387505
| 2020-12-07T06:56:00
| 2020-12-07T06:56:00
| 319,230,213
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 12,822
|
r
|
ui.R
|
library(shiny)
library(shinydashboard)
library(DT)
library(plotly)
library(plyr)
library(rpart)
library(CORElearn)
library(randomForest)
library(gbm)
library(e1071)
library(pROC)
library(tree)
library(caret)
ui<- dashboardPage(
dashboardHeader(title = h4("Machine Learning Tool"),titleWidth = 300),
dashboardSidebar(
sidebarMenu(
menuItem("Dashboard",icon = icon("dashboard"),tabName = "dashboard"),
br(),
menuItem("Data Import", icon = icon("table"), tabName = "Data"),
menuItem("Data Treatment & EDA", icon = icon("list-alt"), tabName = "EDA"),
menuItem("Machine Learning Models", icon = icon("gears"), tabName = "Model" ),
menuItem("Model Outputs", icon = icon("bar-chart-o"), tabName = "Outputmdls")
)
),
dashboardBody(
tabItems(
tabItem(tabName = "dashboard",
fluidRow(
box(title = "Machine Learning Tool for Quickly prototyping Model",height = "900px",width = "620px",solidHeader = TRUE,status = "success",
h4(p("Welcome to Machine Learning Tool ! This is Pratap, a Data Science enthusiast who tries to make things easy for fellow Data Science enthusiasts ")),
br(),
h4(p("In this page you can quickly prototype various Machine Learning Models on your chosen data")),
br(),
h4(p("Objective of the site is to make Machine Learning models easy for everybody in an interactive way !")),
br(),
h4(p("Major steps in Machine learning models are (Click on each tab left for exploring each section) ")),
br(),
h4(p(icon("table")," Data Import ")),
br(),
h4(p(icon("list-alt")," Data Treatment & Exploratory Data Analysis ")),
br(),
h4(p(icon("gears")," Model Building on Train Data")),
br(),
h4(p(icon("bar-chart-o")," Model Test on Validation Data"))
)
)
),
tabItem("Data",
fluidRow(
box(title = "Train Data Import",width = 4,solidHeader = TRUE, status = "primary",
fileInput('chosenfile', 'Select Train Data file ',width = "200px",
accept = c(
'text/csv','text/comma-separated-values','text/tab-separated-values',
'text/plain','.csv','.tsv'
)),
checkboxInput('header', 'Select Header for file with Header', TRUE)
),
box(title = "Test Data Import",width = 4,solidHeader = TRUE, status = "primary",
fileInput('tchosenfile', 'Select Test Data file',width = "200px",
accept=c('text/csv',
'text/comma-separated-values,text/plain',
'.csv','.tsv')),
checkboxInput('theader', 'Select Header for file with Header', TRUE)
),
box(title = "Check",width = 4,solidHeader = TRUE, status = "warning",
h4(p("Upload both Train & Test data files in order to make \"Machine Learning Models\" tab work properly !"))
)
),
fluidRow(
box(title = "Show Train data",width = 12,solidHeader = TRUE,status="primary",
DT::dataTableOutput("tbl",width = "98%",height = "auto"))
),
fluidRow(
box(title = "Show Test data",width = 12,solidHeader = TRUE,status="primary",
DT::dataTableOutput("ttbl",width = "98%",height = "auto"))
),
fluidRow(
box(title = "Show Train Data Structure",width = 12,solidHeader = TRUE,status="primary",
verbatimTextOutput("smry1")
)
),
fluidRow(
box(title = "Show Test Data Structure",width = 12,solidHeader = TRUE,status="primary",
verbatimTextOutput("smry2")
)
)
),
tabItem(tabName = "EDA",
fluidRow(
tabBox(
title = "",
id = "tabset1", height = "900px",width = "620px",
tabPanel("Missing & Outlier Treatment",icon = icon("tasks"),
fluidRow(
box(title = "Select Y variable to convert (If not in correct format)",solidHeader = TRUE,status = "primary", width = 4,
uiOutput("vartm"),
actionButton("chkclss", "BEFORE CHECK",icon=icon("caret-square-o-right")),
radioButtons("toVarOpts",label = h3("To Variable Selection"), list("integer","factor","double"),selected = "factor",
inline = TRUE, width = "500px"),
br(),
actionButton("convertb", "CONVERT",icon=icon("caret-square-o-right")),
br(),
br(),
actionButton("chkclssa", "AFTER CHECK",icon=icon("caret-square-o-right"))
),
box(title = "Variable Type Viewer",solidHeader = TRUE,status = "primary", width = 6,
textOutput("chkvar"),
textOutput("chkvara")
)
)
),
tabPanel("Univariate Analysis",icon = icon("area-chart"),
fluidRow(
box(title = "Univariate Analysis",solidHeader = TRUE,status = "primary", width = 4,
uiOutput("selov"),
verbatimTextOutput("smrysv")
),
box(title = "Histogram plot",solidHeader = TRUE,status = "primary", width = 6,
plotlyOutput("chrt")
)
)
),
tabPanel(title = "Bi-Variate Analysis",icon = icon("glyphicon glyphicon-sort", lib = "glyphicon"),
fluidRow(
box(title = "Bi-variate Analysis",solidHeader = TRUE,status = "primary", width = 4,
uiOutput("selbvx"),
uiOutput("selbvy")
),
box(title = "Analysis",solidHeader = TRUE,status = "primary", width = 6,
verbatimTextOutput("smrybv")
)
)
)
)
)),
tabItem(tabName = "Model",
fluidRow(
tabBox(
title = "",
id = "tabset2", height = "1500px",width = "620px",
tabPanel("Model Selection",icon = icon("eye"),
fluidRow(
box(title = "4.1 Select Variables",width = 4,solidHeader = TRUE,status = "primary",
uiOutput("selx"),
uiOutput("sely")
),
box(title = "4.2.1 Select_Classification_Model",width = 3,solidHeader = TRUE,status = "primary",
uiOutput("rnuic")
),
box(title = "4.2.2 Select_Regression_Model",width = 3,solidHeader = TRUE,status = "primary",
uiOutput("rnuir")
)
),
fluidRow(
box(title = "4.3 Select_Model_Parameters",width = 9,solidHeader = TRUE,status = "primary",
selectInput("pselmd", label = NULL, width = "300px",
choices = list("Logistic_Regression", "Linear_Regression","Decision_Tree_Classifier", "Decision_Tree_Regressor",
"Random_Forest_Classifer","Random_Forest_Regressor","Boosting_GBM_Classifier","Boosting_GBM_Regressor",
"SVM_Classifier","SVM_Regressor","Naive_Bayes","Kmeans_Clustering", "PCA" ),
selected = "Logistic_Regression"),
uiOutput("rnwd")
),
box(title = "4.4 Model Run",solidHeader = TRUE,status = "success", width = 3,
actionButton("actionc", "RUN CLASSIFICATION MODEL",icon=icon("caret-square-o-right")),
hr(),
actionButton("actionr", "RUN REGRESSION MODEL",icon=icon("caret-square-o-right"))
)
),
fluidRow(
box(title = "4.5 Model Results",solidHeader = TRUE,status = "success", width = 12,
verbatimTextOutput("accrcy"),
verbatimTextOutput("accrcyr")
)
)
),
tabPanel("Grid Search", icon = icon("search"),
fluidRow(
box(title = "Under construction",width = 3,solidHeader = TRUE,status = "primary",
h4(p(icon("glyphicon glyphicon-wrench", lib = "glyphicon")," Under Construction " ))
)
)
)
)
)),
tabItem(tabName = "Outputmdls",
fluidRow(
box(title = "Output visualization",width = "650px",solidHeader = TRUE,status = "primary",height = "900px",
h4(p(icon("glyphicon glyphicon-wrench", lib = "glyphicon")," Under Construction " ))
)
))
)
)
)
|
4c5896a1139e17b128ac59cc6478b4fb5a0517de
|
2bb5a15f6c9764756966d9db9c7742b5df003510
|
/man/plotGeneModel.Rd
|
1b1d1f2f4b4ea1cd23cf1606a9c1b0c12481f618
|
[] |
no_license
|
hsinyenwu/RiboPlotR
|
2fe03501b6ab76c6d9b5a69cfa6d2a01e82c1976
|
b03f1f6976f94a0a3a7cfc1c73efa431214e7ccb
|
refs/heads/master
| 2023-08-04T04:40:31.707226
| 2020-09-15T15:34:41
| 2020-09-15T15:34:41
| 193,273,002
| 12
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 575
|
rd
|
plotGeneModel.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/code.R
\name{plotGeneModel}
\alias{plotGeneModel}
\title{plot plotGeneModel}
\usage{
plotGeneModel(gene, uORF, Extend = Extend, p.isoform = isoform)
}
\arguments{
\item{gene}{gene ID}
\item{uORF}{uORF ID}
\item{Extend}{number of nucleotides to extend on both side of the gene model}
\item{p.isoform}{isoform that is been plotting at the PLOTc, PLOTt,PLOTc2 or PLOTt2 function}
}
\value{
plot the gene model
}
\description{
plotGeneModel combines both plotRanges and p_site_plot_all functions
}
|
52227087452be17ab0e4235d669407b0b224d962
|
840c55a1b087be2e7d7d0750bcb66ec4415c686f
|
/Morgan/Restructure olddata.R
|
cf293fce853812bc133609d874c8efe69f786d66
|
[] |
no_license
|
DecisionNeurosciencePsychopathology/redcap_in_r
|
70cdfe116d16774444ec0262e5619f11d5c8de2a
|
b1668e85454eefb113e29e57d172a2865ce47e53
|
refs/heads/master
| 2021-05-15T04:04:47.938822
| 2021-04-09T17:21:01
| 2021-04-09T17:21:01
| 119,751,789
| 3
| 4
| null | 2020-08-14T16:04:48
| 2018-01-31T22:29:27
|
R
|
UTF-8
|
R
| false
| false
| 7,307
|
r
|
Restructure olddata.R
|
library(tidyverse)
library(chron)
#Where to find data
rootdir="C:/Users/buerkem/Box/skinner/projects_analyses/suicide_trajectories/data/soloff_csv_new/"
#Jiazhou's functions
startup()
md<-bsrc.checkdatabase2(ptcs$masterdemo, batch_size=200L)
idmap<-md$data[c("registration_redcapid","registration_wpicid","registration_soloffid")]
names(idmap)<-c("masterdemoid","wpicid","soloffid")
#Functions
#Grabs data from bsocial, everything that starts with x, minus the variable for complete
rd.var.map<-function(x){
names(bsoc$data[c("registration_redcapid",names(bsoc$data)[which(grepl(x,names(bsoc$data)))])])->bsocnames
bsocnames[-which(grepl("complete$", bsocnames))]->bsocnames
return(bsocnames)
}
##QOL interview
#Functions
#Gives value of 1 if not in range
qol.range<-function(range, cols){for (i in 1:nrow(QOL_fresh)){
if (any(sapply(QOL_fresh[i, cols], function(x){
!x %in% range & !is.na(x)
}))){
QOL_fresh$probs[i]<-1}
else{QOL_fresh$probs[i]<-0}}
return(QOL_fresh)}
#Changes these values to NA
qol.na<-function(range, cols){for (i in 1:nrow(QOL_fresh)){
QOL_fresh[i, cols]<-
sapply(QOL_fresh[i, cols], function(x){
ifelse (!x %in% range & !is.na(x) ,x<-NA,x<-x)})}
return(QOL_fresh)}
#Get form
QOL_raw <- read.csv(paste0(rootdir,"QOL_raw.csv"))
#rename the variables to something more reasonable:
QOL_fresh <- select(QOL_raw, ID, #FOLOQOL, DATEQOL,
TIME.BEGAN, QOLBA1:TIME.ENDED)
#get redcap names for each form
bsoc<-bsrc.checkdatabase2(ptcs$bsocial, batch_size=200L)
#get variables for qol
rd.var.map("qol")->qolvarmap
#change variable names to match redcap
names(QOL_fresh)<-qolvarmap[-c(18:23, 26, 77)]
as.character(QOL_fresh$qol_startdate)->QOL_fresh$qol_startdate
as.character(QOL_fresh$qol_endtime)->QOL_fresh$qol_endtime
as.character(QOL_fresh$qol_b_1_os)->QOL_fresh$qol_b_1_os
as.character(QOL_fresh$qol_b_2_a_des)->QOL_fresh$qol_b_2_a_des
# --- QOL questionnaire cleaning ------------------------------------------
# Coerce free-text description fields to character (they import as factors).
as.character(QOL_fresh$qol_b_2_b_des)->QOL_fresh$qol_b_2_b_des
as.character(QOL_fresh$qol_b_2_c_des)->QOL_fresh$qol_b_2_c_des
as.character(QOL_fresh$qol_b_2_d_des)->QOL_fresh$qol_b_2_d_des
as.character(QOL_fresh$qol_b_2_e_des)->QOL_fresh$qol_b_2_e_des
as.character(QOL_fresh$qol_b_2_f_des)->QOL_fresh$qol_b_2_f_des
as.character(QOL_fresh$qol_f_1_others)->QOL_fresh$qol_f_1_others
as.character(QOL_fresh$qol_g_2)->QOL_fresh$qol_g_2
#Change 999's to NA (999 is the "missing" sentinel in the raw export).
# NOTE(review): this rewrites each row via sapply(), which can coerce
# mixed-type rows -- verify column types survive downstream.
for (i in 1:nrow(QOL_fresh)){
QOL_fresh[i,]<-
sapply(QOL_fresh[i, ], function(x){
ifelse (x==999, x<-NA,x<-x)})}
#Range problems:
# `qol.range()` flags out-of-range rows (probs=1) and `qol.na()` NAs them;
# both are defined earlier in this file (not visible here) -- presumably
# they operate on the given column indices. TODO confirm.
##Range problems for DT scale (1-7)
#which ones don't fit get probs=1
qol.range(range=c(1:7), c(3, 20:22, 32:35, 38, 39,
44:46, 70:72, 78:80, 84:86, 88:91))->QOL_fresh
#which ones don't fit
QOL_fresh[which(QOL_fresh$probs==1),c(1, 3, 20:22, 32:35, 38, 39, 44:46, 70:72, 78:80, 84:86, 88:91)]->qolprobs
#Make dataframe of missing original (ID, question, original value, new value)
qolprobs %>% gather(key="question", value="original",-registration_redcapid)->qolprobs
qolprobs[which(!qolprobs$original %in% c(1:7) & !is.na(qolprobs$original)),]->qolprobs
mutate(qolprobs, new=NA)->qolprobs
#Change the ones that don't fit to NA
qol.na(range=c(1:7), cols=c(3, 20:22, 32:35, 38, 39, 44:46, 70:72, 78:80, 84:86, 88:91))->QOL_fresh
##Range problems for living situations (1-16)
qol.range(range=c(1:16), c(4, 8, 10, 12, 14, 16))->QOL_fresh
#which ones don't fit (No range problems here)
QOL_fresh[which(QOL_fresh$probs==1),c(1, 4, 8, 10, 12, 14, 16)]->qolprobs2
##Range problems for YES/NO
qol.range(range=c(0:1,9), c(23:30, 47:60,65:69, 81:82))->QOL_fresh
#which ones don't fit
QOL_fresh[which(QOL_fresh$probs==1),c(1, 23:30, 47:60,65:69, 81:82)]->qolprobs3
#Make dataframe of missing original (ID, question, original value, new value)
qolprobs3 %>% gather(key="question", value="original",-registration_redcapid)->qolprobs3
qolprobs3[which(!qolprobs3$original %in% c(0:1) & !is.na(qolprobs3$original)),]->qolprobs3
mutate(qolprobs3, new=NA)->qolprobs3
#Change the ones that don't fit to NA
# NOTE(review): range here is c(1:7) although these are YES/NO items and the
# check above used c(0:1,9) -- confirm this should not be c(0:1,9).
qol.na(range=c(1:7), cols=c(23:30, 47:60,65:69, 81:82))->QOL_fresh
##Range problems for 1:4 items
qol.range(range=c(1:4), c(31,64))->QOL_fresh
#which ones don't fit
QOL_fresh[which(QOL_fresh$probs==1),c(1, 31, 64)]->qolprobs4
#Make dataframe of missing original (ID, question, original value, new value)
qolprobs4 %>% gather(key="question", value="original",-registration_redcapid)->qolprobs4
qolprobs4[which(!qolprobs4$original %in% c(1:4) & !is.na(qolprobs4$original)),]->qolprobs4
mutate(qolprobs4, new=NA)->qolprobs4
#Change the ones that don't fit to NA
qol.na(range=c(1:4), cols=c(31,64))->QOL_fresh
##Range problems for 0:5 items
qol.range(range=c(0:5), c(36:37))->QOL_fresh
#which ones don't fit
QOL_fresh[which(QOL_fresh$probs==1),c(1, 36:37)]->qolprobs5
#Make dataframe of missing original (ID, question, original value, new value)
qolprobs5 %>% gather(key="question", value="original",-registration_redcapid)->qolprobs5
qolprobs5[which(!qolprobs5$original %in% c(0:5) & !is.na(qolprobs5$original)),]->qolprobs5
mutate(qolprobs5, new=NA)->qolprobs5
#Change the ones that don't fit to NA
qol.na(range=c(0:5), cols=c(36:37))->QOL_fresh
##Range problems for 1:5
qol.range(range=(1:5), c(40:43, 87))->QOL_fresh
#which ones don't fit
QOL_fresh[which(QOL_fresh$probs==1),c(1, 40:43, 87)]->qolprobs6
#Make dataframe of missing original (ID, question, original value, new value)
qolprobs6 %>% gather(key="question", value="original",-registration_redcapid)->qolprobs6
qolprobs6[which(!qolprobs6$original %in% c(1:5) & !is.na(qolprobs6$original)),]->qolprobs6
mutate(qolprobs6, new=NA)->qolprobs6
#Change the ones that don't fit to NA
qol.na(range=c(1:5), cols=c(40:43, 87))->QOL_fresh
##Range problems for 0:2- no issues
# NOTE(review): comment says 0:2 but the check below tests against 1:5 --
# confirm the intended valid range for qol_i_1.
which(!QOL_fresh$qol_i_1 %in% c(1:5))
#Put all range problems together
# (qolprobs2 is intentionally excluded: no range problems were found there.)
qol.range.probs<-rbind(qolprobs, qolprobs3, qolprobs4, qolprobs5, qolprobs6)
#Check for duplicates: in the event that the same ID has two entries within a single follow-up, just take the earliest one
any(duplicated(QOL_fresh$registration_redcapid))
#FIGURE OUT IDS LAST
# Map ids against the master demo table; flag any that do not exist there.
bsrc.findid(QOL_fresh,idmap = idmap,id.var = "registration_redcapid")->QOL_fresh
if(any(!QOL_fresh$ifexist)){message("ERROR: NOT ALL IDS EXIST IN MASTER DEMO, PLEASE FIX. Here are their soloff ids:")
print(QOL_fresh[which(!QOL_fresh$ifexist),"registration_redcapid"])}
#Figure out NAs
# Inverse of the 999->NA step above: restore the 999 sentinel in `cols`.
# Reads QOL_fresh from the enclosing environment (not passed as argument).
qol.remove.na<-function(cols){for (i in 1:nrow(QOL_fresh)){
QOL_fresh[i, cols]<-
sapply(QOL_fresh[i, cols], function(x){
ifelse (is.na(x), x<-999, x<-x)})}
return(QOL_fresh)}
qol.remove.na(c(3, 4, 8, 10, 12, 14, 16, 19:60, 65:73, 78:82, 84:86, 88:91))->QOL_fresh
# NOTE(review): `r` is a poor name for this helper data frame of column names.
as.data.frame(names(QOL_fresh))->r
|
057ad133d8f3d745332ecb6d60188db782149b56
|
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
|
/data/genthat_extracted_code/gmailr/examples/id.Rd.R
|
6ee97a72135339fa4f73c773c9fc1ef7a4782f29
|
[] |
no_license
|
surayaaramli/typeRrh
|
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
|
66e6996f31961bc8b9aafe1a6a6098327b66bf71
|
refs/heads/master
| 2023-05-05T04:05:31.617869
| 2019-04-25T22:10:06
| 2019-04-25T22:10:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 192
|
r
|
id.Rd.R
|
# Auto-extracted example for gmailr's id() accessor (returns the id of a
# gmailr message/draft object). The example body is wrapped in "Not run"
# because it requires live gmailr objects (my_message / my_draft).
library(gmailr)
### Name: id
### Title: Get the id of a gmailr object
### Aliases: id id.gmail_messages
### ** Examples
## Not run:
##D id(my_message)
##D id(my_draft)
## End(Not run)
|
61d9e1b60b696ebd95eff73ec8e0fd34adb302fb
|
4ceaa85dee194e818f41bb4da47fe41fd7dbfcf9
|
/R/sim_Nmix.R
|
c52716135580fbc1ed89746b6ed20b7f56a99e93
|
[
"CC0-1.0",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
RushingLab/WILD6900
|
6ee242e491817f065f6374e908aced3bfb0ac3dc
|
6543c0c4e4108becf65f6577b8d4b862afdadf49
|
refs/heads/master
| 2021-06-12T18:45:29.365473
| 2021-03-02T20:13:28
| 2021-03-02T20:13:28
| 156,604,120
| 13
| 3
|
NOASSERTION
| 2021-01-05T19:35:21
| 2018-11-07T20:27:52
|
HTML
|
UTF-8
|
R
| false
| false
| 1,818
|
r
|
sim_Nmix.R
|
#' sim_Nmix
#'
#' Simulate data for the N-mixture model with site-level covariates on lambda
#' and occasion-level covariates on p.
#'
#' @param J number of sites at which counts were made
#' @param K number of times that counts were made at each site
#' @param alpha vector containing the intercept and slopes of the log-linear
#'   regression relating abundance to the site covariates XN
#' @param beta vector containing the intercept and slopes of the
#'   logistic-linear regression of detection probability on Xp
#' @return List containing the simulated data (y, XN, Xp) and the
#'   data-generating values
#'
sim_Nmix <- function(J = 200, K = 3, alpha = c(1,3), beta = c(0, -5)){
  y <- array(dim = c(J, K)) # Array for observed counts (sites x occasions)

  # Ecological process ----
  # Site-level design matrix: intercept plus one standard-normal covariate
  # per slope in alpha. seq_len() (not 1:(n-1)) keeps this correct when
  # alpha has length 1 (intercept-only model).
  XN <- rep(1, J)
  for(i in seq_len(length(alpha) - 1)){
    XN <- cbind(XN, rnorm(n = J))
  }
  # Relationship expected abundance ~ covariates (log link)
  lam <- exp(XN %*% alpha)
  # Add Poisson noise: draw N from Poisson(lambda)
  N <- rpois(n = J, lambda = lam)
  totalN <- sum(N)

  # Observation process ----
  # Occasion-level design matrix for the first site.
  Xp <- rep(1, K)
  for(i in seq_len(length(beta) - 1)){
    Xp <- cbind(Xp, rnorm(n = K))
  }
  # Relationship detection prob ~ covariates (logit link)
  p <- plogis(Xp %*% beta)
  # Each remaining site gets its own occasion-level covariates; per-site
  # design matrices are stacked along a third array dimension. The guard
  # avoids the 2:1 backwards-sequence bug when J == 1.
  if (J > 1) {
    for(j in 2:J){
      Xp.temp <- rep(1, K)
      for(i in seq_len(length(beta) - 1)){
        Xp.temp <- cbind(Xp.temp, rnorm(n = K))
      }
      p <- cbind(p, plogis(Xp.temp %*% beta))
      Xp <- abind::abind(Xp, Xp.temp, along = 3)
    }
  }
  p <- t(p) # rows = sites, columns = occasions

  # Observed counts: binomial thinning of true abundance per occasion.
  for (i in seq_len(K)){
    y[,i] <- rbinom(n = J, size = N, prob = p[,i])
  }

  # Return simulated data plus data-generating values
  return(list(J = J, K = K, XN = XN, Xp = Xp,
              alpha = alpha, beta = beta,
              lam = lam, N = N,
              totalN = totalN, p = p, y = y,
              nAlpha = length(alpha), nBeta = length(beta)))
}
|
08d743bf1ba9ace7eb66414b147f0069d9cc67f6
|
e51a6d9a10841b41b8f90cbc7eb4cc8d10de3a74
|
/NFL Officials.R
|
781c6c4dce84de4e61e5d038ae60a370bd487c74
|
[] |
no_license
|
dbrait/NFL
|
7a93309bece30f3fb3f3f9958c9957e636a729ee
|
7a6bff67636ddc2e85960c7ac32189d958dd532c
|
refs/heads/master
| 2020-12-13T10:35:02.141547
| 2016-08-11T05:35:52
| 2016-08-11T05:35:52
| 65,441,374
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,761
|
r
|
NFL Officials.R
|
# Scrape NFL officials' game logs from pro-football-reference.com and
# estimate per-official penalty tendencies, adjusting for team/season and
# home field via a linear model on indicator matrices.
library(rvest)
library(stringr)
library(readr)
library(ggplot2)
library(dplyr)
library(tidyr)
library(broom)
library(lubridate)

base.url <- "http://www.pro-football-reference.com/"
officials.url <- "http://www.pro-football-reference.com/officials"

# URLs and names of every official listed on the index page.
officials_urls <- read_html(officials.url) %>%
  html_nodes("table a") %>%
  html_attr("href")

official_names <- read_html(officials.url) %>%
  html_nodes("table a") %>%
  html_text()

# Fixed: the original referenced undefined objects `officials_names` and
# `official_urls`; the objects defined above are `official_names` and
# `officials_urls`.
officials <- data_frame(name = official_names, url = officials_urls)

# For each official, fetch their game-log table and reshape each game into
# home/visitor points and penalty counts.
all.data <- officials %>%
  group_by(name, url) %>% do({
    url <- paste0(base.url, .$url)
    doc <- read_html(url)
    doc %>%
      html_nodes("table#game_logs") %>%
      html_table %>%
      first %>%
      filter(VPen != "VPen") %>%  # drop repeated header rows inside the table
      group_by(Year, Game, Position) %>% do({
        vals <- str_split(.$Game, "@")[[1]]
        data_frame(home = str_replace(vals[2], "\\*", ""),
                   away = str_replace(vals[1], "\\*", ""),
                   hpts = as.numeric(.$HPts),
                   # Fixed: column is `VPts` (original read `.$Vpts`) --
                   # verify against the live table's column names.
                   vpts = as.numeric(.$VPts),
                   hpen = as.numeric(.$HPen),
                   vpen = as.numeric(.$VPen),
                   hpenyards = as.numeric(.$HPYds),
                   # Fixed typo: was `vpenyeards`, but downstream code
                   # selects `vpenyards`.
                   vpenyards = as.numeric(.$VPYds))
      }) %>%
      ungroup
  }) %>%
  ungroup

all.data %>% write_csv("officials_data.csv")

# after scraping
all.data <- read_csv("officials_data.csv")

# One row per (game, side): home rows then away rows.
# Fixed: downstream code refers to this column as `date` (the original
# named it `data`). NOTE(review): `Year` is a season year, not a Date;
# `year(date - 180)` below assumes an actual Date -- confirm against the
# scraped table.
home <- all.data %>%
  mutate(win = as.numeric(hpts > vpts)) %>%
  select(date = Year, name, team = home, pens = hpen, yds = hpenyards, win) %>%
  mutate(home = 1)

away <- all.data %>%
  mutate(win = as.numeric(vpts > hpts)) %>%
  select(date = Year, name, team = away, pens = vpen, yds = vpenyards, win) %>%
  mutate(home = 0)

long.data <- home %>% bind_rows(away)

# Officials with the most games
all.data %>%
  group_by(name) %>%
  summarise(n = n()) %>%
  arrange(-n)

# How many games per team
long.data %>%
  group_by(team) %>%
  summarise(n = length(unique(date))) %>%
  arrange(-n)

# Convert to wide format with one indicator column per official.
wide <- long.data %>%
  mutate(seas = year(date - 180),  # shift ~half a year so a season spans Jan games
         present = 1) %>%
  spread(name, present, 0)

# officials.mat: one row per game, one column per official
officials.mat <- as.matrix(wide[, 8:ncol(wide)])
# team.mat: one row per game, one column per (team, season)
team.mat <- model.matrix( ~0 + team:factor(seas), wide)

# Regression: penalties ~ home-field + team-season + all officials involved
m <- lm(wide$pens ~ wide$home + team.mat + officials.mat)

# Count games per official so rare officials can be filtered in the plot
# below (each game appears twice in long.data, hence n()/2).
official.rollup <- long.data %>%
  group_by(name) %>%
  summarise(n.games = n() / 2)

# Per-official effects with 95% CIs, officials with >= 250 games.
tidy(m) %>%
  mutate(official = str_match(term, "officials\\.mat(.*)")[, 2]) %>%
  filter(!is.na(official)) %>%
  inner_join(official.rollup, by = c("official" = "name")) %>%
  filter(n.games >= 250) %>%
  ggplot(aes(x = reorder(official, estimate), y = estimate,
             ymin = estimate - 1.96 * std.error,
             ymax = estimate + 1.96 * std.error)) +
  geom_pointrange() +
  coord_flip() +
  ylab("Team-Adjusted Extra Penalties per Game") +
  xlab("Official Name") +
  geom_hline(yintercept = 0.0, linetype = "dashed") +
  theme_bw()

# Per-team effects for the 2015 season.
tidy(m) %>%
  mutate(team = str_match(term, "team\\.matteam(.*):factor\\(seas\\)(.*)")[, 2],
         seas = str_match(term, "team\\.matteam(.*):factor\\(seas\\)(.*)")[, 3]) %>%
  filter(!is.na(team), seas == 2015) %>%
  ggplot(aes(x = reorder(team, estimate), y = estimate,
             ymin = estimate - 1.96 * std.error,
             ymax = estimate + 1.96 * std.error)) +
  geom_pointrange() +
  coord_flip() +
  ylab("Official-Adjusted Net Penalties per Game") +
  xlab("Team") +
  geom_hline(yintercept = 0.0, linetype = "dashed") +
  theme_bw()
|
3a8ddc1334e2042b457af42fd4c12b4accf67a0c
|
28b46cf9ea408c9ef4eea6a90b18322527dea4cb
|
/R/visualization.R
|
165ccf2232646fe04947ef5f53a9da99e6cc1b33
|
[
"MIT"
] |
permissive
|
jenzopr/singlecellutils
|
743a532a945aff4b9be661050314eaeebf1f2dcc
|
316fe9f3b528de2b90641f86c5fc40bc5b7d23b2
|
refs/heads/master
| 2020-04-15T17:54:37.742798
| 2019-06-07T12:42:38
| 2019-06-07T12:42:38
| 164,892,706
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 6,125
|
r
|
visualization.R
|
#' Plots a silhouette plot
#'
#' Computes silhouette widths for the given clustering using distances in the
#' chosen reduced-dimension space, and draws them as a horizontal bar chart
#' with cells ordered within cluster by silhouette width.
#'
#' @param object A SingleCellExperiment object.
#' @param use_dimred A character string indicating which dimension reduction to use.
#' @param clusters A character string indicating which colData column to use as clusters.
#' @param na.rm Remove cells whose cluster assignment is NA.
#'
#' @return A ggplot object
#'
#' @importFrom rlang .data
#' @export
plotSilhouette <- function(object, use_dimred, clusters, na.rm = TRUE) {
  # Input validation
  if (!methods::is(object, "SingleCellExperiment")) {
    stop("Object must be of class SingleCellExperiment")
  }
  if (!use_dimred %in% SingleCellExperiment::reducedDimNames(object)) {
    stop(paste("Object must contain a reducedDim named", use_dimred))
  }
  if (!clusters %in% colnames(SummarizedExperiment::colData(object))) {
    stop(paste("Object must contain a column", clusters, "in colData"))
  }

  # Coerce the cluster annotation to a factor if necessary.
  cl <- SummarizedExperiment::colData(object)[, clusters]
  if (!is.factor(cl)) {
    cl <- factor(cl)
  }

  # Optionally drop cells without a cluster assignment.
  if (na.rm) {
    object <- object[, !is.na(cl)]
    cl <- cl[!is.na(cl)]
  }

  # Silhouette widths on pairwise distances in the reduced-dimension space.
  s <- cluster::silhouette(as.numeric(cl),
                           dist = stats::dist(SingleCellExperiment::reducedDim(object, use_dimred)))

  df <- data.frame(cell = factor(colnames(object), levels = colnames(object)),
                   silhouette = s[, "sil_width"],
                   cluster = factor(s[, "cluster"],
                                    levels = unique(cl[order(cl)]),
                                    ordered = TRUE))  # was `ordered = T`
  # Order cells by cluster, then silhouette width, for a tidy plot.
  df$cell <- factor(df$cell, levels = df$cell[order(df$cluster, df$silhouette)])

  ggplot2::ggplot(data = df, ggplot2::aes(.data$cell, .data$silhouette,
                                          color = .data$cluster, fill = .data$cluster)) +
    ggplot2::geom_bar(stat = "identity", position = "dodge") +
    ggplot2::coord_flip()
}
#' Creating a ComplexHeatmap from expression or reducedDim data of a \code{\link[SingleCellExperiment]{SingleCellExperiment}} object
#'
#' This function serves as a wrapper for the \code{\link[ComplexHeatmap]{Heatmap}} function and uses \code{features} to show expression values or columns from the \code{use_dimred} slot.
#'
#' @param object A \code{\link[SingleCellExperiment]{SingleCellExperiment}} object.
#' @param features A character vector, vector of indices or a named list thereof. In case of a list, rows are split accordingly.
#' @param exprs_values String indicating which assay contains the data that should be used for plotting, or NULL to plot from \code{use_dimred}.
#' @param use_dimred A character string indicating which dimension reduction to use, or NULL.
#' @param split_by Character vector indicating by which columns should be split. In case of length of one, it determines a column name of the \code{\link[SummarizedExperiment]{colData}} slot.
#' @param rownames A string indicating a column name of \code{\link[SummarizedExperiment]{rowData}} slot, used as alternative rownames.
#' @param scale A logical, indicating whether data should be row-centered before plotting.
#' @param col A vector of colors if the color mapping is discrete or a color mapping function if the matrix is continuous. See \code{\link[ComplexHeatmap]{Heatmap}}.
#' @param ... Additional arguments passed on to the \code{\link[ComplexHeatmap]{Heatmap}} function.
#'
#' @return A \code{\link[ComplexHeatmap]{Heatmap-class}} object.
#'
#' @export
plotComplexHeatmap <- function(object, features, exprs_values = "normcounts", use_dimred = NULL, split_by = NULL, rownames = NULL, scale = FALSE, col = NULL, ...) {
  if (!requireNamespace("ComplexHeatmap", quietly = TRUE)) {
    stop("Package ComplexHeatmap needed for this function to work. Please install it.", call. = FALSE)
  }

  # If features is a list, remember which list element each feature came
  # from so heatmap rows can be split accordingly.
  heatmap_split <- NULL
  if (is.list(features)) {
    # Make sure features is a named list
    if (is.null(names(features))) {
      names(features) <- as.character(seq_along(features))
    }
    heatmap_split <- rep(names(features), vapply(features, length, integer(1)))
    features <- unlist(features)

    # Remove duplicated entries, keeping the split labels in sync.
    duplicated_features <- duplicated(features)
    if (any(duplicated_features)) {
      warning(paste(sum(duplicated_features), "duplicated features have been removed from features."))
    }
    features <- features[!duplicated_features]
    heatmap_split <- heatmap_split[!duplicated_features]
  }

  # Plot expression values or reduced dim?
  if (!is.null(exprs_values)) {
    # NOTE(review): is_subset() only returns its result, which is discarded
    # here, so this line validates nothing. assert_is_subset() would stop on
    # failure -- confirm intent before changing.
    assertive.sets::is_subset(exprs_values, SummarizedExperiment::assayNames(object))
    if (is.character(features)) {
      feature_match <- match(features, rownames(object))
      object <- object[stats::na.omit(feature_match), ]
    } else {
      object <- object[features, ]
    }
    object %>%
      SummarizedExperiment::assay(i = exprs_values) %>%
      as.matrix() -> data

    # Add symbols instead of gene IDs
    if (!is.null(rownames) && rownames %in% colnames(SummarizedExperiment::rowData(object))) {
      rownames(data) <- SummarizedExperiment::rowData(object)[, rownames]
    }
  } else {
    if (!is.null(use_dimred)) {
      assertive.sets::is_subset(use_dimred, SingleCellExperiment::reducedDimNames(object))
      if (is.character(features)) {
        feature_match <- features
      } else {
        feature_match <- colnames(SingleCellExperiment::reducedDim(object, use_dimred))[features]
      }
      object %>%
        SingleCellExperiment::reducedDim(use_dimred) %>%
        as.data.frame() %>%
        # Fixed: select_() is deprecated; all_of() selects the same columns
        # by name from the character vector.
        dplyr::select(dplyr::all_of(feature_match)) %>%
        as.matrix() %>%
        t() -> data
    } else {
      stop("Both, exprs_values and use_dimred, cannot be NULL.")
    }
  }

  # Row-center to z-score-like values (scale = FALSE keeps row variances),
  # and pick a default color ramp matching the value range.
  if (scale) {
    data <- t(scale(t(data), scale = FALSE))  # was `scale = F`
    heatmap_color <- circlize::colorRamp2(breaks = seq(from = -10, to = 10, length.out = 9), colors = rev(RColorBrewer::brewer.pal(9, "RdBu")))
  } else {
    heatmap_color <- circlize::colorRamp2(breaks = seq(from = 0, to = max(data), length.out = 99), colors = viridis::magma(99))
  }

  # Heatmap color handling: a caller-supplied `col` takes precedence.
  if (is.null(col)) {
    col <- heatmap_color
  }

  ComplexHeatmap::Heatmap(matrix = data,
                          col = col,
                          split = heatmap_split,
                          ...)
}
|
a906abc7597458897f3e893148ed57a568b9ba54
|
1b16393b0b259bf9a0ca40d2f99599cf631f80ed
|
/app.R
|
c6a42e6bcc09c9e3bb59bc2df2e3d086063184a8
|
[] |
no_license
|
ClorisYue/dotplot
|
957e6203d293eaa5c7aa7083e9f56336c953ae5f
|
764485bc9fcc4f59203b1d5c5c845d7711437d1f
|
refs/heads/master
| 2020-03-10T01:38:44.307945
| 2018-04-11T15:44:30
| 2018-04-11T15:44:30
| 129,115,303
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,463
|
r
|
app.R
|
#
# play with dot plot
# Yue Wang
# April,2018
library(shiny)
library(ggplot2)
library(dplyr)
# UI: three dot plots of the same simulated data at increasing sizes, each
# with its own bin-size and stack-ratio sliders.
# NOTE(review): every sliderInput below has value = 30, which lies outside
# its min/max range (shiny clamps to max) -- confirm the intended defaults.
ui <- fluidPage(
  titlePanel("Play with dot plot"),
  tags$h4("Dotplot 1", class = "subtitle"),
  # Row 1: small plot (left) and medium plot (right), with their sliders.
  fluidRow(
    column(4,
           verticalLayout(
             plotOutput(outputId = "dotPlot11", width = "300px", height = "100px"),
             sliderInput("binsize1",
                         "Bin size for chart1:",
                         min = 0,
                         max = 1,
                         value = 30),
             sliderInput("stackratio1",
                         "Stackratio for chart1:",
                         min = 0.5,
                         max = 1.8,
                         value = 30)
           )
    ),column(8,
             verticalLayout(
               plotOutput(outputId = "dotPlot12", width = "600px", height = "200px"),
               fluidRow(
                 column(6, sliderInput("binsize2",
                                       "Bin size for chart2:",
                                       min = 0,
                                       max = 1,
                                       value = 30)
                 ),
                 column(6, sliderInput("stackratio2",
                                       "Stackratio for chart1:",
                                       min = 0.5,
                                       max = 1.8,
                                       value = 30)
                 )
               )
             )
    )),
  # Row 2: full-width large plot with its sliders.
  fluidRow(
    column(12,
           verticalLayout(
             plotOutput(outputId = "dotPlot13", width = "900px", height = "300px"),
             fluidRow(
               column(6, sliderInput("binsize3",
                                     "Bin size for chart2:",
                                     min = 0,
                                     max = 1,
                                     value = 30)
               ),
               column(6, sliderInput("stackratio3",
                                     "Stackratio for chart1:",
                                     min = 0.5,
                                     max = 1.8,
                                     value = 30)
               )
             )
           )
    )
  )
)
# Define server logic required to draw a histogram
# Server: simulates log-normal "time" data and renders three dot plots whose
# bin size and stack ratio are driven by the matching sliders.
server <- function(input, output) {

  # Draw `samples` evenly spaced quantiles from a log-normal distribution
  # with log-mean `mu` and log-sd `sigma`.
  # Fixed: the original overwrote `mu` and `sigma` with hard-coded values,
  # silently ignoring its arguments. `samples` is now a parameter with the
  # original hard-coded default.
  getData <- function(mu, sigma, samples = 100) {
    time <- qlnorm(ppoints(samples), mu, sigma)  # ppoints() -> probs in (0, 1)
    data.frame(time = time)
  }

  # Build dot plot `num` (1..3) at the given aspect ratio, reading the
  # matching binsizeN / stackratioN sliders (replaces the original
  # if/else-if chain).
  dotplot <- function(ratio, num) {
    data <- getData(log(11.4), 0.2)  # same effective parameters as before
    ggplot(data, aes(x = time)) +
      geom_dotplot(binwidth = input[[paste0("binsize", num)]],
                   stackratio = input[[paste0("stackratio", num)]]) +
      xlim(0, 30) +
      theme(aspect.ratio = ratio)
  }

  output$dotPlot11 <- renderPlot(dotplot(1/3, 1))
  output$dotPlot12 <- renderPlot(dotplot(1/3, 2))
  output$dotPlot13 <- renderPlot(dotplot(1/3, 3))
}
# Run the application with the ui and server defined above.
shinyApp(ui = ui, server = server)
|
00a8e6956a521eb5653edf38e8a210371ba875bf
|
c414a3d6d466ca68f9076d6cbe125e485485fea8
|
/CIPinnipedAnalysis/man/get_attendance.Rd
|
f792338cfb588c7d1126196fa0aaf502af52bb1e
|
[] |
no_license
|
jlaake/CIPinnipedAnalysis
|
1b35cafa3865a8cacfbc1c68fe9bf1ce21debc72
|
0c0d8f28a45253e7227ebc902f06819a0ecdd437
|
refs/heads/master
| 2021-01-17T09:26:37.164597
| 2019-04-12T02:28:11
| 2019-04-12T02:28:11
| 2,009,438
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 1,217
|
rd
|
get_attendance.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/attendance.R
\name{get_attendance}
\alias{get_attendance}
\title{Create daily attendance encounter history for female sea lions}
\usage{
get_attendance(resights, year = 2006, firstday = 20, lastday = 25,
areas = c("OCV", "EACS", "LNC", "LTC"))
}
\arguments{
\item{resights}{dataframe of Zalophus resights from Alive table in database}
\item{year}{4 digit year to extract}
\item{firstday}{numeric value of first day in May to use}
\item{lastday}{numeric value of last day in July to use}
\item{areas}{area codes to filter resightings}
}
\value{
list with a vector encounter histories (ch), vector of days when sighting took place (seenDays) and vector of all days (Days). Length of ch will be max of Days.
}
\description{
For a given year and day range (firstday,lastday), finds females that have been seen with a pup in July
in a set of areas (e.g., LTC,LNC,EACS,OCV) and constructs a daily encounter history of seen (1) and not seen(0)
to model attendance patterns.
}
\examples{
resights=getCalcurData(db="Zc", tbl="Alive")
get_attendance(resights,year=2006,firstday=5)
}
\author{
Jeff Laake
}
|
9da8e3cc21e10500179cc34b4470ca1cbac3e730
|
21ecc323387b8b12e996bc40f0a1e5ff69f97259
|
/scripts/FoodProvisionEfficient.R
|
fea1cf2102c1f4e2059268cb303a80c0f98475dd
|
[] |
no_license
|
DanOvando/FoodProvision2019-reply
|
a3950c8f08458cc742bf1fa4b267548576794b7c
|
35d01932c93151533fc89bff02d0eb91f4202f42
|
refs/heads/master
| 2023-03-20T00:29:05.128872
| 2021-03-26T21:12:13
| 2021-03-26T21:12:13
| 308,357,107
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 129,579
|
r
|
FoodProvisionEfficient.R
|
#Food provision code for the Nat Geo project
#Last checked: 5 May 2020
#Author: Reniel Cabral
#Extra notes:
#this is the equation I used for the derivative of delta h wrt R for assumption #2
#(1-(1-E)^(1/(1-x)))*((m*k*(1-x))/((1-(1-E)^(1/(1-x)))*x+m))*(1- (((1-(1-E)^(1/(1-x)))*(1-x)*m)/((((1-(1-E)^(1/(1-x)))*x)+m)*r)))
#Clear memory
# gc()
# rm(list = ls())
# Flag consumed later in this script: set to 1 to save plots to disk.
saveme<-0 #if 1, activate saving of plots
#libraries
library(raster)
library(tidyverse)
library(sf)
library(rredlist)
library(furrr)
library(dplyr)
library(miscTools)
library(RColorBrewer)
library(doParallel)
library(reshape)
library(data.table)
library(doParallel)
library(rgdal)
library(maptools)
library(rasterVis)
library(tmap)
library(leaflet)
library(rootSolve)
library(viridis)
#load the expert data and save as RDS for faster loading
# NOTE(review): hard-coded absolute paths -- this script only runs on the
# author's machine as written.
#Aquaexpert<-read.csv("/Users/ren/Documents/CODES/FoodProvision/Aquamaps/spatial-datasets_Aquamaps_complete_current_data_all_hcaf_species_native_expert.csv")
#saveRDS(Aquaexpert, file = "/Users/ren/Documents/CODES/FoodProvision/Aquamaps/Aquaexpert.rds")
Aquaexpert2<-readRDS(file = "/Users/ren/Documents/CODES/FoodProvision/Aquamaps/Aquaexpert.rds")
head(Aquaexpert2)
#stack all the species distribution by summing the probabilities, then plot
speciesstack<-Aquaexpert2 %>% group_by(CenterLat,CenterLong) %>% summarise(S=sum(probability))
speciesstack2<-as.data.frame(speciesstack)
empty_raster <- raster(res = 0.5)
# cellFromXY expects (lon, lat), hence selecting columns 2:1.
cells <- cellFromXY(empty_raster, as.matrix(speciesstack2[,2:1]))
empty_raster[cells] <- speciesstack2[,3]
plot(empty_raster,main="Sum of all species suitability, expert")
##load the other data and bind with expert-vetted data
#Aquaothers<-read.csv("/Users/ren/Documents/CODES/FoodProvision/Aquamaps/spatial-datasets_Aquamaps_complete_current_data_all_hcaf_species_native.csv")
#saveRDS(Aquaothers, file = "/Users/ren/Documents/CODES/FoodProvision/Aquamaps/Aquaothers.rds")
Aquaothers2<-readRDS(file = "/Users/ren/Documents/CODES/FoodProvision/Aquamaps/Aquaothers.rds")
Aquaothers2<-rbind(Aquaothers2,Aquaexpert2)
head(Aquaothers2)
# Same stacking/rasterizing as above, now for the combined data set.
speciesstackothers<-Aquaothers2 %>% group_by(CenterLat,CenterLong) %>% summarise(S=sum(probability))
speciesstack2others<-as.data.frame(speciesstackothers)
empty_rasterothers <- raster(res = 0.5)
cellsothers <- cellFromXY(empty_rasterothers, as.matrix(speciesstack2others[,2:1]))
empty_rasterothers[cellsothers] <- speciesstack2others[,3]
plot(empty_rasterothers,main="Sum of all species suitability")
#Load Costello et al. (2016) database
#CostelloData<-read.csv("/Users/ren/Documents/CODES/FoodProvision/Aquamaps/UnlumpedProjectionData.csv")
CostelloData<-read.csv("/Users/ren/Documents/CODES/FoodProvision/Aquamaps/UnlumpedProjectionData.csv", stringsAsFactors = FALSE)
#CostelloData<-read.csv("/Users/ren/Documents/CODES/FoodProvision/Aquamaps/ProjectionData.csv")
dim(CostelloData)
head(CostelloData,5)
# Exploratory checks on the 2012 slice of the projection data.
Costello2012<-CostelloData %>% filter(Year=="2012")
table(Costello2012$Dbase)
table(Costello2012$Policy)
table(Costello2012$Scenario)
head(Costello2012)
#MSY from costello of RAM, FAO, and SOFIA
Costello2012 %>% group_by(Dbase,CatchShare) %>% summarise(sum(MSY))
#Manually change species name with related species to match Aquamaps species range data
# NOTE(review): these replace() calls are applied SEQUENTIALLY, and some
# renames are two-stage (e.g. "Pleuronectes quadrituberculat." ->
# "...quadrituberculat" -> "...quadrituberculatus" near the end), so the
# order of the lines matters -- do not collapse into a single-pass recode.
# The "Ommastrephes bartramii" line is a self-mapping no-op, and
# "Penaeus brasiliensis" -> "Farfantepenaeus aztecus" maps to a different
# species (presumably a deliberate range proxy -- TODO confirm).
CostelloDataPrime<- CostelloData %>%
mutate(SciName=replace(SciName, SciName=="Sardinops melanostictus", "Sardinops sagax")) %>%
mutate(SciName=replace(SciName, SciName=="Sardinops caeruleus", "Sardinops sagax")) %>%
mutate(SciName=replace(SciName, SciName=="Sardinops ocellatus", "Sardinops sagax")) %>%
mutate(SciName=replace(SciName, SciName=="Merluccius capensis, M.paradoxus", "Merluccius capensis")) %>%
mutate(SciName=replace(SciName, SciName=="Auxis thazard, A. rochei", "Auxis thazard")) %>%
mutate(SciName=replace(SciName, SciName=="Pleuronectes quadrituberculat.", "Pleuronectes quadrituberculat")) %>%
mutate(SciName=replace(SciName, SciName=="Pseudopleuronectes herzenst.", "Pseudopleuronectes herzenst")) %>%
mutate(SciName=replace(SciName, SciName=="Herklotsichthys quadrimaculat.", "Herklotsichthys quadrimaculat")) %>%
mutate(SciName=replace(SciName, SciName=="Engraulis capensis", "Engraulis encrasicolus")) %>%
mutate(SciName=replace(SciName, SciName=="Trachypenaeus curvirostris", "Trachysalambria curvirostris")) %>%
mutate(SciName=replace(SciName, SciName=="Patinopecten yessoensis", "Mizuhopecten yessoensis")) %>%
mutate(SciName=replace(SciName, SciName=="Penaeus setiferus", "Litopenaeus setiferus")) %>%
mutate(SciName=replace(SciName, SciName=="Loligo opalescens", "Doryteuthis opalescens")) %>%
mutate(SciName=replace(SciName, SciName=="Larimichthys croceus", "Larimichthys crocea")) %>%
mutate(SciName=replace(SciName, SciName=="Loligo gahi", "Doryteuthis gahi")) %>%
mutate(SciName=replace(SciName, SciName=="Chelon haematocheilus", "Liza haematocheila")) %>%
mutate(SciName=replace(SciName, SciName=="Anadara granosa", "Tegillarca granosa")) %>%
mutate(SciName=replace(SciName, SciName=="Penaeus chinensis", "Fenneropenaeus chinensis")) %>%
mutate(SciName=replace(SciName, SciName=="Penaeus merguiensis", "Fenneropenaeus merguiensis")) %>%
mutate(SciName=replace(SciName, SciName=="Sebastes marinus", "Sebastes norvegicus")) %>%
mutate(SciName=replace(SciName, SciName=="Cancer magister", "Metacarcinus magister")) %>%
mutate(SciName=replace(SciName, SciName=="Loligo pealeii", "Doryteuthis pealeii")) %>%
mutate(SciName=replace(SciName, SciName=="Spisula polynyma", "Mactromeris polynyma")) %>%
mutate(SciName=replace(SciName, SciName=="Ommastrephes bartramii", "Ommastrephes bartramii")) %>%
mutate(SciName=replace(SciName, SciName=="Stichopus japonicus", "Apostichopus japonicus")) %>%
mutate(SciName=replace(SciName, SciName=="Penaeus notialis", "Farfantepenaeus notialis")) %>%
mutate(SciName=replace(SciName, SciName=="Psetta maxima", "Scophthalmus maximus")) %>%
mutate(SciName=replace(SciName, SciName=="Ostrea lutaria", "Ostrea chilensis")) %>%
mutate(SciName=replace(SciName, SciName=="Tawera gayi", "Tawera elliptica")) %>%
mutate(SciName=replace(SciName, SciName=="Penaeus japonicus", "Marsupenaeus japonicus")) %>%
mutate(SciName=replace(SciName, SciName=="Penaeus brasiliensis","Farfantepenaeus aztecus")) %>%
mutate(SciName=replace(SciName, SciName=="Mytilus chilensis","Mytilus edulis")) %>%
mutate(SciName=replace(SciName, SciName=="Tetrapturus audax","Kajikia audax" )) %>%
mutate(SciName=replace(SciName, SciName=="Cheilodactylus bergi","Nemadactylus bergi")) %>%
mutate(SciName=replace(SciName, SciName=="Venerupis pullastra","Venerupis corrugata")) %>%
mutate(SciName=replace(SciName, SciName=="Penaeus aztecus","Farfantepenaeus aztecus")) %>%
mutate(SciName=replace(SciName, SciName=="Penaeus duorarum","Farfantepenaeus duorarum")) %>%
mutate(SciName=replace(SciName, SciName=="Penaeus kerathurus","Melicertus kerathurus")) %>%
mutate(SciName=replace(SciName, SciName=="Penaeus californiensis","Farfantepenaeus californiensis")) %>%
mutate(SciName=replace(SciName, SciName=="Penaeus brevirostris","Farfantepenaeus brevirostris")) %>%
mutate(SciName=replace(SciName, SciName=="Penaeus latisulcatus","Melicertus latisulcatus")) %>%
mutate(SciName=replace(SciName, SciName=="Penaeus occidentalis","Litopenaeus occidentalis")) %>%
mutate(SciName=replace(SciName, SciName=="Penaeus vannamei","Litopenaeus vannamei")) %>%
mutate(SciName=replace(SciName, SciName=="Raja naevus","Leucoraja naevus")) %>%
mutate(SciName=replace(SciName, SciName=="Jasus novaehollandiae","Jasus edwardsii")) %>%
mutate(SciName=replace(SciName, SciName=="Makaira indica","Istiompax indica")) %>%
mutate(SciName=replace(SciName, SciName=="Lithodes aequispina","Lithodes aequispinus")) %>%
mutate(SciName=replace(SciName, SciName=="Eleginus navaga","Eleginus nawaga")) %>%
mutate(SciName=replace(SciName, SciName=="Saxidomus giganteus","Saxidomus gigantea")) %>%
mutate(SciName=replace(SciName, SciName=="Mugil soiuy","Liza haematocheila")) %>%
mutate(SciName=replace(SciName, SciName=="Xiphopenaeus riveti","Xiphopenaeus kroyeri")) %>%
mutate(SciName=replace(SciName, SciName=="Pleuronectes vetulus","Parophrys vetulus")) %>%
mutate(SciName=replace(SciName, SciName=="Raja radiata","Amblyraja radiata")) %>%
mutate(SciName=replace(SciName, SciName=="Aspitrigla cuculus","Chelidonichthys cuculus")) %>%
mutate(SciName=replace(SciName, SciName=="Valamugil seheli","Moolgarda seheli")) %>%
mutate(SciName=replace(SciName, SciName=="Tetrapturus albidus","Kajikia albida")) %>%
mutate(SciName=replace(SciName, SciName=="Zenopsis nebulosus","Zenopsis nebulosa")) %>%
mutate(SciName=replace(SciName, SciName=="Arius thalassinus","Netuma thalassinus")) %>%
mutate(SciName=replace(SciName, SciName=="Parika scaber","Meuschenia scaber")) %>%
mutate(SciName=replace(SciName, SciName=="Sardinops neopilchardus","Sardinops sagax")) %>%
mutate(SciName=replace(SciName, SciName=="Raja batis","Dipturus batis")) %>%
mutate(SciName=replace(SciName, SciName=="Alosa pontica","Alosa immaculata")) %>%
mutate(SciName=replace(SciName, SciName=="Conger orbignyanus","Conger orbignianus")) %>%
mutate(SciName=replace(SciName, SciName=="Acanthopagrus schlegeli","Acanthopagrus schlegelii")) %>%
mutate(SciName=replace(SciName, SciName=="Solea lascaris","Pegusa lascaris")) %>%
mutate(SciName=replace(SciName, SciName=="Raja circularis","Leucoraja circularis")) %>%
mutate(SciName=replace(SciName, SciName=="Balistes carolinensis","Balistes capriscus")) %>%
mutate(SciName=replace(SciName, SciName=="Plesiopenaeus edwardsianus","Aristaeopsis edwardsiana")) %>%
mutate(SciName=replace(SciName, SciName=="Epinephelus flavolimbatus","Hyporthodus flavolimbatus")) %>%
mutate(SciName=replace(SciName, SciName=="Epinephelus niveatus","Hyporthodus niveatus")) %>%
mutate(SciName=replace(SciName, SciName=="Epinephelus nigritus","Hyporthodus nigritus")) %>%
mutate(SciName=replace(SciName, SciName=="Epinephelus mystacinus","Hyporthodus mystacinus")) %>%
mutate(SciName=replace(SciName, SciName=="Raja oxyrinchus","Dipturus oxyrinchus")) %>%
mutate(SciName=replace(SciName, SciName=="Raja fullonica","Leucoraja fullonica")) %>%
mutate(SciName=replace(SciName, SciName=="Jasus verreauxi","Sagmariasus verreauxi")) %>%
mutate(SciName=replace(SciName, SciName=="Anadara ovalis","Lunarca ovalis")) %>%
mutate(SciName=replace(SciName, SciName=="Pseudopentaceros richardsoni","Pentaceros richardsoni")) %>%
mutate(SciName=replace(SciName, SciName=="Chelidonichthys lastoviza","Trigloporus lastoviza")) %>%
mutate(SciName=replace(SciName, SciName=="Protothaca staminea","Leukoma staminea")) %>%
mutate(SciName=replace(SciName, SciName=="Notothenia squamifrons","Lepidonotothen squamifrons")) %>%
mutate(SciName=replace(SciName, SciName=="Pleuronectes quadrituberculat","Pleuronectes quadrituberculatus")) %>%
mutate(SciName=replace(SciName, SciName=="Pseudopleuronectes herzenst","Pseudopleuronectes herzensteini")) %>%
mutate(SciName=replace(SciName, SciName=="Herklotsichthys quadrimaculat","Herklotsichthys quadrimaculatus")) %>%
filter(k>0) #remove zero carrying capacity
# ---- Present-day (2012) status and per-species carrying capacity ----
# Subset the cleaned Costello projection data to the 2012 rows.
CostelloPresentPrime<- CostelloDataPrime %>% filter(Year=="2012")
head(CostelloPresentPrime)
# Per-stock carrying capacity k = Biomass/(0.4*BvBmsy); dividing B by B/Bmsy
# gives Bmsy, and /0.4 implies the assumption Bmsy = 0.4*K -- TODO confirm.
# K, B are summed across stocks per species; F/Fmsy and B/Bmsy status are
# MSY-weighted means across stocks.
CostelloK<-CostelloDataPrime %>% filter(Year=="2012") %>% mutate(k=Biomass/(0.4*BvBmsy)) %>% group_by(SciName) %>% summarize(K=sum(k), B=sum(Biomass), Fstatus=weighted.mean(FvFmsy, MSY), Bstatus=weighted.mean(BvBmsy, MSY)) %>% mutate(BK2012=B/K)
head(CostelloK)
dim(CostelloK)
plot(CostelloK$BK2012)
# ---- 2050 business-as-usual (BAU) projections ----
# Non-RAM, non-catch-share stocks only; bvk2050 = projected biomass / K.
Costello2050<-CostelloDataPrime %>% filter(Year=="2050", Policy=="BAU", Scenario=="All Stocks", CatchShare==0, Dbase!="RAM") %>%
  #group_by(SciName) %>% summarize(catch2050=sum(Catch), biomass2050=sum(Biomass), k2050=sum(k)) %>% mutate(ER2050=catch2050/biomass2050, bvk2050=biomass2050/k2050)
  group_by(SciName) %>% summarize(catch2050=sum(Catch), biomass2050=sum(Biomass), k2050=sum(k)) %>% mutate(bvk2050=biomass2050/k2050)
head(Costello2050)
dim(Costello2050)
# Same 2050 BAU summary but over ALL stocks (no RAM / catch-share exclusion).
Costello2050ALL<-CostelloDataPrime %>% filter(Year=="2050", Policy=="BAU", Scenario=="All Stocks") %>%
  #group_by(SciName) %>% summarize(catch2050ALL=sum(Catch), biomass2050ALL=sum(Biomass), k2050ALL=sum(k)) %>% mutate(ER2050ALL=catch2050ALL/biomass2050ALL, bvk2050ALL=biomass2050ALL/k2050ALL)
  group_by(SciName) %>% summarize(catch2050ALL=sum(Catch), biomass2050ALL=sum(Biomass), k2050ALL=sum(k)) %>% mutate(bvk2050ALL=biomass2050ALL/k2050ALL)
head(Costello2050ALL)
dim(Costello2050ALL)
#combine the two databases (2012 K/status + both 2050 summaries) by species
CostelloPresent0<-left_join(CostelloK,Costello2050, by="SciName")
CostelloPresent1<-left_join(CostelloPresent0,Costello2050ALL, by="SciName")
CostelloPresent<-CostelloPresent1
#rank species by carrying capacity, largest K first
CostelloPresent<-as.data.frame(CostelloPresent)
rankedsp<-CostelloPresent[order(-CostelloPresent$K),]
#there is an <NA> in the SciName --- remove that (literal string "<NA>")
rankedsp<-rankedsp %>% filter(!SciName=="<NA>")
#sanity check: a known species should still be present after filtering
"Sardinops sagax" %in% rankedsp$SciName
head(rankedsp,5)
dim(rankedsp)# there are 1098 unique species/genus/family entries
##next step is to match Costello species (carrying their K) to Aquamaps IDs
#load the Aquamaps species-id lookup; each row carries the scientific name
#as resolved by several taxonomy authorities (WoRMS, EOL, CoL, GBIF, ITIS...)
spnamelookup<-read.csv("/Users/ren/Documents/CODES/FoodProvision/Aquamaps/aquamaps_spp_ref_revised.csv")
spnamelookup<-as.data.frame(spnamelookup)
head(spnamelookup)
dim(spnamelookup)
#sanity check: this species should resolve via at least one authority column
"Herklotsichthys quadrimaculatus" %in% c(as.character(spnamelookup$resolved_scientific_name),
                                         as.character(spnamelookup$aquamaps_sci_name),
                                         as.character(spnamelookup$worms_sci_name),
                                         as.character(spnamelookup$eol_sci_name),
                                         as.character(spnamelookup$col_sci_name),
                                         as.character(spnamelookup$gbif_sci_name),
                                         as.character(spnamelookup$itis_sci_name))
#Species in costello db included: keep a ranked species if ANY of the
#authority name columns in the Aquamaps lookup contains its SciName
include <-rankedsp %>% filter((SciName %in% spnamelookup$resolved_scientific_name) |
                                (SciName %in% spnamelookup$aquamaps_sci_name) |
                                (SciName %in% spnamelookup$worms_sci_name) |
                                (SciName %in% spnamelookup$eol_sci_name) |
                                (SciName %in% spnamelookup$col_sci_name) |
                                (SciName %in% spnamelookup$gbif_sci_name) |
                                (SciName %in% spnamelookup$itis_sci_name))
dim(include)
head(include)#these are the species in Costello DB included
#check a species known to be present in the matched set
"Clupea bentincki" %in% include$SciName
dim(include)
# #check what are the species we need information
# mfile_v2<-read.csv("/Users/ren/Documents/CODES/FoodProvision/mobility_data_paper - data.csv")
# head(mfile_v2)
# head(include)
# Add_data<-include %>% filter(! (SciName %in% mfile_v2$SciName)) %>% select(SciName)
# dim(Add_data)
# #save additional species and add to the database
# #write.csv(Add_data, file = "/Users/ren/Documents/CODES/FoodProvision/AdditionalSpecies_RevisionPNAS.csv")
#what are the species in costello db not included?
rankedsp %>% filter(!(rankedsp$SciName %in% include$SciName)) %>% dplyr::select(SciName)
#Clean species name mismatch
#what are the species ID of these?
head(spnamelookup)
spID<-include
nudge<-dim(spID)[2]
spID$v1<-spnamelookup$SPECIESID[match(include$SciName,spnamelookup$resolved_scientific_name)]#this is correct!
spID$v2<-spnamelookup$SPECIESID[match(include$SciName,spnamelookup$aquamaps_sci_name)]
spID$v3<-spnamelookup$SPECIESID[match(include$SciName,spnamelookup$worms_sci_name)]
spID$v4<-spnamelookup$SPECIESID[match(include$SciName,spnamelookup$eol_sci_name)]
spID$v5<-spnamelookup$SPECIESID[match(include$SciName,spnamelookup$col_sci_name)]
spID$v6<-spnamelookup$SPECIESID[match(include$SciName,spnamelookup$gbif_sci_name)]
spID$v7<-spnamelookup$SPECIESID[match(include$SciName,spnamelookup$itis_sci_name)]
spID$fin<-apply(spID[,(1:7)+nudge],1, function(x) unique(x[!is.na(x)]))
head(spID)
#add this spID to "include" file
include$SpeciesID<-spID$fin
head(include)
plot(include$bvk2050-include$bvk2050ALL)
weighted.mean(include$bvk2050,include$K,na.rm=T)
weighted.mean(include$bvk2050ALL,include$K,na.rm=T)
head(include)
dim(include)
#fill NAs in bvk2050
include$bvk2050[is.na(include$bvk2050)] <- -1
include <- include %>% mutate(bvk_fin=(bvk2050ALL*((bvk2050==-1)*1)) + (bvk2050*(1-((bvk2050==-1)*1))))
head(include)
#Load r then remove SciName entry in "include" if they have no r information
#this is the growth parameter
r_rev<-read.csv("/Users/ren/Documents/GitHub/FoodProvision2019/Parameters/r_data_whitneycheck - rsave_whitneycheck.csv")
head(r_rev)
rinclude<-r_rev %>% filter(r>0 | r_mean>0) %>% dplyr::select(species)
dim(rinclude)
#remove some species with no r data
include<-include %>% filter(SciName %in% rinclude$species)
dim(include) #811 species.
##TRANSFER files to VM for convertion to mollweide
#saveRDS(Aquaothers2, file = "/Users/ren/Documents/CODES/FoodProvision/Aquaothers2.rds")
#saveRDS(include, file = "/Users/ren/Documents/CODES/FoodProvision/include.rds")
#saveRDS(land_shp_moll, file = "/Users/ren/Documents/CODES/FoodProvision/land_shp_moll.rds")
##Add K to the Aquaothers2
head(Aquaothers2)
Aqua3<-merge(Aquaothers2,include,by="SpeciesID")
head(Aqua3)
dim(Aqua3)
length(unique(Aqua3$SpeciesID))
Aqua3 <- Aqua3 %>% group_by(SpeciesID) %>% mutate(totalprob=sum(probability))
Aqua3stack<-Aqua3 %>% group_by(CenterLat,CenterLong) %>% mutate(Kcell=sum(probability*K/totalprob)) %>% summarise(S=sum(probability*K/totalprob), Fstat=weighted.mean(Fstatus,Kcell), Bstat=weighted.mean(Bstatus,Kcell))
head(Aqua3stack) #S is total K per cell
Aqua3stack<-as.data.frame(Aqua3stack)
dim(Aqua3stack) #160647
head(Aqua3stack)
# raster_test <- Aqua3stack %>%
# select(CenterLong, CenterLat, S)%>%
# raster::rasterFromXYZ(crs = "+proj=longlat +datum=WGS84") %>%
# raster::projectRaster(crs = "+proj=moll")
#
# raster_test %>%
# as.data.frame(xy = T) %>%
# filter(!is.na(S)) %>%
# set_names(c("CenterLong", "CenterLat", "S")) %>%
# ggplot(aes(x=CenterLong,y=CenterLat,fill=S)) +
# geom_raster()+
# geom_sf(data = land_shp_moll, inherit.aes = F)
#
#
# empty_raster <- raster(res = 0.5)
# cells <- cellFromXY(empty_raster, as.matrix(Aqua3stack[,2:1]))
# empty_raster[cells] <- Aqua3stack[,3]
# plot(empty_raster,main="Carrying capacity per cell (MT)")
#
# if(saveme==1){
# png(file="/Users/ren/Documents/CODES/FoodProvision/Results/K_AquaCostello.png", width = 6, height = 4, units = 'in', res = 300)
# #plot(empty_raster,main="K per cell, Aquamaps + Costello et al. (2016) data (MT)")
# plot(PlotFunction(empty_raster),zlim=c(0,maxValue(empty_raster)), main="K per cell, Aquamaps + Costello et al. (2016) data (MT)",axes=F,box=F,legend=F)
# plot(empty_raster, zlim=c(0,maxValue(empty_raster)),legend.only=TRUE,legend.width=1, legend.shrink=0.75,axis.args=list(cex.axis=0.5),
# legend.args=list(text='Carrying capacity, K (MT)', side=4, font=2, line=2.5, cex=0.8))
# dev.off()
# }
#
# #plot
# crs(empty_raster) <- "+proj=longlat +datum=WGS84 +no_defs +ellps=WGS84 +towgs84=0,0,0"
# maxValue(empty_raster)
# z_pal <- list(breaks = c(0,0.5e5,1e5,1.5e5,2e5,2.5e5,3e5,5e5),
# labels = c("0-0.5e5", "0.5-1e5", "1-1.5e5", "1.5-2e5", "2-2.5e5", "2.5-3e5", "3-5e5"),
# colors = rev(c("#d73027","#fdae61","#fee090","#e0f3f8","#abd9e9","#74add1", "#4575b4")))
# land_shp <-st_read("/Users/ren/Documents/CODES/FoodProvision/landshp_moll/spatial-datasets-land-land_50.shp")
# ocean_low_res_moll<-raster::raster("/Users/ren/Documents/CODES/FoodProvision/ocean-low-res-moll.tiff")
# caption<-"hello world"
# land_shp_moll <- land_shp %>% st_transform(crs = projection(ocean_low_res_moll))
# empty_raster %>%
# raster::projectRaster(ocean_low_res_moll) %>%
# tmap::tm_shape()+
# tmap::tm_raster(title = "K (MT)",
# palette = z_pal$colors,
# breaks = z_pal$breaks,
# labels = z_pal$labels,
# legend.is.portrait = T,
# legend.reverse = T)+
# tmap::tm_shape(land_shp_moll)+
# tmap::tm_fill(col = "black", border.col = "transparent")+
# tmap::tm_credits(caption) +
# tmap::tm_layout(title = "Carrying capacity (MT per 0.5x0.5 degree)",
# title.position = c("center", .95),
# inner.margins = c(0.12, 0, 0.08, 0.04),
# frame = F,
# legend.position = c(.99, "center"))
#
# ##PLOT FISHERIES STATUS PER CELL
# #F/Fmsy
# empty_raster <- raster(res = 0.5)
# cells <- cellFromXY(empty_raster, as.matrix(Aqua3stack[,2:1]))
# empty_raster[cells] <- Aqua3stack[,4]
# plot(empty_raster,main="F/Fmsy")
#
# if(saveme==1){
# png(file="/Users/ren/Documents/CODES/FishCrime/FvFmsy.png", width = 6, height = 4, units = 'in', res = 300)
# levelplot(empty_raster, par.settings = RdBuTheme(),main="F/Fmsy")
# dev.off()
# }
#
# #-----plot, F/Fmsy
# crs(empty_raster) <- "+proj=longlat +datum=WGS84 +no_defs +ellps=WGS84 +towgs84=0,0,0"
# maxValue(empty_raster)
# z_pal <- list(breaks = c(0,1,2,3,15),
# labels = c("0-1", "1-2", "2-3", "3-15"),
# colors = rev(c("#d73027","#fdae61","#fee090", "#4575b4")))
# land_shp <-st_read("/Users/ren/Documents/CODES/FoodProvision/landshp_moll/spatial-datasets-land-land_50.shp")
# ocean_low_res_moll<-raster::raster("/Users/ren/Documents/CODES/FoodProvision/ocean-low-res-moll.tiff")
# caption<-""
# land_shp_moll <- land_shp %>% st_transform(crs = projection(ocean_low_res_moll))
# empty_raster %>%
# raster::projectRaster(ocean_low_res_moll) %>%
# tmap::tm_shape()+
# tmap::tm_raster(title = "F/Fmsy",
# palette = z_pal$colors,
# breaks = z_pal$breaks,
# labels = z_pal$labels,
# legend.is.portrait = T,
# legend.reverse = T)+
# tmap::tm_shape(land_shp_moll)+
# tmap::tm_fill(col = "black", border.col = "transparent")+
# tmap::tm_credits(caption) +
# tmap::tm_layout(title = "F/Fmsy",
# title.position = c("center", .95),
# inner.margins = c(0.12, 0, 0.08, 0.04),
# frame = F,
# legend.position = c(.99, "center"))
#
# #NOW PLOT B/BMSY
# empty_raster <- raster(res = 0.5)
# cells <- cellFromXY(empty_raster, as.matrix(Aqua3stack[,2:1]))
# empty_raster[cells] <- Aqua3stack[,5]
# plot(empty_raster,main="B/Bmsy")
#
# if(saveme==1){
# png(file="/Users/ren/Documents/CODES/FishCrime/BvBmsy.png", width = 6, height = 4, units = 'in', res = 300)
# levelplot(empty_raster, par.settings = RdBuTheme(),main="B/Bmsy")
# dev.off()
# }
#
# #-----plot, B/Bmsy
# crs(empty_raster) <- "+proj=longlat +datum=WGS84 +no_defs +ellps=WGS84 +towgs84=0,0,0"
# maxValue(empty_raster)
# z_pal <- list(breaks = c(0,1,2,3),
# labels = c("0-1", "1-2", "2-3"),
# colors = rev(c("#4575b4","#fdae61","#d73027")))
# land_shp <-st_read("/Users/ren/Documents/CODES/FoodProvision/landshp_moll/spatial-datasets-land-land_50.shp")
# ocean_low_res_moll<-raster::raster("/Users/ren/Documents/CODES/FoodProvision/ocean-low-res-moll.tiff")
# caption<-""
# land_shp_moll <- land_shp %>% st_transform(crs = projection(ocean_low_res_moll))
# empty_raster %>%
# raster::projectRaster(ocean_low_res_moll) %>%
# tmap::tm_shape()+
# tmap::tm_raster(title = "B/Bmsy",
# palette = z_pal$colors,
# breaks = z_pal$breaks,
# labels = z_pal$labels,
# legend.is.portrait = T,
# legend.reverse = T)+
# tmap::tm_shape(land_shp_moll)+
# tmap::tm_fill(col = "black", border.col = "transparent")+
# tmap::tm_credits(caption) +
# tmap::tm_layout(title = "B/Bmsy",
# title.position = c("center", .95),
# inner.margins = c(0.12, 0, 0.08, 0.04),
# frame = F,
# legend.position = c(.99, "center"))
#
# levelplot(empty_raster, par.settings = RdBuTheme())
#
# # rasterVis plot #check: https://pjbartlein.github.io/REarthSysSci/raster_intro.html
# mapTheme <- rasterTheme(region = rev(brewer.pal(10, "RdBu")))
# plt<-levelplot(PlotFunction(empty_raster), margin = F, cuts=11, pretty=TRUE, par.settings = mapTheme)
# plt
# #--this is for deriving the biological parameters c/o Chris Free
# #Derive r
# library(FishLife)
# load("/Users/ren/Documents/CODES/FoodProvision/Return.RData")
# # Predicted variables
# # -------------------------------------------
# # Loo - asymptotic length (Linf, cm)
# # K - growth coefficient (K)
# # Winfinity - Asymptotic mass (Winf, g)
# # tmax - maximum age (Amax, yr)
# # tm - age at maturity (Amat, yr)
# # M - mortality rate (M, 1/yr)
# # Lm - length at maturity (Lmat, cm)
# # Temperature - average temperature (T, °C)
# # ln_var - marginal standard deviation of recruitment variability (τ)
# # rho - autocorrelation of recruitment variability (ρ)
# # ln_MASPS - maximum annual spawners per spawner (r)
#
# # Derived variables
# # -------------------------------------------
# # ln_margsd - standard deviation for recruitment (σ): σ = sqrt(τ^2 / (1-ρ^2))
# # h / logitbound_h - steepness (h): h = ρ / (4 + ρ)
# # ln_Fmsy - FMSY
# # ln_Fmsy_over_m - FMSY/M ratio
# # r / ln_r - Intrinsic growth rate (r): dominant eigen value for Leslie matrix w/ assumptions: length-weight b=3.04, VonB t0=-0.1, maturity ogive slope=0.25*tmat
# # G / ln_G - Generation time (G, yr)
#
# fishlife2 <- function(species){
# # Setup container
# fl <- data.frame(species=sort(unique(species)),
# linf_cm=NA, k=NA, winf_g=NA, tmax_yr=NA, tmat_yr=NA,
# m=NA, lmat_cm=NA, temp_c=NA,
# sr_var=NA, sr_rho=NA, masps=NA, sr_sd=NA,
# h=NA, fmsydivm=NA, fmsy=NA, r=NA, g_yr=NA, stringsAsFactors=F)
#
# # Loop through species
# for(i in 1:nrow(fl)){
#
# # Match species to FishLife
# sciname <- fl$species[i]
# genus <- stringr::word(sciname, 1)
# nwords_in_spp <- length(strsplit(sciname, " ")[[1]])
# spp <- stringr::word(sciname, start=2, end=nwords_in_spp)
# spp <- ifelse(spp=="spp", "predictive", spp)
# try(taxa_match <- FishLife::Search_species(Genus=genus, Species = spp, add_ancestors=TRUE)$match_taxonomy)
#
# # Get predictions from FishLife (mean and covariance)
# if(inherits(taxa_match, "try-error")){
# # Record blanks
# fl[i,2:ncol(fl)] <- rep(NA, ncol(fl)-1)
# }else{
# # Extract FishLife 2.0 means
# params <- colnames(Return$beta_gv)
# mus <- Return$beta_gv[rownames(Return$beta_gv)==taxa_match[[1]], ]
# mus_use <- mus[c("Loo", "K", "Winfinity", "tmax", "tm",
# "M", "Lm", "Temperature", "ln_var", "rho", "ln_MASPS", "ln_margsd",
# "h", "ln_Fmsy_over_M", "ln_Fmsy", "r", "G")]
# fl[i,2:ncol(fl)] <- mus_use
# }
#
# }
#
# # Exponentiate columns
# # These columns are not log-transformed: "temp_c", "rho", "h", "r", "g_yr", "fmsy"
# log_cols <- c("linf_cm", "k", "winf_g", "tmax_yr", "tmat_yr", "m", "lmat_cm", "sr_var", "masps", "sr_sd", "fmsydivm", "fmsy")
# fl[,log_cols] <- exp(fl[,log_cols])
#
# # Return
# return(fl)
# }
# #----end function for deriving the biol params
#get coordinates
coords <- read.table("/Users/ren/Documents/CODES/FoodProvision/Lat_Lon_DBEM.txt", sep = ",", col.name = c("id", "lon", "lat"))
head(coords)
dim(coords)
coordUNIQUE<-unique(coords[c("lat", "lon")])
dim(coordUNIQUE) #great!
head(Aqua3)
Aqua3<-Aqua3 %>% mutate(normprob=probability/totalprob)
Aqua3 %>% group_by(SpeciesID) %>% summarize(sumtest=sum(normprob))
#RUN ONLY ONCE / NO RUN, JUST LOAD (the commented code is important)
coords2<-coords
Aqua3Important<-Aqua3 %>% dplyr::select(SpeciesID,CenterLat,CenterLong,normprob)
colnames(Aqua3Important)[which(names(Aqua3Important) == "CenterLat")] <- "lat"
colnames(Aqua3Important)[which(names(Aqua3Important) == "CenterLong")] <- "lon"
dim(Aqua3Important)
head(Aqua3Important)
TEST_UNIQUE<-unique(Aqua3Important[c("lat", "lon")])
dim(TEST_UNIQUE)
# for (i in unique(Aqua3Important$SpeciesID)){
# Aqua3sub<-Aqua3Important %>% filter(SpeciesID==i)
# Aqua3sub$SpeciesID<-NULL
# colnames(Aqua3sub)[which(names(Aqua3sub) == "normprob")] <- i
# coords2<-left_join(coords2, Aqua3sub,by=c("lon","lat"))
# }
# coords2<-coords2 %>% mutate_if(is.numeric,coalesce,0)
# saveRDS(coords2, file = "/Users/ren/Documents/CODES/FoodProvision/reshaped_costellodata_PNASrev.rds")
#saveRDS(coords2, file = "/Users/ren/Documents/CODES/FoodProvision/reshaped_costellodata_v2.rds")
#saveRDS(coords2, file = "/Users/ren/Documents/CODES/FoodProvision/reshaped_costellodata.rds") #UNLUMPED DATA
#saveRDS(coords2, file = "/Users/ren/Documents/CODES/FoodProvision/reshaped_costellodata_lumped.rds") #LUMPED DATA
#load the generated data above
Aqua4<-readRDS(file = "/Users/ren/Documents/CODES/FoodProvision/reshaped_costellodata_PNASrev.rds")
#Aqua4<-readRDS(file = "/Users/ren/Documents/CODES/FoodProvision/reshaped_costellodata_v2.rds")#this is the file for the first draft of PNAS
#Aqua4<-readRDS(file = "/Users/ren/Documents/CODES/FoodProvision/reshaped_costellodata.rds")
#Aqua4<-readRDS(file = "/Users/ren/Documents/CODES/FoodProvision/reshaped_costellodata_lumped.rds")
dim(Aqua4)
Aqua4<-distinct(Aqua4,lon,lat, .keep_all= TRUE)
dim(Aqua4)
colSums(Aqua4,na.rm=T)#to verify that the code above is correct, we should perform colsum
#Aqua4 is the rawest data. Separate managed species from the species list.
head(Aqua4) #id, lon, lat, species
#"include" contains SciName, K, SpeciesID
dim(Aqua4)
TEST_UNIQUE<-unique(Aqua4[c("lat", "lon")])
dim(TEST_UNIQUE)
##MANAGEMENT: spatial footprint of stock-assessed (managed) fisheries
#ManagementLayer<-readRDS(file = "/Users/ren/Documents/CODES/FoodProvision/ManagementLayerData.rds")
ManagementLayer<-readRDS(file = "/Users/ren/Documents/CODES/FoodProvision/ManagementLayerv3.rds") #v3 is the one with stockid
#standardize column names to match the rest of the pipeline
names(ManagementLayer)[names(ManagementLayer) == 'x'] <- 'lon'
names(ManagementLayer)[names(ManagementLayer) == 'y'] <- 'lat'
names(ManagementLayer)[names(ManagementLayer) == 'species'] <- 'SciName'
head(ManagementLayer) #x is lon, y is lat
unique(ManagementLayer$stockid)
#There are NAs in the stockid! Remove them (na.omit drops any row with an NA).
ManagementLayer <- na.omit(ManagementLayer)
#add species id code into the "ManagementLayer" file. We can get the id from "include"
head(include)
#check why the first entry has no data
head(CostelloDataPrime)
CostelloDataPrime %>% filter(SciName=="Trachurus murphyi", Year==2050)
#ManagementLayer2<-left_join(ManagementLayer,include,by="SciName") %>% select(lon,lat,SpeciesID)
#attach SpeciesID (and keep stockid) to each managed cell
ManagementLayer2<-left_join(ManagementLayer,include,by="SciName") %>% dplyr::select(lon,lat,stockid,SpeciesID)
ManagementLayer2$Value<-1  # presence indicator used by the wide reshape below
head(ManagementLayer2)
ManagementLayer2trans<-ManagementLayer2 %>% dplyr::select(lon,lat,stockid,Value)
head(ManagementLayer2trans)
unique(ManagementLayer2$stockid)
unique(ManagementLayer2$SpeciesID)
##---reshape the management layer
##NOTE: This can take some time so I will save the output and just load it.
##when new species is added, rerun the code below
#attempt failed --- memory not enough
#ManagementLayer4<-cast(ManagementLayer2trans,lon+lat~stockid)
#saveRDS(ManagementLayer4, file = "/Users/ren/Documents/CODES/FoodProvision/ManagementLayer4.rds")
#OK, THIS IS THE FINAL CODE!!! RUN ONLY ONCE --- FAST RUN. FEW MINUTES.
# count<-0
# ManagementLayerPNAS<-coords
# for (i in unique(ManagementLayer2trans$stockid)){
# ML2Tprime<-ManagementLayer2trans %>% filter(stockid==i)
# ML2Tprime$stockid<-NULL
# colnames(ML2Tprime)[which(names(ML2Tprime) == "Value")] <- i
# ManagementLayerPNAS<-left_join(ManagementLayerPNAS, ML2Tprime,by=c("lon","lat"))
# count<-count+1
# print(count)
# }
# ManagementLayerPNAS<-ManagementLayerPNAS %>% mutate_if(is.numeric,coalesce,0)
# saveRDS(ManagementLayerPNAS, file = "/Users/ren/Documents/CODES/FoodProvision/ManagementLayerPNAS.rds")
##FROM HERE, I CHANGED MNGT LAYER 3 to 4
#wide (cell x stockid) 0/1 presence table generated by the commented loop above
ManagementLayer4<-readRDS(file = "/Users/ren/Documents/CODES/FoodProvision/ManagementLayerPNAS.rds") #revised PNAS file
#ManagementLayer4<-readRDS(file = "/Users/ren/Documents/CODES/FoodProvision/ManagementLayer4.rds")#this is the original PNAS file
head(ManagementLayer4)
plot(ManagementLayer4$'BGRDRSE')
#what are the species ids of the stocks?
#Reference table mapping stockid <-> SpeciesID (n = cell count per pair)
ReferenceStockSpeciesID<-ManagementLayer2 %>% dplyr::select(stockid,SpeciesID) %>% group_by(stockid,SpeciesID) %>% summarise(n=n())
ReferenceStockSpeciesID2<-ReferenceStockSpeciesID %>% filter(is.na(SpeciesID)==F)
dim(ReferenceStockSpeciesID2)
#536 of the species in Costello et al. species list have stock assessments
#normalization function
# Normalize every species column of a cell-by-species table so that each
# column sums to 1 across all grid cells.
#
# rawfile: data frame with optional id/lon/lat columns plus one numeric
#          column per species (values may contain NA, treated as 0).
# Returns: a matrix (same dimnames as the remaining columns) in which each
#          column has been divided by its own total.
NormFunction<-function(rawfile){
  # drop the coordinate/identifier columns so only species columns remain
  for (meta_col in c("id", "lon", "lat")) {
    rawfile[[meta_col]] <- NULL
  }
  # missing probabilities contribute nothing to the column totals
  rawfile[is.na(rawfile)] <- 0
  # divide each column by its grand total (column-wise sweep)
  column_totals <- colSums(rawfile)
  sweep(as.matrix(rawfile), 2, column_totals, "/")
}
#separate managed layer from our main data (Aqua4)
head(Aqua4)
Aqua4[is.na(Aqua4)] <- 0
#normalize each species column to sum to 1 across cells
Aqua4Norm<-as.data.frame(NormFunction(Aqua4))
colSums(Aqua4Norm) #ok, answer correct (all columns sum to 1)
#bring the coordinates back using cbind (NormFunction dropped id/lon/lat)
Aqua4Norm<-cbind(Aqua4[c("id", "lon", "lat")],Aqua4Norm)
#Species that are not in the management layer/poorly managed
#(negative index drops every column whose name is a managed SpeciesID)
Aqua4poor<-Aqua4Norm[ , -which(names(Aqua4Norm) %in% ReferenceStockSpeciesID2$SpeciesID)]
head(Aqua4poor)
#Species in the management layer//not yet disaggregated into stocks
Aqua4other<-Aqua4Norm[ , which(names(Aqua4Norm) %in% ReferenceStockSpeciesID2$SpeciesID)]
Aqua4other<-cbind(Aqua4[c("id", "lon", "lat")],Aqua4other)
head(Aqua4other)
#I just want the coordinates; the loops below append one column per stock/species
AquaPoor_other<- Aqua4other %>% dplyr::select(c(lon,lat))
AquaManaged_other<- Aqua4other %>% dplyr::select(c(lon,lat))
#separating managed and unmanaged portions of each assessed species
#managed: for every stock, keep the species' normprob only inside the cells
#where the stock's management-layer indicator equals 1
for (j in ReferenceStockSpeciesID2$stockid){
  i<-ReferenceStockSpeciesID2$SpeciesID[which(ReferenceStockSpeciesID2$stockid==j)]
  #i="Fis-10768"
  Layer1<-Aqua4other %>% dplyr::select(c(lon,lat,i))        # species distribution
  Layer2<-ManagementLayer4 %>% dplyr::select(c(lon,lat,j))  # stock 0/1 footprint
  Layer3<-left_join(Layer1,Layer2,by=c("lon","lat"))
  Layer3[is.na(Layer3)] <- 0
  #col 3 = species normprob, col 4 = stock indicator; zero outside the footprint
  AquaManaged_other<-AquaManaged_other %>% mutate(myval=Layer3[,3]*(Layer3[,4]==1))
  names(AquaManaged_other)[names(AquaManaged_other) == 'myval'] <- j
}
head(AquaManaged_other)
#unmanaged: for every assessed species, keep the normprob only in cells NOT
#covered by any of that species' stocks (footprints summed across stocks)
for (i in unique(ReferenceStockSpeciesID2$SpeciesID)){
  j<-ReferenceStockSpeciesID2$stockid[which(ReferenceStockSpeciesID2$SpeciesID==i)]
  #i="Fis-22832" #this is with duplicate (a species mapped to several stocks)
  Layer1<-Aqua4other %>% select(c(lon,lat,i))
  Layer2<-ManagementLayer4 %>% select(c(lon,lat))
  Layer2prime<-ManagementLayer4 %>% select(c(j))
  Layer2$isum<-rowSums(Layer2prime,na.rm =T)  # combined footprint of all stocks
  Layer3<-left_join(Layer1,Layer2,by=c("lon","lat"))
  Layer3[is.na(Layer3)] <- 0
  #keep cells where the combined indicator is not exactly 1
  #NOTE(review): cells covered by >=2 overlapping stocks (isum>1) also pass
  #this !=1 test -- confirm that overlap cannot occur in the data
  AquaPoor_other <- AquaPoor_other %>% mutate(myval=Layer3[,3]*(Layer3[,4]!=1))
  names(AquaPoor_other)[names(AquaPoor_other) == 'myval'] <- i
}
#we can plot the managed layer to see if it is the same as expected (and not mess up the coordinates)
head(AquaManaged_other)
dim(AquaManaged_other)
#0/1 presence per stock column, then count overlapping managed stocks per cell
nmanageareas<-as.data.frame((AquaManaged_other[,3:dim(AquaManaged_other)[2]]>0)*1)
summanagedlayer<-rowSums(nmanageareas)
max(summanagedlayer)
#THIS IS THE PLOT OF THE MANAGEMENT LAYER. NOTICE THAT IT IS LESS THAN THE RAW DATA
empty_raster <- raster(res = 0.5)
cells <- cellFromXY(empty_raster, as.matrix(AquaManaged_other[,1:2]))
empty_raster[cells] <- summanagedlayer
head(empty_raster)
plot(empty_raster,main="Management",axes=F,box=F)
#Alternative management layer plot based on ggplot (Mollweide projection)
ManagementLayerPlot<-cbind(AquaManaged_other[,1:2], summanagedlayer)
head(ManagementLayerPlot)
land_shp_moll<-readRDS(file = "/Users/ren/Documents/CODES/FoodProvision/land_shp_moll.rds")
ManagementLayerPlotFin<- ManagementLayerPlot %>%
  select(lon, lat, summanagedlayer)%>%
  raster::rasterFromXYZ(crs = "+proj=longlat +datum=WGS84") %>%
  raster::projectRaster(crs = "+proj=moll") %>%
  as.data.frame(xy = T) %>%
  filter(!is.na(summanagedlayer)) %>%
  set_names(c("lon", "lat", "Count")) %>%
  ggplot(aes(x=lon,y=lat,fill=Count)) + scale_fill_viridis()+#option="plasma")+#scale_fill_gradient(color=viridis)+#scale_fill_gradient(low="white", high="#00539CFF")+#guides(fill=guide_legend())+
  theme(axis.title.x = element_blank(),axis.title.y = element_blank(), panel.background = element_blank())+
  geom_raster()+
  geom_sf(data = land_shp_moll,fill="darkgray", lwd = 0.1, inherit.aes = F)
ManagementLayerPlotFin
ggsave(file="/Users/ren/Documents/CODES/FoodProvision/PaperFigures/ManagementLayerPlotFin.png", ManagementLayerPlotFin,width = 10, height = 8, dpi = 300, units = "in")#resolution not great
#Saving the management layer for the paper (saveme flag is set elsewhere)
if(saveme==1){
  png(file="/Users/ren/Documents/CODES/FoodProvision/SupplementInfo/ManagementLayer.png", width = 6, height = 4, units = 'in', res = 300)
  plot(PlotFunction(empty_raster),zlim=c(0,maxValue(empty_raster)), axes=F,box=F,legend=F)
  plot(empty_raster, zlim=c(0,maxValue(empty_raster)),legend.only=TRUE,legend.width=1, legend.shrink=0.75,axis.args=list(cex.axis=0.5),
       legend.args=list(text='Number', side=4, font=2, line=2.5, cex=0.8))
  dev.off()
}
#these are three relevant files
head(AquaPoor_other) #unmanaged layer that have species in managed #have lon lat
head(AquaManaged_other) #managed layer #have lon,lat
head(Aqua4poor)#completely unmanaged species ##have id
#This is for calculating the multipliers of K: Kfrac is the fraction of each
#species' (normalized) distribution that falls in the given partition
KNorm_Aqua4poor<-as.data.frame(colSums(Aqua4poor[,!(names(Aqua4poor) %in% c('id','lon', 'lat'))])) #ok, answer correct
setDT(KNorm_Aqua4poor, keep.rownames = "SpeciesID")
colnames(KNorm_Aqua4poor) <- c("SpeciesID","Kfrac")
KNorm_Aqua4poor$Manage<-0
table(KNorm_Aqua4poor$Kfrac)
#Kfrac for the unmanaged portion of assessed species
KNorm_AquaPoor_other<-as.data.frame(colSums(AquaPoor_other[,!(names(AquaPoor_other) %in% c('id','lon', 'lat'))]))
setDT(KNorm_AquaPoor_other, keep.rownames = "SpeciesID")
colnames(KNorm_AquaPoor_other) <- c("SpeciesID","Kfrac")
KNorm_AquaPoor_other$Manage<-0
table(KNorm_AquaPoor_other$Kfrac)#1 species with no K, we can remove this.
#What is that species?
removespeciesAquaPoor<-KNorm_AquaPoor_other %>% filter(Kfrac==0)
removespeciesAquaPoor
KNorm_AquaPoor_other<-KNorm_AquaPoor_other %>% filter(Kfrac!=0)
#Kfrac for the managed portion, keyed by stockid instead of SpeciesID
KNorm_AquaManaged_other<-as.data.frame(colSums(AquaManaged_other[,!(names(AquaManaged_other) %in% c('id','lon', 'lat'))]))
setDT(KNorm_AquaManaged_other, keep.rownames = "stockid")
colnames(KNorm_AquaManaged_other) <- c("stockid","Kfrac")
KNorm_AquaManaged_other$Manage<-1
table(KNorm_AquaManaged_other$Kfrac) #there are 10 stocks with zero K. We can remove this. [1 stock completely managed]
#What is that species?
removespeciesAquaManaged<-KNorm_AquaManaged_other %>% filter(Kfrac==0)
removespeciesAquaManaged
dim(KNorm_AquaManaged_other)
KNorm_AquaManaged_other<-KNorm_AquaManaged_other %>% filter(Kfrac!=0)
dim(KNorm_AquaManaged_other)
#attach SpeciesID to the managed Kfrac rows via the stock<->species reference
stocktoSpID<-ReferenceStockSpeciesID2 %>% select(-n)
KNorm_AquaManaged_other_SPID<-left_join(KNorm_AquaManaged_other,stocktoSpID, by="stockid") %>%select(SpeciesID,Kfrac,Manage,stockid)
#for unmanaged rows the stockid is just the SpeciesID (keeps stockid unique)
KNorm_Aqua4poor$stockid<-KNorm_Aqua4poor$SpeciesID
KNorm_AquaPoor_other$stockid<-KNorm_AquaPoor_other$SpeciesID
KfracFile<-rbind(KNorm_Aqua4poor,KNorm_AquaPoor_other,KNorm_AquaManaged_other_SPID)
#perfect! I've calculated K multipliers. The next step is to use SpeciesID and Manage as matching columns to get Kfrac.
#The above are just K fractions, not the main file!!!
#Below we will partition the file into the final cell x stock matrices
#i will drop the id in Aqua4poor and merge it with AquaPoor_other
Aqua4poor$id <- NULL
#merge unmanaged fishery by using cbind (column-bind; rows align because both
#tables carry the same cell ordering)
PoorlyManagedComb<-cbind(AquaPoor_other,Aqua4poor[,3:dim(Aqua4poor)[2]]) #coordinates are included... I removed the coors from Aqua4poor
head(PoorlyManagedComb)
max(colSums(PoorlyManagedComb))
dim(PoorlyManagedComb)
dim(AquaPoor_other)
#ok, the above looks good. The Managed fishery is just the "AquaManaged_other"
ManagedComb<-AquaManaged_other
#remove columns we identified above in the calculation of Kfrac (Kfrac == 0)
removespeciesAquaPoor$SpeciesID
PoorlyManagedComb<-PoorlyManagedComb[ , ! names(PoorlyManagedComb) %in% removespeciesAquaPoor$SpeciesID, drop=F]
dim(PoorlyManagedComb)
dim(ManagedComb)
removespeciesAquaManaged$stockid
ManagedComb<-ManagedComb[, ! names(ManagedComb) %in% removespeciesAquaManaged$stockid, drop=F]
dim(ManagedComb)
#These are our normalized files (each stock/species column sums to 1 again)
Norm_PoorlyManagedComb<-NormFunction(PoorlyManagedComb)
Norm_ManagedComb<-NormFunction(ManagedComb)
colSums(Norm_ManagedComb) #ok, answer correct
#We could bring back the coordinates, compute K, remove K==0 to save computing space.
Coord_ManagedComb<-ManagedComb %>% select(lon,lat)
Coord_PoorlyManagedComb<-PoorlyManagedComb %>% select(lon,lat)
#coord added to normalized file ##We could decide later if we want to work on this together or run separately
CoordNorm_PoorlyManagedComb<-cbind(Coord_PoorlyManagedComb,Norm_PoorlyManagedComb)
CoordNorm_ManagedComb<-cbind(Coord_ManagedComb,Norm_ManagedComb)
#the two lines below prove the coordinates agree (both sums should be 0)
sum(CoordNorm_PoorlyManagedComb$lat-CoordNorm_ManagedComb$lat)
sum(CoordNorm_PoorlyManagedComb$lon-CoordNorm_ManagedComb$lon)
#megacell: all unmanaged + managed columns side by side, one row per cell
megacell<-cbind(Norm_PoorlyManagedComb,Norm_ManagedComb)
colnames(Norm_PoorlyManagedComb)
colnames(megacell) #it is working --- same filenames
dim(megacell)
#add sum per cell, then remove cells with no biomass share at all
Reduced_megacell<-cbind(megacell,NKsum=rowSums(megacell,na.rm=T))
#add coordinates to the megacell
Reduced_megacell2<-cbind(Coord_PoorlyManagedComb,Reduced_megacell)
dim(Reduced_megacell2)
Reduced_megacell3 <- Reduced_megacell2[ which(Reduced_megacell2$NKsum>0),]
dim(Reduced_megacell3)
kpercellforplot<-Reduced_megacell3$NKsum
Reduced_megacell3$NKsum <- NULL
#coordinates of the retained cells
CleanCoordmegacell<-Reduced_megacell3[c("lon", "lat")]
head(CleanCoordmegacell)
dim(CleanCoordmegacell)
CleanCoordmegacellUNIQUE<-unique(CleanCoordmegacell[c("lon", "lat")])
dim(CleanCoordmegacellUNIQUE)
#remove lat long: Cleanmegacell is pure cell x stock matrix of normprob shares
Reduced_megacell3$lat <- NULL
Reduced_megacell3$lon <- NULL
Cleanmegacell<-Reduced_megacell3
head(Cleanmegacell)
colSums(Cleanmegacell)
dim(Cleanmegacell)
# ##these are the reference files!!!
# CleanCoordPoor<-Reduced_CoordNorm_PoorlyManagedComb %>% select(lon,lat)
# CleanPoor<-Reduced_CoordNorm_PoorlyManagedComb %>% select(-c(lon,lat))
# CleanCoordManage<-Reduced_CoordNorm_ManagedComb %>% select(lon,lat)
# CleanManage<-Reduced_CoordNorm_ManagedComb %>% select(-c(lon,lat))
# dim(CleanPoor)
# dim(CleanManage)
#get species ids in prep for making the MegaData biological parameters table
#(one row per stock/species column of Cleanmegacell)
PoorMng<-as.data.frame(colnames(Norm_PoorlyManagedComb))
PoorMng<-cbind(PoorMng,0)
colnames(PoorMng) <- c("SpeciesID","Manage")
head(PoorMng)
dim(PoorMng)
PoorMng$stockid<-PoorMng$SpeciesID #this is for merging purpose later
#managed columns are keyed by stockid
Mng<-as.data.frame(colnames(Norm_ManagedComb))
Mng<-cbind(Mng,1)
colnames(Mng) <- c("stockid","Manage")
head(Mng)
dim(Mng)
#bind this with the real species id
head(ReferenceStockSpeciesID2)
dim(ReferenceStockSpeciesID2)
stocktoSpID<-ReferenceStockSpeciesID2 %>% select(-n)
Mng2<-left_join(Mng,stocktoSpID, by="stockid") %>%select(SpeciesID,Manage,stockid)
head(Mng2)
dim(Mng2)
#!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
#Next is to prepare a table of the biological layers?
MegaData<-rbind(PoorMng,Mng2)
#add species name (and K, bvk_fin, status columns) from "include"
MegaData<-left_join(MegaData,include,by="SpeciesID")
#Add Kfrac and Kfinal
KfracFile <- KfracFile %>% select(stockid,Kfrac) #i just need these since stockid's are unique
MegaData<-left_join(MegaData,KfracFile,by="stockid")
#MegaData$Kfin<-MegaData$K*MegaData$Kfrac
head(MegaData)
dim(MegaData)
# #add r #comment this for now. I will just load the r parameters
# biolparams<-fishlife2(as.character(MegaData$SciName))
# biolparams2<-biolparams %>% select(species,r)
# colnames(biolparams2)[colnames(biolparams2)=="species"] <- "SciName"
# MegaData<-left_join(MegaData,biolparams2,by="SciName") #ok
#Load r (intrinsic growth rate) then remove SciName entry in "include" if they have no r information
#this is the growth parameter
r_rev<-read.csv("/Users/ren/Documents/GitHub/FoodProvision2019/Parameters/r_data_whitneycheck - rsave_whitneycheck.csv")
head(r_rev)
dim(r_rev)
#stdev approximates the sd from the bounds (range/4 -- TODO confirm rationale)
r_data<-r_rev %>% filter(species %in% MegaData$SciName) %>% select(species, r_mean,ln_r_mu,ln_r_sd,r,r_lower_bound,r_upper_bound) %>% mutate(stdev=(r_upper_bound-r_lower_bound)/4)
#rename the two r sources by provenance (Thorson/FishLife vs FishBase)
colnames(r_data)[which(names(r_data) == "r_mean")] <- "r_thorson"
colnames(r_data)[which(names(r_data) == "r")] <- "r_fishbase"
colnames(r_data)[which(names(r_data) == "species")] <- "SciName"
#r_fin: per-row sum of the two sources with NAs dropped; acts as
#"whichever source is available" when only one of the two is non-NA
r_data<-r_data %>% rowwise() %>% mutate(r_fin = sum(r_thorson,r_fishbase, na.rm=TRUE))
MegaData<-left_join(MegaData,r_data,by="SciName")
dim(MegaData)
head(MegaData)
# rbound<-r_rev %>% filter(r>0) %>% mutate(stdev=(r_upper_bound-r_lower_bound)/4)#, rlowerfrac=(r-r_lower_bound)/r, rupperfrac=(r_upper_bound-r)/r)
# plot(rbound$stdev)
# xx<-seq(0, 2, length.out=1000)
# yy<-dnorm(xx, mean = 0.60, sd = 0.1275, log = FALSE)
# plot(xx,yy)
# #To do: plot all r curve!
#mobility: map the ordinal m_index (1/2/3) to the model parameter m_fin
mfile<-read.csv("/Users/ren/Documents/GitHub/FoodProvision2019/Parameters/mobility_data_paper - data.csv")
mfile$m_fin<-mfile$m_index
mfile<-mfile %>% mutate(m_fin=replace(m_fin,m_fin==1,0.1),
                        m_fin=replace(m_fin,m_fin==2,0.3),
                        m_fin=replace(m_fin,m_fin==3,0.9))
head(mfile)
dim(mfile)
#export the combined r and mobility table for the Supplementary Information
SI_r_and_m_data<-left_join(r_data,mfile,by="SciName")
head(SI_r_and_m_data)
dim(SI_r_and_m_data)
write.csv(SI_r_and_m_data, file = "/Users/ren/Documents/CODES/FoodProvision/SupplementInfo/SI_r_and_m_data.csv")
mfile<-mfile %>% select(SciName,m_fin)
MegaData<-left_join(MegaData,mfile,by="SciName")
head(MegaData)
dim(MegaData)
#ER is exploitation rate, E is escapement. This is for stock-assessed.
# Load exploitation rates for assessed stocks from two sources and keep the
# PNAS rows only for stocks not already covered by the primary file.
ERmanage<-read.csv("/Users/ren/Documents/CODES/FoodProvision/MatchedER - MatchedERFin.csv")
ERmanage_add_PNAS<-read.csv("/Users/ren/Documents/CODES/FoodProvision/MatchedER_PNAS.csv")
head(ERmanage)
head(ERmanage_add_PNAS)
ERmanage_add_PNAS_newSp<-ERmanage_add_PNAS %>% filter(! stockid %in% ERmanage$stockid)
dim(ERmanage)
ERmanage<-rbind(ERmanage,ERmanage_add_PNAS_newSp)
dim(ERmanage)
MegaData<-left_join(MegaData,ERmanage,by="stockid")
#ERmanage is the exploitation rate of managed fishery
#if ERmanage is > r, make ER==r. AT ER=r, biomass and catch will be zero
# ERset caps ER at r_fin; escapement E is the complement of the (capped) rate.
MegaData <- MegaData %>% mutate(ERset= (ER*(ER<=r_fin)) + (r_fin*(ER>r_fin))) %>% mutate(E=1-ERset)
head(MegaData)
#calculate E that will result to bvk_fin, add E at msy, calculate E that will make BvK=0.1
# ER_costello solves bvk_fin = 1 - ER/r_fin for ER; Emsy targets B/K = 0.5,
# EBvK01 targets B/K = 0.9 of the equilibrium relation.
MegaData <- MegaData %>% mutate(ER_costello=r_fin-(bvk_fin*r_fin)) %>% mutate(E_costello=1-ER_costello, Emsy=1-(0.5*r_fin), EBvK01=1-(0.9*r_fin))
head(MegaData)
# Sanity checks on the ranges of the derived rates.
min(MegaData$bvk_fin)
max(MegaData$bvk_fin)
max(MegaData$ER_costello)
min(MegaData$ER_costello)
max(MegaData$E_costello)
min(MegaData$E_costello)
##REVISED ASSUMPTION 1: Default is E and E_costello otherwise --- this means
#E costello 2050 + E constant (BAU2 assumption)
# Use the assessed escapement E where available; flag missing values with the
# sentinel -1 so they can be filled from the Costello-derived escapement below.
MegaData$Emanage<-MegaData$E
min(MegaData$Emanage,na.rm=T)
MegaData$Emanage[is.na(MegaData$Emanage)] <- -1
min(MegaData$Emanage)
# Efin: assessed escapement when present, otherwise E_costello.
MegaData <- MegaData %>% mutate(Efin=(Emanage*(Emanage!=-1)) + (E_costello*(Emanage==-1)))
head(MegaData)
#Assumption 2
#E assumption that would make BvK 0.1 for all stocks, add also Emsy
MegaData <- MegaData %>% mutate(EBvK01fin=(Emanage*(Emanage!=-1)) + (EBvK01*(Emanage==-1)))
head(MegaData)
MegaData <- MegaData %>% mutate(EBvK01_msy=(Emsy*(Emanage!=-1)) + (EBvK01*(Emanage==-1)))
head(MegaData)
##add Efin + MSY assumption for species with stock assessment
MegaData<-MegaData %>% mutate(Efin_msy= (Efin*(Manage==0)+ Emsy*(Manage==1)))
head(MegaData)
# ##add scorched earth + current E for others, and scorched earth + MSY
# MegaData<-MegaData %>% mutate(Escorched_current= (((1-r_fin)*(Manage==0))+ (Efin*(Manage==1))), Escorched_msy= (((1-r_fin)*(Manage==0))+ (Emsy*(Manage==1))))
# head(MegaData)
# #50% of the poorly managed will be managed at msy
# MegaData$randomnum<-runif(dim(MegaData)[1])
# head(MegaData)
# MegaData<-MegaData %>% mutate(Efinhalf_msy= (Efin*(Manage==0 & randomnum<=0.5)+ Emsy*(Manage==0 & randomnum>0.5)+ Emsy*(Manage==1)))
# head(MegaData)
#BAU1: check Fstatus (then F current forever)
# Efin_BAU1: assessed stocks (Manage==1) keep Efin; unassessed stocks keep Efin
# when overfished/overfishing (Fstatus>1 or Bstatus<1), otherwise hold current
# B/K (BK2012) constant via E = 1 - r + BK2012*r.
MegaData <- MegaData %>% mutate(Efin_BAU1=Efin*(Manage==1)+Efin*((Fstatus>1 | Bstatus<1) & Manage==0)+ (1-r_fin+(BK2012*r_fin))*(Fstatus<1 & Bstatus>1 & Manage==0))
head(MegaData)
plot(MegaData$Efin,MegaData$Efin_BAU1)
min(MegaData$Efin)
# Escapement can never be negative; clamp every derived escapement column at 0.
# (pmax keeps NA as NA, matching the original logical-subset assignments.)
for (esc_col in c("Efin_BAU1", "Efin", "Emsy", "Efin_msy", "EBvK01fin", "EBvK01_msy")) {
  MegaData[[esc_col]] <- pmax(MegaData[[esc_col]], 0)
}
# #check this later
# halfearth<-MegaData %>% filter(Manage==0) %>% select(MSYfin,Efin,Emsy) %>% mutate(ERratio=(1-Efin)/(1-Emsy))
# head(halfearth)
# plot(halfearth$MSYfin,halfearth$ERratio)
#MSY per species from costello et al.
MSYperspeciescostello<-CostelloPresentPrime %>% select(SciName,MSY) %>% filter(SciName %in% MegaData$SciName) %>% group_by(SciName) %>% summarize(MSYtotal=sum(MSY))
MegaData<-left_join(MegaData,MSYperspeciescostello,by="SciName")
# Split species-level MSY to stocks by the K fraction, then back out carrying
# capacity from the surplus-production relation MSY = r*K/4.
MegaData$MSYfin<-MegaData$MSYtotal*MegaData$Kfrac
MegaData$Kfin<-4*MegaData$MSYfin/MegaData$r_fin
sum(MegaData$MSYfin)
sum(MegaData$Kfin)
head(MegaData)
# Compare the recomputed stock K against the original K scaled by Kfrac (1:1 line).
plot(MegaData$Kfin,MegaData$K*MegaData$Kfrac)
abline(0,1)
#plot K per stock per species for SI
head(MegaData)
# change fill color by groups and add text labels
# SI figure: horizontal bar chart of the 50 largest stocks by carrying capacity,
# labeled with species names.
MegaDataKplot<-MegaData[order(-MegaData$Kfin),] %>% slice(1:50)
plotMegaDataKplot<-ggplot(MegaDataKplot, aes(x = reorder(stockid, Kfin), y = Kfin)) +
geom_bar(fill="steelblue",stat = "identity") +
coord_flip() +
geom_text(aes(label = SciName,size=14), nudge_y = 9e6, color = "black")+
labs(y = "Carrying capacity, K (MT)", x="Fish stock")+ ylim(0, max(MegaDataKplot$Kfin)+1.5e7)+
theme(axis.text=element_text(size=14),
axis.title=element_text(size=16,face="bold"),
legend.position="none")
plotMegaDataKplot
png(file="/Users/ren/Documents/CODES/FoodProvision/SupplementInfo/KperStock.png", width = 10, height = 10,units = 'in', res = 300)
plotMegaDataKplot
dev.off()
#plot K per species for SI
head(MegaData)
sum((MegaData$Manage==0)*1)
# SI figure: top-50 unassessed (Manage==0) species by species-level carrying
# capacity KfinTot = 4*MSYtotal/r_fin.
MegaDataKplot<-MegaData %>% filter(Manage==0) %>% mutate(KfinTot=4*MSYtotal/r_fin)
MegaDataKplot<-MegaDataKplot[order(-MegaDataKplot$KfinTot),] %>% slice(1:50)
plotMegaDataKplot<-ggplot(MegaDataKplot, aes(x = reorder(SciName, KfinTot), y = KfinTot)) +
geom_bar(fill="steelblue",stat = "identity") +
coord_flip() +
#geom_text(aes(label = SciName,size=14), nudge_y = 8e6, color = "black")+
labs(y = "Carrying capacity, K (MT)", x="Species")+ #ylim(0, 4.7e7)+
theme(axis.text=element_text(size=14),
axis.title=element_text(size=16,face="bold"),
legend.position="none")
plotMegaDataKplot
png(file="/Users/ren/Documents/CODES/FoodProvision/SupplementInfo/KperSpecies.png", width = 10, height = 10,units = 'in', res = 300)
plotMegaDataKplot
dev.off()
#Check total MSY and compare to Costello et al.
#This is reported in the paper
sum((MegaData$r_fin*MegaData$Kfin)/4) #MSYtotal
sum(MegaData$Kfin) #total K
#MegaData<-readRDS(file = "/Users/ren/Documents/CODES/FoodProvision/MegaData.rds")
#Ropt, Hopt
head(MegaData)
# Ropt: the MPA (reserve) fraction that maximizes equilibrium harvest for each
# stock, as a function of mobility m_fin, growth r_fin and BAU escapement.
# NOTE(review): appears to be the analytical optimum of the dH(R) expression
# used in the curve-fitting loops below — TODO confirm the derivation.
MegaData$Ropt<-((MegaData$m_fin*MegaData$r_fin) + (((2*MegaData$Efin_BAU1)-2)*MegaData$m_fin)) / (((MegaData$Efin_BAU1-1)*MegaData$r_fin)+(((2*MegaData$Efin_BAU1)-2)*MegaData$m_fin))
hist(MegaData$Ropt,xlab="Ropt",main="")
#SI plot for paper
#Optimal MPA size per species
# SI figure: distribution of optimal MPA size (Ropt, in %) across stocks,
# keeping only stocks whose optimum lies in (0, 100]; dashed line = median.
optimalMPAsize<-as.data.frame(MegaData$Ropt*100)
names(optimalMPAsize) <- c("Ropt")
optimalMPAsize<-optimalMPAsize %>% filter(Ropt<=100) %>% filter(Ropt>0)
head(optimalMPAsize)
dim(optimalMPAsize)
#hist(optimalMPAsize,xlab="Ropt",main="")
mu<-median(optimalMPAsize$Ropt)
mu
mean(optimalMPAsize$Ropt)
optimalMPAsize<-as.data.frame(optimalMPAsize)
p<-ggplot(optimalMPAsize, aes(x=Ropt)) +geom_histogram()+
#geom_histogram(fill="white", position="dodge")+
geom_vline(xintercept=mu,
linetype="dashed", colour="red")+
theme(legend.position="top")+labs(x="MPA size (0-100%)",y="Number of stocks")
p
png(file="/Users/ren/Documents/CODES/FoodProvision/SupplementInfo/Ropt.png", width = 6, height = 6,units = 'in',res = 300)
p
dev.off()
#another figure in the SI
# SI figure: distribution of current B/K across stocks; dashed line = median.
BK<-ggplot(MegaData, aes(x=bvk_fin)) +geom_histogram()+
geom_vline(xintercept=median(MegaData$bvk_fin),
linetype="dashed", colour="red")+
theme(legend.position="top")+labs(x="B/K",y="Number of stocks")
BK
png(file="/Users/ren/Documents/CODES/FoodProvision/SupplementInfo/BK.png", width = 6, height = 6,units = 'in',res = 300)
BK
dev.off()
#Given Ropt, what is Hopt???
# Hopt: harvest gain at the optimal reserve fraction Ropt relative to the
# no-reserve harvest — same dH expression as in the curve-fitting loops below,
# evaluated at R = Ropt.
MegaData$Hopt<-((1-MegaData$Efin_BAU1)*((MegaData$m_fin*MegaData$Kfin*(1-MegaData$Ropt))/(MegaData$Ropt-(MegaData$Efin_BAU1*MegaData$Ropt)+MegaData$m_fin))*(1-(((1-MegaData$Efin_BAU1)*(1-MegaData$Ropt)*MegaData$m_fin)/((MegaData$Ropt-(MegaData$Efin_BAU1*MegaData$Ropt)+MegaData$m_fin)*MegaData$r_fin)))) - ((1-MegaData$Efin_BAU1)*((MegaData$r_fin+MegaData$Efin_BAU1-1)/MegaData$r_fin)*MegaData$Kfin)
hist(MegaData$Hopt)
##What proportion of MSY is managed vs unmanaged?
MegaData %>% group_by(Manage) %>% summarise(msy=sum(MSYfin)) %>% mutate(proportion=msy/sum(msy))
#34.3% of the stocks have stock assessment
#install.packages("nls2")
#install.packages("minpack.lm")
# Levenberg-Marquardt nonlinear least squares used for the curve fits below.
library(nls2)
library(minpack.lm)
# #plot curve for different MPA size
# dH<-vector()
# Rvec<-vector()
# count<-0
# FracMPA<-seq(0,1,0.01)
# E<-MegaData$Efin[1]
# m<-MegaData$m[1]
# K<-MegaData$Kfin[1]
# r<-MegaData$r[1]
# for (R in FracMPA){
# count<-count+1
# dH[count]<-((1-E)*((m*K*(1-R))/(R-(E*R)+m))*(1-(((1-E)*(1-R)*m)/((R-(E*R)+m)*r)))) - ((1-E)*((r+E-1)/r)*K)
# Rvec[count]<-R
# }
# Mycurve<-cbind(Rvec,dH)
# Mycurve<-as.data.frame(Mycurve)
# plot(Mycurve$Rvec,Mycurve$dH)
# #write.csv(Mycurve, file = "/Users/ren/Documents/CODES/FoodProvision/Curve_explore.csv")
#
# #curve fitting
# x <- Mycurve$Rvec[1:37]
# y <- Mycurve$dH[1:37]
# p <- plot(x,y,pch=19)
# nlsFit <- nlsLM(y ~b1*((((1489.9105/b1)^(1/b2))/0.3578660)*x)^b2,start=list(b1 = y[2]/x[2],b2=0.3))
# newdata <- data.frame(x = seq(min(x),max(x),len=100))
# predictLine <- lines(newdata$x,predict(nlsFit,newdata=newdata),col="red")
# print(predictLine)
#derive parameters for Juan// Zonation
head(MegaData)
table(MegaData$Ropt)
# Stocks with an interior optimum (0 < Ropt <= 1) get the Hopt-anchored
# one-parameter power fit below; all remaining stocks are fit separately.
ZonationMegaData<-MegaData %>% filter(Ropt>0) %>% filter(Ropt<=1)
hist(ZonationMegaData$Ropt)
max(ZonationMegaData$Ropt)
max(ZonationMegaData$Hopt)
min(ZonationMegaData$Hopt)
#other parameters
Zonation_others<- MegaData %>% filter(! (stockid %in% ZonationMegaData$stockid))
dim(Zonation_others)
#THIS IS FOR ALL THE SPECIES #unmanaged
#w1param<-vector()
# xparam collects the fitted exponent per stock; FracMPA is the grid of MPA
# fractions the harvest curve is evaluated on. One diagnostic page per stock
# is written into the PDF opened here (closed after the loop below).
xparam<-vector()
#Tparam<-vector()
FracMPA<-seq(0,1,0.0001)
pdf("/Users/ren/Documents/CODES/FoodProvision/Results/curvefit.pdf")
# For each stock with an interior optimum: trace the equilibrium harvest gain
# dH(R) over MPA fractions R, then fit the one-parameter power curve
# H(R) = Hopt*(R/Ropt)^b2 to the rising limb (up to the curve maximum).
for (i in 1:dim(ZonationMegaData)[1]){
dH<-vector()
Rvec<-vector()
count<-0
# Per-stock scalars used by the harvest expression.
E<-ZonationMegaData$Efin_BAU1[i]
m_fin<-ZonationMegaData$m_fin[i]
K<-ZonationMegaData$Kfin[i]
r_fin<-ZonationMegaData$r_fin[i]
Hopt<-ZonationMegaData$Hopt[i]
Ropt<-ZonationMegaData$Ropt[i]
for (R in FracMPA){
count<-count+1
dH[count]<-((1-E)*((m_fin*K*(1-R))/(R-(E*R)+m_fin))*(1-(((1-E)*(1-R)*m_fin)/((R-(E*R)+m_fin)*r_fin)))) - ((1-E)*((r_fin+E-1)/r_fin)*K)
Rvec[count]<-R
}
Mycurve<-cbind(Rvec,dH)
Mycurve<-as.data.frame(Mycurve)
#plot(Mycurve$Rvec,Mycurve$dH)
#write.csv(Mycurve, file = "/Users/ren/Documents/CODES/FoodProvision/Curve_explore.csv")
#curve fitting
# Fit only up to the curve maximum. NOTE(review): assumes a unique maximum;
# which() would return multiple indices on ties — TODO confirm.
maxposition<-which(Mycurve$dH==max(Mycurve$dH))
x <- Mycurve$Rvec[1:maxposition]
y <- Mycurve$dH[1:maxposition]
# p <- plot(x,y,pch=19)
#nlsFit <- nlsLM(y ~b1*((((Hopt/b1)^(1/b2))/Ropt)*x)^b2,start=list(b1 = y[2]/x[2],b2=0.5))
nlsFit <- nlsLM(y ~Hopt*(x/Ropt)^b2,start=list(b2=0.5))
newdata <- data.frame(x = seq(min(x),max(x),len=100))
#predictLine <- lines(newdata$x,predict(nlsFit,newdata=newdata),col="red")
#print(predictLine)
# NOTE: base-graphics plot() + lines() — the `+` is spurious but harmless.
plot(x,y,pch=19,main=ZonationMegaData$stockid[i])+
lines(newdata$x,predict(nlsFit,newdata=newdata),col="red")
#w1param[i]<-coef(nlsFit)[1]
#xparam[i]<-coef(nlsFit)[2]
#Tparam[i]<-1/(((Hopt/coef(nlsFit)[1])^(1/coef(nlsFit)[2]))/Ropt)
xparam[i]<-coef(nlsFit)
}
dev.off()
#ZonationMegaData$w1param<-w1param
# Attach fitted exponents and export the Zonation input table (unmanaged stocks).
ZonationMegaData$xparam<-xparam
#ZonationMegaData$Tparam<-Tparam
head(ZonationMegaData)
ZonationMegaData$ExploitationRate<-1-ZonationMegaData$Efin_BAU1
ForZonationMegaData<-ZonationMegaData %>% select(stockid,Kfin,Ropt,Hopt,xparam,ExploitationRate,Kfin,m_fin,r_fin)
head(ForZonationMegaData)
write.csv(ForZonationMegaData, file = "/Users/ren/Documents/CODES/FoodProvision/ForZonationMegaData_Unmanaged_R1.csv")
#THIS IS FOR OTHER SPECIES #MANAGED
# Remaining stocks (no interior optimum): fit the two-parameter power curve
# H(R) = w2 * R^y1 over the FULL range of MPA fractions. One diagnostic page
# per stock goes into the PDF.
w2param<-vector()
yparam<-vector()
FracMPA<-seq(0,1,0.0001)
pdf("/Users/ren/Documents/CODES/FoodProvision/Results/curvefitMANAGED.pdf")
for (i in 1:dim(Zonation_others)[1]){
dH<-vector()
Rvec<-vector()
count<-0
# Per-stock scalars used by the harvest expression.
E<-Zonation_others$Efin_BAU1[i]
m_fin<-Zonation_others$m_fin[i]
K<-Zonation_others$Kfin[i]
r_fin<-Zonation_others$r_fin[i]
Hopt<-Zonation_others$Hopt[i]
Ropt<-Zonation_others$Ropt[i]
for (R in FracMPA){
count<-count+1
dH[count]<-((1-E)*((m_fin*K*(1-R))/(R-(E*R)+m_fin))*(1-(((1-E)*(1-R)*m_fin)/((R-(E*R)+m_fin)*r_fin)))) - ((1-E)*((r_fin+E-1)/r_fin)*K)
Rvec[count]<-R
}
Mycurve<-cbind(Rvec,dH)
Mycurve<-as.data.frame(Mycurve)
# plot(Mycurve$Rvec,Mycurve$dH,main=Zonation_others$stockid[i])
# maxposition is computed but not used in this branch (full range is fit).
maxposition<-which(Mycurve$dH==max(Mycurve$dH))
x <- Mycurve$Rvec
y <- Mycurve$dH
nlsFit <- nlsLM(y ~(w2*(x^y1)),start=list(w2=(y[2]/x[2])-0.1,y1=0.5))
newdata <- data.frame(x = seq(min(x),max(x),len=100))
plot(x,y,pch=19,main=Zonation_others$stockid[i])+
lines(newdata$x,predict(nlsFit,newdata=newdata),col="red")
w2param[i]<-coef(nlsFit)[1]
yparam[i]<-coef(nlsFit)[2]
}
dev.off()
# Attach fitted parameters and export the Zonation input table (managed stocks).
Zonation_others$w2param<-w2param
Zonation_others$yparam<-yparam
Zonation_others$ExploitationRate<-1-Zonation_others$Efin_BAU1
ForZonationMegaData_Managed<-Zonation_others %>% select(stockid,Kfin,Ropt,Hopt,w2param,yparam,ExploitationRate,Kfin,m_fin,r_fin)
write.csv(ForZonationMegaData_Managed, file = "/Users/ren/Documents/CODES/FoodProvision/ForZonationMegaData_Managed_R1.csv")
# ####For Zonation BAU1---------BAU1 -------------BAU1
# #Ropt, Hopt
# MegaData$Ropt<-((MegaData$m*MegaData$r) + (((2*MegaData$Efin_BAU1)-2)*MegaData$m)) / (((MegaData$Efin_BAU1-1)*MegaData$r)+(((2*MegaData$Efin_BAU1)-2)*MegaData$m))
#
# #Given Ropt, what is Hopt???
# MegaData$Hopt<-((1-MegaData$Efin_BAU1)*((MegaData$m*MegaData$Kfin*(1-MegaData$Ropt))/(MegaData$Ropt-(MegaData$Efin_BAU1*MegaData$Ropt)+MegaData$m))*(1-(((1-MegaData$Efin_BAU1)*(1-MegaData$Ropt)*MegaData$m)/((MegaData$Ropt-(MegaData$Efin_BAU1*MegaData$Ropt)+MegaData$m)*MegaData$r)))) - ((1-MegaData$Efin_BAU1)*((MegaData$r+MegaData$Efin_BAU1-1)/MegaData$r)*MegaData$Kfin)
#
# #derive parameters for Juan// Zonation
# ZonationMegaData<-MegaData %>% filter(Ropt>0) %>% filter(Ropt<=1)
#
# #other parameters
# Zonation_others<- MegaData %>% filter(! (stockid %in% ZonationMegaData$stockid))
#
# #THIS IS FOR ALL THE SPECIES #unmanaged
# xparam<-vector()
# FracMPA<-seq(0,1,0.0001)
# pdf("/Users/ren/Documents/CODES/FoodProvision/Results/curvefit.pdf")
# for (i in 1:dim(ZonationMegaData)[1]){
# dH<-vector()
# Rvec<-vector()
# count<-0
# E<-ZonationMegaData$Efin_BAU1[i]
# m<-ZonationMegaData$m[i]
# K<-ZonationMegaData$Kfin[i]
# r<-ZonationMegaData$r[i]
# Hopt<-ZonationMegaData$Hopt[i]
# Ropt<-ZonationMegaData$Ropt[i]
# for (R in FracMPA){
# count<-count+1
# dH[count]<-((1-E)*((m*K*(1-R))/(R-(E*R)+m))*(1-(((1-E)*(1-R)*m)/((R-(E*R)+m)*r)))) - ((1-E)*((r+E-1)/r)*K)
# Rvec[count]<-R
# }
# Mycurve<-cbind(Rvec,dH)
# Mycurve<-as.data.frame(Mycurve)
#
# maxposition<-which(Mycurve$dH==max(Mycurve$dH))
#
# x <- Mycurve$Rvec[1:maxposition]
# y <- Mycurve$dH[1:maxposition]
#
# nlsFit <- nlsLM(y ~Hopt*(x/Ropt)^b2,start=list(b2=0.5))
# newdata <- data.frame(x = seq(min(x),max(x),len=100))
#
# plot(x,y,pch=19,main=ZonationMegaData$stockid[i])+
# lines(newdata$x,predict(nlsFit,newdata=newdata),col="red")
#
# xparam[i]<-coef(nlsFit)
# }
# dev.off()
#
# ZonationMegaData$xparam<-xparam
# head(ZonationMegaData)
# ZonationMegaData$ExploitationRate<-1-ZonationMegaData$Efin_BAU1
# ForZonationMegaData_BAU1<-ZonationMegaData %>% select(stockid,Kfin,Ropt,Hopt,xparam,ExploitationRate,Kfin,m,r)
# write.csv(ForZonationMegaData_BAU1, file = "/Users/ren/Documents/CODES/FoodProvision/ForZonationMegaData_Unmanaged_BAU1.csv")
#
#
# #THIS IS FOR OTHER SPECIES #MANAGED
# w2param<-vector()
# yparam<-vector()
# FracMPA<-seq(0,1,0.0001)
# pdf("/Users/ren/Documents/CODES/FoodProvision/Results/curvefitMANAGED.pdf")
# for (i in 1:dim(Zonation_others)[1]){
# dH<-vector()
# Rvec<-vector()
# count<-0
# E<-Zonation_others$Efin_BAU1[i]
# m<-Zonation_others$m[i]
# K<-Zonation_others$Kfin[i]
# r<-Zonation_others$r[i]
# Hopt<-Zonation_others$Hopt[i]
# Ropt<-Zonation_others$Ropt[i]
# for (R in FracMPA){
# count<-count+1
# dH[count]<-((1-E)*((m*K*(1-R))/(R-(E*R)+m))*(1-(((1-E)*(1-R)*m)/((R-(E*R)+m)*r)))) - ((1-E)*((r+E-1)/r)*K)
# Rvec[count]<-R
# }
# Mycurve<-cbind(Rvec,dH)
# Mycurve<-as.data.frame(Mycurve)
#
# maxposition<-which(Mycurve$dH==max(Mycurve$dH))
# x <- Mycurve$Rvec
# y <- Mycurve$dH
# nlsFit <- nlsLM(y ~(w2*(x^y1)),start=list(w2=(y[2]/x[2])-0.1,y1=0.5))
# newdata <- data.frame(x = seq(min(x),max(x),len=100))
# plot(x,y,pch=19,main=Zonation_others$stockid[i])+
# lines(newdata$x,predict(nlsFit,newdata=newdata),col="red")
# w2param[i]<-coef(nlsFit)[1]
# yparam[i]<-coef(nlsFit)[2]
# }
# dev.off()
# Zonation_others$w2param<-w2param
# Zonation_others$yparam<-yparam
# Zonation_others$ExploitationRate<-1-Zonation_others$Efin_BAU1
# ForZonationMegaData_Managed_BAU1<-Zonation_others %>% select(stockid,Kfin,Ropt,Hopt,w2param,yparam,ExploitationRate,Kfin,m,r)
# write.csv(ForZonationMegaData_Managed_BAU1, file = "/Users/ren/Documents/CODES/FoodProvision/ForZonationMegaData_Managed_BAU1.csv")
###PLOT K using the new data!!!!!!!!!!!!!!!!!!!!!!!!
#files needed: Aquaothers2, MegaData
# Checkpoint both inputs so this section can be re-run without rebuilding them.
saveRDS(Aquaothers2, file = "/Users/ren/Documents/CODES/FoodProvision/Aquaothers2.rds")
# BUG FIX: this previously saved Aquaothers2 into MegaData.rds, so the
# readRDS() below silently replaced MegaData with a copy of Aquaothers2.
saveRDS(MegaData, file = "/Users/ren/Documents/CODES/FoodProvision/MegaData.rds")
Aquaothers2<-readRDS(file = "/Users/ren/Documents/CODES/FoodProvision/Aquaothers2.rds")
MegaData<-readRDS(file = "/Users/ren/Documents/CODES/FoodProvision/MegaData.rds")
head(Aquaothers2)
#K and SpeciesID
head(MegaData)
# Species-level carrying capacity Ktotal = 4*MSY/r for unassessed (Manage==0)
# stocks. ("MagaData_K" is a long-standing typo kept for compatibility.)
MagaData_K<-MegaData %>% filter(Manage==0)
includeREV<-MagaData_K %>% select(SpeciesID,r_fin,MSYtotal) %>% mutate(Ktotal=4*MSYtotal/r_fin) %>% select(SpeciesID,Ktotal)
Aqua3Rev<-merge(Aquaothers2,includeREV,by="SpeciesID")
head(Aqua3Rev)
###
#length(unique(Aqua3$SpeciesID))
# Distribute each species' K across cells proportional to habitat probability,
# then sum across species: S = total carrying capacity per half-degree cell.
Aqua3Rev <- Aqua3Rev %>% group_by(SpeciesID) %>% mutate(totalprob=sum(probability))
Aqua3stackRev<-Aqua3Rev %>% group_by(CenterLat,CenterLong) %>% summarise(S=sum(probability*Ktotal/totalprob))
#head(Aqua3stackRev) #S is total K per cell
Aqua3stackRev<-as.data.frame(Aqua3stackRev)
# empty_raster <- raster(res = 0.5)
# cells <- cellFromXY(empty_raster, as.matrix(Aqua3stackRev[,2:1]))
# empty_raster[cells] <- Aqua3stackRev[,3]
# plot(empty_raster,main="Carrying capacity per cell (MT)")
# png(file="/Users/ren/Documents/CODES/FoodProvision/SupplementInfo/Kplot_all_flat.png", width = 8, height = 5, units = 'in', res = 300)
# plot(empty_raster)
# dev.off()
# #----K PLOT - BASED ON GGPLOT
# head(Aqua3stackRev)
# Mollweide-projected land polygons used as the basemap in the maps below.
land_shp_moll<-readRDS(file = "/Users/ren/Documents/CODES/FoodProvision/land_shp_moll.rds")
# KPLOTFin<- Aqua3stackRev %>%
# set_names(c("lat", "lon", "K")) %>%
# select(lon, lat, K)%>%
# raster::rasterFromXYZ(crs = "+proj=longlat +datum=WGS84") %>%
# raster::projectRaster(crs = "+proj=moll") %>%
# as.data.frame(xy = T) %>%
# filter(!is.na(K)) %>%
# set_names(c("lon", "lat", "K")) %>%
# ggplot(aes(x=lon,y=lat,fill=K)) + labs(fill="K (MT/cell)")+scale_fill_gradient(low="white", high="#00539CFF")+
# #guides(fill=guide_legend())+
# theme(axis.title.x = element_blank(),axis.title.y = element_blank())+
# geom_raster()+
# geom_sf(data = land_shp_moll, inherit.aes = F)
# KPLOTFin
# ggsave(file="/Users/ren/Documents/CODES/FoodProvision/PaperFigures/KPLOTFin.png", KPLOTFin,width = 10, height = 8, dpi = 300, units = "in")#resolution not great
#format same as pixel level food prov
# Cube-root transform on the fill so a few very large cells do not wash out
# the colour scale; legend labels undo the transform (x^root).
root<-3
head(Aqua3stackRev)
SI_totalkpercell<-Aqua3stackRev %>%
set_names(c("lat", "lon", "K")) %>%
select(lon, lat, K)%>%
raster::rasterFromXYZ(crs = "+proj=longlat +datum=WGS84") %>%
raster::projectRaster(crs = "+proj=moll") %>%
as.data.frame(xy = T) %>%
filter(!is.na(K)) %>%
set_names(c("lon", "lat", "K")) %>%
mutate(tmp = K^(1/root)) %>%
ggplot()+
geom_raster(aes(lon, lat, fill = tmp))+
scale_fill_viridis(
labels = function(x){x^root},
name="K (MT/cell)",
limits=c(0,max(Aqua3stackRev$S)^(1/root))
)+
#scale_fill_gradient2(labels = function(x){x^root},
# low = "white",
# high = "#00539CFF", space = "Lab",
# name="K (MT/cell)",
# limits=c(0,max(Aqua3stackRev$S)^(1/root)))+
theme(axis.title.x = element_blank(),axis.title.y = element_blank(), panel.background = element_blank())+ #"bottom
labs(title = "", fill = "", y = "", x = "")+
#geom_raster(data=MPA_coord, aes(x=lon, y=lat),fill="cyan")+ #"#EEA47FFF"
geom_sf(data = land_shp_moll, fill="black", lwd = 0, inherit.aes = F)+ theme(panel.grid.major = element_line(colour = 'transparent'))
SI_totalkpercell
ggsave(file="/Users/ren/Documents/CODES/FoodProvision/SupplementInfo/SI_totalkpercell.png", SI_totalkpercell,width = 10, height = 8, dpi = 300, units = "in")#resolution not great
#--Kplot based on Juan's code
max(Aqua3stackRev$S)
min(Aqua3stackRev$S)
#Qresult<-result[result>0]
# Manual colour-scale breaks for the tmap version of the K map.
T1<-0
T2<-100
T3<-500
T4<-5000
T5<-10000
T6<-50000
T7<-100000.0
T8<-max(Aqua3stackRev$S)#335857.3
library(tmap)
library(leaflet)
ocean_low_res_moll<-raster::raster("/Users/ren/Documents/CODES/FoodProvision/ocean-low-res-moll.tiff")
# NOTE(review): empty_raster is not (re)built in this section — the raster
# construction above is commented out — so this relies on a stale object from
# an earlier run. Rebuild empty_raster from Aqua3stackRev before this line.
crs(empty_raster) <- "+proj=longlat +datum=WGS84 +no_defs +ellps=WGS84 +towgs84=0,0,0"
maxValue(empty_raster)
z_pal <- list(breaks = c(T1,T2,T3,T4,T5,T6,T7,T8),
labels = c(paste(T1,"-",T2), paste(T2,"-",T3), paste(T3,"-",T4), paste(T4,"-",T5), paste(T5,"-",T6), paste(T6,"-",T7), paste(T7,"-",round(T8))),
colors = rev(c("#d73027","#fdae61","#fee090","#e0f3f8","#abd9e9","#74add1", "#4575b4")))
land_shp <-st_read("/Users/ren/Documents/CODES/FoodProvision/landshp_moll/spatial-datasets-land-land_50.shp")
# NOTE(review): ocean_low_res_moll is read a second time here (duplicate of the
# read above) — harmless but redundant.
ocean_low_res_moll<-raster::raster("/Users/ren/Documents/CODES/FoodProvision/ocean-low-res-moll.tiff")
caption<-"Pixel-level food provisioning potential"
land_shp_moll <- land_shp %>% st_transform(crs = projection(ocean_low_res_moll))
# tmap version of the carrying-capacity map: reproject the K raster onto the
# Mollweide ocean template and overlay land in black.
Kplot<-empty_raster %>%
raster::projectRaster(ocean_low_res_moll) %>%
tmap::tm_shape()+
tmap::tm_raster(title = "K (MT/cell)",
palette = z_pal$colors,
breaks = z_pal$breaks,
labels = z_pal$labels,
legend.is.portrait = T,
legend.reverse = T)+
tmap::tm_shape(land_shp_moll)+
tmap::tm_fill(col = "black", border.col = "transparent")+
#tmap::tm_credits(caption) +
tmap::tm_layout(#title = "Food provision potential (MT)",
#title.position = c("center", .95),
inner.margins = c(0.12, 0, 0.08, 0.04),
frame = F,
legend.position = c(.99, "center"))
Kplot
#ggsave("/Users/ren/Documents/CODES/FoodProvision/SupplementInfo/Kplot_all.tiff", plot=Kplot,dpi=300)
tiff(file="/Users/ren/Documents/CODES/FoodProvision/SupplementInfo/Kplot_all.tiff", width = 12, height = 6, units = 'in', res = 300)
Kplot
dev.off()
#test if I can combine tmap with ggplot
# Scratch check: re-read the exported TIFF and stack it twice with gridExtra.
library(cowplot)
library(ggplot2)
library(magick)
library(png)
library(grid)
library(gridExtra)
library(tiff)
p2<-readTIFF("/Users/ren/Documents/CODES/FoodProvision/SupplementInfo/Kplot_all.tiff")
grid.arrange(rasterGrob(p2),rasterGrob(p2),ncol=1)
#OK, INSERT code for identifying ocean areas for country-level analysis
head(Aqua3stackRev)
# Ocean cell centres from the K-per-cell table, renamed to (lat, lon).
oceancoord<-Aqua3stackRev %>% select(CenterLat,CenterLong)
colnames(oceancoord) <- c("lat","lon")
#load EEZ file from Juan
highreseezgrid<-read.csv("/Users/ren/Documents/CODES/FoodProvision/EEZfile/high_res_eez_grid.csv")
#convert to 0.5 resolution
# Snap high-resolution EEZ cell centres onto the 0.5-degree grid used here.
res<-0.5
highreseezgrid$CenterLat<-floor(highreseezgrid$lat_bin_center/res)*res+ 0.5*res
highreseezgrid$CenterLong<-floor(highreseezgrid$lon_bin_center/res)*res+ 0.5*res
highreseezgrid<-highreseezgrid %>% select(territory_iso3,sovereign_iso3,CenterLat,CenterLong)
colnames(highreseezgrid) <- c("territory","sovereign","lat","lon")
# Tag each ocean cell with its territory/sovereign (NA = high seas).
highreseezgrid2<-left_join(oceancoord,highreseezgrid,by=c("lat","lon"))
head(highreseezgrid2)
table(highreseezgrid2$sovereign)
countries<-c("AUS", "CAN", "CHL", "FJI", "GHA", "IDN", "IND", "JAM", "JPN", "KEN", "MEX", "NAM", "NOR", "PLW", "PRT")
EEZs<-highreseezgrid2 %>% filter(is.na(sovereign)==F)#%>% filter(sovereign %in% countries)
head(EEZs)
EEZs_coord<-unique(EEZs[c("lat", "lon")])
head(EEZs_coord)
# Quick visual check: rasterize the EEZ cells.
empty_raster <- raster(res = 0.5)
cells <- cellFromXY(empty_raster, as.matrix(EEZs_coord[,2:1]))
empty_raster[cells] <- 1
plot(empty_raster,main="EEZs")
#save EEZs_coord
saveRDS(EEZs_coord,file = "/Users/ren/Documents/CODES/FoodProvision/EEZfile/EEZs_coord.rds")
# High-seas cells = ocean cells with no sovereign.
HighSeas<-highreseezgrid2 %>% filter(is.na(sovereign)==T)#%>% filter(sovereign %in% countries)
HighSeas_coord<-unique(HighSeas[c("lat", "lon")])
empty_raster <- raster(res = 0.5)
cells <- cellFromXY(empty_raster, as.matrix(HighSeas_coord[,2:1]))
empty_raster[cells] <- 1
plot(empty_raster,main="High Seas")
saveRDS(HighSeas_coord,file = "/Users/ren/Documents/CODES/FoodProvision/EEZfile/HighSeas_coord.rds")
#Example
# Example country maps (visual sanity checks only).
AUS<-highreseezgrid2 %>% filter(sovereign=="AUS")
empty_raster <- raster(res = 0.5)
cells <- cellFromXY(empty_raster, as.matrix(AUS[,2:1]))
empty_raster[cells] <- 1
plot(empty_raster,main="AUS")
CAN<-highreseezgrid2 %>% filter(sovereign=="CAN")
empty_raster <- raster(res = 0.5)
cells <- cellFromXY(empty_raster, as.matrix(CAN[,2:1]))
empty_raster[cells] <- 1
plot(empty_raster,main="CAN")
#load MPA file and just check what it looks like
MPAcurrent<-read.csv("/Users/ren/Documents/CODES/FoodProvision/EEZfile/politically_correct_mpas.csv")
head(MPAcurrent)
table(MPAcurrent$mpa)
#convert to 0.5 resolution
res<-0.5
MPAcurrent$lon<-floor(MPAcurrent$lon/res)*res+ 0.5*res
MPAcurrent$lat<-floor(MPAcurrent$lat/res)*res+ 0.5*res
#get only mpa==1
MPAcurrent<-MPAcurrent %>% filter(mpa==1)
dim(MPAcurrent)
MPA_coord<-unique(MPAcurrent[c("lat", "lon")])
dim(MPA_coord)
head(MPA_coord)
saveRDS(MPA_coord,file = "/Users/ren/Documents/CODES/FoodProvision/EEZfile/MPA_coord.rds")
#---compute K per m!!!!!!!!!!!!!!! (for SI)
#
# Same K-per-cell calculation as above, restricted to unassessed (Manage==0)
# stocks with low mobility (m_fin == 0.1).
MagaData_K<-MegaData %>% filter(Manage==0) %>% filter(m_fin==0.1) # unassessed stocks, m = 0.1
head(MagaData_K)
includeREV<-MagaData_K %>% select(SpeciesID,r_fin,MSYtotal) %>% mutate(Ktotal=4*MSYtotal/r_fin) %>% select(SpeciesID,Ktotal)
Aqua3Rev<-merge(Aquaothers2,includeREV,by="SpeciesID")
Aqua3Rev <- Aqua3Rev %>% group_by(SpeciesID) %>% mutate(totalprob=sum(probability))
Aqua3stackRev<-Aqua3Rev %>% group_by(CenterLat,CenterLong) %>% summarise(S=sum(probability*Ktotal/totalprob))
Aqua3stackRev<-as.data.frame(Aqua3stackRev)
# empty_raster <- raster(res = 0.5)
# cells <- cellFromXY(empty_raster, as.matrix(Aqua3stackRev[,2:1]))
# empty_raster[cells] <- Aqua3stackRev[,3]
# plot(PlotFunction(empty_raster),main="Carrying capacity per cell (MT), m=0.1")
# png(file="/Users/ren/Documents/CODES/FoodProvision/SupplementInfo/Kplot_all_flat_m01.png", width = 8, height = 5, units = 'in', res = 300)
# plot(empty_raster)
# dev.off()
# Reload the Mollweide land polygons (basemap for the per-mobility maps below).
land_shp_moll<-readRDS(file = "/Users/ren/Documents/CODES/FoodProvision/land_shp_moll.rds")
# head(Aqua3stackRev)
# max(Aqua3stackRev$S)
# m01<- Aqua3stackRev %>%
# set_names(c("lat", "lon", "K")) %>%
# select(lon, lat, K)%>%
# raster::rasterFromXYZ(crs = "+proj=longlat +datum=WGS84") %>%
# raster::projectRaster(crs = "+proj=moll") %>%
# as.data.frame(xy = T) %>%
# filter(!is.na(K)) %>%
# set_names(c("lon", "lat", "K")) %>%
# ggplot(aes(x=lon,y=lat,fill=K)) + scale_fill_gradient(low="white", high="#00539CFF",name="K (MT/pixel)",limit=c(0,max(Aqua3stackRev$S)))+#guides(fill=guide_legend())+
# theme(axis.title.x = element_blank(),axis.title.y = element_blank(), panel.background = element_blank())+ #theme_bw()+
# geom_raster()+
# #geom_sf(data = land_shp_moll, inherit.aes = F)
# geom_sf(data = land_shp_moll, fill="darkgray", lwd = 0.1, inherit.aes = F)
# m01
# ggsave(file="/Users/ren/Documents/CODES/FoodProvision/PaperFigures/m01.png", m01, width = 10, height = 8, dpi = 300, units = "in")#resolution not great
#carrying capacity, m=0.1
##plot same as pixel level food prov
# SI map: total K per cell for m = 0.1 stocks, cube-root fill scale as before.
SI_totalkpercell_m01<-Aqua3stackRev %>%
set_names(c("lat", "lon", "K")) %>%
select(lon, lat, K)%>%
raster::rasterFromXYZ(crs = "+proj=longlat +datum=WGS84") %>%
raster::projectRaster(crs = "+proj=moll") %>%
as.data.frame(xy = T) %>%
filter(!is.na(K)) %>%
set_names(c("lon", "lat", "K")) %>%
mutate(tmp = K^(1/root)) %>%
ggplot()+
geom_raster(aes(lon, lat, fill = tmp))+
scale_fill_viridis(
labels = function(x){x^root},
name="K (MT/cell)",
limits=c(0,max(Aqua3stackRev$S)^(1/root))
)+
# scale_fill_gradient2(labels = function(x){x^root},
# low = "white",
# high = "#00539CFF", space = "Lab",
# name="K (MT/cell)",
# limits=c(0,max(Aqua3stackRev$S)^(1/root)))+
theme(axis.title.x = element_blank(),axis.title.y = element_blank(), panel.background = element_blank())+ #"bottom
labs(title = "", fill = "", y = "", x = "")+
#geom_raster(data=MPA_coord, aes(x=lon, y=lat),fill="cyan")+ #"#EEA47FFF"
geom_sf(data = land_shp_moll, fill="black", lwd = 0, inherit.aes = F)+ theme(panel.grid.major = element_line(colour = 'transparent'))
SI_totalkpercell_m01
ggsave(file="/Users/ren/Documents/CODES/FoodProvision/SupplementInfo/SI_totalkpercell_m01.png", SI_totalkpercell_m01,width = 10, height = 8, dpi = 300, units = "in")#resolution not great
# Carrying capacity per cell, m = 0.3: same K-per-cell calculation restricted
# to unassessed (Manage==0) stocks with medium mobility.
MagaData_K<-MegaData %>% filter(Manage==0) %>% filter(m_fin==0.3) # unassessed stocks, m = 0.3
head(MagaData_K)
includeREV<-MagaData_K %>% select(SpeciesID,r_fin,MSYtotal) %>% mutate(Ktotal=4*MSYtotal/r_fin) %>% select(SpeciesID,Ktotal)
Aqua3Rev<-merge(Aquaothers2,includeREV,by="SpeciesID")
Aqua3Rev <- Aqua3Rev %>% group_by(SpeciesID) %>% mutate(totalprob=sum(probability))
Aqua3stackRev<-Aqua3Rev %>% group_by(CenterLat,CenterLong) %>% summarise(S=sum(probability*Ktotal/totalprob))
Aqua3stackRev<-as.data.frame(Aqua3stackRev)
# empty_raster <- raster(res = 0.5)
# cells <- cellFromXY(empty_raster, as.matrix(Aqua3stackRev[,2:1]))
# empty_raster[cells] <- Aqua3stackRev[,3]
# plot(empty_raster,main="Carrying capacity per cell (MT), m=0.3")
# png(file="/Users/ren/Documents/CODES/FoodProvision/SupplementInfo/Kplot_all_flat_m03.png", width = 8, height = 5, units = 'in', res = 300)
# plot(empty_raster)
# dev.off()
# m3<- Aqua3stackRev %>%
# set_names(c("lat", "lon", "K")) %>%
# select(lon, lat, K)%>%
# raster::rasterFromXYZ(crs = "+proj=longlat +datum=WGS84") %>%
# raster::projectRaster(crs = "+proj=moll") %>%
# as.data.frame(xy = T) %>%
# filter(!is.na(K)) %>%
# set_names(c("lon", "lat", "K")) %>%
# ggplot(aes(x=lon,y=lat,fill=K)) + scale_fill_gradient(low="white", high="#00539CFF",name="K (MT/pixel)",limit=c(0,max(Aqua3stackRev$S)))+#guides(fill=guide_legend())+
# theme(axis.title.x = element_blank(),axis.title.y = element_blank(), panel.background = element_blank())+ #theme_bw()+
# geom_raster()+
# #geom_sf(data = land_shp_moll, inherit.aes = F)
# geom_sf(data = land_shp_moll, fill="darkgray", lwd = 0.1, inherit.aes = F)
# m3
# ggsave(file="/Users/ren/Documents/CODES/FoodProvision/PaperFigures/m3.png", m3, width = 10, height = 8, dpi = 300, units = "in")#resolution not great
##plot same as pixel level food prov
# SI map: total K per cell for m = 0.3 stocks, cube-root fill scale as before.
SI_totalkpercell_m3<-Aqua3stackRev %>%
set_names(c("lat", "lon", "K")) %>%
select(lon, lat, K)%>%
raster::rasterFromXYZ(crs = "+proj=longlat +datum=WGS84") %>%
raster::projectRaster(crs = "+proj=moll") %>%
as.data.frame(xy = T) %>%
filter(!is.na(K)) %>%
set_names(c("lon", "lat", "K")) %>%
mutate(tmp = K^(1/root)) %>%
ggplot()+
geom_raster(aes(lon, lat, fill = tmp))+
scale_fill_viridis(
labels = function(x){x^root},
name="K (MT/cell)",
limits=c(0,max(Aqua3stackRev$S)^(1/root))
)+
# scale_fill_gradient2(labels = function(x){x^root},
# low = "white",
# high = "#00539CFF", space = "Lab",
# name="K (MT/cell)",
# limits=c(0,max(Aqua3stackRev$S)^(1/root)))+
theme(axis.title.x = element_blank(),axis.title.y = element_blank(), panel.background = element_blank())+ #"bottom
labs(title = "", fill = "", y = "", x = "")+
#geom_raster(data=MPA_coord, aes(x=lon, y=lat),fill="cyan")+ #"#EEA47FFF"
geom_sf(data = land_shp_moll, fill="black", lwd = 0, inherit.aes = F)+ theme(panel.grid.major = element_line(colour = 'transparent'))
SI_totalkpercell_m3
ggsave(file="/Users/ren/Documents/CODES/FoodProvision/SupplementInfo/SI_totalkpercell_m3.png", SI_totalkpercell_m3,width = 10, height = 8, dpi = 300, units = "in")#resolution not great
# #carrying capacity, m=9
# Rebuild the per-cell carrying-capacity stack for mobility m_fin = 0.9.
# NOTE(review): the inline comment says "managed only" but the filter is Manage==0
# (unmanaged stocks) -- confirm which is intended.
MagaData_K<-MegaData %>% filter(Manage==0) %>% filter(m_fin==0.9) #managed only
head(MagaData_K)
# Ktotal = 4*MSY/r (inverts MSY = r*K/4 -- TODO confirm this is the intended surplus-production form)
includeREV<-MagaData_K %>% select(SpeciesID,r_fin,MSYtotal) %>% mutate(Ktotal=4*MSYtotal/r_fin) %>% select(SpeciesID,Ktotal)
Aqua3Rev<-merge(Aquaothers2,includeREV,by="SpeciesID")
# Normalize each species' K across its range by occurrence probability,
# then sum per cell to get total K per cell (column S).
Aqua3Rev <- Aqua3Rev %>% group_by(SpeciesID) %>% mutate(totalprob=sum(probability))
Aqua3stackRev<-Aqua3Rev %>% group_by(CenterLat,CenterLong) %>% summarise(S=sum(probability*Ktotal/totalprob))
Aqua3stackRev<-as.data.frame(Aqua3stackRev)
# empty_raster <- raster(res = 0.5)
# cells <- cellFromXY(empty_raster, as.matrix(Aqua3stackRev[,2:1]))
# empty_raster[cells] <- Aqua3stackRev[,3]
# plot(empty_raster,main="Carrying capacity per cell (MT), m=0.9")
# png(file="/Users/ren/Documents/CODES/FoodProvision/SupplementInfo/Kplot_all_flat_m09.png", width = 8, height = 5, units = 'in', res = 300)
# plot(empty_raster)
# dev.off()
#
# m9<- Aqua3stackRev %>%
# set_names(c("lat", "lon", "K")) %>%
# select(lon, lat, K)%>%
# raster::rasterFromXYZ(crs = "+proj=longlat +datum=WGS84") %>%
# raster::projectRaster(crs = "+proj=moll") %>%
# as.data.frame(xy = T) %>%
# filter(!is.na(K)) %>%
# set_names(c("lon", "lat", "K")) %>%
# ggplot(aes(x=lon,y=lat,fill=K)) + scale_fill_gradient(low="white", high="#00539CFF",name="K (MT/pixel)",limit=c(0,max(Aqua3stackRev$S)))+#guides(fill=guide_legend())+
# theme(axis.title.x = element_blank(),axis.title.y = element_blank(), panel.background = element_blank())+ #theme_bw()+
# geom_raster()+
# #geom_sf(data = land_shp_moll, inherit.aes = F)
# geom_sf(data = land_shp_moll, fill="darkgray", lwd = 0.1, inherit.aes = F)
# m9
# ggsave(file="/Users/ren/Documents/CODES/FoodProvision/PaperFigures/m9.png", m9, width = 10, height = 8, dpi = 300, units = "in")#resolution not great
## SI figure: total carrying capacity K (MT) per cell for mobility m = 0.9.
## Same construction as the m = 0.3 figure above; root-transformed color scale.
##plot same as pixel level food prov
SI_totalkpercell_m9<-Aqua3stackRev %>%
  set_names(c("lat", "lon", "K")) %>%
  select(lon, lat, K)%>%
  raster::rasterFromXYZ(crs = "+proj=longlat +datum=WGS84") %>%
  raster::projectRaster(crs = "+proj=moll") %>%
  as.data.frame(xy = T) %>%
  filter(!is.na(K)) %>%
  set_names(c("lon", "lat", "K")) %>%
  mutate(tmp = K^(1/root)) %>%
  ggplot()+
  geom_raster(aes(lon, lat, fill = tmp))+
  scale_fill_viridis(
    labels = function(x){x^root},
    name="K (MT/cell)",
    limits=c(0,max(Aqua3stackRev$S)^(1/root))
  )+
  # Earlier diverging-palette variant, kept for reference:
  # scale_fill_gradient2(labels = function(x){x^root},
  #                      low = "white",
  #                      high = "#00539CFF", space = "Lab",
  #                      name="K (MT/cell)",
  #                      limits=c(0,max(Aqua3stackRev$S)^(1/root)))+
  theme(axis.title.x = element_blank(),axis.title.y = element_blank(), panel.background = element_blank())+ #"bottom
  labs(title = "", fill = "", y = "", x = "")+
  #geom_raster(data=MPA_coord, aes(x=lon, y=lat),fill="cyan")+ #"#EEA47FFF"
  geom_sf(data = land_shp_moll, fill="black", lwd = 0, inherit.aes = F)+ theme(panel.grid.major = element_line(colour = 'transparent'))
SI_totalkpercell_m9
ggsave(file="/Users/ren/Documents/CODES/FoodProvision/SupplementInfo/SI_totalkpercell_m9.png", SI_totalkpercell_m9,width = 10, height = 8, dpi = 300, units = "in")#resolution not great
#Files to transfer to bigger machine
#MegaData, Cleanmegacell, and the coordinates --
saveRDS(Cleanmegacell, file = "/Users/ren/Documents/CODES/FoodProvision/Cleanmegacell.rds")
saveRDS(MegaData, file = "/Users/ren/Documents/CODES/FoodProvision/MegaData.rds")
saveRDS(CleanCoordmegacell, file = "/Users/ren/Documents/CODES/FoodProvision/CleanCoordmegacell.rds")
# Export table for the uncertainty analysis: exploitation rates (1 - E) under each
# management scenario, plus the life-history and status columns used downstream.
MegaData_UncertaintyAnalysis<-MegaData %>% mutate(ExploitationRate_BAU1=1-Efin_BAU1,
                                                  ExploitationRate_OAcons=1-Efin,
                                                  ExploitationRate_AllMSY=1-Emsy,
                                                  ExploitationRate_EfinMSY=1-Efin_msy,
                                                  ExploitationRate_WormOA=1-EBvK01fin,
                                                  ExploitationRate_WormMSY=1-EBvK01_msy) %>%
  select(SpeciesID,stockid,SciName,
         Manage,Fstatus,Bstatus,Emanage,ER,bvk_fin,BK2012,
         m_fin,Kfin,r_fin,r_thorson,ln_r_mu,ln_r_sd,r_fishbase,stdev,
         ExploitationRate_BAU1,
         ExploitationRate_OAcons,
         ExploitationRate_AllMSY,
         ExploitationRate_EfinMSY,
         ExploitationRate_WormOA,
         ExploitationRate_WormMSY)
head(MegaData_UncertaintyAnalysis)
plot(MegaData_UncertaintyAnalysis$ExploitationRate_WormMSY)
write.csv(MegaData_UncertaintyAnalysis, file = "/Users/ren/Documents/CODES/FoodProvision/MegaData_UncertaintyAnalysis.csv")
#!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
###Compute spillover---PIXEL-LEVEL spillover
# For each candidate cell (here only the first 240), protect that single cell and
# compute the change in equilibrium harvest (HMPA - HBAU) across UNMANAGED stocks.
dim(Cleanmegacell)
dim(MegaData)
head(Cleanmegacell)
head(MegaData)
#UNMANAGED data
UNMNG_MegaData<-MegaData %>% filter(Manage==0)
UNMNG_Cleanmegacell<-Cleanmegacell %>% select(UNMNG_MegaData$stockid)
#Managed data
MNG_MegaData<-MegaData %>% filter(Manage==1)
MNG_Cleanmegacell<-Cleanmegacell %>% select(MNG_MegaData$stockid)
#1. UNMANAGED only
UNMNG_numcell<-dim(UNMNG_Cleanmegacell)[1]
K<-UNMNG_MegaData$Kfin #k per species
# NOTE(review): uses columns m and r here, while other sections use m_fin / r_fin
# -- confirm the intended columns.
m<-UNMNG_MegaData$m #mobility per species
#Harvest without MPA
R <-0 #MPA size
r<-UNMNG_MegaData$r
E<-UNMNG_MegaData$Efin
#----Harvest with no MPA, BAU, no climate
HBAU <- sum((1-E)*((r+E-1)/r)*K)
MPAselect0<-matrix(0, nrow=UNMNG_numcell, ncol=1)
# Transpose so rows = stocks, cols = cells: Kprotected = (stocks x cells) %*% (cells x 1)
TUNMNG_Cleanmegacell<-t(UNMNG_Cleanmegacell)
cores<-detectCores()
registerDoParallel(cores)
system.time({
  # Only the first 240 cells are evaluated here (exploratory subset).
  UNMNG_result <- foreach(iter = 1:240, .combine = rbind) %dopar% {
    MPAselect<-MPAselect0
    MPAselect[iter]<-1
    keeps<-which(MPAselect==1)
    MPAselect2<-as.matrix(MPAselect[keeps,])
    TUNMNG_Cleanmegacell2<-as.matrix(TUNMNG_Cleanmegacell[,keeps])
    # R = fraction of each stock's K inside the (single-cell) MPA
    Kprotected<-as.data.frame(TUNMNG_Cleanmegacell2 %*% MPAselect2)
    R<-Kprotected$V1
    # Equilibrium harvest with MPA; negative harvests floored to zero
    hmpa<-na.omit((1-E)*((m*K*(1-R))/(R-(E*R)+m))*(1-(((1-E)*(1-R)*m)/((R-(E*R)+m)*r))))
    hmpa<-hmpa*(hmpa>0)
    HMPA<-sum(hmpa)#sum((1-E)*((m*K*(1-R))/(R-(E*R)+m))*(1-(((1-E)*(1-R)*m)/((R-(E*R)+m)*r))), na.rm=T)
    HMPA-HBAU
  }
})
plot(UNMNG_result)
stopImplicitCluster()
#2. MANAGED only
# Same per-cell spillover calculation as above, restricted to MANAGED stocks
# (only the first 24 cells evaluated; exploratory subset).
MNG_numcell<-dim(MNG_Cleanmegacell)[1]
K<-MNG_MegaData$Kfin #k per species
m<-MNG_MegaData$m #mobility per species
#Harvest without MPA
R <-0 #MPA size
r<-MNG_MegaData$r
E<-MNG_MegaData$Efin
#----Harvest with no MPA, BAU, no climate
HBAU <- sum((1-E)*((r+E-1)/r)*K)
MPAselect0<-matrix(0, nrow=MNG_numcell, ncol=1)
TMNG_Cleanmegacell<-t(MNG_Cleanmegacell)
cores<-detectCores()
registerDoParallel(cores)
system.time({
  MNG_result <- foreach(iter = 1:24, .combine = rbind) %dopar% {
    MPAselect<-MPAselect0
    MPAselect[iter]<-1
    # Fraction of each managed stock's K inside the single-cell MPA
    Kprotected<-as.data.frame(TMNG_Cleanmegacell %*% MPAselect)
    R<-Kprotected$V1
    hmpa<-na.omit((1-E)*((m*K*(1-R))/(R-(E*R)+m))*(1-(((1-E)*(1-R)*m)/((R-(E*R)+m)*r))))
    hmpa<-hmpa*(hmpa>0)
    HMPA<-sum(hmpa)#sum((1-E)*((m*K*(1-R))/(R-(E*R)+m))*(1-(((1-E)*(1-R)*m)/((R-(E*R)+m)*r))), na.rm=T)
    HMPA-HBAU
  }
})
MNG_result
stopImplicitCluster()
#Files to transfer to bigger machine
#MegaData, Cleanmegacell, and the coordinates --
saveRDS(Cleanmegacell, file = "/Users/ren/Documents/CODES/FoodProvision/Cleanmegacell.rds")
saveRDS(MegaData, file = "/Users/ren/Documents/CODES/FoodProvision/MegaData.rds")
saveRDS(CleanCoordmegacell, file = "/Users/ren/Documents/CODES/FoodProvision/CleanCoordmegacell.rds")
#colnames(Cleanmegacell)
##YOU CAN RESTART R NOW TO REFRESH MEMORY
#load big files
Cleanmegacell<-readRDS(file = "/Users/ren/Documents/CODES/FoodProvision/Cleanmegacell.rds")
MegaData<-readRDS(file = "/Users/ren/Documents/CODES/FoodProvision/MegaData.rds")
CleanCoordmegacell<-readRDS(file = "/Users/ren/Documents/CODES/FoodProvision/CleanCoordmegacell.rds")
#3.COMBINED MANAGED AND UNMANAGED
# Setup for the all-stocks pixel-level calculation: model parameters as parallel
# vectors over the 1342 stocks, plus the BAU (no-MPA) baseline harvest.
numcell<-dim(Cleanmegacell)[1]
dH<-matrix(0, nrow=numcell, ncol=1)
K<-MegaData$Kfin #k per species
m<-MegaData$m #mobility per species
#Harvest without MPA
R <-0 #MPA size
r<-MegaData$r
E<-MegaData$Efin
#----Harvest with no MPA, BAU, no climate
HBAU <- sum((1-E)*((r+E-1)/r)*K)
HBAUpixel<-(1-E)*((r+E-1)/r)*K
# NOTE(review): initializes to (K<0)*1, i.e. all zeros if K is non-negative --
# confirm this is just a zero vector of the right length.
deltaHpixel<-(K<0)*1
#under business as usual, top x species
MPAselect0<-matrix(0, nrow=numcell, ncol=1)
#Norm_K_filter: row is species name, col is pixel id, content is normaized K
#TNorm_K_filter<-t(Norm_K_filter)
TCleanmegacell<-t(Cleanmegacell)
# #response per pixel. Not useful!
# for (iter in 1:numcell) {
# MPAselect<-MPAselect0
# MPAselect[iter]<-1
#
# keeps<-which(MPAselect==1)
# MPAselect2<-as.matrix(MPAselect[keeps,])
# TCleanmegacell2<-as.matrix(TCleanmegacell[,keeps])
# Kprotected<-as.data.frame(TCleanmegacell2 %*% MPAselect2)
#
# R<-Kprotected$V1
# HMPApixel<-(1-E)*((m*K*(1-R))/(R-(E*R)+m))*(1-(((1-E)*(1-R)*m)/((R-(E*R)+m)*r)))
# HMPApixel[is.na(HMPApixel)]<-0
# deltaHpixel<-deltaHpixel + HMPApixel - HBAUpixel
# }
# plot(deltaHpixel)
# sum(deltaHpixel)
# #plot K per stock per species for SI
# StockList<-MegaData %>% select(stockid,SciName,MSYfin,Kfin)
# StockList$deltaH<-deltaHpixel
# StockList$deltaH_MSY<-StockList$deltaH/StockList$MSYfin
# plot(StockList$deltaH_MSY)
# head(StockList)
# # Horizontal bar plots,
# # change fill color by groups and add text labels
# deltaHpixelplot<-StockList[order(-StockList$deltaH),] %>% slice(1:50)
# plotdeltaHpixelplot<-ggplot(deltaHpixelplot, aes(x = reorder(stockid, deltaH), y = deltaH)) +
# geom_bar(fill="steelblue",stat = "identity") +
# coord_flip() +
# geom_text(aes(label = SciName,size=14), nudge_y = 0, color = "black")+
# labs(y = "Carrying capacity, K (MT)", x="Fish stock")+ #ylim(0, 4.5e7)+
# theme(axis.text=element_text(size=14),
# axis.title=element_text(size=16,face="bold"),
# legend.position="none")
# plotdeltaHpixelplot #this is global
#MAKE A LOOK-UP TABLE, each pixel have values of how much of the geog range of each species is in MPA
# ##NO NEED TO RERUN
# KprotectedPerCell<-vector()
# MPAselect0<-matrix(0, nrow=numcell, ncol=1)
# for (iter in 1:numcell){
# MPAselect<-MPAselect0
# MPAselect[iter]<-1
# keeps<-which(MPAselect==1)
# MPAselect2<-as.matrix(MPAselect[keeps,])
# TCleanmegacell2<-as.matrix(TCleanmegacell[,keeps])
# KprotectedPerCell[iter]<-as.data.frame(TCleanmegacell2 %*% MPAselect2)
# }
# KprotectedPerCell_Library<-as.data.frame(KprotectedPerCell)
# names(KprotectedPerCell_Library) <- 1:numcell
# head(KprotectedPerCell_Library)
# dim(KprotectedPerCell_Library)
# sum(KprotectedPerCell_Library[1000])
# saveRDS(KprotectedPerCell_Library, file = "/Users/ren/Documents/CODES/FoodProvision/KprotectedPerCell_Library.rds")
# Pixel-level food provision for ALL stocks: protect each cell alone and record
# the change in total equilibrium harvest (HMPA - HBAU). `result` has one value
# per cell and feeds the Fig. 2 map below.
cores<-detectCores()
registerDoParallel(cores)
system.time({
  result <- foreach(iter = 1:numcell, .combine = cbind) %dopar% {
    MPAselect<-MPAselect0
    MPAselect[iter]<-1
    keeps<-which(MPAselect==1)
    MPAselect2<-as.matrix(MPAselect[keeps,])
    TCleanmegacell2<-as.matrix(TCleanmegacell[,keeps])
    # Fraction of each stock's K inside the single-cell MPA
    Kprotected<-as.data.frame(TCleanmegacell2 %*% MPAselect2)
    R<-Kprotected$V1
    HMPA<-sum((1-E)*((m*K*(1-R))/(R-(E*R)+m))*(1-(((1-E)*(1-R)*m)/((R-(E*R)+m)*r))), na.rm=T)
    HMPA-HBAU
  }
})
max(result)
min(result)
stopImplicitCluster()
#stopCluster(myCluster)
#pixellevelresult<-readRDS(file = "/Users/ren/Documents/CODES/FoodProvision/Results/foodprovision2July.rds")
#plot(pixellevelresult)
#quantile(result,0)
# Or you can pass in your own quantiles:
#quantile(result, q = c(0.125,0.25,0.375,0.5,0.625,0.75))
#Qresult<-result[result>0]
# Hand-chosen break points (MT) for the map legend: min, 0, then increasing
# positive thresholds up to the maximum pixel-level delta-H.
T1<-min(result)
T2<-0
T3<-100
T4<-500
T5<-1000
T6<-5000
T7<-10000
T8<-max(result)
#plot same as Juan
# Rasterize the per-cell delta-H onto a half-degree grid, reproject to Mollweide,
# and render the Fig. 2 map with tmap using the hand-chosen breaks T1..T8.
empty_raster <- raster(res = 0.5)
cells <- cellFromXY(empty_raster, as.matrix(CleanCoordmegacell[,1:2]))
empty_raster[cells] <- result
plot(empty_raster)
library(tmap)
library(leaflet)
ocean_low_res_moll<-raster::raster("/Users/ren/Documents/CODES/FoodProvision/ocean-low-res-moll.tiff")
crs(empty_raster) <- "+proj=longlat +datum=WGS84 +no_defs +ellps=WGS84 +towgs84=0,0,0"
maxValue(empty_raster)
# Breaks/labels/colors for the legend; colors are a reversed RdYlBu-style ramp.
z_pal <- list(breaks = c(T1,T2,T3,T4,T5,T6,T7,T8),
              labels = c(paste(round(T1),"-",T2), paste(T2,"-",T3), paste(T3,"-",T4), paste(T4,"-",T5), paste(T5,"-",T6), paste(T6,"-",T7), paste(T7,"-",round(T8))),
              colors = rev(c("#d73027","#fdae61","#fee090","#e0f3f8","#abd9e9","#74add1", "#4575b4")))
land_shp <-st_read("/Users/ren/Documents/CODES/FoodProvision/landshp_moll/spatial-datasets-land-land_50.shp")
ocean_low_res_moll<-raster::raster("/Users/ren/Documents/CODES/FoodProvision/ocean-low-res-moll.tiff")
caption<-"Pixel-level food provisioning potential"
land_shp_moll <- land_shp %>% st_transform(crs = projection(ocean_low_res_moll))
pixellevelfood<-empty_raster %>%
  raster::projectRaster(ocean_low_res_moll) %>%
  tmap::tm_shape()+
  tmap::tm_raster(title = expression(paste(Delta, "H (MT)")),
                  palette = z_pal$colors,
                  breaks = z_pal$breaks,
                  labels = z_pal$labels,
                  legend.is.portrait = T,
                  legend.reverse = T)+
  tmap::tm_shape(land_shp_moll)+
  tmap::tm_fill(col = "black", border.col = "transparent")+
  #tmap::tm_credits(caption) +
  tmap::tm_layout(#title = "Food provision potential (MT)",
                  #title.position = c("center", .95),
                  inner.margins = c(0.12, 0, 0.08, 0.04),
                  frame = F,
                  legend.position = c(.99, "center"))
pixellevelfood
png(file="/Users/ren/Documents/CODES/FoodProvision/Results/Fig2_PixelLevelFoodProv.png", width = 12, height = 6, units = 'in', res = 300)
pixellevelfood
dev.off()
###OK to restart R and load
MegaData<-readRDS(file = "/Users/ren/Documents/CODES/FoodProvision/MegaData.rds")
head(MegaData)
SpeciesInfo <- MegaData %>% select(stockid,Manage,SciName,MSYfin)
#Performance of per species as we close more of the ocean
# plotmarginal2 (computed elsewhere) holds per-stock delta-H at each MPA fraction.
plotmarginal2<-readRDS(file = "/Users/ren/Documents/CODES/FoodProvision/plotmarginal2.rds")
head(plotmarginal2)
colnames(plotmarginal2) <- c("stockid","MPA","deltaH")
plotmarginal3<-left_join(plotmarginal2,SpeciesInfo,by="stockid")
head(plotmarginal3)
ggplot(plotmarginal3, aes(x=MPA,y=deltaH))+geom_point()+facet_wrap( ~ Manage)
#I want to plot just the managed
# One page per managed stock: delta-H vs MPA fraction with the stock's MSY as a
# horizontal reference line.
manageddeltaH<-plotmarginal3 %>% filter(Manage==1)
pdf("/Users/ren/Documents/CODES/FoodProvision/Results/deltaHpersp.pdf")
for (i in unique(manageddeltaH$stockid)){
  manageddeltaHsp<-manageddeltaH %>% filter(stockid==i)
  plot(manageddeltaHsp$MPA,manageddeltaHsp$deltaH,main=paste(i,"/",manageddeltaHsp$SciName[1]),ylim=c(min(manageddeltaHsp$deltaH),manageddeltaHsp$MSYfin[1]))
  abline(h=manageddeltaHsp$MSYfin[1], col="blue")
  #ggplot(manageddeltaHsp, aes(x=MPA,y=deltaH))+geom_point()+labs(title=i)
}
dev.off()
# Same plots for the unmanaged stocks
unmanageddeltaH<-plotmarginal3 %>% filter(Manage==0)
pdf("/Users/ren/Documents/CODES/FoodProvision/Results/deltaHpersp_unmanaged.pdf")
for (i in unique(unmanageddeltaH$stockid)){
  unmanageddeltaHsp<-unmanageddeltaH %>% filter(stockid==i)
  plot(unmanageddeltaHsp$MPA,unmanageddeltaHsp$deltaH,main=paste(i,"/",unmanageddeltaHsp$SciName[1]),ylim=c(min(unmanageddeltaHsp$deltaH),unmanageddeltaHsp$MSYfin[1]))
  abline(h=unmanageddeltaHsp$MSYfin[1], col="blue")
  #ggplot(manageddeltaHsp, aes(x=MPA,y=deltaH))+geom_point()+labs(title=i)
}
dev.off()
#check Managed result with the Megadata file
MegaDataManaged<-MegaData %>% filter(Manage==1)
#--------------------------
####Network code -- PICK 1000 at a time
#there are 168,712 cells so 1:168 #per 1000 is 0.593%. Up to 10%, it will be 16.87
#files needed:
# Lookup table: rows = stocks, cols = cells; entry = fraction of a stock's K in that cell.
KprotectedPerCell_Library<-readRDS(file = "/Users/ren/Documents/CODES/FoodProvision/KprotectedPerCell_Library.rds")
##TRY new approach
numcell<-dim(Cleanmegacell)[1]
celltoiterateFULL<-1:numcell
celltoiterate<-celltoiterateFULL
MPAselect0<-matrix(0, nrow=numcell, ncol=1)
#TCleanmegacell<-t(Cleanmegacell)
PriorityAreas<-c()
NetworkResult<-vector()
###Compute spillover---PIXEL-LEVEL spillover #in case not yet computed
K<-MegaData$Kfin #k per species
m<-MegaData$m #mobility per species
#Harvest without MPA
R <-0 #MPA size
r<-MegaData$r
E<-MegaData$Efin
#----Harvest with no MPA, BAU, no climate
# BAU per-stock harvest; negative values floored to zero before summing.
hbau <-(1-E)*((r+E-1)/r)*K
hbau <-hbau*(hbau>0)
#table(hbau) #negative harvest should be zero harvest! check code tomorrow.
HBAU <- sum(hbau)#sum((1-E)*((r+E-1)/r)*K)
HBAU
# nmax greedy rounds of 1000 cells each; PerSpDeltaH stores per-stock delta-H
# (1342 stocks) after each round.
nmax<-20
PerSpDeltaH<-matrix(nrow=nmax,ncol=1342)
#head(PerSpDeltaH)
###this block is for implementing EEZ only selection
# Restrict the initial candidate set to cells inside EEZs.
head(EEZs_coord)
EEZs_coord$EEZ<-1
CleanCoordmegacell_EEZ<-left_join(CleanCoordmegacell,EEZs_coord,by=c("lon","lat"))
head(CleanCoordmegacell_EEZ)
dim(CleanCoordmegacell_EEZ)
#positions of 1s
EEZposition<-which(CleanCoordmegacell_EEZ$EEZ==1)
celltoiterate<-EEZposition#celltoiterateFULL
length(celltoiterate)
## Greedy MPA network design: in each of nmax rounds, evaluate every candidate
## cell's marginal change in global harvest (delta-H) if added to the current MPA
## set, lock in the 1000 best cells, and record the cumulative network result.
## Candidates start restricted to EEZ cells (celltoiterate set just above).
cores<-detectCores()
registerDoParallel(cores)
for (i in 1:nmax){
  # Fraction of each stock's K already inside the MPA set selected so far
  MPAselectPrev<-rowSums(KprotectedPerCell_Library[,which(MPAselect0==1),drop=FALSE])
  result <- foreach(iter = 1:length(celltoiterate), .combine = rbind) %dopar% {
    # Protected fraction per stock if this one candidate cell is added.
    # (The original also built an unused MPAselect vector here; removed.)
    R<-MPAselectPrev+KprotectedPerCell_Library[,celltoiterate[iter]]
    # Equilibrium harvest with MPA, per stock; negative harvests floored to zero
    hmpa<-na.omit((1-E)*((m*K*(1-R))/(R-(E*R)+m))*(1-(((1-E)*(1-R)*m)/((R-(E*R)+m)*r))))
    hmpa<-hmpa*(hmpa>0)
    HMPA<-sum(hmpa)
    HMPA-HBAU
  }
  #1. positions (within celltoiterate) of the 1000 cells with highest marginal delta-H
  myorderHightoLow<-order(-result)#positions
  cellselected<-myorderHightoLow[1:1000] #but these are the position of the temporary pixels, not our reference pixels
  #2. convert to global pixel ids
  Prioritycellselected<-celltoiterate[cellselected]
  #3. lock those 1000 cells into the MPA set and the running priority list
  MPAselect0[Prioritycellselected]<-1
  PriorityAreas<-append(PriorityAreas,Prioritycellselected)
  #4. recompute network-wide food provision with the enlarged MPA set
  MPAselect<-MPAselect0
  R<-rowSums(KprotectedPerCell_Library[,which(MPAselect==1),drop=FALSE])
  hmpa<-na.omit((1-E)*((m*K*(1-R))/(R-(E*R)+m))*(1-(((1-E)*(1-R)*m)/((R-(E*R)+m)*r))))
  hmpa<-hmpa*(hmpa>0)
  HMPA<-sum(hmpa)#sum((1-E)*((m*K*(1-R))/(R-(E*R)+m))*(1-(((1-E)*(1-R)*m)/((R-(E*R)+m)*r))), na.rm=T)
  # Per-stock and total delta-H for this round. NOTE(review): na.omit() can make
  # hmpa shorter than 1342 if any stock produces NA -- confirm lengths match.
  PerSpDeltaH[i,]<-hmpa-hbau
  NetworkResult[i]<-HMPA-HBAU
  # NOTE(review): this resets candidates to ALL unprotected cells, dropping the
  # EEZ-only restriction after round 1 -- confirm intended.
  celltoiterate<-celltoiterateFULL[-PriorityAreas]
  print(c(i,NetworkResult[i]))
  # Only remove objects actually created in this loop. The original rm() also
  # listed keeps/MPAselect2/TCleanmegacell2/Kprotected, which are never created
  # here and triggered a warning on every iteration.
  rm(result,myorderHightoLow,cellselected,Prioritycellselected,MPAselect,R,hmpa,HMPA)
}
plot(NetworkResult)
## Second pass of the same greedy selection (1000 cells per round), continuing
## from whatever MPA set / candidate list the previous loop left behind. The
## original carried large commented-out blocks of the pre-lookup-table matrix
## computation; those are dropped here -- the KprotectedPerCell_Library lookup
## is used throughout.
cores<-detectCores()
registerDoParallel(cores)
for (i in 1:nmax){
  # Fraction of each stock's K already inside the MPA set selected so far
  MPAselectPrev<-rowSums(KprotectedPerCell_Library[,which(MPAselect0==1),drop=FALSE])
  result <- foreach(iter = 1:length(celltoiterate), .combine = rbind) %dopar% {
    # Protected fraction per stock if this candidate cell is added
    R<-MPAselectPrev+KprotectedPerCell_Library[,celltoiterate[iter]]
    hmpa<-na.omit((1-E)*((m*K*(1-R))/(R-(E*R)+m))*(1-(((1-E)*(1-R)*m)/((R-(E*R)+m)*r))))
    hmpa<-hmpa*(hmpa>0)
    HMPA<-sum(hmpa)
    HMPA-HBAU
  }
  #1. positions (within celltoiterate) of the 1000 cells with highest marginal delta-H
  myorderHightoLow<-order(-result)#positions
  cellselected<-myorderHightoLow[1:1000] #but these are the position of the temporary pixels, not our reference pixels
  #2. convert to global pixel ids
  Prioritycellselected<-celltoiterate[cellselected]
  #3. lock those 1000 cells into the MPA set and the running priority list
  MPAselect0[Prioritycellselected]<-1
  PriorityAreas<-append(PriorityAreas,Prioritycellselected)
  #4. recompute network-wide food provision with the enlarged MPA set
  MPAselect<-MPAselect0
  R<-rowSums(KprotectedPerCell_Library[,which(MPAselect==1),drop=FALSE])
  hmpa<-na.omit((1-E)*((m*K*(1-R))/(R-(E*R)+m))*(1-(((1-E)*(1-R)*m)/((R-(E*R)+m)*r))))
  hmpa<-hmpa*(hmpa>0)
  HMPA<-sum(hmpa)#sum((1-E)*((m*K*(1-R))/(R-(E*R)+m))*(1-(((1-E)*(1-R)*m)/((R-(E*R)+m)*r))), na.rm=T)
  # Per-stock and total delta-H for this round (see NOTE on na.omit length above)
  PerSpDeltaH[i,]<-hmpa-hbau
  NetworkResult[i]<-HMPA-HBAU
  # Reset candidates to all cells not yet selected
  celltoiterate<-celltoiterateFULL[-PriorityAreas]
  print(c(i,NetworkResult[i]))
  # Only remove objects actually created in this loop. The original rm() also
  # listed keeps/MPAselect2/TCleanmegacell2/Kprotected, which exist only in the
  # commented-out variant and triggered a warning on every iteration.
  rm(result,myorderHightoLow,cellselected,Prioritycellselected,MPAselect,R,hmpa,HMPA)
}
plot(NetworkResult)
stopImplicitCluster()
head(PerSpDeltaH)
## First differences down the rows of PerSpDeltaH: row i minus row i-1, with
## row 1 differenced against zero (so row i = delta-H gained in round i alone).
## The original indexed with PerSpDeltaH[1:nmax-1,], which parses as
## (1:nmax)-1 = 0:(nmax-1) and only worked because R silently drops index 0;
## seq_len() makes the intent explicit and drop=FALSE keeps it a matrix.
DeltaPerSpDeltaH<-PerSpDeltaH[seq_len(nmax-1), , drop=FALSE]
DeltaPerSpDeltaH<-rbind(0,DeltaPerSpDeltaH)
DeltaPerSpDeltaH<-PerSpDeltaH-DeltaPerSpDeltaH
DeltaPerSpDeltaH2<-as.data.frame(DeltaPerSpDeltaH)
#melt this -- columns named by stock index 1..1342, ID = round number
colnames(DeltaPerSpDeltaH2)<-1:1342
DeltaPerSpDeltaH2$ID<-1:nmax
#combine with megadata
# Melt the per-round, per-stock delta-H matrix to long format and attach stock
# attributes (Manage flag, scientific name, mobility m) for plotting.
head(MegaData)
LongDeltaHpersp <- melt(DeltaPerSpDeltaH2, id=c("ID"))
head(LongDeltaHpersp)
MegaCategory<-MegaData %>% select(Manage,stockid,SciName,m)
# `variable` from melt() is the stock index as a factor; replicate it here as the join key.
MegaCategory$variable<-as.factor(1:1342)
head(MegaCategory)
LongDeltaHpersp2<- left_join(LongDeltaHpersp,MegaCategory,by="variable")
LongDeltaHpersp2$Manage<-as.factor(LongDeltaHpersp2$Manage)
LongDeltaHpersp2$m<-as.factor(LongDeltaHpersp2$m)
head(LongDeltaHpersp2)
#this is one of the plots I want
ggplot(LongDeltaHpersp2, aes(x=ID,y=value,colour=m,shape=Manage)) + geom_point(size=5) +
  scale_shape(solid = FALSE)
#add facet_wrap by management status
ggplot(LongDeltaHpersp2, aes(x=ID,y=value,colour=m,shape=Manage)) + geom_point(size=5) +
  scale_shape(solid = FALSE) +facet_wrap(~Manage)
#save delta H per species --- also fun!
# One PDF page per MANAGED stock: per-round delta-H (value) vs round (ID).
manageddeltaH<-LongDeltaHpersp2 %>% filter(Manage==1)
pdf("/Users/ren/Documents/CODES/FoodProvision/Results/deltaHpersp_managed.pdf")
for (i in unique(manageddeltaH$stockid)){
  manageddeltaHsp<-manageddeltaH %>% filter(stockid==i)
  plot(manageddeltaHsp$ID,manageddeltaHsp$value,main=paste(i,"/",manageddeltaHsp$SciName[1]))
  #abline(h=manageddeltaHsp$MSYfin[1], col="blue")
  #ggplot(manageddeltaHsp, aes(x=MPA,y=deltaH))+geom_point()+labs(title=i)
}
dev.off()
# Same plots for the UNMANAGED stocks
unmanageddeltaH<-LongDeltaHpersp2 %>% filter(Manage==0)
pdf("/Users/ren/Documents/CODES/FoodProvision/Results/deltaHpersp_unmanaged.pdf")
for (i in unique(unmanageddeltaH$stockid)){
  unmanageddeltaHsp<-unmanageddeltaH %>% filter(stockid==i)
  plot(unmanageddeltaHsp$ID,unmanageddeltaHsp$value,main=paste(i,"/",unmanageddeltaHsp$SciName[1]))
}
dev.off()
#check Managed result with the Megadata file
MegaDataManaged<-MegaData %>% filter(Manage==1)
###CALCULATE PIXEL-LEVEL CATCH for JUAN
###
# Re-initialize the greedy-selection state (all cells as candidates, empty MPA
# set) and recompute the zero-floored BAU baseline harvest.
numcell<-dim(Cleanmegacell)[1]
celltoiterateFULL<-1:numcell
celltoiterate<-celltoiterateFULL
MPAselect0<-matrix(0, nrow=numcell, ncol=1)
TCleanmegacell<-t(Cleanmegacell)
PriorityAreas<-c()
NetworkResult<-vector()
###Compute spillover---PIXEL-LEVEL spillover #in case not yet computed
K<-MegaData$Kfin #k per species
m<-MegaData$m #mobility per species
#Harvest without MPA
R <-0 #MPA size
r<-MegaData$r
E<-MegaData$Efin
#----Harvest with no MPA, BAU, no climate
hbau <-(1-E)*((r+E-1)/r)*K
hbau <-hbau*(hbau>0) # negative (infeasible) harvests floored to zero
table(hbau) #negative harvest should be zero harvest! check code tomorrow.
HBAU <- sum(hbau)#sum((1-E)*((r+E-1)/r)*K)
HBAU
# Greedy selection variant: 20 rounds of 100 cells each (vs 1000 above), using
# the explicit matrix product instead of the precomputed lookup table.
cores<-detectCores()
registerDoParallel(cores)
for (i in 1:20){
  #be sure to make celltoiterate adaptive
  result <- foreach(iter = 1:length(celltoiterate), .combine = rbind) %dopar% {
    MPAselect<-MPAselect0
    MPAselect[celltoiterate[iter]]<-1
    keeps<-which(MPAselect==1)
    MPAselect2<-as.matrix(MPAselect[keeps,])
    TCleanmegacell2<-as.matrix(TCleanmegacell[,keeps])
    # Fraction of each stock's K inside the tentative MPA set
    Kprotected<-as.data.frame(TCleanmegacell2 %*% MPAselect2)
    R<-Kprotected$V1
    hmpa<-na.omit((1-E)*((m*K*(1-R))/(R-(E*R)+m))*(1-(((1-E)*(1-R)*m)/((R-(E*R)+m)*r))))
    hmpa<-hmpa*(hmpa>0)
    HMPA<-sum(hmpa)
    HMPA-HBAU
  }
  #1. find the location of the top 100 highest pixel-level gains
  myorderHightoLow<-order(-result)#positions
  cellselected<-myorderHightoLow[1:100] #but these are the position of the temporary pixels, not our reference pixels
  #convert coord to scale comparable to priority areas
  Prioritycellselected<-celltoiterate[cellselected]
  #plot(result[myorderHightoLow][1:1000])#plot values for demo
  #3. block those additional 100 in MPAselect
  MPAselect0[Prioritycellselected]<-1
  #3. save them for our priority areas
  PriorityAreas<-append(PriorityAreas,Prioritycellselected)
  #4. Calculate food prov of the additional 100 cells
  MPAselect<-MPAselect0
  keeps<-which(MPAselect==1)
  MPAselect2<-as.matrix(MPAselect[keeps,])
  TCleanmegacell2<-as.matrix(TCleanmegacell[,keeps])
  Kprotected<-as.data.frame(TCleanmegacell2 %*% MPAselect2)
  #Kprotected<-as.data.frame(TCleanmegacell %*% MPAselect)
  R<-Kprotected$V1
  hmpa<-na.omit((1-E)*((m*K*(1-R))/(R-(E*R)+m))*(1-(((1-E)*(1-R)*m)/((R-(E*R)+m)*r))))
  hmpa<-hmpa*(hmpa>0)
  HMPA<-sum(hmpa)#sum((1-E)*((m*K*(1-R))/(R-(E*R)+m))*(1-(((1-E)*(1-R)*m)/((R-(E*R)+m)*r))), na.rm=T)
  HMPA-HBAU
  #save result
  NetworkResult[i]<-HMPA-HBAU
  #pass this to the top: remaining candidates are all not-yet-selected cells
  celltoiterate<-celltoiterateFULL[-PriorityAreas]
  print(c(i,NetworkResult[i]))
  rm(result,myorderHightoLow,cellselected,Prioritycellselected, MPAselect,keeps,MPAselect2,TCleanmegacell2,Kprotected,R,hmpa,HMPA)
}
plot(NetworkResult)
stopImplicitCluster()
# Benefit curve: cumulative change in catch (million MT) vs % of ocean protected.
# Each loop round above added 100 cells, so round i covers 100*i of the 168,712
# cells; (i/168712)*100*100 converts that to percent of the ocean.
BenefitCurve<-as.data.frame(NetworkResult)/1000000
BenefitCurve$MPA <- (seq.int(nrow(BenefitCurve))/168712)*100*100
head(BenefitCurve)
# Anchor the curve at the origin (0% protected, 0 change)
zerozero<-data.frame(0,0)
names(zerozero)<-c("NetworkResult","MPA")
BenefitCurve<-rbind(BenefitCurve,zerozero)
p <- ggplot(BenefitCurve, aes(MPA, NetworkResult))
benefitplot<-p + geom_point(shape = 21, colour = "black", fill = "white", size = 2, stroke = 2)+
  labs(x="% ocean protected",y="Change in catch (million metric tons)")
benefitplot
#png(file="~/Food provision/benefitfunctionFood.png", width = 6, height = 4, units = 'in', res = 300)
#benefitplot
#dev.off()
#plot priority areas
plot(PriorityAreas)
saveRDS(PriorityAreas, file = "/Users/ren/Documents/CODES/FoodProvision/PriorityAreas.rds")
##RAM Legacy database here
library(devtools)
#install_github("ropensci/ramlegacy")
library(ramlegacy)
#download_ramlegacy(version="4.44") #downloading the latest version, 4.44
load_ramlegacy()
RAMDATA<-load_ramlegacy(tables = "timeseries_values_views")
head(RAMDATA$timeseries_values_views)
RAMDATA2<-RAMDATA$timeseries_values_views
head(RAMDATA2)
colnames(RAMDATA2)
RAMDATA3<-RAMDATA2 %>% select(stockid,year,ERbest,ER)
#remove entries with no ER values, then keep each stock's most recent (terminal) year
terminalER<-RAMDATA3 %>% filter(! (ER=='NA')) %>% group_by(stockid) %>% slice(which.max(year))
#terminalERtest<-RAMDATA2 %>% filter(! (ER=='NA')) %>% group_by(stockid) %>% slice(which.max(year))
#head(terminalERtest)
head(terminalER)
plot(terminalER$ERbest,terminalER$ER)
hist(terminalER$year)
table(terminalER$ER)
terminalER$stockid ##terminalER is the file containing the stock assessments with terminal Exploitation Rate
#get our stockid then match against the managed-stock table (Mng2, built earlier)
head(Mng2)
MatchedER<-left_join(Mng2,terminalER,by="stockid")
head(MatchedER)
hist(MatchedER$year)
hist(MatchedER$ER)
# Zero total difference => ERbest and ER columns are identical where both present
sum(MatchedER$ERbest-MatchedER$ER,na.rm=T) #ok. This proves that ERbest and ER are the same
MatchedERHOST<-MatchedER %>% select(stockid,year,ER) %>% filter(! (ER=='NA'))
head(MatchedERHOST)
#Fill gaps
#1. filter entries with no match (no terminal ER found in RAM)
head(MatchedER)
Gap1<-MatchedER %>% filter(is.na(ERbest)==T)
head(Gap1)
dim(Gap1)
#2. Use RAMDATA2 to get matchings
# Derive ER = total catch / total biomass (TCbest/TB) at each stock's terminal year
head(RAMDATA2)
ematch1<-RAMDATA2 %>% filter(stockid %in% Gap1$stockid) %>% filter(TCbest>=0) %>% filter(TB>=0) %>% group_by(stockid) %>% slice(which.max(year)) %>% select(stockid,year,TCbest,TB)
ematch1$ER<-ematch1$TCbest/ematch1$TB
ematch1 <- ematch1 %>% select(stockid,year,ER)
head(ematch1)
MatchedERHOST2 <- rbind(as.data.frame(MatchedERHOST),as.data.frame(ematch1))
#3.matching based on SSB: ER = TCbest/SSB for stocks still missing
ematch2<-RAMDATA2 %>% filter(stockid %in% Gap1$stockid) %>% filter(TCbest>=0) %>% filter(SSB>=0) %>% group_by(stockid) %>% slice(which.max(year)) %>% select(stockid,year,TCbest,SSB)
ematch2$ER<-ematch2$TCbest/ematch2$SSB
ematch2<-ematch2 %>% select(stockid,year,ER) %>% filter(! stockid %in% MatchedERHOST2$stockid)
head(ematch2)
MatchedERHOST3 <- rbind(as.data.frame(MatchedERHOST2),as.data.frame(ematch2))
head(MatchedERHOST3)
#save MatchedERHOST3 and load in google doc for manual entries
write.csv(MatchedERHOST3, file = "/Users/ren/Documents/CODES/FoodProvision/MatchedER_PNAS.csv")
#what are the stockid with no match?
Gap2<-Gap1 %>% filter(! stockid %in% MatchedERHOST3$stockid)
head(Gap2)
# Stocks known to have a match elsewhere; exclude them from the no-match listing
withmatch<-c("DOYSFS","HERR4RFA","HERR4RSP","HERRPWS","HERRSITKA")
nomatchv44<-RAMDATA2 %>% filter(stockid %in% Gap2$stockid) %>% filter(! stockid %in% withmatch) %>% select(stockid,year,TBbest,TCbest,ERbest,TB,SSB,TC,ER)# %>% filter(year>=2000)
###NEXT is to load the RAM version consistent with Chris Free's shapefiles
download_ramlegacy(version = "4.3")
load_ramlegacy(version="4.3")
RAMDATA43<-load_ramlegacy(tables = "timeseries_values_views")
head(RAMDATA43$timeseries_values_views)
RAMDATA2_43<-RAMDATA43$timeseries_values_views
head(RAMDATA2_43)
Testme<-RAMDATA2_43 %>% filter(stockid %in% Gap2$stockid) %>% filter(! stockid %in% withmatch) %>% select(stockid,year,TBbest,TCbest,ERbest,TB,SSB,TC,ER) %>% filter(year>=2000)
# NOTE(review): the bare symbol below is undefined, so source()-ing this file
# stops here with "object 'xxxxxxxxxx' not found" -- presumably a deliberate
# interactive breakpoint; confirm before removing.
xxxxxxxxxx
#read Chris Free metadata file of the management layer
ChrisMetadata<-read.csv("/Users/ren/Documents/CODES/FoodProvision/ramldb_v3.8_stock_boundary_table_v2_formatted.csv")
head(ChrisMetadata)
ChrisMetadata<-ChrisMetadata %>% select(stockid,assessid)
# Attach terminal exploitation rates to the stock-boundary metadata
MatchedER<-left_join(ChrisMetadata,terminalER,by="stockid")
head(MatchedER)
MatchedER
#save this and load this to google sheet?
sum(MatchedER$ERbest-MatchedER$ER,na.rm=T) #ok. This proves that ERbest and ER are the same
ManagementLayer<-readRDS(file = "/Users/ren/Documents/CODES/FoodProvision/ManagementLayer.rds")
head(ManagementLayer)
unique(ManagementLayer$species)
#Below is for adding the stock ID in Chris Free file
#match species list from Chris Free Species list
###SHAPEFILES
# Enumerate the stock-boundary shapefile layer names (strip ".shp" and any
# duplicate entries coming from sidecar files).
shapefolder <- list.files(path="/Users/ren/Documents/CODES/FoodProvision/ramldb_boundaries",pattern="*.shp")
typeof(shapefolder)
head(shapefolder)
shapenames<- as.data.frame(shapefolder) %>% separate(shapefolder,c("filename", "rest"),sep=".shp", remove=TRUE) %>% select (-c("rest"))
shapenames<- unique(shapenames) #there are duplicates from .xml sidecars and we want a character vector
shapenames<- as.character(shapenames$filename)
head(shapenames)
shapenames
#This is for regenerating the management layer
# ref_raster<- raster("/Users/ren/Documents/CODES/FoodProvision/referenceraster")#i got this from the version 1 code
# datamanagelist<-list()
# count=0
# for (i in shapenames){
# count<-count+1
# #i=shapenames[1]
# shape1 <- read_sf(dsn = "/Users/ren/Documents/CODES/FoodProvision/ramldb_boundaries", layer = i)
# ##this is how you will get the species name
# #shape1$species
# #shape1$stockid for the stock id
#
# # #this is for moll transform. I will work on
# # datashape1<-shape1 %>%
# # sf::st_wrap_dateline(options = c("WRAPDATELINE=YES", "DATELINEOFFSET=180"), quiet = TRUE) %>%
# # sf::st_transform("+proj=moll") %>%
# # raster::rasterize(ocean_low_res_moll,getCover = T) %>%
# # raster::as.data.frame(xy = T)
# #this is for regular, wgs
# datashape1<-shape1 %>%
# sf::st_wrap_dateline(options = c("WRAPDATELINE=YES", "DATELINEOFFSET=180"), quiet = TRUE) %>%
# sf::st_transform("+proj=longlat +datum=WGS84") %>%
# raster::rasterize(ref_raster,getCover = T) %>%
# raster::as.data.frame(xy = T)
# #plot(datashape1)
#
# datafilter<-datashape1 %>% filter(layer>0) %>% mutate(species=shape1$species,stockid=shape1$stockid)
# #manageareas<-rbind(manageareas,datafilter)
# datamanagelist[[count]]<-datafilter
# print(c(i,count))
# }
#
# ManagementLayer<-dplyr::bind_rows(datamanagelist)
# table(ManagementLayer$species)
# head(ManagementLayer)
# saveRDS(ManagementLayer, file = "/Users/ren/Documents/CODES/FoodProvision/ManagementLayerv3.rds")
###- use ManagementLayerv3. It has an additional entry, i.e., species and stockid
ManagementLayer<-readRDS(file = "/Users/ren/Documents/CODES/FoodProvision/ManagementLayerv3.rds")
# One row per (species, stockid) pair present in the rasterized management layer
stocklistChrisF<-ManagementLayer %>% group_by(species, stockid) %>%summarize(n=n())
ChrisMetadata<-read.csv("/Users/ren/Documents/CODES/FoodProvision/ramldb_v3.8_stock_boundary_table_v2_formatted.csv")
ChrisMetadata<-ChrisMetadata %>% select(stockid,assessid)
stocklistChrisF<-left_join(stocklistChrisF,ChrisMetadata,by="stockid")
MatchedER<-left_join(stocklistChrisF,terminalER,by="stockid") %>% select (-c(n,ERbest)) %>% filter(is.na(species)==F)
head(MatchedER)
MatchedER
dim(MatchedER)
# Fraction of stocks with no terminal-ER year matched
dim(MatchedER %>% filter(is.na(year)==T))[1]/dim(MatchedER)[1]
table(MatchedER$ER)
write.csv(MatchedER, file = "/Users/ren/Documents/CODES/FoodProvision/ERwithStockAssessmentTEST.csv")
##This is the file that we are using!!!
#Isolate stockid with no matching terminal ER ---
NomatchER<-MatchedER %>% filter(is.na(ER)==T)
#match with costello data
# NOTE(review): terminaldataCostello is defined further DOWN in this script
# (from RAMonly, which is not defined in this chunk at all) -- these lines rely
# on state from a previous interactive run; confirm execution order.
NomatchER_costello<-left_join(NomatchER,terminaldataCostello,by="assessid")
#NomatchER_costellowithRAM295<-left_join(NomatchER_costello,RAM295matching3,by="assessid") ##not useful information
NomatchER_costello_Nomatch<-NomatchER_costello%>% filter(is.na(Biomass)==T)
#RAMDATA2 is the rawest Assessment data
head(RAMDATA2)
RAMDATA_nomatch<-RAMDATA2 %>% filter(stockid %in% NomatchER_costello_Nomatch$stockid)
#Check costello DB #costello et al. used v.2.95
download_ramlegacy(version="2.95") #downloading RAM v2.95 (version used by Costello et al.)
load_ramlegacy(version="2.95")
RAM295<-load_ramlegacy(tables = "assessment")$assessment %>% select(assessid,stockid)
head(RAM295)
RAM295matching1<-stocklistChrisF %>% select(-c(assessid,n))
RAM295matching2<-left_join(RAM295matching1,RAM295,by="stockid")
RAM295matching3<-left_join(RAM295matching2,terminaldataCostello,by="assessid")
head(RAMonly)
table(RAMonly$Policy)
table(RAMonly$Scenario)
# Terminal-year catch/biomass per assessment from the Costello "Historic" policy runs
subRAMCostello<-RAMonly %>% filter(Policy=="Historic") %>% select(IdOrig,SciName,CatchShare,Year,Catch,Biomass)
terminaldataCostello<-subRAMCostello %>% group_by(IdOrig) %>% slice(which.max(Year))
head(terminaldataCostello)
colnames(terminaldataCostello)[colnames(terminaldataCostello)=="IdOrig"] <- "assessid"
head(ChrisMetadata)
mergedCostelloFree<-left_join(ChrisMetadata,terminaldataCostello, by="assessid")
head(mergedCostelloFree)
write.csv(mergedCostelloFree, file = "/Users/ren/Documents/CODES/FoodProvision/ER_CostelloFree.csv")
# ##PARALLEL VERSION OF ABOVE (unstable)
# cores<-detectCores()
# registerDoParallel(cores)
# system.time({
# #ManagementLayer <- foreach(i=1:length(shapenames), .combine = rbind) %dopar% {
# ManagementLayer <- foreach(i=199:length(shapenames), .combine = rbind) %dopar% {
# shape1 <- read_sf(dsn = "/Users/ren/Documents/CODES/FoodProvision/ramldb_boundaries", layer = shapenames[i])
# datashape1<-shape1 %>%
# sf::st_wrap_dateline(options = c("WRAPDATELINE=YES", "DATELINEOFFSET=180"), quiet = TRUE) %>%
# sf::st_transform("+proj=longlat +datum=WGS84") %>%
# raster::rasterize(ref_raster,getCover = T) %>%
# raster::as.data.frame(xy = T)
#
# datafilter<-datashape1 %>% filter(layer>0) %>% mutate(species=shape1$species,stockid=shape1$stockid)
# datafilter
# }
# })
# head(ManagementLayer)
# dim(ManagementLayer)
# stopImplicitCluster()
#save rasterized files
# NOTE(review): this section reads the *v2* management layer while the
# section above uses v3 -- presumably an older snapshot kept for comparison.
saveRDS(ManagementLayer, file = "/Users/ren/Documents/CODES/FoodProvision/ManagementLayerv2.rds")
ManagementLayer<-readRDS(file = "/Users/ren/Documents/CODES/FoodProvision/ManagementLayerv2.rds")
# Unique RAM stock ids covered by the rasterized boundaries.
StockIdinChrisFreeDB<-unique(ManagementLayer$stockid)
#xxxxxx-----------------------
#remove coords for now
# Aqua4: one row per ocean pixel; columns 1-3 are id/lon/lat, the remaining
# columns are per-species carrying capacity (K) in that pixel.
head(Aqua4)
combined_df<-Aqua4[,-c(1:3)]
combined_df[is.na(combined_df)] <- 0
head(combined_df)
#total carrying capacity per cell
total_K_cell<-rowSums(combined_df)
head(total_K_cell)
#total K per species. Question: is this K
total_K_species<-colSums(combined_df)
head(total_K_species)
#Normalize K. Divide elements by the total K of the species.
# After this, each species column sums to 1: an entry is the fraction of the
# species' global K found in that pixel.
Norm_K<-t(t(combined_df)/total_K_species)
#this is for checking if the answer is right
colSums(Norm_K)
#ok the answer is right
#Let us just work on non-zero data and see if it will work if we plot it
ksum_with_coords <- cbind(Aqua4[,c(1:3)], TotalK=rowSums(Norm_K)) #this is merging coords and sum of K per pixel
head(ksum_with_coords)
##remove total K = 0
#ksum_with_coords_filter<-ksum_with_coords %>% filter(TotalK>0)
#dim(ksum_with_coords_filter)
#>>this is the variable that contains the data, remove K_sum==0
k_with_coords <- cbind(ksum_with_coords, Norm_K)
dim(k_with_coords)
#I will make two files in case I will need the coordinates
# Norm_K_filter0 keeps id/lon/lat/TotalK; Norm_K_filter is the bare
# pixel x species matrix of normalized K (only pixels with some K).
Norm_K_filter0<-k_with_coords %>% filter(TotalK>0)
head(Norm_K_filter0)
#remove id, lon, lat, and TotalK
Norm_K_filter<-Norm_K_filter0[,-c(1:4)]
head(Norm_K_filter)
dim(Norm_K_filter)
plot(Norm_K_filter0[,1])
#MPA selection algorithm
numcell<-nrow(Norm_K_filter) #just choose the raw data - no coordinates
#10% protection for now
# NOTE(review): despite the comment above, the threshold below selects
# ~50% of cells at random (runif < 0.5), not 10% -- confirm intent.
MPAselect<-matrix(round(ifelse(runif(numcell,min=0,max=1)<0.5,1,0)), nrow=numcell, ncol=1)
dim(MPAselect)
dim(Norm_K_filter)
# this is how much proportion of the K we are protecting per species
Kprotected<-t(t(Norm_K_filter) %*% MPAselect)
Kprotected
dim(Kprotected)
Kprotected<-as.data.frame(t(Kprotected))
colnames(Kprotected)[which(names(Kprotected)=="V1")]<-"Kprotected"
head(Kprotected)
#Now, if we close a pixel, that should give us a food provision value
#assumptions for now
#1. all open access - will simplify things for now
#2. global catch under BAU will be a single value (no climate for now) - i can compute this
#3. delta_catch will be network-level catch - catch under BAU
# Assemble the per-species main table: protected K fraction + species
# metadata (from `include`) + growth rate r (FishLife) + mobility m.
MainData<-Kprotected
MainData$SpeciesID<-row.names(MainData)
MainData<-left_join(MainData,include,by="SpeciesID")
biolparams<-fishlife2(as.character(MainData$SciName))
biolparams<-biolparams %>% select(species,r) %>% rename(SciName=species)
head(biolparams)
MainData<-left_join(MainData,biolparams,by="SciName")
head(MainData)
#this code just save the species list for collection of m
head(spnamelookup)
SpeciesNamesCostello<-MainData %>% select(SpeciesID,SciName)
FBname<-spnamelookup %>% select(SPECIESID,family,FBname)
colnames(FBname)[which(names(FBname) == "SPECIESID")] <- "SpeciesID"
SpeciesNamesCostello<-left_join(SpeciesNamesCostello,FBname,by="SpeciesID")
head(SpeciesNamesCostello)
write.csv(SpeciesNamesCostello, file = "/Users/ren/Documents/CODES/FoodProvision/SpeciesNamesCostello.csv")
#load file with m estimate
# Hand-collected mobility classes (1/2/3) are recoded to m = 0.1/0.5/0.9.
mfile<-read.csv("/Users/ren/Documents/CODES/FoodProvision/SpeciesNamesCostello_m.csv")
mfile<-mfile %>% mutate(m=replace(m,m==1,0.1),
                        m=replace(m,m==2,0.5),
                        m=replace(m,m==3,0.9))
head(mfile)
# NOTE(review): positional assignment -- assumes mfile rows are in the same
# order as MainData (both derive from SpeciesNamesCostello). Verify.
MainData$m<-mfile$m
###----xxx-----xxx-----xxx---PIXEL-LEVEL spillover
# For each pixel: close that single pixel and compute the network-level
# change in equilibrium harvest relative to business-as-usual (BAU).
dH<-matrix(0, nrow=numcell, ncol=1)
K<-MainData$K #k per species
m<-MainData$m #mobility per species
#Harvest without MPA
R <-0 #MPA size
r<-MainData$r
E <-1-(0.9*r) #escapement, amount retained
###if E is negative, make it zero --- meaning you harvest more. negative is also ok meaning you are borrowing from the growth.
#E <- (E>0)*E
#hist(E)
# Species-optimal reserve fraction given escapement and mobility.
Ropt<-((m*r) + (((2*E)-2)*m)) / (((E-1)*r)+(((2*E)-2)*m))
MainData$E<-E
MainData$Ropt<-Ropt
hist(MainData$Ropt,xlab="Ropt",main="")
#Given Ropt, what is Hopt???
# Equilibrium harvest at the species-optimal reserve size, minus the
# no-reserve harvest (second term).
Hopt<-((1-E)*((m*K*(1-Ropt))/(Ropt-(E*Ropt)+m))*(1-(((1-E)*(1-Ropt)*m)/((Ropt-(E*Ropt)+m)*r)))) - ((1-E)*((r+E-1)/r)*K)
#----Harvest with no MPA, BAU, no climate
HBAU <- sum((1-E)*((r+E-1)/r)*K)
MPAselect0<-matrix(0, nrow=numcell, ncol=1)
#Norm_K_filter: row is species name, col is pixel id, content is normaized K
# (i.e., after the transpose below, TNorm_K_filter is species x pixels).
TNorm_K_filter<-t(Norm_K_filter)
#saveRDS(TNorm_K_filter, file = "/Users/ren/Documents/CODES/FoodProvision/fTNorm_K_filter.rds")
#saveRDS(MainData, file = "/Users/ren/Documents/CODES/FoodProvision/MainData.rds")
head(MPAselect0)
#try parallel programming
#install.packages("doParallel")
cores<-detectCores()
registerDoParallel(cores)
# Demo run over pixels 1000..1012 only: protect one pixel at a time and
# record the harvest gain (HMPA - HBAU); the full map is read from disk below.
system.time({
  result <- foreach(iter = 1000:1012, .combine = rbind) %dopar% {
    MPAselect<-MPAselect0
    MPAselect[iter]<-1
    # Fraction of each species' K protected by closing this one pixel.
    Kprotected<-as.data.frame(TNorm_K_filter %*% MPAselect)
    R<-Kprotected$V1
    HMPA<-sum((1-E)*((m*K*(1-R))/(R-(E*R)+m))*(1-(((1-E)*(1-R)*m)/((R-(E*R)+m)*r))))
    HMPA-HBAU
  }
})
result
max(result)
stopImplicitCluster()
#stopCluster(myCluster)
# Precomputed full-map spillover results saved from an earlier run.
pixellevelspill<-readRDS(file = "/Users/ren/Documents/CODES/FoodProvision/pixellevelspill.rds")
head(pixellevelspill)
plot(pixellevelspill)
#numcell is 168712. 1% is 1687, 0.5% is 844.
head(Norm_K_filter0[,2:3])
##########################################################################
##----with Management DRAFT
# Overlay of assessed (managed) stocks: counts of assessed stocks per pixel,
# and overlap between managed species and the modeled species list.
ManagementLayer<-readRDS(file = "/Users/ren/Documents/CODES/FoodProvision/ManagementLayerData.rds")
head(ManagementLayer)
# Number of assessed stocks covering each (x, y) pixel.
managementmap<-ManagementLayer %>% group_by(x,y) %>% summarise(nmanage=n())
head(managementmap)
xyz<-rasterFromXYZ(managementmap)
plot(xyz,main="Management Layer (number of assessed stocks per pixel)")
#Norm_K_filter0 contains the lon lat
#for now, just check the species
speciesmanaged<-unique(ManagementLayer$species)
head(include)
# How many managed species appear in the modeled species list.
sum((speciesmanaged %in% include$SciName)*1)
#188 species are managed
# Rebuild the stock-count raster on a fixed 0.5-degree global grid.
empty_raster <- raster(res = 0.5)
cells <- cellFromXY(empty_raster, as.matrix(managementmap[,1:2]))
empty_raster[cells] <- managementmap$nmanage
head(empty_raster)
plot(PlotFunction(empty_raster),main="Food provision potential (MT)",axes=F,box=F)
##################################
#plot for the slides
# Summary plots of the species-level model inputs (K, r, mobility m).
head(include)
options("scipen"=100, "digits"=4)
# Carrying capacity per species on a log scale.
barplot(include$K,log="y",main="K distribution",
        xlab="", ylab="K (MT)")
# ggplot(data=barplotme, aes(x=SciName, y=K)) +
#   geom_bar(stat="identity", fill="steelblue")+
#   theme_minimal()
#plot r
head(MainData)
options("scipen"=100, "digits"=4)
# FIX: the original used MainData[order(-r),], which silently relied on the
# *global* variable r (assigned MainData$r earlier in the script) because
# data.frame `[` indexing does not look up columns by name. Reference the
# column explicitly so the sort no longer depends on session state.
rplotme<-MainData[order(-MainData$r),]
barplot(rplotme$r, main="r distribution")
#plot family and number of species
head(mfile)
familysummary_costello<-mfile %>% group_by(family) %>% summarize(n=n())
# FIX: sort(-n) printed *negated* counts; sort descending instead.
sort(familysummary_costello$n, decreasing = TRUE)
write.csv(familysummary_costello,"/Users/ren/Documents/CODES/FoodProvision/Results/familysummary_costello.csv")
# Distribution of mobility classes across species.
ggplot(mfile) + geom_bar(aes(m))
#######################
## Toy model: equilibrium harvest for a single managed stock with an MPA.
##
## compute_Bout() evaluates the closed-form equilibrium biomass available
## outside the reserve. The polynomial was derived symbolically elsewhere and
## is kept verbatim from the original script; it was previously copy-pasted
## inline four times, so it now lives in ONE helper.
##
## Parameters:
##   E: escapement (fraction of outside biomass NOT harvested each step)
##   K: carrying capacity
##   R: fraction of the area inside the MPA
##   a: survival term (kept at 1 throughout this script)
##   m: fraction of biomass moving across the reserve boundary
##   r: intrinsic growth rate
## NOTE(review): at E = 0 with R = 0 both numerator and denominator are 0,
## so the expression yields NaN -- same as the original inline formula.
compute_Bout <- function(E, K, R, a, m, r) {
  numerator <-
    E*K + 2*E^2*K*a^2 - E^2*K*a^3 - 3*E*K*R - 2*E*K*a + 3*E*K*R^2 - E*K*R^3 +
    E*K*a^2 - E^2*K*a - E^2*K*r - 3*E*K*R*a^2 - 6*E*K*R^2*a + 3*E^2*K*R*a +
    2*E*K*R^3*a + 3*E*K*R^2*r + 4*E^2*K*R*r - 3*E*K*R^3*r + E*K*R^4*r -
    2*E*K*a^2*m + 2*E^2*K*a*r + 3*E*K*R^2*a^2 - 6*E^2*K*R*a^2 - 3*E^2*K*R^2*a -
    E*K*R^3*a^2 + 3*E^2*K*R*a^3 + E^2*K*R^3*a - 6*E^2*K*R^2*r + 4*E^2*K*R^3*r -
    E^2*K*R^4*r + E*K*a^2*m^2 - 2*E^2*K*a^2*m + 2*E^2*K*a^3*m - E^2*K*a^2*r +
    6*E*K*R*a - E*K*R*r + 2*E*K*a*m + 6*E^2*K*R^2*a^2 - 3*E^2*K*R^2*a^3 -
    2*E^2*K*R^3*a^2 + E^2*K*R^3*a^3 - E^2*K*a^3*m^2 - 5*E*K*R*a*m + E*K*R*a*r +
    E*K*R^2*a^2*m^2 + E^2*K*R*a^2*m^2 + E^2*K*R*a^3*m^2 + 2*E^2*K*R^2*a^3*m -
    E^2*K*R^3*a^2*m - 3*E^2*K*R^2*a^2*r + E^2*K*R^3*a^2*r - E^2*K*a^2*m^2*r +
    5*E*K*R*a^2*m + 4*E*K*R^2*a*m + E^2*K*R*a*m - E*K*R^3*a*m - 3*E*K*R^2*a*r -
    7*E^2*K*R*a*r + 3*E*K*R^3*a*r - E*K*R^4*a*r - E^2*K*R^2*a^2*m^2 -
    2*E^2*K*a*m*r - 2*E*K*R*a^2*m^2 - 4*E*K*R^2*a^2*m + 3*E^2*K*R*a^2*m -
    2*E^2*K*R^2*a*m + E*K*R^3*a^2*m - 4*E^2*K*R*a^3*m + E^2*K*R^3*a*m +
    3*E^2*K*R*a^2*r + 9*E^2*K*R^2*a*r - 5*E^2*K*R^3*a*r + E^2*K*R^4*a*r +
    2*E^2*K*a^2*m*r - 4*E^2*K*R*a^2*m*r - 4*E^2*K*R^2*a*m*r + E^2*K*R^3*a*m*r -
    E*K*R*a*m*r + E^2*K*R*a^2*m^2*r + 2*E^2*K*R^2*a^2*m*r + 2*E*K*R^2*a*m*r +
    5*E^2*K*R*a*m*r - E*K*R^3*a*m*r
  denominator <-
    r*E^2*R^4 + 2*r*E^2*R^3*a - 4*r*E^2*R^3 + r*E^2*R^2*a^2 +
    2*r*E^2*R^2*a*m - 6*r*E^2*R^2*a + 6*r*E^2*R^2 + 2*r*E^2*R*a^2*m -
    2*r*E^2*R*a^2 - 4*r*E^2*R*a*m + 6*r*E^2*R*a - 4*r*E^2*R + r*E^2*a^2*m^2 -
    2*r*E^2*a^2*m + r*E^2*a^2 + 2*r*E^2*a*m - 2*r*E^2*a + r*E^2 - 2*r*E*R^4 -
    2*r*E*R^3*a + 6*r*E*R^3 - 2*r*E*R^2*a*m + 4*r*E*R^2*a - 6*r*E*R^2 +
    2*r*E*R*a*m - 2*r*E*R*a + 2*r*E*R + r*R^4 - 2*r*R^3 + r*R^2
  -numerator/denominator
}
# --- Single evaluation: managed escapement, no MPA --------------------------
K<-1000
M<-0.9
a <-1 #surv. I think this should just be 1
R <-0 #MPA size
m<-M*(1-R) #frac biomass moving out
r<-0.5
#E <-1/(0.9*r+1) #escapement, unmanaged
E <- 1-(r/2) #managed
Bout <- compute_Bout(E, K, R, a, m, r)
#----Harvest with no MPA, BAU, no climate
HBAU <- sum((1-E)*Bout)
HBAU
HBAU_OA = 31.03
HBAU_Managed = 83.33
# --- Harvest vs MPA size at fixed escapement --------------------------------
K<-1000
M<-1
a <-1 #surv. I think this should just be 1
r<-0.5
count<-0
E<-0.5
#E <- 1/(1+(0.5*r)) #managed
#E <-1/(0.9*r+1)
FracMPA<-seq(0,.95,0.05)
H_managed<-numeric(length(FracMPA))  # preallocated (was grown in the loop)
for (R in FracMPA){
  count<-count+1
  m<-M*(1-R)
  Bout <- compute_Bout(E, K, R, a, m, r)
  H_managed[count]<-(1-E)*Bout
}
plot(FracMPA,H_managed,ylim=c(0,max(na.omit(H_managed))))
#what is the effect of different escapement level?
# NOTE(review): E = 0 with R = 0 yields NaN (0/0), as in the original.
count<-0
R<-0
H_managed<-numeric(length(FracMPA))
for (E in FracMPA){
  count<-count+1
  m<-0.9*(1-R)
  Bout <- compute_Bout(E, K, R, a, m, r)
  H_managed[count]<-(1-E)*Bout
}
plot(FracMPA,H_managed)
#How's Bout behaving for diff MPA size? Managed and unmanaged
#PART OF PRESENTATION
K<-1000
#M<-0.1
a <-1 #surv. I think this should just be 1
r<-0.1
count<-0
#E<-0.5
#E <- 1/(1+(0.5*r)) #managed
E <-1/(0.9*r+1)
FracMPA<-seq(0,0.99,0.01)
# Sweep reserve size x mobility class; vectors preallocated.
H_managed<-numeric(length(FracMPA)*3)
Rvec<-numeric(length(FracMPA)*3)
Mvec<-numeric(length(FracMPA)*3)
for (R in FracMPA){
  for (M in c(0.1,0.5,0.9)){
    count<-count+1
    m<-M*(1-R)
    Bout <- compute_Bout(E, K, R, a, m, r)
    H_managed[count]<-(1-E)*Bout
    Rvec[count]<-R
    Mvec[count]<-M
  }}
# Combine the sweep results and plot catch vs MPA size by mobility class.
Result<-cbind(H_managed,Rvec,Mvec)
head(Result)
# NOTE(review): the title says "r=1" but r was set to 0.1 just above --
# confirm which value this figure was generated with before reusing it.
p1<-ggplot(data=as.data.frame(Result), aes(x=Rvec,y=H_managed,colour=factor(Mvec)))+geom_line()+
  labs(x="MPA size",y="Catch",color="mobility")+theme_minimal()+ ggtitle("Managed species (r=1)")+
  theme(axis.text=element_text(size=16),axis.title=element_text(size=16,face="bold"),legend.text=element_text(size=16),legend.title=element_text(size=16),plot.title = element_text(size = 16, face = "bold"))
p1
ggsave(filename="/Users/ren/Documents/CODES/FoodProvision/Results/managed_r1.png",p1,width=6, height=4,dpi=300)
|
3e280548ba3f2143078149b8a9e400f63a6f3f1d
|
24db90382c28eba1b1e47754dd6d6cd92d60a566
|
/01_TimetoHF.R
|
915aa1c3da80cccfeceb25b4b82dcff52d648782
|
[] |
no_license
|
shk313/Madagascar
|
939613f00c0266d7562d867c9da748180a753885
|
5fffe4e5de29544885a2cd3946df84ee28912d11
|
refs/heads/master
| 2021-06-24T02:51:23.318117
| 2020-11-18T07:15:58
| 2020-11-18T07:15:58
| 144,282,231
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,890
|
r
|
01_TimetoHF.R
|
######################################################
####### Time to Health Facilities in Madagascar
####### Using Weiss et al Friction surface model
####################################################
### Travel time to each health facility (HF) in Madagascar ###
# Clear workspace (kept from the original script; note this wipes the whole
# session, so do not source this file from other code).
rm(list = ls())
## Required packages -- library() fails loudly if a package is missing,
## unlike require(), which only returns FALSE.
library(gdistance)
library(raster)
library(rgdal)
# District polygon is used only to define the cropping extent.
poly <- readOGR(dsn = "Z:/Madagascar-NMCP/Madagascar Shapefiles/Madagascar_Admin_shapefiles/Malareo_District.shp", #
                layer = "Malareo_District")
e <- extent(poly)
# Input Files
friction.surface.filename <- "Z:/mastergrids/Other_Global_Covariates/Accessibility/Weiss/friction_surface_2015_v1.tif"
################### FILL IN BELOW ##############
point.filename <- "G:/Madagascar Research/"# structured as [UNIQUE_ID, X_COORD, Y_COORD] aka [LONG, LAT]. Use a header.
# Define the spatial information from the friction surface, cropped to the
# Madagascar extent.
friction <- raster(friction.surface.filename)
fs1 <- crop(friction, e)
plot(fs1)
## Read in the points table (one row per health facility).
points <- read.csv("lat_long_id.csv", header = TRUE)
head(points)
points <- subset(points, ID >2666)
head(points)
# One output raster per unique facility ID.
HF_list <- unique(points$ID)
filepath <- ("C:/Users/SuzanneK/Documents/MAP stuff/Madagascar_Project/") ######## FILL IN HERE ######
# FIX: the transition matrix depends only on the friction surface, not on the
# facility, so compute it (and its geo-correction) ONCE -- the original
# recomputed this RAM-intensive step on every loop iteration. The local is
# named `trans` rather than `T` to avoid shadowing the TRUE alias.
trans <- transition(fs1, function(x) 1/mean(x), 8) # RAM intensive, can be very slow for large areas
trans.GC <- geoCorrection(trans)
for (i in seq_along(HF_list)) {
  T.filename <- paste(filepath, HF_list[i], ".HF.T.rds", sep='')
  T.GC.filename <- paste(filepath, HF_list[i], ".HF.T.GC.rds", sep='')
  output.filename <- paste(filepath, HF_list[i], "HF.access.tif", sep='')
  # Per-facility copies of the (identical) transition objects are still
  # written so downstream consumers of these files keep working.
  saveRDS(trans, T.filename)
  saveRDS(trans.GC, T.GC.filename)
  # FIX: look the facility up by ID instead of assuming row i corresponds to
  # HF_list[i] (that is only true when IDs are unique and already ordered).
  row <- match(HF_list[i], points$ID)
  HF.coords <- c(points$X[row], points$Y[row])
  # Accumulated least-cost travel time from every cell to this facility.
  HF.raster <- accCost(trans.GC, HF.coords)
  writeRaster(HF.raster, output.filename)
}
|
1433e6c46da1f6d99329ccfa4768bca9aa548356
|
943b81fb66e492d5e59632399d5194237f241b6c
|
/R/randomForest-internals.R
|
74a6ed648d49a3f0bdb895f64e231507c14e9ec1
|
[] |
no_license
|
jasenfinch/metabolyseR
|
d95c750ddeae7b807431bf35ee086a31ac6c25db
|
2f1312d3773b7595f039b1e853544cc90d3ccf66
|
refs/heads/master
| 2023-08-31T11:49:25.358681
| 2023-08-24T09:42:28
| 2023-08-24T09:42:28
| 88,983,134
| 5
| 0
| null | 2023-09-12T15:01:49
| 2017-04-21T12:48:23
|
R
|
UTF-8
|
R
| false
| false
| 3,115
|
r
|
randomForest-internals.R
|
#' Fit a single random forest and summarise it.
#'
#' Builds the argument list from the randomForest() formals so that settings
#' supplied in `rf` extend/override the defaults, fits the model, and collects
#' the standard result tables.
#'
#' @param x Predictor table passed to randomForest().
#' @param cls Response vector, or NULL for an unsupervised forest.
#' @param rf Named list of additional randomForest() arguments.
#' @param type One of 'unsupervised', 'classification' or 'regression'.
#' @param returnModel Should the fitted model object be kept in the output?
#' @return Named list with metrics, importance, predictions, proximities and
#'   (when requested) the fitted model itself.
performRF <- function(x,cls,rf,type,returnModel){
  rf_args <- formals(randomForest::randomForest)
  rf_args$x <- x
  rf_args <- c(rf_args, rf)
  if (!is.null(cls)) {
    rf_args$y <- cls
  }
  model <- do.call(randomForest::randomForest, rf_args)
  summaries <- list(
    metrics = performanceMetrics(model, type = type),
    importance = modelImportance(model, type),
    predictions = modelPredictions(model, type),
    proximities = modelProximities(model)
  )
  if (isTRUE(returnModel)) {
    summaries <- c(summaries, list(model = model))
  }
  summaries
}
#' Model performance metrics for a given analysis type.
#'
#' @param model A fitted randomForest model.
#' @param type One of 'unsupervised', 'classification' or 'regression'.
#' @return A tibble of metrics (empty for unsupervised forests).
performanceMetrics <- function(model,type){
  metrics <- switch(
    type,
    regression = regressionMetrics(model),
    classification = classificationMetrics(model),
    unsupervised = tibble()
  )
  metrics
}
#' Out-of-bag model predictions for a given analysis type.
#'
#' @param model A fitted randomForest model.
#' @param type One of 'unsupervised', 'classification' or 'regression'.
#' @return A tibble of predictions (empty for unsupervised forests).
modelPredictions <- function(model,type){
  predictions <- switch(
    type,
    regression = regressionPredictions(model),
    classification = classificationPredictions(model),
    unsupervised = tibble()
  )
  predictions
}
#' Feature importance scores for a given analysis type.
#'
#' Unsupervised forests reuse the classification importance extractor.
#'
#' @param model A fitted randomForest model.
#' @param type One of 'unsupervised', 'classification' or 'regression'.
#' @return A tibble of importance scores.
modelImportance <- function(model,type){
  importance <- switch(
    type,
    regression = regressionImportance(model),
    unsupervised = classificationImportance(model),
    classification = classificationImportance(model)
  )
  importance
}
#' Pairwise sample proximities from a fitted forest, in long format.
#'
#' @param model A fitted randomForest model with proximities computed.
#' @return A tibble with columns sample1, sample2 and proximity.
modelProximities <- function(model){
  prox <- as_tibble(model$proximity, .name_repair = 'minimal')
  # Row index becomes the first sample id before reshaping to long format.
  prox <- mutate(prox, sample = seq_len(nrow(prox)))
  long <- gather(prox, 'sample2', 'proximity', -sample)
  long <- rename(long, sample1 = sample)
  mutate(long, sample2 = as.numeric(sample2))
}
#' Collate per-repetition results according to the analysis type.
#'
#' @param models List of fitted model result objects.
#' @param results List of per-model result tables.
#' @param type One of 'unsupervised', 'classification' or 'regression'.
#' @return The type-specific collated results object.
collate <- function(models,results,type){
  collated <- switch(
    type,
    regression = collateRegression(models,results),
    classification = collateClassification(models,results),
    unsupervised = collateUnsupervised(models,results)
  )
  collated
}
#' Collate fitted model objects according to the analysis type.
#'
#' @param models List of fitted model objects.
#' @param type One of 'unsupervised', 'classification' or 'regression'.
#' @return The type-specific collated models object.
collateModels <- function(models,type){
  collated <- switch(
    type,
    regression = collateRegressionModels(models),
    classification = collateClassificationModels(models),
    unsupervised = collateUnsupervisedModels(models)
  )
  collated
}
#' Run a supervised random forest analysis per response variable.
#'
#' For each sample-information column named in `cls`, dispatches to
#' regression() (numeric response) or classification() (everything else).
#' Results are returned as a list named by response column.
#' NOTE(review): relies on sinfo(), regression() and classification() defined
#' elsewhere in this package.
supervised <- function(x,
                       cls,
                       rf,
                       reps,
                       binary,
                       comparisons,
                       perm,
                       returnModels,
                       seed){
  # Sample-information columns to model, one analysis per column.
  i <- x %>%
    sinfo() %>%
    select(all_of(cls))
  i %>%
    colnames() %>%
    map(~{
      # `.` is the current response column name; rebinding `cls` here shadows
      # the outer argument on purpose so the downstream calls see one column.
      cls <- .
      # Extract the response as a plain vector to decide the analysis type.
      pred <- i %>%
        select(all_of(cls)) %>%
        deframe()
      if (is.numeric(pred)) {
        regression(x,
                   cls,
                   rf,
                   reps,
                   perm,
                   returnModels,
                   seed)
      } else {
        classification(x,
                       cls,
                       rf,
                       reps,
                       binary,
                       comparisons,
                       perm,
                       returnModels,
                       seed)
      }
    }) %>%
    set_names(colnames(i))
}
|
39589e6b79d2602feb48df694e24b926b704639e
|
c361f98049dcdbb490ac865f7c6d8a966f09ffe1
|
/Tableau and R Projects/statistical analysis.R
|
ad9cefafe026ca1e8cc448fa9789c46d7d76a48b
|
[
"Unlicense"
] |
permissive
|
Sinchen-Gundmi/Projects
|
108e67bdf0dfdd1ddd9229f691f0b2aeb243bd6b
|
99d0b5b57d520ed921d26b896f9178ac39700c8a
|
refs/heads/master
| 2022-09-27T18:16:27.160648
| 2020-05-26T19:18:51
| 2020-05-26T19:18:51
| 258,614,893
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 806
|
r
|
statistical analysis.R
|
# Exploratory analysis of the built-in `trees` dataset plus DAAG examples.
trees
summary(trees)
# Distribution of each variable.
hist(trees$Girth)
hist(trees$Height)
hist(trees$Volume)
boxplot(trees)
# Girth vs height with a fitted regression line.
plot(trees$Girth, trees$Height)
x <- lm(Girth ~ Height, data = trees)
summary(x)
abline(x)
# FIX: the original also called abline(trees$Girth, trees$Height), passing two
# data vectors as intercept/slope -- an invalid call. The abline(x) above
# already draws the fitted line, so the bad call was removed.
plot(trees$Girth, trees$Volume)
plot(trees$Volume, trees$Height)
# FIX: y-axis label said "% Body fat" (copy-paste from another dataset);
# the plotted variable is girth.
plot(trees$Girth ~ trees$Height, xlab = "Height", ylab = "Girth")
panel.smooth(trees$Girth, trees$Height)
qqnorm(trees$Volume, xlab = "Volume")
qqnorm(trees$Height)
# FIX: the original referenced `rubber.summary`, which is never defined
# anywhere and always errored; possibly summary(Rubber) was intended (after
# DAAG is loaded below). The line was removed.
d <- density(trees$Girth)
plot(d)
d <- density(trees$Height) # FIX: was trees$Hieght (NULL -> density() error)
plot(d)
library(DAAG)
library(latticeExtra)
library(MASS)
library(ggplot2)
library(ggcorrplot)
# Correlation structure of the DAAG Rubber dataset.
Rubber
a <- 1
a
corr <- cor(Rubber)
ggcorrplot(corr)
# Log-log regression of book weight on dimensions (DAAG oddbooks).
oddbooks
m2 <- lm(weight ~ log(thick) + log(height) + log(breadth), data = oddbooks)
summary(m2)
corr <- cor(oddbooks)
corr
ggcorrplot(corr)
|
53023d04b41b7f3fd7a53f86a6774e8c12648cd3
|
556dbb845330d9d950238a66d4f15ce65edbcfc6
|
/cachematrix.R
|
7562e54e005e0c0563ef0a3922d5b6b881ae91ee
|
[] |
no_license
|
ubik125/ProgrammingAssignment2
|
6d5ab244b04ac8d88521b80a72b853c310ebb27d
|
df115537c5c2ec70c2dafc5229e0480ef29f852e
|
refs/heads/master
| 2020-12-06T20:03:28.707698
| 2020-01-08T12:14:11
| 2020-01-08T12:14:11
| 232,540,068
| 0
| 0
| null | 2020-01-08T10:40:07
| 2020-01-08T10:40:06
| null |
UTF-8
|
R
| false
| false
| 1,162
|
r
|
cachematrix.R
|
## This script in going to compute the inverse of a given matrix
## but since matrix inversion is computationally intensive there is a
## benefit to caching the inverse of a
## matrix rather than compute it repeatedly.
## Below you will find a couple of functions that are used to create
## a "special" matrix that stores a matrix and caches its inverse.
## Construct a cache-aware matrix wrapper.
##
## Returns a list of four accessor closures around a matrix `x`:
##   set(y)          -- replace the stored matrix and drop any cached inverse
##   get()           -- return the stored matrix
##   setinv(inverse) -- store a computed inverse
##   getinv()        -- return the cached inverse (NULL until one is stored)
makeCacheMatrix <- function(x = matrix()) {
  cached_inv <- NULL
  set <- function(y) {
    # Replacing the matrix invalidates the cached inverse.
    x <<- y
    cached_inv <<- NULL
  }
  list(
    set = set,
    get = function() x,
    setinv = function(inverse) cached_inv <<- inverse,
    getinv = function() cached_inv
  )
}
## This function computes the inverse of the special matrix created by
## makeCacheMatrix. If the inverse has already been calculated
## then it gets the inverse from the cache.
## Compute (or fetch) the inverse of a matrix wrapped by makeCacheMatrix().
##
## When the wrapper already holds a cached inverse it is returned directly,
## after emitting a "getting cached data" message; otherwise the inverse is
## computed with solve(), stored back into the wrapper, and returned.
## Extra arguments in `...` are forwarded to solve().
cacheSolve <- function(x, ...) {
  cached <- x$getinv()
  if (is.null(cached)) {
    cached <- solve(x$get(), ...)
    x$setinv(cached)
  } else {
    message("getting cached data")
  }
  cached
}
|
426b405348ec5bdf34be043019a7fc22aeb70931
|
12b22ccaacdf625ab9456181c4c5997478bb85e2
|
/R/utils.R
|
711d7027982300cdfe82c06a0c252e53eaae840f
|
[
"MIT"
] |
permissive
|
andyofsmeg/ggTag
|
73f3accff9ef3307164b7bdfa11bd895b0ecea91
|
6b15bba62ac4f2a921fbcb86ce0f0b3dc7e8fa84
|
refs/heads/master
| 2020-06-12T18:43:37.598697
| 2019-07-05T10:44:36
| 2019-07-05T10:44:36
| 75,773,176
| 0
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,089
|
r
|
utils.R
|
#' Add time to plot
#'
#' Format the current system time as an upper-cased timestamp string.
#'
#' @param date_format Character. R date format to use for the date.
#' @return Character timestamp, e.g. "01JAN2024 12:30".
#'
addDateTime <- function(date_format = "%d%b%Y %H:%M"){
  toupper(format(Sys.time(), date_format))
}
#' Count string lines
#'
#' Count lines in a string by splitting on embedded newline characters.
#' @param string a character vector of length 1.
count_lines <- function(string){
  pieces <- unlist(str_split(string, "\\n"))
  length(pieces)
}
#' Count meta lines
#'
#' Count the number of lines of meta information for a single metadata item.
#'
#' @param meta Lines of meta information for a single meta data item. The
#'   count is derived from embedded newline characters via
#'   \code{count_lines}.
#' @return numeric. The total number of lines of meta information.
countMeta <- function(meta){
  count_lines(meta)
}
#' Count meta lines
#'
#' Count the lines required by several metadata items.
#'
#' @param ... Metadata items, each counted with \code{\link{countMeta}}.
#' @return numeric. The maximum line count across the supplied items (the
#'   vertical space needed to render them side by side).
#'
countMetaMulti <- function(...){
  metaList <- list(...)
  metaCount <- lapply(metaList, countMeta)
  # Fix: the original ended with an assignment, so the value was returned
  # invisibly; return the maximum explicitly. (The previous roxygen said
  # "total", but the computed -- and intended -- quantity is the maximum.)
  max(unlist(metaCount))
}
#' Create User & Path String
#'
#' @param userID logical. Should the user ID be included with the path?
#' @param path logical or character. If logical, TRUE inserts the current
#'   working directory and FALSE means "no path". If character, the given
#'   path is included verbatim.
#' @return character. "" when neither part is requested; the user name alone
#'   (note: carries the name "user", as returned by \code{Sys.info()}) when
#'   only the ID is requested; "user:path" when both are present; the bare
#'   path otherwise.
#'
createUserPath <- function(userID = TRUE, path = TRUE){
  # Map logical path values onto concrete strings; any character value
  # falls through unchanged via the switch default.
  path <- switch(as.character(path),
                 "TRUE"  = getwd(),
                 "FALSE" = "",
                 path)
  # Combinations of path & userID. Fix: use scalar `&&` instead of the
  # elementwise `&` -- these are single-value conditions.
  if(userID && path != ""){
    path <- paste(Sys.info()["user"], path, sep = ":")
  } else if(userID && path == ""){
    path <- Sys.info()["user"]
  } else if(!userID && path == ""){
    path <- ""
  }
  path
}
|
cd6c64d93ae4e6b4f436d7b5c0257bcf472b1147
|
7999c9b45958805c0bbd80f2c5903e55428d33bc
|
/code/expired/expr.tsne.plot.R
|
b23cad36f512d8283f9d1e50c7da1995bf7a6884
|
[] |
no_license
|
dreamfishes/CancerToCellLine
|
bac53cf45a8e7a365a8390d872199e0c41c50f52
|
b44285c9f907b54ad64f8330a500b17f0639c081
|
refs/heads/main
| 2023-06-17T14:29:11.651886
| 2021-07-11T18:56:39
| 2021-07-11T18:56:39
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 5,920
|
r
|
expr.tsne.plot.R
|
# --- Data loading & sample annotation ---------------------------------------
# Load pre-computed expression matrices and metadata produced by upstream
# scripts (combined CCLE/MET500/TCGA matrix plus several normalized versions).
load('client-side/output/combine.expression.R.output/combined.expr.matrix.RData')
load('client-side/output/combine.expression.R.output/normalized.combined.expr.matrix.RData')
load('client-side/output/CCLE.breast.cancer.cell.line.meta.R.output/CCLE.breast.cancer.cell.line.meta.RData')
load('client-side/output/correlation.analysis.with.expression.R.output/correlation.analysis.with.expression.RData')
require(foreach)
require(dplyr)
# Label each column (sample) of the combined matrix by its data source,
# inferred from column-name patterns: 'SRR' -> MET500, 'TCGA' -> TCGA,
# 'BREAST' -> CCLE breast-cancer cell line; everything else stays 'CCLE'.
data.source <- rep('CCLE',times=ncol(combined.expr.matrix))
data.source[grepl(x=colnames(combined.expr.matrix),pattern='SRR')] <- 'MET500'
data.source[grepl(x=colnames(combined.expr.matrix),pattern='TCGA')] <- 'TCGA'
data.source[grepl(x=colnames(combined.expr.matrix),pattern='BREAST')] <- 'CCLE.breast.cancer'
# Choose which normalization to analyze.  NOTE(review): the uncommented
# assignments below overwrite each other, so only the LAST one takes effect
# (pca.normalized.combined.expr.matrix); the earlier two are dead code.
normalized.data <- combat.normalized.combined.expr.matrix
# normalized.data <- peer.5.normalized.combined.expr.matrix
# normalized.data <- peer.3.normalized.combined.expr.matrix
normalized.data <- peer.normalized.combined.expr.matrix
normalized.data <- pca.normalized.combined.expr.matrix
#normalized.data <- combined.expr.matrix
# --- Marker-gene selection --------------------------------------------------
# Restrict to CCLE cell-line columns (CCLE.cell.line comes from the loaded
# RData objects).
CCLE.log2.rpkm.matrix <- normalized.data[,CCLE.cell.line]
# Keep genes whose median expression across cell lines exceeds 1.
CCLE.median <- apply(CCLE.log2.rpkm.matrix,1,median)
CCLE.expressed.gene <- names(CCLE.median)[CCLE.median > 1]
tmp <- CCLE.log2.rpkm.matrix[CCLE.expressed.gene,]
# Rank-transform each sample, then summarize each gene's rank mean/sd;
# genes with high rank-sd vary most across cell lines (candidate markers).
tmp.rank <- apply(tmp,2,rank)
rank.mean <- apply(tmp.rank,1,mean)
rank.sd <- apply(tmp.rank,1,sd)
# Diagnostic scatter of rank mean vs. rank sd with a lowess trend line.
plot(x=rank.mean,y=rank.sd)
lowess(x=rank.mean,y=rank.sd) %>% lines(lwd=5,col='red')
# Top-N most variable genes by rank sd.
CCLE.rna.seq.marker.gene.1000 <- names(sort(rank.sd,decreasing =TRUE))[1:1000]
CCLE.rna.seq.marker.gene.2000 <- names(sort(rank.sd,decreasing =TRUE))[1:2000]
CCLE.rna.seq.marker.gene.3000 <- names(sort(rank.sd,decreasing =TRUE))[1:3000]
# Spearman-correlation distance over the 1000-gene marker set.
dist.obj <- 1- cor(normalized.data[CCLE.rna.seq.marker.gene.1000,] ,method='spearman')
# NOTE(review): the section below REDEFINES the marker-gene sets (by MAD
# instead of rank sd) and overwrites dist.obj, discarding the selections
# made above -- presumably exploratory; confirm which variant is intended.
CCLE.log2.rpkm.matrix <- normalized.data[,CCLE.cell.line]
CCLE.var <- apply(CCLE.log2.rpkm.matrix,1,mad)
CCLE.rna.seq.marker.gene.100 <- names(sort(CCLE.var,decreasing =TRUE))[1:100]
CCLE.rna.seq.marker.gene.1000 <- names(sort(CCLE.var,decreasing =TRUE))[1:1000]
CCLE.rna.seq.marker.gene.2000 <- names(sort(CCLE.var,decreasing =TRUE))[1:2000]
CCLE.rna.seq.marker.gene.3000 <- names(sort(CCLE.var,decreasing =TRUE))[1:3000]
CCLE.rna.seq.marker.gene.5000 <- names(sort(CCLE.var,decreasing =TRUE))[1:5000]
# Euclidean distance between samples over the top-100 MAD genes.
dist.obj <- dist(normalized.data[CCLE.rna.seq.marker.gene.100,] %>% t)
# --- Cell-line matching, t-SNE embedding, and subtype-colored plot ----------
source('client-side/code/util.R')
require(foreach)
# pick.out.cell.line (defined in util.R) matches MET500 samples against CCLE
# cell lines over the 1000-gene marker set.
rs <- pick.out.cell.line(expr.of.cell.lines = normalized.data [,CCLE.cell.line],expr.of.samples = normalized.data [,data.source == 'MET500'],marker.gene = CCLE.rna.seq.marker.gene.1000)
require(Rtsne)
# 2-D t-SNE embedding of the sample-distance matrix computed above.
tsne.rs <- Rtsne(dist.obj)
# tsne.rs <- Rtsne(normalized.data[CCLE.rna.seq.marker.gene.1000,] %>% t)
draw.df <- data.frame(dim1=tsne.rs$Y[,1],dim2=tsne.rs$Y[,2],data.source = data.source)
rownames(draw.df) <- colnames(normalized.data)
# Annotate each sample with its breast-cancer subtype where known (MET500
# polyA sample lists and TCGA PAM50 calls come from the loaded RData
# objects); all other samples stay 'none'.
draw.df$subtype <- 'none'
flag <- rownames(draw.df) %in% MET500.breast.cancer.polyA.Basal.sample
draw.df[flag,'subtype'] <- 'Basal'
flag <- rownames(draw.df) %in% MET500.breast.cancer.polyA.Her2.sample
draw.df[flag,'subtype'] <- 'Her2'
flag <- rownames(draw.df) %in% MET500.breast.cancer.polyA.LumA.sample
draw.df[flag,'subtype'] <- 'LumA'
flag <- rownames(draw.df) %in% MET500.breast.cancer.polyA.LumB.sample
draw.df[flag,'subtype'] <- 'LumB'
draw.df[names(TCGA.breast.cancer.pam50.subtype),'subtype'] <- TCGA.breast.cancer.pam50.subtype
# NOTE(review): ggplot2 is never attached by this script (library(ggplot2)
# is commented out above) -- this call assumes it was loaded beforehand.
ggplot(draw.df) + geom_point(aes(x=dim1,y=dim2,shape=data.source,color=subtype),size=3) + theme_linedraw(base_size = 35,base_family = 'Arial')
# pca.rs <- prcomp(combined.expr.matrix %>% t)
#
#
#
#
# s <- (revmap(org.Hs.egENSEMBL) %>% as.list)
# gg <- s[CCLE.rna.seq.marker.gene.1000] %>% unlist
#
#
# common.genes <- intersect(CCLE.rna.seq.marker.gene.1000,rownames(combine.expr.matrix))
# dist.obj <- (1- cor(combine.expr.matrix[common.genes,c(MET500.breast.cancer.polyA.sample,CCLE.breast.cancer.cell.line,colnames(TCGA.breast.cancer.log2.fpkm.matrix) )],method='spearman')) %>% as.dist
# common.genes <- intersect(gg,rownames(normalized.combine.expr.matrix))
#
# dist.obj <- (1- cor(normalized.combine.expr.matrix[common.genes,c(MET500.breast.cancer.polyA.sample,CCLE.breast.cancer.cell.line )],method='spearman')) %>% as.dist
#
#
#
# col.annotataion <- data.frame( data.source = c(rep(x='MET500',times=length(MET500.breast.cancer.polyA.sample)),
# rep(x='CCLE', times=ncol(CCLE.log2.rpkm.matrix) ),
# rep(x='TCGA', times=ncol(TCGA.breast.cancer.log2.fpkm.matrix))
# )
# )
# rownames(col.annotataion) <- colnames(combine.expr.matrix )
#
#
#
#
# tsne.rs <- Rtsne(dist.obj)
#
# rownames(tsne.rs$Y) <- colnames(normalized.combine.expr.matrix)
# rownames(tsne.rs$Y) <- c(MET500.breast.cancer.polyA.sample,CCLE.breast.cancer.cell.line,colnames(TCGA.breast.cancer.log2.fpkm.matrix))
# rownames(tsne.rs$Y) <- c(MET500.breast.cancer.polyA.sample,CCLE.breast.cancer.cell.line)
#
# draw.df <- data.frame(x=tsne.rs$Y[,1],y=tsne.rs$Y[,2],col=col.annotataion[rownames(tsne.rs$Y),'data.source'])
# rownames(draw.df) <- rownames(tsne.rs$Y)
# draw.df$col <- as.character(draw.df$col)
# draw.df[draw.df$col == 'CCLE','col'] <- 'CCLE.non.breast.cancer'
# draw.df[grepl(x=rownames(draw.df),pattern='BREAST'),'col'] <- 'CCLE.breast.cancer'
# ggplot(draw.df) + geom_point(aes(x=x,y=y,col=col),size=3) + theme_gray(base_size = 25) + xlab('Dim1') + ylab('Dim2')
|
3a4f3352f06b9c1aa144f66057e3c7070b2f7155
|
51e0499ab36fc15bb99c1a3fd3b02874485f0c6a
|
/test.R
|
55457cfdaba73c7cf4c5d0cedf06985837c74ff4
|
[] |
no_license
|
Yongxin8888/GIS
|
2618a8d393adca7c1d6eb7202832a3fc7ae11a3b
|
763e20eabc4788a8ae1a27a21faeb0ccd0aab526
|
refs/heads/master
| 2020-08-26T17:14:52.699451
| 2019-10-23T15:15:14
| 2019-10-23T15:15:14
| 217,085,254
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 10
|
r
|
test.R
|
# Sum of the integers 3..6 (18); use `<-` with spaced operators per R style.
x <- 3 + 4 + 5 + 6
|
4263f5d41e627ffbc26f30b3580e99f07ba7b2af
|
1ed761a15da505c2286a0f374c8e81b074e1eb27
|
/ref_cls.R
|
a3dd5beaac81768c1810bde66a86441651eae70f
|
[] |
no_license
|
Rphillips1995/SRAFunctions2015
|
9f6a77370e4b3e3b4f82e013f015e3a3501da3ba
|
d1e9d8033793c9979ce52a839a28201ea76cbba4
|
refs/heads/master
| 2021-01-10T15:32:13.619891
| 2015-07-28T14:17:59
| 2015-07-28T14:17:59
| 36,888,021
| 2
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 5,588
|
r
|
ref_cls.R
|
#' This function builds a reference dataframe for cell_lines.
#'
#' Scrapes four Sigma-Aldrich/ECACC web tables (cancer, cardiovascular,
#' diabetes/respiratory and musculoskeletal cell lines) with
#' \code{readHTMLTable} (from the \pkg{XML} package, which must be attached),
#' cleans and harmonizes the columns, removes duplicate cell lines, and
#' saves the combined reference dataframe into the working directory as
#' \code{ref_cell_lines.Rda}. Information was extracted from the URLs on
#' July 27th, 2015, so the scrape is only reproducible while the pages
#' keep that layout.
#'
#' @return ref_cell_lines A dataframe of cell lines with their corresponding
#'   tissue and species, returned invisibly (also saved to disk).
#'
#' @keywords keywords
#'
#' @examples
#' ref_cls()
ref_cls <- function(){
  #All urls come from the Sigma-Aldrich website.
  #--- Cancer cell lines -----------------------------------------------------
  url<-'http://www.sigmaaldrich.com/europe/life-science-offers/cell-cycle/sigma-ecacc-cell/cancer-cell-lines.html#BRC'
  cancer_cell_lines <- readHTMLTable(url, header = TRUE,
                                     as.data.frame = TRUE,
                                     which = 1,
                                     stringsAsFactors = FALSE)
  #Clean up: drop the index column and header rows, drop empty rows,
  #upper-case tissue (V3) and species (V4).
  cancer_cell_lines<-cancer_cell_lines[,-1]
  cancer_cell_lines<-cancer_cell_lines[-c(1:18),]
  cancer_cell_lines<-subset(cancer_cell_lines,subset=(V2!='NA'))
  cancer_cell_lines<-within(cancer_cell_lines,V3<-toupper(with(cancer_cell_lines,V3)))
  cancer_cell_lines<-within(cancer_cell_lines,V4<-toupper(with(cancer_cell_lines,V4)))
  #Some rows have tissue and species switched around; swap them back.
  cancer_cell_lines<-within(cancer_cell_lines,{
    V3<-ifelse(V3=='HUMAN',
               toupper(cancer_cell_lines$V4),
               cancer_cell_lines$V3)
  })
  #--- Cardiovascular cell lines --------------------------------------------
  url2<-"http://www.sigmaaldrich.com/europe/life-science-offers/cell-cycle/sigma-ecacc-cell/cardiovascular-disease.html"
  cardio_cell_lines <- readHTMLTable(url2, header = TRUE,
                                     as.data.frame = TRUE,
                                     which = 1,
                                     stringsAsFactors = FALSE)
  #Clean up (same index/header/empty-row treatment as above).
  cardio_cell_lines<- cardio_cell_lines[-c(1:9),]
  cardio_cell_lines<- cardio_cell_lines[,-1]
  cardio_cell_lines<- subset(cardio_cell_lines,subset=(V2!='NA'))
  #--- Diabetes and respiratory cell lines ----------------------------------
  url3<-"http://www.sigmaaldrich.com/europe/life-science-offers/cell-cycle/sigma-ecacc-cell/diabetes-respiratory.html"
  diaresp_cell_lines <- readHTMLTable(url3, header = TRUE,
                                      as.data.frame = TRUE,
                                      which = 1,
                                      stringsAsFactors = FALSE)
  diaresp_cell_lines <- diaresp_cell_lines[-c(1:9),]
  diaresp_cell_lines <- diaresp_cell_lines[,-1]
  diaresp_cell_lines <- subset(diaresp_cell_lines,subset=(V2!='NA'))
  #This table lists species before tissue, so reorder and rename the columns
  #to match the V2/V3/V4 layout used by the other tables.
  diaresp_cell_lines <- diaresp_cell_lines[,c(1,3,2)]
  names(diaresp_cell_lines) [2] <- "V3"
  names(diaresp_cell_lines) [3] <- "V4"
  #--- Musculoskeletal cell lines -------------------------------------------
  url4<-"http://www.sigmaaldrich.com/europe/life-science-offers/cell-cycle/sigma-ecacc-cell/musculoskeletal.html"
  msstem_cell_lines <- readHTMLTable(url4, header = TRUE,
                                     as.data.frame = TRUE,
                                     which = 1,
                                     stringsAsFactors = FALSE)
  msstem_cell_lines <- msstem_cell_lines[-c(1:9),]
  msstem_cell_lines <- msstem_cell_lines[,-1]
  msstem_cell_lines <- subset(msstem_cell_lines,subset=(V2!="NA"))
  #Reorder/rename as above to keep the column layout uniform.
  msstem_cell_lines <- msstem_cell_lines[,c(1,3,2)]
  names(msstem_cell_lines) [2] <- "V3"
  names(msstem_cell_lines) [3] <- "V4"
  # TODO(review): 'msmsstem_cell_lines.Rda' looks like a typo for
  # 'msstem_cell_lines.Rda'; kept as-is so downstream readers of the
  # existing file are not broken.
  save(msstem_cell_lines,file='msmsstem_cell_lines.Rda')
  #--- Combine into the final reference table -------------------------------
  ref_cell_lines<-rbind(cancer_cell_lines,cardio_cell_lines,diaresp_cell_lines,msstem_cell_lines)
  #Normalize cell-line names (strip '-' and spaces, upper-case) the same way
  #the metadata is normalized elsewhere, so lookups match.
  ref_cell_lines <- within(ref_cell_lines,V2<-gsub('-','',with(ref_cell_lines,V2)))
  ref_cell_lines <- within(ref_cell_lines,V2<-gsub(' ','',with(ref_cell_lines,V2)))
  ref_cell_lines <- within(ref_cell_lines,V2<- toupper(with(ref_cell_lines,V2)))
  #Drop duplicated cell lines (first occurrence wins).
  refdups <- duplicated(ref_cell_lines[,1])
  ref_cell_lines <- ref_cell_lines[!refdups,]
  #Give the columns meaningful names.
  names(ref_cell_lines) [1] <- 'cell_line'
  names(ref_cell_lines) [2] <- 'tissue'
  names(ref_cell_lines) [3] <- 'species'
  #Some rows still have tissue and species switched; swap them back.
  #(A purposeless stray `{ ... }` block that wrapped this section in the
  #original has been removed; the statements are unchanged.)
  ref_cell_lines<-within(ref_cell_lines,{
    tissue<-ifelse(tissue=='HUMAN'|tissue=="MOUSE"|tissue=="RABBIT"|tissue=="DOG"|tissue=="HUMAN/MOUSE HYBRID"|tissue=="RAT/MOUSE HYBRID"|tissue=="GUINEA PIG"|tissue=="RAT"|tissue=="MOUSE X RAT HYBRID",
               toupper(ref_cell_lines$species),
               ref_cell_lines$tissue)
  })
  #Save the file and return the dataframe invisibly (the roxygen docs
  #promise a return value; the original returned save()'s NULL).
  save(ref_cell_lines,file='ref_cell_lines.Rda')
  invisible(ref_cell_lines)
}
|
957bcd4452338ca9563ebfb3947ea1f26531a002
|
63d50cbf64469abd6d4729ba0266496ced3433cf
|
/harsha1/character-vector.R
|
9109df8cf8322484bc6312d44729ee495952bc2f
|
[] |
no_license
|
tactlabs/r-samples
|
a391a9a07022ecd66f29e04d15b3d7abeca7ea7c
|
a5d7985fe815a87b31e4eeee739bc2b7c600c9dc
|
refs/heads/master
| 2023-07-08T09:00:59.805757
| 2021-07-25T15:16:20
| 2021-07-25T15:16:20
| 381,659,818
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 190
|
r
|
character-vector.R
|
# Created on : 29-06-2021
# Course work:
# @author: Harsha Vardhan
# Source:
# Demonstrates indexing a named numeric vector with character indices.
x <- c(first = 3, second = 0, third = 9)
x
# Single element selected by name
x["second"]
# Multiple elements selected by a character vector of names
x[c("first", "third")]
|
64ef563baf5352ddafa4125da4e84aa6aa1b879d
|
2ec6726b083ec743e7f221bbb8e2a397c8d6b152
|
/ExDataFiles/Project2/plot1.R
|
1c5e8904d6f7f86c26755b4f6f926ec0d6791b27
|
[] |
no_license
|
salma-rodriguez/DataScienceTraining
|
bc697a23d21762bf13baf5113b0a53bee8253dc1
|
a5297d975380e860c46c73f9230e0e7c9dc3f5cd
|
refs/heads/master
| 2020-12-31T05:10:34.942826
| 2020-04-23T03:38:51
| 2020-04-23T03:38:51
| 58,333,024
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 914
|
r
|
plot1.R
|
## This function makes a plot of the total fine particulate matter (PM2.5)
## emission by year, aggregated from all sources, and writes it to plot1.png.
# Assumption: the working directory contains summarySCC_PM25.rds.
# (NOTE(review): the original comment referenced
# household_power_consumption.txt, which this function never reads.)
genplot1 <- function() {
        data <- readRDS("summarySCC_PM25.rds")
        ## cumulate value of PM2.5 on all sources per year
        total <- tapply(data$Emission, data$year, sum) ## PM2.5 totals
        png("plot1.png", bg="transparent")
        # Close the device even if barplot() errors; the original called
        # dev.off() at the end and leaked the device on failure.
        on.exit(dev.off(), add = TRUE)
        barplot(total,                    ## the cumulate total
                names.arg = names(total), ## the years
                col = c("lightgreen", "lightblue", "lightgreen", "lightblue"),
                width = c(.1, .1, .1, .1),
                xlab = "Year",
                ylab = expression(PM[2.5] * " (tons)"),
                main = expression("Total " * PM[2.5] * " for all US Counties"))
}
|
e847f36f3efc0715cffcce5f5d82e87628f4563b
|
0e3a15d53df9dca2633f8aa89633a8da7d54c8b4
|
/plot2.R
|
485bb53c511ddf75ff68f07eb1bf9b0b39cee6de
|
[] |
no_license
|
LauraVelikonja/ExData_Plotting1
|
fa57ac2ca3535558f1e051225b51af1b5b36c501
|
e6722d4722999ef48aec0e946cec98939e70e9d3
|
refs/heads/master
| 2021-01-23T03:16:44.715372
| 2016-08-08T10:45:14
| 2016-08-08T10:45:14
| 65,195,663
| 0
| 0
| null | 2016-08-08T10:33:11
| 2016-08-08T10:33:10
| null |
UTF-8
|
R
| false
| false
| 961
|
r
|
plot2.R
|
# Set directory (placeholder path; NOTE(review): hard-coded setwd() makes
# the script machine-specific -- prefer running from the data directory).
setwd("...")
# Read in data
library(data.table)
data <- fread("household_power_consumption.txt")
# Replace ? with NA values
data[data=="?"] <- NA
data <- data.frame(data)
# Convert date and time to Date/Time classes.
# (strptime returns POSIXlt; stored here as a data-frame column.)
data$Timestamp = strptime(paste(data$Date, data$Time),
                          format="%d/%m/%Y %H:%M:%S", tz="UTC")
data$Date <- as.Date(data$Date, format = "%d/%m/%Y")
# Only use data from Day 2007-02-01 and 2007-02-02
data <- subset(data, Date == "2007-02-01"| Date == "2007-02-02")
# Convert characters to numeric values.
# NOTE(review): due to operator precedence, (3:dim(data)[2]-1) evaluates as
# (3:ncol(data)) - 1, i.e. columns 2 .. ncol-1 -- it includes column 2 (Time)
# and skips the last column (Timestamp). Confirm this range is intended.
for (i in (3:dim(data)[2]-1)){
  data[,i] <- as.numeric(data[,i])
}
#data <- cbind(data,weekdays.Date(data$Date))
#names(data)[10] <- "Weekday"
# Export to png
png(filename = "plot2.png",width = 480, height = 480)
# Creating plot 2: Global Active Power over time as a line chart.
plot(data$Timestamp, data$Global_active_power, type = "l", ylab = "Global Active Power (kilowatts)", xlab ="")
dev.off()
|
2a3cb622546de8abf6832a4b469e3ca29469077a
|
9798caa0d5f3a2d21425471af6a091735a962002
|
/man/normalizeTumorBoost.Rd
|
6b07049e19365df4f6ba51c72a80ed359495a503
|
[] |
no_license
|
HenrikBengtsson/aroma.light
|
a77bb7dbfcaa358ccd55a476671695a4ee41ad8a
|
55e6e27a7bb4fec2c70598de30e5b0e3ff711e1f
|
refs/heads/master
| 2023-07-07T12:38:48.144371
| 2023-04-26T19:57:50
| 2023-04-26T19:57:50
| 24,348,041
| 0
| 5
| null | 2017-12-20T00:27:27
| 2014-09-22T22:32:24
|
R
|
UTF-8
|
R
| false
| false
| 5,815
|
rd
|
normalizeTumorBoost.Rd
|
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% Do not modify this file since it was automatically generated from:
%
% normalizeTumorBoost.R
%
% by the Rdoc compiler part of the R.oo package.
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\name{normalizeTumorBoost}
\alias{normalizeTumorBoost}
\alias{normalizeTumorBoost.numeric}
\title{Normalizes allele B fractions for a tumor given a match normal}
\description{
TumorBoost [1] is a normalization method that normalizes the allele B
fractions of a tumor sample given the allele B fractions and genotypes
of a matched normal.
The method is a single-sample (single-pair) method.
It does not require total copy-number estimates.
The normalization is done such that the total copy number is
unchanged afterwards.
}
\usage{
\method{normalizeTumorBoost}{numeric}(betaT, betaN, muN=callNaiveGenotypes(betaN), preserveScale=FALSE,
flavor=c("v4", "v3", "v2", "v1"), ...)
}
\arguments{
\item{betaT, betaN}{Two \code{\link[base]{numeric}} \code{\link[base]{vector}}s each of length J with
tumor and normal allele B fractions, respectively.}
\item{muN}{An optional \code{\link[base]{vector}} of length J containing
normal genotypes calls in (0,1/2,1,\code{\link[base]{NA}}) for (AA,AB,BB).}
\item{preserveScale}{If \code{\link[base:logical]{TRUE}}, SNPs that are heterozygous in the
matched normal are corrected for signal compression using an estimate
of signal compression based on the amount of correction performed
by TumorBoost on SNPs that are homozygous in the matched normal.}
\item{flavor}{A \code{\link[base]{character}} string specifying the type of
correction applied.}
\item{...}{Not used.}
}
\value{
Returns a \code{\link[base]{numeric}} \code{\link[base]{vector}} of length J containing the normalized
allele B fractions for the tumor.
Attribute \code{modelFit} is a \code{\link[base]{list}} containing model fit parameters.
}
\details{
Allele B fractions are defined as the ratio between the allele B signal
and the sum of both (all) allele signals at the same locus.
Allele B fractions are typically within [0,1], but may have a slightly
wider support due to for instance negative noise.
This is typically also the case for the returned normalized
allele B fractions.
}
\section{Flavors}{
This method provides a few different "flavors" for normalizing the
data. The following values of argument \code{flavor} are accepted:
\itemize{
\item{v4: (default) The TumorBoost method, i.e. Eqns. (8)-(9) in [1].}
\item{v3: Eqn (9) in [1] is applied to both heterozygous and homozygous
SNPs, which effectively is v4 where the normalized allele B
fractions for homozygous SNPs becomes 0 and 1.}
\item{v2: ...}
\item{v1: TumorBoost where correction factor is forced to one, i.e.
\eqn{\eta_j=1}. As explained in [1], this is a suboptimal
normalization method. See also the discussion in the
paragraph following Eqn (12) in [1].}
}
}
\section{Preserving scale}{
\emph{As of \pkg{aroma.light} v1.33.3 (March 30, 2014),
argument \code{preserveScale} no longer has a default value and has
to be specified explicitly. This is done in order to change the
default to \code{\link[base:logical]{FALSE}} in a future version, while minimizing the risk
for surprises.}
Allele B fractions are more or less compressed toward a half, e.g.
the signals for homozygous SNPs are slightly away from zero and one.
The TumorBoost method decreases the correlation in allele B fractions
between the tumor and the normal \emph{conditioned on the genotype}.
What it does not control for is the mean level of the allele B fraction
\emph{conditioned on the genotype}.
By design, most flavors of the method will correct the homozygous SNPs
such that their mean levels get close to the expected zero and
one levels. However, the heterozygous SNPs will typically keep the
same mean levels as before.
One possibility is to adjust the signals such as the mean levels of
the heterozygous SNPs relative to that of the homozygous SNPs is
the same after as before the normalization.
If argument \code{preserveScale=TRUE}, then SNPs that are heterozygous
(in the matched normal) are corrected for signal compression using
an estimate of signal compression based on the amount of correction
performed by TumorBoost on SNPs that are homozygous
(in the matched normal).
The option of preserving the scale is \emph{not} discussed in the
TumorBoost paper [1], which presents the \code{preserveScale=FALSE}
version.
}
\examples{
library(R.utils)
# Load data
pathname <- system.file("data-ex/TumorBoost,fracB,exampleData.Rbin", package="aroma.light")
data <- loadObject(pathname)
attachLocally(data)
pos <- position/1e6
muN <- genotypeN
layout(matrix(1:4, ncol=1))
par(mar=c(2.5,4,0.5,1)+0.1)
ylim <- c(-0.05, 1.05)
col <- rep("#999999", length(muN))
col[muN == 1/2] <- "#000000"
# Allele B fractions for the normal sample
plot(pos, betaN, col=col, ylim=ylim)
# Allele B fractions for the tumor sample
plot(pos, betaT, col=col, ylim=ylim)
# TumorBoost w/ naive genotype calls
betaTN <- normalizeTumorBoost(betaT=betaT, betaN=betaN, preserveScale=FALSE)
plot(pos, betaTN, col=col, ylim=ylim)
# TumorBoost w/ external multi-sample genotype calls
betaTNx <- normalizeTumorBoost(betaT=betaT, betaN=betaN, muN=muN, preserveScale=FALSE)
plot(pos, betaTNx, col=col, ylim=ylim)
}
\author{Henrik Bengtsson, Pierre Neuvial}
\references{
[1] H. Bengtsson, P. Neuvial and T.P. Speed, \emph{TumorBoost: Normalization of allele-specific tumor copy numbers from a single pair of tumor-normal genotyping microarrays}, BMC Bioinformatics, 2010, 11:245. [PMID 20462408]
\cr
}
\keyword{methods}
|
2c3bcc39c18f0d9f60bc5f2ffde9a8b2922eb631
|
bfb86d79b0815b04e197d612a7911323891b1aa0
|
/man/gx_get_import_directory.Rd
|
735e73c3a7538ec9bb329536b73213839d6d9a9b
|
[
"MIT"
] |
permissive
|
scholtalbers/r-galaxy-connector
|
758027adfeddb08e673eb852cf127da7f82c069f
|
413d3eb3514ba33ae6ad2c1cd00ad52cab3a05d1
|
refs/heads/master
| 2021-07-13T01:22:05.381973
| 2021-03-19T11:07:35
| 2021-03-19T11:07:35
| 43,147,160
| 1
| 2
| null | null | null | null |
UTF-8
|
R
| false
| true
| 398
|
rd
|
gx_get_import_directory.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/GalaxyConnector.R
\name{gx_get_import_directory}
\alias{gx_get_import_directory}
\title{gx_get_import_directory}
\usage{
gx_get_import_directory(create = FALSE)
}
\arguments{
\item{create}{If \code{TRUE}, create the import directory if it does not exist.}
}
\description{
This function returns the import directory to work with
}
|
ebeeb87d00c80c01b8895501c1605603175037c7
|
4ce21e0843601675734a877bf0885e5cb4c5dca0
|
/Code/NHANES_continuous/create_race_sex_dummies.R
|
d253e88ea31fefd0a14b2cffa87a1a5756b1be4b
|
[] |
no_license
|
bryansashakim/Framingham-Risk-Scores
|
9b9b03ffb4791ffee32eaaeedc7cf3ef7b147d9a
|
50e7a9ed82762a8a4ca4dcbd0c8bc3a1b396c8db
|
refs/heads/master
| 2023-02-28T15:12:07.134012
| 2021-02-02T21:26:44
| 2021-02-02T21:26:44
| 296,344,224
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 11,834
|
r
|
create_race_sex_dummies.R
|
# Add race-by-sex dummy variables (MW/FW/MB/FB) and a combined categorical
# race_sex_cat (1 = white male, 2 = white female, 3 = black male,
# 4 = black female; NA for any other race) to every NHANES wave.
# Coding is inferred from the ifelse() tests: race 1 = white, 2 = black;
# sex 1 = male, 2 = female -- TODO(review): confirm against the NHANES codebook.
# The original repeated the same nine statements verbatim for each of the
# ten waves; this loop performs the identical transformation on each wave.
nhanes_waves <- c("nhanes1999_2000", "nhanes2001_2002", "nhanes2003_2004",
                  "nhanes2005_2006", "nhanes2007_2008", "nhanes2009_2010",
                  "nhanes2011_2012", "nhanes2013_2014", "nhanes2015_2016",
                  "nhanes2017_2018")
for (wave_name in nhanes_waves) {
  wave <- get(wave_name)
  # Dummy columns, added in the same order as the original (MW, FW, MB, FB).
  wave <- wave %>% mutate(MW = ifelse(race == 1 & sex == 1, 1, 0))
  wave <- wave %>% mutate(FW = ifelse(race == 1 & sex == 2, 1, 0))
  wave <- wave %>% mutate(MB = ifelse(race == 2 & sex == 1, 1, 0))
  wave <- wave %>% mutate(FB = ifelse(race == 2 & sex == 2, 1, 0))
  # Combined 4-level category: starts as NA everywhere except white males,
  # then the remaining three groups are filled in.
  wave$race_sex_cat <- ifelse(wave$MW == 1, 1, NA)
  wave$race_sex_cat[wave$FW == 1] <- 2
  wave$race_sex_cat[wave$MB == 1] <- 3
  wave$race_sex_cat[wave$FB == 1] <- 4
  assign(wave_name, wave)
}
# Compute the per-person race/sex-specific risk-score linear predictor:
# select the coefficient vector for the person's race/sex group
# (mwhite_coeff, mblack_coeff, fwhite_coeff, fblack_coeff -- defined
# elsewhere in the project) and sum the coefficient * log-term products.
# race: 1 = white, 2 = black; sex: 1 = male, 2 = female (scalar inputs, as
# supplied element-wise via mapply()). Returns a numeric scalar, or stops
# with an error for an unsupported race/sex combination (the original
# silently returned the base `sum` *function* in that case, because the
# local `sum` was never assigned and the fall-through returned base::sum).
get_race_sex_sums <- function(race, sex, age, hdl, totchol, asbp, smoker, diabetic) {
  # Scalar conditions -> use && (the original used the vectorized &).
  if (sex == 1 && race == 1) {
    #### White male ####
    score <- sum((mwhite_coeff[[1]]*log(age)),
                 (mwhite_coeff[[2]]*log(totchol)),
                 (mwhite_coeff[[3]]*log(totchol)*log(age)),
                 (mwhite_coeff[[4]]*log(hdl)),
                 (mwhite_coeff[[5]]*log(hdl)*log(age)),
                 (mwhite_coeff[[6]]*log(asbp)),
                 (mwhite_coeff[[7]]*smoker),
                 (mwhite_coeff[[8]]*smoker*log(age)),
                 (mwhite_coeff[[9]]*diabetic))
  } else if (sex == 1 && race == 2) {
    #### Black male ####
    score <- sum((mblack_coeff[[1]]*log(age)),
                 (mblack_coeff[[2]]*log(totchol)),
                 (mblack_coeff[[3]]*log(hdl)),
                 (mblack_coeff[[4]]*log(asbp)),
                 (mblack_coeff[[5]]*smoker),
                 (mblack_coeff[[6]]*diabetic))
  } else if (sex == 2 && race == 1) {
    #### White female ####
    score <- sum((fwhite_coeff[[1]]*log(age)),
                 (fwhite_coeff[[2]]*log(age)^2),
                 (fwhite_coeff[[3]]*log(totchol)),
                 (fwhite_coeff[[4]]*log(totchol)*log(age)),
                 (fwhite_coeff[[5]]*log(hdl)),
                 (fwhite_coeff[[6]]*log(hdl)*log(age)),
                 (fwhite_coeff[[7]]*log(asbp)),
                 (fwhite_coeff[[8]]*smoker),
                 (fwhite_coeff[[9]]*smoker*log(age)),
                 (fwhite_coeff[[10]]*diabetic))
  } else if (sex == 2 && race == 2) {
    #### Black female ####
    score <- sum((fblack_coeff[[1]]*log(age)),
                 (fblack_coeff[[2]]*log(totchol)),
                 (fblack_coeff[[3]]*log(hdl)),
                 (fblack_coeff[[4]]*log(hdl)*log(age)),
                 (fblack_coeff[[5]]*log(asbp)),
                 (fblack_coeff[[6]]*log(asbp)*log(age)),
                 (fblack_coeff[[7]]*smoker),
                 (fblack_coeff[[8]]*diabetic))
  } else {
    stop("get_race_sex_sums: unsupported race/sex combination (race = ",
         race, ", sex = ", sex, ")", call. = FALSE)
  }
  score
}
# For each NHANES wave: add a per-person race_sex_sum column computed by
# get_race_sex_sums(), then store the group means of that score by
# race_sex_cat in a one-column object named <wave>_means -- reproducing
# exactly what the original ten copy-pasted blocks did (including keeping
# only column 2 of the summarised result).
nhanes_waves <- c("nhanes1999_2000", "nhanes2001_2002", "nhanes2003_2004",
                  "nhanes2005_2006", "nhanes2007_2008", "nhanes2009_2010",
                  "nhanes2011_2012", "nhanes2013_2014", "nhanes2015_2016",
                  "nhanes2017_2018")
for (wave_name in nhanes_waves) {
  wave <- get(wave_name)
  # Element-wise application over rows (get_race_sex_sums takes scalars).
  wave$race_sex_sum <- mapply(get_race_sex_sums,
                              wave$race, wave$sex, wave$age, wave$hdl,
                              wave$lbxtc, wave$asbp, wave$curr_smq,
                              wave$isDiabetic)
  assign(wave_name, wave)
  # Mean race_sex_sum per race/sex category; keep only the value column.
  wave_means <- wave %>%
    group_by(race_sex_cat) %>%
    summarise_at(vars(race_sex_sum),
                 list(name = mean))
  assign(paste0(wave_name, "_means"), wave_means[, 2])
}
|
52e2550205d8d187025777c4052373f92d2cadc9
|
ede4e46674393f223e70a4b558b70d2eb8d765d1
|
/R/Experiment/Durations/in/inComplex.R
|
44e0578c23c39dfd210db4087e8678f289d81fe1
|
[] |
no_license
|
SoniaBenHedia/HHU_scripts
|
a159cba13fd13105d3c986739a0b577b825e356e
|
34b2bc9dd8c523894c1110a8c4c1be18fed6c889
|
refs/heads/master
| 2022-04-11T11:04:55.779158
| 2020-03-02T20:53:19
| 2020-03-02T20:53:19
| 242,042,851
| 0
| 0
| null | null | null | null |
ISO-8859-1
|
R
| false
| false
| 147,524
|
r
|
inComplex.R
|
#Loading libraries
#library(ascii)
library(xtable)
library(languageR)
library(lme4)
library (MASS)
#library (betareg)
#library(plotrix)
library(LMERConvenienceFunctions)
library(nlme)
library(rms)
library(visreg)
#library(ggplot2)
#library(stargazer)
#library(texreg)
library(mlmRev)
library(lmerTest)
#library(influence.ME)
library(multcomp)
library(dplyr)
# Set the working directory so the CSV below can be read by a relative path.
# NOTE(review): this absolute, machine-specific path makes the script
# non-portable -- consider relative paths or an RStudio project root instead.
setwd("C:/Users/sbenhedia/Dropbox/Geminates/Experimente/Analyses/Analyses in/")
###########################################################################
# I will start with the complex dataset, and thus will need the complex dataset-
# In the following I will first take a look at the pertinent variables
# and then fit a model
############################################################################
# Read the complex-word dataset; "na", "" and "NA" are all treated as missing.
# Fixes from review: spell out TRUE instead of the reassignable shorthand T,
# and use the full argument name `na.strings` -- the original `na.string`
# only worked through R's partial argument matching.
InComplex <- read.csv("InComplex.csv", sep = ",", header = TRUE,
                      na.strings = c("na", "", "NA"))
str(InComplex)
# 'data.frame': 1155 obs. of 83 variables:
# $ X.1 : int 1 2 3 4 5 6 7 8 9 10 ...
# $ X : int 1184 1185 1186 1187 1188 1189 1190 1191 1192 1193 ...
# $ Item : Factor w/ 50 levels "inact","inappeasable",..: 1 1 1 1 1 1 1 1 1 1 ...
# $ Participant : Factor w/ 29 levels "Experiment_1_participant_10",..: 20 24 22 4 27 10 25 12 21 18 ...
# $ ID : int 3201 4144 4340 651 5011 1773 4506 2030 3556 2989 ...
# $ Filename : Factor w/ 1155 levels "Participant_10_10.TextGrid",..: 746 908 840 148 1065 380 947 489 831 695 ...
# $ DeletionMorph : Factor w/ 1 level "N": 1 1 1 1 1 1 1 1 1 1 ...
# $ DeviantPronun : Factor w/ 2 levels "N","Y": 1 1 1 1 1 1 1 1 1 1 ...
# $ Accentuation : Factor w/ 2 levels "Accented","Unaccented": 2 1 2 2 1 1 1 1 1 1 ...
# $ Annotator : Factor w/ 5 levels "Lara","Mandy",..: 1 2 5 5 4 1 4 3 3 2 ...
# $ Order : int 10 40 60 157 258 165 34 284 303 286 ...
# $ WordDur : num 0.418 0.763 0.329 0.418 0.617 ...
# $ SyllNum : int 2 2 2 2 2 2 2 2 2 2 ...
# $ SegNum : int 6 5 5 5 5 5 5 5 5 5 ...
# $ ConsonantDur : num 0.0517 0.0656 0.0568 0.0439 0.0615 ...
# $ PrecSeg : Factor w/ 5 levels "@","{","i","I",..: 4 4 4 4 4 4 4 4 4 4 ...
# $ PrecSegVC : Factor w/ 1 level "V": 1 1 1 1 1 1 1 1 1 1 ...
# $ PrecSegDur : num 0.0561 0.0663 0.0449 0.0378 0.1358 ...
# $ FollSeg : Factor w/ 37 levels "@","@U","{","{kt",..: 3 3 3 3 5 3 3 3 3 3 ...
# $ FollSegVC : Factor w/ 2 levels "C","V": 2 2 2 2 2 2 2 2 2 2 ...
# $ FollSegDur : num 0.108 0.18 0.107 0.13 0.201 ...
# $ PrePauseDur : num 0 0.0867 0 0.6514 0 ...
# $ PostPauseDur : num 0 0.25 0 0 0.169 ...
# $ SentenceDur : num 2.91 2.08 2.21 4.33 1.76 ...
# $ GlottalStop : Factor w/ 2 levels "GlottalStop",..: 1 2 2 2 2 2 2 2 2 1 ...
# $ GlottalStopDur : num 0.0558 0 0 0 0 ...
# $ LocSpeech : num 14.34 6.55 15.19 11.97 8.11 ...
# $ AffixDur : num 0.1636 0.132 0.1017 0.0817 0.1973 ...
# $ BaseDuration : num 0.255 0.631 0.227 0.336 0.419 ...
# $ FirstSyllDur : num 0.1636 0.132 0.1017 0.0817 0.1973 ...
# $ WordDurWithoutGlottalStop : num 0.363 0.763 0.329 0.418 0.617 ...
# $ AffixDurWithoutGlottalStop : num 0.1078 0.132 0.1017 0.0817 0.1973 ...
# $ Environment : Factor w/ 3 levels "n#C","n#nV","n#V": 3 3 3 3 3 3 3 3 3 3 ...
# $ Affix : Factor w/ 2 levels "Loc","Neg": 1 1 1 1 1 1 1 1 1 1 ...
# $ WordFormFrequencyBNC : int 2 2 2 2 2 2 2 2 2 2 ...
# $ WordFormFrequencyAllCOCA : int 0 0 0 0 0 0 0 0 0 0 ...
# $ WordFormFrequencySpokenCOCA: int 0 0 0 0 0 0 0 0 0 0 ...
# $ Base : Factor w/ 45 levels "act","appeasable",..: 1 1 1 1 1 1 1 1 1 1 ...
# $ WordLemmaFrequencyBNC : int 2 2 2 2 2 2 2 2 2 2 ...
# $ BaseLemmaFrequencyBNC : int 38099 38099 38099 38099 38099 38099 38099 38099 38099 38099 ...
# $ SyllPhon : int 2 2 2 2 2 2 2 2 2 2 ...
# $ AffixStress : Factor w/ 3 levels "primary","secondary",..: 3 3 3 3 3 3 3 3 3 3 ...
# $ BaseInitialStress : Factor w/ 2 levels "primary","unstressed": 1 1 1 1 1 1 1 1 1 1 ...
# $ SemanticTransparency : Factor w/ 2 levels "opaque","transparent": 2 2 2 2 2 2 2 2 2 2 ...
# $ TypeOfRoot : Factor w/ 2 levels "bound","word": 2 2 2 2 2 2 2 2 2 2 ...
# $ Rating : int 2 2 4 4 1 1 4 4 1 2 ...
# $ TimeRating : num 758 610 1315 1011 692 ...
# $ TotalTime : num 650 528 1129 826 605 ...
# $ Age : int 19 18 19 35 32 19 29 24 61 19 ...
# $ Sex : Factor w/ 6 levels "female","Female",..: 2 2 4 4 5 4 6 1 2 5 ...
# $ L1 : Factor w/ 10 levels "british","British",..: 5 5 5 2 5 2 3 6 2 5 ...
# $ Bilingual : Factor w/ 6 levels "I only know British English",..: 4 4 3 3 4 6 2 3 3 4 ...
# $ Grow_Up_Region : Factor w/ 26 levels "3 years in Cambridge. 2 in Bristol. 3 in Felixstowe. 8 in Bradford. 2 in Abingdon",..: 13 19 18 24 11 3 22 26 24 5 ...
# $ Languages : Factor w/ 19 levels "Basic French",..: 4 1 12 15 19 5 3 13 8 16 ...
# $ Latin : Factor w/ 14 levels "2 years secondary school",..: 6 11 5 12 6 3 4 5 5 6 ...
# $ Profession_Studies : Factor w/ 28 levels "2nd year Natural Sciences (Chemistry and materials)",..: 27 22 9 16 2 14 4 17 24 7 ...
# $ University : Factor w/ 14 levels "anglia ruskin",..: 14 9 5 7 2 2 3 2 11 5 ...
# $ Knowledge_English_Ling : Factor w/ 13 levels "Currently in my 2nd year of the course at university",..: 5 5 13 4 5 6 6 5 9 5 ...
# $ Phonetics : Factor w/ 12 levels "A couple of lectures",..: 7 7 11 7 7 7 8 7 11 7 ...
# $ Phonology : Factor w/ 11 levels "A couple of lectures",..: 7 7 10 7 7 7 8 7 10 6 ...
# $ Morphology : Factor w/ 10 levels "currently studying",..: 5 5 9 5 5 5 6 5 9 5 ...
# $ Semantics : Factor w/ 10 levels "currently studying",..: 5 5 9 5 5 5 6 5 9 5 ...
# $ AccentuationCondition : Factor w/ 2 levels "accented","unaccented": 2 1 2 2 1 1 1 1 2 1 ...
# $ Experiment : Factor w/ 1 level "Experiment_1": 1 1 1 1 1 1 1 1 1 1 ...
# $ logWordFormFreq : num 0.693 0.693 0.693 0.693 0.693 ...
# $ logBaseLemmaFreq : num 10.5 10.5 10.5 10.5 10.5 ...
# $ logWordLemmaFreq : num 0.693 0.693 0.693 0.693 0.693 ...
# $ RelFreq : num 5.25e-05 5.25e-05 5.25e-05 5.25e-05 5.25e-05 ...
# $ logRelFreq : num -9.85 -9.85 -9.85 -9.85 -9.85 ...
# $ Root : logi NA NA NA NA NA NA ...
# $ BaseFinalStress : logi NA NA NA NA NA NA ...
# $ SuffixAdjSuffix : logi NA NA NA NA NA NA ...
# $ LastSyllDur : logi NA NA NA NA NA NA ...
# $ InCorpus : logi NA NA NA NA NA NA ...
# $ Consonant : logi NA NA NA NA NA NA ...
# $ Orthography : Factor w/ 2 levels "n","nn": 1 1 1 1 1 1 1 1 1 1 ...
# $ median : int 2 2 2 2 2 2 2 2 2 2 ...
# $ TypeOfBase : Factor w/ 2 levels "bound","word": 2 2 2 2 2 2 2 2 2 2 ...
# $ PrePause : Factor w/ 2 levels "No Pause","Pause": 1 2 1 2 1 2 1 2 1 1 ...
# $ PostPause : Factor w/ 2 levels "No Pause","Pause": 1 2 1 1 2 2 1 2 1 2 ...
# $ GlobalSpeechRate : num 3.1 1.92 4.07 2.08 2.28 ...
# $ WordFreqCategorical : Factor w/ 3 levels "HighFreq","LowFreq",..: 2 2 2 2 2 2 2 2 2 2 ...
# $ RatingCategorical : Factor w/ 2 levels "difficultly_decomposed",..: 2 2 1 1 2 2 1 1 2 2 ...
# Drop the two leftover row-index columns carried over from the CSV export.
InComplex[["X.1"]] <- NULL
InComplex[["X"]] <- NULL
###############################################################
# Summary: variables to include ##
###############################################################
## We are going to include the following predictors:
# - Item (rand. effect)
# - Participant (rand. effect)
# - Order
# - Environment
# - logWordFormFreq
# - FirstSyllableBaseStress
# - Accentuation
# - Loc Speech and/or Global Speech
# - PrePause
# - PostPause
# - PrecSegDur (in model in which affixed words are compared)
# - logRelFreq (in model in which affixed words are compared)
# - Type of Root in model in which affixed words are compared)
# - SemanticTransparency in model in which affixed words are compared)
# - Affix in which affixed words are compared)
# - Rating in model in which affixed words are compared)
# Let's see whether it makes sense to include the decomposability measures (only
# makes sense if we have variability)
########################################################################
# The decomposability measures(I need to analyze them and their relations
# in a separate analysis before I can decide how to proceed...)
# probabaly influence of each individual variable and the PC
# Cross-tabulate the candidate decomposability measures to see how much they
# overlap before deciding whether to model them separately or via a PCA.
# 1. Semantic Transparency
table(InComplex$SemanticTransparency)
# opaque transparent
# 193 962
# 2. Type of Base
table(InComplex$TypeOfBase)
# bound word
# 138 1017
table(InComplex$TypeOfBase,InComplex$SemanticTransparency)
# opaque transparent
# bound 121 17
# word 72 945
# yeah.... bound bases are almost all opaque -- the two measures overlap heavily
# 3. Rating
table(InComplex$Rating)
# 1 2 3 4
# 724 143 114 137
table(InComplex$Rating,InComplex$TypeOfBase)
# bound word
# 1 24 700
# 2 13 130
# 3 28 86
# 4 68 69
(table(InComplex$Rating,InComplex$SemanticTransparency))
# opaque transparent
# 1 43 681
# 2 22 121
# 3 43 71
# 4 79 58
pairscor.fnc(InComplex [, c("SemanticTransparency", "TypeOfBase","Rating","logRelFreq","Affix")])
# we find high correlations, so we will test each variable's influence individually
# and do a PC: We will do that later
######################################################################################
# fitting a model #
######################################################################################
# Let's see how much of the variability can solely be explained by speaker and item:
# fit intercept-only models with each random effect and compare the squared
# correlation between observed and fitted durations (a pseudo-R^2).
SpeakerComplex.lmer <- lmer(ConsonantDur ~ 1 + (1|Participant), data = InComplex)
cor(InComplex$ConsonantDur, fitted(SpeakerComplex.lmer))^2
#[1] 0.0808807
ItemComplex.lmer <- lmer(ConsonantDur ~ 1 + (1|Item), data = InComplex)
cor(InComplex$ConsonantDur, fitted(ItemComplex.lmer))^2
#[1] 0.5948826
ItemSpeakerComplex.lmer <- lmer(ConsonantDur ~ 1 + (1|Item) + (1|Participant), data = InComplex)
cor(InComplex$ConsonantDur, fitted(ItemSpeakerComplex.lmer))^2
#0.6772357
# so around 68 percent of the variability can be explained by this! That's a lot. Most is explained
# by item
##########################################################
## Do an initial model:
# Rescale trial order by 0.1 so its coefficient is on a more readable scale
# (does not change significance, only the units of the estimate).
InComplex$OrderRescale<-InComplex$Order*0.1
# Full initial mixed model: phonological environment, accentuation, order,
# frequency, stress, the two speech rates, pauses and preceding-segment
# duration as fixed effects; Item and Participant as random intercepts.
InComplex.lmer1 <- lmer(ConsonantDur ~ Environment+ AccentuationCondition+ OrderRescale +logWordFormFreq+
                          BaseInitialStress + LocSpeech + GlobalSpeechRate +
                          PrePause+ PostPause + PrecSegDur+
                          (1|Item) + (1|Participant), data = InComplex)
summary(InComplex.lmer1)
# Linear mixed model fit by REML
# t-tests use Satterthwaite approximations to degrees of freedom ['lmerMod']
# Formula: ConsonantDur ~ Environment + AccentuationCondition + OrderRescale +
# logWordFormFreq + BaseInitialStress + LocSpeech + GlobalSpeechRate +
# PrePause + PostPause + PrecSegDur + (1 | Item) + (1 | Participant)
# Data: InComplex
#
# REML criterion at convergence: -5892.1
#
# Scaled residuals:
# Min 1Q Median 3Q Max
# -2.6310 -0.6218 -0.0737 0.5098 6.2639
#
# Random effects:
# Groups Name Variance Std.Dev.
# Item (Intercept) 6.734e-05 0.008206
# Participant (Intercept) 4.221e-05 0.006497
# Residual 2.819e-04 0.016789
# Number of obs: 1155, groups: Item, 50; Participant, 29
#
# Fixed effects:
# Estimate Std. Error df t value Pr(>|t|)
# (Intercept) 1.428e-01 6.445e-03 5.141e+02 22.152 <2e-16 ***
# Environmentn#nV -1.271e-02 4.939e-03 4.580e+01 -2.574 0.0134 *
# Environmentn#V -3.942e-02 3.068e-03 4.850e+01 -12.846 <2e-16 ***
# AccentuationConditionunaccented 1.846e-03 1.563e-03 8.947e+02 1.181 0.2378
# OrderRescale -1.477e-05 5.615e-05 1.106e+03 -0.263 0.7926
# logWordFormFreq 5.819e-04 5.287e-04 4.660e+01 1.101 0.2767
# BaseInitialStressunstressed -2.939e-03 2.853e-03 4.620e+01 -1.030 0.3083
# LocSpeech -2.973e-03 3.321e-04 1.056e+03 -8.953 <2e-16 ***
# GlobalSpeechRate -5.127e-03 1.845e-03 7.390e+02 -2.778 0.0056 **
# PrePausePause 1.836e-03 1.251e-03 1.115e+03 1.468 0.1424
# PostPausePause -3.308e-03 1.406e-03 1.118e+03 -2.352 0.0188 *
# PrecSegDur -6.144e-02 2.767e-02 1.115e+03 -2.221 0.0266 *
# ---
# Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
#
# Correlation of Fixed Effects:
# (Intr) Envrnmntn#nV Envirnmntn#V AccntC OrdrRs lgWrFF BsIntS LcSpch GlblSR PrPsPs PstPsP
# Envrnmntn#nV -0.132
# Envirnmntn#V -0.127 0.259
# AccnttnCndt 0.251 0.004 0.015
# OrderRescal -0.156 0.002 0.005 -0.018
# lgWrdFrmFrq -0.266 -0.009 0.214 0.072 -0.026
# BsIntlStrss -0.204 0.039 -0.387 -0.026 0.005 0.079
# LocSpeech -0.498 0.026 -0.159 -0.034 0.044 0.056 0.066
# GloblSpchRt -0.421 -0.019 0.033 -0.570 -0.003 -0.119 0.008 -0.326
# PrePausePas -0.226 -0.006 -0.010 -0.023 0.078 -0.040 -0.008 -0.055 0.218
# PostPausePs -0.390 0.005 -0.021 0.207 -0.052 -0.002 0.029 0.148 0.249 -0.042
# PrecSegDur -0.463 -0.028 -0.089 -0.039 -0.035 0.000 -0.005 0.302 0.035 0.015 0.073
# Pseudo-R^2 of the initial model: squared correlation between observed and
# fitted consonant durations.
cor(InComplex$ConsonantDur, fitted(InComplex.lmer1))^2
#[1] 0.7130087
#######################################################################################
# Dealing with collinearity #
######################################################################################
# Before slimming down the model we should deal with possible collinearity problems
# I will do so, by looking at what happens if both varables stay in a model, what happens
# if one is thrown out and also which effect each one has on its own
# 1. logWordFormFreq & logRelFreq: fit the two frequency measures together and
# separately to check for collinearity/suppression between them.
# Model with both
InComplex.lmerFrequencies <- lmer(ConsonantDur ~ logWordFormFreq+ logRelFreq+ (1|Item) + (1|Participant), data = InComplex)
summary(InComplex.lmerFrequencies)
# Fixed effects:
# Estimate Std. Error df t value Pr(>|t|)
# (Intercept) 6.367e-02 5.318e-03 5.549e+01 11.974 <2e-16 ***
# logWordFormFreq 2.398e-03 1.291e-03 4.699e+01 1.857 0.0696 .
# logRelFreq 1.298e-03 8.644e-04 4.686e+01 1.502 0.1398
cor(InComplex$ConsonantDur, fitted(InComplex.lmerFrequencies))^2
#[1] 0.6771709
# only Word Form Freq
InComplex.lmerWordFrequency <- lmer(ConsonantDur ~ logWordFormFreq + (1|Item) + (1|Participant), data = InComplex)
summary(InComplex.lmerWordFrequency)
# Fixed effects:
# Estimate Std. Error df t value Pr(>|t|)
# (Intercept) 0.060762 0.005009 57.630000 12.130 <2e-16 ***
# logWordFormFreq 0.002934 0.001257 47.960000 2.334 0.0238 *
cor(InComplex$ConsonantDur, fitted(InComplex.lmerWordFrequency))^2
#[1] 0.6771858
# only RelFreq
InComplex.lmerRelFrequency <- lmer(ConsonantDur ~ logRelFreq+ (1|Item) + (1|Participant), data = InComplex)
summary(InComplex.lmerRelFrequency)
# Fixed effects:
# Estimate Std. Error df t value Pr(>|t|)
# (Intercept) 7.091e-02 3.690e-03 6.442e+01 19.215 <2e-16 ***
# logRelFreq 1.742e-03 8.519e-04 4.791e+01 2.045 0.0463 *
cor(InComplex$ConsonantDur, fitted(InComplex.lmerRelFrequency))^2
#[1]0.6772068
#####################################
# Summary Coll. Frequencies:
# So, both factors become signficant when they are the only variables in the model.
# When both are in the model, none is significant. However, they do not supress each other
#################################################
# 2. Loc Speech and/or Global Speech: the two rate measures are correlated
# (r ~ 0.51 below), so compare models with both vs. each one alone.
cor.test(InComplex$LocSpeech,InComplex$GlobalSpeechRate)
# Pearson's product-moment correlation
#
# data: InComplex$LocSpeech and InComplex$GlobalSpeechRate
# t = 19.953, df = 1153, p-value < 2.2e-16
# alternative hypothesis: true correlation is not equal to 0
# 95 percent confidence interval:
# 0.4624632 0.5482893
# sample estimates:
# cor
# 0.5066303
InComplex.lmerSpeechRates<- lmer(ConsonantDur ~ LocSpeech+ GlobalSpeechRate + (1|Item)+(1|Participant), data = InComplex)
summary(InComplex.lmerSpeechRates)
# Fixed effects:
# Estimate Std. Error df t value Pr(>|t|)
# (Intercept) 1.078e-01 4.635e-03 2.062e+02 23.262 <2e-16 ***
# LocSpeech -2.753e-03 3.262e-04 1.143e+03 -8.439 <2e-16 ***
# GlobalSpeechRate -2.312e-03 1.324e-03 1.096e+03 -1.745 0.0812 .
#
cor(InComplex$ConsonantDur, fitted(InComplex.lmerSpeechRates))^2
#[1] 0.7110499
InComplex.lmerLocSpeech<- lmer(ConsonantDur ~ LocSpeech + (1|Item)+(1|Participant), data = InComplex)
summary(InComplex.lmerLocSpeech)
# Fixed effects:
# Estimate Std. Error df t value Pr(>|t|)
# (Intercept) 1.068e-01 4.577e-03 2.047e+02 23.34 <2e-16 ***
# LocSpeech -3.103e-03 2.585e-04 1.114e+03 -12.00 <2e-16 ***
cor(InComplex$ConsonantDur, fitted(InComplex.lmerLocSpeech))^2
#[1] 0.709937
#options(scipen=999)
InComplex.lmerGlobalSpeech<- lmer(ConsonantDur ~ GlobalSpeechRate + (1|Item)+(1|Participant), data = InComplex)
print(summary(InComplex.lmerGlobalSpeech),digits=6)
# Fixed effects:
# Estimate Std. Error df t value Pr(>|t|)
# (Intercept) 8.98628e-02 4.39950e-03 1.28712e+02 20.42568 < 2.22e-16 ***
# GlobalSpeechRate -9.13923e-03 1.08004e-03 1.04235e+03 -8.46196 < 2.22e-16 ***
cor(InComplex$ConsonantDur, fitted(InComplex.lmerGlobalSpeech))^2
#[1] 0.6959133
#####################################
# Summary Coll. Speech Rates:
# - When both Speech Rates are in the model, only Loc is sign.
# - The effect size of LocSpeech decreases when it is the only variable in the model
# - The effect size of GlobalSpeech increases when it is the only variable in the model and it
# becomes sign
# - The effect direction never changes (no supression)
# - Not a big difference in R2 between the different models
#################################################
##############################################################
# The decomposability variables
############
# Test each decomposability measure on its own (random intercepts only) to see
# which ones are predictive before combining them in a PCA.
# Rating
InComplex.lmerRating<- lmer(ConsonantDur ~ Rating + (1|Item)+(1|Participant), data = InComplex)
summary(InComplex.lmerRating)
#
# Fixed effects:
# Estimate Std. Error df t value Pr(>|t|)
# (Intercept) 6.769e-02 3.886e-03 8.080e+01 17.416 <2e-16 ***
# Rating 8.675e-04 7.174e-04 1.100e+03 1.209 0.227
# not significant
########
# Type of base
InComplex.lmerTypeOfBase<- lmer(ConsonantDur ~ TypeOfBase + (1|Item)+(1|Participant), data = InComplex)
summary(InComplex.lmerTypeOfBase)
# Fixed effects:
# Estimate Std. Error df t value Pr(>|t|)
# (Intercept) 0.083603 0.008872 51.770000 9.423 8.11e-13 ***
# TypeOfBaseword -0.016918 0.009417 48.700000 -1.797 0.0786 .
# not significant
######################
# RelFreq
InComplex.lmerRelFreq<- lmer(ConsonantDur ~ logRelFreq+ (1|Item)+(1|Participant), data = InComplex)
summary(InComplex.lmerRelFreq)
# Fixed effects:
# Estimate Std. Error df t value Pr(>|t|)
# (Intercept) 7.091e-02 3.690e-03 6.442e+01 19.215 <2e-16 ***
# logRelFreq 1.742e-03 8.519e-04 4.791e+01 2.045 0.0463 *
# significant!!!!
##############
# Semantic Transparency
InComplex.lmerST<- lmer(ConsonantDur ~ SemanticTransparency+ (1|Item)+(1|Participant), data = InComplex)
summary(InComplex.lmerST)
# Fixed effects:
# Estimate Std. Error df t value Pr(>|t|)
# (Intercept) 0.084134 0.007729 52.360000 10.89 4.66e-15 ***
# SemanticTransparencytransparent -0.018397 0.008361 48.400000 -2.20 0.0326 *
# significant
##############
# Affix
InComplex.lmerAffix<- lmer(ConsonantDur ~ Affix+ (1|Item)+(1|Participant), data = InComplex)
summary(InComplex.lmerAffix)
# Fixed effects:
# Estimate Std. Error df t value Pr(>|t|)
# (Intercept) 0.086030 0.006459 53.820000 13.320 < 2e-16 ***
# AffixNeg -0.022350 0.007194 48.230000 -3.107 0.00317 **
# significant
##############################################################################################
# ############
# summary coll. ############
##############################################################################################
# Now we have dealt with all collinearity problems:
# - We will keep both frequency variables even though they are never significant
# - We will keep both Speech Rate variables but must be aware of the fact that their effect
# size cannot be interpreted!
# # ST, Affix and RelFreq are significant when being the only variable in the model
# - This needs to be investigated later on (after simplification)
###############################################################################################
###########################################################################
# PC Analysis #
###########################################################################
library(pls)
# For the PC, we need to recode all the variables, so that they are numeric.
# They also need to "point in the same direction" --> the higher the value,
# the less decomposable the word.
# RelFreq is fine
# Rating is fine
# Affix: relevel so Neg = 1, Loc = 2, then coerce the factor to numeric codes.
levels(InComplex$Affix)
#[1] "Loc" "Neg"
InComplex$Affix <- relevel (InComplex$Affix, ref= "Neg")
InComplex$NumAffix<-as.numeric(InComplex$Affix)
table(InComplex$Affix,InComplex$NumAffix)
# 1 2
# Neg 885 0
# Loc 0 270
# good
# Type of base: word = 1, bound = 2.
levels(InComplex$TypeOfBase)
#[1] "bound" "word"
InComplex$TypeOfBase <- relevel (InComplex$TypeOfBase, ref= "word" )
InComplex$NumTypeOfBase<-as.numeric(InComplex$TypeOfBase)
table(InComplex$TypeOfBase,InComplex$NumTypeOfBase)
# 1 2
# word 1017 0
# bound 0 138
# Semantic Transparency: transparent = 1, opaque = 2.
levels(InComplex$SemanticTransparency)
#[1] "opaque" "transparent"
InComplex$SemanticTransparency <- relevel (InComplex$SemanticTransparency, ref= "transparent" )
InComplex$NumSemanticTransparency<-as.numeric(InComplex$SemanticTransparency)
table(InComplex$SemanticTransparency,InComplex$NumSemanticTransparency)
# 1 2
# transparent 962 0
# opaque 0 193
# one further problem is that the variables are on
# different scales - so we need to change this: z-score each recoded measure.
InComplex$ScaledSemanticTransparency<-as.numeric(scale(InComplex$NumSemanticTransparency))
summary(InComplex$ScaledSemanticTransparency)
InComplex$ScaledRating<-as.numeric(scale(InComplex$Rating))
summary(InComplex$ScaledRating)
InComplex$ScaledTypeOfBase<-as.numeric(scale(InComplex$NumTypeOfBase))
summary(InComplex$ScaledTypeOfBase)
InComplex$ScaledAffix<-as.numeric(scale(InComplex$NumAffix))
summary(InComplex$ScaledAffix)
InComplex$ScaledRelFreq<-as.numeric(scale(InComplex$logRelFreq))
summary(InComplex$ScaledRelFreq)
# so now they are scaled
# there are quite a few observations without a rating....in order to do the PC
# analysis we need a new data set restricted to rows with a Rating value.
InComplexRating<-InComplex[!is.na(InComplex$Rating),]
decomposability.pc <- prcomp(InComplexRating[, c("ScaledAffix", "ScaledRelFreq","ScaledRating","ScaledTypeOfBase","ScaledSemanticTransparency")])
summary(decomposability.pc)
# Importance of components:
# PC1 PC2 PC3 PC4 PC5
# Standard deviation 1.7703 0.8659 0.7785 0.61866 0.3677
# Proportion of Variance 0.6258 0.1497 0.1210 0.07643 0.0270
# Cumulative Proportion 0.6258 0.7755 0.8966 0.97300 1.0000
# so the first three are the most important ones
# Inspect the PCA loadings to interpret what each component represents.
decomposability.pc$rotation
# PC1 PC2 PC3 PC4 PC5
# ScaledAffix -0.4568198 0.59087172 0.02229637 -0.32412304 -0.58020117
# ScaledRelFreq -0.3904374 -0.72367825 -0.34726269 -0.39095894 -0.22451755
# ScaledRating -0.3913149 -0.25190402 0.88195871 0.03624244 0.06520998
# ScaledTypeOfBase -0.4689925 -0.03729805 -0.24236801 0.83598594 -0.14505235
# ScaledSemanticTransparency -0.5155511 0.24962698 -0.20571359 -0.20471777 0.76659373
# PC1: really all measures load on it
# PC2: also a mixture, except for type of base
# PC3: Rating, RelFreq, Type of Base
# PC4: Affix, SemTrans
xtable(decomposability.pc$rotation, digits=3)
# let's try out a model with PC1 and PC 2 and PC 3 as predictor variables;
# attach the component scores to the rating subset first.
InComplexRating$PCDec1 <- decomposability.pc$x[, 1]
InComplexRating$PCDec2 <- decomposability.pc$x[, 2]
InComplexRating$PCDec3 <- decomposability.pc$x[, 3]
InComplexRating$PCDec4 <- decomposability.pc$x[, 4]
pairscor.fnc(InComplexRating [, c("PCDec1", "PCDec2", "PCDec3", "PCDec4", "Affix",
                                  "logRelFreq", "SemanticTransparency", "Rating", "TypeOfBase")])
# let's see whether this has an influence
In_Complex_PC.lmer<-lmer(ConsonantDur ~ PCDec1+PCDec2+PCDec3+PCDec4 +
                           (1|Item) + (1|Participant), data = InComplexRating)
summary(In_Complex_PC.lmer)
# Fixed effects:
# Estimate Std. Error df t value Pr(>|t|)
# (Intercept) 0.0690166 0.0035393 63.0400000 19.500 < 2e-16 ***
# PCDec1 -0.0049812 0.0017281 46.4700000 -2.882 0.00596 **
# PCDec2 0.0014721 0.0035565 46.2000000 0.414 0.68085
# PCDec3 -0.0007769 0.0014962 87.8100000 -0.519 0.60489
# PCDec4 -0.0051314 0.0049615 46.2900000 -1.034 0.30639
# ---
# maybe PC 1: I guess that means we need to make a whole analysis, i.e. stepwise exclusion....
#######################################################
# PC model: the full predictor set from the initial model plus the first three
# decomposability principal components.
InComplexPC.lmer1 <- lmer(ConsonantDur ~ Environment+ AccentuationCondition+ OrderRescale +logWordFormFreq+
                            BaseInitialStress + LocSpeech + GlobalSpeechRate +
                            PrePause+ PostPause + PrecSegDur+PCDec1+PCDec2+PCDec3+
                            (1|Item) + (1|Participant), data = InComplexRating)
summary(InComplexPC.lmer1)
# Fixed effects:
# Estimate Std. Error df t value Pr(>|t|)
# (Intercept) 1.442e-01 6.554e-03 5.068e+02 22.002 < 2e-16 ***
# Environmentn#nV -9.958e-03 5.267e-03 4.410e+01 -1.891 0.06523 .
# Environmentn#V -4.073e-02 3.200e-03 4.420e+01 -12.731 2.22e-16 ***
# AccentuationConditionunaccented 2.430e-03 1.606e-03 8.538e+02 1.513 0.13072
# OrderRescale -1.774e-05 5.752e-05 1.068e+03 -0.308 0.75788
# logWordFormFreq 7.210e-04 5.300e-04 4.470e+01 1.360 0.18055
# BaseInitialStressunstressed -3.619e-03 2.927e-03 4.390e+01 -1.236 0.22296
# LocSpeech -2.944e-03 3.456e-04 1.098e+03 -8.521 < 2e-16 ***
# GlobalSpeechRate -5.655e-03 1.900e-03 7.074e+02 -2.977 0.00301 **
# PrePausePause 1.910e-03 1.273e-03 1.073e+03 1.500 0.13384
# PostPausePause -3.283e-03 1.437e-03 1.079e+03 -2.286 0.02247 *
# PrecSegDur -6.546e-02 2.841e-02 1.076e+03 -2.304 0.02143 *
# PCDec1 9.224e-04 7.651e-04 4.730e+01 1.206 0.23398
# PCDec2 2.359e-03 1.602e-03 4.740e+01 1.472 0.14750
# PCDec3 1.165e-03 9.168e-04 2.252e+02 1.270 0.20532
###################################################################
# #
# Let's now check the assumptions of our model: #
###################################################################
# Residual diagnostics: QQ-plot of the model residuals to check normality.
par(mfrow=c(1,1))
qqnorm (residuals (InComplexPC.lmer1))
qqline (residuals (InComplexPC.lmer1))
# That does not look that good.
## The qq plot shows that the residuals are not normally distributed --
# this means that the assumption of a linear relation between the dependent
# and the independent variable is violated.
# What to do?
# - transform the response variable
# - transform one or more of the predictors
# - add higher-order predictors
# Maybe a box-cox transformation will lead to a better
# distribution of residuals. Let's try: boxcox() needs a plain lm fit
# (same fixed effects, no random effects).
InComplexBC.lm<-lm(ConsonantDur ~ Environment+ AccentuationCondition+ OrderRescale +logWordFormFreq+
                     BaseInitialStress + LocSpeech + GlobalSpeechRate +
                     PrePause+ PostPause + PrecSegDur+PCDec1+PCDec2+PCDec3,
                   data = InComplexRating)
summary(InComplexBC.lm)
# Coefficients:
# Estimate Std. Error t value Pr(>|t|)
# (Intercept) 1.481e-01 5.595e-03 26.461 < 2e-16 ***
# Environmentn#nV -8.530e-03 2.538e-03 -3.361 0.000803 ***
# Environmentn#V -4.022e-02 1.536e-03 -26.186 < 2e-16 ***
# AccentuationConditionunaccented 1.621e-03 1.575e-03 1.029 0.303504
# OrderRescale -9.412e-06 6.423e-05 -0.147 0.883517
# logWordFormFreq 7.331e-04 2.539e-04 2.887 0.003960 **
# BaseInitialStressunstressed -3.860e-03 1.388e-03 -2.780 0.005530 **
# LocSpeech -3.111e-03 3.443e-04 -9.038 < 2e-16 ***
# GlobalSpeechRate -5.850e-03 1.652e-03 -3.542 0.000414 ***
# PrePausePause 1.116e-03 1.346e-03 0.830 0.406961
# PostPausePause -6.384e-03 1.501e-03 -4.253 2.28e-05 ***
# PrecSegDur -5.644e-02 2.957e-02 -1.909 0.056541 .
# PCDec1 1.103e-03 3.863e-04 2.856 0.004373 **
# PCDec2 2.682e-03 8.225e-04 3.262 0.001142 **
# PCDec3 8.398e-04 7.641e-04 1.099 0.272033
# Box-Cox: pick the lambda that maximises the profile log-likelihood and
# power-transform the response with it.
bc<-boxcox(InComplexBC.lm)
lambda <- bc$x[which.max(bc$y)]
lambda
#[1] 0.06060606
InComplexRating$bc <- InComplexRating$ConsonantDur^lambda
########## Let's redo the model on the transformed response
InComplexPC.lmer2 <- lmer(bc ~ Environment+ AccentuationCondition+ OrderRescale +logWordFormFreq+
                            BaseInitialStress + LocSpeech + GlobalSpeechRate +
                            PrePause+ PostPause + PrecSegDur+PCDec1+PCDec2+PCDec3+
                            (1|Item) + (1|Participant), data = InComplexRating)
summary(InComplexPC.lmer2)
# Fixed effects:
# Estimate Std. Error df t value Pr(>|t|)
# (Intercept) 8.976e-01 4.566e-03 5.396e+02 196.613 < 2e-16 ***
# Environmentn#nV -7.756e-03 3.539e-03 4.420e+01 -2.192 0.03371 *
# Environmentn#V -2.987e-02 2.150e-03 4.420e+01 -13.894 < 2e-16 ***
# AccentuationConditionunaccented 7.489e-04 1.127e-03 9.070e+02 0.664 0.50660
# OrderRescale -2.109e-05 4.006e-05 1.068e+03 -0.527 0.59862
# logWordFormFreq 3.430e-04 3.562e-04 4.470e+01 0.963 0.34082
# BaseInitialStressunstressed -4.272e-03 1.967e-03 4.390e+01 -2.172 0.03528 *
# LocSpeech -1.980e-03 2.404e-04 1.095e+03 -8.234 4.44e-16 ***
# GlobalSpeechRate -2.534e-03 1.336e-03 7.857e+02 -1.897 0.05819 .
# PrePausePause 1.372e-03 8.881e-04 1.076e+03 1.545 0.12265
# PostPausePause -2.378e-03 1.002e-03 1.082e+03 -2.373 0.01781 *
# PrecSegDur -5.192e-02 1.982e-02 1.080e+03 -2.619 0.00894 **
# PCDec1 5.185e-04 5.147e-04 4.750e+01 1.007 0.31882
# PCDec2 1.476e-03 1.078e-03 4.770e+01 1.369 0.17729
# PCDec3 5.371e-04 6.292e-04 2.380e+02 0.854 0.39417
###################################################################
# #
# Let's now check the assumptions of our model: #
###################################################################
# Re-check residual normality on the transformed model, then trim residual
# outliers (romr.fnc from LMERConvenienceFunctions, |standardized residual| > 2.5).
par(mfrow=c(1,1))
qqnorm (residuals (InComplexPC.lmer2))
qqline (residuals (InComplexPC.lmer2))
# looks better but we need to exclude outliers
outliers<-romr.fnc(InComplexPC.lmer2, InComplexRating, trim = 2.5)
# n.removed = 21
# percent.removed = 1.878354
InComplexRating2<-outliers$data
dim(InComplexRating2)
#[1] 1097 96
dim(InComplexRating)
#[1] 1118 95
# okay it worked, now let's refit the model on the trimmed data
InComplexPC.lmer3 <- lmer(bc ~ Environment+ AccentuationCondition+ OrderRescale +logWordFormFreq+
                            BaseInitialStress + LocSpeech + GlobalSpeechRate +
                            PrePause+ PostPause + PrecSegDur+PCDec1+PCDec2+PCDec3+
                            (1|Item) + (1|Participant), data = InComplexRating2)
summary(InComplexPC.lmer3)
# Fixed effects:
# Estimate Std. Error df t value Pr(>|t|)
# (Intercept) 8.956e-01 4.229e-03 5.081e+02 211.789 < 2e-16 ***
# Environmentn#nV -8.396e-03 3.388e-03 4.410e+01 -2.478 0.01711 *
# Environmentn#V -3.019e-02 2.052e-03 4.360e+01 -14.717 < 2e-16 ***
# AccentuationConditionunaccented 5.776e-04 1.040e-03 9.371e+02 0.555 0.57874
# OrderRescale -3.329e-05 3.618e-05 1.042e+03 -0.920 0.35779
# logWordFormFreq 3.274e-04 3.399e-04 4.410e+01 0.963 0.34068
# BaseInitialStressunstressed -3.399e-03 1.879e-03 4.350e+01 -1.809 0.07732 .
# LocSpeech -1.901e-03 2.201e-04 1.073e+03 -8.638 < 2e-16 ***
# GlobalSpeechRate -2.106e-03 1.233e-03 8.729e+02 -1.709 0.08787 .
# PrePausePause 6.170e-04 8.040e-04 1.051e+03 0.767 0.44302
# PostPausePause -1.847e-03 9.191e-04 1.058e+03 -2.009 0.04475 *
# PrecSegDur -4.780e-02 1.800e-02 1.058e+03 -2.655 0.00806 **
# PCDec1 5.263e-04 4.901e-04 4.650e+01 1.074 0.28847
# PCDec2 1.472e-03 1.027e-03 4.670e+01 1.434 0.15831
# PCDec3 5.970e-04 5.811e-04 2.149e+02 1.027 0.30535
# Final residual check, then start backwards simplification of the PC model.
qqnorm (residuals (InComplexPC.lmer3))
qqline (residuals (InComplexPC.lmer3))
# looks good. let's now simplify the model
###############
# Simplification of PC-model
# first, let's throw out Accentuation (least informative predictor above)
InComplexPC.lmer4 <- lmer(bc ~ Environment+ OrderRescale +logWordFormFreq+
                            BaseInitialStress + LocSpeech + GlobalSpeechRate +
                            PrePause+ PostPause + PrecSegDur+PCDec1+PCDec2+PCDec3+
                            (1|Item) + (1|Participant), data = InComplexRating2)
summary(InComplexPC.lmer4)
# Fixed effects:
# Estimate Std. Error df t value Pr(>|t|)
# (Intercept) 8.950e-01 4.077e-03 4.924e+02 219.549 < 2e-16 ***
# Environmentn#nV -8.419e-03 3.387e-03 4.410e+01 -2.486 0.01680 *
# Environmentn#V -3.021e-02 2.051e-03 4.360e+01 -14.730 < 2e-16 ***
# OrderRescale -3.313e-05 3.617e-05 1.044e+03 -0.916 0.35989
# logWordFormFreq 3.142e-04 3.390e-04 4.360e+01 0.927 0.35917
# BaseInitialStressunstressed -3.364e-03 1.877e-03 4.340e+01 -1.792 0.08011 .
# LocSpeech -1.897e-03 2.199e-04 1.073e+03 -8.628 < 2e-16 ***
# GlobalSpeechRate -1.710e-03 1.005e-03 1.068e+03 -1.702 0.08910 .
# PrePausePause 6.272e-04 8.036e-04 1.052e+03 0.781 0.43524
# PostPausePause -1.959e-03 8.967e-04 1.061e+03 -2.184 0.02916 *
# PrecSegDur -4.733e-02 1.798e-02 1.059e+03 -2.633 0.00859 **
# PCDec1 5.282e-04 4.901e-04 4.650e+01 1.078 0.28669
# PCDec2 1.458e-03 1.026e-03 4.670e+01 1.420 0.16215
# PCDec3 5.839e-04 5.804e-04 2.143e+02 1.006 0.31553
# good, nothing has changed, next let's throw out PrePause
InComplexPC.lmer5 <- lmer(bc ~ Environment+ OrderRescale +
                            BaseInitialStress + LocSpeech + GlobalSpeechRate +
                            logWordFormFreq+ PostPause + PrecSegDur+PCDec1+PCDec2+PCDec3+
                            (1|Item) + (1|Participant), data = InComplexRating2)
summary(InComplexPC.lmer5)
# Fixed effects:
# Estimate Std. Error df t value Pr(>|t|)
# (Intercept) 8.957e-01 3.980e-03 4.636e+02 225.029 < 2e-16 ***
# Environmentn#nV -8.367e-03 3.387e-03 4.400e+01 -2.470 0.01746 *
# Environmentn#V -3.016e-02 2.051e-03 4.350e+01 -14.709 < 2e-16 ***
# OrderRescale -3.544e-05 3.604e-05 1.044e+03 -0.983 0.32574
# BaseInitialStressunstressed -3.371e-03 1.878e-03 4.340e+01 -1.795 0.07959 .
# LocSpeech -1.886e-03 2.193e-04 1.075e+03 -8.598 < 2e-16 ***
# GlobalSpeechRate -1.907e-03 9.725e-04 1.060e+03 -1.961 0.05009 .
# logWordFormFreq 3.237e-04 3.389e-04 4.360e+01 0.955 0.34478
# PostPausePause -1.934e-03 8.959e-04 1.062e+03 -2.159 0.03107 *
# PrecSegDur -4.746e-02 1.798e-02 1.060e+03 -2.640 0.00841 **
# PCDec1 5.130e-04 4.898e-04 4.630e+01 1.047 0.30034
# PCDec2 1.489e-03 1.026e-03 4.650e+01 1.451 0.15337
# PCDec3 5.941e-04 5.803e-04 2.140e+02 1.024 0.30712
# good,now PC 1
# good, nothing has changed, next let's throw out Freq
# Backward-elimination step 3: drop logWordFormFreq (n.s. in lmer5).
InComplexPC.lmer6 <- lmer(bc ~ Environment+ OrderRescale +
                            BaseInitialStress + LocSpeech + GlobalSpeechRate +
                            PostPause + PrecSegDur+PCDec1+PCDec2+PCDec3+
                            (1|Item) + (1|Participant), data = InComplexRating2)
summary(InComplexPC.lmer6)
# Fixed effects:
# Estimate Std. Error df t value Pr(>|t|)
# (Intercept) 8.968e-01 3.813e-03 6.540e+02 235.216 < 2e-16 ***
# Environmentn#nV -8.434e-03 3.388e-03 4.520e+01 -2.490 0.01653 *
# Environmentn#V -3.052e-02 2.016e-03 4.470e+01 -15.137 < 2e-16 ***
# OrderRescale -3.444e-05 3.603e-05 1.046e+03 -0.956 0.33927
# BaseInitialStressunstressed -3.476e-03 1.875e-03 4.470e+01 -1.854 0.07039 .
# LocSpeech -1.897e-03 2.190e-04 1.075e+03 -8.665 < 2e-16 ***
# GlobalSpeechRate -1.818e-03 9.679e-04 1.066e+03 -1.878 0.06063 .
# PostPausePause -1.909e-03 8.954e-04 1.064e+03 -2.132 0.03324 *
# PrecSegDur -4.747e-02 1.797e-02 1.060e+03 -2.641 0.00838 **
# PCDec1 4.700e-04 4.878e-04 4.760e+01 0.964 0.34015
# PCDec2 1.419e-03 1.024e-03 4.770e+01 1.386 0.17220
# PCDec3 5.584e-04 5.792e-04 2.211e+02 0.964 0.33601
# good, nothing has changed, next let's throw out Pc1
# Backward-elimination step 4: drop PCDec1 (n.s. in lmer6).
InComplexPC.lmer7<- lmer(bc ~ Environment+ OrderRescale +
                           BaseInitialStress + LocSpeech + GlobalSpeechRate +
                           PostPause + PrecSegDur+PCDec2+PCDec3+
                           (1|Item) + (1|Participant), data = InComplexRating2)
summary(InComplexPC.lmer7)
# Fixed effects:
# Estimate Std. Error df t value Pr(>|t|)
# (Intercept) 8.962e-01 3.759e-03 6.620e+02 238.408 < 2e-16 ***
# Environmentn#nV -8.594e-03 3.381e-03 4.630e+01 -2.542 0.01445 *
# Environmentn#V -2.993e-02 1.920e-03 4.770e+01 -15.588 < 2e-16 ***
# OrderRescale -3.344e-05 3.601e-05 1.047e+03 -0.929 0.35328
# BaseInitialStressunstressed -3.429e-03 1.873e-03 4.560e+01 -1.831 0.07368 .
# LocSpeech -1.864e-03 2.162e-04 1.043e+03 -8.620 < 2e-16 ***
# GlobalSpeechRate -1.906e-03 9.636e-04 1.067e+03 -1.978 0.04820 *
# PostPausePause -1.914e-03 8.954e-04 1.064e+03 -2.137 0.03281 *
# PrecSegDur -4.688e-02 1.796e-02 1.062e+03 -2.610 0.00919 **
# PCDec2 1.384e-03 1.022e-03 4.890e+01 1.354 0.18192
# PCDec3 3.971e-04 5.543e-04 3.717e+02 0.716 0.47413
# good, nothing has changed, next let's throw out Pc3
# Backward-elimination step 5: drop PCDec3 (n.s. in lmer7).
InComplexPC.lmer8<- lmer(bc ~ Environment+ OrderRescale +
                           BaseInitialStress + LocSpeech + GlobalSpeechRate +
                           PostPause + PrecSegDur+PCDec2+
                           (1|Item) + (1|Participant), data = InComplexRating2)
summary(InComplexPC.lmer8)
#
# Fixed effects:
# Estimate Std. Error df t value Pr(>|t|)
# (Intercept) 8.963e-01 3.754e-03 6.619e+02 238.770 < 2e-16 ***
# Environmentn#nV -9.001e-03 3.327e-03 4.880e+01 -2.706 0.00936 **
# Environmentn#V -2.997e-02 1.916e-03 4.810e+01 -15.637 < 2e-16 ***
# OrderRescale -3.431e-05 3.599e-05 1.048e+03 -0.953 0.34064
# BaseInitialStressunstressed -3.317e-03 1.863e-03 4.690e+01 -1.780 0.08149 .
# LocSpeech -1.877e-03 2.153e-04 1.040e+03 -8.717 < 2e-16 ***
# GlobalSpeechRate -1.885e-03 9.629e-04 1.067e+03 -1.958 0.05048 .
# PostPausePause -1.902e-03 8.951e-04 1.064e+03 -2.125 0.03378 *
# PrecSegDur -4.678e-02 1.796e-02 1.063e+03 -2.605 0.00932 **
# PCDec2 1.078e-03 9.266e-04 8.610e+01 1.164 0.24784
#
# good, nothing has changed, next let's throw out Order
# Backward-elimination step 6: drop OrderRescale (n.s. in lmer8).
InComplexPC.lmer9<- lmer(bc ~ Environment+
                           BaseInitialStress + LocSpeech + GlobalSpeechRate +
                           PostPause + PrecSegDur+PCDec2+
                           (1|Item) + (1|Participant), data = InComplexRating2)
summary(InComplexPC.lmer9)
#
# Fixed effects:
# Estimate Std. Error df t value Pr(>|t|)
# (Intercept) 8.958e-01 3.711e-03 6.469e+02 241.406 < 2e-16 ***
# Environmentn#nV -8.990e-03 3.324e-03 4.880e+01 -2.705 0.00939 **
# Environmentn#V -2.995e-02 1.915e-03 4.810e+01 -15.640 < 2e-16 ***
# BaseInitialStressunstressed -3.301e-03 1.862e-03 4.690e+01 -1.773 0.08267 .
# LocSpeech -1.869e-03 2.151e-04 1.040e+03 -8.685 < 2e-16 ***
# GlobalSpeechRate -1.911e-03 9.625e-04 1.068e+03 -1.985 0.04740 *
# PostPausePause -1.933e-03 8.944e-04 1.066e+03 -2.161 0.03091 *
# PrecSegDur -4.774e-02 1.793e-02 1.064e+03 -2.662 0.00788 **
# PCDec2 1.075e-03 9.260e-04 8.610e+01 1.161 0.24891
# good, nothing has changed, next let's throw out Pc2
# Backward-elimination step 7: drop PCDec2 (n.s. in lmer9) — the last
# remaining PC-based segmentability predictor.
InComplexPC.lmer10<- lmer(bc ~ Environment+
                            BaseInitialStress + LocSpeech + GlobalSpeechRate +
                            PostPause + PrecSegDur+
                            (1|Item) + (1|Participant), data = InComplexRating2)
summary(InComplexPC.lmer10)
# Fixed effects:
# Estimate Std. Error df t value Pr(>|t|)
# (Intercept) 8.957e-01 3.717e-03 6.472e+02 240.970 < 2e-16 ***
# Environmentn#nV -1.024e-02 3.184e-03 4.660e+01 -3.216 0.00236 **
# Environmentn#V -3.012e-02 1.932e-03 4.940e+01 -15.593 < 2e-16 ***
# BaseInitialStressunstressed -2.798e-03 1.832e-03 4.700e+01 -1.527 0.13353
# LocSpeech -1.902e-03 2.134e-04 9.998e+02 -8.916 < 2e-16 ***
# GlobalSpeechRate -1.766e-03 9.544e-04 1.064e+03 -1.850 0.06458 .
# PostPausePause -1.908e-03 8.940e-04 1.067e+03 -2.134 0.03303 *
# PrecSegDur -4.775e-02 1.793e-02 1.064e+03 -2.663 0.00785 **
# good, nothing has changed, next let's throw out stress
# Backward-elimination step 8: drop BaseInitialStress (n.s. in lmer10).
InComplexPC.lmer11<- lmer(bc ~ Environment+
                            + LocSpeech + GlobalSpeechRate +
                            PostPause + PrecSegDur+
                            (1|Item) + (1|Participant), data = InComplexRating2)
summary(InComplexPC.lmer11)
# Fixed effects:
# Estimate Std. Error df t value Pr(>|t|)
# (Intercept) 8.946e-01 3.652e-03 6.797e+02 244.972 < 2e-16 ***
# Environmentn#nV -1.004e-02 3.225e-03 4.740e+01 -3.114 0.00312 **
# Environmentn#V -3.134e-02 1.779e-03 4.950e+01 -17.616 < 2e-16 ***
# LocSpeech -1.882e-03 2.132e-04 1.001e+03 -8.826 < 2e-16 ***
# GlobalSpeechRate -1.767e-03 9.552e-04 1.067e+03 -1.850 0.06466 .
# PostPausePause -1.863e-03 8.937e-04 1.068e+03 -2.084 0.03739 *
# PrecSegDur -4.798e-02 1.793e-02 1.063e+03 -2.676 0.00756 **
# Likelihood-ratio test: does removing BaseInitialStress hurt model fit?
anova(InComplexPC.lmer10,InComplexPC.lmer11)
# Df AIC BIC logLik deviance Chisq Chi Df Pr(>Chisq)
# ..1 10 -6707.9 -6657.9 3364.0 -6727.9
# object 11 -6708.4 -6653.4 3365.2 -6730.4 2.4503 1 0.1175
# good!
# Now Global Speech Rate
# Backward-elimination step 9: drop GlobalSpeechRate (marginal in lmer11).
InComplexPC.lmer12<- lmer(bc ~ Environment+
                            + LocSpeech +
                            PostPause + PrecSegDur+
                            (1|Item) + (1|Participant), data = InComplexRating2)
summary(InComplexPC.lmer12)
# Fixed effects:
# Estimate Std. Error df t value Pr(>|t|)
# (Intercept) 8.921e-01 3.393e-03 5.823e+02 262.894 < 2e-16 ***
# Environmentn#nV -1.019e-02 3.213e-03 4.730e+01 -3.170 0.00267 **
# Environmentn#V -3.109e-02 1.768e-03 4.900e+01 -17.585 < 2e-16 ***
# LocSpeech -2.047e-03 1.936e-04 1.070e+03 -10.570 < 2e-16 ***
# PostPausePause -1.042e-03 7.777e-04 1.077e+03 -1.339 0.18075
# PrecSegDur -4.785e-02 1.795e-02 1.064e+03 -2.665 0.00781 **
# Likelihood-ratio test: removing GlobalSpeechRate (lmer12 vs lmer11).
anova(InComplexPC.lmer11,InComplexPC.lmer12)
# Df AIC BIC logLik deviance Chisq Chi Df Pr(>Chisq)
# ..1 9 -6706.5 -6661.5 3362.2 -6724.5
# object 10 -6707.9 -6657.9 3364.0 -6727.9 3.4208 1 0.06438 .
# hm okay, there is a connection between pause and speech rate - makes sense
# let's see what happens if we also throw out pause
# and what happens if we throw out pause but leave speech rate in
# Backward-elimination step 10: drop PostPause as well, leaving
# Environment + LocSpeech + PrecSegDur (candidate final fixed effects).
InComplexPC.lmer13<- lmer(bc ~ Environment+
                            + LocSpeech +
                            PrecSegDur+
                            (1|Item) + (1|Participant), data = InComplexRating2)
summary(InComplexPC.lmer13)
# Fixed effects:
# Estimate Std. Error df t value Pr(>|t|)
# (Intercept) 8.901e-01 3.069e-03 4.954e+02 290.032 < 2e-16 ***
# Environmentn#nV -1.013e-02 3.215e-03 4.730e+01 -3.150 0.00283 **
# Environmentn#V -3.122e-02 1.766e-03 4.870e+01 -17.680 < 2e-16 ***
# LocSpeech -1.928e-03 1.730e-04 1.088e+03 -11.147 < 2e-16 ***
# PrecSegDur -4.595e-02 1.790e-02 1.064e+03 -2.566 0.01041 *
# Likelihood-ratio test: removing PostPause (lmer13 vs lmer12).
anova(InComplexPC.lmer12,InComplexPC.lmer13)
# Df AIC BIC logLik deviance Chisq Chi Df Pr(>Chisq)
# ..1 8 -6706.7 -6666.7 3361.3 -6722.7
# object 9 -6706.5 -6661.5 3362.2 -6724.5 1.8072 1 0.1788
# okay, no difference here
# Counter-check: keep GlobalSpeechRate instead of PostPause (the two are
# correlated, see the anova comments above) and see whether it survives.
InComplexPC.lmer14<- lmer(bc ~ Environment+ GlobalSpeechRate
                          + LocSpeech +
                            PrecSegDur+
                            (1|Item) + (1|Participant), data = InComplexRating2)
summary(InComplexPC.lmer14)
# Fixed effects:
# Estimate Std. Error df t value Pr(>|t|)
# (Intercept) 8.905e-01 3.106e-03 5.170e+02 286.734 < 2e-16 ***
# Environmentn#nV -1.004e-02 3.224e-03 4.740e+01 -3.115 0.00312 **
# Environmentn#V -3.139e-02 1.779e-03 4.950e+01 -17.646 < 2e-16 ***
# GlobalSpeechRate -7.857e-04 8.317e-04 1.076e+03 -0.945 0.34499
# LocSpeech -1.814e-03 2.112e-04 9.936e+02 -8.589 < 2e-16 ***
# PrecSegDur -4.534e-02 1.791e-02 1.064e+03 -2.531 0.01151 *
# okay, not even significant. So this would be our final
# PC model without interactiosn
# Re-display the chosen main-effects PC model before testing interactions.
summary(InComplexPC.lmer13)
# Fixed effects:
# Estimate Std. Error df t value Pr(>|t|)
# (Intercept) 8.901e-01 3.069e-03 4.954e+02 290.032 < 2e-16 ***
# Environmentn#nV -1.013e-02 3.215e-03 4.730e+01 -3.150 0.00283 **
# Environmentn#V -3.122e-02 1.766e-03 4.870e+01 -17.680 < 2e-16 ***
# LocSpeech -1.928e-03 1.730e-04 1.088e+03 -11.147 < 2e-16 ***
# PrecSegDur -4.595e-02 1.790e-02 1.064e+03 -2.566 0.01041 *
# now we need to check for interactions! Note PC is sign!
#############################################
# Interactions PC-Analyses
#############################################
# 1. Environment and PCs
# 2. Env and Stress and Accentuation
# 3. Stress and PC
# 4. Acc and PC
# 1. Environment and PCs
# Interaction check 1a: Environment x PCDec1 added to the lmer13 base model.
InComplexPC.lmer13EnvPC1<- lmer(bc ~ Environment*PCDec1+
                                  + LocSpeech +
                                  PrecSegDur+
                                  (1|Item) + (1|Participant), data = InComplexRating2)
summary(InComplexPC.lmer13EnvPC1)
# Fixed effects:
# Estimate Std. Error df t value Pr(>|t|)
# (Intercept) 8.908e-01 3.081e-03 5.430e+02 289.133 < 2e-16 ***
# Environmentn#nV -5.878e-03 3.094e-03 4.510e+01 -1.900 0.06385 .
# Environmentn#V -3.101e-02 1.631e-03 4.550e+01 -19.011 < 2e-16 ***
# PCDec1 4.861e-04 5.446e-04 5.980e+01 0.893 0.37567
# LocSpeech -1.948e-03 1.739e-04 1.085e+03 -11.204 < 2e-16 ***
# PrecSegDur -4.631e-02 1.793e-02 1.066e+03 -2.583 0.00992 **
# Environmentn#nV:PCDec1 3.958e-03 1.385e-03 6.040e+01 2.858 0.00584 **
# Environmentn#V:PCDec1 -1.478e-03 8.982e-04 5.640e+01 -1.646 0.10537
# Visualize the PCDec1 slope separately per Environment level.
visreg(InComplexPC.lmer13EnvPC1, "PCDec1", by="Environment")
# because of distribution not valid
# Interaction check 1b: Environment x PCDec2.
InComplexPC.lmer13EnvPC2<- lmer(bc ~ Environment*PCDec2+
                                  + LocSpeech +
                                  PrecSegDur+
                                  (1|Item) + (1|Participant), data = InComplexRating2)
summary(InComplexPC.lmer13EnvPC2)
visreg(InComplexPC.lmer13EnvPC2, "PCDec2", by="Environment")
# okay same thing! but effect is the other way around? Really, no!
# Interaction check 1c: Environment x PCDec3.
InComplexPC.lmer13EnvPC3<- lmer(bc ~ Environment*PCDec3+
                                  + LocSpeech +
                                  PrecSegDur+
                                  (1|Item) + (1|Participant), data = InComplexRating2)
summary(InComplexPC.lmer13EnvPC3)
# No
# SO - the interactions do not makes sense to consider
# 2. Env and Stress and Accentuation
# Interaction check 2a: Environment x BaseInitialStress on the lmer13 base.
InComplexPC.lmer13EnvStress<- lmer(bc ~ Environment*BaseInitialStress+
                                     + LocSpeech +
                                     PrecSegDur+
                                     (1|Item) + (1|Participant), data = InComplexRating2)
summary(InComplexPC.lmer13EnvStress)
# Fixed effects:
# Estimate Std. Error df t value Pr(>|t|)
# (Intercept) 8.877e-01 3.014e-03 4.831e+02 294.493 < 2e-16 ***
# Environmentn#nV -3.004e-03 2.760e-03 4.140e+01 -1.088 0.28270
# Environmentn#V -2.406e-02 2.045e-03 4.330e+01 -11.765 4.44e-15 ***
# BaseInitialStressunstressed 6.383e-03 2.167e-03 4.350e+01 2.946 0.00516 **
# LocSpeech -1.889e-03 1.701e-04 1.025e+03 -11.105 < 2e-16 ***
# PrecSegDur -4.641e-02 1.785e-02 1.073e+03 -2.600 0.00946 **
# Environmentn#nV:BaseInitialStressunstressed -2.753e-02 5.548e-03 4.570e+01 -4.961 1.02e-05 ***
# Environmentn#V:BaseInitialStressunstressed -1.351e-02 2.886e-03 4.290e+01 -4.683 2.85e-05 ***
# Plot the Environment x Stress interaction from both directions.
visreg(InComplexPC.lmer13EnvStress, "BaseInitialStress", by="Environment")
visreg(InComplexPC.lmer13EnvStress, "Environment", by="BaseInitialStress")
# okay this makes perfect sense. Let's see whether this model is better than
# the other
# LRT: does the Environment x Stress interaction improve on lmer13?
anova(InComplexPC.lmer13,InComplexPC.lmer13EnvStress)
# Df AIC BIC logLik deviance Chisq Chi Df Pr(>Chisq)
# object 8 -6706.7 -6666.7 3361.3 -6722.7
# ..1 11 -6732.1 -6677.1 3377.0 -6754.1 31.372 3 7.096e-07 ***
#yes!
# Interaction check 2b: Environment x AccentuationCondition.
InComplexPC.lmer13EnvAcc<- lmer(bc ~ Environment*AccentuationCondition+
                                  + LocSpeech +
                                  PrecSegDur+
                                  (1|Item) + (1|Participant), data = InComplexRating2)
summary(InComplexPC.lmer13EnvAcc)
#
# (Intercept) 8.907e-01 3.179e-03 5.078e+02 280.136 < 2e-16 ***
# Environmentn#nV -8.249e-03 3.452e-03 6.280e+01 -2.389 0.01989 *
# Environmentn#V -3.293e-02 1.897e-03 6.440e+01 -17.360 < 2e-16 ***
# AccentuationConditionunaccented -1.619e-03 1.118e-03 1.054e+03 -1.448 0.14783
# LocSpeech -1.902e-03 1.970e-04 1.056e+03 -9.658 < 2e-16 ***
# PrecSegDur -4.691e-02 1.788e-02 1.062e+03 -2.624 0.00882 **
# Environmentn#nV:AccentuationConditionunaccented -3.700e-03 2.522e-03 1.019e+03 -1.467 0.14273
# Environmentn#V:AccentuationConditionunaccented 3.371e-03 1.354e-03 1.023e+03 2.490 0.01295 *
# yes!
# Plot the Environment x Accentuation interaction from both directions.
visreg(InComplexPC.lmer13EnvAcc, "AccentuationCondition", by="Environment")
visreg(InComplexPC.lmer13EnvAcc, "Environment", by="AccentuationCondition")
# okay this makes perfect sense but effect is weak
# Compare the two interaction models (same df, so this compares logLik).
anova(InComplexPC.lmer13EnvAcc,InComplexPC.lmer13EnvStress)
# Df AIC BIC logLik deviance Chisq Chi Df Pr(>Chisq)
# object 11 -6712.5 -6657.5 3367.3 -6734.5
# ..1 11 -6732.1 -6677.1 3377.0 -6754.1 19.552 0 < 2.2e-16 ***
# other model is better!
# Interaction check 2c: AccentuationCondition x BaseInitialStress
# (Environment kept as a main effect).
InComplexPC.lmer13StressAcc<- lmer(bc ~ Environment+AccentuationCondition*BaseInitialStress+
                                     + LocSpeech +
                                     PrecSegDur+
                                     (1|Item) + (1|Participant), data = InComplexRating2)
summary(InComplexPC.lmer13StressAcc)
# Fixed effects:
# Estimate Std. Error df t value Pr(>|t|)
# (Intercept) 8.923e-01 3.265e-03 4.775e+02 273.318 < 2e-16 ***
# Environmentn#nV -1.023e-02 3.183e-03 4.660e+01 -3.213 0.00238 **
# Environmentn#V -2.997e-02 1.926e-03 4.900e+01 -15.560 < 2e-16 ***
# AccentuationConditionunaccented -2.099e-03 1.014e-03 1.051e+03 -2.070 0.03872 *
# BaseInitialStressunstressed -4.656e-03 1.942e-03 5.920e+01 -2.397 0.01969 *
# LocSpeech -1.945e-03 1.971e-04 1.060e+03 -9.870 < 2e-16 ***
# PrecSegDur -4.908e-02 1.792e-02 1.063e+03 -2.740 0.00625 **
# AccentuationConditionunaccented:BaseInitialStressunstressed 3.916e-03 1.287e-03 1.023e+03 3.044 0.00239 **
# Plot the Accentuation x Stress interaction from both directions.
visreg(InComplexPC.lmer13StressAcc, "AccentuationCondition", by="BaseInitialStress")
visreg(InComplexPC.lmer13StressAcc, "BaseInitialStress", by="AccentuationCondition")
# okay this makes perfect sense but effect is weak
# Compare Accentuation x Stress model against the Environment x Stress model.
anova(InComplexPC.lmer13StressAcc,InComplexPC.lmer13EnvStress)
# Df AIC BIC logLik deviance Chisq Chi Df Pr(>Chisq)
# object 11 -6712.2 -6657.2 3367.1 -6734.2
# ..1 11 -6732.1 -6677.1 3377.0 -6754.1 19.834 0 < 2.2e-16 ***
# other model is clearly better
# What about a 3-way interaction
# Full three-way interaction Environment x Accentuation x Stress,
# then LRT against the Environment x Stress two-way model.
InComplexPC.lmer13EnvStressAcc<- lmer(bc ~ Environment*AccentuationCondition*BaseInitialStress+
                                        + LocSpeech +
                                        PrecSegDur+
                                        (1|Item) + (1|Participant), data = InComplexRating2)
summary(InComplexPC.lmer13EnvStressAcc)
anova(InComplexPC.lmer13EnvStressAcc,InComplexPC.lmer13EnvStress)
# Df AIC BIC logLik deviance Chisq Chi Df Pr(>Chisq)
# ..1 11 -6732.1 -6677.1 3377.0 -6754.1
# object 17 -6739.6 -6654.6 3386.8 -6773.6 19.536 6 0.003348 **
# what about 2 2 way interactions?
# 1. Stress and Env and Stress and Acc
# Two two-way interactions sharing Environment:
# Environment x Accentuation + Environment x Stress; plot and LRT vs the 3-way.
InComplexPC.lmer13EnvStressEnvAcc<- lmer(bc ~ Environment*AccentuationCondition+ Environment*BaseInitialStress+
                                           + LocSpeech +
                                           PrecSegDur+
                                           (1|Item) + (1|Participant), data = InComplexRating2)
summary(InComplexPC.lmer13EnvStressEnvAcc)
visreg(InComplexPC.lmer13EnvStressEnvAcc, "Environment", by ="AccentuationCondition", overlay=T)
visreg(InComplexPC.lmer13EnvStressEnvAcc, "Environment", by ="BaseInitialStress", overlay=T)
anova(InComplexPC.lmer13EnvStressAcc,InComplexPC.lmer13EnvStressEnvAcc)
# Df AIC BIC logLik deviance Chisq Chi Df Pr(>Chisq)
# ..1 14 -6738.5 -6668.5 3383.3 -6766.5
# object 17 -6739.6 -6654.6 3386.8 -6773.6 7.0678 3 0.06977 .
# same
# 2. Stress and Env, Stress and Acc
# Alternative pair of two-way interactions:
# Environment x Accentuation + Accentuation x Stress.
InComplexPC.lmer13EnvStressStressAcc<- lmer(bc ~ Environment*AccentuationCondition+ AccentuationCondition*BaseInitialStress+
                                              + LocSpeech +
                                              PrecSegDur+
                                              (1|Item) + (1|Participant), data = InComplexRating2)
summary(InComplexPC.lmer13EnvStressStressAcc)
# not sign
# okay this seems to be the best model so far
# Environment x Stress effect plotted separately for each accentuation
# condition (accented vs unaccented), overlaid by stress level.
visreg(InComplexPC.lmer13StressAcc, "Environment", by="BaseInitialStress",
       overlay=T,cond=list(AccentuationCondition="accented"))
visreg(InComplexPC.lmer13StressAcc, "Environment", by="BaseInitialStress",
       overlay=T,cond=list(AccentuationCondition="unaccented"))
# 3. Stress and PC
# Interaction check 3a: BaseInitialStress x PCDec1.
InComplexPC.lmer13StressPC1<- lmer(bc ~ Environment+BaseInitialStress*PCDec1+
                                     + LocSpeech +
                                     PrecSegDur+
                                     (1|Item) + (1|Participant), data = InComplexRating2)
summary(InComplexPC.lmer13StressPC1)
# no
# Interaction check 3b: BaseInitialStress x PCDec2.
InComplexPC.lmer13StressPC2<- lmer(bc ~ Environment+BaseInitialStress*PCDec2+
                                     + LocSpeech +
                                     PrecSegDur+
                                     (1|Item) + (1|Participant), data = InComplexRating2)
summary(InComplexPC.lmer13StressPC2)
# no
# Interaction check 3c: BaseInitialStress x PCDec3.
InComplexPC.lmer13StressPC3<- lmer(bc ~ Environment+BaseInitialStress*PCDec3+
                                     + LocSpeech +
                                     PrecSegDur+
                                     (1|Item) + (1|Participant), data = InComplexRating2)
summary(InComplexPC.lmer13StressPC3)
#no
# 4. Acc and PC
# Interaction check 4a: AccentuationCondition x PCDec1, plus LRT against
# the three-way Env x Acc x Stress model.
InComplexPC.lmer13AccPC1<- lmer(bc ~ Environment+AccentuationCondition*PCDec1+
                                  + LocSpeech +
                                  PrecSegDur+
                                  (1|Item) + (1|Participant), data = InComplexRating2)
summary(InComplexPC.lmer13AccPC1)
anova(InComplexPC.lmer13EnvStressAcc,InComplexPC.lmer13AccPC1)
# other model is clearly better
# Interaction check 4b: AccentuationCondition x PCDec2.
InComplexPC.lmer13AccPC2<- lmer(bc ~ Environment+AccentuationCondition*PCDec2+
                                  + LocSpeech +
                                  PrecSegDur+
                                  (1|Item) + (1|Participant), data = InComplexRating2)
summary(InComplexPC.lmer13AccPC2)
#no
# Interaction check 4c: AccentuationCondition x PCDec3.
InComplexPC.lmer13AccPC3<- lmer(bc ~ Environment+AccentuationCondition*PCDec3+
                                  + LocSpeech +
                                  PrecSegDur+
                                  (1|Item) + (1|Participant), data = InComplexRating2)
summary(InComplexPC.lmer13AccPC3)
# no
##################
# SO NO EFFECT OF SEGMENTABILITY IN TERMS OF PC
# Final model would be either
summary(InComplexPC.lmer13EnvStressAcc)
# or
summary(InComplexPC.lmer13EnvStressEnvAcc)
# we need to (as in the final model) change reference level etc. - probably the 3-way
# interaction will then vanish
# NOTE(review): rename() is called as rename(data, new = old), i.e. the
# dplyr signature — confirm dplyr (not plyr) is the attached rename().
# First free up the name "Accentuation", then rename the condition column.
InComplexRating2<-rename(InComplexRating2,AccentuationAnnotator=Accentuation)
InComplexRating2<-rename(InComplexRating2,Accentuation=AccentuationCondition)
# need to rename the stress levels
levels(InComplexRating2$BaseInitialStress)
#[1] "primary"   "unstressed"
# Relabel levels in place; order must match the output shown above.
levels(InComplexRating2$BaseInitialStress)<-c("stressed" , "unstressed")
levels(InComplexRating2$BaseInitialStress)
#[1] "stressed"   "unstressed"
# also need to change ref levels for environment
InComplexRating2$Environment <- relevel (InComplexRating2$Environment, ref= "n#nV")
# So, let's refit
# Refit the three-way interaction after the rename/relevel above
# (new reference level "n#nV"; column now called Accentuation).
InComplexPC.lmer13EnvStressAcc2<- lmer(bc ~ Environment*Accentuation*BaseInitialStress+
                                         + LocSpeech +
                                         PrecSegDur+
                                         (1|Item) + (1|Participant), data = InComplexRating2)
summary(InComplexPC.lmer13EnvStressAcc2)
# yep its gone, so the following the the final model:
# Final PC model: Environment x Accentuation + Environment x Stress,
# plus LocSpeech and PrecSegDur covariates.
InComplexPC.Final<- lmer(bc ~ Environment*Accentuation+ Environment*BaseInitialStress+
                           + LocSpeech +
                           PrecSegDur+
                           (1|Item) + (1|Participant), data = InComplexRating2)
summary(InComplexPC.Final)
# Fixed effects:
# Estimate Std. Error df t value Pr(>|t|)
# (Intercept) 8.872e-01 3.978e-03 2.074e+02 223.012 < 2e-16 ***
# Environmentn#C 9.451e-04 3.023e-03 6.090e+01 0.313 0.755602
# Environmentn#V -2.482e-02 3.220e-03 5.820e+01 -7.707 1.86e-10 ***
# Accentuationunaccented -5.614e-03 2.325e-03 1.025e+03 -2.415 0.015915 *
# BaseInitialStressunstressed -2.132e-02 5.087e-03 4.650e+01 -4.191 0.000123 ***
# LocSpeech -1.849e-03 1.926e-04 9.329e+02 -9.598 < 2e-16 ***
# PrecSegDur -4.719e-02 1.782e-02 1.071e+03 -2.649 0.008201 **
# Environmentn#C:Accentuationunaccented 3.866e-03 2.523e-03 1.019e+03 1.532 0.125712
# Environmentn#V:Accentuationunaccented 7.281e-03 2.458e-03 1.019e+03 2.963 0.003122 **
# Environmentn#C:BaseInitialStressunstressed 2.785e-02 5.515e-03 4.570e+01 5.050 7.53e-06 ***
# Environmentn#V:BaseInitialStressunstressed 1.421e-02 5.428e-03 4.590e+01 2.617 0.011960 *
#############
# Let's get the two models for the dissertation
# Export the fixed-effects table of the final PC model as LaTeX (xtable).
table_final_model_PC<-as.data.frame(coef(summary(InComplexPC.Final)))
xtable(table_final_model_PC,digits = 3)
########################
# Normal model
##############################################################
# Let's refit our model incorporating the "right variables"
# "Normal" (non-PC) model: raw ConsonantDur as DV with the full set of
# control predictors, on the untrimmed InComplex data.
InComplex.lmer3 <- lmer(ConsonantDur ~ Environment+ AccentuationCondition+ OrderRescale +logWordFormFreq+
                          BaseInitialStress + LocSpeech + GlobalSpeechRate +
                          PrePause + PostPause + PrecSegDur+
                          Affix+ (1|Item) + (1|Participant), data = InComplex)
(summary(InComplex.lmer3))
# Fixed effects:
# Estimate Std. Error df t value Pr(>|t|)
# (Intercept) 1.424e-01 6.614e-03 4.775e+02 21.528 < 2e-16 ***
# Environmentn#nV -1.254e-02 5.020e-03 4.490e+01 -2.498 0.01621 *
# Environmentn#V -3.913e-02 3.247e-03 4.560e+01 -12.052 8.88e-16 ***
# AccentuationConditionunaccented 1.857e-03 1.563e-03 8.929e+02 1.188 0.23518
# OrderRescale -1.437e-05 5.618e-05 1.105e+03 -0.256 0.79813
# logWordFormFreq 5.686e-04 5.355e-04 4.540e+01 1.062 0.29391
# BaseInitialStressunstressed -3.030e-03 2.895e-03 4.490e+01 -1.046 0.30098
# LocSpeech -2.952e-03 3.405e-04 1.138e+03 -8.670 < 2e-16 ***
# GlobalSpeechRate -5.206e-03 1.866e-03 7.422e+02 -2.791 0.00539 **
# PrePausePause 1.816e-03 1.252e-03 1.111e+03 1.450 0.14730
# PostPausePause -3.310e-03 1.406e-03 1.117e+03 -2.354 0.01876 *
# PrecSegDur -6.130e-02 2.768e-02 1.113e+03 -2.214 0.02701 *
# AffixLoc 9.774e-04 3.315e-03 4.950e+01 0.295 0.76935
# ---
# Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
#
###################################################################
# #
# Let's now check the assumptions of our model: #
###################################################################
# Reset plotting layout, then QQ-plot the residuals of the raw-DV model.
par(mfrow=c(1,1))
qqnorm (residuals (InComplex.lmer3))
qqline (residuals (InComplex.lmer3))
# That does not look that good.
## The qq plot shows that the residuals are not normally distributed --
# this means that the assumption of a linear relation between the dependent
# and the independent variable is violated.
# What to do?
# - transform the response variable
# - transform one or more of the predictors
# - add higher-order predictors
# Maybe a box-cox transformation will lead to a better
# distribuition of res. Let's try
# Plain lm with the same fixed effects: boxcox() below requires an lm fit
# (it does not accept merMod objects).
InComplex.lm<-lm(ConsonantDur ~ Environment+ AccentuationCondition+OrderRescale +logWordFormFreq+
                   BaseInitialStress + LocSpeech + GlobalSpeechRate +
                   PrePause + PostPause + PrecSegDur+Affix, data = InComplex)
summary(InComplex.lm)
# Coefficients:
# Estimate Std. Error t value Pr(>|t|)
# (Intercept) 1.457e-01 5.635e-03 25.859 < 2e-16 ***
# Environmentn#nV -1.142e-02 2.338e-03 -4.882 1.20e-06 ***
# Environmentn#V -3.888e-02 1.515e-03 -25.657 < 2e-16 ***
# AccentuationConditionunaccented 1.378e-03 1.544e-03 0.892 0.372405
# OrderRescale 7.333e-06 6.327e-05 0.116 0.907749
# logWordFormFreq 5.344e-04 2.468e-04 2.165 0.030571 *
# BaseInitialStressunstressed -2.767e-03 1.329e-03 -2.082 0.037572 *
# LocSpeech -3.090e-03 3.394e-04 -9.105 < 2e-16 ***
# GlobalSpeechRate -5.576e-03 1.616e-03 -3.450 0.000582 ***
# PrePausePause 7.492e-04 1.315e-03 0.570 0.569017
# PostPausePause -6.095e-03 1.479e-03 -4.121 4.04e-05 ***
# PrecSegDur -4.985e-02 2.906e-02 -1.715 0.086534 .
# AffixLoc 1.097e-03 1.673e-03 0.656 0.512086
# Box-Cox profile: pick the lambda maximizing the profile log-likelihood.
bc<-boxcox(InComplex.lm)
lambda <- bc$x[which.max(bc$y)]
lambda
#[1] 0.06060606
# Power transform y^lambda (monotone equivalent of the full Box-Cox
# (y^lambda - 1)/lambda); stored as new column "bc" on InComplex.
InComplex$bc <- InComplex$ConsonantDur^lambda
# Refit the full model on the Box-Cox-transformed DV.
InComplex.lmerBC <- lmer(bc ~ Environment+ AccentuationCondition+OrderRescale +logWordFormFreq+
                           BaseInitialStress + LocSpeech + GlobalSpeechRate +
                           PrePause + PostPause + PrecSegDur+
                           Affix+ (1|Item) + (1|Participant), data = InComplex)
summary(InComplex.lmerBC)
# Fixed effects:
# Estimate Std. Error df t value Pr(>|t|)
# (Intercept) 8.963e-01 4.603e-03 5.150e+02 194.696 < 2e-16 ***
# Environmentn#nV -9.303e-03 3.356e-03 4.490e+01 -2.772 0.00807 **
# Environmentn#V -2.891e-02 2.171e-03 4.560e+01 -13.320 < 2e-16 ***
# AccentuationConditionunaccented 3.597e-04 1.099e-03 9.458e+02 0.327 0.74346
# OrderRescale -1.422e-05 3.921e-05 1.105e+03 -0.363 0.71699
# logWordFormFreq 2.306e-04 3.580e-04 4.540e+01 0.644 0.52283
# BaseInitialStressunstressed -3.946e-03 1.935e-03 4.480e+01 -2.039 0.04738 *
# LocSpeech -1.976e-03 2.374e-04 1.134e+03 -8.325 2.22e-16 ***
# GlobalSpeechRate -2.244e-03 1.314e-03 8.204e+02 -1.708 0.08795 .
# PrePausePause 1.290e-03 8.753e-04 1.114e+03 1.473 0.14095
# PostPausePause -2.347e-03 9.827e-04 1.121e+03 -2.388 0.01710 *
# PrecSegDur -4.881e-02 1.935e-02 1.117e+03 -2.522 0.01180 *
# AffixLoc 1.040e-03 2.221e-03 4.990e+01 0.468 0.64158
#let's check the assumptions
# Residual QQ-plot for the transformed-DV model.
qqnorm (residuals (InComplex.lmerBC))
qqline (residuals (InComplex.lmerBC))
# it is better but not that good. Let's remove outliers and see how it looks afterwards
# Trim observations with |standardized residual| > 2.5
# (romr.fnc from LMERConvenienceFunctions — assumed loaded earlier).
outliers<-romr.fnc(InComplex.lmerBC, InComplex, trim = 2.5)
# n.removed = 22
# percent.removed = 1.904762
# Trimmed data set used for all subsequent "normal model" fits.
InComplex2<-outliers$data
dim(InComplex2)
#[1] 1133   92
dim(InComplex)
#[1] 1155   91
# okay it seemes to have worked
# Refit the Box-Cox model on the outlier-trimmed data (InComplex2);
# this is the working model for the simplification steps below.
InComplex.lmerBC2 <- lmer(bc ~ Environment+ AccentuationCondition+ OrderRescale +logWordFormFreq+
                            BaseInitialStress + LocSpeech + GlobalSpeechRate +
                            PrePause + PostPause + PrecSegDur+
                            Affix+ (1|Item) + (1|Participant), data = InComplex2)
summary(InComplex.lmerBC2)
# Fixed effects:
# Estimate Std. Error df t value Pr(>|t|)
# (Intercept) 8.948e-01 4.275e-03 4.760e+02 209.319 < 2e-16 ***
# Environmentn#nV -9.955e-03 3.234e-03 4.470e+01 -3.078 0.00355 **
# Environmentn#V -2.925e-02 2.085e-03 4.480e+01 -14.024 < 2e-16 ***
# AccentuationConditionunaccented 1.063e-04 1.012e-03 9.780e+02 0.105 0.91636
# OrderRescale -2.500e-05 3.536e-05 1.078e+03 -0.707 0.47977
# logWordFormFreq 2.117e-04 3.440e-04 4.470e+01 0.616 0.54131
# BaseInitialStressunstressed -3.146e-03 1.862e-03 4.430e+01 -1.690 0.09808 .
# LocSpeech -1.908e-03 2.172e-04 1.112e+03 -8.787 < 2e-16 ***
# GlobalSpeechRate -1.885e-03 1.212e-03 9.102e+02 -1.555 0.12019
# PrePausePause 5.287e-04 7.920e-04 1.089e+03 0.668 0.50458
# PostPausePause -1.970e-03 9.017e-04 1.096e+03 -2.184 0.02916 *
# PrecSegDur -4.640e-02 1.755e-02 1.093e+03 -2.644 0.00831 **
# AffixLoc 1.018e-03 2.129e-03 4.870e+01 0.478 0.63469 0.26011
# Residual QQ-plot after outlier removal.
qqnorm (residuals (InComplex.lmerBC2))
qqline (residuals (InComplex.lmerBC2))
# this looks actually pretty good.
# We will work with this model! --> InComplex.lmerBC2
#########################################################################################
# #
#                 Simplification of the model                                           #
#########################################################################################
# Re-display the working model before backward elimination.
summary(InComplex.lmerBC2)
# Fixed effects:
# Estimate Std. Error df t value Pr(>|t|)
# (Intercept) 8.948e-01 4.275e-03 4.760e+02 209.319 < 2e-16 ***
# Environmentn#nV -9.955e-03 3.234e-03 4.470e+01 -3.078 0.00355 **
# Environmentn#V -2.925e-02 2.085e-03 4.480e+01 -14.024 < 2e-16 ***
# AccentuationConditionunaccented 1.063e-04 1.012e-03 9.780e+02 0.105 0.91636
# OrderRescale -2.500e-05 3.536e-05 1.078e+03 -0.707 0.47977
# logWordFormFreq 2.117e-04 3.440e-04 4.470e+01 0.616 0.54131
# BaseInitialStressunstressed -3.146e-03 1.862e-03 4.430e+01 -1.690 0.09808 .
# LocSpeech -1.908e-03 2.172e-04 1.112e+03 -8.787 < 2e-16 ***
# GlobalSpeechRate -1.885e-03 1.212e-03 9.102e+02 -1.555 0.12019
# PrePausePause 5.287e-04 7.920e-04 1.089e+03 0.668 0.50458
# PostPausePause -1.970e-03 9.017e-04 1.096e+03 -2.184 0.02916 *
# PrecSegDur -4.640e-02 1.755e-02 1.093e+03 -2.644 0.00831 **
# AffixLoc 1.018e-03 2.129e-03 4.870e+01 0.478 0.63469
# let's throw out Accentuation
# Backward-elimination step 1 (normal model): drop AccentuationCondition.
InComplex.lmerBC3 <- lmer(bc ~ Environment+ PostPause+OrderRescale +logWordFormFreq+
                            BaseInitialStress + LocSpeech + GlobalSpeechRate +
                            PrePause + PrecSegDur+
                            Affix+(1|Item) + (1|Participant), data = InComplex2)
summary(InComplex.lmerBC3)
# Fixed effects:
# Estimate Std. Error df t value Pr(>|t|)
# (Intercept) 8.947e-01 4.134e-03 4.567e+02 216.408 < 2e-16 ***
# Environmentn#nV -9.956e-03 3.233e-03 4.470e+01 -3.079 0.00354 **
# Environmentn#V -2.925e-02 2.085e-03 4.480e+01 -14.031 < 2e-16 ***
# PostPausePause -1.990e-03 8.806e-04 1.099e+03 -2.260 0.02401 *
# OrderRescale -2.494e-05 3.534e-05 1.079e+03 -0.706 0.48055
# logWordFormFreq 2.093e-04 3.432e-04 4.430e+01 0.610 0.54508
# BaseInitialStressunstressed -3.139e-03 1.860e-03 4.420e+01 -1.688 0.09855 .
# LocSpeech -1.908e-03 2.169e-04 1.112e+03 -8.793 < 2e-16 ***
# GlobalSpeechRate -1.812e-03 9.906e-04 1.106e+03 -1.829 0.06765 .
# PrePausePause 5.300e-04 7.916e-04 1.091e+03 0.670 0.50327
# PrecSegDur -4.634e-02 1.753e-02 1.094e+03 -2.643 0.00832 **
# AffixLoc 1.013e-03 2.129e-03 4.870e+01 0.476 0.63610 0.25783
# LRT: removing AccentuationCondition (BC3 vs BC2).
anova(InComplex.lmerBC2,InComplex.lmerBC3)
# Df AIC BIC logLik deviance Chisq Chi Df Pr(>Chisq)
# ..1 15 -6939.2 -6863.7 3484.6 -6969.2
# object 16 -6937.2 -6856.7 3484.6 -6969.2 0.0108 1 0.9172
# model did not become worse
# let's throw out Affix
# Backward-elimination step 2: drop Affix.
InComplex.lmerBC4 <- lmer(bc ~ Environment+ PostPause+OrderRescale +logWordFormFreq+
                            BaseInitialStress + LocSpeech + GlobalSpeechRate +
                            PrePause + PrecSegDur+
                            (1|Item) + (1|Participant), data = InComplex2)
summary(InComplex.lmerBC4)
# Fixed effects:
# Estimate Std. Error df t value Pr(>|t|)
# (Intercept) 8.951e-01 4.022e-03 4.950e+02 222.564 < 2e-16 ***
# Environmentn#nV -1.014e-02 3.185e-03 4.550e+01 -3.183 0.00263 **
# Environmentn#V -2.955e-02 1.973e-03 4.760e+01 -14.980 < 2e-16 ***
# PostPausePause -1.985e-03 8.804e-04 1.100e+03 -2.254 0.02436 *
# OrderRescale -2.536e-05 3.533e-05 1.081e+03 -0.718 0.47298
# logWordFormFreq 2.233e-04 3.393e-04 4.550e+01 0.658 0.51393
# BaseInitialStressunstressed -3.047e-03 1.836e-03 4.560e+01 -1.659 0.10391
# LocSpeech -1.930e-03 2.116e-04 1.033e+03 -9.121 < 2e-16 ***
# GlobalSpeechRate -1.736e-03 9.775e-04 1.107e+03 -1.776 0.07597 .
# PrePausePause 5.488e-04 7.905e-04 1.095e+03 0.694 0.48764
# PrecSegDur -4.652e-02 1.752e-02 1.096e+03 -2.655 0.00804 **
# nothing has changed
# LRT: removing Affix (BC4 vs BC3).
anova(InComplex.lmerBC3,InComplex.lmerBC4)
# Df AIC BIC logLik deviance Chisq Chi Df Pr(>Chisq)
# ..1 14 -6941.0 -6870.5 3484.5 -6969.0
# object 15 -6939.2 -6863.7 3484.6 -6969.2 0.2588 1 0.6109
# nothing has changed
# let's throw out Word From Freq
# Backward-elimination step 3: drop logWordFormFreq.
InComplex.lmerBC5 <- lmer(bc ~ Environment+ PostPause+OrderRescale +
                            BaseInitialStress + LocSpeech + GlobalSpeechRate +
                            PrePause + PrecSegDur+
                            (1|Item) + (1|Participant), data = InComplex2)
summary(InComplex.lmerBC5)
# Fixed effects:
# Estimate Std. Error df t value Pr(>|t|)
# (Intercept) 8.959e-01 3.842e-03 7.100e+02 233.191 < 2e-16 ***
# Environmentn#nV -1.012e-02 3.167e-03 4.660e+01 -3.195 0.00251 **
# Environmentn#V -2.983e-02 1.916e-03 4.900e+01 -15.567 < 2e-16 ***
# PostPausePause -1.973e-03 8.801e-04 1.101e+03 -2.241 0.02519 *
# OrderRescale -2.468e-05 3.531e-05 1.082e+03 -0.699 0.48468
# BaseInitialStressunstressed -3.145e-03 1.820e-03 4.680e+01 -1.728 0.09052 .
# LocSpeech -1.938e-03 2.111e-04 1.026e+03 -9.182 < 2e-16 ***
# GlobalSpeechRate -1.672e-03 9.725e-04 1.100e+03 -1.719 0.08582 .
# PrePausePause 5.673e-04 7.900e-04 1.097e+03 0.718 0.47284
# PrecSegDur -4.654e-02 1.752e-02 1.096e+03 -2.657 0.00801 **
# LRT: removing logWordFormFreq (BC5 vs BC4).
anova(InComplex.lmerBC4,InComplex.lmerBC5)
# Df AIC BIC logLik deviance Chisq Chi Df Pr(>Chisq)
# ..1 13 -6942.5 -6877.1 3484.2 -6968.5
# object 14 -6941.0 -6870.5 3484.5 -6969.0 0.4764 1 0.4901
#nothing has changed
# Backward elimination step: drop OrderRescale (n.s. in BC5) and refit.
InComplex.lmerBC6 <- lmer(bc ~ Environment+ PostPause +
                            BaseInitialStress + LocSpeech + GlobalSpeechRate +
                            PrePause + PrecSegDur+
                            (1|Item) + (1|Participant), data = InComplex2)
summary(InComplex.lmerBC6)
# Fixed effects:
# Estimate Std. Error df t value Pr(>|t|)
# (Intercept) 8.954e-01 3.786e-03 6.910e+02 236.507 < 2e-16 ***
# Environmentn#nV -1.011e-02 3.165e-03 4.660e+01 -3.193 0.00252 **
# Environmentn#V -2.982e-02 1.915e-03 4.900e+01 -15.569 < 2e-16 ***
# PostPausePause -1.997e-03 8.792e-04 1.102e+03 -2.271 0.02333 *
# BaseInitialStressunstressed -3.135e-03 1.819e-03 4.680e+01 -1.724 0.09135 .
# LocSpeech -1.932e-03 2.108e-04 1.027e+03 -9.162 < 2e-16 ***
# GlobalSpeechRate -1.679e-03 9.722e-04 1.101e+03 -1.727 0.08451 .
# PrePausePause 6.106e-04 7.874e-04 1.098e+03 0.775 0.43826
# PrecSegDur -4.708e-02 1.750e-02 1.097e+03 -2.691 0.00724 ** 0.25876
# LRT: removing OrderRescale should not degrade fit.
anova(InComplex.lmerBC5,InComplex.lmerBC6)
# Df AIC BIC logLik deviance Chisq Chi Df Pr(>Chisq)
# ..1 12 -6944.0 -6883.6 3484.0 -6968.0
# object 13 -6942.5 -6877.1 3484.2 -6968.5 0.4905 1 0.4837
# nothing has changed
# Backward elimination step: drop PrePause (n.s. in BC6) and refit.
InComplex.lmerBC7 <- lmer(bc ~ Environment+ PostPause +
                            BaseInitialStress + LocSpeech + GlobalSpeechRate +
                            PrecSegDur+
                            (1|Item) + (1|Participant), data = InComplex2)
summary(InComplex.lmerBC7)
# Fixed effects:
# Estimate Std. Error df t value Pr(>|t|)
# (Intercept) 8.961e-01 3.678e-03 6.519e+02 243.628 < 2e-16 ***
# Environmentn#nV -1.009e-02 3.168e-03 4.660e+01 -3.186 0.00257 **
# Environmentn#V -2.981e-02 1.917e-03 4.900e+01 -15.553 < 2e-16 ***
# PostPausePause -1.975e-03 8.784e-04 1.104e+03 -2.248 0.02475 *
# BaseInitialStressunstressed -3.130e-03 1.820e-03 4.680e+01 -1.720 0.09207 .
# LocSpeech -1.923e-03 2.105e-04 1.032e+03 -9.137 < 2e-16 ***
# GlobalSpeechRate -1.863e-03 9.427e-04 1.097e+03 -1.976 0.04841 *
# PrecSegDur -4.730e-02 1.749e-02 1.098e+03 -2.704 0.00696 **
# LRT: removing PrePause should not degrade fit.
anova(InComplex.lmerBC6,InComplex.lmerBC7)
# Df AIC BIC logLik deviance Chisq Chi Df Pr(>Chisq)
# ..1 11 -6945.4 -6890.0 3483.7 -6967.4
# object 12 -6944.0 -6883.6 3484.0 -6968.0 0.6024 1 0.4377
#still no difference
# Backward elimination step: drop BaseInitialStress (marginal in BC7) and refit.
InComplex.lmerBC8 <- lmer(bc ~ Environment+ PostPause +
                            LocSpeech + GlobalSpeechRate +
                            PrecSegDur+
                            (1|Item) + (1|Participant), data = InComplex2)
summary(InComplex.lmerBC8)
# Fixed effects:
# Estimate Std. Error df t value Pr(>|t|)
# (Intercept) 8.949e-01 3.618e-03 6.832e+02 247.350 < 2e-16 ***
# Environmentn#nV -9.868e-03 3.226e-03 4.730e+01 -3.058 0.00366 **
# Environmentn#V -3.118e-02 1.776e-03 4.910e+01 -17.554 < 2e-16 ***
# PostPausePause -1.925e-03 8.782e-04 1.104e+03 -2.192 0.02858 *
# LocSpeech -1.902e-03 2.105e-04 1.037e+03 -9.035 < 2e-16 ***
# GlobalSpeechRate -1.865e-03 9.438e-04 1.101e+03 -1.976 0.04837 *
# PrecSegDur -4.763e-02 1.750e-02 1.097e+03 -2.722 0.00658 **
# LRT: is BC7 (with stress) better than BC8 (without)? Result is only marginal.
anova(InComplex.lmerBC7,InComplex.lmerBC8)
# Df AIC BIC logLik deviance Chisq Chi Df Pr(>Chisq)
# ..1 10 -6944.3 -6894 3482.2 -6964.3
# object 11 -6945.4 -6890 3483.7 -6967.4 3.0889 1 0.07883 .
# lmerBC7 might be better - let's wait for the interactions, otherwise throw it out (only
# a marginally significant difference)
# so that would be the final model without interactions
################################
# Check influence of RelFreq and ST (shown to be significant when they were the
# only variables in the model)
# Re-check SemanticTransparency on top of the reduced model BC8.
InComplex.lmerBC8SemT <- lmer(bc ~ Environment+ SemanticTransparency +PostPause +
                                LocSpeech + GlobalSpeechRate +
                                PrecSegDur+
                                (1|Item) + (1|Participant), data = InComplex2)
summary(InComplex.lmerBC8SemT)
# not significant
# Re-check log relative frequency on top of the reduced model BC8.
InComplex.lmerBC8RelF <- lmer(bc ~ Environment+ logRelFreq+ PostPause +
                                LocSpeech + GlobalSpeechRate +
                                PrecSegDur+
                                (1|Item) + (1|Participant), data = InComplex2)
summary(InComplex.lmerBC8RelF)
# not significant, so InComplex.lmerBC8 would be the final model without inter-
#actions
###########################################################################
# Checking for interactions #
###########################################################################
#This looks good already. Let's see however, whether interactions will
# add to the goodness of the model. I will only look at interactions which
# make sense from a theoretical point of view.
# There are actually a few which I would consider to be of interest:
# 1. Environment and accentuation and stress and pause
# 2. Decomposability measures and accentuation
# 3. Decomposability measures and stress
# Let's see
# 1. Environment and accentuation and stress and pause
# Environment and stress
# Interaction check 1a: Environment x base-initial stress added to BC8.
InComplex.lmerBC8EnvStress <- lmer(bc ~ Environment*BaseInitialStress + PostPause +
                                     LocSpeech + GlobalSpeechRate +
                                     PrecSegDur+
                                     (1|Item) + (1|Participant), data = InComplex2)
summary(InComplex.lmerBC8EnvStress)
# Fixed effects:
# Estimate Std. Error df t value Pr(>|t|)
# (Intercept) 8.926e-01 3.590e-03 6.788e+02 248.614 < 2e-16 ***
# Environmentn#nV -3.058e-03 2.812e-03 4.160e+01 -1.088 0.28304
# Environmentn#V -2.396e-02 2.076e-03 4.290e+01 -11.538 9.77e-15 ***
# BaseInitialStressunstressed 5.728e-03 2.201e-03 4.340e+01 2.602 0.01262 *
# PostPausePause -1.818e-03 8.758e-04 1.112e+03 -2.076 0.03809 *
# LocSpeech -1.843e-03 2.049e-04 8.460e+02 -8.995 < 2e-16 ***
# GlobalSpeechRate -1.970e-03 9.290e-04 1.047e+03 -2.120 0.03420 *
# PrecSegDur -4.743e-02 1.745e-02 1.106e+03 -2.718 0.00667 **
# Environmentn#nV:BaseInitialStressunstressed -2.637e-02 5.651e-03 4.590e+01 -4.666 2.68e-05 ***
# Environmentn#V:BaseInitialStressunstressed -1.325e-02 2.938e-03 4.310e+01 -4.510 4.92e-05 ***
# Is this better than the one without the interaction?
anova(InComplex.lmerBC8,InComplex.lmerBC8EnvStress)
# Df AIC BIC logLik deviance Chisq Chi Df Pr(>Chisq)
# object 10 -6944.3 -6894 3482.2 -6964.3
# ..1 13 -6968.4 -6903 3497.2 -6994.4 30.118 3 1.303e-06 ***
#YES
# Visualize the Environment-by-stress interaction, back-transformed from the
# Box-Cox scale to milliseconds (lambda is the Box-Cox exponent fit earlier).
# Fix: spell out TRUE instead of the reassignable shorthand T.
visreg(InComplex.lmerBC8EnvStress, "Environment", by = "BaseInitialStress",
       overlay = TRUE, trans = function(x) x^(1 / lambda) * 1000)
# makes sense
# Interaction check 1b: Environment x accentuation condition added to BC8.
InComplex.lmerBC8EnvAcc <- lmer(bc ~ Environment*AccentuationCondition + PostPause +
                                  LocSpeech + GlobalSpeechRate +
                                  PrecSegDur+
                                  (1|Item) + (1|Participant), data = InComplex2)
summary(InComplex.lmerBC8EnvAcc)
# Fixed effects:
# Estimate Std. Error df t value
# (Intercept) 8.955e-01 3.795e-03 6.556e+02 235.998
# Environmentn#nV -8.253e-03 3.456e-03 6.230e+01 -2.388
# Environmentn#V -3.298e-02 1.899e-03 6.390e+01 -17.363
# AccentuationConditionunaccented -1.748e-03 1.283e-03 1.070e+03 -1.363
# PostPausePause -1.933e-03 8.959e-04 1.098e+03 -2.158
# LocSpeech -1.874e-03 2.104e-04 1.036e+03 -8.910
# GlobalSpeechRate -1.880e-03 1.162e-03 8.800e+02 -1.618
# PrecSegDur -4.841e-02 1.744e-02 1.094e+03 -2.776
# Environmentn#nV:AccentuationConditionunaccented -3.157e-03 2.498e-03 1.053e+03 -1.264
# Environmentn#V:AccentuationConditionunaccented 3.540e-03 1.321e-03 1.057e+03 2.681
# Pr(>|t|)
# (Intercept) < 2e-16 ***
# Environmentn#nV 0.01997 *
# Environmentn#V < 2e-16 ***
# AccentuationConditionunaccented 0.17315
# PostPausePause 0.03115 *
# LocSpeech < 2e-16 ***
# GlobalSpeechRate 0.10597
# PrecSegDur 0.00560 **
# Environmentn#nV:AccentuationConditionunaccented 0.20652
# Environmentn#V:AccentuationConditionunaccented 0.00746 **
# Plot the Environment-by-accentuation interaction on the millisecond scale.
# Fix: TRUE instead of T.
visreg(InComplex.lmerBC8EnvAcc, "Environment", by = "AccentuationCondition",
       overlay = TRUE, trans = function(x) x^(1 / lambda) * 1000)
# barely anything but let' see whether this model is better than
# the other
# Compare the two (non-nested) interaction models by AIC/BIC.
anova(InComplex.lmerBC8EnvAcc,InComplex.lmerBC8EnvStress)
# Df AIC BIC logLik deviance Chisq Chi Df Pr(>Chisq)
# object 13 -6950.4 -6884.9 3488.2 -6976.4
# ..1 13 -6968.4 -6903.0 3497.2 -6994.4 18.054 0 < 2.2e-16 ***
# the other is definitely better; what if both interactions are in?
# Both two-way interactions (Env x Acc and Env x Stress) in one model.
InComplex.lmerBC8EnvAccEnvStress <- lmer(bc ~ Environment*AccentuationCondition + PostPause +
                                           LocSpeech + GlobalSpeechRate +
                                           Environment*BaseInitialStress+PrecSegDur+
                                           (1|Item) + (1|Participant), data = InComplex2)
summary(InComplex.lmerBC8EnvAccEnvStress)
# both are significant, but is this model better?
# LRT: do the two two-way interactions together beat Env x Stress alone?
anova(InComplex.lmerBC8EnvAccEnvStress,InComplex.lmerBC8EnvStress)
# Df AIC BIC logLik deviance Chisq Chi Df Pr(>Chisq)
# ..1 13 -6968.4 -6903.0 3497.2 -6994.4
# object 16 -6975.2 -6894.7 3503.6 -7007.2 12.796 3 0.005099 **
# ---
# yes, it is. What about a 3-way interaction?
# Full three-way interaction: Environment x Accentuation x BaseInitialStress.
InComplex.lmerBC8EnvAccStress <- lmer(bc ~ Environment*AccentuationCondition*BaseInitialStress + PostPause +
                                        LocSpeech + GlobalSpeechRate +
                                        +PrecSegDur+
                                        (1|Item) + (1|Participant), data = InComplex2)
summary(InComplex.lmerBC8EnvAccStress)
# yes, let's have a look?
# Inspect the 3-way interaction: Env x Stress within each accentuation level,
# back-transformed to milliseconds. Fix: TRUE instead of T.
visreg(InComplex.lmerBC8EnvAccStress, "Environment", by = "BaseInitialStress",
       cond = list(AccentuationCondition = "accented"),
       overlay = TRUE, trans = function(x) x^(1 / lambda) * 1000)
visreg(InComplex.lmerBC8EnvAccStress, "Environment", by = "BaseInitialStress",
       cond = list(AccentuationCondition = "unaccented"),
       overlay = TRUE, trans = function(x) x^(1 / lambda) * 1000)
# LRT: 3-way model vs the two two-way interactions.
anova(InComplex.lmerBC8EnvAccEnvStress,InComplex.lmerBC8EnvAccStress)
# Df AIC BIC logLik deviance Chisq Chi Df Pr(>Chisq)
# object 16 -6975.2 -6894.7 3503.6 -7007.2
# ..1 19 -6975.7 -6880.0 3506.8 -7013.7 6.4519 3 0.09158 .
# basically the same, and both models show the same...3-way is a little better
# Interaction check: Environment x PrePause added to BC8.
InComplex.lmerBC8EnvPause <- lmer(bc ~ Environment*PrePause + PostPause +
                                    LocSpeech + GlobalSpeechRate +
                                    PrecSegDur+
                                    (1|Item) + (1|Participant), data = InComplex2)
summary(InComplex.lmerBC8EnvPause)
# Fixed effects:
# Estimate Std. Error df t value Pr(>|t|)
# (Intercept) 8.923e-01 3.741e-03 7.350e+02 238.520 < 2e-16 ***
# Environmentn#nV -6.206e-03 3.572e-03 7.430e+01 -1.737 0.086487 .
# Environmentn#V -2.810e-02 1.955e-03 7.590e+01 -14.373 < 2e-16 ***
# PrePausePause 3.708e-03 1.141e-03 1.085e+03 3.249 0.001194 **
# PostPausePause -2.121e-03 8.762e-04 1.102e+03 -2.420 0.015663 *
# LocSpeech -1.845e-03 2.103e-04 1.025e+03 -8.775 < 2e-16 ***
# GlobalSpeechRate -2.005e-03 9.731e-04 1.103e+03 -2.060 0.039637 *
# PrecSegDur -4.724e-02 1.741e-02 1.094e+03 -2.713 0.006775 **
# Environmentn#nV:PrePausePause -5.878e-03 2.632e-03 1.073e+03 -2.233 0.025728 *
# Environmentn#V:PrePausePause -5.023e-03 1.407e-03 1.068e+03 -3.570 0.000373 ***
# Plot Environment x PrePause on the millisecond scale. Fix: TRUE instead of T.
visreg(InComplex.lmerBC8EnvPause, "Environment", by = "PrePause",
       overlay = TRUE, trans = function(x) x^(1 / lambda) * 1000)
# does not really make sense, I don't even know why I would look at this...
# Accentuation and PrePause
# NOTE(review): this formula uses `Accentuation`, while every sibling model in
# this section uses `AccentuationCondition` (`Accentuation` is only created by
# the rename further below) — confirm this is the intended variable.
InComplex.lmerBC8AccPause <- lmer(bc ~ Environment+ Accentuation*PrePause + PostPause +
                                    LocSpeech + GlobalSpeechRate +
                                    PrecSegDur+
                                    (1|Item) + (1|Participant), data = InComplex2)
summary(InComplex.lmerBC8AccPause)
# no interaction
# Interaction check: PrePause x base-initial stress added to BC8.
InComplex.lmerBC8StressPause <- lmer(bc ~ Environment+ PrePause*BaseInitialStress + PostPause +
                                       LocSpeech + GlobalSpeechRate +
                                       PrecSegDur+
                                       (1|Item) + (1|Participant), data = InComplex2)
summary(InComplex.lmerBC8StressPause)
# no interaction
# Interaction check: base-initial stress x accentuation condition added to BC8.
InComplex.lmerBC8StressAcc <- lmer(bc ~ Environment+BaseInitialStress*AccentuationCondition + PostPause +
                                     LocSpeech + GlobalSpeechRate +
                                     PrecSegDur+
                                     (1|Item) + (1|Participant), data = InComplex2)
summary(InComplex.lmerBC8StressAcc)
# Fixed effects:
# Estimate Std. Error df t value Pr(>|t|)
# (Intercept) 8.972e-01 3.864e-03 6.353e+02 232.184 < 2e-16 ***
# Environmentn#nV -9.979e-03 3.171e-03 4.660e+01 -3.147 0.00287 **
# Environmentn#V -2.976e-02 1.918e-03 4.900e+01 -15.515 < 2e-16 ***
# BaseInitialStressunstressed -4.966e-03 1.929e-03 5.870e+01 -2.575 0.01256 *
# AccentuationConditionunaccented -1.902e-03 1.208e-03 1.060e+03 -1.574 0.11576
# PostPausePause -1.900e-03 8.967e-04 1.099e+03 -2.118 0.03436 *
# LocSpeech -1.911e-03 2.102e-04 1.036e+03 -9.091 < 2e-16 ***
# GlobalSpeechRate -1.946e-03 1.160e-03 8.745e+02 -1.677 0.09384 .
# PrecSegDur -5.035e-02 1.748e-02 1.096e+03 -2.880 0.00406 **
# BaseInitialStressunstressed:AccentuationConditionunaccented 3.664e-03 1.258e-03 1.056e+03 2.913 0.00366 **
# Plot stress x accentuation on the millisecond scale. Fix: TRUE instead of T.
visreg(InComplex.lmerBC8StressAcc, "BaseInitialStress", by = "AccentuationCondition",
       overlay = TRUE, trans = function(x) x^(1 / lambda) * 1000)
# tiny effect....
# this effect is already included in the 3-way interaction - we will use that model
# (unless one of the below is better)
################################################
# 2. Decomposability measures and accentuation:
#    accentuation condition x log relative frequency added to BC8.
InComplex.lmerBC8AccRelFreq <- lmer(bc ~ Environment+ AccentuationCondition*logRelFreq + PostPause +
                                      LocSpeech + GlobalSpeechRate +
                                      PrecSegDur+
                                      (1|Item) + (1|Participant), data = InComplex2)
summary(InComplex.lmerBC8AccRelFreq)
# Fixed effects:
# Estimate Std. Error df t value Pr(>|t|)
# (Intercept) 8.951e-01 3.788e-03 6.417e+02 236.294 < 2e-16 ***
# Environmentn#nV -9.248e-03 3.431e-03 4.620e+01 -2.695 0.009778 **
# Environmentn#V -3.148e-02 1.876e-03 4.760e+01 -16.783 < 2e-16 ***
# AccentuationConditionunaccented -7.387e-04 1.026e-03 1.005e+03 -0.720 0.471840
# logRelFreq 1.432e-04 2.634e-04 5.420e+01 0.544 0.588993
# PostPausePause -1.986e-03 8.953e-04 1.098e+03 -2.219 0.026719 *
# LocSpeech -1.893e-03 2.100e-04 1.040e+03 -9.016 < 2e-16 ***
# GlobalSpeechRate -1.826e-03 1.162e-03 8.892e+02 -1.572 0.116358
# PrecSegDur -4.846e-02 1.744e-02 1.093e+03 -2.779 0.005550 **
# AccentuationConditionunaccented:logRelFreq -5.660e-04 1.640e-04 1.058e+03 -3.452 0.000579 ***
# Plot accentuation x logRelFreq on the millisecond scale. Fix: TRUE instead of T.
visreg(InComplex.lmerBC8AccRelFreq, "logRelFreq", by = "AccentuationCondition",
       overlay = TRUE, trans = function(x) x^(1 / lambda) * 1000)
# No — the distribution and the effect size argue against this interaction.
anova(InComplex.lmerBC8AccRelFreq,InComplex.lmerBC8EnvAccStress)
# Df AIC BIC logLik deviance Chisq Chi Df Pr(>Chisq)
# object 13 -6950.4 -6885 3488.2 -6976.4
# ..1 19 -6975.7 -6880 3506.8 -7013.7 37.217 6 1.597e-06 ***
# the other model is better; let's see if we add the interaction with env and stress
InComplex.lmerBC8EnvStressAccRelFreq <- lmer(bc ~ Environment*BaseInitialStress*AccentuationCondition+ AccentuationCondition*logRelFreq + PostPause +
                                               LocSpeech + GlobalSpeechRate +
                                               PrecSegDur+
                                               (1|Item) + (1|Participant), data = InComplex2)
summary(InComplex.lmerBC8EnvStressAccRelFreq)
# Fixed effects:
# Estimate Std. Error df t value Pr(>|t|)
# (Intercept) 8.932e-01 3.796e-03 6.495e+02 235.317 < 2e-16 ***
# Environmentn#nV -1.349e-03 3.265e-03 6.360e+01 -0.413 0.68102
# Environmentn#V -2.306e-02 2.453e-03 6.590e+01 -9.401 8.62e-14 ***
# BaseInitialStressunstressed 7.261e-03 2.611e-03 6.770e+01 2.781 0.00702 **
# AccentuationConditionunaccented -9.257e-04 1.444e-03 1.088e+03 -0.641 0.52149
# logRelFreq 1.960e-04 2.199e-04 6.140e+01 0.891 0.37631
# PostPausePause -1.882e-03 8.926e-04 1.101e+03 -2.109 0.03518 *
# LocSpeech -1.823e-03 2.046e-04 8.616e+02 -8.909 < 2e-16 ***
# GlobalSpeechRate -2.023e-03 1.142e-03 8.586e+02 -1.772 0.07680 .
# PrecSegDur -5.005e-02 1.738e-02 1.096e+03 -2.879 0.00406 **
# Environmentn#nV:BaseInitialStressunstressed -2.993e-02 6.445e-03 7.220e+01 -4.645 1.49e-05 ***
# Environmentn#V:BaseInitialStressunstressed -1.717e-02 3.423e-03 6.790e+01 -5.015 4.05e-06 ***
# Environmentn#nV:AccentuationConditionunaccented -3.008e-03 2.904e-03 1.050e+03 -1.036 0.30059
# Environmentn#V:AccentuationConditionunaccented -1.850e-03 2.209e-03 1.054e+03 -0.837 0.40254
# BaseInitialStressunstressed:AccentuationConditionunaccented -2.995e-03 2.331e-03 1.054e+03 -1.285 0.19913
# AccentuationConditionunaccented:logRelFreq -4.526e-04 1.987e-04 1.056e+03 -2.278 0.02295 *
# Environmentn#nV:BaseInitialStressunstressed:AccentuationConditionunaccented 6.851e-03 6.169e-03 1.051e+03 1.110 0.26706
# Environmentn#V:BaseInitialStressunstressed:AccentuationConditionunaccented 7.616e-03 3.056e-03 1.055e+03 2.492 0.01286 *
# LRT: does adding Acc x logRelFreq on top of the 3-way model pay off?
anova(InComplex.lmerBC8EnvStressAccRelFreq,InComplex.lmerBC8EnvAccStress)
# Df AIC BIC logLik deviance Chisq Chi Df Pr(>Chisq)
# ..1 19 -6975.7 -6880.0 3506.8 -7013.7
# object 21 -6976.8 -6871.1 3509.4 -7018.8 5.152 2 0.07608 .
# not a real difference...so we will take the model which makes more sense!
# That is the 3-way interaction model.
# Plot the Acc x logRelFreq effect in the combined model. Fix: TRUE instead of T.
visreg(InComplex.lmerBC8EnvStressAccRelFreq, "logRelFreq", by = "AccentuationCondition",
       overlay = TRUE, trans = function(x) x^(1 / lambda) * 1000)
# Interaction check: accentuation condition x semantic transparency added to BC8.
InComplex.lmerBC8SemTAcc <- lmer(bc ~ Environment+ AccentuationCondition*SemanticTransparency + PostPause +
                                   LocSpeech + GlobalSpeechRate +
                                   PrecSegDur+
                                   (1|Item) + (1|Participant), data = InComplex2)
summary(InComplex.lmerBC8SemTAcc)
# Fixed effects:
# Estimate Std. Error df t value Pr(>|t|)
# (Intercept) 8.948e-01 3.902e-03 6.053e+02 229.333 < 2e-16 ***
# Environmentn#nV -1.002e-02 3.272e-03 4.640e+01 -3.062 0.00365 **
# Environmentn#V -3.152e-02 1.867e-03 4.660e+01 -16.879 < 2e-16 ***
# AccentuationConditionunaccented 8.204e-04 1.041e-03 1.017e+03 0.788 0.43073
# SemanticTransparencyopaque 1.228e-03 2.474e-03 6.200e+01 0.497 0.62126
# PostPausePause -1.939e-03 8.961e-04 1.098e+03 -2.164 0.03067 *
# LocSpeech -1.901e-03 2.126e-04 1.086e+03 -8.939 < 2e-16 ***
# GlobalSpeechRate -1.819e-03 1.166e-03 8.925e+02 -1.559 0.11927
# PrecSegDur -4.698e-02 1.746e-02 1.093e+03 -2.691 0.00723 **
# AccentuationConditionunaccented:SemanticTransparencyopaque -5.199e-03 1.689e-03 1.060e+03 -3.079 0.00213 **
# Plot the SemT x Acc interaction both ways round, in milliseconds.
# Fix: TRUE instead of T.
visreg(InComplex.lmerBC8SemTAcc, "SemanticTransparency", by = "AccentuationCondition",
       overlay = TRUE, trans = function(x) x^(1 / lambda) * 1000)
visreg(InComplex.lmerBC8SemTAcc, "AccentuationCondition", by = "SemanticTransparency",
       overlay = TRUE, trans = function(x) x^(1 / lambda) * 1000)
# Compare against the 3-way interaction model.
anova(InComplex.lmerBC8SemTAcc,InComplex.lmerBC8EnvAccStress)
# Df AIC BIC logLik deviance Chisq Chi Df Pr(>Chisq)
# object 13 -6948.1 -6882.7 3487.0 -6974.1
# ..1 19 -6975.7 -6880.0 3506.8 -7013.7 39.567 6 5.541e-07 ***
# yeah, the other one is better
# what if we add environment and stress?
InComplex.lmerBC8EnvStressSemTAcc <- lmer(bc ~ Environment*BaseInitialStress+ AccentuationCondition*SemanticTransparency + PostPause +
                                            LocSpeech + GlobalSpeechRate +
                                            PrecSegDur+
                                            (1|Item) + (1|Participant), data = InComplex2)
summary(InComplex.lmerBC8EnvStressSemTAcc)
# Fixed effects:
# Estimate Std. Error df t value Pr(>|t|)
# (Intercept) 8.918e-01 3.870e-03 6.277e+02 230.447 < 2e-16 ***
# Environmentn#nV -2.866e-03 2.922e-03 4.110e+01 -0.981 0.33240
# Environmentn#V -2.387e-02 2.130e-03 4.180e+01 -11.206 3.60e-14 ***
# BaseInitialStressunstressed 5.821e-03 2.240e-03 4.270e+01 2.599 0.01278 *
# AccentuationConditionunaccented 8.836e-04 1.038e-03 1.030e+03 0.851 0.39491
# SemanticTransparencyopaque 3.192e-03 2.066e-03 6.370e+01 1.545 0.12726
# PostPausePause -1.834e-03 8.937e-04 1.106e+03 -2.051 0.04046 *
# LocSpeech -1.814e-03 2.086e-04 9.792e+02 -8.695 < 2e-16 ***
# GlobalSpeechRate -2.048e-03 1.151e-03 8.774e+02 -1.780 0.07547 .
# PrecSegDur -4.630e-02 1.742e-02 1.100e+03 -2.658 0.00798 **
# Environmentn#nV:BaseInitialStressunstressed -2.714e-02 6.047e-03 4.520e+01 -4.489 4.90e-05 ***
# Environmentn#V:BaseInitialStressunstressed -1.331e-02 2.989e-03 4.200e+01 -4.453 6.15e-05 ***
# AccentuationConditionunaccented:SemanticTransparencyopaque -5.219e-03 1.688e-03 1.061e+03 -3.091 0.00204 **
# LRT against the 3-way interaction model.
anova(InComplex.lmerBC8EnvStressSemTAcc,InComplex.lmerBC8EnvAccStress)
# Df AIC BIC logLik deviance Chisq Chi Df Pr(>Chisq)
# object 16 -6972.0 -6891.4 3502.0 -7004.0
# ..1 19 -6975.7 -6880.0 3506.8 -7013.7 9.7129 3 0.02117 *
# okay the other is better - that is our final model!!!!
# Interaction check: accentuation condition x type of base added to BC8.
InComplex.lmerBC8RootAcc <- lmer(bc ~ Environment+ AccentuationCondition*TypeOfBase + PostPause +
                                   LocSpeech + GlobalSpeechRate +
                                   PrecSegDur+
                                   (1|Item) + (1|Participant), data = InComplex2)
summary(InComplex.lmerBC8RootAcc)
# no
# Interaction check: accentuation condition x decomposability rating added to BC8.
InComplex.lmerBC8RatingAcc <- lmer(bc ~ Environment+ AccentuationCondition*Rating + PostPause +
                                     LocSpeech + GlobalSpeechRate +
                                     PrecSegDur+
                                     (1|Item) + (1|Participant), data = InComplex2)
summary(InComplex.lmerBC8RatingAcc)
# Fixed effects:
# Estimate Std. Error df t value Pr(>|t|)
# (Intercept) 8.942e-01 3.961e-03 6.861e+02 225.783 < 2e-16 ***
# Environmentn#nV -1.007e-02 3.214e-03 4.730e+01 -3.133 0.00297 **
# Environmentn#V -3.145e-02 1.785e-03 5.050e+01 -17.619 < 2e-16 ***
# AccentuationConditionunaccented 3.409e-03 1.453e-03 1.045e+03 2.346 0.01914 *
# Rating 7.260e-04 4.895e-04 1.058e+03 1.483 0.13838
# PostPausePause -1.881e-03 9.125e-04 1.061e+03 -2.061 0.03957 *
# LocSpeech -1.888e-03 2.126e-04 1.014e+03 -8.883 < 2e-16 ***
# GlobalSpeechRate -2.139e-03 1.175e-03 8.409e+02 -1.820 0.06905 .
# PrecSegDur -4.975e-02 1.785e-02 1.059e+03 -2.788 0.00540 **
# AccentuationConditionunaccented:Rating -1.778e-03 6.000e-04 1.025e+03 -2.963 0.00311 **
# Plot Rating x Acc both ways round, in milliseconds. Fix: TRUE instead of T.
visreg(InComplex.lmerBC8RatingAcc, "Rating", by = "AccentuationCondition",
       overlay = TRUE, trans = function(x) x^(1 / lambda) * 1000)
visreg(InComplex.lmerBC8RatingAcc, "AccentuationCondition", by = "Rating",
       overlay = TRUE, trans = function(x) x^(1 / lambda) * 1000)
# no
# Interaction check: accentuation condition x affix added to BC8.
InComplex.lmerBC8AffixAcc <- lmer(bc ~ Environment+ AccentuationCondition*Affix + PostPause +
                                    LocSpeech + GlobalSpeechRate +
                                    PrecSegDur+
                                    (1|Item) + (1|Participant), data = InComplex2)
summary(InComplex.lmerBC8AffixAcc)
visreg(InComplex.lmerBC8AffixAcc, "Affix", by = "AccentuationCondition",
       overlay = TRUE, trans = function(x) x^(1 / lambda) * 1000)
visreg(InComplex.lmerBC8AffixAcc, "AccentuationCondition", by = "Affix",
       overlay = TRUE, trans = function(x) x^(1 / lambda) * 1000)
# Compare against the 3-way interaction model.
anova(InComplex.lmerBC8AffixAcc,InComplex.lmerBC8EnvAccStress)
# Df AIC BIC logLik deviance Chisq Chi Df Pr(>Chisq)
# object 13 -6944.5 -6879.1 3485.3 -6970.5
# ..1 19 -6975.7 -6880.0 3506.8 -7013.7 43.126 6 1.101e-07 ***
# the other one is way better
############################################
# 3. Decomposability measures and stress: each measure crossed with
#    BaseInitialStress on top of BC8; none comes out significant.
InComplex.lmerBC8StressRelFreq <- lmer(bc ~ Environment+ BaseInitialStress*logRelFreq + PostPause +
                                         LocSpeech + GlobalSpeechRate +
                                         PrecSegDur+
                                         (1|Item) + (1|Participant), data = InComplex2)
summary(InComplex.lmerBC8StressRelFreq)
# no
#############
InComplex.lmerBC8SemTStress <- lmer(bc ~ Environment+ BaseInitialStress*SemanticTransparency + PostPause +
                                      LocSpeech + GlobalSpeechRate +
                                      PrecSegDur+
                                      (1|Item) + (1|Participant), data = InComplex2)
summary(InComplex.lmerBC8SemTStress)
# no
InComplex.lmerBC8RootStress <- lmer(bc ~ Environment+ BaseInitialStress*TypeOfBase + PostPause +
                                      LocSpeech + GlobalSpeechRate +
                                      PrecSegDur+
                                      (1|Item) + (1|Participant), data = InComplex2)
summary(InComplex.lmerBC8RootStress)
# no
InComplex.lmerBC8RatingStress <- lmer(bc ~ Environment+ BaseInitialStress*Rating + PostPause +
                                        LocSpeech + GlobalSpeechRate +
                                        PrecSegDur+
                                        (1|Item) + (1|Participant), data = InComplex2)
summary(InComplex.lmerBC8RatingStress)
# no
InComplex.lmerBC8AffixStress <- lmer(bc ~ Environment+ BaseInitialStress*Affix + PostPause +
                                       LocSpeech + GlobalSpeechRate +
                                       PrecSegDur+
                                       (1|Item) + (1|Participant), data = InComplex2)
summary(InComplex.lmerBC8AffixStress)
# no
##############################################################################################
# Summary interactions --> Simplification of our model                      ##
##############################################################################################
# I tested the interactions of variables of interest (RelFreq, Type of Base, Rating, Environment) and Accentuation;
# also tested stress and accentuation.
# Our final model has a 3-way interaction:
# stress and environment and accentuation
# Final-model plots: Env x Stress within each accentuation level, converted
# back to milliseconds. Fixes: TRUE/FALSE instead of T/F.
visreg(InComplex.lmerBC8EnvAccStress, "Environment", by = "BaseInitialStress",
       overlay = TRUE, cond = list(AccentuationCondition = "unaccented"),
       trans = function(x) (x^(1 / lambda)) * 1000, rug = FALSE, ylab = "duration in milliseconds",
       xlab = "environment by stress", cex.axis = 0.9, ylim = c(20, 180))
visreg(InComplex.lmerBC8EnvAccStress, "Environment", by = "BaseInitialStress",
       overlay = TRUE, cond = list(AccentuationCondition = "accented"),
       trans = function(x) (x^(1 / lambda)) * 1000, rug = FALSE, ylab = "duration in milliseconds",
       xlab = "environment by stress", cex.axis = 0.9, ylim = c(20, 180))
# When the base-initial syllable has primary stress, doubles are as long as
# singletons followed by a consonant, but longer than singletons followed by a
# vowel. When the base-initial syllable is unstressed, they are as long as
# singletons followed by a vowel, i.e. they only seem to geminate under primary
# stress on the base-initial syllable.
# We only have 4 types with double n — how are they distributed over stress?
unique(InComplex[InComplex$Environment=="n#nV", c("Item","BaseInitialStress")])
# Item BaseInitialStress
# 559 innervate unstressed
# 578 innocuous primary
# 599 innominate primary
# 622 innumerable primary
# so one type does not geminate - innocuous
#############################################################
# The final model (3-way interaction, chosen above):
summary(InComplex.lmerBC8EnvAccStress)
# Fixed effects:
# Estimate Std. Error df t value
# (Intercept) 8.933e-01 3.776e-03 6.559e+02 236.574
# Environmentn#nV -6.365e-04 3.131e-03 6.590e+01 -0.203
# Environmentn#V -2.373e-02 2.317e-03 6.850e+01 -10.244
# AccentuationConditionunaccented -1.313e-03 1.436e-03 1.089e+03 -0.915
# BaseInitialStressunstressed 6.540e-03 2.458e-03 6.940e+01 2.661
# PostPausePause -1.800e-03 8.933e-04 1.104e+03 -2.015
# LocSpeech -1.820e-03 2.043e-04 8.448e+02 -8.909
# GlobalSpeechRate -2.036e-03 1.141e-03 8.500e+02 -1.784
# PrecSegDur -5.038e-02 1.740e-02 1.100e+03 -2.895
# Environmentn#nV:AccentuationConditionunaccented -4.556e-03 2.832e-03 1.052e+03 -1.609
# Environmentn#V:AccentuationConditionunaccented -2.795e-04 2.103e-03 1.054e+03 -0.133
# Environmentn#nV:BaseInitialStressunstressed -2.945e-02 6.344e-03 7.510e+01 -4.642
# Environmentn#V:BaseInitialStressunstressed -1.646e-02 3.292e-03 6.990e+01 -4.999
# AccentuationConditionunaccented:BaseInitialStressunstressed -1.270e-03 2.209e-03 1.055e+03 -0.575
# Environmentn#nV:AccentuationConditionunaccented:BaseInitialStressunstressed 5.595e-03 6.159e-03 1.053e+03 0.908
# Environmentn#V:AccentuationConditionunaccented:BaseInitialStressunstressed 5.916e-03 2.971e-03 1.056e+03 1.991
# Pr(>|t|)
# (Intercept) < 2e-16 ***
# Environmentn#nV 0.83952
# Environmentn#V 1.78e-15 ***
# AccentuationConditionunaccented 0.36060
# BaseInitialStressunstressed 0.00968 **
# PostPausePause 0.04410 *
# LocSpeech < 2e-16 ***
# GlobalSpeechRate 0.07477 .
# PrecSegDur 0.00387 **
# Environmentn#nV:AccentuationConditionunaccented 0.10796
# Environmentn#V:AccentuationConditionunaccented 0.89432
# Environmentn#nV:BaseInitialStressunstressed 1.44e-05 ***
# Environmentn#V:BaseInitialStressunstressed 4.10e-06 ***
# AccentuationConditionunaccented:BaseInitialStressunstressed 0.56535
# Environmentn#nV:AccentuationConditionunaccented:BaseInitialStressunstressed 0.36386
# Environmentn#V:AccentuationConditionunaccented:BaseInitialStressunstressed 0.04668 *
lambda
#[1] 0.06060606
# I need to rename some variables for the plot...
InComplex2<-rename(InComplex2,AccentuationAnnotator=Accentuation)
InComplex2<-rename(InComplex2,Accentuation=AccentuationCondition)
# need to rename the stress levels
levels(InComplex2$BaseInitialStress)
#[1] "primary"    "unstressed"
levels(InComplex2$BaseInitialStress)<-c("stressed" , "unstressed")
levels(InComplex2$BaseInitialStress)
#[1] "stressed"   "unstressed"
# also need to change the reference level for environment so n#nV is baseline
InComplex2$Environment <- relevel (InComplex2$Environment, ref= "n#nV")
# Refit the 3-way model with renamed variables and the new reference level.
# NOTE(review): the leading "+" on the continuation line is a redundant unary
# plus (harmless, but worth cleaning up).
final_In_complex_model.lmer<-lmer(bc ~ Environment*BaseInitialStress*Accentuation+LocSpeech+ GlobalSpeechRate+
                                    +PrecSegDur+ PostPause+ (1|Participant)+ (1|Item) , data = InComplex2)
summary(final_In_complex_model.lmer)
# Fixed effects:
# Estimate Std. Error df t value Pr(>|t|)
# (Intercept) 8.927e-01 4.522e-03 3.131e+02 197.406 < 2e-16 ***
# Environmentn#C 6.365e-04 3.131e-03 6.590e+01 0.203 0.839516
# Environmentn#V -2.310e-02 3.405e-03 6.840e+01 -6.782 3.42e-09 ***
# BaseInitialStressunstressed -2.291e-02 5.856e-03 7.640e+01 -3.912 0.000197 ***
# Accentuationunaccented -5.869e-03 2.694e-03 1.076e+03 -2.178 0.029597 *
# LocSpeech -1.820e-03 2.043e-04 8.448e+02 -8.909 < 2e-16 ***
# GlobalSpeechRate -2.036e-03 1.141e-03 8.500e+02 -1.784 0.074771 .
# PrecSegDur -5.038e-02 1.740e-02 1.100e+03 -2.895 0.003871 **
# PostPausePause -1.800e-03 8.933e-04 1.104e+03 -2.015 0.044101 *
# Environmentn#C:BaseInitialStressunstressed 2.945e-02 6.344e-03 7.510e+01 4.642 1.44e-05 ***
# Environmentn#V:BaseInitialStressunstressed 1.299e-02 6.251e-03 7.560e+01 2.078 0.041101 *
# Environmentn#C:Accentuationunaccented 4.556e-03 2.832e-03 1.052e+03 1.609 0.107956
# Environmentn#V:Accentuationunaccented 4.276e-03 3.083e-03 1.051e+03 1.387 0.165709
# BaseInitialStressunstressed:Accentuationunaccented 4.324e-03 5.734e-03 1.051e+03 0.754 0.450911
# Environmentn#C:BaseInitialStressunstressed:Accentuationunaccented -5.595e-03 6.159e-03 1.053e+03 -0.908 0.363863
# Environmentn#V:BaseInitialStressunstressed:Accentuationunaccented 3.209e-04 6.071e-03 1.051e+03 0.053 0.957861
# AHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHH
# If one changes the reference level, the 3-way interaction (which showed a very weak effect anyway)
# vanishes! The question is, what do I want to do?
# Let's see whether the second-best model (which is actually not significantly worse) still has its interactions
# Second-best model: the two two-way interactions instead of the 3-way.
final_In_complex_model.lmer2<-(lmer(bc ~ Environment*BaseInitialStress+Environment*Accentuation+LocSpeech+ GlobalSpeechRate+
                                      +PrecSegDur+ PostPause+ (1|Participant)+ (1|Item) , data = InComplex2) )
summary(final_In_complex_model.lmer2)
# Fixed effects:
# Estimate Std. Error df t value Pr(>|t|)
# (Intercept) 8.920e-01 4.485e-03 3.038e+02 198.866 < 2e-16 ***
# Environmentn#C 1.280e-03 3.061e-03 6.010e+01 0.418 0.677179
# Environmentn#V -2.446e-02 3.266e-03 5.780e+01 -7.488 4.53e-10 ***
# BaseInitialStressunstressed -2.081e-02 5.181e-03 4.690e+01 -4.017 0.000212 ***
# Accentuationunaccented -5.025e-03 2.428e-03 1.081e+03 -2.070 0.038719 *
# LocSpeech -1.816e-03 2.046e-04 8.447e+02 -8.878 < 2e-16 ***
# GlobalSpeechRate -2.015e-03 1.143e-03 8.527e+02 -1.763 0.078335 .
# PrecSegDur -4.828e-02 1.739e-02 1.103e+03 -2.775 0.005606 **
# PostPausePause -1.812e-03 8.933e-04 1.106e+03 -2.029 0.042718 *
# Environmentn#C:BaseInitialStressunstressed 2.669e-02 5.612e-03 4.600e+01 4.756 1.98e-05 ***
# Environmentn#V:BaseInitialStressunstressed 1.328e-02 5.532e-03 4.640e+01 2.400 0.020436 *
# Environmentn#C:Accentuationunaccented 3.302e-03 2.498e-03 1.054e+03 1.322 0.186532
# Environmentn#V:Accentuationunaccented 6.916e-03 2.436e-03 1.053e+03 2.839 0.004611 **
# LRT: two-way-interaction model vs 3-way model.
anova(final_In_complex_model.lmer2,final_In_complex_model.lmer)
# Df AIC BIC logLik deviance Chisq Chi Df Pr(>Chisq)
# object 16 -6975.2 -6894.7 3503.6 -7007.2
# ..1 19 -6975.7 -6880.0 3506.8 -7013.7 6.4519 3 0.09158 .
# Plot all terms of the two-way model, back-transformed to milliseconds.
visreg(final_In_complex_model.lmer2, trans= function(x) (x^(1/lambda))*1000,ylim=c(20,180))
# There is a strange connection between Global Speech Rate and Post Pause —
# well, if there is a pause, speech rate is lower...
# let's see
plot(InComplex$PostPause,InComplex$GlobalSpeechRate)
# let's throw out Post Pause
final_In_complex_model.lmer3<-(lmer(bc ~ Environment*BaseInitialStress+Environment*Accentuation+LocSpeech+ GlobalSpeechRate+
                                      +PrecSegDur+ (1|Participant)+ (1|Item) , data = InComplex2) )
summary(final_In_complex_model.lmer3)
# GlobalSpeechRate is not significant anymore; let's throw it out too
final_In_complex_model.lmer4<-(lmer(bc ~ Environment*BaseInitialStress+Environment*Accentuation+LocSpeech+
                                      +PrecSegDur+ (1|Participant)+ (1|Item) , data = InComplex2) )
summary(final_In_complex_model.lmer4)
# (Intercept) 8.870e-01 3.963e-03 2.013e+02 223.837 < 2e-16 ***
# Environmentn#C 1.087e-03 3.043e-03 6.000e+01 0.357 0.722054
# Environmentn#V -2.466e-02 3.246e-03 5.770e+01 -7.595 3.03e-10 ***
# BaseInitialStressunstressed -2.129e-02 5.145e-03 4.660e+01 -4.139 0.000145 ***
# Accentuationunaccented -5.630e-03 2.310e-03 1.060e+03 -2.437 0.014963 *
# LocSpeech -1.843e-03 1.899e-04 9.757e+02 -9.702 < 2e-16 ***
# PrecSegDur -4.557e-02 1.738e-02 1.104e+03 -2.621 0.008880 **
# Environmentn#C:BaseInitialStressunstressed 2.723e-02 5.572e-03 4.570e+01 4.887 1.30e-05 ***
# Environmentn#V:BaseInitialStressunstressed 1.403e-02 5.488e-03 4.600e+01 2.557 0.013908 *
# Environmentn#C:Accentuationunaccented 3.567e-03 2.499e-03 1.054e+03 1.428 0.153691
# Environmentn#V:Accentuationunaccented 7.075e-03 2.438e-03 1.054e+03 2.902 0.003788 **
anova(final_In_complex_model.lmer2,final_In_complex_model.lmer4)
# Df AIC BIC logLik deviance Chisq Chi Df Pr(>Chisq)
# ..1 14 -6973.5 -6903.1 3500.8 -7001.5
# object 16 -6975.2 -6894.7 3503.6 -7007.2 5.6809 2 0.0584 .
# okay, THAT is our final model!!!!!!!!
Final_In_complex_model.lmer<-(lmer(bc ~ Environment*BaseInitialStress+Environment*Accentuation+LocSpeech+
PrecSegDur+ (1|Participant)+ (1|Item) , data = InComplex2) )
visreg(Final_In_complex_model.lmer, trans= function(x) (x^(1/lambda))*1000, rug=F, ylab="duration in milliseconds", cex.axis=0.9,ylim=c(20,180))
#############
# Let's get the two models for the dissertation
# Export the final model's fixed-effect table as LaTeX for the dissertation.
table_final_models<-as.data.frame(coef(summary(Final_In_complex_model.lmer)))
xtable(table_final_models,digits = 3)
#############################################################
# Let's now look at each factors contribution to the model
###############################################################
############################################################
# Do we need random effects?
#############################################
# Speaker
# Refit the final model WITHOUT the by-participant random intercept, to
# gauge the speaker intercept's contribution (compared via anova() below).
InComplex.finalWithoutSpeaker <-(lmer(bc ~ Environment*BaseInitialStress+Environment*Accentuation+LocSpeech+
+PrecSegDur+ (1|Item) , data = InComplex2) )
summary(InComplex.finalWithoutSpeaker)
# Fixed effects:
# Estimate Std. Error df t value Pr(>|t|)
# (Intercept)                                 8.933e-01  4.026e-03  1.733e+02 221.896  < 2e-16 ***
# Environmentn#C                              1.543e-03  3.291e-03  6.070e+01   0.469 0.640971
# Environmentn#V                             -2.326e-02  3.507e-03  5.810e+01  -6.632 1.21e-08 ***
# BaseInitialStressunstressed                -2.194e-02  5.542e-03  4.640e+01  -3.960 0.000256 ***
# Accentuationunaccented                     -4.472e-03  2.573e-03  1.081e+03  -1.738 0.082459 .
# LocSpeech                                  -2.399e-03  1.846e-04  1.039e+03 -12.995  < 2e-16 ***
# PrecSegDur                                 -5.921e-02  1.775e-02  1.105e+03  -3.336 0.000879 ***
# Environmentn#C:BaseInitialStressunstressed  2.711e-02  6.000e-03  4.540e+01   4.518 4.43e-05 ***
# Environmentn#V:BaseInitialStressunstressed  1.455e-02  5.909e-03  4.570e+01   2.462 0.017647 *
# Environmentn#C:Accentuationunaccented       3.724e-03  2.793e-03  1.079e+03   1.333 0.182735
# Environmentn#V:Accentuationunaccented       6.968e-03  2.724e-03  1.079e+03   2.558 0.010662 *
# ---
# Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
# Squared correlation of observed vs fitted — an R^2-like fit measure.
cor(InComplex2$bc, fitted(InComplex.finalWithoutSpeaker))^2
#[1] 0.7091633
# Same check for the by-item random intercept: refit without (1|Item).
InComplex.finalWithoutItem <-(lmer(bc ~ Environment*BaseInitialStress+Environment*Accentuation+LocSpeech+
+PrecSegDur+ (1|Participant) , data = InComplex2) )
summary(InComplex.finalWithoutItem)
# Fixed effects:
# Estimate Std. Error df t value Pr(>|t|)
# (Intercept)                                 8.852e-01  3.283e-03  8.286e+02 269.655  < 2e-16 ***
# Environmentn#C                              1.980e-04  1.992e-03  1.097e+03   0.099  0.92082
# Environmentn#V                             -2.545e-02  2.087e-03  1.097e+03 -12.193  < 2e-16 ***
# BaseInitialStressunstressed                -2.146e-02  2.996e-03  1.096e+03  -7.164 1.44e-12 ***
# Accentuationunaccented                     -6.523e-03  2.418e-03  1.097e+03  -2.698  0.00709 **
# LocSpeech                                  -1.656e-03  1.739e-04  1.122e+03  -9.524  < 2e-16 ***
# PrecSegDur                                 -3.826e-02  1.789e-02  1.122e+03  -2.139  0.03265 *
# Environmentn#C:BaseInitialStressunstressed  2.827e-02  3.202e-03  1.096e+03   8.830  < 2e-16 ***
# Environmentn#V:BaseInitialStressunstressed  1.387e-02  3.168e-03  1.096e+03   4.377 1.32e-05 ***
# Environmentn#C:Accentuationunaccented       4.293e-03  2.621e-03  1.096e+03   1.638  0.10170
# Environmentn#V:Accentuationunaccented       7.811e-03  2.558e-03  1.096e+03   3.054  0.00232 **
# R^2-like fit measure for the item-less model.
cor(InComplex2$bc, fitted(InComplex.finalWithoutItem))^2
#[1] 0.7424636
# Now, let's see how much each factor explains - we will take a look at the ACI for that
# Let's create models in which one of the preditor variables is missing
# Drop-one models: each refits the final model with exactly one fixed
# effect (or one interaction) removed; their AICs are compared via
# anova() against the full final model in the section below.
# Without the Environment:BaseInitialStress interaction (main effects kept):
InComplex.finalWithoutInteraction1 <-(lmer(bc ~ Environment+BaseInitialStress+Environment*Accentuation+LocSpeech+
+PrecSegDur+ (1|Item)+(1|Participant) , data = InComplex2) )
# Without the Environment:Accentuation interaction:
InComplex.finalWithoutInteraction2 <-(lmer(bc ~ Environment*BaseInitialStress+Environment+Accentuation+LocSpeech+
(1|Item)+PrecSegDur+ (1|Participant) , data = InComplex2) )
# Without Environment (and therefore both of its interactions):
InComplex.finalWithoutEnvironment<-(lmer(bc ~ BaseInitialStress+Accentuation+LocSpeech+
+PrecSegDur+ (1|Item)+ (1|Participant) , data = InComplex2) )
# Without BaseInitialStress:
InComplex.finalWithoutStress <-(lmer(bc ~ Environment*Accentuation+LocSpeech+
+PrecSegDur+ (1|Item)+ (1|Participant) , data = InComplex2) )
# Without Accentuation:
InComplex.finalWithoutAccentuation <-(lmer(bc ~ Environment*BaseInitialStress+LocSpeech+
+PrecSegDur+ (1|Item)+ (1|Participant) , data = InComplex2) )
# Without local speech rate:
InComplex.finalWithoutLocSpeech <-(lmer(bc ~ Environment*BaseInitialStress+Environment*Accentuation
+PrecSegDur+ (1|Item)+ (1|Participant) , data = InComplex2) )
# Without preceding-segment duration:
InComplex.finalWithoutPrecSeg <-(lmer(bc ~ Environment*BaseInitialStress+Environment*Accentuation+LocSpeech+
+ (1|Item)+(1|Participant) , data = InComplex2) )
###########################################################################
# Now, let's have a look at the contribution of each factor
###################################################################
# Each anova() below is a likelihood-ratio test of a drop-one model
# against the full final model; the hand-computed difference after each
# test is the AIC increase caused by removing that term (the AICs are
# negative, so e.g. 6973.5-6790.6 is |AIC_full| - |AIC_reduced|).
anova(InComplex.finalWithoutSpeaker,Final_In_complex_model.lmer)
# Df     AIC     BIC logLik deviance  Chisq Chi Df Pr(>Chisq)
# object 13 -6790.6 -6725.1 3408.3  -6816.6
# ..1    14 -6973.5 -6903.1 3500.8  -7001.5 184.98      1  < 2.2e-16 ***
6973.5-6790.6
#[1] 182.9
anova(InComplex.finalWithoutItem,Final_In_complex_model.lmer)
# Df     AIC     BIC logLik deviance  Chisq Chi Df Pr(>Chisq)
# object 13 -6929.0 -6863.5 3477.5  -6955.0
# ..1    14 -6973.5 -6903.1 3500.8  -7001.5 46.565      1  8.862e-12 ***
6973.5-6929.0
#44.5
anova(InComplex.finalWithoutInteraction1,Final_In_complex_model.lmer)
# Df     AIC     BIC logLik deviance  Chisq Chi Df Pr(>Chisq)
# object 12 -6949.3 -6888.9 3486.7  -6973.3
# ..1    14 -6973.5 -6903.1 3500.8  -7001.5 28.232      2  7.406e-07 ***
6973.5-6949.3
#24.2
anova(InComplex.finalWithoutInteraction2,Final_In_complex_model.lmer)
# Df     AIC     BIC logLik deviance Chisq Chi Df Pr(>Chisq)
# object 12 -6964.8 -6904.4 3494.4  -6988.8
# ..1    14 -6973.5 -6903.1 3500.8  -7001.5 12.71      2   0.001738 **
6973.5-6964.8
#8.7
anova(InComplex.finalWithoutEnvironment,Final_In_complex_model.lmer)
# object  8 -6853.5 -6813.2 3434.7  -6869.5
# ..1    14 -6973.5 -6903.1 3500.8  -7001.5 132.08      6  < 2.2e-16 ***
6973.5-6853.5
# 120
anova(InComplex.finalWithoutAccentuation,Final_In_complex_model.lmer)
# Df     AIC     BIC logLik deviance  Chisq Chi Df Pr(>Chisq)
# object 11 -6966.5 -6911.2 3494.3  -6988.5
# ..1    14 -6973.5 -6903.1 3500.8  -7001.5 12.985      3    0.00467 **
6973.5-6966.5
#7
anova(InComplex.finalWithoutStress,Final_In_complex_model.lmer)
# Df     AIC     BIC logLik deviance Chisq Chi Df Pr(>Chisq)
# object 11 -6948.5 -6893.2 3485.3  -6970.5
# ..1    14 -6973.5 -6903.1 3500.8  -7001.5    31      3    8.5e-07 ***
6973.5-6948.5
#[1] 25
anova(InComplex.finalWithoutLocSpeech,Final_In_complex_model.lmer)
# Df     AIC     BIC logLik deviance Chisq Chi Df Pr(>Chisq)
# object 13 -6885.1 -6819.7 3455.6  -6911.1
# ..1    14 -6973.5 -6903.1 3500.8  -7001.5 90.43      1  < 2.2e-16 ***
6973.5-6885.1
#88.4
anova(InComplex.finalWithoutPrecSeg,Final_In_complex_model.lmer)
# Df     AIC     BIC logLik deviance  Chisq Chi Df Pr(>Chisq)
# object 13 -6968.8 -6903.3 3497.4  -6994.8
# ..1    14 -6973.5 -6903.1 3500.8  -7001.5 6.7724      1   0.009258 **
# # ---
6973.5-6968.8
#4.7
########################################################
# When we look at the contribution of each factor in the model without
# the interaction, we see the following picture
#######################################################################
# Let's put these numbers in a table
# One-row table collecting the AIC increase observed when each term is
# removed from the final model (rounded values from the anova() runs above).
aic_values <- c(183, 120, 88, 45, 25, 24, 9, 7, 5)
aic_labels <- c("Speaker", "Environment", "Local-\nSpeechRate", "Item",
                "Base-\nInitialStress", "Env. * Stress", "Env. * Acc.",
                "Accentuation", "Preceding-\nSegmentDuration")
AIC_decrease_InComplex <- as.table(matrix(
  aic_values,
  nrow = 1,
  dimnames = list("Decrease in AIC", aic_labels)
))
AIC_decrease_InComplex
# Change to the dissertation image directory for plot output.
# NOTE(review): machine-specific absolute path; consider a relative path
# so the script is portable.
setwd("C:/Users/sbenhedia/Dropbox/Geminates/Schriften/Diss/images/Experiment")
# Plot the per-factor AIC contribution as a horizontal bar chart.
png("AICdecreaseInComplex.png", units="cm", height=10, width=17, res=300, pointsize=9)
par(mar=c(2.6,8.1, 1.1, 2), xpd=TRUE, cex=0.9)
# FIX: the bars were previously drawn twice (an identical barplot() call was
# immediately overplotted by this one); a single call suffices, and its
# return value gives the bar midpoints used to place the value labels.
xx <- barplot(AIC_decrease_InComplex, horiz=TRUE, col="lightgrey",
              names.arg=colnames(AIC_decrease_InComplex), las=2,
              xaxt="n", border="lightgrey")
text(y = xx, x = AIC_decrease_InComplex, label = AIC_decrease_InComplex,
     pos = 4, cex = 0.8, col = "black")
# NB: the table's row label reads "Decrease in AIC" while this axis label
# says "AIC increase"; removing a factor makes the (negative) AIC larger,
# so "increase" is the accurate wording — the figure label is kept as-is.
title(xlab="AIC increase", line=0, cex.lab=1.1)
dev.off()
# what is lambda
# Box-Cox exponent used for the bc response; 1/lambda back-transforms.
lambda
#[1] 0.06060606
##############################
# We should also plot the main effect (not covariates)
###############################
# Plot main effect
# Interaction plot Environment x BaseInitialStress, back-transformed to ms.
png("InModelInterEnvStress.png", units="cm", height=12, width=14, res=300, pointsize=15)
ylim=c(20,180)
# Tweak lattice/trellis theme settings for the visreg panels.
par <- trellis.par.get()
#par <- lapply(par, function(x) replace(x, names(x) == "lwd", 4)) # that is the line width #
#par$plot.line$col <- default # that is the colour of the estimate line
#par$strip.border<-1
par$fontsize <- list(text=15) # self-explanatory, I think :)
par$strip.background$col <- "lightgrey" # removes the pink/orange strip colour
par$panel.background$col <- "white" # removes the white in the plot
#visreg(final_In_complex_model.lmer, "Environment",by="BaseInitialStress",ylab="duration in milliseconds", trans= function(x) (x^(1/lambda))*1000, rug=F, xlab="environment by base-initial stress",ylIn=ylIn,cex.axis=0.9,par.settings=par)
visreg(Final_In_complex_model.lmer, "Environment",by="BaseInitialStress",
ylab="duration in milliseconds", trans= function(x) (x^(1/lambda))*1000,
rug=F, xlab="environment",ylim=ylim,
overlay=TRUE ,line.par = list(col = c('cornflowerblue','darkblue')),cex=0.8)
dev.off()
# Interaction plot Environment x Accentuation, back-transformed to ms.
png("InModelInterEnvAcc.png", units="cm", height=12, width=14, res=300, pointsize=15)
ylim=c(20,180)
# Same trellis theme tweaks as for the previous figure.
par <- trellis.par.get()
#par <- lapply(par, function(x) replace(x, names(x) == "lwd", 4)) # that is the line width #
#par$plot.line$col <- default # that is the colour of the estimate line
#par$strip.border<-1
par$fontsize <- list(text=15) # self-explanatory, I think :)
par$strip.background$col <- "lightgrey" # removes the pink/orange strip colour
par$panel.background$col <- "white" # removes the white in the plot
#visreg(final_In_complex_model.lmer, "Environment",by="BaseInitialStress",ylab="duration in milliseconds", trans= function(x) (x^(1/lambda))*1000, rug=F, xlab="environment by base-initial stress",ylIn=ylIn,cex.axis=0.9,par.settings=par)
visreg(Final_In_complex_model.lmer, "Environment",by="Accentuation",
ylab="duration in milliseconds", trans= function(x) (x^(1/lambda))*1000,
rug=F, xlab="environment",ylim=ylim,
overlay=TRUE ,line.par = list(col = c('cornflowerblue','darkblue')),cex=0.8)
dev.off()
library (MuMIn)
# let's check whether the same factors are derived when we use
# the MuMin packe to compare the Inportance of the
# different factors.
# dredge() requires na.action = "na.fail" so that all candidate models
# are fitted on the same rows.
options(na.action = "na.fail")
# Main-effects-only lm over all predictors; dredge() fits all subsets and
# model.avg() yields the relative importance weights pasted below.
InComplex.lm1<- lm(ConsonantDur ~ Environment + Accentuation +
OrderRescale + logWordFormFreq + BaseInitialStress + LocSpeech +
GlobalSpeechRate + PrePause + PostPause + PrecSegDur + PCDec1+PCDec2+PCDec3+
PCDec4,
data = InComplexRating)
model_ranking <- dredge(InComplex.lm1)
model_average_<-model.avg(model_ranking)
summary(model_average_)
# Relative variable importance:
# Environment LocSpeech PostPause PCDec4 GlobalSpeechRate PCDec2 PCDec1 BaseInitialStress
# Importance: 1.00 1.00 1.00 0.99 0.96 0.91 0.89 0.84
# N containing models: 8192 8192 8192 8192 8192 8192 8192 8192
# logWordFormFreq PrecSegDur PrePause Accentuation PCDec3 OrderRescale
# Importance: 0.72 0.62 0.44 0.38 0.37 0.27
# N containing models: 8192 8192 8192 8192 8192 8192
# let's check MuMin with interactions
options(na.action = "na.fail")
# Same dredge exercise, but with the theoretically motivated interactions
# included in the global model.
# NOTE(review): the "+ + LocSpeech" below contains a stray unary plus;
# harmless to the fitted model, but presumably a leftover from editing.
InComplex.lm2<- lm(ConsonantDur ~ Environment*Accentuation*BaseInitialStress +
OrderRescale + logWordFormFreq + + LocSpeech +
GlobalSpeechRate + Accentuation*BaseInitialStress*PrePause +
PostPause + PrecSegDur + Environment*PCDec1+Environment*PCDec2+Environment*PCDec3+
Environment*PCDec4,
data = InComplexRating)
model_ranking2 <- dredge(InComplex.lm2)
model_average_2<-model.avg(model_ranking2)
summary(model_average_2)
# Relative variable importance:
# Environment LocSpeech BaseInitialStress BaseInitialStress:Environment Accentuation
# Importance: 1.00 1.00 1.00 1.00 1.00
# N containing models: 331776 175616 304640 160704 304640
# Accentuation:Environment Accentuation:BaseInitialStress PostPause
# Importance: 1.00 1.00 1.00
# N containing models: 160704 161664 175616
# Accentuation:BaseInitialStress:Environment GlobalSpeechRate PrePause
# Importance: 1.00 0.98 0.97
# N containing models: 31104 175616 273536
# BaseInitialStress:PrePause Accentuation:PrePause
# Importance: 0.93 0.92
# N containing models: 135744 135744
# Accentuation:BaseInitialStress:PrePause PCDec3 PCDec2 Environment:PCDec3 PCDec4
# Importance: 0.91 0.90 0.87 0.86 0.82
# N containing models: 26944 230912 230912 110592 230912
# Environment:PCDec4 PrecSegDur PCDec1 logWordFormFreq Environment:PCDec1
# Importance: 0.75 0.72 0.67 0.56 0.55
# N containing models: 110592 175616 230912 175616 110592
# Environment:PCDec2 OrderRescale
# Importance: 0.53 0.26
# N containing models: 110592 175616
###################################################################################
# Find out at which levels visreg draws lines
###################################################################################
summary(Final_In_complex_model.lmer)
# Fixed effects:
# Estimate Std. Error df t value Pr(>|t|)
# (Intercept)                                 8.870e-01  3.963e-03  2.013e+02 223.837  < 2e-16 ***
# Environmentn#C                              1.087e-03  3.043e-03  6.000e+01   0.357 0.722054
# Environmentn#V                             -2.466e-02  3.246e-03  5.770e+01  -7.595 3.03e-10 ***
# BaseInitialStressunstressed                -2.129e-02  5.145e-03  4.660e+01  -4.139 0.000145 ***
# Accentuationunaccented                     -5.630e-03  2.310e-03  1.060e+03  -2.437 0.014963 *
# LocSpeech                                  -1.843e-03  1.899e-04  9.757e+02  -9.702  < 2e-16 ***
# PrecSegDur                                 -4.557e-02  1.738e-02  1.104e+03  -2.621 0.008880 **
# Environmentn#C:BaseInitialStressunstressed  2.723e-02  5.572e-03  4.570e+01   4.887 1.30e-05 ***
# Environmentn#V:BaseInitialStressunstressed  1.403e-02  5.488e-03  4.600e+01   2.557 0.013908 *
# Environmentn#C:Accentuationunaccented       3.567e-03  2.499e-03  1.054e+03   1.428 0.153691
# Environmentn#V:Accentuationunaccented       7.075e-03  2.438e-03  1.054e+03   2.902 0.003788 **
# NOTE(review): this calls the lowercase final_In_complex_model.lmer (the
# pre-reduction model) — the conditions below include GlobalSpeechRate and
# PostPause, which the Final model no longer contains. Presumably run only
# to read off the conditioning values; confirm this was intended.
visreg(final_In_complex_model.lmer)
# Conditions used in construction of plot
# Environment: n#V
# Accentuation: accented
# LocSpeech: 12.21309
# GlobalSpeechRate: 2.240557
# PrecSegDur: 0.06327784
# PostPause: No Pause
# Participant: Experiment_1_participant_4
# Item: intake
# Fixed-effect estimates copied from summary(Final_In_complex_model.lmer)
# (table above) plus the covariate values visreg conditions on; used below
# to reconstruct the predicted durations at which visreg draws its lines.
intercept <- 8.870e-01
LocCondition <- 12.21309        # LocSpeech value visreg conditions on
estSpeech <- -1.843e-03         # LocSpeech slope
PrecSegCondition <- 0.06327784  # PrecSegDur value visreg conditions on
estPrecSeg <- -4.557e-02        # PrecSegDur slope
EstEnvironmentInC <- 1.087e-03
EstEnvironmentInV <- -2.466e-02
EstUnstressed <- -2.129e-02
# BUG FIX: this constant previously duplicated the PrecSegDur estimate
# (-4.557e-02); the Accentuationunaccented estimate in the summary table
# above is -5.630e-03. (Not used in the stress-level calculations below,
# so those results are unaffected.)
estUnaccented <- -5.630e-03
InteractionEst1nCUnstressed <- 2.723e-02
InteractionEst1nVUnstressed <- 1.403e-02
InteractionEst2nCUnaccented <- 3.567e-03
InteractionEst2nVUnaccented <- 7.075e-03
# Redraw the Environment x BaseInitialStress panel; the hand computations
# below reproduce the line heights (in ms) from the coefficients above:
# prediction = (intercept + covariates + relevant dummies)^(1/lambda) * 1000.
visreg(Final_In_complex_model.lmer, "Environment",by="BaseInitialStress",ylab="duration in milliseconds", trans= function(x) (x^(1/lambda))*1000, rug=F, xlab="environment by base-initial stress",ylim=ylim,cex.axis=0.9,par.settings=par)
#level Inn stressed (reference level: both dummies at 0)
((intercept+(LocCondition*estSpeech)+(PrecSegCondition*estPrecSeg))^(1/lambda))*1000
#[1] 85.62645
InnStressed= 85.62645
#level InC stressed
((intercept+(LocCondition*estSpeech)+(PrecSegCondition*estPrecSeg)+EstEnvironmentInC)^(1/lambda))*1000
#[1] 87.42642
InCStressed= 87.42642
#level InC Unstressed
((intercept+(LocCondition*estSpeech)+(PrecSegCondition*estPrecSeg)+EstUnstressed+EstEnvironmentInC+InteractionEst1nCUnstressed)^(1/lambda))*1000
#[1] 97.90693
InCUnstressed= 97.90693
#level InV stressed
((intercept+(LocCondition*estSpeech)+(PrecSegCondition*estPrecSeg)+EstEnvironmentInV)^(1/lambda))*1000
#[1] 53.03008   (a stale copy of the n#C output, 87.42642, stood here before)
InVStressed= 53.03008
#level InV Unstressed
((intercept+(LocCondition*estSpeech)+(PrecSegCondition*estPrecSeg)+EstUnstressed+EstEnvironmentInV+InteractionEst1nVUnstressed)^(1/lambda))*1000
#[1] 45.92951
InVUnstressed= 45.92951
#level Inn Unstressed
((intercept+(LocCondition*estSpeech)+(PrecSegCondition*estPrecSeg)+EstUnstressed)^(1/lambda))*1000
#[1] 56.6654
InnUnstressed= 56.6654
# Differences (German: "Unterschiede") between predicted levels, in ms.
# Double vs single, stressed ("Double single stressed"):
InnStressed-InCStressed
#-1.79997
InnStressed-InVStressed
#32.59637
InnUnstressed-InCUnstressed
#-41.24153
InnUnstressed-InVUnstressed
#[1] 10.73589
# Differences between singletons ("Unterschiede zwischen singletons"):
InVUnstressed-InCUnstressed
#[1] -51.97742
InVStressed-InCStressed
#[1] -34.39634
# (end of script — trailing non-R web-page boilerplate removed; the stray
#  lines would otherwise be a parse error)