blob_id
stringlengths 40
40
| directory_id
stringlengths 40
40
| path
stringlengths 2
327
| content_id
stringlengths 40
40
| detected_licenses
listlengths 0
91
| license_type
stringclasses 2
values | repo_name
stringlengths 5
134
| snapshot_id
stringlengths 40
40
| revision_id
stringlengths 40
40
| branch_name
stringclasses 46
values | visit_date
timestamp[us]date 2016-08-02 22:44:29
2023-09-06 08:39:28
| revision_date
timestamp[us]date 1977-08-08 00:00:00
2023-09-05 12:13:49
| committer_date
timestamp[us]date 1977-08-08 00:00:00
2023-09-05 12:13:49
| github_id
int64 19.4k
671M
โ | star_events_count
int64 0
40k
| fork_events_count
int64 0
32.4k
| gha_license_id
stringclasses 14
values | gha_event_created_at
timestamp[us]date 2012-06-21 16:39:19
2023-09-14 21:52:42
โ | gha_created_at
timestamp[us]date 2008-05-25 01:21:32
2023-06-28 13:19:12
โ | gha_language
stringclasses 60
values | src_encoding
stringclasses 24
values | language
stringclasses 1
value | is_vendor
bool 2
classes | is_generated
bool 2
classes | length_bytes
int64 7
9.18M
| extension
stringclasses 20
values | filename
stringlengths 1
141
| content
stringlengths 7
9.18M
|
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
0686f76674c70be6b4bce2dd50f74325b309700d
|
9cc7423f4a94698df5173188b63c313a7df99b0e
|
/R/find_season.R
|
429e879b2e91e02fcc3b44a8c790e9d2c948c589
|
[
"MIT"
] |
permissive
|
HugoNjb/psycho.R
|
71a16406654b11007f0d2f84b8d36587c5c8caec
|
601eef008ec463040c68bf72ac1ed8d4a8f7751f
|
refs/heads/master
| 2020-03-27T01:24:23.389884
| 2018-07-19T13:08:53
| 2018-07-19T13:08:53
| 145,707,311
| 1
| 0
| null | 2018-08-22T12:39:27
| 2018-08-22T12:39:27
| null |
UTF-8
|
R
| false
| false
| 569
|
r
|
find_season.R
|
#' Find season of dates.
#'
#' Returns the meteorological season of an array of dates.
#'
#' @param date Array of dates (or strings coercible with \code{as.Date}).
#'
#' @return Factor with levels "Winter", "Spring", "Summer", "Fall", one
#'   element per input date.
#'
#' @examples
#' library(psycho)
#'
#' dates <- c("2017-02-15", "2017-05-15", "2017-08-15", "2017-11-15")
#' find_season(dates)
#'
#' @author \href{https://dominiquemakowski.github.io/}{Dominique Makowski}
#'
#' @export
find_season <- function(date) {
  # Snap each date to the first of its month, then add 32 days so quarter
  # boundaries line up with meteorological seasons:
  # Dec-Feb -> Q1, Mar-May -> Q2, Jun-Aug -> Q3, Sep-Nov -> Q4.
  d <- as.Date(cut(as.Date(date), "month")) + 32
  # Supply `levels` explicitly so the quarter -> season mapping is correct
  # even when the input does not contain all four quarters. (Relying on
  # `labels` alone mislabels or errors for partial inputs, which is why the
  # old docs required the dates to "cover the 4 seasons".)
  season <- factor(
    quarters(d),
    levels = c("Q1", "Q2", "Q3", "Q4"),
    labels = c("Winter", "Spring", "Summer", "Fall")
  )
  return(season)
}
|
317b229f3da1c6cc0bf532d403a1a21c945c7728
|
a0ac7178b6ca4b13cc78102468ee2e0272358c9e
|
/man/createPostgreSQLTable.Rd
|
890dc05d1af47f804501b2c2eb2909c56b416725
|
[] |
no_license
|
FranciscoChen/TCGAParser
|
0a34187b1d9fb55679393a474edf2724a658f594
|
f3751019122f38c4e442a7d47dd27056f6ef5c8b
|
refs/heads/master
| 2021-01-10T19:04:57.819619
| 2014-09-09T09:04:59
| 2014-09-09T09:04:59
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 532
|
rd
|
createPostgreSQLTable.Rd
|
\name{createPostgreSQLTable}
\alias{createPostgreSQLTable}
\title{Create PostgreSQL table}
\usage{
createPostgreSQLTable(cancer, array, drv, ...)
}
\description{
Create the table in the PostgreSQL database (needs the
sampleinfo file from filterBarcodes).
}
\details{
Requires RPostgreSQL package and a connection to a
PostgreSQL database. drv A character string specifying
the database management system driver. ... Arguments
needed to connect to the database, such as user,
password, dbname, host, port, etc.
}
|
34b42e96cf18e8dd2ce32fc01e05bae16c64be3a
|
5a5f43179fe5675d91ef0dd31b662fedf7eb9b11
|
/tests/testthat/test-rpptx.R
|
a5a226e00d2010d3be07e26487e1ac44682dd3e5
|
[
"MIT"
] |
permissive
|
davidgohel/officedown
|
4e63a99cae4d6cb9f254d31ca6d5cf9d278f453c
|
a831d923b577bbf376070e155097d5f9dec2e9a0
|
refs/heads/master
| 2023-07-10T11:41:08.442432
| 2023-01-06T12:18:33
| 2023-01-06T12:18:33
| 126,241,290
| 333
| 36
|
NOASSERTION
| 2022-02-19T15:36:51
| 2018-03-21T21:12:48
|
R
|
UTF-8
|
R
| false
| false
| 504
|
r
|
test-rpptx.R
|
# Visual regression tests for the pptx output produced by officedown.
library(xml2)
library(officer)
library(rmarkdown)

# Rendering the test document requires pandoc version 2 or newer.
skip_if_not(rmarkdown::pandoc_available())
skip_if_not(pandoc_version() >= numeric_version("2"))

source("utils.R")

test_that("visual testing tables", {
  # Snapshot comparison needs the doconv package and a local MS Office install.
  testthat::skip_if_not_installed("doconv")
  testthat::skip_if_not(doconv::msoffice_available())
  library(doconv)

  # Render the example deck to a throwaway file and compare it against the
  # stored visual snapshot.
  out_file <- tempfile(fileext = ".pptx")
  render_rmd("rmd/pptx.Rmd", output_file = out_file)
  expect_snapshot_doc(x = out_file, name = "pptx-example", engine = "testthat")
})
|
a7c1424c72a97ed1aa3d6e53486235fd40002a60
|
b502e4acb77f172777b08b7978b7509ba4e774b5
|
/scripts/2_process/merge_data.R
|
7e86158ce6de93eef2a40764fb777732e9822e6a
|
[] |
no_license
|
limnoliver/GLRIeof
|
565513fe9132dceef6177c9fba9d14fcf8e533b6
|
ecabc93b124dfb349c00cac061312e5eda4215e4
|
refs/heads/master
| 2020-06-24T06:12:59.255261
| 2018-05-21T14:50:22
| 2018-05-21T14:50:22
| 96,921,879
| 1
| 1
| null | 2017-07-19T19:16:57
| 2017-07-11T17:59:55
|
R
|
UTF-8
|
R
| false
| false
| 1,167
|
r
|
merge_data.R
|
# Script to merge water-quality, rain, discharge, weather, and field data
# into one table keyed by storm.
library(dplyr)

site <- 'sw3'
temp_file <- paste0('data_cached/', site)

# Load the per-site cached inputs.
wq <- read.csv(paste0(temp_file, '_prepped_WQbystorm.csv'))
rain <- read.csv(paste0(temp_file, '_rain_variables.csv'))
discharge <- read.csv(paste0(temp_file, '_discharge_variables.csv'))
weather <- read.csv(paste0(temp_file, '_weather_by_storm.csv'))
field <- read.csv(paste0(temp_file, '_field_predictors.csv'))

# Subset and rename columns so the merges below do not duplicate columns.
rain <- rename(rain, 'rain_startdate' = 'StartDate', 'rain_enddate' = 'EndDate')
rain <- select(rain, -stormnum, -site)
discharge <- select(discharge, unique_storm_number, ant_dis_1day_max:ant_dis_14day_max)
field <- field[, c(1, 4:7)]

# Left-join everything onto the water-quality table, one storm per row.
# Each merge keeps all wq rows (all.x = TRUE), matching on the storm key.
all.eof <- Reduce(
  function(left, right) merge(left, right, by = 'unique_storm_number', all.x = TRUE),
  list(wq, rain, discharge, weather, field)
)

tempfile_name <- file.path('data_cached', paste0(site, '_merged_dat.csv'))
write.csv(all.eof, tempfile_name, row.names = FALSE)
|
775599aaa7b3748b944690fa31999d3d23d7be7a
|
5de5417c72915decfc509e4510de7a08c22e5bc3
|
/R/summarise-cox-data.R
|
37faa4646e19f58068d3b69aa5c54c338c7cc9b1
|
[] |
no_license
|
kholsteen/n1coxeval
|
20a60f4340d568315963e87e1fd38b2ca6de96af
|
93613addc177a4953bb561fff4a139980b548854
|
refs/heads/master
| 2021-07-15T01:05:42.517982
| 2021-02-24T23:04:01
| 2021-02-24T23:04:01
| 234,411,541
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 988
|
r
|
summarise-cox-data.R
|
#' Summarise Cox data for n-of-1 power analysis
#'
#' Builds a one-row summary of a (simulated or real) Cox data set: the
#' subject id and day count, the realized mean/sd of each covariate in
#' \code{x_vars}, and the distribution of event times \code{t1} on event
#' days (\code{mig.ind == 1}).
#'
#' @param data Data frame with columns \code{id}, \code{mig.ind},
#'   \code{t1}, and the covariates named in \code{x_vars}.
#' @param x_vars Character vector of covariate column names; required
#'   unless \code{sim = TRUE}.
#' @param sim Logical; set \code{FALSE} for real data, in which case
#'   \code{x_vars} must be supplied.
#' @return A one-row data frame binding the general, covariate, and
#'   survival-time summaries column-wise.
#' @keywords internal
summarise_cox_data <- function(data,
                               x_vars = NULL,
                               sim = TRUE) {
  # Covariate names are mandatory for real (non-simulated) data.
  stopifnot(isTRUE(sim) || !is.null(x_vars))
  ## general results
  ## Note for the real data, n_days is missing all the dropped days...
  r1 <- data %>% dplyr::summarise(
    id = dplyr::first(.data$id),
    n_days = dplyr::n()
  )
  ## realized distribution of x
  ## `dplyr::funs()` is deprecated; the documented replacement is a named
  ## list of functions, which yields the same `<var>_mean` / `<var>_sd`
  ## output column names.
  r2 <- data %>% dplyr::summarise_at(x_vars,
                                     list("mean" = mean,
                                          "sd" = sd),
                                     na.rm = TRUE)
  ## survival time distribution (event days only)
  r3 <- data %>% dplyr::filter(.data$mig.ind == 1) %>%
    dplyr::summarise(
      t1_list = list(round(.data$t1, 1)),
      t1_mean = mean(.data$t1),
      t1_med = median(.data$t1),
      t1_min = min(.data$t1),
      t1_max = max(.data$t1)
    )
  dplyr::bind_cols(r1, r2, r3)
}
|
61adfef31420d8a683ebf6d49529ee3fabd039bb
|
b27add4ba86007bed7d4c22124eae8578f1104c8
|
/tech/bitmap_to_netlogo.R
|
6f8e40a00c0b1221d8d78a745afd1725584d558e
|
[] |
no_license
|
lauterbur/logo_models
|
e56b6fb80dd4db6145214d84673356dbcc53c8ee
|
dd0e074de626c68203ee4c8a9221545259d42b47
|
refs/heads/main
| 2023-07-09T17:10:16.821183
| 2021-08-11T02:13:23
| 2021-08-11T02:13:23
| 343,675,985
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,785
|
r
|
bitmap_to_netlogo.R
|
# Convert bitmap town/tech maps to NetLogo patch-color text files:
# read each .bmp, map its pixel colors to numeric NetLogo color codes via a
# lookup table, and write (x, y, color) triples to a .txt file.
library(tidyverse)
library(bmp)
library(pixmap)
# Base town map: processed once, inline, with its own color dictionary.
b <- read.bmp("/home/lauterbur/Desktop/AAUW_project/tech/TownTechMap_Base.bmp")
b
m<-as.raster(b,max=255L)
m
# Display the raster on a blank plot for visual inspection.
plot(1, type = "n", axes = FALSE, xlab = "", ylab = "")
usr <- par("usr")
rasterImage(m, usr[1], usr[3], usr[2], usr[4])
# Lookup: each distinct raster color -> a NetLogo color code. NOTE(review):
# this keys off unique(m), so it assumes exactly 12 distinct colors appear
# in this bitmap, in a stable order — verify against the source image.
dict<-setNames(c(7,9.9,115,66,15,95,87,1,45,62,33,103),unique(m))
dict
# Translate every pixel through the dictionary, preserving the image shape.
m_matrix<-matrix(unname(dict[as.vector(m)]), nrow=nrow(m),byrow=TRUE)
m_matrix
# Build (x, y, color) rows; y is reversed so row 1 of the image is the top.
coords<-data.frame(x=as.vector(col(m_matrix)),y=rev(as.vector(row(m_matrix))),color=as.vector(m_matrix))
coords<-t(coords)
coords[3,]
m
write.table(coords,"/home/lauterbur/Desktop/TownTechMap_netlogo.txt",row.names=FALSE,col.names=FALSE)
# Batch-process the wifi maps (excluding any file with "sig" in its name).
files<-list.files(path = "/home/lauterbur/Desktop/AAUW_project/tech/wifi_maps/",pattern = "*.bmp",full.names = TRUE)
maps<- files[which(!grepl("sig",files))] %>%
map(~read.bmp(.))
maps
names(maps)<-files[which(!grepl("sig",files))]
maps
# Fixed color table for the wifi maps (hex color -> NetLogo code below).
# NOTE(review): "#D952FS" is not a valid hex color ("S" is not a hex digit)
# — likely a typo for "#D952F5", which already appears later in the vector;
# as written that entry can never match a pixel.
hex<-c( "#C3C3C3","#FFFFFF","#A349A4","#B5E61D","#ED1C24","#00A2E8","#99D9EA","#000000","#FFF200","#22B14C","#880015","#3F48CC",
"#FF7F27","#D952FS","#DA51F7","#DE51F7","#D952F5","#DC53F4","#DD52F5")
# NOTE(review): some codes here differ from the base-map dict above
# (66 vs 55, 87 vs 85, 62 vs 63, 103 vs 105) — confirm this is intentional.
dict<-setNames(c(7,9.9,115,55,15,95,85,1,45,63,33,105,25,125,125,125,125,125,125),hex)
# Same pipeline as the inline pass above, repeated per wifi map.
for (name in names(maps)) {
# print(i)
b<-maps[[name]]
m<-as.raster(b,max=255L)
plot(1, type = "n", axes = FALSE, xlab = "", ylab = "")
usr <- par("usr")
rasterImage(m, usr[1], usr[3], usr[2], usr[4])
dict
m_matrix<-matrix(unname(dict[as.vector(m)]), nrow=nrow(m),byrow=TRUE)
m_matrix
coords<-data.frame(x=as.vector(col(m_matrix)),y=rev(as.vector(row(m_matrix))),color=as.vector(m_matrix))
coords<-t(coords)
coords[3,]
m
# Output file keeps the input path with ".bmp" replaced by ".txt".
newname<-str_remove(name,".bmp")
write.table(coords,paste(newname,".txt",sep=""),row.names=FALSE,col.names=FALSE)
}
|
4235c1bbd98e2cedd482fa9df9bf285b9bb5f93d
|
2975fba6bf359214c55e7d936f896a5a4be3d8f5
|
/man/riskRegression.Rd
|
3812e62ae0d8b0f81201502fba26b6a905e7aac5
|
[] |
no_license
|
tagteam/riskRegression
|
6bf6166f098bbdc25135f77de60122e75e54e103
|
fde7de8ca8d4224d3a92dffeccf590a786b16941
|
refs/heads/master
| 2023-08-08T03:11:29.465567
| 2023-07-26T12:58:04
| 2023-07-26T12:58:04
| 36,596,081
| 38
| 14
| null | 2023-05-17T13:36:27
| 2015-05-31T09:22:16
|
R
|
UTF-8
|
R
| false
| true
| 6,112
|
rd
|
riskRegression.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/riskRegression-package.R, R/riskRegression.R
\docType{package}
\name{riskRegression}
\alias{riskRegression}
\alias{ARR}
\alias{LRR}
\title{Risk Regression
Fits a regression model for the risk of an event -- allowing for competing
risks.}
\usage{
riskRegression(
formula,
data,
times,
link = "relative",
cause,
conf.int = TRUE,
cens.model,
cens.formula,
max.iter = 50,
conservative = TRUE,
...
)
}
\arguments{
\item{formula}{Formula where the left hand side specifies the event
history event.history and the right hand side the linear predictor. See
examples.}
\item{data}{The data for fitting the model in which includes all
the variables included in formula.}
\item{times}{Vector of times. For each time point in \code{times}
estimate the baseline risk and the timevarying coefficients.}
\item{link}{\code{"relative"} for the absolute risk regression
model. \code{"logistic"} for the logistic risk regression model.
\code{"prop"} for the Fine-Gray regression model.}
\item{cause}{The cause of interest.}
\item{conf.int}{If \code{TRUE} return the iid decomposition, that
can be used to construct confidence bands for predictions.}
\item{cens.model}{Specified the model for the (conditional)
censoring distribution used for deriving weights (IFPW). Defaults
to "KM" (the Kaplan-Meier method ignoring covariates) alternatively
it may be "Cox" (Cox regression).}
\item{cens.formula}{Right hand side of the formula used for fitting
the censoring model. If not specified the right hand side of
\code{formula} is used.}
\item{max.iter}{Maximal number of iterations.}
\item{conservative}{If \code{TRUE} use variance formula that ignores the contribution
by the estimate of the inverse of the probability of censoring weights}
\item{...}{Further arguments passed to \code{comp.risk}}
}
\description{
This is a wrapper for the function \code{comp.risk} from the timereg package.
The main difference is one marks variables in the formula that should have a
time-dependent effect whereas in \code{comp.risk} one marks variables that
should have a time constant (proportional) effect.
}
\examples{
library(prodlim)
data(Melanoma,package="riskRegression")
## tumor thickness on the log-scale
Melanoma$logthick <- log(Melanoma$thick)
# Single binary factor
## absolute risk regression
library(survival)
library(prodlim)
fit.arr <- ARR(Hist(time,status)~sex,data=Melanoma,cause=1)
print(fit.arr)
# show predicted cumulative incidences
plot(fit.arr,col=3:4,newdata=data.frame(sex=c("Female","Male")))
## compare with non-parametric Aalen-Johansen estimate
library(prodlim)
fit.aj <- prodlim(Hist(time,status)~sex,data=Melanoma)
plot(fit.aj,conf.int=FALSE)
plot(fit.arr,add=TRUE,col=3:4,newdata=data.frame(sex=c("Female","Male")))
## with time-dependent effect
fit.tarr <- ARR(Hist(time,status)~strata(sex),data=Melanoma,cause=1)
plot(fit.tarr,newdata=data.frame(sex=c("Female","Male")))
## logistic risk regression
fit.lrr <- LRR(Hist(time,status)~sex,data=Melanoma,cause=1)
summary(fit.lrr)
# Single continuous factor
## tumor thickness on the log-scale
Melanoma$logthick <- log(Melanoma$thick)
## absolute risk regression
fit2.arr <- ARR(Hist(time,status)~logthick,data=Melanoma,cause=1)
print(fit2.arr)
# show predicted cumulative incidences
plot(fit2.arr,col=1:5,newdata=data.frame(logthick=quantile(Melanoma$logthick)))
## comparison with nearest neighbor non-parametric Aalen-Johansen estimate
library(prodlim)
fit2.aj <- prodlim(Hist(time,status)~logthick,data=Melanoma)
plot(fit2.aj,conf.int=FALSE,newdata=data.frame(logthick=quantile(Melanoma$logthick)))
plot(fit2.arr,add=TRUE,col=1:5,lty=3,newdata=data.frame(logthick=quantile(Melanoma$logthick)))
## logistic risk regression
fit2.lrr <- LRR(Hist(time,status)~logthick,data=Melanoma,cause=1)
summary(fit2.lrr)
## change model for censoring weights
library(rms)
fit2a.lrr <- LRR(Hist(time,status)~logthick,
data=Melanoma,
cause=1,
cens.model="cox",
cens.formula=~sex+epicel+ulcer+age+logthick)
summary(fit2a.lrr)
## compare prediction performance
Score(list(ARR=fit2.arr,AJ=fit2.aj,LRR=fit2.lrr),formula=Hist(time,status)~1,data=Melanoma)
# multiple regression
library(riskRegression)
library(prodlim)
# absolute risk model
multi.arr <- ARR(Hist(time,status)~logthick+sex+age+ulcer,data=Melanoma,cause=1)
# stratified model allowing different baseline risk for the two gender
multi.arr <- ARR(Hist(time,status)~thick+strata(sex)+age+ulcer,data=Melanoma,cause=1)
# stratify by a continuous variable: strata(age)
multi.arr <- ARR(Hist(time,status)~tp(thick,power=0)+strata(age)+sex+ulcer,
data=Melanoma,
cause=1)
fit.arr2a <- ARR(Hist(time,status)~tp(thick,power=1),data=Melanoma,cause=1)
summary(fit.arr2a)
fit.arr2b <- ARR(Hist(time,status)~timevar(thick),data=Melanoma,cause=1)
summary(fit.arr2b)
## logistic risk model
fit.lrr <- LRR(Hist(time,status)~thick,data=Melanoma,cause=1)
summary(fit.lrr)
## nearest neighbor non-parametric Aalen-Johansen estimate
library(prodlim)
fit.aj <- prodlim(Hist(time,status)~thick,data=Melanoma)
plot(fit.aj,conf.int=FALSE)
# prediction performance
x <- Score(list(fit.arr2a,fit.arr2b,fit.lrr),
data=Melanoma,
formula=Hist(time,status)~1,
cause=1,
split.method="none")
}
\references{
Thomas A Gerds, Thomas H Scheike, and Per K Andersen. Absolute risk
regression for competing risks: interpretation, link functions, and
prediction. Statistics in medicine, 31(29):3921--3930, 2012.
Scheike, Zhang and Gerds (2008), Predicting cumulative incidence probability
by direct binomial regression, Biometrika, 95, 205-220.
Scheike and Zhang (2007), Flexible competing risks regression modelling and
goodness of fit, LIDA, 14, 464-483.
Martinussen and Scheike (2006), Dynamic regression models for survival data,
Springer.
}
\author{
Thomas A. Gerds \email{tag@biostat.ku.dk}, Thomas H. Scheike \email{ts@biostat.ku.dk}
}
\keyword{survival}
|
a7b28b0e96329f7a96f456f4ad766fd3ca80e305
|
1e3f537764f4ad82ecef32b59115877aba4d79dc
|
/analysis/debugging_codes/make_fake_vaccine_codes.R
|
2b40ae9e2916f0a7db2c00dbf5a878b5977a5e51
|
[
"MIT"
] |
permissive
|
opensafely/openprompt-vaccine-long-covid
|
8201b51617192d142733a7cbaf35237fe97d27d9
|
70d4815d221b8cf9242d050310e1c8f2dccd96ea
|
refs/heads/main
| 2023-08-23T10:39:27.462357
| 2023-08-21T16:00:50
| 2023-08-21T16:00:50
| 561,415,672
| 1
| 0
|
MIT
| 2023-07-31T12:15:58
| 2022-11-03T16:33:24
|
R
|
UTF-8
|
R
| false
| false
| 3,900
|
r
|
make_fake_vaccine_codes.R
|
# Generate a fake per-patient vaccine dataset (manufacturer strings and dose
# dates) for debugging, sampled from a weighted list of product names.
library(tidyverse)
library(lubridate)
vacc_names <-
c(
"Comirnaty COVID-19 mRNA Vacc ready to use 0.3ml in md vials",
"Comirnaty Original/Omicron BA.1 COVID-19 Vacc md vials",
"COVID-19 mRNA Vaccine Comirnaty 30micrograms/0.3ml dose conc for susp for inj MDV (Pfizer)",
"COVID-19 mRNA Vaccine Comirnaty Children 5-11yrs 10mcg/0.2ml dose con for disp for inj MDV (Pfizer)",
"COVID-19 mRNA Vaccine Spikevax (nucleoside modified) 0.1mg/0.5mL dose disp for inj MDV (Moderna)",
"COVID-19 Vac AZD2816 (ChAdOx1 nCOV-19) 3.5x10*9 viral part/0.5ml dose sol for inj MDV (AstraZeneca)",
"COVID-19 Vac CoronaVac (adjuvanted) 600U/0.5ml dose susp for inj vials",
"COVID-19 Vac Covaxin (NIV-2020-770 inactivated) micrograms/0.5ml dose susp for inj MDV",
"COVID-19 Vac Covishield (ChAdOx1 S recombinant) 5x10*9 viral particles/0.5ml dose sol for in MDV",
"COVID-19 Vac Covovax (adjuvanted) micrograms/0.5ml dose susp for inj MDV (Serum Institute of India)",
"COVID-19 Vac Nuvaxovid (recombinant, adj) micrograms/0.5ml dose susp for inj MDV (Novavax CZ a.s.)",
"COVID-19 Vac Sanofi (Cov2 preS dM monovalent D614 (recombinant)) 5mcg/0.5ml dose susp for inj MDV",
"COVID-19 Vac Sinopharm BIBP (inactivated adjuvanted) 6.5U/0.5ml dose susp for inj vials",
"COVID-19 Vac Spikevax (Zero) /(Omicron) in md vials",
"COVID-19 Vac Sputnik V Component I 0.5ml multidose vials",
"COVID-19 Vacc Sputnik V Component II 0.5ml multidose vials",
"COVID-19 Vaccine Convidecia 0.5ml in vials",
"COVID-19 Vaccine Jansen (Ad26.COV2-S (recomb)) 0.5ml dose solution for injection multidose vials",
"COVID-19 Vaccine Medicago (CoVLP) 3.75micrograms/0.5ml dose emulsion for injection multidose vials",
"COVID-19 Vaccine Moderna (mRNA-1273.529) 50micrograms/0.25ml dose sol for in MOV",
"COVID-19 Vaccine Sputnik V Component I 0.5ml inj vials",
"COVID-19 Vaccine Sputnik V Component II 0.5ml inj vials",
"COVID-19 Vaccine Valneva (inactivated adj whole virus) 40antigen units/0.5ml dose susp for inj MDV",
"COVID-19 Vaccine Vaxzevria 0.5ml inj multidose vials (AstraZeneca)")
#NA
# )
# Sampling weights: start with 24 equal weights of 0.2/21, then give the
# three common products fixed shares (0.35 + 0.4 + 0.05 = 0.8). The other
# 21 names split the remaining 0.2, so the weights sum to exactly 1.
vacc_weights <- rep(0.2/21, 24)
vacc_weights[vacc_names == "COVID-19 Vaccine Vaxzevria 0.5ml inj multidose vials (AstraZeneca)"] <- 0.35
vacc_weights[vacc_names == "COVID-19 mRNA Vaccine Comirnaty 30micrograms/0.3ml dose conc for susp for inj MDV (Pfizer)"] <- 0.4
vacc_weights[vacc_names == "COVID-19 mRNA Vaccine Spikevax (nucleoside modified) 0.1mg/0.5mL dose disp for inj MDV (Moderna)"] <- 0.05
#vacc_weights[is.na(vacc_names)] <- 0.1
# Sanity check: prints 1 when the weights are a valid probability vector.
sum(vacc_weights)
# Allow the caller to set data_size before sourcing; default to 2000 rows.
if(!exists("data_size")){data_size = 2000}
set.seed(4214)
test_mrna_code <- data.frame(
patient_id = 1:data_size,
vaccine_dose_1_manufacturer = sample(vacc_names, size = data_size, replace = TRUE, prob = vacc_weights),
vaccine_dose_2_manufacturer = sample(vacc_names, size = data_size, replace = TRUE, prob = vacc_weights),
vaccine_dose_3_manufacturer = sample(vacc_names, size = data_size, replace = TRUE, prob = vacc_weights)
) %>% mutate(
# Enforce dose ordering: dose n can only exist if dose n-1 exists.
vaccine_dose_2_manufacturer = ifelse(is.na(vaccine_dose_1_manufacturer), NA, vaccine_dose_2_manufacturer),
vaccine_dose_3_manufacturer = ifelse(is.na(vaccine_dose_2_manufacturer), NA, vaccine_dose_3_manufacturer),
# Count recorded doses, then add 0-2 extra (unrecorded) doses — but keep
# zero for patients with no recorded doses at all.
no_prev_vacc_interim = as.numeric(!is.na(vaccine_dose_1_manufacturer)) +
as.numeric(!is.na(vaccine_dose_2_manufacturer)) +
as.numeric(!is.na(vaccine_dose_3_manufacturer)),
no_prev_vacc = no_prev_vacc_interim + sample(0:2, size = data_size, replace = TRUE),
no_prev_vacc = ifelse(no_prev_vacc_interim==0,0,no_prev_vacc),
# Dose dates: dose 1 within a year of 2020-11-01, later doses at
# plausible random intervals after the previous one.
vaccine_dose_1_date = as.Date("2020-11-01") + sample(0:365, size = data_size, replace = TRUE),
vaccine_dose_2_date = vaccine_dose_1_date + sample(19:180, size = data_size, replace = TRUE),
vaccine_dose_3_date = vaccine_dose_2_date + sample(58:365, size = data_size, replace = TRUE)
)
|
8ed5cf7ebe01f50e9a1cefee0e7af12d86662996
|
0a906cf8b1b7da2aea87de958e3662870df49727
|
/grattan/inst/testfiles/anyOutside/libFuzzer_anyOutside/anyOutside_valgrind_files/1610054742-test.R
|
391386584be84d4c4b8ca369e6a2049ddf585815
|
[] |
no_license
|
akhikolla/updated-only-Issues
|
a85c887f0e1aae8a8dc358717d55b21678d04660
|
7d74489dfc7ddfec3955ae7891f15e920cad2e0c
|
refs/heads/master
| 2023-04-13T08:22:15.699449
| 2021-04-21T16:25:35
| 2021-04-21T16:25:35
| 360,232,775
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 244
|
r
|
1610054742-test.R
|
# libFuzzer/valgrind reproduction case for grattan:::anyOutside: replay a
# fixed argument list (note the NA and extreme negative integers in `x`)
# and print the structure of the result.
testlist <- list(a = -724249388L, b = -724249388L, x = c(-1111638595L, -1111638595L, NA, -1111638595L, -1120859393L, -4342339L, -1111621699L, -1109533185L, -14083301L, 1362168575L))
result <- do.call(grattan:::anyOutside,testlist)
str(result)
|
855aa1d1cc8dc1086f920b1754980fdac396f7dd
|
c4a3a7701643529faca199777c55466462ff565d
|
/tests/testthat/test_GeoShiny.R
|
93e81414fc4780e3af56537fe52ddc3de31d09a7
|
[] |
no_license
|
mariatreesa/Shiny-App
|
e00da85428f1e3b96ddab494ab35fc0cdf13f61f
|
f9bd19e19f605ffcc206ec89a14ec4ad44bd84cf
|
refs/heads/master
| 2022-11-15T22:41:11.251663
| 2020-07-07T12:58:57
| 2020-07-07T12:58:57
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,080
|
r
|
test_GeoShiny.R
|
# Test suite for package GeoShiny
# Function under test: geocode_response
context("geocode_response")
# Test that the function takes a single valid address (no empties or multiples)
test_that("geocode_response takes one address at a time", {
expect_error(geocode_response(address = c("Kerala", "Nairobi")))
expect_error(geocode_response(address = ""))
})
# Test that an address containing special characters is rejected
test_that("Found special characters",{
expect_error(geocode_response(address = "Nai#&%"))
}
)
# Test that a map key must be given, so no request is sent to the API without it
test_that("No API",{
expect_error(geocode_response(address = "Nairobi", map_key = ""))
}
)
context("reverse_geocode_response")
# Test that the function stops if invalid coordinates are given for latitude
# and longitude (lat must be in [-90, 90], long in [-180, 180])
test_that("reverse_geocode_response takes correct coordinates for lattitude and longitude", {
expect_error(reverse_geocode_response(lat = 91.8867, long = 182.4321, map_key = ""))
})
# Valid coordinates but an empty map key should still error
test_that("API present",{
expect_error(reverse_geocode_response(lat = 58.4108,long =15.6214, map_key = ""))
}
)
|
2b73fa51e095c2b72346bdf0a933f8bd062510e9
|
afcd366c47419daf0687137c06e94c9b32117bdb
|
/man/peg_referencia.Rd
|
8cb7c9a43ef3734ee6052b4f011fdb04e0240788
|
[] |
no_license
|
luizmartins1980/apida
|
1963bcfff0be75805f057efb60d2af0b3be21a80
|
98677327fafaf5bc648f55b25bd179a3cae766fb
|
refs/heads/master
| 2021-04-25T06:40:43.017069
| 2017-07-22T03:52:37
| 2017-07-22T03:52:37
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 469
|
rd
|
peg_referencia.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/peg.R
\name{peg_referencia}
\alias{peg_referencia}
\title{Pegar tabela de referência de alguma categoria}
\usage{
peg_referencia(tipo_referencia)
}
\arguments{
\item{tipo_referencia}{Tipo da referência (situacoesDeputado,
situacoesEvento, situacoesOrgao, situacoesProposicao, tiposEvento,
tiposOrgao, tiposProposicao, uf)}
}
\description{
Pegar tabela de referência de alguma categoria
}
|
6ab84cb84dd176946b6b59c1764652a3517bea3c
|
2a7e77565c33e6b5d92ce6702b4a5fd96f80d7d0
|
/fuzzedpackages/factorstochvol/inst/doc/paper.R
|
53e2075917c4cdc91d74c11280ff1245b3204225
|
[] |
no_license
|
akhikolla/testpackages
|
62ccaeed866e2194652b65e7360987b3b20df7e7
|
01259c3543febc89955ea5b79f3a08d3afe57e95
|
refs/heads/master
| 2023-02-18T03:50:28.288006
| 2021-01-18T13:23:32
| 2021-01-18T13:23:32
| 329,981,898
| 7
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 8,376
|
r
|
paper.R
|
# knitr-purled R code from a stochvol/factorstochvol paper vignette.
# The "## ----name, options----" lines are the original chunk headers.
# This setup chunk configures Sweave-style rendering and chunk defaults.
## ----setup, include=FALSE, cache=FALSE----------------------------------------
knitr::render_sweave()
knitr::opts_chunk$set(prompt = TRUE,
fig.show = "hide",
warning = FALSE,
error = FALSE,
message = FALSE,
echo = FALSE,
cache = TRUE,
fig.path = "Figures/article-")
base::options(continue = "+ ", prompt = "R> ")
# Optional plotting dependencies; installation code kept commented out.
#used_packages <- c("LSD", "RColorBrewer")
#for (p in used_packages) {
# if (!require(p, character.only = TRUE)) {
# install.packages(p)
# }
#}
# Univariate stochastic-volatility (stochvol) examples: fit SV/SVt/SVl
# models to CHF exchange-rate data, then summarise, predict, and plot.
# Chunks marked eval=FALSE were shown in the paper but not executed here;
# they appear below as "# "-prefixed lines.
## ----presvrunmodel, eval=TRUE, echo=FALSE-------------------------------------
set.seed(1)
library("stochvol")
data("exrates")
ind <- which(exrates$date >= as.Date("2008-03-01") &
exrates$date <= as.Date("2012-03-01"))
CHF_price <- exrates$CHF[ind]
## ----svrunmodel, eval=FALSE, echo=TRUE----------------------------------------
# set.seed(1)
# library("stochvol")
# data("exrates")
# ind <- which(exrates$date >= as.Date("2008-03-01") &
# exrates$date <= as.Date("2012-03-01"))
# CHF_price <- exrates$CHF[ind]
# res_sv <- svsample(CHF_price, designmatrix = "ar1")
## ----presvtrunmodel, echo=FALSE, eval=TRUE, dependson="presvrunmodel"---------
set.seed(2)
CHF_logret <- 100 * logret(CHF_price)
## ----svtrunmodel, echo=TRUE, eval=FALSE---------------------------------------
# set.seed(2)
# CHF_logret <- 100 * logret(CHF_price)
# res_svt <- svtsample(CHF_logret, designmatrix = "ar0")
# Fit the SV-with-leverage model with USD/JPY log-returns as regressors.
## ----svlrunmodel, echo=TRUE, eval=TRUE, dependson="presvtrunmodel"------------
set.seed(3)
X <- cbind(constant = 1,
100 * logret(exrates$USD[ind]),
100 * logret(exrates$JPY[ind]))
res_svl <- svlsample(CHF_logret, designmatrix = X)
## ----svlplot, echo=TRUE, dependson="svlrunmodel", fig.height=4----------------
plot(res_svl, showobs = FALSE,
dates = exrates$date[ind[-1]])
## ----svlbetaplot, echo=2, dependson="svlrunmodel", fig.width=6.7, fig.height=3.5----
opar <- par(mar = c(2.5, 1.5, 0.5, 0.5), mfrow = c(3, 2), mgp = c(1.7, 0.5, 0))
for (i in seq_len(3)) {
coda::traceplot(svbeta(res_svl)[, i])
coda::densplot(svbeta(res_svl)[, i], show.obs = FALSE)
}
par(opar)
## ----printsummary, echo=TRUE, eval=TRUE, results="markup"---------------------
summary(res_svl, showlatent = FALSE)
# Out-of-sample prediction: 24 steps ahead using future USD/JPY returns.
## ----svlpredict, echo=TRUE, eval=TRUE-----------------------------------------
set.seed(4)
pred_ind <- seq(tail(ind, 1), length.out = 25)
pred_X <- cbind(constant = 1,
100 * logret(exrates$USD[pred_ind]),
100 * logret(exrates$JPY[pred_ind]))
pred_svl <- predict(res_svl, 24, newdata = pred_X)
## ----plotsvlpred, echo=TRUE, eval=TRUE, fig.height=3.5, fig.width=10----------
opar <- par(mgp = c(1.7, 0.5, 0))
obs_CHF <- 100 * logret(exrates$CHF[pred_ind])
ts.plot(cbind(t(apply(predy(pred_svl), 2, quantile, c(0.05, 0.5, 0.95))),
obs_CHF), xlab = "Periods ahead", lty = c(rep(1, 3), 2),
col = c("gray80", "black", "gray80", "red"))
par(opar)
## ----svroll, echo=TRUE, eval=FALSE--------------------------------------------
# set.seed(5)
# res <- svsample_roll(CHF_logret, n_ahead = 1,
# forecast_length = 30,
# refit_window = "moving",
# calculate_quantile = c(0.01, 0.05),
# calculate_predictive_likelihood = TRUE)
# Prior specification examples (display only, not executed).
## ----printpriordefault, echo=TRUE, eval=FALSE---------------------------------
# svsample(CHF_logret, priormu = c(0, 100), priorphi = c(5, 1.5),
# priorsigma = 1, priorbeta = c(0, 10000))
# svtsample(CHF_logret, priormu = c(0, 100), priorphi = c(5, 1.5),
# priorsigma = 1, priorbeta = c(0, 10000), priornu = 0.1)
# svlsample(CHF_logret, priormu = c(0, 100), priorphi = c(5, 1.5),
# priorsigma = 1, priorbeta = c(0, 10000), priorrho = c(4, 4))
# svtlsample(CHF_logret, priormu = c(0, 100), priorphi = c(5, 1.5),
# priorsigma = 1, priorbeta = c(0, 10000), priornu = 0.1,
# priorrho = c(4, 4))
## ----printpriorspecdefault, echo=TRUE, eval=FALSE-----------------------------
# ps <- specify_priors(
# mu = sv_normal(mean = 0, sd = 100),
# phi = sv_beta(shape1 = 5, shape2 = 1.5),
# sigma2 = sv_gamma(shape = 0.5, rate = 0.5),
# nu = sv_infinity(),
# rho = sv_constant(0),
# latent0_variance = "stationary",
# beta = sv_multinormal(mean = 0, sd = 10000, dim = 1))
# svsample(CHF_logret, priorspec = ps)
## ----eval=FALSE---------------------------------------------------------------
# y <- svsim(50)$y
# svsample(y, expert = list(correct_model_misspecification = TRUE))
# Multivariate factor SV (factorstochvol) examples: fit a 2-factor model to
# six exchange-rate series, then diagnose and visualise the results.
## ----fsvprepdata, echo=TRUE, fig.width=10, fig.height=5-----------------------
library("factorstochvol")
library("zoo")
data("exrates", package = "stochvol")
m <- 6
n <- 1000
y <- 100 * logret(tail(exrates[, seq_len(m)], n + 1))
y <- zoo(y, order.by = tail(exrates$date, n))
plot(y, main = "", xlab = "Time")
## ----preorder, echo=TRUE------------------------------------------------------
preorder(y, factors = 2)
## ----findrestrict, echo=TRUE--------------------------------------------------
findrestrict(y, factors = 2)
## ----runmodel, echo=TRUE------------------------------------------------------
set.seed(1)
res <- fsvsample(y, factors = 2, draws = 10000,
zeromean = FALSE, thin = 10, quiet = TRUE)
## ----printrres, echo = TRUE---------------------------------------------------
res
## ----covn, echo = TRUE--------------------------------------------------------
dim(cov_n <- covmat(res))
# MCMC diagnostics on the log-determinant of the covariance draws.
## ----logdetcovn, echo = 2:5, fig.width = 10, fig.height=3.5-------------------
opar <- par(mfrow = c(1, 2), mgp = c(1.7, 0.5, 0), mar = c(3, 3, 1, 1))
logdet <- function (x) log(det(x))
logdet_n <- apply(cov_n[,,,1], 3, logdet)
ts.plot(logdet_n)
acf(logdet_n, main = "")
par(opar)
## ----covess, echo = TRUE------------------------------------------------------
round(apply(cov_n, 1:2, coda::effectiveSize))
## ----corimageplot, echo=2-----------------------------------------------------
opar <- par(mfrow = c(1, 3), xpd = TRUE)
corimageplot(res, these = seq(1, n, length.out = 3), plotCI = "circle",
plotdatedist = 2, date.cex = 1.1)
par(opar)
## ----voltimeplot, echo=2:3, fig.width = 10, fig.height = 3, cache.rebuild = TRUE----
opar <- par(mgp = c(1.7, 0.5, 0), mar = c(2, 1.5, 1, 0.5))
palette(RColorBrewer::brewer.pal(7, "Dark2")[-5])
voltimeplot(res, legend = "top")
par(opar)
# NOTE(review): "cache.rebuile" in the next chunk header is a typo for
# "cache.rebuild" (harmless here since the header is only a comment).
## ----cortimeplot, echo=2:4, fig.width = 10, fig.height = 5, cache.rebuile = TRUE----
opar <- par(mfrow = c(2, 1), mgp = c(1.7, 0.5, 0), mar = c(2, 1.5, 1, 0.5))
palette(RColorBrewer::brewer.pal(6, "Dark2"))
cortimeplot(res, 1)
cortimeplot(res, 2)
par(opar)
## ----comtimeplot, echo=2, fig.height = 6.5------------------------------------
opar <- par(mgp = c(1.7, 0.5, 0), mar = c(3, 3, 1, 1))
comtimeplot(res, maxrows = 6)
par(opar)
## ----loadplot2, fig.width=4.5, fig.height=4.5---------------------------------
opar <- par(mgp = c(1.7, 0.5, 0), mar = c(2.7, 2.7, 2, 0.5))
facloadpairplot(res)
facloadcredplot(res)
par(opar)
## ----varplot, fig.width=10, fig.height=4--------------------------------------
opar <- par(mgp = c(1.7, 0.5, 0), mar = c(2.7, 2.7, 2, 0.5))
logvartimeplot(res, show = "fac")
par(opar)
## ----varplot2, fig.width=7, fig.height=6.5------------------------------------
opar <- par(mgp = c(1.7, 0.5, 0), mar = c(2.7, 2.7, 2, 0.5))
logvartimeplot(res, show = "idi", maxrows = 6)
par(opar)
# Eigenvalue diagnostics on a deliberately over-sized (6-factor) model.
## ----evdiag, fig.width=10, fig.height=4, echo=2:4, results = 'hide'-----------
opar <- par(mgp = c(1.7, 0.5, 0), mar = c(2.7, 2.7, 2, 0.5))
set.seed(6)
largemodel <- fsvsample(y, factors = 6)
evdiag(largemodel)
par(opar)
## ----predcov1, echo=TRUE------------------------------------------------------
set.seed(4)
predcor1 <- predcor(res)
round(apply(predcor1[,,,1], 1:2, mean), 2)
round(apply(predcor1[,,,1], 1:2, sd), 2)
# Draw from the one-step-ahead predictive distribution via the Cholesky
# factor of each predicted covariance draw.
## ----preddist, fig.height = 6, fig.width = 9, echo = TRUE---------------------
set.seed(5)
predcov_1 <- predcov(res)
effectivedraws <- res$config$draws/res$config$thin
preddraws <- matrix(NA_real_, effectivedraws, m)
for (i in seq_len(effectivedraws))
preddraws[i,] <- chol(predcov_1[,,i,1]) %*% rnorm(m)
plotlims <- quantile(preddraws, c(0.01, 0.99))
LSD::heatpairs(preddraws, labels = colnames(y),
cor.cex = 1.5, gap = 0.3, xlim = plotlims, ylim = plotlims)
## ----echo = TRUE--------------------------------------------------------------
set.seed(6)
predloglik(res, matrix(0, nrow = 2, ncol = m), ahead = 1:2, each = 10)
|
2fb9bf908a1e490e9b4d2aae5b6307173e6954b7
|
1a4c6cdabc81e7f06353cfeec61160e5e4a8b5dd
|
/Project 1/histo.R
|
100f3c88f83021701ccef3dc99a8292cdba0e3a9
|
[] |
no_license
|
dhananjaymuddappa/rainman_assignments
|
d8a62ffe93f145939591b395547d319136e4b4c6
|
53c8d5d54b1230530099dbbc3c3515a21981ede4
|
refs/heads/master
| 2021-01-10T12:01:54.831940
| 2016-03-22T20:21:34
| 2016-03-22T20:21:34
| 54,506,113
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 283
|
r
|
histo.R
|
# Plot a red histogram of one column from a CSV file.
#
# fileName: path to a CSV file readable by read.csv().
# param:    name of the column to plot; also used in the plot title.
# Returns the "histogram" object produced by hist() (invisibly).
histo <- function(fileName, param) {
  subject_data <- read.csv(fileName)
  # Build the title from the requested column name.
  plot_title <- paste("Histogram for", param)
  # Red bars with a black border, per the original assignment spec.
  hist(subject_data[, param],
       main = plot_title,
       xlab = "Subject",
       border = "black",
       col = "red")
}
|
0ef236bd4b9f28b1e3e3bd8b3c2e0ef4927dcb02
|
1170116acf04e3e7d5baf8563fd36ee313917573
|
/man/workout.Rd
|
57f7080288d91f9d01fcb9659d66e28785e16724
|
[
"MIT"
] |
permissive
|
r-lib/bench
|
9cbd5403ea2ac07c38066fd922edd0af756f064c
|
8d4ab5ea8219f00cc476a4702df91d2b18f47b12
|
refs/heads/main
| 2023-05-11T03:42:08.981583
| 2023-05-04T17:09:03
| 2023-05-04T17:09:03
| 128,975,118
| 218
| 33
|
NOASSERTION
| 2023-05-04T17:07:41
| 2018-04-10T18:01:13
|
R
|
UTF-8
|
R
| false
| true
| 1,203
|
rd
|
workout.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/workout.R
\name{workout}
\alias{workout}
\alias{workout_expressions}
\title{Workout a group of expressions individually}
\usage{
workout(expr, description = NULL)
workout_expressions(exprs, env = parent.frame(), description = NULL)
}
\arguments{
\item{expr}{one or more expressions to workout, use \code{{}} to pass multiple
expressions.}
\item{description}{A name to label each expression, if not supplied the
deparsed expression will be used.}
\item{exprs}{A list of calls to measure.}
\item{env}{The environment in which the expressions should be evaluated.}
}
\description{
Given an block of expressions in \code{{}} \code{\link[=workout]{workout()}} individually times each
expression in the group. \code{\link[=workout_expressions]{workout_expressions()}} is a lower level function most
useful when reading lists of calls from a file.
}
\examples{
workout({
x <- 1:1000
evens <- x \%\% 2 == 0
y <- x[evens]
length(y)
length(which(evens))
sum(evens)
})
# The equivalent to the above, reading the code from a file
workout_expressions(as.list(parse(system.file("examples/exprs.R", package = "bench"))))
}
|
c7c1b68515db85d4ca90efff042199f1edbd692a
|
39b49fcf536a01a19471e999e25eebba96fe4ef8
|
/ๅบไบ่ฏๅ
ธ็ๆ
ๆๅๆ-ๅด.R
|
23f34926b17ed1c2482956501ee980a75f76996a
|
[] |
no_license
|
ZuoRX/textming
|
fad537d062e1b734cafb1d395ea3764b43fcdb4e
|
3d4595d441cc6c7cd0ef122115c1d8c4c73cc363
|
refs/heads/master
| 2020-05-09T13:40:33.843434
| 2019-04-13T11:48:30
| 2019-04-13T11:48:30
| 181,162,111
| 2
| 0
| null | null | null | null |
GB18030
|
R
| false
| false
| 3,012
|
r
|
ๅบไบ่ฏๅ
ธ็ๆ
ๆๅๆ-ๅด.R
|
# Load required packages.
library(readxl)
library(jiebaR)
library(plyr)
library(wordcloud2)
# Read the review/comment data.
evaluation <-read.csv("C:/Users/lenovo/Desktop/่ฏ่ฎบ.csv",quote = "",sep = "\"", header = T, stringsAsFactors = F)
### Alternatively the data could be imported from Excel:
#evaluation <- read_excel(file.choose())
head(evaluation)
str(evaluation)
## Load the positive/negative sentiment lexicons and the stop-word list
## (each file is chosen interactively).
pos <- readLines(file.choose())
neg <- readLines(file.choose())
stopwords <- readLines(file.choose())
# Inspect the lexicons and the stop words.
pos
stopwords
# Merge the two lexicons into one sentiment dictionary.
mydict <- c(pos, neg)
## Inspect the merged dictionary.
mydict
# Prepare a jieba segmentation engine.
engine <- worker()
sentence <- '่ถ
้ง็ปๅฏ๏ผๆนฟๆฐดไธๆ็ ด'
segment(sentence, engine)
# Add a custom word to the engine (example).
new_user_word(engine, 'ไธๆ็ ด')
segment(sentence, engine)
# Register the whole sentiment dictionary with the engine.
new_user_word(engine, mydict)
## Segment every comment into words.
segwords <- sapply(evaluation$content, segment, engine)
# Show the result.
segwords
head(segwords)
# Next: remove stop words (words carrying no sentiment, e.g. function
# words and interjections).
## Helper function used to remove stop words:
# Drop every element of `target_words` that appears in `stop_words`.
# Both arguments are character vectors; the filtered vector is returned.
removewords <- function(target_words,stop_words){
  # `!(x %in% y)` replaces the original `x %in% y == FALSE` anti-idiom
  # (same result: %in% binds tighter than ==).
  target_words[!(target_words %in% stop_words)]
}
# Strip the stop words from every comment's token list.
segwords2 <- sapply(segwords, removewords, stopwords)
# Inspect the cleaned tokens.
head(segwords2)
# Helpers for scoring each comment's sentiment polarity follow.
# Vectorized membership test: which elements of x occur in y.
fun <- function( x, y) x %in% y
# Score each comment's sentiment against the lexicons.
#   x:      list (or vector) of segmented comments
#   pwords: positive-word lexicon;  nwords: negative-word lexicon
# Returns a data frame with the per-comment positive hit count, negative
# hit count, and their difference (`total`).
getEmotionalType <- function( x,pwords,nwords){
  # Base vapply replaces the original plyr::llply + sapply combination:
  # type-stable (always integer) and no extra package needed here.
  pos.weight = vapply(x, function(words) sum(fun(words, pwords)), integer(1))
  neg.weight = vapply(x, function(words) sum(fun(words, nwords)), integer(1))
  total = pos.weight - neg.weight
  return(data.frame( pos.weight, neg.weight, total))
}
# Compute each comment's positive/negative score.
score <- getEmotionalType(segwords2, pos, neg)
# Peek at the scores.
head(score)
# Attach the scores to the original comments.
evalu.score<- cbind(evaluation, score)
# Show the combined table.
evalu.score
# Label every comment Pos/Neg from the sign of its total score.
evalu.score <- transform(evalu.score,
                          emotion = ifelse(total>= 0, 'Pos', 'Neg'))
# Show the labelled result.
evalu.score
# Randomly sample 10 comments for a manual validation pass.
set.seed( 1)
validation <- evalu.score[sample( 1:nrow(evalu.score),size = 10),]
validation
# Write the scored comments to the desktop.
write.csv(evalu.score,"c:/users/lenovo/desktop/ๆ
ๆๅๆ็ปๆ.csv")
# Word frequencies (stop words already removed).
wf <- unlist(segwords2)
wf <- as.data.frame(table(wf))
wf <- arrange(wf, desc(Freq))
head(wf)
# Word cloud of the 25 most frequent words.
wordcloud2(wf[ 1: 25,], backgroundColor = 'black')
# Helper defined below: keep only words at least 2 characters long.
# Keep only "real" words: those at least two characters long.
# Vectorized replacement for the original grow-a-vector loop.  Note: the
# original returned NULL when nothing qualified; this returns
# character(0), which is type-stable for the downstream table() call
# (table(NULL) errors, table(character(0)) does not).
more2words <- function(x){
  x[nchar(x) > 1]
}
# Rebuild the word list keeping only words of length >= 2.
segwords3 <- more2words(unlist(segwords2))
# Recompute the word frequencies.
wf2 <- unlist(segwords3)
wf2 <- as.data.frame(table(wf2))
wf2 <- arrange(wf2, desc(Freq))
head(wf2)
# Word clouds: top 25 words, then frequency ranks 2-51.
wordcloud2(wf2[ 1: 25,], backgroundColor = 'black')
wordcloud2(wf2[2:51,], backgroundColor = 'black')
|
59f8a3271fa9979e113f0f942fab9d6bea009ffd
|
5f35705a49444701a9a95b4d5f110f8d3243c718
|
/psopso.R
|
b547a205ba9db65e5198f2f9dc4ef98548f15425
|
[] |
no_license
|
bdkv5/Project_2_EXP_DATA
|
8c904498de49a21dbc04b8b5b09bb3b16538458f
|
b63bac34912dc82afd7b5f7e29243831a4f55119
|
refs/heads/master
| 2020-12-24T18:23:08.707837
| 2016-05-10T01:08:40
| 2016-05-10T01:08:40
| 57,287,129
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,605
|
r
|
psopso.R
|
# Compare intra-/inter-cluster scatter of four partitions (k = 6) of the
# iris measurements: k-means, fuzzy clustering (fanny), PAM, and a
# partition computed externally ("hyd") and read from disk.
#
# clv provides cls.scatt.data(); loading it once replaces the four
# repeated library(clv) calls of the original script.
library(clv)

# Mean within-cluster scatter and a normalized between-cluster centroid
# distance for the partition `labels` of `data`.
# NOTE(review): the denominator len - sqrt(len) reproduces the original
# script's normalization; confirm it matches the intended validity index.
cluster_stats <- function(data, labels) {
  scatter <- cls.scatt.data(data, labels)
  n_pairs <- length(scatter$intercls.centroid)
  list(intra = mean(scatter$intracls.average),
       inter = sum(scatter$intercls.centroid) / (n_pairs - sqrt(n_pairs)))
}

# All methods cluster the same four numeric iris columns.
iris_mat <- as.matrix(iris[, 1:4])

km <- kmeans(iris_mat, 6)
km_stats <- cluster_stats(iris_mat, km$cluster)
km_intra <- km_stats$intra
km_inter <- km_stats$inter

fn <- fanny(iris_mat, 6, maxit = 1000)
fn_stats <- cluster_stats(iris_mat, fn$cluster)
fn_intra <- fn_stats$intra
fn_inter <- fn_stats$inter

# Renamed from `pam` to stop shadowing cluster::pam().
pam_fit <- pam(iris_mat, 6)
pam_stats <- cluster_stats(iris_mat, pam_fit$cluster)
pam_intra <- pam_stats$intra
pam_inter <- pam_stats$inter

# External ("hyd") cluster assignment produced by a MATLAB script; the
# 'Data' column holds one label per observation.
out <- read.csv(file = "/Users/bhargavdevarapalli/Documents/MATLAB/hyd_2.txt",sep="", na.strings = "?")
hyd_labels <- as.vector(subset(as.matrix(out), select = 'Data'))
hyd_stats <- cluster_stats(iris_mat, hyd_labels)
hyd_intra <- hyd_stats$intra
hyd_inter <- hyd_stats$inter

# Report: inter-cluster distances first, then intra-cluster scatter
# (same order as the original script).
print(fn_inter)
print(km_inter)
print(pam_inter)
print(fn_intra)
print(km_intra)
print(pam_intra)
print(hyd_inter)
print(hyd_intra)
|
6e280545a7823ef2ac66e60b274fa0795a418249
|
86e964e857881f13fb7c93bfe42c53d9d9829ab6
|
/elevation_analysis.R
|
5f9d4e85a2f7de05f22c98ff78dd3e4f8ce63345
|
[] |
no_license
|
tsze/elevation_analysis
|
4fe6a354dfd4af3f1ad0846e5cc74bc7b700d181
|
9b2938710ddb8f4c5d0b2356b9e112e00bfcc125
|
refs/heads/master
| 2021-01-13T01:27:30.547901
| 2015-07-02T21:54:38
| 2015-07-02T21:54:38
| 38,458,142
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 4,597
|
r
|
elevation_analysis.R
|
###############################################################
# Strava elevation analysis
#
# 1. Load a gpx file of a workout
# 2. The file will be displayed on a map
# 3. For the coordinates in the gpx file, google maps elevation is queried
# 4. The plot and analysis will compare the elevation of the gpx records
#
#
###############################################################
# Read and parse gpx files
library(XML)
file <- file.choose() # will prompt you for an input gpx file
# Parse the GPX file
pfile <- htmlTreeParse(file,error = function (...) {}, useInternalNodes = T)
# Get all elevations, times and coordinates via the respective xpath
elevation <- as.numeric(xpathSApply(pfile, path = "//trkpt/ele", xmlValue))
#times <- xpathSApply(get(gpx.parsed[i]), path = "//trkpt/time", xmlValue) # disabled, as only work for garmin gpx
coords <- xpathSApply(pfile, path = "//trkpt", xmlAttrs)
# Extract latitude and longitude from the coordinates
lon <- as.numeric(coords["lon",])
lat <- as.numeric(coords["lat",])
# Put everything in a dataframe and get rid of old variables
geodf <- data.frame(lon = lon,lat = lat, elevation = elevation)
#geodf <- data.frame(lat = lats, lon = lons, elevation = elevations, time = times) # times works only for garmin
rm(list=c("elevation", "lat", "lon", "coords")) # , "pfile""times", needs to be added if used
# Calculate distance between trackpoints and total distance
# NOTE(review): distCosine() comes from the geosphere package, which is
# never loaded in this script -- confirm it is attached elsewhere.
# NOTE(review): the two arguments differ in length by one row, so the
# final distance pairs the last point with a recycled row -- verify.
distance <- distCosine(geodf[,1:2],geodf[-1,1:2])
total_distance <- cumsum(distance)
geodf <- data.frame(cbind(geodf,distance,total_distance))
# Calculate total elevation gain: lag the elevation by one sample and
# take first differences.
elevation.temp <- c(NA,head(geodf$elevation,-1))
elevation.diff <- geodf$elevation - elevation.temp
rm(elevation.temp)
geodf <- data.frame(cbind(geodf,elevation.diff))
#geodf$id <- 'input'
#colnames(geodf)[colnames(geodf)=='elevation'] <- paste('elevation.',i,sep="")
######################
# Create Map
######################
library(ggmap)
## Create the map with the gps track, centered on the track's median point.
map <- get_map(location = c(lon = median(geodf$lon), lat = median(geodf$lat)),
               zoom = 11,maptype = c("terrain"))
p <- ggmap(map,extent = 'panel')
p <- p+ geom_point(aes(x = lon,y = lat),data = geodf,colour = "red",size = 1,pch = 20)
plot(p)
# Save the on-screen map to a PNG in the working directory.
dev.copy(png,"map.png",width=8,height=6,units="in",res=300)
dev.off()
######################
# Google Maps Elevation
# note: this works only with a google elevation API subscription
# url: https://developers.google.com/maps/documentation/elevation/
######################
library(rgbif)
library(audio)  # presumably loaded for wait() below -- confirm
apikey <- getOption("g_elevation_api")
# Query the elevation service for a block of coordinates, pausing 1s
# after the call (rate limiting).
multi.fun <- function(x) {
  c(elevation(latitude=x$lat,longitude=x$lon,key = apikey),wait(1))
}
#elevation.gmaps <- data.frame(multi.fun(geodf[1:100,])[3])
elevation.gmaps <- read.csv('garmin_google_elevation.csv',header=TRUE) #if locally available (I don't have a Google API account, so used a workaround by splitting up the data)
#elevation.gmaps <- read.csv('strava_google_elevation.csv',header=TRUE) #if locally available (I don't have a Google API account, so used a workaround by splitting up the data)
colnames(elevation.gmaps)[colnames(elevation.gmaps)=='elevation'] <- 'elevation.gmaps'
geodf <- cbind(geodf,elevation.gmaps)
# Calculate total elevation gain for gmaps results (same lagged-
# difference scheme as for the gpx elevations above).
elevation.temp <- c(NA,head(geodf$elevation.gmaps,-1))
elevation.diff.gmaps <- geodf$elevation.gmaps - elevation.temp
rm(elevation.temp)
geodf <- data.frame(cbind(geodf,elevation.diff.gmaps))
######################
# Plot elevation profiles
######################
library("ggplot2")
library("reshape")
## Melt to long format for ggplot2: columns 3 (gpx elevation),
## 5 (total_distance) and 7 (gmaps elevation), keyed by distance.
dat <- melt(geodf[,c(3,5,7)],id.vars ='total_distance')
## One line per elevation source, against cumulative distance.
p <- ggplot(data=dat,aes(x=total_distance, y=value,group=variable,colour=variable))#, group=id,colour=id))
p <- p + geom_line(size=0.5)
p <- p + geom_path(alpha = 0.1)
plot(p)
dev.copy(png,"profile.png",width=8,height=6,units="in",res=300)
dev.off()
######################
# elevation analysis
######################
correlation <- cor(geodf$elevation,geodf$elevation.gmaps) # correlation between measured and gmaps elevation
variance <- var(geodf$elevation,geodf$elevation.gmaps) # covariance (two-argument var) between the two series
# Total positive elevation gain for each source: sum of positive
# sample-to-sample differences.
elevation.gain.gpx <- sum(ifelse(geodf$elevation.diff>0,geodf$elevation.diff,0),na.rm=T)
elevation.gain.gmaps <- sum(ifelse(geodf$elevation.diff.gmaps>0,geodf$elevation.diff.gmaps,0),na.rm=T)
# Relative over-estimation of gain by the gpx device vs gmaps.
print(paste('gpx overfitted by ',elevation.gain.gpx/elevation.gain.gmaps-1,sep=''))
|
5cab151d225deafebbfa3e09bb436ffb80af4871
|
208786be9a52ff77f0b96ba6fa0b66767b9bde53
|
/R_script/K means/k_means_occu_2_5.R
|
c7a400fb21f3fdaa42a9ca77bcacf526106d4f19
|
[] |
no_license
|
federico1ciceri/AR1DP
|
cde17efdf02c216e5e1c6f7f1536c96e81e953ea
|
22cb172345af68ee77928ce32142317da860f4bd
|
refs/heads/main
| 2023-03-03T12:31:30.234623
| 2021-02-17T09:31:32
| 2021-02-17T09:31:32
| 330,157,072
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 18,974
|
r
|
k_means_occu_2_5.R
|
#######################################################
#### Gender bias occupations (jitter) --> k-means #####
#######################################################
# import some libraries
library(clusterCrit)
library(cluster)
library(factoextra)
library(openxlsx)
library(tidyverse)
rm(list = ls())
###########################################################################
# set the seed
set.seed(48)
# load data
load("occu_jitter_names.RData") #carichiamo i dati con i nomi delle occupazioni
#change occujitt and the file name in the other file
occujitt<- occujitt[ order(row.names(occujitt)), ] #sort alphabetically
occu_names<-row.names(occujitt) #we get the names of the rows
filename='k_means_occu_2_5.xlsx' #name of the excel file
n_obs<-dim(occujitt)[1] #number of observations
n_decade<-11 #decades
##### USEFUL FUNCTIONS #####
#we need some functions to find the indeces and the max and min values in a cluster
# function finding the indexes
# Row indices of the observations assigned to `cluster_label` in column
# `decade` of the script-global `cluster_labels` matrix.
find_indexes<-function(decade, cluster_label){
  hits <- cluster_labels[, decade] %in% cluster_label
  as.vector(which(hits))
}
# function finding the min value
# Minimum of occujitt[, decade] over the rows in `indexes`
# (`occujitt` is the script-global data matrix).
find_min_value <-function(decade,indexes){
  # Empty index set: reproduce the original behaviour (the original
  # indexed with indexes[1] == NA), i.e. return NA of the matrix type.
  if (length(indexes) == 0) return(occujitt[NA_integer_, decade])
  # min() replaces the original manual scan loop; unlike the loop it
  # also handles NA values without erroring inside if().
  min(occujitt[indexes, decade])
}
# function finding the max value
# Maximum of occujitt[, decade] over the rows in `indexes`
# (`occujitt` is the script-global data matrix).
find_max_value <-function(decade,indexes){
  # Empty index set: reproduce the original behaviour (the original
  # indexed with indexes[1] == NA), i.e. return NA of the matrix type.
  if (length(indexes) == 0) return(occujitt[NA_integer_, decade])
  # max() replaces the original manual scan loop.
  max(occujitt[indexes, decade])
}
# function finding the mean value of a cluster (centroid)
# Average of the selected rows of the script-global `occujitt` matrix.
find_mean_value <- function(decade, indexes){
  mean(occujitt[indexes, decade])
}
# function finding the standard deviation value of a cluster
# Returns 0 for singleton clusters (where sd() would yield NA).
find_sd_value <- function (decade, indexes){
  if (length(indexes) > 1) {
    sd(occujitt[indexes, decade])
  } else {
    0
  }
}
####################
## K = 2, ..., 5  ##
####################
# The original four K-specific sections were identical except for the
# number of clusters; they are consolidated into one helper driven by a
# loop.  The kmeans() calls happen in exactly the same order as before,
# so the RNG stream (seeded above) and therefore the results are
# unchanged.  The unused per-section `centroids`/`wss`/`std_deviations`
# temporaries (which every section discarded with rm()) are dropped.
#
# For one value of k: run k-means (nstart = 15) on every decade's scores
# and build the two-row-per-decade summary matrix for the Excel report
# (upper row: cluster statistics; lower row: member occupation names).
# Returns list(labels = n_obs x n_decade label matrix,
#              M_final = summary matrix with a leading "Year ..." column).
build_kmeans_summary <- function(n_clust) {
  cluster_labels <- matrix(ncol = n_decade, nrow = n_obs)
  sizes <- matrix(ncol = n_decade, nrow = n_clust)
  for (i in 1:n_decade) {                       # iterate over the decades
    km_res <- kmeans(occujitt[, i], n_clust, nstart = 15)
    cluster_labels[, i] <- as.vector(km_res$cluster)
    sizes[, i] <- as.vector(km_res$size)
  }
  # find_indexes() and the other find_* helpers read the script-global
  # `cluster_labels`, so publish this run's labels before using them.
  assign("cluster_labels", cluster_labels, envir = globalenv())
  M <- matrix(data = NA, nrow = n_decade * 2, ncol = n_clust)
  Y <- c()                                      # "Year ..." row labels
  for (i in 1:n_decade) {
    Y[2 * i] <- sprintf("Year %d", 1890 + 10 * i)
    for (j in 1:n_clust) {
      idx <- find_indexes(i, j)
      # Upper cell: summary statistics of cluster j in decade i.
      M[2 * i - 1, j] <- sprintf(
        "Cluster %d Mean(%f) Std(%f) Min (%f) Max (%f) Count(%d)",
        j, find_mean_value(i, idx), find_sd_value(i, idx),
        find_min_value(i, idx), find_max_value(i, idx), sizes[j, i])
      # Lower cell: the occupations belonging to the cluster.
      M[2 * i, j] <- paste(occu_names[idx], collapse = ", ")
    }
  }
  list(labels = cluster_labels, M_final = cbind(Y, M))
}

# Produce the same globals the rest of the script expects:
# cluster_labels_2..cluster_labels_5 and M_final2..M_final5.
for (k in 2:5) {
  res_k <- build_kmeans_summary(k)
  assign(sprintf("cluster_labels_%d", k), res_k$labels)
  assign(sprintf("M_final%d", k), res_k$M_final)
}
rm(res_k, k, cluster_labels)
##### WRITE IN THE EXCEL FILE #####
# ATTENTION:
# one can use different methods to write in the excel file, just choose the one that works for you
##############FIRST METHOD (library xlsx, rJava required)
# library(xlsx)
#
# wb = xlsx::createWorkbook()
#
# sheet = xlsx::createSheet(wb, "K = 2")
#
# xlsx::addDataFrame(M_final2, sheet=sheet, startColumn=1, row.names=FALSE, col.names=FALSE)
#
# sheet = xlsx::createSheet(wb, "K = 3")
#
# xlsx::addDataFrame(M_final3, sheet=sheet, startColumn=1, row.names=FALSE, col.names=FALSE)
#
# sheet = xlsx::createSheet(wb, "K = 4")
#
# xlsx::addDataFrame(M_final4, sheet=sheet, startColumn=1, row.names=FALSE, col.names=FALSE)
#
# sheet = xlsx::createSheet(wb, "K = 5")
#
# xlsx::addDataFrame(M_final5, sheet=sheet, startColumn=1, row.names=FALSE, col.names=FALSE)
#
# xlsx::saveWorkbook(wb, filename)
##############SECOND METHOD (library openxlsx, no rJava required. Be sure that the excel file has actually 4 sheets)
#
# write.xlsx(
# M_final2,
# filename,
# sheetName = "K=2",
# col.names = FALSE,
# row.names = FALSE,
# append = TRUE,
# showNA = FALSE,
# password = NULL
# )
#
# write.xlsx(
# M_final3,
# filename,
# sheetName = "K=3",
# col.names = FALSE,
# row.names = FALSE,
# append = TRUE,
# showNA = FALSE,
# password = NULL
# )
#
# write.xlsx(
# M_final4,
# filename,
# sheetName = "K=4",
# col.names = FALSE,
# row.names = FALSE,
# append = TRUE,
# showNA = FALSE,
# password = NULL
# )
#
# write.xlsx(
# M_final5,
# filename,
# sheetName = "K=5",
# col.names = FALSE,
# row.names = FALSE,
# append = TRUE,
# showNA = FALSE,
# password = NULL
# )
##############THIRD METHOD (library openxlsx, no rJava required)
# One sheet per k: add the worksheet, then write its summary matrix
# (no header row, no row names), and save the workbook to `filename`.
wb <- createWorkbook()
for (k in 2:5) {
  sheet_name <- sprintf("K=%d", k)
  addWorksheet(wb, sheet_name)
  writeData(wb, sheet = sheet_name, x = get(sprintf("M_final%d", k)),
            colNames = FALSE, rowNames = FALSE)
}
saveWorkbook(wb, file=filename, overwrite = TRUE)
######## Cluster estimates visualization ##################
## (the boxplot images live in the "plots" folder)       ##
# One boxplot per cluster of a single decade's gender-bias scores, with
# jittered points and a zero reference line.  Choose another k by
# passing a different label matrix (e.g. cluster_labels_2) and caption.
#   decade: column index into occujitt (1 = 1900, ..., 11 = 2000)
bias_boxplot <- function(decade, labels_matrix = cluster_labels_4,
                         k_label = "4-Means") {
  data_plot <- as.data.frame(occujitt[, decade])
  data_plot$labels <- as.factor(labels_matrix[, decade])
  ggplot(data_plot, aes(x = labels, y = occujitt[, decade])) +
    geom_boxplot(width = 0.5, color = "black", alpha = 0.6) +
    # sprintf reproduces the original titles byte-for-byte.
    ggtitle(sprintf("Gender bias occu %d (%s)", 1890 + 10 * decade, k_label)) +
    geom_jitter(color = "red", width = 0.15) +
    geom_hline(yintercept = 0, color = "grey")
}

gg_plot_occu_1900 <- bias_boxplot(1)
gg_plot_occu_1950 <- bias_boxplot(6)
gg_plot_occu_2000 <- bias_boxplot(11)

x11()
require(gridExtra)
gg_plot_occu_1900
gg_plot_occu_1950
gg_plot_occu_2000
grid.arrange(gg_plot_occu_1900, gg_plot_occu_1950, gg_plot_occu_2000, ncol=3)
##### Co-clustering representations #######
#### co_clust(labels) builds the co-clustering matrix of a label vector:
#### entry [i, j] is 1 when observations i and j were assigned to the
#### same cluster, 0 otherwise.
co_clust<- function(labels)
{
  # Vectorized replacement of the original O(n^2) double loop; `+ 0`
  # converts the logical comparison matrix to the numeric 0/1 matrix
  # the original produced, and dimnames are stripped to match it.
  m <- outer(labels, labels, "==") + 0
  dimnames(m) <- NULL
  m
}
#### matrix_labels(labels) builds a matrix where entry [i, j] == k means
#### observations i and j were both assigned to cluster k, while 0 means
#### they sit in different clusters (labels assumed to be {1, 2, 3, ...}).
matrix_labels <- function (labels)
{
  n <- length(labels)
  # Vectorized replacement of the original O(n^2) double loop.
  same <- outer(labels, labels, "==")   # TRUE where the labels agree
  result <- matrix(0, nrow = n, ncol = n)
  # Fill each agreeing cell with the (shared) label of its row.
  result[same] <- labels[row(same)[same]]
  return(result)
}
#### Compute both matrices for one decade and plot the labelled one.
dec = 3 #choose a decade (matrix column; decade 1 = year 1900)
co_clust_matrix = co_clust(cluster_labels_3[,dec]) #here choose k
mat_labels = matrix_labels(cluster_labels_3[,dec])
# Plot mat_labels as a heatmap: 0 (different clusters) is white, each
# cluster id gets its own colour.
# NOTE(review): image() assigns colours by value rank over the palette
# and ignores the names given in `cols`; verify the intended
# cluster-id -> colour pairing actually holds.
cols <- c(
  '0' = "#FFFFFF",
  '1' = "#CCCCCC",
  '2' = "#99FF33",
  '3' = "#FFF000",
  '4' = "#3300FF",
  '5' = "#CC0099",
  '6' = "#FF9933",
  '7' = "#FF0000",
  '8' = "#000333",
  '9' = "#CC9966",
  '10' = "#CCCC00"
)
# rev + transpose so the matrix is drawn in conventional row order.
image(1:nrow(mat_labels), 1:ncol(mat_labels), t(apply(mat_labels, 2, rev)), col=cols, xlab= "", ylab="")
title(main =paste("Clustering structure of the observations for 3-means (occu) in decade", dec, sep=" "), font.main = 1)
|
5851b5fe7425ce641a8159d313cf7c8406cf2f54
|
ed129e26a61f1b241a5cf89c825f75134243f331
|
/missingVars.r
|
eccfe61819c6aac5dd82b92de9f8f538e4f3721a
|
[] |
no_license
|
orduek/analysisR_va
|
53d29c8dce0f6115222d181de26cfbbd2ebebdb2
|
3baeba96900065ab963483799bf6c6a8a43daf42
|
refs/heads/master
| 2021-07-04T08:18:33.729832
| 2020-09-29T09:33:02
| 2020-09-29T09:33:02
| 177,653,903
| 0
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 735
|
r
|
missingVars.r
|
# Explore and impute missing values; mice/VIM demo on iris, then applied
# to a PCL (PTSD checklist) score table.
library(mice)
library(missForest)
# Warm-up on iris: inject 10% missing values and inspect the pattern.
data <- iris
summary(iris)
iris.mis <- prodNA(iris, noNA = 0.1)
summary(iris.mis)
# NOTE(review): `antiDP_dat` is not created in this script -- it must
# exist in the workspace before sourcing.  `select`/`filter` below also
# rely on dplyr being attached (only dplyr:: is used explicitly once).
md.pattern(antiDP_dat)
# lets create a data frame just for PCL scores
# then we can merge it back to the original data frame
#
justPCl <- select(antiDP_dat, PTSDIND, contains("PCL"))
md.pattern(justPCl)
library(VIM)
# Visualize the missingness pattern of the PCL columns.
mice_plot <- aggr(justPCl, col=c('navyblue','yellow'),
                  numbers=TRUE, sortVars=TRUE,
                  labels=names(justPCl), cex.axis=.7,
                  gap=3, ylab=c("Missing data","Pattern"))
# Multiple imputation by predictive mean matching (5 imputations).
imputed_Data <- mice(justPCl, m=5, maxit = 50, method = 'pmm') #, seed = 500)
# Random subsample of 50 rows (appears unused afterwards).
inde <- dplyr::sample_n(justPCl, 50)
# Rows with an observed total PCL score.
pclNoNa <- filter(antiDP_dat, !is.na(BPCLTOT))
head(pclNoNa)
|
bfc44da78189dfa25053ea6261ba48300d042785
|
37e0c547fc64f1d18e698d041e2f37e6bd240018
|
/coryGenomeScaleModel.R
|
c28fba43457391b90af58e9347f65d366b80c110
|
[] |
no_license
|
metabdel/genomescale_scripts
|
bbd947e876f0267299fd93ef53ab51974bf94b5d
|
cd2e6ce87020dffeee74580620566c884fe70b1b
|
refs/heads/master
| 2022-12-01T01:47:54.717618
| 2020-08-20T23:14:57
| 2020-08-20T23:14:57
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 31,074
|
r
|
coryGenomeScaleModel.R
|
library(trena)
library(BiocParallel)
library(RPostgreSQL)
library(dplyr)
#----------------------------------------------------------------------------------------------------
# Bring in the TF-motif mapping
motifsgenes <- readRDS("/ssd/cory/github/genomescale_scripts/2017_10_26_Motif_TF_Map.RDS")
#----------------------------------------------------------------------------------------------------
# Build one TReNA gene model per target gene, in parallel.
#
# For each gene in `gene.list`: query the footprint database for
# candidate TF regulators around the gene's TSS, then fit an
# EnsembleSolver on the expression matrix using those candidates.
#
#   mtx.assay       expression matrix
#   gene.list       target genes to model (iterated with bplapply)
#   genome.db.uri / project.db.uri   database URIs for FootprintFilter
#   size.upstream / size.downstream  TSS window, in bp
#   num.cores       BiocParallel workers (default: half the machine)
#   nCores.sqrt     forwarded to EnsembleSolver -- presumably the cores
#                   for its sqrt-lasso component; confirm in trena docs
#   solverNames     solvers combined by the ensemble
#
# Returns a list, one element per gene: whatever solve() yields for the
# ensemble, or NULL when the footprint query failed or found no TFs.
createGenomeScaleModel <- function(mtx.assay,
                                   gene.list,
                                   genome.db.uri,
                                   project.db.uri,
                                   size.upstream=1000,
                                   size.downstream=1000,
                                   num.cores = NULL,
                                   nCores.sqrt = 4,
                                   solverNames){
    # Close any leftover PostgreSQL connections before spawning workers.
    lapply(dbListConnections(dbDriver(drv="PostgreSQL")), dbDisconnect)
    # Setup the parallel structure with a default of half the cores.
    # NOTE(review): detectCores() belongs to the parallel package, which
    # is not attached in this file -- confirm it is available at runtime.
    if(is.null(num.cores)){
        num.cores <- detectCores()/2}
    # Use BiocParallel
    register(MulticoreParam(workers = num.cores,
                            stop.on.error = FALSE,
                            log = TRUE),
             default = TRUE)
    # Worker: build the model for a single target gene.
    createGeneModel <- function(target.gene, mtx.assay, genome.db.uri, project.db.uri,
                                size.upstream, size.downstream, solverNames){
        # Create the footprint filter and get candidates with it
        footprint.filter <- FootprintFilter(genomeDB = genome.db.uri,
                                            footprintDB = project.db.uri,
                                            geneCenteredSpec = list(targetGene = target.gene,
                                                                    tssUpstream = size.upstream,
                                                                    tssDownstream = size.downstream),
                                            regionsSpec = list())
        # try() so one failing gene does not abort the whole run.
        out.list <- try(getCandidates(footprint.filter),silent = TRUE)
        # Solve the trena problem using the supplied values and the ensemble solver
        if(!(class(out.list) == "try-error")){
            if(length(out.list$tfs) > 0){
                trena <- EnsembleSolver(mtx.assay,
                                        targetGene = target.gene,
                                        candidateRegulators = out.list$tfs,
                                        solverNames = solverNames,
                                        nCores.sqrt = nCores.sqrt)
                return(solve(trena))
            }
            else{return(NULL)}
        }
        else{return(NULL)}
    }
    # Run the function for the gene list using bplapply
    result <- bplapply(gene.list, createGeneModel,
                       mtx.assay = mtx.assay,
                       genome.db.uri = genome.db.uri,
                       project.db.uri = project.db.uri,
                       size.upstream = size.upstream,
                       size.downstream = size.downstream,
                       solverNames = solverNames)
    return(result)
} # createGenomeScaleModel
#----------------------------------------------------------------------------------------------------
# Note: Run this on a dataframe of regions, including gene names (geneSymbol column)
# For each genomic region, query the footprint database and translate
# the motif hits into TF names via the script-global `motifsgenes` map.
# Regions are processed in chunks of 10 rows, in parallel via bplapply.
# Returns a list keyed by geneSymbol; entries whose lookup produced a
# bare character value (error strings / "No footprints found") are kept,
# non-character entries are dropped at the end (see NOTE below).
getTfsFromDb <- function(regions, genome.db.uri, project.db.uri,
                         size.upstream=5000, size.downstream=5000, num.cores = 8){
    # Setup the parallel structure with a default of half the cores
    # if(is.null(num.cores)){
    #     num.cores <- detectCores()/2}
    # Use BiocParallel
    register(MulticoreParam(workers = num.cores,
    # register(SerialParam(
                            stop.on.error = FALSE,
                            log = TRUE),
             default = TRUE)
    # Transform the given regions into a list of region dataframes.
    # NOTE(review): `regions.wo.genes` is computed but never used --
    # dfToList below is called on the full `regions`.  Verify whether the
    # gene-free frame was the intended chunking input.
    regions.wo.genes <- dplyr::select(regions, -geneSymbol)
    # Split the dataframe into a list of 10-row dataframes (plus one
    # remainder chunk when nrow is not a multiple of 10).
    dfToList <- function(regions){
        df.list <- list()
        for(i in 1:floor(nrow(regions)/10)){
            idx1 <- 10*i-9
            idx2 <- 10*i
            df.list[[i]] <- regions[idx1:idx2,]
        }
        if(nrow(regions) %% 10 != 0){
            i <- floor(nrow(regions)/10)
            idx1 <- 10*i+1
            idx2 <- nrow(regions)
            df.list[[i+1]] <- regions[idx1:idx2,]
        }
        return(df.list)
    }
    regions.list <- dfToList(regions)
    # Map a motif table to the unique TFs listed for those motifs in the
    # global `motifsgenes` mapping.
    convertMotifsToTfs <- function(motifs){
        # Catch footprints that don't exist (character sentinel values).
        if(is.character(motifs)) return(NA)
        tf.df <- motifsgenes %>%
            filter(motif %in% motifs$motifName)
        return(unique(tf.df$tf))
    }
    # Normalize one lookup result: pass error strings through, flag empty
    # tables, otherwise keep just the motifName column.
    selectOrNA <- function(output){
        # If it's a character sentinel, return it unchanged.
        if(is.character(output)){
            return(output)
        } else if(nrow(output) == 0){
            return("No footprints found")}
        return(dplyr::select(output, motifName))
    }
    # Worker run per 10-row chunk: build the filter, fetch candidates,
    # and convert each region's motifs to TF names.
    findGeneFootprints <- function(regions, genome.db.uri, project.db.uri){
        # Create the footprint filter from the target gene
        footprint.filter <- try(FootprintFilter(genomeDB = genome.db.uri,
                                                footprintDB = project.db.uri,
                                                regions = regions),
                                silent = TRUE)
        # Only grab candidates if the filter is valid
        if(class(footprint.filter) == "FootprintFilter"){
            out.list <- getCandidates(footprint.filter)
            # Catch empty lists
            if(length(out.list) == 0) return(character(0))
            # Only return TFs if candidate grab is not null
            if(class(out.list) != "NULL"){
                motif.list <- lapply(out.list, selectOrNA)
                tf.list <- lapply(motif.list, convertMotifsToTfs)
                return(tf.list)
            } else {
                return("No Candidates Found")
            }
        } else{
            # Propagate the try-error message as a character value.
            return(footprint.filter[1])
        }
    }
    full.result.list <- bplapply(regions.list, findGeneFootprints,
                                 genome.db.uri = genome.db.uri,
                                 project.db.uri = project.db.uri)
    # Un-nest the per-chunk lists and name entries after the genes supplied.
    full.result.list <- unlist(full.result.list, recursive = FALSE)
    names(full.result.list) <- regions$geneSymbol
    # Drop entries whose content is not a character vector.
    # NOTE(review): convertMotifsToTfs returns NA (logical) for missing
    # footprints and a character vector of TFs otherwise, so this removes
    # the NA entries -- confirm that is the intended filter.
    no.fp <- which(!(sapply(full.result.list, is.character)))
    full.result.list[no.fp] <- NULL
    return(full.result.list)
} # getTfsFromDb
#------------------------------------------------------------------------------------------------------
# Solve one pre-fetched candidate set per gene with the legacy TReNA
# ensemble API, in parallel over a PSOCK cluster.
#   mtx.assay: expression matrix handed to TReNA
#   gene.list: NAMED list; names are target genes, each element carries a
#              $tfs vector of candidate regulators (or a try-error)
#   extraArgs: forwarded to solve()
# NOTE(review): TReNA(), foreach/%dopar%, and doParallel helpers are not
# loaded in this file -- confirm the caller attaches them.
createSpecialModel <- function(mtx.assay, gene.list, num.cores = NULL,
                               extraArgs = list()){
    trena <- TReNA(mtx.assay, solver = "ensemble")
    #lapply(dbListConnections(dbDriver(drv="PostgreSQL")), dbDisconnect)
    # Setup the parallel structure with a default of half the cores
    if(is.null(num.cores)){
        num.cores <- detectCores()/2}
    cl <- makePSOCKcluster(num.cores)
    registerDoParallel(cl)
    # .errorhandling="pass" keeps per-gene failures from killing the run.
    full.result.list <- foreach(i = 1:length(names(gene.list)), .packages='TReNA', .errorhandling="pass") %dopar% {
        # Designate the target gene and grab the tfs
        target.gene <- names(gene.list)[[i]]
        # Solve the trena problem using the supplied values and the ensemble solver
        if(!(class(gene.list[[target.gene]]) == "try-error")){
            if(length(gene.list[[target.gene]]$tfs) > 0){
                solve(trena, target.gene, gene.list[[target.gene]]$tfs, extraArgs = extraArgs)}
            else{NULL}
        }
        else{NULL}
    }
    # Stop the cluster
    stopCluster(cl)
    # Name the list after the genes supplied
    names(full.result.list) <- names(gene.list)
    return(full.result.list)
} # createSpecialModel
#----------------------------------------------------------------------------------------------------
getTfsFromAllDbs <- function(mtx.assay, gene.list, genome.db.uri, project.list,
                             size.upstream=1000, size.downstream=1000, num.cores = NULL)
{
  # For each gene, query every footprint database in project.list and return
  # the union of candidate transcription factors found in any of them.
  #
  # Args:
  #   mtx.assay:      expression matrix backing the FootprintFilter
  #   gene.list:      character vector of target genes
  #   genome.db.uri:  postgres URI of the genome database
  #   project.list:   postgres URIs of the footprint project databases
  #   size.upstream/size.downstream: window around the TSS, in bp
  #   num.cores:      worker count; defaults to all cores minus one
  #
  # Returns: list named by gene; each element is the deduplicated union of
  #          TFs found across all databases (character(0) when none found).
  footprint.filter <- FootprintFilter(mtx.assay = mtx.assay)
  # Setup the parallel structure
  if(is.null(num.cores)){
    num.cores <- detectCores() - 1}
  cl <- makeForkCluster(num.cores)
  registerDoParallel(cl)
  result.list <- foreach(i = seq_along(gene.list)) %dopar% {
    # Designate the target gene and collect TFs from each database in turn
    my.target <- gene.list[[i]]
    all.tfs <- character()
    for(project in project.list){
      out.list <- try(getCandidates(footprint.filter, extraArgs = list(
        "target.gene" = my.target,
        "genome.db.uri" = genome.db.uri,
        "project.db.uri" = project,
        "size.upstream" = size.upstream,
        "size.downstream" = size.downstream)),
        silent = TRUE)
      # Accumulate only successful lookups that actually found TFs
      if(!inherits(out.list, "try-error") && length(out.list$tfs) > 0){
        all.tfs <- c(all.tfs, out.list$tfs)
      }
    }
    # Return the union across databases
    unique(all.tfs)
  }
  # Stop the cluster
  stopCluster(cl)
  # Name the list after the genes supplied
  names(result.list) <- gene.list
  return(result.list)
} # getTfsFromAllDbs
#----------------------------------------------------------------------------------------------------
createAverageModel <- function(mtx.assay, gene.list, num.cores = NULL,
                               extraArgs = list()){
  # Build one ensemble TReNA model per target gene, in parallel.
  # Unlike createSpecialModel, each gene.list element is itself the candidate
  # regulator vector (no $tfs field).
  #
  # Args:
  #   mtx.assay: expression matrix used by the solver
  #   gene.list: named list; names are target genes, elements are candidate
  #              regulator vectors (or try-errors from an upstream lookup)
  #   num.cores: worker count; defaults to all cores minus one
  #   extraArgs: forwarded to TReNA's solve()
  #
  # Returns: list of solver results named after gene.list; NULL entries mark
  #          genes that errored upstream or had no candidates.
  trena <- TReNA(mtx.assay, solver = "ensemble")
  # Setup the parallel structure
  if(is.null(num.cores)){
    num.cores <- detectCores() - 1}
  cl <- makePSOCKcluster(num.cores)
  registerDoParallel(cl)
  full.result.list <- foreach(i = seq_along(gene.list), .packages='TReNA', .errorhandling="pass") %dopar% {
    # Designate the target gene and grab the tfs
    target.gene <- names(gene.list)[[i]]
    # Solve only when the upstream lookup succeeded and found candidates
    if(!inherits(gene.list[[target.gene]], "try-error") &&
       length(gene.list[[target.gene]]) > 0){
      solve(trena, target.gene, gene.list[[target.gene]], extraArgs = extraArgs)
    } else {
      NULL
    }
  }
  # Always release the workers
  stopCluster(cl)
  # Name the list after the genes supplied
  names(full.result.list) <- names(gene.list)
  return(full.result.list)
} # createAverageModel
#----------------------------------------------------------------------------------------------------
createModelFromGeneList <- function(mtx.assay, gene.list, num.cores = NULL,
                                    solverList = c("lasso","ridge"),
                                    nCores.sqrt = 2){
  # Build one EnsembleSolver model per target gene using BiocParallel.
  #
  # Args:
  #   mtx.assay:   expression matrix for the solvers
  #   gene.list:   named list; names are target genes, elements are candidate
  #                regulator vectors
  #   num.cores:   BiocParallel worker count; defaults to half the cores
  #   solverList:  solver names passed to EnsembleSolver
  #   nCores.sqrt: cores given to the sqrtlasso solver
  #
  # Returns: list of run() results, named after the (filtered) gene.list.
  #
  # Remove genes from the list that don't have any TFs.
  # NOTE(review): length == 1 also drops genes with exactly one candidate
  # regulator, not just single error strings — confirm this is intended.
  rm.idx <- which(sapply(gene.list, length) == 1)
  gene.list[rm.idx] <- NULL
  # Guard the NULL default so MulticoreParam always gets a worker count
  # (sibling functions in this file use the same half-the-cores default)
  if(is.null(num.cores)){
    num.cores <- detectCores()/2}
  # Create parallel structure w/ BiocParallel
  register(MulticoreParam(workers = num.cores,
                          stop.on.error = FALSE,
                          log = TRUE),
           default = TRUE)
  # Build an EnsembleSolver for the idx-th gene and run it
  buildAndSolveForGene <- function(idx, gene.list, mtx.assay, solverList, nCores.sqrt){
    e.solver <- EnsembleSolver(mtx.assay = mtx.assay,
                               targetGene = names(gene.list)[idx],
                               candidateRegulators = gene.list[[idx]],
                               solverNames = solverList,
                               nCores.sqrt = nCores.sqrt)
    return(run(e.solver))
  }
  full.result.list <- bptry(bplapply(seq_along(gene.list), buildAndSolveForGene,
                                     gene.list = gene.list,
                                     mtx.assay = mtx.assay,
                                     solverList = solverList,
                                     nCores.sqrt = nCores.sqrt
                                     )
                            )
  # Name the list after the genes supplied
  names(full.result.list) <- names(gene.list)
  return(full.result.list)
} # createModelFromGeneList
#----------------------------------------------------------------------------------------------------
getTfsFromSampleIDs <- function(gene.list, sampleIDs, genome.db.uri, project.db.uri,
                                size.upstream=1000, size.downstream=1000, num.cores = 8){
  # For each target gene, query one footprint database for a window around the
  # TSS and return the TFs whose footprints occur in the requested samples.
  #
  # Args:
  #   gene.list:      character vector of target gene symbols
  #   sampleIDs:      sample ids used to subset the footprint table
  #   genome.db.uri:  postgres URI of the genome database
  #   project.db.uri: postgres URI of the footprint project database
  #   size.upstream/size.downstream: window around the TSS, in bp
  #   num.cores:      BiocParallel workers; half the cores when NULL
  #
  # Returns: list named by gene; each element is a TF character vector, or an
  #          error/status string when the lookup failed.
  if(is.null(num.cores)){
    num.cores <- detectCores()/2}
  # Use BiocParallel
  register(MulticoreParam(workers = num.cores,
                          stop.on.error = FALSE,
                          log = TRUE),
           default = TRUE)
  findGeneFootprints <- function(target.gene, genome.db.uri, project.db.uri,
                                 size.upstream, size.downstream, sampleIDs){
    # Build a gene-centered footprint filter; try() guards bad genes/DBs
    footprint.filter <- try(FootprintFilter(genomeDB = genome.db.uri,
                                            footprintDB = project.db.uri,
                                            geneCenteredSpec = list(targetGene = target.gene,
                                                                    tssUpstream = size.upstream,
                                                                    tssDownstream = size.downstream),
                                            regionsSpec = list()),
                            silent = TRUE)
    # Only grab candidates if the filter is valid
    if(inherits(footprint.filter, "FootprintFilter")){
      out.list <- getCandidates(footprint.filter)
      if(!is.null(out.list)){
        # Keep only footprints observed in the requested samples
        out.list$tbl <- filter(out.list$tbl, sample_id %in% sampleIDs)
        out.list$tfs <- unique(out.list$tbl$tf)
        return(out.list$tfs)
      } else {
        return("No Candidates Found")
      }
    } else {
      # Filter construction failed; surface the try-error text
      return(footprint.filter[1])
    }
  }
  full.result.list <- bplapply(gene.list, findGeneFootprints,
                               genome.db.uri = genome.db.uri,
                               project.db.uri = project.db.uri,
                               size.upstream = size.upstream,
                               size.downstream = size.downstream,
                               sampleIDs = sampleIDs)
  # Name the list after the genes supplied
  names(full.result.list) <- gene.list
  return(full.result.list)
} # getTfsFromSampleIDs
#------------------------------------------------------------------------------------------------------
getTfsFromSampleIDsMultiDB <- function(gene.list, sampleIDs, genome.db.uri, projectList,
                                       size.upstream=1000, size.downstream=1000, num.cores = 8){
  # Like getTfsFromSampleIDs, but queries every database in projectList and
  # returns the per-gene union of TFs across databases.
  #
  # Args:
  #   gene.list:     character vector of target gene symbols
  #   sampleIDs:     sample ids used to subset the footprint table
  #   genome.db.uri: postgres URI of the genome database
  #   projectList:   postgres URIs of the footprint project databases
  #   size.upstream/size.downstream: window around the TSS, in bp
  #   num.cores:     BiocParallel workers; half the cores when NULL
  #
  # Returns: list named by gene; each element is the union of TFs found in
  #          any database.
  if(is.null(num.cores)){
    num.cores <- detectCores()/2}
  # Use BiocParallel
  register(MulticoreParam(workers = num.cores,
                          stop.on.error = FALSE,
                          log = TRUE),
           default = TRUE)
  # Query one database for one gene's TSS window, filtered by sampleIDs
  findGeneFootprints <- function(target.gene, genome.db.uri, project.db.uri,
                                 size.upstream, size.downstream, sampleIDs){
    footprint.filter <- try(FootprintFilter(genomeDB = genome.db.uri,
                                            footprintDB = project.db.uri,
                                            geneCenteredSpec = list(targetGene = target.gene,
                                                                    tssUpstream = size.upstream,
                                                                    tssDownstream = size.downstream),
                                            regionsSpec = list()),
                            silent = TRUE)
    if(inherits(footprint.filter, "FootprintFilter")){
      out.list <- getCandidates(footprint.filter)
      if(!is.null(out.list)){
        # Keep only footprints observed in the requested samples
        out.list$tbl <- filter(out.list$tbl, sample_id %in% sampleIDs)
        out.list$tfs <- unique(out.list$tbl$tf)
        return(out.list$tfs)
      } else {
        return("No Candidates Found")
      }
    } else {
      return("Cannot create filter")
    }
  }
  # Loop through the database list and accumulate the TF union for one gene
  combineTFsFromDBs <- function(target.gene, genome.db.uri, projectList,
                                size.upstream, size.downstream, sampleIDs){
    all.tfs <- character(0)
    for(project.db.uri in projectList){
      new.tfs <- findGeneFootprints(target.gene, genome.db.uri, project.db.uri,
                                    size.upstream, size.downstream, sampleIDs)
      # NOTE(review): status strings ("No Candidates Found", "Cannot create
      # filter") are unioned in alongside real TF names — confirm whether
      # they should be skipped instead.
      all.tfs <- union(all.tfs, new.tfs)
    }
    # Return the full list
    return(all.tfs)
  }
  full.result.list <- bplapply(gene.list, combineTFsFromDBs,
                               genome.db.uri = genome.db.uri,
                               projectList = projectList,
                               size.upstream = size.upstream,
                               size.downstream = size.downstream,
                               sampleIDs = sampleIDs)
  # Name the list after the genes supplied
  names(full.result.list) <- gene.list
  return(full.result.list)
} # getTfsFromSampleIDsMultiDB
#----------------------------------------------------------------------------------------------------
# Note: Run this on a dataframe of regions, including gene names (geneSymbol column)
getTfsFromMultiDB <- function(regions, genome.db.uri, projectList,num.cores = 8){
  # Find candidate TFs for each region (rows of `regions`, which must include
  # a geneSymbol column) by querying every footprint database in projectList
  # and taking the per-gene union of hits.
  #
  # Returns: list named by geneSymbol; each element is a character vector of
  # TFs. Entries that did not resolve to a character vector are dropped.
  #
  # Split the regions dataframe into chunks of at most 10 rows so chunks can
  # be dispatched to parallel workers. seq_len() fixes the original
  # 1:floor(n/10) bug, which iterated c(1, 0) and produced NA rows whenever
  # nrow(regions) < 10.
  dfToList <- function(regions){
    df.list <- list()
    n.full <- nrow(regions) %/% 10          # number of complete 10-row chunks
    for(i in seq_len(n.full)){
      df.list[[i]] <- regions[(10*i - 9):(10*i), ]
    }
    if(nrow(regions) %% 10 != 0){           # leftover rows form a short chunk
      df.list[[n.full + 1]] <- regions[(10*n.full + 1):nrow(regions), ]
    }
    return(df.list)
  }
  regions.list <- dfToList(regions)
  # Map motif names onto transcription factors via the motifsgenes table
  convertMotifsToTfs <- function(motifs){
    # Catch footprints that don't exist (status strings from upstream)
    if(is.character(motifs)) return(NA)
    tf.df <- motifsgenes %>%
      filter(motif %in% motifs$motifName)
    return(unique(tf.df$tf))
  }
  # Normalize a query result: pass strings through, flag empty tables,
  # otherwise keep only the motifName column
  selectOrNA <- function(output){
    if(is.character(output)){
      return(output)
    } else if(nrow(output) == 0){
      return("No footprints found")}
    return(dplyr::select(output, motifName))
  }
  # Query one footprint database for the supplied regions
  findGeneFootprints <- function(regions, genome.db.uri, project.db.uri){
    footprint.filter <- try(FootprintFilter(genomeDB = genome.db.uri,
                                            footprintDB = project.db.uri,
                                            regions = regions),
                            silent = TRUE)
    # Only grab candidates if the filter is valid
    if(inherits(footprint.filter, "FootprintFilter")){
      out.list <- getCandidates(footprint.filter)
      # Score thresholds chosen empirically: >= 200 drops ~2/3 of hint hits;
      # <= -15 drops ~1/3 of wellington hits (wellington is more conservative)
      if(grepl("hint", project.db.uri)){
        out.list <- lapply(out.list, filter, score1 >= 200)
      }
      else if(grepl("wellington", project.db.uri)){
        out.list <- lapply(out.list, filter, score1 <= -15)
      }
      # Catch empty lists
      if(length(out.list) == 0) return(character(0))
      if(!is.null(out.list)){
        motif.list <- lapply(out.list, selectOrNA)
        tf.list <- lapply(motif.list, convertMotifsToTfs)
        return(tf.list)
      } else {
        return("No Candidates Found")
      }
    } else{
      return(footprint.filter[1])
    }
  }
  # Query every database for one chunk of regions, unioning TF lists per gene
  combineTFsFromDBs <- function(regions, genome.db.uri, projectList){
    # The footprint query takes coordinates only; peel off the gene names
    regions.wo.genes <- dplyr::select(regions, -geneSymbol)
    # Collapse multiple entries for the same gene into one unique TF vector
    collapseByGene <- function(tfs){
      sapply(unique(names(tfs)),
             function(x) unique(unlist(tfs[names(tfs) == x], use.names = FALSE)),
             simplify = FALSE)
    }
    # Seed with the first database ...
    all.tfs <- findGeneFootprints(regions.wo.genes, genome.db.uri, projectList[1])
    names(all.tfs) <- regions$geneSymbol
    all.tfs <- collapseByGene(all.tfs)
    # ... then union in the remaining databases (index 1 is already queried;
    # the original loop re-queried it redundantly)
    for(i in seq_along(projectList)[-1]){
      new.tfs <- findGeneFootprints(regions.wo.genes, genome.db.uri, projectList[i])
      names(new.tfs) <- regions$geneSymbol
      new.tfs <- collapseByGene(new.tfs)
      # Consolidate the 2 lists.
      # NOTE(review): mapply() with default SIMPLIFY=TRUE can collapse the
      # result to a matrix when all TF vectors have equal length — confirm
      # whether SIMPLIFY=FALSE is wanted here.
      keys <- names(all.tfs)
      all.tfs <- setNames(mapply(union,
                                 all.tfs[keys],
                                 new.tfs[keys]),
                          keys)
    }
    # Return the full list
    return(all.tfs)
  }
  # Use BiocParallel
  register(MulticoreParam(workers = num.cores,
                          stop.on.error = FALSE,
                          log = TRUE),
           default = TRUE)
  full.result.list <- bplapply(regions.list, combineTFsFromDBs,
                               genome.db.uri = genome.db.uri,
                               projectList = projectList)
  # Un-nest the per-chunk lists into one flat per-gene list
  full.result.list <- unlist(full.result.list, recursive = FALSE)
  # Drop entries that did not resolve to a character vector of TFs
  # (e.g. NA from a failed motif conversion)
  no.fp <- which(!(sapply(full.result.list, is.character)))
  full.result.list[no.fp] <- NULL
  return(full.result.list)
} # getTfsFromMultiDB
#----------------------------------------------------------------------------------------------------
# Example Cory Script
# Assume my.mtx is the matrix, hg38 is the genome.db, brain is the tissue, shoulder is 5000
# Also assume this has 128 cores!!
# Step 1: Get all the genes
# Example driver: find candidate TFs for every gene in the expression matrix,
# then build ensemble models for all of them.
# NOTE(review): the getTfsFromMultiDB defined above takes
# (regions, genome.db.uri, projectList, num.cores); the size.upstream /
# size.downstream arguments passed here match the gene-list variant
# (getTfsFromSampleIDsMultiDB) and would be rejected as unused arguments by
# the regions-based version — confirm which function is intended.
testRun <- function(my.mtx){
  all.genes <- getTfsFromMultiDB(rownames(my.mtx),
                                 genome.db.uri = "postgres://localhost/hg38",
                                 projectList = c("postgres://localhost/brain_hint_20",
                                                 "postgres://localhost/brain_hint_16",
                                                 "postgres://localhost/brain_wellington_20",
                                                 "postgres://localhost/brain_wellington_16"),
                                 size.upstream = 5000,
                                 size.downstream = 5000,
                                 num.cores = 100)
  # Step 2: Use all the genes to make ALL the models
  # (the assignment's value is returned invisibly as the function result)
  all.models <- createModelFromGeneList(my.mtx, all.genes, num.cores = 30,
                                        solverList = c("lasso","ridge","pearson",
                                                       "spearman","randomforest",
                                                       "lassopv","sqrtlasso"),
                                        nCores.sqrt = 4)
}
#----------------------------------------------------------------------------------------------------
# Map Illumina HumanHT-12 v4 probe IDs to promoter windows around each gene's
# transcription start site, via biomaRt.
#
# Args:
#   probeIDs:      Illumina probe identifiers to look up
#   tssUpstream:   bp added on the high-coordinate side of the TSS
#   tssDownstream: bp subtracted on the low-coordinate side of the TSS
#
# Returns: data.frame with geneSymbol / chrom / start / end (one row per
#          gene), or NA when biomaRt returns no rows for the probes.
getProxProbesPromoter <- function(probeIDs,
                                  tssUpstream = 5000,
                                  tssDownstream = 5000){
  # Switch the name of the database and filter we use
  db.name <- "hsapiens_gene_ensembl"
  filter.name <- "illumina_humanht_12_v4"
  my.mart <- biomaRt::useMart(biomart="ensembl", dataset= db.name)
  tbl.geneInfo <- biomaRt::getBM(attributes=c("chromosome_name",
                                              "transcription_start_site",
                                              "transcript_tsl",
                                              "hgnc_symbol",
                                              filter.name),
                                 filters=filter.name, value=probeIDs, mart=my.mart)
  if(nrow(tbl.geneInfo) == 0)
    return(NA)
  # Sort by probe id and transcript support level, then keep the first
  # (best-supported) entry for each probe via match() on the unique ids
  tbl.geneInfo <- tbl.geneInfo[order(tbl.geneInfo[[filter.name]],
                                     tbl.geneInfo$transcript_tsl),]
  tbl.geneInfo <- tbl.geneInfo[match(unique(tbl.geneInfo[[filter.name]]),
                                     tbl.geneInfo[[filter.name]]),]
  # remove contigs and check to make sure it's just 1 chromosome
  tbl.geneInfo <- subset(tbl.geneInfo, chromosome_name %in% c(1:22, "X", "Y", "MT"))
  chrom <- sprintf("chr%s", tbl.geneInfo$chromosome_name)
  tss <- tbl.geneInfo$transcription_start_site
  # NOTE(review): start is built from tssDownstream and end from tssUpstream;
  # for +-strand genes "upstream" lies at lower coordinates, so these look
  # swapped (strand is never consulted) — confirm intent.
  start.loc <- tss - tssDownstream
  end.loc <- tss + tssUpstream
  temp <- data.frame(geneSymbol=tbl.geneInfo$hgnc_symbol,
                     chrom=chrom,
                     start=start.loc,
                     end=end.loc,
                     stringsAsFactors=FALSE)
  # Keep one window per gene symbol (first occurrence wins)
  return (temp[!(duplicated(temp$geneSymbol)),])
}
#----------------------------------------------------------------------------------------------------
# How to call it; a sample function
# Example of calling getTfsFromMultiDB on a regions dataframe against the
# four brain footprint databases, using 30 cores.
sampleCall <- function(regions){
  # Footprint databases to query: two HINT and two Wellington brain projects
  fp.projects <- paste0("postgres://localhost/",
                        c("brain_hint_20", "brain_hint_16",
                          "brain_wellington_20", "brain_wellington_16"))
  # Collect candidate TFs across all four databases with 30 workers;
  # returned invisibly, matching the original assignment-as-last-expression
  invisible(getTfsFromMultiDB(regions, "postgres://localhost/hg38", fp.projects, 30))
} #sampleCall
# For Cory: assuming you've called your file "my.regions"
# my.stuff <- sampleCall(my.regions)
|
6e51a8d01798b3f7b9464cffb61a79a28f70a19a
|
130c45c5c2983020b27665af9b008b0171ef0662
|
/martymeninopayroll1.R
|
ac558e106d34d7356b16e12179916a8bc88ad937
|
[] |
no_license
|
andrewbtran/scripts
|
f0032cee6a66b68d71840d7ad8a9b65ca34f3bd8
|
042e78c82b3bb88d1df606fab5a70e99978763bd
|
refs/heads/master
| 2021-03-12T22:12:59.425729
| 2014-08-06T21:29:36
| 2014-08-06T21:29:36
| 20,237,435
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,065
|
r
|
martymeninopayroll1.R
|
# --- Boston payroll: gender/salary comparison, Menino vs Walsh eras ---------
# NOTE(review): setwd() with an absolute user path makes this script
# non-portable; prefer paths relative to a project root.
setwd("C:/Users/andrew.tran/Downloads")
payroll = read.csv("newpayroll.csv")
# Parse service date so the era subsets below compare chronologically
payroll$Service.Dt = as.Date(payroll$Service.Dt)
# Walsh ("marty") era starts 2014-01-06; Menino era spans 1993-07-12..2014-01-05
marty = subset(payroll, Service.Dt >= "2014-01-06")
menino = subset(payroll, Service.Dt < "2014-01-06")
menino = subset(menino, Service.Dt >= "1993-07-12")
# Earlier variant restricted to Mayor's Office departments (kept for reference)
#martyA = subset(marty, Department.Name =="Mayor's Office")
#meninoA = subset(menino, Department.Name=="Mayor's Office")
#martyB = subset(marty, Department.Name =="Mayor's Office-Public Info")
#meninoB = subset(menino, Department.Name=="Mayor's Office-Public Info")
#marty = rbind(martyA, martyB)
#menino = rbind(meninoA, meninoB)
# Split "Last, First ..." names: foo$X1 = last name, foo$X2 = given-name part
foo <- data.frame(do.call('rbind', strsplit(as.character(menino$Employee.Name),',',fixed=TRUE)))
# First token of the given-name part is the first name used for gender matching
foo2 <- data.frame(do.call('rbind', strsplit(as.character(foo$X2)," ")))
menino$Name = foo2$X1
menino$Last.Name = foo$X1
# NOTE(review): `names` shadows base::names() for the rest of the script
names = read.csv("malefemalenames.csv")
# Attach probable gender by first name; default merge drops unmatched names
meninogenders = merge(menino, names, by="Name")
write.csv(meninogenders, "meninonames.csv")
# Gender x union-status counts for the Menino era
meninounion = table(meninogenders$prob.gender, meninogenders$Union.Status)
write.csv(meninounion, "meninounion.csv")
# Repeat the name split / gender merge for the Walsh-era subset
foo <- data.frame(do.call('rbind', strsplit(as.character(marty$Employee.Name),',',fixed=TRUE)))
foo2 <- data.frame(do.call('rbind', strsplit(as.character(foo$X2)," ")))
marty$Name = foo2$X1
marty$Last.Name = foo$X1
names = read.csv("malefemalenames.csv")
martygenders = merge(marty, names, by="Name")
write.csv(martygenders, "martynames.csv")
# Reload the Menino table from the CSV written above
meninogenders = read.csv("meninonames.csv")
martyunion = table(martygenders$prob.gender, martygenders$Union.Status)
write.csv(martyunion, "martyunion.csv")
# Menino-era salary summaries split by probable gender
malemenino = subset(meninogenders, prob.gender=="Male")
femalemenino = subset(meninogenders, prob.gender=="Female")
unknownmenino = subset(meninogenders, prob.gender=="Unknown")
median(malemenino$Annual.Rt)
median(femalemenino$Annual.Rt)
median(unknownmenino$Annual.Rt)
mean(malemenino$Annual.Rt)
mean(femalemenino$Annual.Rt)
mean(unknownmenino$Annual.Rt)
# Median and mean annual salary by union status within each gender group
tapply(malemenino$Annual.Rt, malemenino$Union.Status, median)
tapply(femalemenino$Annual.Rt, femalemenino$Union.Status, median)
tapply(unknownmenino$Annual.Rt, unknownmenino$Union.Status, median)
tapply(malemenino$Annual.Rt, malemenino$Union.Status, mean)
tapply(femalemenino$Annual.Rt, femalemenino$Union.Status, mean)
tapply(unknownmenino$Annual.Rt, unknownmenino$Union.Status, mean)
# Same salary summaries for the Walsh era
malemarty = subset(martygenders, prob.gender=="Male")
femalemarty = subset(martygenders, prob.gender=="Female")
unknownmarty = subset(martygenders, prob.gender=="Unknown")
median(malemarty$Annual.Rt)
median(femalemarty$Annual.Rt)
median(unknownmarty$Annual.Rt)
mean(malemarty$Annual.Rt)
mean(femalemarty$Annual.Rt)
mean(unknownmarty$Annual.Rt)
tapply(malemarty$Annual.Rt, malemarty$Union.Status, median)
tapply(femalemarty$Annual.Rt, femalemarty$Union.Status, median)
tapply(unknownmarty$Annual.Rt, unknownmarty$Union.Status, median)
tapply(malemarty$Annual.Rt, malemarty$Union.Status, mean)
tapply(femalemarty$Annual.Rt, femalemarty$Union.Status, mean)
tapply(unknownmarty$Annual.Rt, unknownmarty$Union.Status, mean)
|
7e7b3e2cd00f9203bfc3a9c06af4bded350f8057
|
cfe1a4bb705d1ced1cab374d8fecde20bf4f8f8b
|
/R projects/Exploratory Data Analysis on Official Flu Data (CDS) using R/Part 3/sagnikghLab1Part3shiny.R
|
0f8e516955344bff1059323a918b4ed653534b64
|
[] |
no_license
|
githubsagnik/UB-Projects
|
73cb81fb5ea65aa3f9744f1e1e192283ed37ade1
|
f90cfd6ccaef0fb4ffeb0ecfb56cd7a82259f5a7
|
refs/heads/master
| 2020-07-30T03:24:26.977195
| 2019-09-22T06:23:32
| 2019-09-22T06:23:32
| 210,069,156
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,397
|
r
|
sagnikghLab1Part3shiny.R
|
#install.packages("shinyWidgets")
#install.packages("shinythemes")
library(shiny)
library(shinyWidgets)
library(datasets)
library(shinythemes)
# App URL: https://sagnikgh-dic-lab1.shinyapps.io/FluHeatmap/
# Shiny app: a dropdown selects one of six pre-rendered flu heatmap PNGs
# (CDC vs Twitter comparisons) which is shown in the main panel.
ui <- fluidPage(theme = shinytheme("superhero"),
                titlePanel("Data Intensive Computing: Lab 1: Part3"),
                sidebarLayout(
                  sidebarPanel(
                    helpText("Select Heatmap from below list"),
                    # Dropdown: display labels map to short codes consumed by
                    # the server's switch() below
                    selectInput("var",
                                label = "Choose:",
                                choices = c("2018-19 Seasonal CDC HeatMap vs 2019 Jan 4th Weekly CDC HeatMap" = "cdc0",
                                            "2018-19 Seasonal CDC HeatMap vs Twitter(#flu, #illness, #disease, #Influenza)" = "cdc1",
                                            "2019 Jan 4th Week CDC HeatMap vs Twitter(#flu, #illness, #disease, #Influenza)" = "cdc2",
                                            "#Influenza vs #Fluseason" = "flu1",
                                            "#Flushot vs #Fluseason" = "flu2",
                                            "#Flushot vs #Influenza" = "flu3"),
                                # NOTE(review): "Percent White" is not one of
                                # the choice values above — confirm intended
                                # default (Shiny will not match it).
                                selected = "Percent White")
                  ),
                  #mainPanel(plotOutput("map"))
                  mainPanel(imageOutput("myImage"))
                )
)
server <- function(input, output) {
  # Map the selected code to its pre-rendered 840x840 PNG
  output$myImage <- renderImage({
    switch(input$var,
           "cdc0" = list(src = 'AverageVSLastweek.png', contentType = 'image/png', width = 840, height = 840),
           "cdc1" = list(src = 'CDCvsTwitter.png', contentType = 'image/png', width = 840, height = 840),
           "cdc2" = list(src = 'CDCvsTwitter(Last Week).png', contentType = 'image/png', width = 840, height = 840),
           "flu1" = list(src = 'InfluVSseason.png', contentType = 'image/png', width = 840, height = 840),
           "flu2" = list(src = 'shotVSseason.png', contentType = 'image/png', width = 840, height = 840),
           "flu3" = list(src = 'InfluenzaVSFlushot.png', contentType = 'image/png', width = 840, height = 840)
           )
  # deleteFile = FALSE: the images are static assets, keep them after render
  }, deleteFile = FALSE)
}
shinyApp(ui, server)
|
45fbaf0c1973911f51cc820724393e2036fe6b36
|
101c721fddf7b7235e233e58c514f769ce1d0897
|
/pkg/OPI/man/MOCS.Rd
|
e0fd7b226302609a8514359b2290e09f8f034f3e
|
[
"Apache-2.0"
] |
permissive
|
turpinandrew/OPI
|
b426334e847db985c0f608adaadec25ed04b8545
|
b201374e0032cf7c7f231cce9b2ba8e30913700b
|
refs/heads/master
| 2023-08-31T03:19:04.586421
| 2023-08-21T09:41:03
| 2023-08-21T09:41:03
| 11,876,860
| 9
| 5
| null | 2017-08-09T17:26:04
| 2013-08-04T10:01:17
|
R
|
UTF-8
|
R
| false
| true
| 8,996
|
rd
|
MOCS.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/mocs.r
\name{MOCS}
\alias{MOCS}
\title{Method of Constant Stimuli (MOCS)}
\usage{
MOCS(
params = NA,
order = "random",
responseWindowMeth = "constant",
responseFloor = 1500,
responseHistory = 5,
keyHandler = function(correct, ret) return(list(seen = TRUE, time = 0, err = NULL)),
interStimMin = 200,
interStimMax = 500,
beep_function,
makeStim,
stim_print,
...
)
}
\arguments{
\item{params}{A matrix where each row is \code{x y i n correct_n ll1 ll2 ... llm} where
\itemize{
\item{\code{x} is X coordinate of location}
\item{\code{y} is Y coordinate of location}
 \item{\code{i} is a location number (assigned by caller)}
\item{\code{n} is Number of times this location/luminance(s) should be repeated}
\item{\code{correct_n} is the index i of the luminance level (\code{lli}) that
should be treated as a ``correct'' response (the correct interval). For a
standard MOCS, this will be 1; for a 2AFC, this will be 1 or 2. This number will
be in the range \code{[1,m]}.}
\item{\code{lli} is the i'th luminance level to be used at this location for
interval i of the presentation in cd/\eqn{\mbox{m}^2}{m^2}. For a standard MOCS,
i=1, and the \code{params} matrix will have 5 columns. For a 2AFC, there will be
two lli's, and \code{params} will have 6 columns.}
}}
\item{order}{Control the order in which the stimuli are presented.
\itemize{
\item{\code{"random"} Randomise the order of trials/locations.}
\item{\code{"fixed"} Present each row of \code{params} in order of
\code{1:nrow(params)}, ignoring the \code{n} (4th) column in \code{params}.}
}}
\item{responseWindowMeth}{Control time perimeter waits for response.
\itemize{
\item{\code{"speed"} After an average of the last \code{speedHistory}
response times, with a minimum of \code{responseFloor}. Initially
\code{responseFloor}.}
\item{\code{"constant"} Always use \code{responseFloor}.}
\item{\code{"forceKey"} Wait for a keyboard input.}
}}
\item{responseFloor}{Minimum response window (for any \code{responseWindowMeth}
except \code{"forceKey"}).}
\item{responseHistory}{Number of past yeses to average to get response window
(only used if \code{responseWindowMeth} is \code{"speed"}).}
\item{keyHandler}{Function to get a keyboard input and returns as for \code{opiPresent}:
list(err={NULL|msg}, seen={TRUE|FALSE}, time = response time (in ms)). The parameters passed to
the function are the correct interval number (column 4 of \code{params}), and the
result of \code{opiPresent}. See Examples.}
\item{interStimMin}{Regardless of response, wait \code{runif(interStimMin, interStimMax)} ms.}
\item{interStimMax}{Regardless of response, wait \code{runif(interStimMin, interStimMax)} ms.}
\item{beep_function}{A function that takes the string \code{'correct'}, the string
\code{'incorrect'}, or a stimulus number and plays an appropriate sound. See examples.}
\item{makeStim}{A helper function to take a row of \code{params} and a response window length
in ms, and create a list of OPI stimuli types for passing to opiPresent. This may include a
\code{checkFixationOK} function. See Example.}
\item{stim_print}{A function that takes an \code{opiStaticStimulus} and return list from
\code{opiPresent} and returns a string to print for each presentation. It is called
immediately after each \code{opiPresent}, and the string is prepended with the
(x,y) coordinates of the presentation and ends with a newline.}
\item{...}{Extra parameters to pass to the opiPresent function.}
}
\value{
Returns a data.frame with one row per stimulus copied from params with extra columns
appended: checkFixation checks, and the return values from \code{opiPresent()}
(see example). These last values will differ depending on which
machine/simulation you are running (as chosen with \code{chooseOpi()}.
\itemize{
\item{column 1: x}
\item{column 2: y}
\item{column 3: location number}
\item{column 4: number of times to repeat this stim}
\item{column 5: correct stimulus index}
\item{column 6: TRUE/FALSE was fixating for all presentations in this trial according to
\code{checkFixationOK}}
\item{column 7...: columns from params}
\item{...: columns from opiPresent return}
}
}
\description{
MOCS performs either a yes/no or n-interval-forced-choice Method of
Constant Stimuli test
}
\details{
Whether the test is yes/no or forced-choice is determined by the number of columns
in \code{params}. The code simply presents all columns from 5 onwards and collects a
response at the end. So if there is only 5 columns, it is a yes/no task. If there are 6
columns it is a 2-interval-forced-choice. Generally, an nIFC experiment has 4+n columns in
\code{params}.
Note that when the \code{order} is \code{"random"}, the number of trials in the test will be
the sum of the 4th column of \code{params}. When the \code{order} is \code{"fixed"}, there is
only one presentation per row, regardless of the value in the 4th column of \code{params}.
If a response is received before the final trial in a nIFC experiment, it is ignored.
If the \code{checkFixationOK} function is present in a stimulus, then it is called after each
presentation, and the result is ``anded'' with each stimulus in a trial to get a TRUE/FALSE
for fixating on all stimuli in a trial.
}
\examples{
# For the Octopus 900
# Check if pupil centre is within 10 pixels of (160,140)
checkFixationOK <- function(ret) return(sqrt((ret$pupilX - 160)^2 + (ret$pupilY - 140)^2) < 10)
# Return a list of opi stim objects (list of class opiStaticStimulus) for each level (dB) in
# p[5:length(p)]. Each stim has responseWindow BETWEEN_FLASH_TIME, except the last which has
# rwin. This one assumes p is on old Octopus 900 dB scale (0dB == 4000 cd/m^2).
makeStim <- function(p, rwin) {
BETWEEN_FLASH_TIME <- 750 # ms
res <- NULL
for(i in 5:length(p)) {
s <- list(x=p[1], y=p[2], level=dbTocd(p[i],4000/pi), size=0.43, duration=200,
responseWindow=ifelse(i < length(p), BETWEEN_FLASH_TIME, rwin),
checkFixationOK=NULL)
class(s) <- "opiStaticStimulus"
res <- c(res, list(s))
}
return(res)
}
################################################################
# Read in a key press 'z' is correct==1, 'm' otherwise
# correct is either 1 or 2, whichever is the correct interval
#
# Return list(seen={TRUE|FALSE}, time=time, err=NULL))
# seen is TRUE if correct key pressed
################################################################
\dontrun{
if (length(dir(".", "getKeyPress.py")) < 1)
stop('Python script getKeyPress.py missing?')
}
keyHandler <- function(correct, ret) {
ONE <- "b'z'"
TWO <- "b'm'"
time <- Sys.time()
key <- 'q'
while (key != ONE && key != TWO) {
a <- system('python getKeyPress.py', intern=TRUE)
key <- a # substr(a, nchar(a), nchar(a))
print(paste('Key pressed: ',key,'from',a))
if (key == "b'8'")
stop('Key 8 pressed')
}
time <- Sys.time() - time
if ((key == ONE && correct == 1) || (key == TWO && correct == 2))
return(list(seen=TRUE, time=time, err=NULL))
else
return(list(seen=FALSE, time=time, err=NULL))
}
################################################################
# Read in return value from opipresent with F310 controller.
# First param is correct, next is 1 for left button, 2 for right button
# Left button (LB) is correct for interval 1, RB for interval 2
# correct is either 1 or 2, whichever is the correct interval
#
# Return list(seen={TRUE|FALSE}, time=time, err=NULL))
# seen is TRUE if correct key pressed
################################################################
F310Handler <- function(correct, opiResult) {
z <- opiResult$seen == correct
opiResult$seen <- z
return(opiResult)
}
################################################################
# 2 example beep_function
################################################################
\dontrun{
require(beepr)
myBeep <- function(type='None') {
if (type == 'correct') {
beepr::beep(2) # coin noise
Sys.sleep(0.5)
}
if (type == 'incorrect') {
beepr::beep(1) # system("rundll32 user32.dll,MessageBeep -1") # system beep
#Sys.sleep(0.0)
}
}
require(audio)
myBeep <- function(type="None") {
if (type == 'correct') {
wait(audio::play(sin(1:10000/10)))
}
if (type == 'incorrect') {
wait(audio::play(sin(1:10000/20)))
}
}
}
################################################################
# An example stim_print function
################################################################
\dontrun{
stim_print <- function(s, ret) {
sprintf("\%4.1f \%2.0f",cdTodb(s$level,10000/pi), ret$seen)
}
}
}
\references{
A. Turpin, P.H. Artes and A.M. McKendrick. "The Open Perimetry Interface: An enabling tool for
clinical visual psychophysics", Journal of Vision 12(11) 2012.
}
\seealso{
\code{\link{dbTocd}}, \code{\link{opiPresent}}
}
|
25631d0ec0a62db2b0057d0e5c722107d88bf468
|
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
|
/data/genthat_extracted_code/synthACS/examples/all_geog_optimize_microdata.Rd.R
|
b5efd628fc3e396de729c32223c789369f96ef2e
|
[] |
no_license
|
surayaaramli/typeRrh
|
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
|
66e6996f31961bc8b9aafe1a6a6098327b66bf71
|
refs/heads/master
| 2023-05-05T04:05:31.617869
| 2019-04-25T22:10:06
| 2019-04-25T22:10:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 552
|
r
|
all_geog_optimize_microdata.Rd.R
|
library(synthACS)
### Name: all_geog_optimize_microdata
### Title: Optimize the selection of a micro data population for a set of
### geographies.
### Aliases: all_geog_optimize_microdata
### ** Examples
## Not run:
##D # assumes that micro_synthetic and cll already exist in your environment
##D # see: examples for derive_synth_datasets() and all_geogs_add_constraint()
##D optimized_la <- all_geog_optimize_microdata(micro_synthetic, prob_name= "p",
##D constraint_list_list= cll, p_accept= 0.01, max_iter= 1000L)
## End(Not run)
|
8885f9ea317570847a98e86500a44057bbb1b4a8
|
7f28759b8f7d4e2e4f0d00db8a051aecb5aa1357
|
/R/manuscript_code/age_trends.R
|
2c9cd3fb0bfd0e3a34d76bbcc3ab67cd872fc297
|
[] |
no_license
|
DataFusion18/TreeRings
|
02b077d7ed2a5980ae35be7c04a60c28f0ba3928
|
e57f6ee4d774d2bda943f009b148e6e054e6c1d1
|
refs/heads/master
| 2023-03-29T02:44:34.186155
| 2021-03-31T00:01:13
| 2021-03-31T00:01:13
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 229,767
|
r
|
age_trends.R
|
library(dplR)
library(ggplot2)
library(plyr)
library(raster)
library(data.table)
library(rgdal)
library(mgcv)
library(tidyr)
library(SPEI)
library(boot)
library(dplyr)
# lets look atrelationship to climate with age:
setwd("/Users/kah/Documents/TreeRings")
#####################################
#read in rwl & add site + year codes#
#####################################
# quick function to read, detrend, and add the year as a column:
# this function will also just calculate BAI instead
# Read ring widths for one site, average cores to the tree level, detrend
# (or compute basal-area increment), and return the tree-level series.
#
# Args:
#   filename: Tucson-format .rwl file for the site (for sites with a
#             hand-cleaned CSV, only its year rownames are borrowed).
#   method:   detrending method handed to dplR::detrend(), e.g.
#             "ModNegExp" or "Spline"; only used when rwiorbai == "rwi".
#   rwiorbai: "rwi" -> detrended ring-width index; anything else -> basal
#             area increment via dplR::bai.out().
#   site:     site code; prefixed onto the tree-level series names.
#
# Returns: data.frame of tree-level detrended series with added `year`
#   and `site` columns (years also present as rownames).
# Side effects: writes summary stats, a .crn chronology, a spaghetti-plot
#   .png, and the per-core detrended series under outputs/ and cleanrwl/.
read_detrend_year <- function( filename, method , rwiorbai, site){
  # Sites with hand-cleaned widths come from cleanrwl/<site>ww.csv;
  # everything else is read straight from the Tucson file.
  if(site %in% c("HIC", "AVO", "UNI", "GLL1", "GLL2", "GLL3")){
    newseries <- read.csv(paste0("cleanrwl/",site,"ww.csv"))
    rwl.stats(newseries)
    # the CSV carries no year rownames, so borrow them from the Tucson file
    file.tuc <- read.tucson( filename )
    rownames(newseries) <- rownames(file.tuc)
  } else if(site %in% "GLL4"){
    newseries <- read.csv(paste0("cleanrwl/",site,"ww.csv"))
    rownames(newseries) <- newseries$year
    newseries <- newseries[,1:(length(newseries)-1)] # remove yr column
    rwl.stats(newseries)
    # BUGFIX: the Tucson file used to be re-read unconditionally after this
    # branch, silently clobbering the cleaned GLL4 CSV loaded above.
  } else {
    newseries <- read.tucson( filename )
    rwl.stats(newseries)
  }
  # average the cores by tree (for the sites with multiple cores):
  gp.treeMean2 <- treeMean(newseries, autoread.ids(newseries), na.rm=TRUE)
  mean.rwl.stat <- rwl.stats(gp.treeMean2)
  write.csv(mean.rwl.stat, paste0("outputs/Stats/mean.rwl.stats.", site,".csv"))
  # detrend to an RWI, or convert widths to basal-area increments
  # (plain if/else instead of the former side-effectful scalar ifelse())
  if (rwiorbai == "rwi") {
    detrended <- detrend(rwl = newseries, method = method)
  } else {
    detrended <- bai.out(rwl = newseries)
  }
  # tree-level means of the detrended series; HIC and GLL4 need bespoke
  # series-id masks for read.ids()
  if(site %in% "HIC"){
    detrended.mean <- treeMean(detrended, read.ids(detrended, stc = c(3,4,1)), na.rm=TRUE)
    colnames(detrended.mean) <- paste0(site,colnames(detrended.mean))
  } else if(site %in% "GLL4"){
    detrended.mean <- treeMean(detrended, read.ids(detrended, stc = c(4,7,1)), na.rm=TRUE)
    # quick fix for GLL4: hand-assigned tree names
    colnames(detrended.mean) <- c("GLL41", "GLL413", "GLL414", "GLL415", "GLL42", "GLL45", "GLL47", "GLL48", "GLL49")
  } else {
    detrended.mean <- treeMean(detrended, autoread.ids(detrended), na.rm=TRUE)
    colnames(detrended.mean) <- paste0(site,colnames(detrended.mean))
  }
  mean.rwi.stat <- rwl.stats(detrended.mean)
  write.csv(mean.rwi.stat, paste0("outputs/Stats/mean.rwi.stats.", site,".csv"))
  # save chronology for plotting moving correlations of chronologies
  # NOTE(review): GLL3 maps to "GL2", the same prefix as GLL2 -- looks like
  # a typo for "GL3"; left unchanged pending confirmation.
  site.short <- ifelse(site %in% "GLL1", "GL1",
                       ifelse(site %in% "GLL2", "GL2",
                              ifelse(site %in% "GLL3", "GL2",
                                     ifelse(site %in% "GLL4", "GL4",site))))
  crnl <- chron(detrended.mean, prefix = site.short)
  write.crn(crnl, paste0("outputs/chron/", site, "_",rwiorbai,"_",method,".crn"))
  # plot spag plots:
  png(paste0("outputs/spagplots/", site, "_", rwiorbai,"_mean_", method,"_detrended.png"))
  plot(detrended.mean, "spag")
  dev.off()
  detrended.mean$year <- rownames(detrended.mean)
  detrended.mean$site<- site
  # note: the per-core (not tree-averaged) detrended series are written out
  write.csv(detrended,paste0("cleanrwl/detrended_rwi_", site, ".csv"))
  detrended.mean
}
#calculate BAI or the detrended RWI: switch the rwiorbai argument
# NOTE: the three batches below reuse the SAME variable names, so each run
# overwrites the previous one; detrended.list (bottom) therefore holds the
# results of whichever batch ran last (method = "none" as written).
# The *.bai names are historical -- with rwiorbai = "rwi" they hold RWI.
# --- batch 1: ModNegExp detrending -------------------------------------
Hickory.bai <- read_detrend_year(filename = "cleanrwl/HICww.rwl", method = "ModNegExp", rwiorbai = "rwi", site = "HIC")
StCroix.bai <- read_detrend_year("cleanrwl/STCww.rwl", method = "ModNegExp", rwiorbai = "rwi", site = "STC")
Bonanza.bai <- read_detrend_year("cleanrwl/BONww.rwl", method = "ModNegExp", rwiorbai = "rwi", site = "BON")
Townsend.bai <- read_detrend_year("cleanrwl/TOWww.rwl", method = "ModNegExp", rwiorbai = "rwi", site = "TOW")#townsedn woods
Pleasant.bai <- read_detrend_year("cleanrwl/PLEww.rwl", method = "ModNegExp", rwiorbai = "rwi", site = "PLE") #Pleasant valley conservency
Coral.bai <- read_detrend_year("cleanrwl/CORww.rwl", method = "ModNegExp", rwiorbai = "rwi", site = "COR")
Uncas.bai <- read_detrend_year("cleanrwl/UNCww.rwl", method = "ModNegExp", rwiorbai = "rwi", site = "UNC")
Glacial.bai <- read_detrend_year("cleanrwl/GLAww.rwl", method = "ModNegExp", rwiorbai = "rwi", site = "GLA")
Englund.bai <- read_detrend_year("cleanrwl/ENGww.rwl", method = "ModNegExp", rwiorbai = "rwi", site = "ENG")
Mound.bai <- read_detrend_year("cleanrwl/MOUww.rwl", method = "ModNegExp", rwiorbai = "rwi", site = "MOU")
GLL1.bai <- read_detrend_year(filename = "cleanrwl/GLL1ww.rwl", method = "ModNegExp", rwiorbai = "rwi", site = "GLL1")
GLL2.bai <- read_detrend_year("cleanrwl/GLL2ww.rwl", method = "ModNegExp", rwiorbai = "rwi", site = "GLL2")
GLL3.bai <- read_detrend_year("cleanrwl/GLL3ww.rwl", method = "ModNegExp", rwiorbai = "rwi", site = "GLL3")
GLL4.bai <- read_detrend_year("cleanrwl/GLL4ww.rwl", method = "ModNegExp", rwiorbai = "rwi", site = "GLL4")
PVC.bai <- read_detrend_year("cleanrwl/PVCww.rwl", method = "ModNegExp", rwiorbai = "rwi", site = "PVC")
AVO.bai <- read_detrend_year(filename = "cleanrwl/AVOww.rwl", method = "ModNegExp", rwiorbai = "rwi", site = "AVO")
UNI.bai <- read_detrend_year("cleanrwl/UNIww.rwl", method = "ModNegExp", rwiorbai = "rwi", site = "UNI")
# --- batch 2: Spline detrending (overwrites batch 1) -------------------
Hickory.bai <- read_detrend_year(filename = "cleanrwl/HICww.rwl", method = "Spline", rwiorbai = "rwi", site = "HIC")
StCroix.bai <- read_detrend_year("cleanrwl/STCww.rwl", method = "Spline", rwiorbai = "rwi", site = "STC")
Bonanza.bai <- read_detrend_year("cleanrwl/BONww.rwl", method = "Spline", rwiorbai = "rwi", site = "BON")
Townsend.bai <- read_detrend_year("cleanrwl/TOWww.rwl", method = "Spline", rwiorbai = "rwi", site = "TOW")#townsedn woods
Pleasant.bai <- read_detrend_year("cleanrwl/PLEww.rwl", method = "Spline", rwiorbai = "rwi", site = "PLE") #Pleasant valley conservency
Coral.bai <- read_detrend_year("cleanrwl/CORww.rwl", method = "Spline", rwiorbai = "rwi", site = "COR")
Uncas.bai <- read_detrend_year("cleanrwl/UNCww.rwl", method = "Spline", rwiorbai = "rwi", site = "UNC")
Glacial.bai <- read_detrend_year("cleanrwl/GLAww.rwl", method = "Spline", rwiorbai = "rwi", site = "GLA")
Englund.bai <- read_detrend_year("cleanrwl/ENGww.rwl", method = "Spline", rwiorbai = "rwi", site = "ENG")
Mound.bai <- read_detrend_year("cleanrwl/MOUww.rwl", method = "Spline", rwiorbai = "rwi", site = "MOU")
GLL1.bai <- read_detrend_year(filename = "cleanrwl/GLL1ww.rwl", method = "Spline", rwiorbai = "rwi", site = "GLL1")
GLL2.bai <- read_detrend_year("cleanrwl/GLL2ww.rwl", method = "Spline", rwiorbai = "rwi", site = "GLL2")
GLL3.bai <- read_detrend_year("cleanrwl/GLL3ww.rwl", method = "Spline", rwiorbai = "rwi", site = "GLL3")
GLL4.bai <- read_detrend_year("cleanrwl/GLL4ww.rwl", method = "Spline", rwiorbai = "rwi", site = "GLL4")
PVC.bai <- read_detrend_year("cleanrwl/PVCww.rwl", method = "Spline", rwiorbai = "rwi", site = "PVC")
AVO.bai <- read_detrend_year(filename = "cleanrwl/AVOww.rwl", method = "Spline", rwiorbai = "rwi", site = "AVO")
UNI.bai <- read_detrend_year("cleanrwl/UNIww.rwl", method = "Spline", rwiorbai = "rwi", site = "UNI")
# --- batch 3: no detrending (overwrites batch 2) -----------------------
# NOTE(review): dplR::detrend() may not accept method = "none" -- confirm
# against the installed dplR version before relying on this batch.
Hickory.bai <- read_detrend_year(filename = "cleanrwl/HICww.rwl", method = "none", rwiorbai = "rwi", site = "HIC")
StCroix.bai <- read_detrend_year("cleanrwl/STCww.rwl", method = "none", rwiorbai = "rwi", site = "STC")
Bonanza.bai <- read_detrend_year("cleanrwl/BONww.rwl", method = "none", rwiorbai = "rwi", site = "BON")
Townsend.bai <- read_detrend_year("cleanrwl/TOWww.rwl", method = "none", rwiorbai = "rwi", site = "TOW")#townsedn woods
Pleasant.bai <- read_detrend_year("cleanrwl/PLEww.rwl", method = "none", rwiorbai = "rwi", site = "PLE") #Pleasant valley conservency
Coral.bai <- read_detrend_year("cleanrwl/CORww.rwl", method = "none", rwiorbai = "rwi", site = "COR")
Uncas.bai <- read_detrend_year("cleanrwl/UNCww.rwl", method = "none", rwiorbai = "rwi", site = "UNC")
Glacial.bai <- read_detrend_year("cleanrwl/GLAww.rwl", method = "none", rwiorbai = "rwi", site = "GLA")
Englund.bai <- read_detrend_year("cleanrwl/ENGww.rwl", method = "none", rwiorbai = "rwi", site = "ENG")
Mound.bai <- read_detrend_year("cleanrwl/MOUww.rwl", method = "none", rwiorbai = "rwi", site = "MOU")
GLL1.bai <- read_detrend_year(filename = "cleanrwl/GLL1ww.rwl", method = "none", rwiorbai = "rwi", site = "GLL1")
GLL2.bai <- read_detrend_year("cleanrwl/GLL2ww.rwl", method = "none", rwiorbai = "rwi", site = "GLL2")
GLL3.bai <- read_detrend_year("cleanrwl/GLL3ww.rwl", method = "none", rwiorbai = "rwi", site = "GLL3")
GLL4.bai <- read_detrend_year("cleanrwl/GLL4ww.rwl", method = "none", rwiorbai = "rwi", site = "GLL4")
PVC.bai <- read_detrend_year("cleanrwl/PVCww.rwl", method = "none", rwiorbai = "rwi", site = "PVC")
AVO.bai <- read_detrend_year(filename = "cleanrwl/AVOww.rwl", method = "none", rwiorbai = "rwi", site = "AVO")
UNI.bai <- read_detrend_year("cleanrwl/UNIww.rwl", method = "none", rwiorbai = "rwi", site = "UNI")
# collect per-site tree-level series from the LAST batch run above
detrended.list <- list(Hickory.bai, StCroix.bai, Bonanza.bai,Townsend.bai,Pleasant.bai, Coral.bai,
                       Uncas.bai, Glacial.bai, Englund.bai, Mound.bai, GLL1.bai, GLL2.bai,
                       GLL3.bai, GLL4.bai, PVC.bai, AVO.bai)#, UNI.bai) # omitting UNI right now
# read in the site level data for each of these sites:
test <- read.csv("data/site_maps/stand_metadata/GLL1_full_xy.csv")
# NOTE(review): `detrended.mean` is a local variable inside
# read_detrend_year(); it does not exist at top level unless the function
# body was run line-by-line interactively -- this check will error when
# the script is source()d.
test$short %in% colnames(detrended.mean)
# make example chronology to plot out (Hickory / HIC site, raw widths):
hic.raw <- read.rwl("cleanrwl/HICww.rwl")
hic.raw$year <- as.numeric(row.names( hic.raw))
# NOTE(review): ggplot objects inside png() devices are only rendered when
# auto-printed at the top level; when source()d these PNGs will be blank
# unless the calls are wrapped in print().
png(width=6,height=4,units="in",res = 300,bg = "transparent","raw_rw_transparent.png")
ggplot(Hickory.bai, aes(hic.raw$year, hic.raw[,11]))+geom_line(color = "white")+theme_minimal()+theme(panel.grid.major = element_blank(), panel.grid.minor = element_blank(), axis.line = element_line(colour = "white"), axis.text = element_text(colour = "white"), axis.title = element_text(color = "white"))+ylab("Raw Ring Width")+xlab("Year")
dev.off()
png(width=6,height=4,units="in",res = 300,bg = "transparent","raw_rw_transparent_short.png")
ggplot(Hickory.bai, aes(hic.raw$year, hic.raw[,8]))+geom_line(color = "white")+theme_minimal()+theme(panel.grid.major = element_blank(), panel.grid.minor = element_blank(), axis.line = element_line(colour = "white"), axis.text = element_text(colour = "white"), axis.title = element_text(color = "white"))+ylab("Raw Ring Width")+xlab("Year")
dev.off()
# melt() here resolves to data.table::melt (data.table is attached above);
# Hickory.bai is a plain data.frame, so expect a delegation warning
Hic.m<- melt(Hickory.bai, id.vars = c('year','site'))
Hic.m$year <- as.numeric(Hic.m$year)
Hickory.bai$year <- as.numeric(Hickory.bai$year)
# detrended versions of the same two example series
png(width=6,height=4,units="in",res = 300,bg = "transparent","det_transparent.png")
ggplot(Hickory.bai, aes(Hickory.bai$year, Hickory.bai[,11]))+geom_line(color = "white")+theme_minimal()+theme(panel.grid.major = element_blank(), panel.grid.minor = element_blank(), axis.line = element_line(colour = "white"), axis.text = element_text(colour = "white"), axis.title = element_text(color = "white"))+ylab("Detrended Ring Width Index")+xlab("Year")
dev.off()
png(width=6,height=4,units="in",res = 300,bg = "transparent","det_transparent_short.png")
ggplot(Hickory.bai, aes(Hickory.bai$year, Hickory.bai[,8]))+geom_line(color = "white")+theme_minimal()+theme(panel.grid.major = element_blank(), panel.grid.minor = element_blank(), axis.line = element_line(colour = "white"), axis.text = element_text(colour = "white"), axis.title = element_text(color = "white"))+ylab("Detrended Ring Width Index")+xlab("Year")
dev.off()
# site-level chronology (mean across trees) for the example site
hic.chron <- chron(Hickory.bai)
hic.chron$year <- as.numeric(row.names(hic.chron))
png(width=6,height=4,units="in",res = 300,bg = "transparent","transparent_chronology.png")
ggplot(hic.chron, aes(year, xxxstd))+xlim(1856, 2016) +ylim(0,2)+geom_line(color = "white")+theme_minimal()+xlab("Year")+ylab("Detrended Ring Width Index")+theme(panel.grid.major = element_blank(), panel.grid.minor = element_blank(), axis.line = element_line(colour = "white"), axis.text = element_text(colour = "white"), axis.title = element_text(color = "white"))
dev.off()
# make example PDSI reconstruction to plot out:
PDSImi <- read.table("/Users/kah/Documents/TreeRings/outputs/data/850w_425n_226.txt", header = TRUE)
png(width=6,height=4,units="in",res = 300,bg = "transparent","transparent_reconstruction.png")
ggplot(PDSImi, aes(YEAR, RECON))+#xlim(1500, 2016) +ylim(0,2)+
  geom_line(color = "white")+theme_minimal()+xlab("Year")+ylab("Reconstructed PDSI")+theme(panel.grid.major = element_blank(), panel.grid.minor = element_blank(), axis.line = element_line(colour = "white"), axis.text = element_text(colour = "white"), axis.title = element_text(color = "white"))
dev.off()
##########################################################
# tree_age_agg adds on the ages of the trees at each year
# can do this with BAI or detrended RWI
source("R/tree_age_agg.R")
# apply the tree_age_agg function on all of the detrended tree ring series
detrended.age <- lapply(detrended.list, FUN = tree_age_agg, age1950 = 10, type = "RWI_Spline_detrended" )
# use do.call to stack the per-site results into one dataframe
detrended.age.df <- do.call(rbind, detrended.age)
# classify each tree by establishment period from its first ring year.
# NOTE(review): the column names say pre1800/pre1950 but the cutoffs used
# are 1880 and 1930 -- confirm which is intended.
age.classes <- detrended.age.df %>% group_by(site, ID) %>% drop_na() %>% summarise(pre1800 = min(year) < 1880 , pre1950 = min(year, na.rm = TRUE) <1930 & min(year, na.rm = TRUE) >=1880 , post1950 = min(year, na.rm = TRUE) >1930)
# counts of trees per age class per site (printed only, not saved)
age.classes %>% group_by(site) %>% summarise(pre1800_n=sum(pre1800, na.rm=TRUE), pre1950_n = sum(pre1950, na.rm=TRUE), post1950_n = sum(post1950, na.rm=TRUE))
write.csv(age.classes, "data/site_stats/n_trees_ageclass_by_site.csv")
###################################
# add climate data to the age trends
####################################
# note about climate data: GHCN climate data provides PDSI esimtates, while PRISM is more commonly used and can be used to get VPD data.
# both GHCN and PRISM have Precip and temperature estimates, but PRSIM data should be used for this b/c GHCN is over the whole climate zone, PRISM is point estimates
# this function reads in climate data from each site and adds it to the appropriate site
# Attach annual and seasonal climate summaries to a per-tree data frame.
#
# Args:
#   site.df:     data.frame with at least `year` and `site` columns (one
#                site per call; the site code is read from the first row).
#   climatedata: "GHCN" to use the NOAA climate-division CSVs under data/
#                (includes PDSI), anything else to use the PRISM point
#                extracts under data/PRISM/ (includes VPD and a
#                Thornthwaite water balance).
#
# Returns: site.df merged (by `year`) with annual PCP/TMIN/TAVG plus
#   season/month summaries; the two branches return different climate
#   columns (PDSI-based vs VPD/BAL-based).
# Side effects: the PRISM branch writes
#   data/climate/PRISM/<site>full.clim.csv.
get.clim <- function(site.df, climatedata){
  site.code <- site.df[1,]$site
  if(climatedata == "GHCN"){
    # map each site to its NOAA climate-division file
    if(site.code %in% c("BON", "GLL1", "GLL2", "GLL3", "GLL4")){
      MNcd.clim <- read.csv("data/West_central_MN_nclimdiv.csv")
    } else{ if(site.code %in% c("HIC", "COR","GLA", "PVC" )){
      MNcd.clim <- read.csv("data/NE_illinois_climdiv.csv")
    } else{ if(site.code == "W-R" ){
      MNcd.clim <- read.csv("data/West_central_MN_nclimdiv.csv")
    } else{ if(site.code == 'SAW'){
      MNcd.clim <- read.csv("data/NE_illinois_climdiv.csv")
    }else{ if(site.code == "STC"){
      MNcd.clim <- read.csv("data/East_Central_MN_CDODiv5039587215503.csv")
    }else{ if(site.code == "ENG"){
      MNcd.clim <- read.csv("data/Central_MN_CDO.csv")
    }else{ if(site.code == "TOW"){
      MNcd.clim <- read.csv("data/South_central_MN_CDO.csv")
    }else{ if(site.code == "MOU"){
      MNcd.clim <- read.csv("data/South_East_MN_CDO.csv")
    }else{ if(site.code %in% c("UNC", "AVO")){
      MNcd.clim <- read.csv("data/East_Central_MN_CDODiv5039587215503.csv")
    }else { if(site.code == 'PLE'){
      MNcd.clim <- read.csv('data/south_central_WI_climdiv.csv')
    }else { if(site.code == 'YRF'){
      MNcd.clim <- read.csv('IA_nclim_div_northeast.csv')
    }else{
      # unmapped site: MNcd.clim stays undefined and the code below errors
      cat("missing climate data")}
    }
    }
    }
    }
    }
    }
    }
    }
    }}
    # NOTE(review): inch-to-mm conversion is 25.4; 25.54 looks like a typo
    # (same factor appears in the PRISM branch) -- confirm before fixing.
    MNcd.clim$PCP <- MNcd.clim$PCP*25.54
    # column subsets per variable
    keeps <- c("Year", "Month", "PCP")
    keepstavg <- c("Year", "Month", "TAVG")
    keepst <- c("Year", "Month", "TMAX")
    keepstmin <- c("Year", "Month", "TMIN")
    keepspdsi <- c("Year", "Month", "PDSI")
    #create a dataset for Precip (-9999 is the missing-value sentinel)
    MNp.df <- MNcd.clim[,keeps]
    MNp.df[MNp.df == -9999]<- NA
    #for tmax
    MNt.df <- MNcd.clim[,keepst]
    MNt.df[MNt.df == -9999]<- NA
    #for tmin
    MNtmin.df<- MNcd.clim[,keepstmin]
    MNtmin.df[MNtmin.df == -9999]<- NA
    #for tavg
    MNtavg.df <- MNcd.clim[,keepstavg]
    MNtavg.df[MNtavg.df == -9999]<- NA
    #for PDSI
    MNpdsi.df <- MNcd.clim[,keepspdsi]
    MNpdsi.df[MNpdsi.df == -9999]<- NA
    #for precipitation
    total.p <- aggregate(PCP ~ Year + Month, data=MNp.df, FUN=sum, na.rm = T)
    # NOTE(review): months 6:9 spans Jun-Sep, though the result is
    # labelled "JJA" below.
    months <- 6:9
    MNpjja.df <- MNp.df[MNp.df$Month %in% months,]
    jja.p <- aggregate(PCP ~ Year, data = MNpjja.df, FUN = sum, na.rm = T)
    total.p <- aggregate(PCP ~ Year + Month, data=MNp.df, FUN=sum, na.rm = T)
    may.p <- total.p[total.p$Month == 5, ]
    # June means/sums of the temperature variables
    tavg.m <- aggregate(TAVG ~ Year + Month, data=MNtavg.df, FUN=sum, na.rm = T)
    jun.tavg <- tavg.m[tavg.m$Month == 6,]
    tmin.m <- aggregate(TMIN ~ Year + Month, data = MNtmin.df, FUN = sum, na.rm = T)
    jun.tmin <- tmin.m[tmin.m$Month == 6, ]
    tmax.m <- aggregate(TMAX ~ Year + Month, data = MNt.df, FUN = sum, na.rm = T)
    jun.tmax <- tmax.m[tmax.m$Month == 6, ]
    #pr.yr <- aggregate(PCP ~ Year , data=MNp.df, FUN=sum, na.rm = T)
    #plot(pr.yr[1:120,1], pr.yr[1:120,2], type = "l", xlab = "Year", ylab = "Annual Precip (mm)")
    #precip <- dcast(total.p, Year ~ Month)
    # annual summaries; rows 1:1440 = the first 120 years x 12 months
    # (presumably 1895-2014 -- TODO confirm file coverage)
    annual.p <- aggregate(PCP~Year, data = MNp.df[1:1440,], FUN = sum, na.rm=T)
    annual.t <- aggregate(TAVG ~ Year, data = MNtavg.df[1:1440,], FUN = 'mean', na.rm=T)
    annual.mint <- aggregate(TMIN ~Year, data = MNtmin.df[1:1440,], FUN = 'mean', na.rm = T)
    annual.pdsi <- aggregate(PDSI ~ Year, data = MNpdsi.df[1:1440,], FUN = 'mean', na.rm = T)
    annual.pdsi.m <- aggregate(PDSI ~ Year + Month, data = MNpdsi.df[1:1440,], FUN = 'mean', na.rm = T)
    jul.pdsi <- annual.pdsi.m[annual.pdsi.m$Month == 7,]
    jja.pdsi <- aggregate(PDSI ~ Year, data = MNpdsi.df[MNpdsi.df$Month %in% 6:8 & MNpdsi.df$Year %in% 1895:2014,], FUN = 'mean', na.rm = T)
    jja.pdsi.m <- aggregate(PDSI ~ Year + Month, data = MNpdsi.df[MNpdsi.df$Month %in% 6:8 & MNpdsi.df$Year %in% 1895:2014,], FUN = 'mean', na.rm = T)
    # assemble one row per year; [1:120,] assumes all series align on the
    # same 120 years
    annuals <- data.frame(year = annual.p$Year,
                          PCP = annual.p$PCP,
                          TMIN = annual.mint$TMIN,
                          TAVG = annual.t$TAVG,
                          PDSI = annual.pdsi$PDSI,
                          JJA.pdsi = jja.pdsi$PDSI,
                          MAY.p = may.p[1:120,]$PCP,
                          JJA.p = jja.p[1:120,]$PCP,
                          JUNTmin = jun.tmin[1:120,]$TMIN,
                          JUNTavg = jun.tavg[1:120,]$TAVG,
                          JUNTmax = jun.tmax[1:120,]$TMAX,
                          Jul.pdsi = jul.pdsi[1:120,]$PDSI)
    df <- merge(site.df, annuals, by = "year")
    df
  }else{
    # PRISM branch: single point extract per site, found by site code in
    # the file name; first 10 lines are PRISM header metadata
    MNcd.clim <- read.csv(paste0("data/PRISM/",list.files("data/PRISM/", pattern = site.code)), header = TRUE, skip = 10 )
    colnames(MNcd.clim) <- c("Date", "PCP", "TMIN", "TAVG", "TMAX", "TdAVG", "VPDmin", "VPDmax" )
    # get latitude (need for PET calculation): parsed from the file name,
    # assumed to be the 5th "_"-separated token -- TODO confirm naming
    lat <- as.numeric(unlist(strsplit(list.files("data/PRISM/", pattern = site.code), split = "_"))[5])
    #split date into month and year:
    MNcd.clim <- MNcd.clim %>% separate(Date, c("Year", "Month"), "-")
    # conversions to metric b/c PRISM still uses Farenheit and inches \_O_/
    # NOTE(review): 25.54 should likely be 25.4 (inches -> mm) -- confirm.
    MNcd.clim$PCP <- MNcd.clim$PCP*25.54 # convert to mm
    # convert temperatures to celsius
    MNcd.clim$TMIN <- (MNcd.clim$TMIN - 32)/1.8
    MNcd.clim$TMAX <- (MNcd.clim$TMAX - 32)/1.8
    MNcd.clim$TAVG <- (MNcd.clim$TAVG - 32)/1.8
    MNcd.clim$TdAVG <- (MNcd.clim$TdAVG - 32)/1.8
    # calculate PET using thornthwaite method (SPEI package):
    MNcd.clim$PET <- as.numeric(thornthwaite(MNcd.clim$TAVG, lat))
    #calculate water balance for each month:
    MNcd.clim$BAL <- MNcd.clim$PCP - MNcd.clim$PET
    MNcd.clim$Month<- as.numeric(MNcd.clim$Month)
    # make separate DF for each of the variables:
    keeps <- c("Year", "Month", "PCP")
    keepstavg <- c("Year", "Month", "TAVG")
    keepst <- c("Year", "Month", "TMAX")
    keepstmin <- c("Year", "Month", "TMIN")
    keepsvpdmin <- c("Year", "Month", "VPDmin")
    keepsvpdmax <- c("Year", "Month", "VPDmax")
    keepsPET <- c("Year", "Month", "PET")
    keepsBAL <- c("Year", "Month", "BAL")
    #create a dataset for Precip
    MNp.df <- MNcd.clim[,keeps]
    MNp.df[MNp.df == -9999]<- NA
    #for tmax
    MNt.df <- MNcd.clim[,keepst]
    MNt.df[MNt.df == -9999]<- NA
    #for tmin
    MNtmin.df<- MNcd.clim[,keepstmin]
    MNtmin.df[MNtmin.df == -9999]<- NA
    #for tavg
    MNtavg.df <- MNcd.clim[,keepstavg]
    MNtavg.df[MNtavg.df == -9999]<- NA
    # for vpdmin
    MNvpdmin.df<- MNcd.clim[,keepsvpdmin]
    MNvpdmin.df[MNvpdmin.df == -9999]<- NA
    # for vpdmax
    MNvpdmax.df<- MNcd.clim[,keepsvpdmax]
    MNvpdmax.df[MNvpdmax.df == -9999]<- NA
    #for PET (thornthwaite):
    MNPET.df<- MNcd.clim[,keepsPET]
    MNPET.df[MNPET.df == -9999]<- NA
    #for water balance (P- PET)
    MNBAL.df <- MNcd.clim[,keepsBAL]
    MNBAL.df[MNBAL.df == -9999] <- NA
    total.p <- aggregate(PCP ~ Year + Month, data=MNp.df, FUN=sum, na.rm = T)
    # NOTE(review): months 6:9 spans Jun-Sep though labelled "JJA" below
    months <- 6:9
    MNpjja.df <- MNp.df[as.numeric(MNp.df$Month) %in% months,]
    jja.p <- aggregate(PCP ~ Year, data = MNpjja.df, FUN = sum, na.rm = T)
    total.p <- aggregate(PCP ~ Year + Month, data=MNp.df, FUN=sum, na.rm = T)
    may.p <- total.p[total.p$Month == 5, ]
    # June temperature summaries
    tavg.m <- aggregate(TAVG ~ Year + Month, data=MNtavg.df, FUN=sum, na.rm = T)
    jun.tavg <- tavg.m[tavg.m$Month == 6,]
    tmin.m <- aggregate(TMIN ~ Year + Month, data = MNtmin.df, FUN = sum, na.rm = T)
    jun.tmin <- tmin.m[tmin.m$Month == 6, ]
    tmax.m <- aggregate(TMAX ~ Year + Month, data = MNt.df, FUN = sum, na.rm = T)
    jun.tmax <- tmax.m[tmax.m$Month == 6, ]
    # July VPDmax and July water balance
    VPDmax.m <- aggregate(VPDmax ~ Year + Month, data = MNvpdmax.df, FUN = sum, na.rm = T)
    jul.VPDmax <- VPDmax.m[VPDmax.m$Month == 7, ]
    BAL.m <- aggregate(BAL ~ Year + Month, data = MNBAL.df[1:1440,], FUN = sum, na.rm = T)
    jul.BAL <- BAL.m[BAL.m$Month == 7, ]
    # annual summaries; rows 1:1440 = first 120 years x 12 months
    annual.p <- aggregate(PCP~Year, data = MNp.df[1:1440,], FUN = sum, na.rm=T)
    annual.t <- aggregate(TAVG ~ Year, data = MNtavg.df[1:1440,], FUN = 'mean', na.rm=T)
    annual.mint <- aggregate(TMIN ~Year, data = MNtmin.df[1:1440,], FUN = 'mean', na.rm = T)
    annual.VPDmax <- aggregate(VPDmax ~ Year, data = MNvpdmax.df[1:1440,], FUN = 'mean', na.rm = T)
    annual.BAL <- aggregate(BAL ~ Year, data = MNBAL.df[1:1440,], FUN = 'sum', na.rm = T)
    jja.VPDmax <- aggregate(VPDmax ~ Year, data = MNvpdmax.df[MNvpdmax.df$Month %in% 6:8 & MNvpdmax.df$Year %in% 1895:2014,], FUN = 'mean', na.rm = T)
    # assemble one row per year; [1:120,] assumes all series align
    annuals <- data.frame(year = annual.p$Year,
                          PCP = annual.p$PCP,
                          TMIN = annual.mint$TMIN,
                          TAVG = annual.t$TAVG,
                          VPDmax = annual.VPDmax$VPDmax,
                          jja.VPDmax = jja.VPDmax$VPDmax,
                          BAL = annual.BAL$BAL,
                          MAY.p = may.p[1:120,]$PCP,
                          JJA.p = jja.p[1:120,]$PCP,
                          JUNTmin = jun.tmin[1:120,]$TMIN,
                          JUNTavg = jun.tavg[1:120,]$TAVG,
                          JUNTmax = jun.tmax[1:120,]$TMAX,
                          jul.VPDmax = jul.VPDmax[1:120,]$VPDmax,
                          jul.BAL = jul.BAL[1:120,]$BAL)
    # cache the per-site annual climate table before merging
    write.csv(annuals, paste0("data/climate/PRISM/", site.code, "full.clim.csv"))
    df <- merge(site.df, annuals, by = "year")
    df
  }
}
# get prism climate and merge for all sites, then stack into one dataframe:
det.age.clim.prism <- lapply(detrended.age, get.clim, climatedata = "PRISM")
det.age.clim.prism.df <- do.call(rbind,det.age.clim.prism)
# get GHCN climate and merge for all:
det.age.clim.ghcn <- lapply(detrended.age, get.clim, climatedata = "GHCN")
det.age.clim.ghcn.df <- do.call(rbind, det.age.clim.ghcn)
# quick-look plots of RWI vs summer drought metrics, by age class and site
ggplot(det.age.clim.prism.df, aes(x = jul.VPDmax, y = RWI, color = ageclass))+geom_point()+stat_smooth(method = 'lm')+facet_wrap(~site, ncol = 5)
ggplot(det.age.clim.ghcn.df, aes(x = Jul.pdsi, y = RWI, color = ageclass))+geom_point()+stat_smooth(method = 'lm')+facet_wrap(~site, ncol = 5)
ggplot(det.age.clim.ghcn.df, aes(x = JJA.pdsi, y = RWI, color = ageclass))+geom_point()+stat_smooth(method = 'lm')+facet_wrap(~site, ncol = 5)
#age.classes <- det.age.clim.ghcn.df %>% group_by(site, ID) %>% summarise(pre1800 = min(year, na.rm = TRUE) < 1880, pre1950 = min(year, na.rm = TRUE) <1930 & min(year, na.rm = TRUE) >=1880 , post1950 = min(year, na.rm = TRUE) >1930)
#test <- age.classes %>% group_by(site) %>% summarise(pre1800_n=sum(pre1800 , na.rm=TRUE), pre1950_n = sum(pre1950, na.rm=TRUE), post1950_n = sum(post1950, na.rm=TRUE))
# write these dfs to a csv:
write.csv(det.age.clim.prism.df, "outputs/data/full_det_prism_rwi.csv", row.names = FALSE)
write.csv(det.age.clim.ghcn.df, "outputs/data/full_det_ghcn_rwi.csv", row.names = FALSE)
# July PDSI over time for a single example tree (BON13).
# NOTE(review): theme_black() is not a ggplot2 export -- presumably defined
# elsewhere in the project; also, a ggplot inside png() is only rendered
# when auto-printed, so wrap in print() if this script is source()d.
png(height = 3, width = 5, units = "in", res =300,"outputs/pdsi_over_time_bw.png")
ggplot(data = det.age.clim.ghcn.df[det.age.clim.ghcn.df$ID %in% "BON13", ], aes(year, Jul.pdsi))+geom_point()+stat_smooth(method = "lm", se = FALSE)+geom_line(color = "White")+theme_black(base_size = 20)+ylab("July PDSI")+geom_hline(yintercept = 0, color = "grey", linetype = "dashed")
dev.off()
# ------------------------- Get tree DBH at time of coring + put in DBH classes ----------------------------------
# This function uses DBH at time of coring and annual growth records to estimate Tree DBH over time
# based on the DBH at each time step, specify the DBH class over time.
# for some reason these throw up alot of warnings now, but seem to be okay
# Back-calculate each tree's diameter (cm) in every year of its ring
# record, working inward from the DBH measured at coring time.
#
# Args:
#   filename: Tucson-format .rwl file (ignored for sites whose cleaned
#             widths live in cleanrwl/<site>ww.csv).
#   site:     site code; used to locate metadata and prefix tree names.
#
# Returns: data.frame of yearly diameters per tree with added `year` and
#   `site` columns.
# Side effects: writes outputs/DBH/species_codes_<site>.csv and, when some
#   trees in the metadata lack a measured series,
#   data/site_stats/<site>-IDS_not_in_tellervo.csv.
#
# NOTE(review): assumes ring widths in mm (converted to cm via *0.1) and
# DBH metadata in cm -- confirm against the field sheets.
read_DBH_year <- function( filename, site){
  # sites with hand-cleaned widths come from CSV; all others from Tucson files
  if(site %in% c("HIC", "AVO", "UNI", "GLL1", "GLL2", "GLL3", "GLL4")){
    newseries <- read.csv(paste0("cleanrwl/",site,"ww.csv"))
    row.names(newseries) <- newseries$year
    newseries <- newseries[!names(newseries) %in% "year"]
  }else{
    newseries <- read.tucson( filename )
  }
  rwl.stats(newseries)
  # average the cores by tree (for the sites with multiple cores):
  gp.treeMean <- treeMean(newseries, read.ids(newseries, stc = c(3,1,2)))
  gp.treeMean2 <- treeMean(newseries, autoread.ids(newseries), na.rm=TRUE)
  # sites where >1 core was sampled per tree; cores must be averaged before
  # estimating diameters ("HIC" was listed twice in the original -- deduped)
  mult.core.sites <- c("TOW", "COR", "HIC", "STC", "MOU", "ENG", "PVC", "UNI", "BON", "PLE", "GLL1", "GLL2", "GLL3", "GLL4")
  if(site %in% mult.core.sites){
    # site-specific tree naming so series ids match the metadata sheets
    if(site %in% "COR"){
      colnames(gp.treeMean2) <- paste0(site,19, colnames(gp.treeMean2))
    } else if(site %in% "MOU"){
      gp.treeMean2 <- treeMean(newseries, read.ids(newseries, stc = c(3,1,2)))
      colnames(gp.treeMean2) <- paste0(site,colnames(gp.treeMean2))
    } else if(site %in% "HIC"){
      gp.treeMean2 <- treeMean(newseries, read.ids(newseries, stc = c(3,4,1)), na.rm=TRUE)
      colnames(gp.treeMean2) <- paste0(site, colnames(gp.treeMean2))
    } else if(site %in% "GLL4"){
      gp.treeMean2 <- treeMean(newseries, read.ids(newseries, stc = c(4,7,1)), na.rm=TRUE)
      # quick fix for GLL4: hand-assigned tree names
      colnames(gp.treeMean2) <- c("GLL41", "GLL413", "GLL414", "GLL415", "GLL42", "GLL45", "GLL47", "GLL48", "GLL49")
    } else if(site %in% "UNI"){
      # NOTE(review): UNI takes its names from gp.treeMean (not
      # gp.treeMean2), as in the original -- confirm this is intentional
      colnames(gp.treeMean2) <- paste0(site, colnames(gp.treeMean))
    } else {
      colnames(gp.treeMean2) <- paste0(site, colnames(gp.treeMean2))
    }
    newseries <- gp.treeMean2
    site.data <- read.csv(paste0("/Users/kah/Documents/TreeRings/data/site_maps/all_metadata/", site, "_full_xy.csv"))
    if(site %in% "AVO"){
      # AVO metadata keys trees by full_tellervo id
      diams <- site.data[complete.cases(site.data[c("full_tellervo", "DBH", "SpecCode")]), ]
      diams.agg <- aggregate(diams[,c("full_tellervo", "DBH")], list(diams$full_tellervo), mean, na.rm = TRUE)
      colnames(diams.agg) <- c("ID", "short", "DBH")
      spec <- site.data[complete.cases(site.data[,c("full_tellervo", "SpecCode")]),c("full_tellervo", "SpecCode")]
      spec <- spec[!duplicated(spec),]
      diams.agg <- merge(diams.agg, spec, by.x = "ID", by.y = "full_tellervo")
      diams <- diams.agg[,c("ID", "DBH", "SpecCode")]
      diams$DBH <- c(diams$DBH)
      colnames(diams) <- c("ID", "DBH", "SpecCode")
      # record any trees with DBH metadata but no measured series
      not.in.rwl <- diams [!diams$ID %in% colnames(newseries),]
      if(length(not.in.rwl$ID) > 0){
        write.csv(not.in.rwl, paste0("data/site_stats/", site, "-IDS_not_in_tellervo.csv"))
      }
      # keep only trees present in both the metadata and the ring series
      diams <- diams [diams$ID %in% colnames(newseries),]
      newseries <- newseries[,colnames(newseries) %in% diams$ID]
      # BUGFIX: was the undefined object `sitecode`
      write.csv(diams,paste0("outputs/DBH/species_codes_", site, ".csv"))
    }else{
      diams <- site.data[c("short", "DBH", "SpecCode")]
      diams$DBH <- as.numeric(as.character(diams$DBH))
      # NOTE(review): aggregating the character SpecCode column with mean()
      # emits warnings; left as-is to preserve behavior
      diams.agg <- aggregate(diams, list(diams$short), mean, na.rm = TRUE)
      colnames(diams.agg) <- c("ID", "short", "DBH")
      diams.agg<- diams.agg[!duplicated(diams.agg),]
      spec <- site.data[complete.cases(site.data[,c("short", "SpecCode")]),c("short", "SpecCode")]
      spec <- spec[!duplicated(spec),]
      diams.agg <- merge(diams.agg, spec, by.x = "ID", by.y = "short")
      diams <- diams.agg[,c("ID", "DBH", "SpecCode")]
      diams$DBH <- c(diams$DBH)
      colnames(diams) <- c("ID", "DBH", "SpecCode")
      # record any trees with DBH metadata but no measured series
      not.in.rwl <- diams [!diams$ID %in% colnames(newseries),]
      if(length(not.in.rwl$ID) > 0){
        write.csv(not.in.rwl, paste0("data/site_stats/", site, "-IDS_not_in_tellervo.csv"))
      }
      diams <- diams [diams$ID %in% colnames(newseries),]
      newseries <- newseries[,colnames(newseries) %in% diams$ID]
      # BUGFIX: was the undefined object `sitecode`
      write.csv(diams ,paste0("outputs/DBH/species_codes_", site, ".csv"))
    }
    rwl <- newseries*0.1 # convert ring widths from mm to cm
    # sanity checks adapted from dplR::bai.out
    if (!is.data.frame(newseries))
      stop("'rwl' must be a data.frame")
    if (!is.null(diams)) {
      if (ncol(newseries) != nrow(diams))
        stop("dimension problem: ", "'ncol(rw)' != 'nrow(diam)'")
      if (!all(diams[, 1] %in% names(newseries)))
        stop("series ids in 'diam' and 'rwl' do not match")
      diam.vec <- diams[, 2]
    }
    # setting up and reordering vectors to match diameters to the series:
    out <- rwl
    n.vec <- seq_len(nrow(rwl))
    diam <- diams[ order(match(diams$ID, colnames(rwl))), ] # reorder diameters to match series order
    diam.vec <- diam[, 2]
    # walk each series inward from the measured DBH: r0 holds the radius at
    # the start of each year; yearly diameter = 2 * r0
    for (i in seq_len(ncol(rwl))) {
      dat <- rwl[[i]]
      dat2 <- na.omit(dat)
      d <- diam.vec[i]
      r0 <- d/2 - c(0, cumsum(rev(dat2)))
      # re-insert the NA padding of the original rwl layout
      na <- attributes(dat2)$na.action
      if(min( n.vec[!n.vec %in% na]) == 1){
        no.na <- c( n.vec[!n.vec %in% na])
        out[no.na, i] <- rev(r0[1:length(r0)-1])*2 # only report back the diameters
      }else{
        no.na <- c(na[length(na)], n.vec[!n.vec %in% na])
        out[no.na, i] <- rev(r0[1:length(r0)])*2 # only report back the diameters
      }
    }
  }else{
    # single-core sites: one series per tree, keyed by full_tellervo id
    site.data <- read.csv(paste0("/Users/kah/Documents/TreeRings/data/site_maps/all_metadata/", site, "_full_xy.csv"))
    diams <- site.data[c("full_tellervo", "DBH")]
    diams$DBH <- (diams$DBH)
    colnames(diams) <- c("ID", "DBH")
    spec <- site.data[complete.cases(site.data[,c("full_tellervo", "SpecCode")]),c("full_tellervo", "SpecCode")]
    spec <- spec[!duplicated(spec),]
    diams.agg <- merge(diams, spec, by.x = "ID", by.y = "full_tellervo")
    diams <- diams.agg[,c("ID", "DBH", "SpecCode")]
    diams$DBH <- c(diams$DBH)
    colnames(diams) <- c("ID", "DBH", "SpecCode")
    # record any trees with DBH metadata but no measured series
    not.in.rwl <- diams [!diams$ID %in% colnames(newseries),]
    if(length(not.in.rwl$ID) > 0){
      write.csv(not.in.rwl, paste0("data/site_stats/", site, "-IDS_not_in_tellervo.csv"))
    }
    # only keep records where we have both DBH and measured series:
    diams <- diams [diams$ID %in% colnames(newseries),]
    newseries <- newseries[,colnames(newseries) %in% diams$ID]
    write.csv(diams,paste0("outputs/DBH/species_codes_", site, ".csv"))
    rwl <- newseries*0.1 # convert ring widths from mm to cm
    if (!is.data.frame(rwl))
      stop("'rwl' must be a data.frame")
    # BUGFIX: this guard tested `diam`, which is not defined until further
    # below (object-not-found error unless a stale `diam` sat in the
    # calling workspace); test the metadata table `diams` instead
    if (!is.null(diams)) {
      if (ncol(rwl) != nrow(diams))
        stop("dimension problem: ", "'ncol(rwl)' != 'nrow(diam)'")
      if (!all(diams[, 1] %in% names(rwl)))
        stop("series ids in 'diam' and 'rwl' do not match")
      diam.vec <- diams[, 2]
    }
    # setting up and reordering vectors to match diameters to the series:
    out <- rwl
    n.vec <- seq_len(nrow(rwl))
    diam <- diams[ order(match(diams$ID, colnames(rwl))), ]
    diam.vec <- diam[, 2]
    # same inward diameter walk as the multi-core branch
    for (i in seq_len(ncol(rwl))) {
      dat <- rwl[[i]]
      dat2 <- na.omit(dat)
      if (is.null(diam))
        d <- sum(dat2) * 2
      else d <- diam.vec[i]
      r0 <- d/2 - c(0, cumsum(rev(dat2)))
      na <- attributes(dat2)$na.action
      if(min( n.vec[!n.vec %in% na]) == 1){
        no.na <- c( n.vec[!n.vec %in% na])
        out[no.na, i] <- rev(r0[1:length(r0)-1])*2 # only report back the diameters
      }else{
        no.na <- c(na[length(na)], n.vec[!n.vec %in% na])
        out[no.na, i] <- rev(r0[1:length(r0)])*2 # only report back the diameters
      }
    }
  }
  # rename df and attach year/site identifiers
  yearly.diams <- out
  yearly.diams$year <- row.names(yearly.diams)
  yearly.diams$site <- site
  # output yearly dataframe
  yearly.diams
}
# Build yearly DBH reconstructions for every site from the clean ring-width
# (.rwl) files via read_DBH_year(); trailing comments record known data quirks
# per site that were noticed while processing.
Hickory.DBH <- read_DBH_year(filename = "cleanrwl/HICww.rwl", site = "HIC")
StCroix.DBH <- read_DBH_year("cleanrwl/STCww.rwl", site = "STC")
Bonanza.DBH <- read_DBH_year(filename = "cleanrwl/BONww.rwl", site = "BON") # missing 1 core
Townsend.DBH <- read_DBH_year(filename = "cleanrwl/TOWww.rwl", site = "TOW") #missing 1 core
Pleasant.DBH <- read_DBH_year(filename = "cleanrwl/PLEww.rwl", site = "PLE") #missing 3
Coral.DBH <- read_DBH_year(filename = "cleanrwl/CORww.rwl", site = "COR") #bai needs the 19 in front of numbers
Uncas.DBH <- read_DBH_year(filename = "cleanrwl/UNCww.rwl", site = "UNC") # bai is miisng full names...save as csv?
Glacial.DBH <- read_DBH_year("cleanrwl/GLAww.rwl", site = "GLA") # messed up and DBH not averaged ring
Englund.DBH <- read_DBH_year(filename = "cleanrwl/ENGww.rwl", site = "ENG")
Mound.DBH <- read_DBH_year(filename = "cleanrwl/MOUww.rwl", site = "MOU") # bai is messed up
GLL1.DBH <- read_DBH_year("cleanrwl/GLL1ww.rwl", site = "GLL1")# bai removed extra ones
GLL2.DBH <- read_DBH_year("cleanrwl/GLL2ww.rwl", site = "GLL2") # bai removed extra onesi
GLL3.DBH <- read_DBH_year("cleanrwl/GLL3ww.rwl", site = "GLL3")
GLL4.DBH <- read_DBH_year(filename = "cleanrwl/GLL4ww.rwl", site = "GLL4") # error
PVC.DBH <- read_DBH_year("cleanrwl/PVCww.rwl", site = "PVC")
AVO.DBH <- read_DBH_year(filename = "cleanrwl/AVOww.rwl", site = "AVO")
UNI.DBH <- read_DBH_year(filename = "cleanrwl/UNIww.rwl", site = "UNI") # DBH has multiple cores listed
# Collect per-site reconstructions for batch classification below.
# NOTE(review): UNI.DBH is computed above but deliberately commented out of the
# list -- confirm whether it should be included.
dbh.list <- list(Hickory.DBH, StCroix.DBH, Bonanza.DBH,Townsend.DBH,Pleasant.DBH, Coral.DBH,
Uncas.DBH, Glacial.DBH, Englund.DBH, Mound.DBH, GLL1.DBH, GLL2.DBH,
GLL3.DBH, GLL4.DBH, PVC.DBH, AVO.DBH) #, UNI.DBH)
# Assign diameter (DBH) size classes to every tree-year and save a plot of
# each tree's DBH trajectory through time for the site.
#
# dbh.df    wide data frame of yearly diameters for one site (as produced by
#           read_DBH_year), with `year` and `site` columns plus one column
#           per tree ID
# n.classes number of size classes: 9 (10 cm bins), 5 (20 cm bins), or any
#           other value for a coarse 4-class fallback scheme
#
# Returns the data in long format (year, site, ID, DBH) with a `dbhclass`
# column added; NA diameters are labelled "NA".
DBH.classify <- function(dbh.df, n.classes){
  # reshape the wide year-by-tree table into long format
  DBH.m <- melt(dbh.df)
  colnames(DBH.m) <- c("year","site", "ID", "DBH")
  DBH.m$year <- as.numeric(DBH.m$year)
  site <- unique(DBH.m$site)
  # save a per-tree DBH-through-time plot for this site
  dbh.plot <- ggplot(DBH.m, aes(x = year, y = DBH, color = ID)) + geom_line()+theme_bw()
  ggsave(plot = dbh.plot, filename = paste0("outputs/DBH/", site, "_DBH_time.png"))
  DBH.m$ID <- as.character(DBH.m$ID)
  # assign each tree-year to a diameter class; NA diameters labelled "NA"
  if(n.classes == 9){
    # nine 10-cm-wide classes
    class.dbh <- ifelse(is.na(DBH.m$DBH), "NA",
                 ifelse(DBH.m$DBH <= 10, "< 10",
                 ifelse(DBH.m$DBH > 10 & DBH.m$DBH <= 20 , "10 - 20",
                 ifelse(DBH.m$DBH > 20 & DBH.m$DBH <= 30 , "20 - 30",
                 ifelse(DBH.m$DBH > 30 & DBH.m$DBH <= 40 , "30 - 40",
                 ifelse(DBH.m$DBH > 40 & DBH.m$DBH <= 50 , "40 - 50",
                 ifelse(DBH.m$DBH > 50 & DBH.m$DBH <= 60 , "50 - 60",
                 ifelse(DBH.m$DBH > 60 & DBH.m$DBH <= 70 , "60 - 70",
                 ifelse(DBH.m$DBH > 70 & DBH.m$DBH <= 80 , "70 - 80",
                 ">80")))))))))
  }else{ if(n.classes == 5){
    # five 20-cm-wide classes (labels must match the factor levels used
    # downstream: "< 20", "20 - 40", "40 - 60", "60 - 80", ">80")
    class.dbh <- ifelse(is.na(DBH.m$DBH), "NA",
                 ifelse(DBH.m$DBH <= 20, "< 20",
                 ifelse(DBH.m$DBH > 20 & DBH.m$DBH <= 40 , "20 - 40",
                 ifelse(DBH.m$DBH > 40 & DBH.m$DBH <= 60 , "40 - 60",
                 ifelse(DBH.m$DBH > 60 & DBH.m$DBH <= 80 , "60 - 80",">80")))))
  }else{
    # fallback: four coarse classes
    # (fixed: the 30-60 cm bin was mislabelled "20 - 60", and the >80 branch
    # produced the inconsistent label "> 80" alongside ">80")
    class.dbh <- ifelse(is.na(DBH.m$DBH), "NA",
                 ifelse(DBH.m$DBH <= 30, "< 30",
                 ifelse(DBH.m$DBH > 30 & DBH.m$DBH <= 60 , "30 - 60",
                 ifelse(DBH.m$DBH > 60 & DBH.m$DBH <= 80 , "60 - 80", ">80"))))
  }}
  DBH.m$dbhclass <- class.dbh
  DBH.m
}
# Classify every site into 5 DBH bins and stack into one long data frame.
dbh.class <- lapply(dbh.list, DBH.classify, n.classes = 5)
dbh.class.df <- do.call(rbind, dbh.class) # make into df
# summarize # of cores each site has est before 1900, 1900-1950, and after 1950:
# NOTE(review): the cutoffs actually used below are 1880 and 1930, not
# 1900/1950 as this comment says -- confirm which is intended.
summary(dbh.class.df)
# detach plyr so dplyr's group_by/summarise are used (plyr masks them)
detach(package: plyr)
minyear.by.ID <- dbh.class.df %>% group_by(site, ID) %>% summarise(min(year, na.rm = TRUE))
#group.by(site) %>% summarise()
# establishment-period flags per core, then counts per site
age.classes <- dbh.class.df %>% group_by(site, ID) %>% drop_na() %>% summarise(pre1800 = min(year, na.rm = TRUE) <1880, pre1950 = min(year, na.rm = TRUE) <1930 & min(year, na.rm = TRUE) >=1880 , post1950 = min(year, na.rm = TRUE) >1930)
age.classes %>% group_by(site) %>% summarise(pre1800_n=sum(pre1800, na.rm=TRUE), pre1950_n = sum(pre1950, na.rm=TRUE), post1950_n = sum(post1950, na.rm=TRUE))
# fixing some ID's with UNI: truncate UNI core IDs to the first 4 characters
# so they match the DBH table's naming
test.uni<- det.age.clim.ghcn.df[det.age.clim.ghcn.df$site %in% "UNI",]$ID
det.age.clim.ghcn.df[det.age.clim.ghcn.df$site %in% "UNI",]$ID<- substr(test.uni, 1, 4)
det.age.clim.prism.df[det.age.clim.prism.df$site %in% "UNI",]$ID<- substr(test.uni, 1, 4)
# merge the diameter class df with the climate/growth dataframes:
det.age.clim.ghcn.df <- merge(det.age.clim.ghcn.df, dbh.class.df, by = c("year", "site", "ID"))
det.age.clim.prism.df <- merge(det.age.clim.prism.df, dbh.class.df, by = c("year", "site", "ID"))
# NOTE(review): test.* duplicates of the merges above -- presumably scratch
# checks; confirm they are still needed.
test.ghcn.df <- merge(det.age.clim.ghcn.df, dbh.class.df, by = c("year", "site", "ID"))
test.prism.df <- merge(det.age.clim.prism.df, dbh.class.df, by = c("year", "site", "ID"))
# change factor order of dbhclass to make prettier plots:
det.age.clim.ghcn.df$dbhclass <- factor(det.age.clim.ghcn.df$dbhclass, levels = c("< 20", "20 - 40", "40 - 60", "60 - 80", ">80"))
# growth vs. July / summer drought by size class
# NOTE(review): bare ggplot() calls inside png() render only when this script
# is run interactively; wrap in print() if source()d.
png("outputs/DBH/July_clim_sens_by_dbh.png")
ggplot(na.omit(det.age.clim.ghcn.df), aes(Jul.pdsi, RWI, color = dbhclass))+stat_smooth(method = "lm", se = TRUE, aes(fill = dbhclass), alpha = 0.1)+theme_bw()+theme_black()
dev.off()
png("outputs/DBH/JJA_clim_sens_by_dbh.png")
ggplot(na.omit(det.age.clim.ghcn.df), aes(JJA.pdsi, RWI, color = dbhclass))+stat_smooth(method = "lm", se = TRUE, aes(fill = dbhclass), alpha = 0.1)+theme_bw()+theme_black(base_size = 20)+ylab("Detrended Ring Width Index")+xlab("Summer PDSI")
dev.off()
# Climate-sensitivity plots split by age class, then by land-cover class.
det.age.clim.ghcn.df$ageclass <- factor(det.age.clim.ghcn.df$ageclass, levels = c("Past", "Modern"))
png("outputs/DBH/July_clim_sens_by_ageclass.png")
ggplot(na.omit(det.age.clim.ghcn.df), aes(Jul.pdsi, RWI, color = dbhclass))+stat_smooth(method = "lm", se = TRUE, aes(fill = dbhclass), alpha = 0.1)+theme_bw()+theme_black()+facet_wrap(~ageclass)
dev.off()
png("outputs/DBH/JJA_clim_sens_by_ageclass.png")
ggplot(na.omit(det.age.clim.ghcn.df), aes(JJA.pdsi, RWI, color = dbhclass))+stat_smooth(method = "lm", se = TRUE, aes(fill = dbhclass), alpha = 0.1)+theme_bw()+theme_black(base_size = 12)+facet_wrap(~ageclass)
dev.off()
# Merge in site-level land-cover metadata. This merge MUST come before the
# aov() calls below: in the original script order det.age.clim.class.ghcn.df
# was referenced before it was created, which errors at runtime.
det.age.clim.class.ghcn.df <- merge(det.age.clim.ghcn.df, locs, by.x = "site", by.y = "code")
# does summer-drought sensitivity differ by age class?
summary(aov(RWI~JJA.pdsi+ageclass, data=det.age.clim.class.ghcn.df))
summary(aov(RWI~JJA.pdsi*ageclass, data=det.age.clim.class.ghcn.df))
ggplot(na.omit(det.age.clim.ghcn.df), aes(JJA.pdsi, RWI, color = ageclass))+stat_smooth(method = "lm", se = TRUE, aes(fill = dbhclass), alpha = 0.1)+theme_bw()+theme_black()+facet_wrap(~ageclass)
png("outputs/DBH/JJA_clim_sens_by_coverclass.png")
ggplot(na.omit(det.age.clim.class.ghcn.df), aes(JJA.pdsi, RWI, color = Description))+geom_point(size = 0.8)+stat_smooth(method = "lm", se = TRUE, aes(fill = Description), alpha = 0.1)+theme_bw()+theme_black()+ylab("Detrended Ring Width Index")+xlab("Summer PDSI")
dev.off()
# per-cover-class regressions and ANOVA tests of slope differences
summary(lm(RWI ~ JJA.pdsi, data = det.age.clim.class.ghcn.df[det.age.clim.class.ghcn.df$Description %in% "Forest",]))
summary(lm(RWI ~ JJA.pdsi, data = det.age.clim.class.ghcn.df[det.age.clim.class.ghcn.df$Description %in% "Savanna",]))
summary(aov(RWI~JJA.pdsi+Description, data=det.age.clim.class.ghcn.df))
summary(aov(RWI~JJA.pdsi*Description, data=det.age.clim.class.ghcn.df))
# Per-site climate-sensitivity plots by DBH class.
png("outputs/DBH/July_clim_sens_by_site.png")
ggplot(na.omit(det.age.clim.ghcn.df), aes(Jul.pdsi, RWI, color = dbhclass))+stat_smooth(method = "lm", se = TRUE, aes(fill = ageclass), alpha = 0.1)+theme_bw()+theme_black()+facet_wrap(~site)
dev.off()
png("outputs/DBH/JJA_clim_sens_by_site.png")
ggplot(na.omit(det.age.clim.ghcn.df), aes(JJA.pdsi, RWI, color = dbhclass))+stat_smooth(method = "lm", se = TRUE, aes(fill = dbhclass), alpha = 0.1)+theme_bw()+theme_black()+facet_wrap(~site)
dev.off()
# slope of climate response within each DBH class (interaction-only models)
summary(lm(RWI ~ Jul.pdsi:dbhclass, data = na.omit(det.age.clim.ghcn.df)))
summary(lm(RWI ~ JJA.pdsi:dbhclass, data = na.omit(det.age.clim.ghcn.df)))
# make these plots with correlation coefficent, not linear relationships:
head(det.age.clim.ghcn.df)
# re-attach plyr for ddply() below (this masks dplyr's group_by/summarise again)
require(plyr)
# Pearson correlation between ring-width index (RWI) and summer drought
# (JJA.pdsi) for one group of rows. Returns a one-row data.frame so the
# result can be stacked by plyr::ddply().
cor.func <- function(xx) {
  r <- cor(xx$RWI, xx$JJA.pdsi)
  data.frame(COR = r)
}
# Keep only rows with both RWI and summer PDSI present.
nona.age.df <- det.age.clim.ghcn.df[!is.na(det.age.clim.ghcn.df$RWI) & !is.na(det.age.clim.ghcn.df$JJA.pdsi),]
# correlation coefficient for each DBH class within each site
det.age.dbhclass.cor <- ddply(nona.age.df, .(dbhclass, site), cor.func)
ggplot(det.age.dbhclass.cor, aes(dbhclass, COR))+geom_bar(stat = "identity")+facet_wrap(~site)
# Bootstrap the correlation between RWI and summer drought (JJA.pdsi).
#
# df  data frame with numeric columns RWI and JJA.pdsi
#
# Returns a numeric vector of R = 2500 bootstrap correlation estimates, each
# computed from a row resample (with replacement) of df.
#
# Changes from the original: the result vector is preallocated instead of
# grown one element per iteration; the unused `cor.orig` was dropped; and the
# internal accumulator no longer shadows the function's own name. The RNG
# call sequence is unchanged, so results are identical under the same seed.
cor.boot <- function(df){
  dat <- df[,c("RWI", "JJA.pdsi")]
  N <- nrow(dat)
  R <- 2500
  boot.cors <- numeric(R)
  for (i in seq_len(R)) {
    idx <- sample.int(N, N, replace = TRUE)  # resample rows with replacement
    boot.cors[i] <- cor(dat[idx, ])[1,2]
  }
  boot.cors
}
# Bootstrap correlations per DBH class.
# NOTE(review): plyr::ddply expects .fun to return a data.frame; cor.boot
# returns a length-2500 numeric vector -- confirm this call behaves as
# intended.
det.age.dbhclass.cor.b <- ddply(nona.age.df, .(dbhclass), cor.boot)
# NOTE(review): BUG -- this histograms the *function object* cor.boot, not
# the bootstrap results, and will error; probably meant to plot the values
# in det.age.dbhclass.cor.b.
hist(cor.boot)
# lets find to cores that need to be checked (not sensitive to climate)
# per-core correlation between RWI and July PDSI
corrs <- data.frame(a = 1:length(unique(det.age.clim.ghcn.df$ID)),
id = unique(det.age.clim.ghcn.df$ID))
for(i in 1:length(unique(det.age.clim.ghcn.df$ID))){
id <- unique(det.age.clim.ghcn.df$ID)[i]
a <- cor(det.age.clim.ghcn.df[det.age.clim.ghcn.df$ID %in% id,]$RWI, det.age.clim.ghcn.df[det.age.clim.ghcn.df$ID %in% id,]$Jul.pdsi, use = "pairwise.complete.obs")
corrs[i,]$a <- a
corrs[i,]$id <- id
}
# drop cores with weak or undefined climate correlation (r <= 0.1 or NA)
removes <- corrs[corrs$a <= 0.1 | is.na(corrs$a),]$id
det.age.clim.ghcn.df <- det.age.clim.ghcn.df[!det.age.clim.ghcn.df$ID %in% removes,]
write.csv(det.age.clim.ghcn.df, "outputs/det.age.clim.ghcn.sizes.csv", row.names = FALSE)
write.csv(det.age.clim.class.ghcn.df, "outputs/det.age.clim.ghcn.sizes.covclass.csv", row.names = FALSE)
# ------------------------How does growth vary over time:
#library(treeclim)
# we will us the dcc function in the tree clim package, but this funtion takes monthly data:
#test <- det.age.clim.ghcn.df
# moving correlations between climate and tree growth
#these funcitons print out plots time moving correlations for all of the climate parameters
# not run by default b/c they take along time to run:
# Static and moving-window correlations between a site chronology and monthly
# climate variables (PREC, PDSI, TAVG, TMAX, TMIN) using treeclim::dcc().
# Saves pdf plots and csv tables under outputs/correlations/moving_site_cors/.
#
# climate   monthly climate data. NOTE(review): this argument is effectively
#           ignored -- the body rebuilds the climate table from csv files on
#           disk and reassigns `climate` from it.
# chron     site chronology (has a Year column; subset to years >= 1895).
# site.name label used in the output file names.
#
# NOTE(review): the body reads `site.df` and `climatedata` from the calling
# environment rather than from its arguments -- both must exist globally.
clim.cor <- function(climate, chron, site.name){
  site.code <- site.df[1,]$site
  if(climatedata == "GHCN"){
    # pick the NOAA climate-division csv matching this site
    if(site.code %in% c("BON", "GLL1", "GLL2", "GLL3", "GLL4")){
      MNcd.clim <- read.csv("data/West_central_MN_nclimdiv.csv")
    } else{ if(site.code %in% c("HIC", "COR","GLA", "PVC" )){
      MNcd.clim <- read.csv("data/NE_illinois_climdiv.csv")
    } else{ if(site.code == "W-R" ){
      MNcd.clim <- read.csv("data/West_central_MN_nclimdiv.csv")
    } else{ if(site.code == 'SAW'){
      MNcd.clim <- read.csv("data/NE_illinois_climdiv.csv")
    }else{ if(site.code == "STC"){
      MNcd.clim <- read.csv("data/East_Central_MN_CDODiv5039587215503.csv")
    }else{ if(site.code == "ENG"){
      MNcd.clim <- read.csv("data/Central_MN_CDO.csv")
    }else{ if(site.code == "TOW"){
      MNcd.clim <- read.csv("data/South_central_MN_CDO.csv")
    }else{ if(site.code == "MOU"){
      MNcd.clim <- read.csv("data/South_East_MN_CDO.csv")
    }else{ if(site.code == "UNC"){
      MNcd.clim <- read.csv("data/East_Central_MN_CDODiv5039587215503.csv")
    }else { if(site.code == 'PLE'){
      MNcd.clim <- read.csv('data/south_central_WI_climdiv.csv')
    }else { if(site.code == 'YRF'){
      MNcd.clim <- read.csv('IA_nclim_div_northeast.csv')}
      #MNcd.clim <-read.csv('data/CDODiv2154347072867.csv')}
    }
    }
    }
    }
    }
    }
    }
    }
    }
    }
    MNcd.clim$PCP <- MNcd.clim$PCP*25.54  # inches -> mm
    keeps <- c("Year", "Month", "PCP")
    keepstavg <- c("Year", "Month", "TAVG")
    keepst <- c("Year", "Month", "TMAX")
    keepstmin <- c("Year", "Month", "TMIN")
    keepspdsi <- c("Year", "Month", "PDSI")
    # per-variable tables, recoding the -9999 missing-value sentinel to NA
    MNp.df <- MNcd.clim[,keeps]
    MNp.df[MNp.df == -9999]<- NA
    MNt.df <- MNcd.clim[,keepst]
    MNt.df[MNt.df == -9999]<- NA
    MNtmin.df<- MNcd.clim[,keepstmin]
    MNtmin.df[MNtmin.df == -9999]<- NA
    MNtavg.df <- MNcd.clim[,keepstavg]
    MNtavg.df[MNtavg.df == -9999]<- NA
    MNpdsi.df <- MNcd.clim[,keepspdsi]
    MNpdsi.df[MNpdsi.df == -9999]<- NA
  }else{
    # PRISM csv for this site: convert to metric, compute PET and P - PET
    MNcd.clim <- read.csv(paste0("data/PRISM/",list.files("data/PRISM/", pattern = site.code)), header = TRUE, skip = 10 )
    colnames(MNcd.clim) <- c("Date", "PCP", "TMIN", "TAVG", "TMAX", "TdAVG", "VPDmin", "VPDmax" )
    # latitude parsed from the PRISM file name (needed for PET)
    lat <- as.numeric(unlist(strsplit(list.files("data/PRISM/", pattern = site.code), split = "_"))[5])
    # split "YYYY-MM" date into Year and Month
    MNcd.clim <- MNcd.clim %>% separate(Date, c("Year", "Month"), "-")
    # PRISM still uses Fahrenheit and inches
    MNcd.clim$PCP <- MNcd.clim$PCP*25.54 # convert to mm
    MNcd.clim$TMIN <- (MNcd.clim$TMIN - 32)/1.8
    MNcd.clim$TMAX <- (MNcd.clim$TMAX - 32)/1.8
    MNcd.clim$TAVG <- (MNcd.clim$TAVG - 32)/1.8
    MNcd.clim$TdAVG <- (MNcd.clim$TdAVG - 32)/1.8
    # Thornthwaite potential evapotranspiration and monthly water balance
    MNcd.clim$PET <- as.numeric(thornthwaite(MNcd.clim$TAVG, lat))
    MNcd.clim$BAL <- MNcd.clim$PCP - MNcd.clim$PET
    MNcd.clim$Month<- as.numeric(MNcd.clim$Month)
  }
  climate <- MNcd.clim  # NOTE: replaces the `climate` argument from here on
  # ---- precipitation ----
  PREC <- climate[,c('Year', 'Month', 'PCP')]
  PREC$Year <- as.numeric(PREC$Year)
  PREC <- PREC[1:1452,]  # first 1452 monthly rows (121 years)
  PDSI <- climate[,c('Year', 'Month', 'PDSI')]
  PDSI$Year <- as.numeric(PDSI$Year)
  PDSI <- PDSI[1:1452,]
  chron <- chron[chron$Year >=1895,]  # align chronology with climate record
  hic.prec.static <- dcc(chron, PREC, dynamic = 'static', win_size = 35, win_offset = 30)
  pdf(paste0('outputs/correlations/moving_site_cors/PREC_', site.name,'dynamic.pdf'))
  print(plot(hic.prec.static))
  hic.prec.moving <- dcc(chron, PREC, dynamic = 'moving', win_size = 45, win_offset = 5, ci = 0.05, boot = "std")
  print(plot(hic.prec.moving))
  print(traceplot(hic.prec.moving))
  dev.off()
  write.csv(hic.prec.moving, paste0('outputs/correlations/moving_site_cors/PREC_', site.name,'dynamic.csv'))
  # ---- PDSI ----
  PDSI <- climate[,c('Year', 'Month', 'PDSI')]
  PDSI <- PDSI[1:1452,]
  pdf(paste0('outputs/correlations/moving_site_cors/PDSI_', site.name,'dynamic.pdf'))
  hic.pdsi.static <- dcc(chron, PDSI, dynamic = 'static', win_size = 35, win_offset = 5)
  print(plot(hic.pdsi.static))
  hic.pdsi.moving <- dcc(chron, PDSI, dynamic = 'moving', win_size = 35, win_offset = 5)
  print(plot(hic.pdsi.moving))
  print(traceplot(hic.pdsi.moving))
  dev.off()
  write.csv(hic.pdsi.moving, paste0('outputs/correlations/moving_site_cors/PDSI_', site.name,'dynamic.csv'))
  # ---- TAVG ----
  # (fixed: the original assigned the moving result to hic.pdsi.moving but
  # plotted/wrote the undefined hic.tavg.moving, and wrote its csv to a
  # PDSI_ file name, clobbering the PDSI output)
  TAVG <- climate[,c('Year', 'Month', 'TAVG')]
  TAVG <- TAVG[1:1452,]
  pdf(paste0('outputs/correlations/moving_site_cors/TAVG_', site.name,'dynamic.pdf'))
  hic.tavg.static <- dcc(chron, TAVG, dynamic = 'static', win_size = 35, win_offset = 5)
  print(plot(hic.tavg.static))
  hic.tavg.moving <- dcc(chron, TAVG, dynamic = 'moving', win_size = 35, win_offset = 5)
  print(plot(hic.tavg.moving))
  print(traceplot(hic.tavg.moving))
  dev.off()
  write.csv(hic.tavg.moving, paste0('outputs/correlations/moving_site_cors/TAVG_', site.name,'dynamic.csv'))
  # ---- TMAX ----
  TMAX <- climate[,c('Year', 'Month', 'TMAX')]
  TMAX <- TMAX[1:1452,]
  pdf(paste0('outputs/correlations/moving_site_cors/TMAX_', site.name,'dynamic.pdf'))
  hic.tmax.static <- dcc(chron, TMAX, dynamic = 'static', win_size = 35, win_offset = 5)
  print(plot(hic.tmax.static))
  hic.tmax.moving <- dcc(chron, TMAX, dynamic = 'moving', win_size = 35, win_offset = 5)
  print(plot(hic.tmax.moving))
  print(traceplot(hic.tmax.moving))
  dev.off()
  # ---- TMIN ----
  TMIN <- climate[,c('Year', 'Month', 'TMIN')]
  TMIN <- TMIN[1:1452,]
  pdf(paste0('outputs/correlations/moving_site_cors/TMIN_', site.name,'dynamic.pdf'))
  hic.tmin.static <- dcc(chron, TMIN, dynamic = 'static', win_size = 35, win_offset = 5)
  print(plot(hic.tmin.static))
  hic.tmin.moving <- dcc(chron, TMIN, dynamic = 'moving', win_size = 35, win_offset = 5)
  print(plot(hic.tmin.moving))
  print(traceplot(hic.tmin.moving))
  dev.off()
  # (fixed: a second trailing dev.off() closed a device that was not open,
  # which errors)
}
#clim.cor(IL.clim, Hickory, 'Hickory_Grove_')
#clim.cor(MNwc.clim, Bonanza, 'Bonanza_Prairie_')
#clim.cor(MNwc.clim, Desoix, 'Desoix_')
#clim.cor(WIsc.clim, Pleasant, 'Pleasant_Valley_Conservancy_')
#clim.cor(MNec.clim, Townsend, 'Townsend_woods_')
#clim.cor(MNec.clim, StCroix, 'StCroix_savanna_')
#clim.cor(MNse.clim, Mound, 'Mound_prairie_')
# ------------------------What is the factor that affects growth-------------
# GAM of RWI as smooth functions of temperature and precipitation (varying by
# year), plus site and year main effects:
gam1 <- gam(RWI~ s(TAVG, k = 6, by = year) +
s(PCP, k = 6, by = year) +
site + year,
# random=list(Site=~1, PlotID=~1, TreeID=~1),
data=det.age.clim.prism.df)
summary(gam1)$r.sq # R-squared
summary(gam1)$dev.expl # explained deviance
anova(gam1)
AIC(gam1)
# plot pred vs. obs:
preds <- predict(gam1, det.age.clim.prism.df)
plot(det.age.clim.prism.df$RWI, preds)
# raw growth-vs-climate scatter checks by site
ggplot(det.age.clim.prism.df, aes(TAVG, RWI, color = site))+geom_point()+facet_wrap(~site)
ggplot(det.age.clim.prism.df, aes(PCP, RWI, color = site))+geom_point()+facet_wrap(~site)
# save climate + tree ring dfs for downstream isotope/climate analysis
write.csv(det.age.clim.prism.df, "outputs/data/Isotope_climate/detrened_age_rwi_PRISMclimate.df.csv", row.names = FALSE)
write.csv(det.age.clim.ghcn.df, "outputs/data/Isotope_climate/detrened_age_rwi_GHCNclimate.df.csv", row.names = FALSE)
# ------------------plot climate parameters vs growth for all the tree ring series
# Scatter plot of one climate variable vs. RWI for a single site, with
# separate regression lines for "Modern" and "Past" trees. Past trees
# contribute only pre-1950 years and Modern trees only 1950+ years; prints
# aov/anova tests of whether the two slopes differ. Returns the ggplot
# object (callers arrange them with grid.arrange).
#
# x       data frame for one site with columns site, year, ageclass, RWI,
#         and the column named by `Climate`
# Climate character name of the climate column for the x axis
# xlab    x-axis label
# ylab    y-axis label
plot.Modern.Past <- function(x, Climate, xlab, ylab){
  Site <- x[1,]$site
  if(length(unique(x$ageclass)) > 1){
    # restrict Past trees to pre-1950 years and Modern trees to 1950+ years
    # (the unused `group` dummy-variable code was removed)
    co2.low.yr <- x[x$year < 1950 & x$ageclass %in% 'Past',]
    co2.high.yr <- x[x$year >= 1950 & x$ageclass %in% 'Modern',]
    x <- rbind(co2.low.yr, co2.high.yr)
    # do the climate-response slopes differ between age classes?
    print(summary(aov(x$RWI ~ x[,c(Climate)] * x$ageclass)))
    print(anova(lm(x$RWI ~ x[,c(Climate)] * x$ageclass), lm(x$RWI ~ x[,c(Climate)])))
    p<- ggplot(x, aes(x=x[,Climate], y=x$RWI, colour=x$ageclass)) + geom_point(shape=1) +
      geom_smooth(method='lm', # Add linear regression lines
                  se=TRUE,     # add shaded confidence region
                  fullrange=FALSE)+
      scale_color_manual(values=c('Past'="red",'Modern'="blue"), name = "Tree Age")+
      theme_bw()+
      theme(text = element_text(size = 10), plot.title = element_text(hjust = 0.5))+
      ylab(ylab) +
      xlab( xlab ) +
      ggtitle(Site)
  }else{
    # only one age class present at this site: a single regression line
    print(anova(lm(x$RWI ~ x[,c(Climate)])))
    p<- ggplot(x, aes(x=x[,Climate], y=x$RWI, colour=x$ageclass)) + geom_point(shape=1) +
      geom_smooth(method='lm',
                  se=TRUE,
                  fullrange=FALSE)+
      scale_color_manual(values=c('Modern'="blue",'Past'="red"), name = "Tree Age")+
      theme_bw()+
      theme(text = element_text(size = 10), plot.title = element_text(hjust = 0.5))+
      ylab(ylab) +  # fixed: was hard-coded "RWI", ignoring the ylab argument
      xlab( xlab ) +
      ggtitle(Site)
  }
  p
}
# Build the Modern/Past comparison plot for every site and arrange them in a
# grid (3 columns). NOTE(review): nCol is computed but ncol=3 is hard-coded
# in the grid.arrange calls.
allModern.Past.plots.pdsi <- lapply(det.age.clim.ghcn, plot.Modern.Past, Climate = "PDSI",xlab = "PDSI", ylab = "RWI")
png(width = 10, height = 10, units = 'in', res = 300, "outputs/correlations/Modern_Past_pdsi_allsite.png")
n <- length(allModern.Past.plots.pdsi)
nCol <- floor(sqrt(n))
do.call("grid.arrange", c(allModern.Past.plots.pdsi, ncol=3))
dev.off()
# same comparison for July maximum vapor-pressure deficit (PRISM data)
allModern.Past.plots.julvpdmax <- lapply(det.age.clim.prism, plot.Modern.Past, Climate = "jul.VPDmax",xlab = "July VPDmax", ylab = "RWI")
png(width = 10, height = 10, units = 'in', res = 300, "outputs/correlations/Modern_Past_jul_VPDmax_allsite.png")
n <- length(allModern.Past.plots.julvpdmax)
nCol <- floor(sqrt(n))
do.call("grid.arrange", c(allModern.Past.plots.julvpdmax, ncol=3))
dev.off()
# same comparison for July moisture balance (P - PET, PRISM data)
allModern.Past.plots.julBAL <- lapply(det.age.clim.prism, plot.Modern.Past, Climate = "jul.BAL",xlab = "July P - PET", ylab = "RWI")
png(width = 10, height = 10, units = 'in', res = 300, "outputs/correlations/Modern_Past_jul_BAL_allsite.png")
n <- length(allModern.Past.plots.julBAL)
nCol <- floor(sqrt(n))
do.call("grid.arrange", c(allModern.Past.plots.julBAL, ncol=3))
dev.off()
# can do this for the remaining climate variables:
plot.pre.post <- function(x, Climate, xlab, ylab){
Site <- x[1,]$site # assign site name
#create dummy variable
x$time <- 0
x[x$year < 1950 ,]$time <- "Pre-1950"
x[x$year >= 1950 ,]$time <- "Post-1950"
#x <- rbind(co2.low.yr, co2.high.yr)
#if the dummy variable is significant, then the two slopes are different
print(summary(aov(x$RWI ~ x[,c(Climate)] * x$time)))
#print(summary(lm(value ~ Climate:group, data = x)))
#print(summary(aov(value~Climate*class, data=x)))
print(anova(lm(x$RWI ~ x[,c(Climate)] * x$time), lm(x$RWI ~ x[,c(Climate)])))
#print(summary(lm(value~Climate/group-1, data=x)))
#print(summary(aov(value~Climate/group, data = x)))
# Extend the regression lines beyond the domain of the data
p<- ggplot(x, aes(x=x[,Climate], y=x$RWI, colour=x$time)) + geom_point(shape=1) +
#scale_colour_hue(l=50) +
#+ylim(-1.0,1.0)
#+xlim(-4,4)# Use a slightly darker palette than normal
geom_smooth(method='lm', # Add linear regression lines
se=TRUE, # add shaded confidence region
fullrange=FALSE)+# Extend regression lines
scale_color_manual(values=c('Pre-1950'="red",'Post-1950'="blue"))+
#xlim(-8, 8)+
#ylim(0.5, 1.5) +
theme_bw()+
theme(text = element_text(size = 10), plot.title = element_text(hjust = 0.5), legend.title=element_blank())+
ylab(ylab) +
xlab( xlab ) +
ggtitle(Site)
p
#ggsave(filename = paste0('outputs/correlations/pre_post_jul_pdsi_',Site,".png"), plot = p, width = 5, height = 3.5 )
}
# for PDSI (mean): pre/post-1950 comparison grids for each climate variable
allpre.post.plots.PDSI <- lapply(det.age.clim.ghcn, plot.pre.post, Climate = "PDSI", xlab = "PDSI", ylab = "RWI")
png(width = 10, height = 10, units = 'in', res = 300, "outputs/correlations/pre_post1950_PDSI_allsite.png")
n <- length(allpre.post.plots.PDSI)
nCol <- floor(sqrt(n))
do.call("grid.arrange", c(allpre.post.plots.PDSI, ncol=3))
dev.off()
# for PDSI (July):
allpre.post.plots.JulPDSI <- lapply(det.age.clim.ghcn, plot.pre.post, Climate = "Jul.pdsi", xlab = "July PDSI", ylab = "RWI")
png(width = 10, height = 10, units = 'in', res = 300, "outputs/correlations/pre_post1950_jul_PDSI_allsite.png")
n <- length(allpre.post.plots.JulPDSI)
nCol <- floor(sqrt(n))
do.call("grid.arrange", c(allpre.post.plots.JulPDSI, ncol=3))
dev.off()
# pre-post July VPDmax:
allpre.post.plots.julvpdmax <- lapply(det.age.clim.prism, plot.pre.post, Climate = "jul.VPDmax",xlab = "July VPDmax", ylab = "RWI")
png(width = 10, height = 10, units = 'in', res = 300, "outputs/correlations/pre_post1950_jul_VPDmax_allsite.png")
n <- length(allpre.post.plots.julvpdmax)
nCol <- floor(sqrt(n))
do.call("grid.arrange", c(allpre.post.plots.julvpdmax, ncol=3))
dev.off()
# pre-post July P-PET:
allpre.post.plots.julBAL <- lapply(det.age.clim.prism, plot.pre.post, Climate = "jul.BAL",xlab = "July P - PET", ylab = "RWI")
png(width = 10, height = 10, units = 'in', res = 300, "outputs/correlations/pre_post1950_jul_BAL_allsite.png")
n <- length(allpre.post.plots.julBAL)
nCol <- floor(sqrt(n))
do.call("grid.arrange", c(allpre.post.plots.julBAL, ncol=3))
dev.off()
# a look at most of the sites altogether
ggplot(det.age.clim.df, aes(x = PDSI, y = RWI, color = site))+geom_point()+stat_smooth()
summary(lm(RWI~PDSI, data = det.age.clim.df))
summary(lm(RWI~Jul.pdsi:dbhclass, data = det.age.clim.ghcn.df))
summary(lm(RWI~jul.VPDmax:dbhclass, data = det.age.clim.prism.df))
summary(lm(RWI~year, data = det.age.clim.df))
summary(lm(RWI~year:site, data = det.age.clim.df))
ggplot(det.age.clim.df, aes(x = year, y = RWI, color = site))+geom_point()+stat_smooth(method = "lm")
###################################################################
# Lets directly compare Past and Modern years with similar climates:
##################################################################
#df <- aggregate(Jul.pdsi~year, data = det.age.clim.ghcn.df[det.age.clim.ghcn.df$site %in% "HIC",], FUN = mean )
# mean July PDSI per year across all sites, ranked driest to wettest
df <- aggregate(Jul.pdsi~year, data = det.age.clim.ghcn.df, FUN = mean )
df<- df[order(df$Jul.pdsi),]
df$order <- 1:length(df$Jul.pdsi)
df$deficit <- ifelse(df$Jul.pdsi < 0, "deficit" ,"surplus")
# ranked drought bar chart highlighting the driest-quartile years
png(height = 4, width = 6, units = 'in', res = 300, "outputs/climate_25dry_percentile.png")
ggplot(df, aes(order,Jul.pdsi, fill = deficit))+geom_bar(stat = "identity", width = 0.75) + scale_fill_manual(values = c("red", "blue"))+ylab("July Drought")+xlab(" ")+theme_black(base_size = 25)+theme(legend.title = element_blank(), legend.position = "none", axis.text.x = element_blank(), axis.ticks.x = element_blank())+
geom_vline(xintercept = 30, color = "grey", linetype = "dashed")+geom_vline(xintercept = 0, color = "grey", linetype = "dashed")
dev.off()
# same chart highlighting the wettest-quartile years
png(height = 4, width = 6, units = 'in', res = 300, "outputs/climate_75wet_percentile.png")
ggplot(df, aes(order, Jul.pdsi, fill = deficit))+geom_bar(stat = "identity", width = 0.75) + scale_fill_manual(values = c("red", "blue"))+ylab("July Drought")+xlab(" ")+theme_black(base_size = 25)+theme(legend.title = element_blank(), legend.position = "none", axis.text.x = element_blank(), axis.ticks.x = element_blank())+
geom_vline(xintercept = 91, color = "grey", linetype = "dashed")+geom_vline(xintercept = 120, color = "grey", linetype = "dashed")
dev.off()
dry <- quantile(df$Jul.pdsi, 0.25) # value of the driest years
wet <- quantile(df$Jul.pdsi, 0.75) # value of the wettest years
# tag each quartile-extreme year by period and wet/dry class
# NOTE(review): the wet years are the 0.75 quantile but are labelled
# "Wet_0.25" -- the label is used consistently below, but confirm it is
# intentional.
pre.dry <- df[df$year < 1950 & df$Jul.pdsi <= dry,]
pre.dry$class <- "pre-1950"
pre.dry$climclass <- "Dry_0.25"
post.dry <- df[df$year >=1950 & df$Jul.pdsi <= dry,]
post.dry$class <- "post-1950"
post.dry$climclass <- "Dry_0.25"
pre.wet <- df[df$year < 1950 & df$Jul.pdsi >= wet,]
pre.wet$class <- "pre-1950"
pre.wet$climclass <- "Wet_0.25"
post.wet <- df[df$year >=1950 & df$Jul.pdsi >= wet,]
post.wet$class <- "post-1950"
post.wet$climclass <- "Wet_0.25"
similar.clims <- rbind(post.wet, pre.wet, pre.dry, post.dry)
# NOTE(review): the HIC-only subset on the next line is immediately
# overwritten by the full data frame.
dfs <- det.age.clim.ghcn.df[det.age.clim.ghcn.df$site %in% "HIC",]
dfs <- det.age.clim.ghcn.df
# keep only the quartile-extreme years, tagged with period and wet/dry class
sim.df <- merge(dfs, similar.clims[,c("year", "class", "climclass")], by = c('year'))
ggplot(sim.df, aes(Jul.pdsi, RWI))+geom_point()+facet_wrap(~class + climclass)+stat_smooth(method = "lm")
#sign relationship with PDSI in wetter years:
summary(lm(Jul.pdsi ~ RWI, data = sim.df[sim.df$climclass %in% "Wet_0.25",]))
summary(lm(Jul.pdsi ~ RWI:class, data = sim.df[sim.df$climclass %in% "Wet_0.25",]))
# get an idea of the slopes both pre and post:
summary(lm(Jul.pdsi ~ RWI , data = sim.df[sim.df$climclass %in% "Wet_0.25" & sim.df$class %in% "pre-1950",]))
summary(lm(Jul.pdsi ~ RWI , data = sim.df[sim.df$climclass %in% "Wet_0.25" & sim.df$class %in% "post-1950",]))
#sign relationship with PDSI in drier years:
summary(lm(Jul.pdsi ~ RWI, data = sim.df[sim.df$climclass %in% "Dry_0.25",]))
summary(lm(Jul.pdsi ~ RWI , data = sim.df[sim.df$climclass %in% "Dry_0.25" & sim.df$class %in% "pre-1950",]))
summary(lm(Jul.pdsi ~ RWI , data = sim.df[sim.df$climclass %in% "Dry_0.25" & sim.df$class %in% "post-1950",]))
#---------------------Get Climate sensitivity for the two time periods in dry + wet------------------
# Bootstrapped per-site estimates of the intercept/slope of `model.func`
# (e.g. "RWI ~ Jul.pdsi") fit separately to pre-1950 and post-1950 years,
# using only driest-quartile (Dry_0.25) years. Returns a data frame with
# point estimates and normal-approximation bootstrap CIs per site and period.
#
# NOTE(review): the `df` argument is immediately overwritten below, and the
# global `det.age.clim.ghcn.df` is used instead (here and again for `dfs`).
# Callers currently pass that same global, so results are unchanged, but the
# parameter is effectively ignored -- confirm before reusing with other data.
get.clim.sens.by.dry <- function(df, model.func){
# mean July PDSI per year, used to find the dry/wet quantile thresholds
df <- aggregate(Jul.pdsi~year , data = det.age.clim.ghcn.df, FUN = mean )
dry <- quantile(df$Jul.pdsi, 0.25) # value of the driest years
wet <- quantile(df$Jul.pdsi, 0.75) # value of the wettest years
# tag quartile-extreme years by period (wet rows are built but dropped below)
pre.dry <- df[df$year < 1950 & df$Jul.pdsi <= dry,]
pre.dry$class <- "Pre-1950"
pre.dry$climclass <- "Dry_0.25"
post.dry <- df[df$year >=1950 & df$Jul.pdsi <= dry,]
post.dry$class <- "Post-1950"
post.dry$climclass <- "Dry_0.25"
pre.wet <- df[df$year < 1950 & df$Jul.pdsi >= wet,]
pre.wet$class <- "Pre-1950"
pre.wet$climclass <- "Wet_0.25"
post.wet <- df[df$year >=1950 & df$Jul.pdsi >= wet,]
post.wet$class <- "Post-1950"
post.wet$climclass <- "Wet_0.25"
similar.clims <- rbind(post.wet, pre.wet, pre.dry, post.dry)
#dfs <- det.age.clim.ghcn.df[det.age.clim.ghcn.df$site %in% "HIC",]
dfs <- det.age.clim.ghcn.df
# attach period/class tags to the tree-level data, keep dry years only
sim.df <- merge(dfs, similar.clims[,c("year", "class", "climclass")], by = c('year'))
sim.df <- sim.df[sim.df$climclass %in% "Dry_0.25",]
# one row per site x period: site, period, intercept/slope estimates + CIs
coeffs <- matrix ( 0, length(unique(sim.df$site))*2, 8 ) # set up matrix for coefficients
# NOTE(review): `df$class` below is assigned but `df` is not used afterwards
yr <- 1895:1950
yr.post <- 1950:2014
df$class <- '9999'
df[df$year %in% yr,]$class <- 'Pre-1950'
df[df$year %in% yr.post,]$class <- 'Post-1950'
for(s in 1:length(unique(sim.df$site))){
name <- unique(sim.df$site)[s]
site.data <- na.omit(sim.df[sim.df$site == name ,])
# statistic for boot(): refit the model on the resampled rows
bs <- function(formula, data, indices) {
d <- data[indices,] # allows boot to select sample
fit <- lm(formula, data=d)
return(coef(fit))
}
# for the "Post-1950" class:
if(nrow(site.data[ site.data$class %in% "Post-1950" ,]) > 0){
# bootstrapping the linear regression model
results <- boot(data=site.data[site.data$class == "Post-1950" & site.data$year >= 1950 ,], statistic=bs, R=2000, formula=model.func)
int.cis <- boot.ci(boot.out = results, type = "norm", index = 1)# intercept
slope.cis <- boot.ci(boot.out = results, type = "norm", index = 2)
coeffs[s,3:4] <- results$t0
coeffs[s , 1] <- name
coeffs[s,2] <- "Post-1950"
coeffs[s,5] <- as.data.frame(int.cis$normal)$V2
coeffs[s,6] <- as.data.frame(int.cis$normal)$V3
coeffs[s,7] <- as.data.frame(slope.cis$normal)$V2
coeffs[s,8] <- as.data.frame(slope.cis$normal)$V3
} else{
# no post-1950 dry years at this site: NA fills (recycled across cols 3-8)
coeffs[s,3:8] <- c(NA,NA)
coeffs[s , 2] <- "Post-1950"
coeffs[s,1] <- name
}
# for the "Pre-1950" class (requires > 2 rows to fit + bootstrap):
if(nrow(site.data[ site.data$class %in% "Pre-1950" ,]) > 2){
results <- boot(data=site.data[site.data$class == "Pre-1950" & site.data$year < 1950 ,], statistic=bs, R=2000, formula=model.func)
int.cis <- boot.ci(boot.out = results, type = "norm", index = 1)# intercept
slope.cis <- boot.ci(boot.out = results, type = "norm", index = 2)
coeffs[s+length(unique(sim.df$site)),3:4] <- results$t0
coeffs[s+length(unique(sim.df$site)) , 1] <- name
coeffs[s+length(unique(sim.df$site)),2] <- "Pre-1950"
coeffs[s+length(unique(sim.df$site)),5] <- as.data.frame(int.cis$normal)$V2
coeffs[s+length(unique(sim.df$site)),6] <- as.data.frame(int.cis$normal)$V3
coeffs[s+length(unique(sim.df$site)),7] <- as.data.frame(slope.cis$normal)$V2
coeffs[s+length(unique(sim.df$site)),8] <- as.data.frame(slope.cis$normal)$V3
}else{
coeffs[s+length(unique(sim.df$site)),3:8] <- c(NA,NA)
coeffs[s +length(unique(sim.df$site)), 2] <- "Pre-1950"
coeffs[s+length(unique(sim.df$site)),1] <- name
}
}
# matrix -> data frame; numeric columns came through as character (the
# matrix held the site names), so convert back
coeffs <- data.frame(coeffs)
colnames(coeffs) <- c("site","age",'int.est', "slope.est", "int.min","int.max", "slope.min", "slope.max")
coeffs$site <- as.character(coeffs$site)
coeffs$slope.est <- as.numeric(as.character(coeffs$slope.est))
coeffs$int.est <- as.numeric(as.character(coeffs$int.est))
coeffs$int.min <- as.numeric(as.character(coeffs$int.min))
coeffs$int.max <- as.numeric(as.character(coeffs$int.max))
coeffs$slope.min <- as.numeric(as.character(coeffs$slope.min))
coeffs$slope.max <- as.numeric(as.character(coeffs$slope.max))
coeffs$climclass <- "Dry_0.25"
coeffs
}
# Dry-year (driest 25% of years) sensitivities of ring width to July and
# summer (JJA) PDSI, fit separately for Pre-1950 vs Post-1950 years.
sens.jul.pdsi_dry0.25 <- get.clim.sens.by.dry(df = det.age.clim.ghcn.df, model.func = "RWI ~ Jul.pdsi")
sens.jja.pdsi_dry0.25 <- get.clim.sens.by.dry(df = det.age.clim.ghcn.df, model.func = "RWI ~ JJA.pdsi")
# get bootstrapped estimates of climate sensitivity for wet years before and after 1950:
# Bootstrapped RWI ~ climate sensitivity for years in a given regional
# moisture class (wettest or driest 25% of years), fit separately for the
# Pre-1950 and Post-1950 periods at each site.
#
# Args:
#   df           - ring-width + climate data with columns site, year,
#                  Jul.pdsi, RWI, and the predictor named in model.func.
#   climateclass - moisture class to keep: "Wet_0.25" or "Dry_0.25".
#   model.func   - model formula as a string, e.g. "RWI ~ Jul.pdsi".
#
# Returns a data.frame with one row per site x period holding bootstrapped
# intercept/slope estimates and their normal-approximation 95% CIs.
get.clim.sens.by.wet <- function(df, climateclass, model.func) {
  # Classify years by the regional (all-site) mean July PDSI quartiles.
  # Fix: this previously aggregated the global det.age.clim.ghcn.df and
  # ignored the df argument entirely.
  pdsi.yr <- aggregate(Jul.pdsi ~ year, data = df, FUN = mean)
  dry <- quantile(pdsi.yr$Jul.pdsi, 0.25)  # driest-25% threshold
  wet <- quantile(pdsi.yr$Jul.pdsi, 0.75)  # wettest-25% threshold
  pdsi.yr$class <- ifelse(pdsi.yr$year < 1950, "Pre-1950", "Post-1950")
  pdsi.yr$climclass <- ifelse(pdsi.yr$Jul.pdsi <= dry, "Dry_0.25",
                       ifelse(pdsi.yr$Jul.pdsi >= wet, "Wet_0.25", NA))
  similar.clims <- pdsi.yr[!is.na(pdsi.yr$climclass), ]

  # Attach the year classifications, then keep only the requested class.
  sim.df <- merge(df, similar.clims[, c("year", "class", "climclass")], by = "year")
  sim.df <- sim.df[sim.df$climclass %in% climateclass, ]

  sites <- unique(sim.df$site)
  n.sites <- length(sites)
  coeffs <- matrix(0, n.sites * 2, 8)  # one row per site x period

  # Statistic for boot(): refit the linear model on each resample.
  bs <- function(formula, data, indices) {
    fit <- lm(formula, data = data[indices, ])
    return(coef(fit))
  }

  for (s in seq_len(n.sites)) {
    name <- sites[s]
    site.data <- na.omit(sim.df[sim.df$site == name, ])

    # ---- Post-1950 (rows 1..n.sites); needs >2 qualifying years ----
    if (nrow(site.data[site.data$class %in% "Post-1950", ]) > 2) {
      results <- boot(data = site.data[site.data$class == "Post-1950" & site.data$year >= 1950, ],
                      statistic = bs, R = 2000, formula = model.func)
      int.cis <- boot.ci(boot.out = results, type = "norm", index = 1)    # intercept CI
      slope.cis <- boot.ci(boot.out = results, type = "norm", index = 2)  # slope CI
      coeffs[s, 3:4] <- results$t0
      coeffs[s, 1] <- name
      coeffs[s, 2] <- "Post-1950"
      coeffs[s, 5] <- as.data.frame(int.cis$normal)$V2
      coeffs[s, 6] <- as.data.frame(int.cis$normal)$V3
      coeffs[s, 7] <- as.data.frame(slope.cis$normal)$V2
      coeffs[s, 8] <- as.data.frame(slope.cis$normal)$V3
    } else {
      # Too few qualifying years to bootstrap this site/period.
      coeffs[s, 3:8] <- NA
      coeffs[s, 2] <- "Post-1950"
      coeffs[s, 1] <- name
    }

    # ---- Pre-1950 (rows n.sites+1 .. 2*n.sites) ----
    p <- s + n.sites
    if (nrow(site.data[site.data$class %in% "Pre-1950", ]) > 2) {
      results <- boot(data = site.data[site.data$class == "Pre-1950" & site.data$year < 1950, ],
                      statistic = bs, R = 2000, formula = model.func)
      int.cis <- boot.ci(boot.out = results, type = "norm", index = 1)
      slope.cis <- boot.ci(boot.out = results, type = "norm", index = 2)
      coeffs[p, 3:4] <- results$t0
      coeffs[p, 1] <- name
      coeffs[p, 2] <- "Pre-1950"
      coeffs[p, 5] <- as.data.frame(int.cis$normal)$V2
      coeffs[p, 6] <- as.data.frame(int.cis$normal)$V3
      coeffs[p, 7] <- as.data.frame(slope.cis$normal)$V2
      coeffs[p, 8] <- as.data.frame(slope.cis$normal)$V3
    } else {
      coeffs[p, 3:8] <- NA
      coeffs[p, 2] <- "Pre-1950"
      coeffs[p, 1] <- name
    }
  }

  coeffs <- data.frame(coeffs)
  colnames(coeffs) <- c("site", "age", "int.est", "slope.est",
                        "int.min", "int.max", "slope.min", "slope.max")
  coeffs$site <- as.character(coeffs$site)
  # Matrix storage coerced everything to character; restore numeric columns.
  for (col in c("int.est", "slope.est", "int.min", "int.max",
                "slope.min", "slope.max")) {
    coeffs[[col]] <- as.numeric(as.character(coeffs[[col]]))
  }
  # Fix: label output with the class actually analysed
  # (was hard-coded to "Dry_0.25" regardless of climateclass).
  coeffs$climclass <- climateclass
  coeffs
}
# Wet-year (wettest 25% of years) sensitivities, before vs after 1950.
# Fix: these previously called get.clim.sens.by.dry, so the *_wet0.25
# objects actually held dry-year results.
sens.jul.pdsi_wet0.25 <- get.clim.sens.by.wet(df = det.age.clim.ghcn.df, climateclass = "Wet_0.25", model.func = "RWI ~ Jul.pdsi")
sens.jja.pdsi_wet0.25 <- get.clim.sens.by.wet(df = det.age.clim.ghcn.df, climateclass = "Wet_0.25", model.func = "RWI ~ JJA.pdsi")
# Slope estimates with bootstrap CIs, per site and period:
ggplot(sens.jul.pdsi_wet0.25, aes(site, slope.est, color = age))+geom_point()+geom_errorbar(aes(ymin=slope.min, ymax = slope.max), size = 0.2, width = 0.5)
ggplot(sens.jul.pdsi_dry0.25, aes(site, slope.est, color = age))+geom_point()+geom_errorbar(aes(ymin=slope.min, ymax = slope.max), size = 0.2, width = 0.5)
ggplot(sens.jja.pdsi_wet0.25, aes(site, slope.est, color = age))+geom_point()+geom_errorbar(aes(ymin=slope.min, ymax = slope.max), size = 0.2, width = 0.5)
ggplot(sens.jja.pdsi_dry0.25, aes(site, slope.est, color = age))+geom_point()+geom_errorbar(aes(ymin=slope.min, ymax = slope.max), size = 0.2, width = 0.5)
#---------------------Get Climate sensitivity for the modern and past in dry + wet------------------
# get bootstrapped estimates of climate sensitivity for dry years, Modern and Past trees, before + after 1950
# Bootstrapped RWI ~ climate sensitivity for Modern vs Past trees, using only
# years that fall in a given site-level moisture class (wettest/driest 25%).
# Side effect: writes each site's wet/dry/period year classification to
# outputs/data/Isotope_climate/<site>_wet_dry_climate_age_class.csv so trees
# can be picked for isotope sampling.
#
# Args:
#   df           - ring-width + climate data with columns site, year,
#                  Jul.pdsi, RWI, ageclass ("Modern"/"Past"), and the
#                  predictor named in model.func.
#   climateclass - moisture class to keep: "Wet_0.25" or "Dry_0.25".
#   model.func   - model formula as a string, e.g. "RWI ~ Jul.pdsi".
#
# Returns a data.frame: one row per site x age class with bootstrapped
# intercept/slope estimates and normal-approximation 95% CIs.
get.clim.sens.age.by.moisture <- function(df, climateclass, model.func) {
  # Period labels; 1950 appears in both ranges and the second assignment
  # wins, so 1950 itself is classed "Post-1950".
  yr <- 1895:1950
  yr.post <- 1950:2014
  df$class <- '9999'
  df[df$year %in% yr, ]$class <- 'Pre-1950'
  df[df$year %in% yr.post, ]$class <- 'Post-1950'

  n.sites <- length(unique(df$site))
  # One row per site x age class; 8 columns to match the colnames below.
  # Fix: was allocated twice with 9 columns but only 8 column names.
  coeffs <- matrix(0, n.sites * 2, 8)

  # Statistic for boot(): refit the linear model on each resample.
  bs <- function(formula, data, indices) {
    fit <- lm(formula, data = data[indices, ])
    return(coef(fit))
  }

  for (s in 1:n.sites) {
    name <- unique(df$site)[s]
    site.data <- na.omit(df[df$site == name, ])

    # Classify this site's years by its own July PDSI quartiles.
    sim.df <- aggregate(Jul.pdsi ~ year, data = site.data, FUN = mean)
    dry <- quantile(sim.df$Jul.pdsi, 0.25)  # driest-25% threshold
    wet <- quantile(sim.df$Jul.pdsi, 0.75)  # wettest-25% threshold
    pre.dry <- sim.df[sim.df$year < 1950 & sim.df$Jul.pdsi <= dry, ]
    pre.dry$class <- "Pre-1950"
    pre.dry$climclass <- "Dry_0.25"
    post.dry <- sim.df[sim.df$year >= 1950 & sim.df$Jul.pdsi <= dry, ]
    post.dry$class <- "Post-1950"
    post.dry$climclass <- "Dry_0.25"
    pre.wet <- sim.df[sim.df$year < 1950 & sim.df$Jul.pdsi >= wet, ]
    pre.wet$class <- "Pre-1950"
    pre.wet$climclass <- "Wet_0.25"
    post.wet <- sim.df[sim.df$year >= 1950 & sim.df$Jul.pdsi >= wet, ]
    post.wet$class <- "Post-1950"
    post.wet$climclass <- "Wet_0.25"
    similar.clims <- rbind(post.wet, pre.wet, pre.dry, post.dry)

    # Attach classifications (merge on year AND period class).
    sim.df <- merge(site.data, similar.clims[, c("year", "class", "climclass")],
                    by = c("year", "class"))
    # Save the classified years so we can pick trees to sample.
    write.csv(sim.df, paste0("outputs/data/Isotope_climate/", name,
                             "_wet_dry_climate_age_class.csv"))
    # Keep only the requested moisture class.
    sim.df <- sim.df[sim.df$climclass %in% climateclass, ]

    # ---- Modern trees (rows 1..n.sites) ----
    if (nrow(sim.df[sim.df$site == name & sim.df$ageclass == "Modern", ]) > 0) {
      results <- boot(data = sim.df[sim.df$ageclass == "Modern" & sim.df$year >= 1950, ],
                      statistic = bs, R = 2000, formula = model.func)
      int.cis <- boot.ci(boot.out = results, type = "norm", index = 1)    # intercept CI
      slope.cis <- boot.ci(boot.out = results, type = "norm", index = 2)  # slope CI
      coeffs[s, 3:4] <- results$t0
      coeffs[s, 1] <- name
      coeffs[s, 2] <- "Modern"
      coeffs[s, 5] <- as.data.frame(int.cis$normal)$V2
      coeffs[s, 6] <- as.data.frame(int.cis$normal)$V3
      coeffs[s, 7] <- as.data.frame(slope.cis$normal)$V2
      coeffs[s, 8] <- as.data.frame(slope.cis$normal)$V3
    } else {
      coeffs[s, 3:8] <- NA
      coeffs[s, 2] <- "Modern"
      coeffs[s, 1] <- name
    }

    # ---- Past trees (rows n.sites+1 .. 2*n.sites) ----
    p <- s + n.sites
    if (nrow(sim.df[sim.df$site == name & sim.df$ageclass == "Past", ]) > 0) {
      results <- boot(data = sim.df[sim.df$ageclass == "Past" & sim.df$year < 1950, ],
                      statistic = bs, R = 2000, formula = model.func)
      int.cis <- boot.ci(boot.out = results, type = "norm", index = 1)
      slope.cis <- boot.ci(boot.out = results, type = "norm", index = 2)
      coeffs[p, 3:4] <- results$t0
      coeffs[p, 1] <- name
      coeffs[p, 2] <- "Past"
      coeffs[p, 5] <- as.data.frame(int.cis$normal)$V2
      coeffs[p, 6] <- as.data.frame(int.cis$normal)$V3
      coeffs[p, 7] <- as.data.frame(slope.cis$normal)$V2
      coeffs[p, 8] <- as.data.frame(slope.cis$normal)$V3
    } else {
      coeffs[p, 3:8] <- NA
      # Fix: this no-data row was previously mislabelled "Modern".
      coeffs[p, 2] <- "Past"
      coeffs[p, 1] <- name
    }
  }

  coeffs <- data.frame(coeffs)
  colnames(coeffs) <- c("site", "age", "int.est", "slope.est",
                        "int.min", "int.max", "slope.min", "slope.max")
  coeffs$site <- as.character(coeffs$site)
  # Matrix storage coerced everything to character; restore numeric columns.
  for (col in c("int.est", "slope.est", "int.min", "int.max",
                "slope.min", "slope.max")) {
    coeffs[[col]] <- as.numeric(as.character(coeffs[[col]]))
  }
  coeffs
}
# Wet-year sensitivities by age class (Modern vs Past trees):
sens.jul.pdsi.age_wet.25 <- get.clim.sens.age.by.moisture(df =det.age.clim.ghcn.df, climateclass = "Wet_0.25", model.func = "RWI ~ Jul.pdsi" )
sens.jja.pdsi.age_wet.25 <- get.clim.sens.age.by.moisture(df =det.age.clim.ghcn.df, climateclass = "Wet_0.25", model.func = "RWI ~ JJA.pdsi" )
# get bootstrapped estimates of climate sensitivity for dry years, Modern and Past, before + after 1950
sens.jul.pdsi.age_dry.25 <- get.clim.sens.age.by.moisture(df =det.age.clim.ghcn.df, climateclass = "Dry_0.25", model.func = "RWI ~ Jul.pdsi" )
sens.jja.pdsi.age_dry.25 <- get.clim.sens.age.by.moisture(df =det.age.clim.ghcn.df, climateclass = "Dry_0.25", model.func = "RWI ~ JJA.pdsi" )
# plot slope estimates with bootstrap CIs
ggplot(sens.jul.pdsi.age_wet.25, aes(site, slope.est, color = age))+geom_point()+geom_errorbar(aes(ymin=slope.min, ymax = slope.max), size = 0.2, width = 0.5)
ggplot(sens.jja.pdsi.age_wet.25, aes(site, slope.est, color = age))+geom_point()+geom_errorbar(aes(ymin=slope.min, ymax = slope.max), size = 0.2, width = 0.5)
ggplot(sens.jul.pdsi.age_dry.25, aes(age, slope.est, color = age))+geom_point()+geom_errorbar(aes(ymin=slope.min, ymax = slope.max), size = 0.2, width = 0.5)+facet_wrap(~site)
ggplot(sens.jul.pdsi.age_dry.25, aes(site, slope.est, color = age))+geom_point()+geom_errorbar(aes(ymin=slope.min, ymax = slope.max), size = 0.2, width = 0.5)
ggplot(sens.jja.pdsi.age_dry.25, aes(site, slope.est, color = age))+geom_point()+geom_errorbar(aes(ymin=slope.min, ymax = slope.max), size = 0.2, width = 0.5)
# get bootstrapped differences between slopes:
# NOTE(review): this function looks unfinished/broken and does not appear to
# be called anywhere in this file. The site loop closes early (see note
# below), so the bootstrap section runs only once with variables left over
# from the final loop iteration, and the row index `s` is stale.
# Intended purpose (from the name and context): bootstrap the difference in
# RWI~climate slopes between the Pre-1950 and Post-1950 periods -- TODO
# confirm and repair before use.
slope.diff.boot<- function(df, climateclass ,model.func){
coeffs <- matrix ( 0, length(unique(df$site))*2, 8 ) # set up matrix for coefficients
yr <- 1895:1950
yr.post <- 1950:2014
df$class <- '9999'
df[df$year %in% yr,]$class <- 'Pre-1950'
df[df$year %in% yr.post,]$class <- 'Post-1950'
# NOTE(review): duplicate allocation; coeffs was already created above.
coeffs <- matrix ( 0, length(unique(df$site))*2, 8 ) # set up matrix for coefficients
for(s in 1: length(unique(df$site))){
name <- unique(df$site)[s]
site.data<- na.omit(df[df$site == name ,])
# classify this site's years by its own July PDSI quartiles
sim.df <- aggregate(Jul.pdsi~year, data = site.data, FUN = mean )
dry <- quantile(sim.df$Jul.pdsi, 0.25) # value of the driest years
wet <- quantile(sim.df$Jul.pdsi, 0.75) # value of the wettest years
pre.dry <- sim.df[sim.df$year < 1950 & sim.df$Jul.pdsi <= dry,]
pre.dry$class <- "Pre-1950"
pre.dry$climclass <- "Dry_0.25"
post.dry <- sim.df[sim.df$year >=1950 & sim.df$Jul.pdsi <= dry,]
post.dry$class <- "Post-1950"
post.dry$climclass <- "Dry_0.25"
pre.wet <- sim.df[sim.df$year < 1950 & sim.df$Jul.pdsi >= wet,]
pre.wet$class <- "Pre-1950"
pre.wet$climclass <- "Wet_0.25"
post.wet <- sim.df[sim.df$year >=1950 & sim.df$Jul.pdsi >= wet,]
post.wet$class <- "Post-1950"
post.wet$climclass <- "Wet_0.25"
similar.clims <- rbind(post.wet, pre.wet, pre.dry, post.dry)
#dfs <- det.age.clim.ghcn.df[det.age.clim.ghcn.df$site %in% "HIC",]
sim.df <- merge(site.data, similar.clims[,c("year", "class", "climclass")], by = c('year', "class"))
# only use wet years across the region:
sim.df <- sim.df[sim.df$climclass %in% climateclass,]
# nested-slopes model: per-period RWI slopes with no global intercept
formula <- "Jul.pdsi ~ class/RWI -1"
# NOTE(review): fit uses the full site.data, not the climclass-filtered sim.df
fit <- lm(formula, data=site.data)
print(unique(site.data$site))
print(summary(fit))
# NOTE(review): the site loop ends HERE, so everything below runs only once.
}
# function used in boot strapping below
bs <- function(formula, data, indices) {
d <- data[indices,] # allows boot to select sample
fit <- lm(formula, data=d)
return(coef(fit))
}
# for the "Modern" class:
if(nrow(site.data[site.data$site == name & site.data$ageclass == "Modern" ,]) > 0){
# bootstrapping the linear regression model
# NOTE(review): the next assignment is immediately overwritten below.
results <- boot(data=site.data[site.data$ageclass == "Modern" & site.data$year >= 1950 ,], statistic=bs, R=2000, formula=model.func)
results <- boot(data=site.data,statistic=bs, R=2000, formula=model.func)
int.cis <- boot.ci(boot.out = results, type = "norm", index = 1)# intercept
slope.cis <- boot.ci(boot.out = results, type = "norm", index = 2)
coeffs[s,3:4] <- results$t0
coeffs[s , 1] <- name
coeffs[s,2] <- "Modern"
coeffs[s,5] <- as.data.frame(int.cis$normal)$V2
coeffs[s,6] <- as.data.frame(int.cis$normal)$V3
coeffs[s,7] <- as.data.frame(slope.cis$normal)$V2
coeffs[s,8] <- as.data.frame(slope.cis$normal)$V3
} else{
#lmest <- lm(RWI ~ PDSI, data = df[df$site == name & df$ageclass == "Modern" ,])
coeffs[s,3:8] <- c(NA,NA)
coeffs[s , 2] <- "Modern"
coeffs[s,1] <- name
}
# for the "Past" class:
if(nrow(site.data[site.data$site == name & site.data$ageclass == "Past" ,]) > 0){
results <- boot(data=site.data[site.data$ageclass == "Past" & site.data$year < 1950 ,], statistic=bs, R=2000, formula=model.func)
int.cis <- boot.ci(boot.out = results, type = "norm", index = 1)# intercept
slope.cis <- boot.ci(boot.out = results, type = "norm", index = 2)
coeffs[s+length(unique(df$site)),3:4] <- results$t0
coeffs[s+length(unique(df$site)) , 1] <- name
coeffs[s+length(unique(df$site)),2] <- "Past"
coeffs[s+length(unique(df$site)),5] <- as.data.frame(int.cis$normal)$V2
coeffs[s+length(unique(df$site)),6] <- as.data.frame(int.cis$normal)$V3
coeffs[s+length(unique(df$site)),7] <- as.data.frame(slope.cis$normal)$V2
coeffs[s+length(unique(df$site)),8] <- as.data.frame(slope.cis$normal)$V3
}else{
#lmest2 <- lm(RWI ~ PDSI, data = df[df$site == name & df$ageclass == "Past" ,])
coeffs[s+length(unique(df$site)),3:8] <- c(NA,NA)
# NOTE(review): this no-data branch labels a "Past" row as "Modern".
coeffs[s +length(unique(df$site)), 2] <- "Modern"
coeffs[s+length(unique(df$site)),1] <- name
}
coeffs <- data.frame(coeffs)
colnames(coeffs) <- c("site","age",'int.est', "slope.est", "int.min","int.max", "slope.min", "slope.max")
coeffs$site <- as.character(coeffs$site)
coeffs$slope.est <- as.numeric(as.character(coeffs$slope.est))
coeffs$int.est <- as.numeric(as.character(coeffs$int.est))
coeffs$int.min <- as.numeric(as.character(coeffs$int.min))
coeffs$int.max <- as.numeric(as.character(coeffs$int.max))
coeffs$slope.min <- as.numeric(as.character(coeffs$slope.min))
coeffs$slope.max <- as.numeric(as.character(coeffs$slope.max))
coeffs
}
# get the bootstrapped correlations for wet years and dry years:
# Bootstrapped Pearson correlations between RWI and a climate variable for
# Modern vs Past trees, using only years in a given site-level moisture class.
#
# Args:
#   df           - data with columns site, year, Jul.pdsi, RWI, ageclass,
#                  and the climate column named by `clim`.
#   climateclass - moisture class to keep: "Wet_0.25" or "Dry_0.25".
#   clim         - name of the climate column to correlate with RWI,
#                  e.g. "Jul.pdsi".
#
# Returns a data.frame: one row per site x age class with the bootstrapped
# correlation estimate and its normal-approximation 95% CI.
get.clim.cor.age.by.moisture <- function(df, climateclass, clim) {
  # Period labels; 1950 falls in both ranges so it ends up "Post-1950".
  yr <- 1895:1950
  yr.post <- 1950:2014
  df$class <- '9999'
  df[df$year %in% yr, ]$class <- 'Pre-1950'
  df[df$year %in% yr.post, ]$class <- 'Post-1950'

  n.sites <- length(unique(df$site))
  # One row per site x age class; 5 cols: site, age, cor, ci.min, ci.max.
  # Fix: removed a dead 8-column allocation that was immediately overwritten.
  coeffs <- matrix(0, n.sites * 2, 5)

  # Statistic for boot(): correlation of the chosen climate column with RWI.
  boot.cor <- function(data, ind, colno) {
    return(cor(data[ind, c(colno)], data[ind, ]$RWI, use = "pairwise.complete.obs"))
  }

  for (s in 1:n.sites) {
    name <- unique(df$site)[s]
    site.data <- na.omit(df[df$site == name, ])

    # Classify this site's years by its own July PDSI quartiles.
    sim.df <- aggregate(Jul.pdsi ~ year, data = site.data, FUN = mean)
    dry <- quantile(sim.df$Jul.pdsi, 0.25)  # driest-25% threshold
    wet <- quantile(sim.df$Jul.pdsi, 0.75)  # wettest-25% threshold
    pre.dry <- sim.df[sim.df$year < 1950 & sim.df$Jul.pdsi <= dry, ]
    pre.dry$class <- "Pre-1950"
    pre.dry$climclass <- "Dry_0.25"
    post.dry <- sim.df[sim.df$year >= 1950 & sim.df$Jul.pdsi <= dry, ]
    post.dry$class <- "Post-1950"
    post.dry$climclass <- "Dry_0.25"
    pre.wet <- sim.df[sim.df$year < 1950 & sim.df$Jul.pdsi >= wet, ]
    pre.wet$class <- "Pre-1950"
    pre.wet$climclass <- "Wet_0.25"
    post.wet <- sim.df[sim.df$year >= 1950 & sim.df$Jul.pdsi >= wet, ]
    post.wet$class <- "Post-1950"
    post.wet$climclass <- "Wet_0.25"
    similar.clims <- rbind(post.wet, pre.wet, pre.dry, post.dry)
    # NOTE(review): merging on year only; site.data already carries a `class`
    # column, so the merge yields class.x/class.y. Harmless below (only
    # ageclass/year/climclass are used) but worth confirming.
    sim.df <- merge(site.data, similar.clims[, c("year", "class", "climclass")],
                    by = c("year"))
    # Keep only the requested moisture class.
    sim.df <- sim.df[sim.df$climclass %in% climateclass, ]
    site.data <- sim.df

    # ---- Modern trees (rows 1..n.sites) ----
    if (nrow(site.data[site.data$site == name & site.data$ageclass == "Modern", ]) > 0) {
      results <- boot(data = site.data[site.data$ageclass == "Modern" & site.data$year >= 1950, ],
                      colno = clim, statistic = boot.cor, R = 2000)
      cis <- boot.ci(boot.out = results, type = "norm")
      ci.mo <- cis$normal[2:3]  # lower/upper bounds of the normal CI
      coeffs[s, 3] <- results$t0
      coeffs[s, 1] <- name
      coeffs[s, 2] <- "Modern"
      coeffs[s, 4] <- ci.mo[1]
      coeffs[s, 5] <- ci.mo[2]
    } else {
      coeffs[s, 3:5] <- NA
      coeffs[s, 2] <- "Modern"
      coeffs[s, 1] <- name
    }

    # ---- Past trees (rows n.sites+1 .. 2*n.sites) ----
    p <- s + n.sites
    if (nrow(site.data[site.data$site == name & site.data$ageclass == "Past", ]) > 0) {
      results <- boot(data = site.data[site.data$ageclass == "Past" & site.data$year < 1950, ],
                      colno = clim, statistic = boot.cor, R = 2000)
      cis <- boot.ci(boot.out = results, type = "norm")
      ci.mo <- cis$normal[2:3]
      coeffs[p, 3] <- results$t0
      coeffs[p, 1] <- name
      coeffs[p, 2] <- "Past"
      coeffs[p, 4] <- ci.mo[1]
      coeffs[p, 5] <- ci.mo[2]
    } else {
      coeffs[p, 3:5] <- NA
      coeffs[p, 2] <- "Past"
      coeffs[p, 1] <- name
    }
  }

  coeffs <- data.frame(coeffs)
  colnames(coeffs) <- c("site", "age", "cor.est", "ci.min", "ci.max")
  coeffs$site <- as.character(coeffs$site)
  # Matrix storage coerced everything to character; restore numerics.
  coeffs$cor.est <- as.numeric(as.character(coeffs$cor.est))
  coeffs$ci.min <- as.numeric(as.character(coeffs$ci.min))
  coeffs$ci.max <- as.numeric(as.character(coeffs$ci.max))
  coeffs
}
# Bootstrapped RWI~PDSI correlations by age class, for dry and wet years:
cor.jul.pdsi.age_dry.25 <- get.clim.cor.age.by.moisture(df =det.age.clim.ghcn.df, climateclass = "Dry_0.25", clim = "Jul.pdsi" )
cor.jul.pdsi.age_wet.25 <- get.clim.cor.age.by.moisture(df =det.age.clim.ghcn.df, climateclass = "Wet_0.25", clim = "Jul.pdsi" )
cor.jja.pdsi.age_dry.25 <- get.clim.cor.age.by.moisture(df =det.age.clim.ghcn.df, climateclass = "Dry_0.25", clim = "JJA.pdsi" )
cor.jja.pdsi.age_wet.25 <- get.clim.cor.age.by.moisture(df =det.age.clim.ghcn.df, climateclass = "Wet_0.25", clim = "JJA.pdsi" )
# Dry-year correlations per site, Modern vs Past, with bootstrap CIs:
ggplot(cor.jul.pdsi.age_dry.25, aes(site, cor.est, color = age))+geom_point()+geom_errorbar(aes(ymin=ci.min, ymax = ci.max), size = 0.2, width = 0.5)
ggplot(cor.jja.pdsi.age_dry.25, aes(site, cor.est, color = age))+geom_point()+geom_errorbar(aes(ymin=ci.min, ymax = ci.max), size = 0.2, width = 0.5)
#-------------------Get correlation by age class, using each tree------------------------
# get a correlation for each tree:
# Per-tree bootstrapped correlations between RWI and a climate variable,
# Modern vs Past, using only years in a given tree-level moisture class.
#
# Args:
#   df           - data with columns site, ID, year, Jul.pdsi, RWI, ageclass,
#                  and the climate column named by `clim`.
#   climateclass - moisture class to keep: "Wet_0.25" or "Dry_0.25".
#   clim         - name of the climate column to correlate with RWI.
#
# Returns a data.frame: one row per tree (ID) x age class with the
# bootstrapped correlation estimate and its normal 95% CI.
get.clim.cor.age.by.moist.ID <- function(df, climateclass, clim) {
  # Period labels; 1950 is in both ranges so it ends up "Post-1950".
  yr <- 1895:1950
  yr.post <- 1950:2014
  df$class <- '9999'
  df[df$year %in% yr, ]$class <- 'Pre-1950'
  df[df$year %in% yr.post, ]$class <- 'Post-1950'

  n.ids <- length(unique(df$ID))
  # One row per tree x age class; 6 cols: site, ID, age, cor, ci.min, ci.max.
  # Fix: removed a dead 8-column allocation that was immediately overwritten.
  coeffs <- matrix(0, n.ids * 2, 6)

  # Statistic for boot(): correlation of the chosen climate column with RWI.
  boot.cor <- function(data, ind, colno) {
    return(cor(data[ind, c(colno)], data[ind, ]$RWI, use = "pairwise.complete.obs"))
  }

  for (s in 1:n.ids) {
    IDname <- unique(df$ID)[s]
    # Keep rows with RWI present even where DBH is missing.
    site.data <- df[df$ID == IDname & !is.na(df$RWI), ]
    # Assumes each tree ID belongs to a single site -- TODO confirm.
    name <- unique(site.data$site)

    # Classify this tree's years by its own July PDSI quartiles.
    sim.df <- aggregate(Jul.pdsi ~ year, data = site.data, FUN = mean)
    dry <- quantile(sim.df$Jul.pdsi, 0.25)  # driest-25% threshold
    wet <- quantile(sim.df$Jul.pdsi, 0.75)  # wettest-25% threshold
    sim.df$class <- ifelse(sim.df$year < 1950, "Pre-1950", "Post-1950")
    sim.df$climclass <- ifelse(sim.df$Jul.pdsi <= dry, "Dry_0.25",
                        ifelse(sim.df$Jul.pdsi >= wet, "Wet_0.25", "NA"))
    pre.dry <- sim.df[sim.df$class %in% "Pre-1950" & sim.df$climclass %in% "Dry_0.25", ]
    post.dry <- sim.df[sim.df$class %in% "Post-1950" & sim.df$climclass %in% "Dry_0.25", ]
    pre.wet <- sim.df[sim.df$class %in% "Pre-1950" & sim.df$climclass %in% "Wet_0.25", ]
    post.wet <- sim.df[sim.df$class %in% "Post-1950" & sim.df$climclass %in% "Wet_0.25", ]
    similar.clims <- rbind(post.wet, pre.wet, pre.dry, post.dry)
    sim.df <- merge(site.data, similar.clims[, c("year", "class", "climclass")], by = c("year"))
    # Keep only the requested moisture class.
    sim.df <- sim.df[sim.df$climclass %in% climateclass, ]
    site.data <- sim.df

    # ---- Modern (rows 1..n.ids); needs >2 years to bootstrap ----
    if (nrow(site.data[site.data$ID == IDname & site.data$ageclass == "Modern", ]) > 2) {
      results <- boot(data = site.data[site.data$ageclass == "Modern" & site.data$year >= 1950, ],
                      colno = clim, statistic = boot.cor, R = 2000)
      cis <- boot.ci(boot.out = results, type = "norm")
      ci.mo <- cis$normal[2:3]  # lower/upper bounds of the normal CI
      coeffs[s, 4] <- results$t0
      coeffs[s, 1] <- name
      coeffs[s, 2] <- IDname
      coeffs[s, 3] <- "Modern"
      coeffs[s, 5] <- ci.mo[1]
      coeffs[s, 6] <- ci.mo[2]
    } else {
      coeffs[s, 4:6] <- NA
      coeffs[s, 3] <- "Modern"
      coeffs[s, 1] <- name
      coeffs[s, 2] <- IDname
    }

    # ---- Past (rows n.ids+1 .. 2*n.ids) ----
    p <- s + n.ids
    if (nrow(site.data[site.data$ID == IDname & site.data$ageclass == "Past", ]) > 2) {
      results <- boot(data = site.data[site.data$ageclass == "Past" & site.data$year < 1950, ],
                      colno = clim, statistic = boot.cor, R = 2000)
      cis <- boot.ci(boot.out = results, type = "norm")
      ci.mo <- cis$normal[2:3]
      coeffs[p, 4] <- results$t0
      coeffs[p, 1] <- name
      coeffs[p, 2] <- IDname
      coeffs[p, 3] <- "Past"
      coeffs[p, 5] <- ci.mo[1]
      coeffs[p, 6] <- ci.mo[2]
    } else {
      coeffs[p, 4:6] <- NA
      coeffs[p, 3] <- "Past"
      coeffs[p, 2] <- IDname
      coeffs[p, 1] <- name
    }
  }

  coeffs <- data.frame(coeffs)
  colnames(coeffs) <- c("site", "ID", "age", "cor.est", "ci.min", "ci.max")
  coeffs$site <- as.character(coeffs$site)
  # Matrix storage coerced everything to character; restore numerics.
  coeffs$cor.est <- as.numeric(as.character(coeffs$cor.est))
  coeffs$ci.min <- as.numeric(as.character(coeffs$ci.min))
  coeffs$ci.max <- as.numeric(as.character(coeffs$ci.max))
  coeffs
}
# Per-tree dry-year correlations by age class:
cor.jul.pdsi.age_dry.25.id <- get.clim.cor.age.by.moist.ID(df = det.age.clim.ghcn.df, climateclass = "Dry_0.25", clim = "Jul.pdsi" )
ggplot(cor.jul.pdsi.age_dry.25.id, aes(age, cor.est, color = age))+geom_boxplot()+facet_wrap(~site)#+geom_errorbar(aes(ymin=ci.min, ymax = ci.max), size = 0.2, width = 0.5)
# NOTE(review): the next line is an exact duplicate of the plot above.
ggplot(cor.jul.pdsi.age_dry.25.id, aes(age, cor.est, color = age))+geom_boxplot()+facet_wrap(~site)#+geom_errorbar(aes(ymin=ci.min, ymax = ci.max), size = 0.2, width = 0.5)
# get a senstivity for each tree at each site:
# Per-tree bootstrapped RWI ~ climate sensitivity (Modern vs Past), using
# only years in a given tree-level moisture class.
#
# Args:
#   df           - data with columns site, ID, year, Jul.pdsi, RWI, ageclass,
#                  and the predictor named in model.func.
#   climateclass - moisture class to keep: "Wet_0.25" or "Dry_0.25".
#   model.func   - model formula as a string, e.g. "RWI ~ Jul.pdsi".
#
# Returns a data.frame with one row per tree (ID) x age class.
# NOTE(review): column 1 holds the tree ID and column 2 the site name, but
# the colnames below label them "site","ID" (swapped). Downstream code
# compensates by reassigning colnames[1:2] <- c("ID","site"), so the labels
# are deliberately left unchanged here.
get.clim.sens.age.by.moisture.ID <- function(df, climateclass, model.func) {
  # Period labels; 1950 is in both ranges so it ends up "Post-1950".
  yr <- 1895:1950
  yr.post <- 1950:2014
  df$class <- '9999'
  df[df$year %in% yr, ]$class <- 'Pre-1950'
  df[df$year %in% yr.post, ]$class <- 'Post-1950'

  n.ids <- length(unique(df$ID))
  # One row per tree x age class; 9 columns to match colnames below.
  # Fix: the matrix was allocated twice; a single allocation suffices.
  coeffs <- matrix(0, n.ids * 2, 9)

  # Statistic for boot(): refit the linear model on each resample.
  bs <- function(formula, data, indices) {
    fit <- lm(formula, data = data[indices, ])
    return(coef(fit))
  }

  for (s in 1:n.ids) {
    IDname <- unique(df$ID)[s]
    # Keep rows with RWI present even where DBH is missing.
    site.data <- df[df$ID == IDname & !is.na(df$RWI), ]
    # Assumes each tree ID belongs to a single site -- TODO confirm.
    name <- unique(site.data$site)

    # Classify this tree's years by its own July PDSI quartiles.
    sim.df <- aggregate(Jul.pdsi ~ year, data = site.data, FUN = mean)
    dry <- quantile(sim.df$Jul.pdsi, 0.25)  # driest-25% threshold
    wet <- quantile(sim.df$Jul.pdsi, 0.75)  # wettest-25% threshold
    sim.df$class <- ifelse(sim.df$year < 1950, "Pre-1950", "Post-1950")
    sim.df$climclass <- ifelse(sim.df$Jul.pdsi <= dry, "Dry_0.25",
                        ifelse(sim.df$Jul.pdsi >= wet, "Wet_0.25", "NA"))
    pre.dry <- sim.df[sim.df$class %in% "Pre-1950" & sim.df$climclass %in% "Dry_0.25", ]
    post.dry <- sim.df[sim.df$class %in% "Post-1950" & sim.df$climclass %in% "Dry_0.25", ]
    pre.wet <- sim.df[sim.df$class %in% "Pre-1950" & sim.df$climclass %in% "Wet_0.25", ]
    post.wet <- sim.df[sim.df$class %in% "Post-1950" & sim.df$climclass %in% "Wet_0.25", ]
    similar.clims <- rbind(post.wet, pre.wet, pre.dry, post.dry)
    sim.df <- merge(site.data, similar.clims[, c("year", "class", "climclass")], by = c("year"))
    # Keep only the requested moisture class.
    sim.df <- sim.df[sim.df$climclass %in% climateclass, ]

    # ---- Modern (rows 1..n.ids); needs >1 year to fit and bootstrap ----
    if (nrow(sim.df[sim.df$ID == IDname & sim.df$ageclass == "Modern", ]) > 1) {
      results <- boot(data = sim.df[sim.df$ageclass == "Modern" & sim.df$year >= 1950, ],
                      statistic = bs, R = 2000, formula = model.func)
      int.cis <- boot.ci(boot.out = results, type = "norm", index = 1)    # intercept CI
      slope.cis <- boot.ci(boot.out = results, type = "norm", index = 2)  # slope CI
      coeffs[s, 4:5] <- results$t0
      coeffs[s, 2] <- name
      coeffs[s, 1] <- IDname
      coeffs[s, 3] <- "Modern"
      coeffs[s, 6] <- as.data.frame(int.cis$normal)$V2
      coeffs[s, 7] <- as.data.frame(int.cis$normal)$V3
      coeffs[s, 8] <- as.data.frame(slope.cis$normal)$V2
      coeffs[s, 9] <- as.data.frame(slope.cis$normal)$V3
    } else {
      coeffs[s, 4:9] <- NA
      coeffs[s, 3] <- "Modern"
      coeffs[s, 2] <- name
      coeffs[s, 1] <- IDname
    }

    # ---- Past (rows n.ids+1 .. 2*n.ids) ----
    p <- s + n.ids
    if (nrow(sim.df[sim.df$ID == IDname & sim.df$ageclass == "Past", ]) > 1) {
      results <- boot(data = sim.df[sim.df$ageclass == "Past" & sim.df$year < 1950, ],
                      statistic = bs, R = 2000, formula = model.func)
      int.cis <- boot.ci(boot.out = results, type = "norm", index = 1)
      slope.cis <- boot.ci(boot.out = results, type = "norm", index = 2)
      coeffs[p, 4:5] <- results$t0
      coeffs[p, 2] <- name
      coeffs[p, 1] <- IDname
      coeffs[p, 3] <- "Past"
      coeffs[p, 6] <- as.data.frame(int.cis$normal)$V2
      coeffs[p, 7] <- as.data.frame(int.cis$normal)$V3
      coeffs[p, 8] <- as.data.frame(slope.cis$normal)$V2
      coeffs[p, 9] <- as.data.frame(slope.cis$normal)$V3
    } else {
      coeffs[p, 4:9] <- NA
      coeffs[p, 3] <- "Past"
      coeffs[p, 2] <- name
      coeffs[p, 1] <- IDname
    }
  }

  coeffs <- data.frame(coeffs)
  # See NOTE above: first two labels are intentionally swapped vs contents.
  colnames(coeffs) <- c("site", "ID", "age", "int.est", "slope.est",
                        "int.min", "int.max", "slope.min", "slope.max")
  coeffs$site <- as.character(coeffs$site)
  # Matrix storage coerced everything to character; restore numeric columns.
  coeffs$slope.est <- as.numeric(as.character(coeffs$slope.est))
  coeffs$int.est <- as.numeric(as.character(coeffs$int.est))
  coeffs$int.min <- as.numeric(as.character(coeffs$int.min))
  coeffs$int.max <- as.numeric(as.character(coeffs$int.max))
  coeffs$slope.min <- as.numeric(as.character(coeffs$slope.min))
  coeffs$slope.max <- as.numeric(as.character(coeffs$slope.max))
  coeffs
}
# Per-tree dry-year sensitivities by age class:
jul.pdsi.age_dry.25.id <- get.clim.sens.age.by.moisture.ID(df =det.age.clim.ghcn.df, climateclass = "Dry_0.25", model.func = "RWI ~ Jul.pdsi" )
# The function fills column 1 with ID and column 2 with site but labels them
# "site","ID"; swap the labels so they match the contents.
colnames(jul.pdsi.age_dry.25.id)[1:2] <- c("ID", "site")
jja.pdsi.age_dry.25.id <- get.clim.sens.age.by.moisture.ID(df =det.age.clim.ghcn.df, climateclass = "Dry_0.25", model.func = "RWI ~ JJA.pdsi" )
colnames(jja.pdsi.age_dry.25.id)[1:2] <- c("ID", "site")
ggplot(jul.pdsi.age_dry.25.id, aes(age, slope.est, color = age))+geom_boxplot()+facet_wrap(~ID)#+geom_errorbar(aes(ymin=ci.min, ymax = ci.max), size = 0.2, width = 0.5)
# Test whether Modern and Past per-tree slopes differ on average:
dfnona<- jul.pdsi.age_dry.25.id[complete.cases(jul.pdsi.age_dry.25.id),]
t.test(dfnona[dfnona$age %in% "Modern",]$slope.est, dfnona[dfnona$age %in% "Past",]$slope.est)
# aesthetics off with this:
ggplot(cor.jul.pdsi.age_dry.25, aes(site, cor.est, fill = age))+geom_bar(stat="identity", position = position_dodge(width = 0.9))#+geom_errorbar(data = cor.jul.pdsi.age_dry.25,aes(ymin=ci.min, ymax = ci.max,fill = age, position = position_dodge(width = 0.5)), size = 0.2, width = 0.5)
ggplot(cor.jul.pdsi.age_wet.25, aes(site, cor.est, color = age))+geom_point()+geom_errorbar(aes(ymin=ci.min, ymax = ci.max), size = 0.2, width = 0.5)
ggplot(cor.jja.pdsi.age_dry.25, aes(site, cor.est, fill = age))+geom_bar(stat="identity", position = position_dodge(width = 0.9))#+geom_errorbar(data = cor.jul.pdsi.age_dry.25,aes(ymin=ci.min, ymax = ci.max,fill = age, position = position_dodge(width = 0.5)), size = 0.2, width = 0.5)
#ggplot(cor.jja.pdsi.age_wet.25, aes(site, cor.est, color = age))+geom_point()+geom_errorbar(aes(ymin=ci.min, ymax = ci.max), size = 0.2, width = 0.5)
# Bootstrapped RWI-climate correlations by age class, within one
# moisture class, split by DBH class.
#
# For each DBH class and each site: years are classified per site into
# the driest / wettest quartile of mean site Jul.pdsi, then the
# correlation between RWI and the column named in `clim` is
# bootstrapped (boot, R = 2000, normal-approximation 95% CIs)
# separately for "Modern" trees in years >= 1950 and "Past" trees in
# years < 1950.
#
# Args:
#   df:           data frame with site, year, dbhclass, ageclass, RWI,
#                 Jul.pdsi and the column named in `clim`.
#   climateclass: which moisture quartile to keep ("Dry_0.25" or "Wet_0.25").
#   clim:         name of the climate column to correlate with RWI.
# Returns:
#   data.frame with columns site, age, cor.est, ci.min, ci.max,
#   dbhclass; rows from all DBH classes stacked (first half of each
#   class block is Modern, second half Past).
# NOTE(review): the wet/dry quartiles are always computed from Jul.pdsi,
# even when `clim` is another variable (e.g. JJA.pdsi) - confirm intended.
get.clim.cor.age.by.moisture.dbh <- function(df, climateclass, clim){
yr <- 1895:1950
yr.post <- 1950:2014
df$class <- '9999'
df[df$year %in% yr,]$class <- 'Pre-1950'
df[df$year %in% yr.post,]$class <- 'Post-1950'
# statistic used in boot strapping below: correlation of the chosen
# climate column with RWI on the resampled rows
boot.cor <- function(data, ind, colno ){
return(cor(data[ind,c(colno)], data[ind,]$RWI, use = "pairwise.complete.obs"))
}
bydbh <- list()
# for each dbh class, lets get the sensitivy to climate:
for(d in 1:length(unique(df$dbhclass))){
sizeclass <- unique(df$dbhclass)[d]
coeffs <- matrix ( 0, length(unique(df$site))*2, 5 ) # rows 1..n = Modern, n+1..2n = Past
for(s in 1: length(unique(df$site))) {
name <- unique(df$site)[s]
site.data<- na.omit(df[df$site == name,])
# site-level annual mean Jul.pdsi, used to classify years into quartiles
sim.df <- aggregate(Jul.pdsi~year, data = site.data, FUN = mean )
dry <- quantile(sim.df$Jul.pdsi, 0.25) # value of the driest years
wet <- quantile(sim.df$Jul.pdsi, 0.75) # value of the wettest years
pre.dry <- sim.df[sim.df$year < 1950 & sim.df$Jul.pdsi <= dry,]
pre.dry$class <- "Pre-1950"
pre.dry$climclass <- "Dry_0.25"
post.dry <- sim.df[sim.df$year >=1950 & sim.df$Jul.pdsi <= dry,]
post.dry$class <- "Post-1950"
post.dry$climclass <- "Dry_0.25"
pre.wet <- sim.df[sim.df$year < 1950 & sim.df$Jul.pdsi >= wet,]
pre.wet$class <- "Pre-1950"
pre.wet$climclass <- "Wet_0.25"
post.wet <- sim.df[sim.df$year >=1950 & sim.df$Jul.pdsi >= wet,]
post.wet$class <- "Post-1950"
post.wet$climclass <- "Wet_0.25"
similar.clims <- rbind(post.wet, pre.wet, pre.dry, post.dry)
#dfs <- det.age.clim.ghcn.df[det.age.clim.ghcn.df$site %in% "HIC",]
# attach the year classification back onto the tree-level rows
sim.df <- merge(site.data, similar.clims[,c("year", "class", "climclass")], by = c('year'))
# keep only the requested moisture class and the current DBH class:
sim.df <- sim.df[sim.df$climclass %in% climateclass & sim.df$dbhclass %in% sizeclass,]
# for the "Modern" class (needs at least 2 rows to correlate):
if(nrow(sim.df[sim.df$site == name & sim.df$ageclass == "Modern" ,]) > 1){
# bootstrapping the correlation coefficients:
results <- boot(data=sim.df[sim.df$ageclass == "Modern" & sim.df$year >= 1950 ,], colno = clim, statistic=boot.cor, R=2000)
#int.cis <- boot.ci(boot.out = results, type = "norm", index = 1)# intercept
#slope.cis <- boot.ci(boot.out = results, type = "norm", index = 2)
cis <- boot.ci(boot.out = results, type = "norm")
ci.mo <- cis$normal[2:3]
t <- results$t0
coeffs[s,3] <-t
coeffs[s , 1] <- name
coeffs[s,2] <- "Modern"
coeffs[s,4] <- ci.mo[1]
coeffs[s,5] <- ci.mo[2]
} else{
# not enough Modern rows: record an NA row so the site still appears
coeffs[s,3:5] <- c(NA,NA, NA)
coeffs[s , 2] <- "Modern"
coeffs[s,1] <- name
}
# for the "Past" class (needs at least 3 rows):
if(nrow(sim.df[sim.df$site == name & sim.df$ageclass == "Past" & sim.df$year < 1950 ,]) > 2){
results <- boot(data=sim.df[sim.df$ageclass == "Past" & sim.df$year < 1950 ,], colno = clim, statistic=boot.cor, R=2000)
# bootstrapping the correlation coefficients:
#int.cis <- boot.ci(boot.out = results, type = "norm", index = 1)# intercept
#slope.cis <- boot.ci(boot.out = results, type = "norm", index = 2)
cis <- boot.ci(boot.out = results, type = "norm")
ci.mo <- cis$normal[2:3]
t <- results$t0
coeffs[s+length(unique(df$site)),3] <-t
coeffs[s+length(unique(df$site)) , 1] <- name
coeffs[s+length(unique(df$site)),2] <- "Past"
coeffs[s+length(unique(df$site)),4] <- ci.mo[1]
coeffs[s+length(unique(df$site)),5] <- ci.mo[2]
}else{
# not enough Past rows: NA placeholder row
coeffs[s+length(unique(df$site)),3:5] <- c(NA,NA, NA)
coeffs[s +length(unique(df$site)), 2] <- "Past"
coeffs[s+length(unique(df$site)),1] <- name
}
}
coeffs <- data.frame(coeffs)
colnames(coeffs) <- c("site","age",'cor.est', "ci.min", "ci.max")
coeffs$site <- as.character(coeffs$site)
# site names coerced the matrix to character; convert numerics back
coeffs$cor.est <- as.numeric(as.character(coeffs$cor.est))
coeffs$ci.min <- as.numeric(as.character(coeffs$ci.min))
coeffs$ci.max <- as.numeric(as.character(coeffs$ci.max))
#coeffs$slope.min <- as.numeric(as.character(coeffs$slope.min))
#coeffs$slope.max <- as.numeric(as.character(coeffs$slope.max))
coeffs$dbhclass <- sizeclass
bydbh[[d]]<- coeffs
}
names(bydbh) <- unique(df$dbhclass)
bydbh.df <- do.call(rbind, bydbh) # make list into a dataframe to output!
bydbh.df
}
# ------------------------------------------------------------------
# Correlations by DBH class (site MOU excluded from all four runs)
# ------------------------------------------------------------------
cor.jul.pdsi.age_wet.25.dbh <- get.clim.cor.age.by.moisture.dbh(df =det.age.clim.ghcn.df[!det.age.clim.ghcn.df$site %in% "MOU",], climateclass = "Wet_0.25", clim = "Jul.pdsi" )
cor.jul.pdsi.age_dry.25.dbh <- get.clim.cor.age.by.moisture.dbh(df =det.age.clim.ghcn.df[!det.age.clim.ghcn.df$site %in% "MOU",], climateclass = "Dry_0.25", clim = "Jul.pdsi" )
cor.jja.pdsi.age_wet.25.dbh <- get.clim.cor.age.by.moisture.dbh(df =det.age.clim.ghcn.df[!det.age.clim.ghcn.df$site %in% "MOU",], climateclass = "Wet_0.25", clim = "JJA.pdsi" )
cor.jja.pdsi.age_dry.25.dbh <- get.clim.cor.age.by.moisture.dbh(df =det.age.clim.ghcn.df[!det.age.clim.ghcn.df$site %in% "MOU",], climateclass = "Dry_0.25", clim = "JJA.pdsi" )
ggplot(cor.jul.pdsi.age_wet.25.dbh, aes(site, cor.est, color = age))+geom_point()+geom_errorbar(aes(ymin=ci.min, ymax = ci.max), size = 0.2, width = 0.5)+facet_wrap(~dbhclass)
ggplot(cor.jul.pdsi.age_dry.25.dbh, aes(site, cor.est, color = age))+geom_point()+geom_errorbar(aes(ymin=ci.min, ymax = ci.max), size = 0.2, width = 0.5)+facet_wrap(~dbhclass)
ggplot(cor.jja.pdsi.age_dry.25.dbh, aes(site, cor.est, color = age))+geom_point()+geom_errorbar(aes(ymin=ci.min, ymax = ci.max), size = 0.2, width = 0.5)+facet_wrap(~dbhclass)
# july pdsi is signicantly increaseing
# NOTE(review): `df` here is whatever was assigned last at the top level
# (it is reassigned later at "df <- test.ghcn.df"), and this comment
# contradicts the "# no significant change" note on the lm below - verify.
ggplot(df, aes(year, Jul.pdsi))+geom_point()+stat_smooth(method = "lm")
test <- lm(Jul.pdsi~year, data = df)# no significant change
##########################################################################
# get a function to extract the senstivity of Growth-climate relationship of each site
##########################################################################
# function to extract whole time series slope of lm(RWI ~ PDSI)
# Whole-record growth-climate sensitivity for each site.
#
# For every site in `df`, fits the linear model given by `model.func`
# (e.g. "RWI ~ Jul.pdsi") on the site's complete cases and bootstraps
# the intercept and slope (boot, R = 2000) to get normal-approximation
# 95% confidence intervals.
#
# Args:
#   df:         data frame with a `site` column plus the variables named
#               in `model.func`; NA rows are dropped per site.
#   model.func: model formula as a character string (intercept + one slope).
# Returns:
#   data.frame, one row per site: site, int.est, slope.est,
#   int.min/int.max (intercept CI), slope.min/slope.max (slope CI).
get.clim.sensitivity <- function(df, model.func){
  sites <- unique(df$site)
  coeffs <- matrix(0, length(sites), 7) # one row per site
  # Statistic for boot(): refit the model on the resampled rows and
  # return its coefficients. Hoisted out of the site loop - it does not
  # depend on the site.
  bs <- function(formula, data, indices) {
    d <- data[indices, ] # allows boot to select sample
    fit <- lm(formula, data = d)
    return(coef(fit))
  }
  # seq_along() (instead of 1:length(...)) is safe even with zero sites
  for (s in seq_along(sites)) {
    name <- sites[s]
    site.data <- na.omit(df[df$site == name, ])
    results <- boot(data = site.data, statistic = bs,
                    R = 2000, formula = model.func)
    int.cis <- boot.ci(boot.out = results, type = "norm", index = 1)   # intercept CI
    slope.cis <- boot.ci(boot.out = results, type = "norm", index = 2) # slope CI
    coeffs[s, 2:3] <- results$t0
    coeffs[s, 1] <- name
    coeffs[s, 4] <- as.data.frame(int.cis$normal)$V2
    coeffs[s, 5] <- as.data.frame(int.cis$normal)$V3
    coeffs[s, 6] <- as.data.frame(slope.cis$normal)$V2
    coeffs[s, 7] <- as.data.frame(slope.cis$normal)$V3
  }
  coeffs <- data.frame(coeffs)
  colnames(coeffs) <- c("site", 'int.est', "slope.est", "int.min", "int.max", "slope.min", "slope.max")
  coeffs$site <- as.character(coeffs$site)
  # Assigning character site names coerced the whole matrix to
  # character; convert the numeric columns back in one pass.
  num.cols <- c("int.est", "slope.est", "int.min", "int.max", "slope.min", "slope.max")
  coeffs[num.cols] <- lapply(coeffs[num.cols], function(x) as.numeric(as.character(x)))
  coeffs
}
# get all the whole-record sensitivities for pdsi (GHCN climate):
# NOTE(review): `df` is assigned here but the calls below all use
# det.age.clim.ghcn.df / det.age.clim.prism.df directly - `df` only
# feeds the year-split call further down; confirm that is intended.
df <- test.ghcn.df
pdsi.sens <- get.clim.sensitivity(df = det.age.clim.ghcn.df, model.func = "RWI ~ PDSI")
Julpdsi.sens <- get.clim.sensitivity(df = det.age.clim.ghcn.df, model.func = "RWI ~ Jul.pdsi")
JJApdsi.sens <- get.clim.sensitivity(df = det.age.clim.ghcn.df, model.func = "RWI ~ JJA.pdsi")
TMIN.sens <- get.clim.sensitivity(df = det.age.clim.ghcn.df, model.func = "RWI ~ TMIN")
May.pr.sens <- get.clim.sensitivity(df = det.age.clim.ghcn.df, model.func = "RWI ~ MAY.p")
# slope estimates per site with bootstrap CIs as error bars
ggplot(pdsi.sens, aes(site, slope.est))+geom_bar(stat = "identity")+geom_errorbar(aes(ymin=slope.min, ymax = slope.max), size = 0.2, width = 0.5)
ggplot(Julpdsi.sens, aes(site, slope.est))+geom_bar(stat = "identity")+geom_errorbar(aes(ymin=slope.min, ymax = slope.max), size = 0.2, width = 0.5)
ggplot(JJApdsi.sens, aes(site, slope.est))+geom_bar(stat = "identity")+geom_errorbar(aes(ymin=slope.min, ymax = slope.max), size = 0.2, width = 0.5)
ggplot(TMIN.sens, aes(site, slope.est))+geom_bar(stat = "identity")+geom_errorbar(aes(ymin=slope.min, ymax = slope.max), size = 0.2, width = 0.5)
ggplot(May.pr.sens, aes(site, slope.est))+geom_bar(stat = "identity")+geom_errorbar(aes(ymin=slope.min, ymax = slope.max), size = 0.2, width = 0.5)
# for prism data (note: TMIN.sens computed above from GHCN is overwritten here):
VPDmax.sens <- get.clim.sensitivity(df = det.age.clim.prism.df, model.func = "RWI ~ VPDmax")
JulVPDmax.sens <- get.clim.sensitivity(df = det.age.clim.prism.df, model.func = "RWI ~ jul.VPDmax")
TMIN.sens <- get.clim.sensitivity(df = det.age.clim.prism.df, model.func = "RWI ~ TMIN")
BAL.sens <- get.clim.sensitivity(df = det.age.clim.prism.df, model.func = "RWI ~ BAL")
ggplot(VPDmax.sens, aes(site, slope.est))+geom_bar(stat = "identity")+geom_errorbar(aes(ymin=slope.min, ymax = slope.max), size = 0.2, width = 0.5)
ggplot(JulVPDmax.sens, aes(site, slope.est))+geom_bar(stat = "identity")+geom_errorbar(aes(ymin=slope.min, ymax = slope.max), size = 0.2, width = 0.5)
ggplot(TMIN.sens, aes(site, slope.est))+geom_bar(stat = "identity")+geom_errorbar(aes(ymin=slope.min, ymax = slope.max), size = 0.2, width = 0.5)
ggplot(BAL.sens, aes(site, slope.est))+geom_bar(stat = "identity")+geom_errorbar(aes(ymin=slope.min, ymax = slope.max), size = 0.2, width = 0.5)
# function to extract slopes for Modern an Past trees of lm(RWI~PDSI)
# Growth-climate sensitivity estimated separately for "Modern" and
# "Past" trees at each site.
#
# For each site the model in `model.func` is bootstrapped (boot,
# R = 2000) twice: on Modern trees restricted to years >= 1950, and on
# Past trees restricted to years < 1950. Normal-approximation 95% CIs
# are reported for the intercept and slope. A site with no data in an
# age class gets an NA placeholder row (age label still filled in).
#
# Args:
#   df:         data frame with columns site, ageclass ("Modern"/"Past"),
#               year, plus the variables named in `model.func`.
#   model.func: model formula as a character string.
# Returns:
#   data.frame with 2 rows per site (rows 1..n Modern, n+1..2n Past):
#   site, age, int.est, slope.est, int.min, int.max, slope.min, slope.max.
get.clim.sens.age <- function(df, model.func){
  sites <- unique(df$site)
  n.sites <- length(sites)
  coeffs <- matrix(0, n.sites * 2, 8) # rows 1..n = Modern, n+1..2n = Past
  # Statistic for boot(): coefficients of the model refit on the
  # resampled rows. Hoisted out of the loop - it is site-independent.
  bs <- function(formula, data, indices) {
    d <- data[indices, ] # allows boot to select sample
    fit <- lm(formula, data = d)
    return(coef(fit))
  }
  for (s in seq_along(sites)) {
    name <- sites[s]
    site.data <- na.omit(df[df$site == name, ])
    # --- "Modern" trees, years >= 1950 ---
    if (nrow(site.data[site.data$site == name & site.data$ageclass == "Modern", ]) > 0) {
      results <- boot(data = site.data[site.data$ageclass == "Modern" & site.data$year >= 1950, ],
                      statistic = bs, R = 2000, formula = model.func)
      int.cis <- boot.ci(boot.out = results, type = "norm", index = 1)   # intercept CI
      slope.cis <- boot.ci(boot.out = results, type = "norm", index = 2) # slope CI
      coeffs[s, 3:4] <- results$t0
      coeffs[s, 1] <- name
      coeffs[s, 2] <- "Modern"
      coeffs[s, 5] <- as.data.frame(int.cis$normal)$V2
      coeffs[s, 6] <- as.data.frame(int.cis$normal)$V3
      coeffs[s, 7] <- as.data.frame(slope.cis$normal)$V2
      coeffs[s, 8] <- as.data.frame(slope.cis$normal)$V3
    } else {
      # no Modern trees at this site: NA placeholder row
      coeffs[s, 3:8] <- NA
      coeffs[s, 2] <- "Modern"
      coeffs[s, 1] <- name
    }
    # --- "Past" trees, years < 1950 ---
    if (nrow(site.data[site.data$site == name & site.data$ageclass == "Past", ]) > 0) {
      results <- boot(data = site.data[site.data$ageclass == "Past" & site.data$year < 1950, ],
                      statistic = bs, R = 2000, formula = model.func)
      int.cis <- boot.ci(boot.out = results, type = "norm", index = 1)
      slope.cis <- boot.ci(boot.out = results, type = "norm", index = 2)
      coeffs[s + n.sites, 3:4] <- results$t0
      coeffs[s + n.sites, 1] <- name
      coeffs[s + n.sites, 2] <- "Past"
      coeffs[s + n.sites, 5] <- as.data.frame(int.cis$normal)$V2
      coeffs[s + n.sites, 6] <- as.data.frame(int.cis$normal)$V3
      coeffs[s + n.sites, 7] <- as.data.frame(slope.cis$normal)$V2
      coeffs[s + n.sites, 8] <- as.data.frame(slope.cis$normal)$V3
    } else {
      coeffs[s + n.sites, 3:8] <- NA
      # BUG FIX: this empty-Past branch previously labelled the row
      # "Modern"; it sits in the Past half of the matrix and must be
      # labelled "Past" (matching the equivalent branch in
      # get.clim.cor.age).
      coeffs[s + n.sites, 2] <- "Past"
      coeffs[s + n.sites, 1] <- name
    }
  }
  coeffs <- data.frame(coeffs)
  colnames(coeffs) <- c("site", "age", 'int.est', "slope.est", "int.min", "int.max", "slope.min", "slope.max")
  coeffs$site <- as.character(coeffs$site)
  # Character site names coerced the matrix to character; convert the
  # numeric columns back in one pass.
  num.cols <- c("int.est", "slope.est", "int.min", "int.max", "slope.min", "slope.max")
  coeffs[num.cols] <- lapply(coeffs[num.cols], function(x) as.numeric(as.character(x)))
  coeffs
}
# Age-class sensitivities over the full record (not restricted to a
# moisture class): GHCN PDSI variants, then PRISM JJA precipitation.
julpdsi.age.sens <- get.clim.sens.age(df = det.age.clim.ghcn.df, model.func = "RWI ~ Jul.pdsi")
jjapdsi.age.sens <- get.clim.sens.age(df = det.age.clim.ghcn.df, model.func = "RWI ~ JJA.pdsi")
pdsi.age.sens <- get.clim.sens.age(df = det.age.clim.ghcn.df, "RWI ~ PDSI")
jjap.age.sens <- get.clim.sens.age(df = det.age.clim.prism.df, "RWI ~ JJA.p")
# function to get the bootstrapped correlation coefficients across ages:
# Bootstrapped correlation of RWI with one climate column, separately
# for "Modern" (years >= 1950) and "Past" (years < 1950) trees at each
# site. R = 2000 resamples; normal-approximation 95% CIs.
#
# Args:
#   df:   data frame with site, ageclass, year, RWI and the column
#         named in `clim`.
#   clim: name of the climate column to correlate with RWI.
# Returns:
#   data.frame with 2 rows per site (rows 1..n Modern, n+1..2n Past):
#   site, age, cor.est, ci.min, ci.max. Sites lacking an age class get
#   an NA placeholder row.
get.clim.cor.age <- function(df, clim){
coeffs <- matrix ( 0, length(unique(df$site))*2, 5 ) # rows 1..n = Modern, n+1..2n = Past
# statistic used in boot strapping below: correlation of the chosen
# climate column with RWI on the resampled rows
boot.cor <- function(data, ind, colno ){
return(cor(data[ind,c(colno)], data[ind,]$RWI, use = "pairwise.complete.obs"))
}
for(s in 1: length(unique(df$site))) {
name <- unique(df$site)[s]
site.data <- na.omit(df[df$site == name ,])
# for the "Modern" class:
if(nrow(site.data[site.data$site == name & site.data$ageclass == "Modern" ,]) > 0){
# bootstrapping the correlation coefficients:
results <- boot(data=site.data[site.data$ageclass == "Modern" & site.data$year >= 1950 ,], colno = clim, statistic=boot.cor, R=2000)
#int.cis <- boot.ci(boot.out = results, type = "norm", index = 1)# intercept
#slope.cis <- boot.ci(boot.out = results, type = "norm", index = 2)
cis <- boot.ci(boot.out = results, type = "norm")
ci.mo <- cis$normal[2:3]
t <- results$t0
coeffs[s,3] <-t
coeffs[s , 1] <- name
coeffs[s,2] <- "Modern"
coeffs[s,4] <- ci.mo[1]
coeffs[s,5] <- ci.mo[2]
} else{
# no Modern trees at this site: NA placeholder row
coeffs[s,3:5] <- c(NA,NA, NA)
coeffs[s , 2] <- "Modern"
coeffs[s,1] <- name
}
# for the "Past" class:
if(nrow(site.data[site.data$site == name & site.data$ageclass == "Past" ,]) > 0){
results <- boot(data=site.data[site.data$ageclass == "Past" & site.data$year < 1950 ,], colno = clim, statistic=boot.cor, R=2000)
# bootstrapping the correlation coefficients:
#int.cis <- boot.ci(boot.out = results, type = "norm", index = 1)# intercept
#slope.cis <- boot.ci(boot.out = results, type = "norm", index = 2)
cis <- boot.ci(boot.out = results, type = "norm")
ci.mo <- cis$normal[2:3]
t <- results$t0
coeffs[s+length(unique(df$site)),3] <-t
coeffs[s+length(unique(df$site)) , 1] <- name
coeffs[s+length(unique(df$site)),2] <- "Past"
coeffs[s+length(unique(df$site)),4] <- ci.mo[1]
coeffs[s+length(unique(df$site)),5] <- ci.mo[2]
}else{
# no Past trees at this site: NA placeholder row
coeffs[s+length(unique(df$site)),3:5] <- c(NA,NA, NA)
coeffs[s +length(unique(df$site)), 2] <- "Past"
coeffs[s+length(unique(df$site)),1] <- name
}
}
coeffs <- data.frame(coeffs)
colnames(coeffs) <- c("site","age",'cor.est', "ci.min", "ci.max")
coeffs$site <- as.character(coeffs$site)
# site names coerced the matrix to character; convert numerics back
coeffs$cor.est <- as.numeric(as.character(coeffs$cor.est))
coeffs$ci.min <- as.numeric(as.character(coeffs$ci.min))
coeffs$ci.max <- as.numeric(as.character(coeffs$ci.max))
#coeffs$slope.min <- as.numeric(as.character(coeffs$slope.min))
#coeffs$slope.max <- as.numeric(as.character(coeffs$slope.max))
coeffs
}
# Bootstrapped age-class correlations, for ghcn climate:
age.pdsi.rf.df<- get.clim.cor.age(df = det.age.clim.ghcn.df, clim = "PDSI")
age.julpdsi.rf.df <- get.clim.cor.age(df = det.age.clim.ghcn.df, clim = "Jul.pdsi")
age.jjapdsi.rf.df <- get.clim.cor.age(df = det.age.clim.ghcn.df, clim = "JJA.pdsi")
age.pcp.rf.df <- get.clim.cor.age(df = det.age.clim.ghcn.df, clim = "PCP")
# for prism
age.vpdmax.rf.df <- get.clim.cor.age(df = det.age.clim.prism.df, clim = "VPDmax")
age.BAL.rf.df <- get.clim.cor.age(df = det.age.clim.prism.df, clim = "BAL")
age.Prismpcp.rf.df <- get.clim.cor.age(df = det.age.clim.prism.df, clim = "PCP")
# NOTE(review): named "julvpdmax" but uses clim = "VPDmax" (annual),
# not "jul.VPDmax" - probable copy-paste slip, confirm intent.
age.julvpdmax.rf.df <- get.clim.cor.age(df = det.age.clim.prism.df, clim = "VPDmax")
age.julBAL.rf.df <- get.clim.cor.age(df = det.age.clim.prism.df, clim = "jul.BAL")
age.jjaPrismpcp.rf.df <- get.clim.cor.age(df = det.age.clim.prism.df, clim = "JJA.p")
# NOTE(review): duplicated line - identical to the one above.
age.jjaPrismpcp.rf.df <- get.clim.cor.age(df = det.age.clim.prism.df, clim = "JJA.p")
ggplot(age.julpdsi.rf.df, aes(site, cor.est, color = age))+geom_point()+geom_errorbar(aes(ymin = ci.min, ymax=ci.max))
ggplot(age.jjaPrismpcp.rf.df, aes(site, cor.est, color = age))+geom_point()+geom_errorbar(aes(ymin = ci.min, ymax=ci.max))
ggplot(age.julvpdmax.rf.df, aes(site, cor.est, color = age))+geom_point()+geom_errorbar(aes(ymin = ci.min, ymax=ci.max))
# function to extrat the slope for all trees before and after 1950
# Growth-climate sensitivity for ALL trees at a site, split by period
# rather than tree age: "Pre-1950" (1895-1950) vs "Post-1950"
# (1950-2014). Bootstraps the model in `model.func` (R = 2000) within
# each period and reports normal-approximation 95% CIs.
#
# Args:
#   df:         data frame with site, year, plus the variables named in
#               `model.func`. Years outside 1895-2014 keep class '9999'
#               and fall into neither period.
#   model.func: model formula as a character string.
# Returns:
#   data.frame with 2 rows per site (Post-1950 block first, then
#   Pre-1950); the period label is stored in a column named "age" for
#   consistency with get.clim.sens.age.
# NOTE(review): 1950 appears in BOTH year vectors below; the second
# assignment wins, so 1950 is classified "Post-1950". Confirm intended.
get.clim.sens.year <- function(df, model.func){
coeffs <- matrix ( 0, length(unique(df$site))*2, 8 ) # rows 1..n = Post-1950, n+1..2n = Pre-1950
yr <- 1895:1950
yr.post <- 1950:2014
df$class <- '9999'
df[df$year %in% yr,]$class <- 'Pre-1950'
df[df$year %in% yr.post,]$class <- 'Post-1950'
for(s in 1:length(unique(df$site))){
name <- unique(df$site)[s]
site.data <- na.omit(df[df$site == name ,])
# statistic used in boot strapping below: coefficients of the refit model
bs <- function(formula, data, indices) {
d <- data[indices,] # allows boot to select sample
fit <- lm(formula, data=d)
return(coef(fit))
}
# for the "Post-1950" class:
if(nrow(site.data[ site.data$class == "Post-1950" ,]) > 0){
# bootstrapping the linear regression model
results <- boot(data=site.data[site.data$class == "Post-1950" & site.data$year >= 1950 ,], statistic=bs, R=2000, formula=model.func)
int.cis <- boot.ci(boot.out = results, type = "norm", index = 1)# intercept
slope.cis <- boot.ci(boot.out = results, type = "norm", index = 2)
coeffs[s,3:4] <- results$t0
coeffs[s , 1] <- name
coeffs[s,2] <- "Post-1950"
coeffs[s,5] <- as.data.frame(int.cis$normal)$V2
coeffs[s,6] <- as.data.frame(int.cis$normal)$V3
coeffs[s,7] <- as.data.frame(slope.cis$normal)$V2
coeffs[s,8] <- as.data.frame(slope.cis$normal)$V3
} else{
# no post-1950 rows: NA placeholder (c(NA,NA) recycles across 6 cells)
coeffs[s,3:8] <- c(NA,NA)
coeffs[s , 2] <- "Post-1950"
coeffs[s,1] <- name
}
# for the "Pre-1950" class:
if(nrow(site.data[ site.data$class == "Pre-1950" ,]) > 0){
results <- boot(data=site.data[site.data$class == "Pre-1950" & site.data$year < 1950 ,], statistic=bs, R=2000, formula=model.func)
int.cis <- boot.ci(boot.out = results, type = "norm", index = 1)# intercept
slope.cis <- boot.ci(boot.out = results, type = "norm", index = 2)
coeffs[s+length(unique(df$site)),3:4] <- results$t0
coeffs[s+length(unique(df$site)) , 1] <- name
coeffs[s+length(unique(df$site)),2] <- "Pre-1950"
coeffs[s+length(unique(df$site)),5] <- as.data.frame(int.cis$normal)$V2
coeffs[s+length(unique(df$site)),6] <- as.data.frame(int.cis$normal)$V3
coeffs[s+length(unique(df$site)),7] <- as.data.frame(slope.cis$normal)$V2
coeffs[s+length(unique(df$site)),8] <- as.data.frame(slope.cis$normal)$V3
}else{
coeffs[s+length(unique(df$site)),3:8] <- c(NA,NA)
coeffs[s +length(unique(df$site)), 2] <- "Pre-1950"
coeffs[s+length(unique(df$site)),1] <- name
}
}
coeffs <- data.frame(coeffs)
colnames(coeffs) <- c("site","age",'int.est', "slope.est", "int.min","int.max", "slope.min", "slope.max")
coeffs$site <- as.character(coeffs$site)
# site names coerced the matrix to character; convert numerics back
coeffs$slope.est <- as.numeric(as.character(coeffs$slope.est))
coeffs$int.est <- as.numeric(as.character(coeffs$int.est))
coeffs$int.min <- as.numeric(as.character(coeffs$int.min))
coeffs$int.max <- as.numeric(as.character(coeffs$int.max))
coeffs$slope.min <- as.numeric(as.character(coeffs$slope.min))
coeffs$slope.max <- as.numeric(as.character(coeffs$slope.max))
coeffs
}
# NOTE(review): uses the global `df` (last assigned "df <- test.ghcn.df"
# above) rather than det.age.clim.ghcn.df - confirm intended.
pdsi.yr.sens <- get.clim.sens.year(df, "RWI ~ Jul.pdsi")
# ---------------------------read in soil, xy characteristics
locs <- read.csv("outputs/priority_sites_locs.csv")
locs$code <- as.character(locs$code)
# rows 9-12 hold the four Glacial Lakes plots; give them explicit codes
locs[9:12,]$code <- c( "GLL1", "GLL2", "GLL3", "GLL4")
sites <- c("COR", "HIC", "STC", "GLA", "TOW", "ENG", "UNC", "BON", "MOU", "GLL4", "GLL3", "GLL2", "GLL1", "PVC", "AVO", "PLE", "UNI")
# lookup of oak species composition per site code (PLE listed twice)
speciesdf<- data.frame(code = c("BON", "COR", "GLA", "GLL1", "GLL2", "GLL3", "GLL4",
"HIC", "MOU", "PLE", "PVC", "STC", "TOW", "UNC", "AVO", "ENG", "PLE", "UNI"),
species = c( "QUMA", "QUAL", "QUAL/QUMA", "QUMA","QUMA", "QUMA","QUMA",
"QUAL/QUMA", "QURA/QUVE", "QUAL/QUMA", "QUMA", "QUMA", "QURA", "QUMA", "QURA", "QURA", "QUAL", "QUAL"))
#---------------------------- merge plot summary data with the locs and species df:
locs <- merge(locs, speciesdf, by = "code")
workingdir <- "/Users/kah/Documents/bimodality/data/"
# read in and average prism data (this is modern 30year normals)
prism <- raster(paste0(workingdir,"PRISM_ppt_30yr_normal_4kmM2_all_bil/PRISM_ppt_30yr_normal_4kmM2_annual_bil.bil"))
# reproject to Great Lakes Albers (EPSG:3175) to match site coordinates
prism.alb <- projectRaster(prism, crs='+init=epsg:3175')
locs$pr30yr <- raster::extract(prism.alb, locs[,c("coords.x1","coords.x2")])
workingdir <- "/Users/kah/Documents/bimodality/data/"
# read in and average prism temperature data (this is modern 30year normals)
prism.t <- raster(paste0(workingdir,'PRISM_tmean_30yr_normal_4kmM2_annual_bil/PRISM_tmean_30yr_normal_4kmM2_annual_bil.bil'))
prismt.alb <- projectRaster(prism.t, crs='+init=epsg:3175')
# extract temp
locs$tm30yr <- raster::extract(prismt.alb, locs[,c("coords.x1","coords.x2")])
workingdir <- "/Users/kah/Documents/TreeRings"
write.csv(locs, "outputs/priority_sites_locs_with_soil_clim.csv")
# read in the N & S deposition data (currently disabled):
#sdep.files <- list.files("data/total_Sdep/")
#ndep.files <- list.files("data/total_Ndep/")
#s.filenames <- paste0("data/total_Sdep/", sdep.files)
#s <- stack(s.filenames)
#n.filenames <- paste0("data/total_Ndep/", ndep.files)
#n <- stack(n.filenames)
#plot(n[[2]])
#plot(mapdata, add = TRUE)
#projection(n) <- CRS('+init=epsg:4269')
#n.alb <- projectRaster(n,CRS('+init=epsg:3175'))
# -------------------------merge sensitivities with the location/site information------------------
site.df <- merge(Julpdsi.sens, locs, by.x = 'site', by.y = 'code')
sens.df <- merge(pdsi.age.sens, locs, by = "site",by.y = 'code')
yr.sens.df <- merge(pdsi.yr.sens, locs, by = "site",by.y = 'code')
jja.sens.df <- merge(JJApdsi.sens, locs, by = "site",by.y = 'code')
#site.df <- merge(pdsi.sens, locs, by.x = 'site', by.y = 'code')
site.df.age <- merge(julpdsi.age.sens, locs, by.x = 'site', by.y = 'code')
jja.site.df.age <- merge(jjapdsi.age.sens, locs, by.x = 'site', by.y = 'code')
site.df.yr <- merge(pdsi.yr.sens, locs, by.x = 'site', by.y = 'code')
# dry/wet-quartile versions (sens.* objects computed earlier in the file)
site.df.age.dry <- merge(sens.jul.pdsi.age_dry.25, locs, by.x = 'site', by.y = 'code')
site.df.age.wet <- merge(sens.jul.pdsi.age_wet.25, locs, by.x = 'site', by.y = 'code')
jja.site.df.age.dry <- merge(sens.jja.pdsi.age_dry.25, locs, by.x = 'site', by.y = 'code')
jja.site.df.age.wet <- merge(sens.jja.pdsi.age_wet.25, locs, by.x = 'site', by.y = 'code')
site.df.age.dry.id <- merge(jul.pdsi.age_dry.25.id, locs, by.x = "site", by.y= "code")
jja.site.df.age.dry.id <- merge(jja.pdsi.age_dry.25.id, locs, by.x = "site", by.y= "code")
# -----------------------------------map out sensitivities in space: -----------------------------
# upper-Midwest state outlines, reprojected to match the site coordinates
df_states <- map_data("state")
states <- subset(df_states, region %in% c( "illinois", "minnesota", "wisconsin", "iowa", "south dakota",
"north dakota", 'michigan', 'missouri', 'indiana') )
coordinates(states) <- ~long+lat
class(states)
proj4string(states) <-CRS("+proj=longlat +datum=NAD83")
mapdata<-spTransform(states, CRS('+init=epsg:3175'))
# mapdata.h: same outlines in an Albers projection (currently unused below)
mapdata.h<-spTransform(states, CRS('+proj=aea +lat_1=0 +lat_2=29.5 +lat_0=45.5 +lon_0=0 +x_0=0 +y_0=-96 +datum=WGS84 +units=m +no_defs +ellps=WGS84 +towgs84=0,0,0'))
mapdata<-data.frame(mapdata)
# Maps of per-site sensitivity (slope.est) over the state outlines.
png("outputs/maps/JJA.pdsi_sensitivity.png")
ggplot(jja.sens.df, aes(coords.x1, coords.x2, color = slope.est))+geom_point()+scale_color_gradient(low = "blue", high = "red")+ geom_polygon(data=data.frame(mapdata), aes(x=long, y=lat, group=group),
colour = "darkgrey", fill = NA)+theme_bw() + coord_cartesian(xlim = c(-59495.64, 724000), ylim=c(68821.43, 1480021))
dev.off()
png("outputs/maps/Jul.pdsi_sensitivity.png")
ggplot(site.df, aes(coords.x1, coords.x2, color = slope.est))+geom_point()+scale_color_gradient(low = "blue", high = "red")+ geom_polygon(data=data.frame(mapdata), aes(x=long, y=lat, group=group),
colour = "darkgrey", fill = NA)+theme_bw() + coord_cartesian(xlim = c(-59495.64, 724000), ylim=c(68821.43, 1480021))
dev.off()
# faceted by age class (Modern / Past)
png(width = 6, height = 4, units = 'in', res = 300,"outputs/maps/Jul.pdsi_sensitivity_age.png")
ggplot(site.df.age, aes(coords.x1, coords.x2, color = slope.est))+geom_point()+ geom_polygon(data=data.frame(mapdata), aes(x=long, y=lat, group=group),colour = "darkgrey", fill = NA)+theme_bw() +facet_wrap(~age)+scale_color_gradient(low = "blue", high = "red")+ coord_cartesian(xlim = c(-59495.64, 724000), ylim=c(68821.43, 1480021))
dev.off()
# faceted by period (Pre-/Post-1950; the column is still called "age")
png(width = 6, height = 4, units = 'in', res = 300,"outputs/maps/Jul.pdsi_sensitivity_year.png")
ggplot(site.df.yr, aes(coords.x1, coords.x2, color = slope.est))+geom_point()+ geom_polygon(data=data.frame(mapdata), aes(x=long, y=lat, group=group),colour = "darkgrey", fill = NA)+theme_bw() +facet_wrap(~age)+scale_color_gradient(low = "blue", high = "red") + coord_cartesian(xlim = c(-59495.64, 724000), ylim=c(68821.43, 1480021))
dev.off()
png(width = 6, height = 4, units = 'in', res = 300,"outputs/maps/JJA.pdsi_sensitivity_age.png")
ggplot(jja.site.df.age, aes(coords.x1, coords.x2, color = slope.est))+geom_point()+ geom_polygon(data=data.frame(mapdata), aes(x=long, y=lat, group=group),colour = "darkgrey", fill = NA)+theme_bw() +facet_wrap(~age)+scale_color_gradient(low = "blue", high = "red")+ coord_cartesian(xlim = c(-59495.64, 724000), ylim=c(68821.43, 1480021))
dev.off()
cor.age.df <- merge(age.julpdsi.rf.df, site.df, by = "site")
#yr.sens.df <- merge(s, site.df, by = "site")
#---------------------------------------------------------------------------------------------------------------------
# how does July PDSI sensitivity to drought vary by climate, envtl factors?
#----------------------------------------------------------------------------------------------------------------------
# preliminary plots suggest that higher precipitation and higher T places might be more sensitive to PDSI
ggplot(site.df[!site.df$site %in% "PVC",], aes(slope.max, slope.est, color = site))+geom_point()+geom_errorbar(aes(ymin=slope.min, ymax = slope.max))
ggplot(site.df, aes(tm30yr, slope.est))+geom_point()+geom_errorbar(aes(ymin=slope.min, ymax = slope.max))
ggplot(site.df[!site.df$site %in% "UNC",], aes(sand, slope.est))+geom_point()+geom_errorbar(aes(ymin=slope.min, ymax = slope.max))
ggplot(site.df, aes(Description, slope.est))+geom_point()+geom_errorbar(aes(ymin=slope.min, ymax = slope.max))
# dark-theme figures of sensitivity vs. site covariates:
png(height = 4, width = 6, units = "in", res = 300, "outputs/sensitivity_v_site_DBH.png")
ggplot(site.df, aes(DBH, slope.est))+geom_point(color = "white")+geom_errorbar(aes(ymin=slope.min, ymax = slope.max), color = 'white')+theme_black(base_size = 20)+ylab("Sensitivity to July PDSI")+xlab("Diameter at Breast Height (cm)")+stat_smooth(method = "lm", se = FALSE)
dev.off()
png(height = 4, width = 6, units = "in", res = 300, "outputs/sensitivity_v_site_MAP.png")
ggplot(site.df, aes(pr30yr, slope.est))+geom_point(color = "white")+geom_errorbar(aes(ymin=slope.min, ymax = slope.max), color = 'white')+theme_black(base_size = 20)+ylab("Sensitivity to July PDSI")+xlab("Mean Annual Precipitation (mm)")+stat_smooth(method = "lm", se = FALSE)
dev.off()
png(height = 4, width = 6, units = "in", res = 300, "outputs/sensitivity_v_site_sand.png")
ggplot(site.df, aes(sand, slope.est))+geom_point(color = "white")+geom_errorbar(aes(ymin=slope.min, ymax = slope.max), color = 'white')+theme_black(base_size = 20)+ylab("Sensitivity to July PDSI")+xlab("% sand ")+stat_smooth(method = "lm", se = FALSE)
dev.off()
# simple single-predictor fits (UNC excluded as an outlier)
summary(lm(slope.est~sand, data =site.df[!site.df$site %in% "UNC",]))
summary(lm(slope.est~pr30yr, data =site.df[!site.df$site %in% "UNC",]))
summary(lm(slope.est~DBH, data =site.df[!site.df$site %in% "UNC",]))
ggplot(site.df, aes( BA, slope.est))+geom_point()+geom_errorbar(aes(ymin=slope.min, ymax = slope.max))
# fit a gam on the slope estimate
# NOTE(review): no s() smooth terms, so this gam() fits a plain linear model
gam.sens <- mgcv::gam(slope.est ~ pr30yr + DBH , data = site.df)
site.df$gam_ypred <- predict.gam(gam.sens, newdata = site.df)
sand <- lm(slope.est ~ pr30yr + DBH, data = site.df) # outside of UNCAS dunes, sensitivity depends on soil type
summary(gam.sens) # explains 58.7% of deviance:
summary(sand)
library(plot3D)
# predict 3d sensitivity:
# 3-D scatter of drought sensitivity against 30-yr normal precipitation
# and DBH for one age class, with a fitted least-squares plane
# (plot3D::scatter3D).
#
# Args:
#   sens.df: data frame with columns pr30yr, DBH, slope.est plus the
#            grouping column named by `age`.
#   age:     name of the grouping column to filter on (e.g. "age").
#   class:   value of that column to keep (e.g. "all", "Modern").
#   add:     passed to scatter3D; TRUE overlays on the current plot.
# Returns: invisibly, scatter3D's result; called for the plot side effect.
plot3dsensitivity.all <- function(sens.df, age, class, add ){
df <- sens.df[sens.df[,c(age)] == class,]
df <- df[!is.na(df$slope.est),]
# x, y, z variables
x <- df$pr30yr
y <- df$DBH
z <- df$slope.est
# Compute the linear regression (z = ax + by + d)
fit <- lm(z ~ x + y)
# predict values on regular xy grid
grid.lines = 25
x.pred <- seq(min(x), max(x), length.out = grid.lines)
y.pred <- seq(min(y), max(y), length.out = grid.lines)
xy <- expand.grid( x = x.pred, y = y.pred)
z.pred <- matrix(predict(fit, newdata = xy),
nrow = grid.lines, ncol = grid.lines)
# fitted points for droplines to surface
fitpoints <- predict(fit)
# scatter plot with regression plane; z axis fixed to [0, 0.06] so
# panels are comparable across calls
scatter3D(x, y, z, pch = 18, cex = 2, colvar = z,
theta = 50, phi = 35, bty="u", lwd.panel= 2, space = 0.15,ticktype = "detailed",
xlab = "\n\n\n\n Precip (mm/yr)", ylab = "\n\n\n\n DBH (cm)", zlab = "\n\n\n\n drought sensitivity", add= add ,
surf = list(x = x.pred, y = y.pred, z = z.pred,
facets = NA, fit = fitpoints), main = paste("Drought Sensitivity by climate"),
zlim=c(0,0.06))
}
# tag every row as one class so plot3dsensitivity.all keeps all sites
site.df$age <- "all"
png(height = 4, width = 7, units = 'in', res = 300, "outputs/full_pdsi_sens_3dplot.png")
plot3dsensitivity.all(site.df, "age", class = "all", add =FALSE)
dev.off()
# predicted vs observed sensitivity from the gam fit above
ggplot(site.df, aes(gam_ypred, slope.est))+geom_point()
png('outputs/modeled_sensitivity_Jul_PDSI_age_DBH_climate.png')
ggplot(site.df, aes(gam_ypred, slope.est)) + geom_point(color = "white") + geom_abline(color = "red", linetype = "dashed")+theme_black(base_size = 20)+ylab("Observed Sensitivity to July PDSI")+xlab("Predicted Sensitivity to July PDSI")
dev.off()
#---------------------------------------------------------------------------------------------------------------------
# how does SUMMER (JJA) PDSI sensitivity to drought vary by climate, envtl factors?
#----------------------------------------------------------------------------------------------------------------------
# prelimnary plots sugges that higher precipitation and higher T places might be more sensitive to PDSI
# Same covariate exploration as above, for JJA PDSI sensitivity
# (PLE excluded from several plots as an outlier).
ggplot(jja.sens.df, aes(slope.max, slope.est, color = site))+geom_point()+geom_errorbar(aes(ymin=slope.min, ymax = slope.max))
ggplot(jja.sens.df[!jja.sens.df$site %in% "PLE",], aes(tm30yr, slope.est))+geom_point()+geom_errorbar(aes(ymin=slope.min, ymax = slope.max))+stat_smooth(method = "lm")
ggplot(jja.sens.df[!jja.sens.df$site %in% "PLE",], aes(sand, slope.est))+geom_point()+geom_errorbar(aes(ymin=slope.min, ymax = slope.max))+stat_smooth(method = "lm")
ggplot(jja.sens.df, aes(Description, slope.est))+geom_point()+geom_errorbar(aes(ymin=slope.min, ymax = slope.max))
# NOTE(review): the y-axis labels below still say "July PDSI" although
# these are JJA PDSI sensitivities - confirm the labels.
png(height = 4, width = 6, units = "in", res = 300, "outputs/JJA_pdsi_sensitivity_v_site_DBH.png")
ggplot(jja.sens.df[!jja.sens.df$site %in% "PLE",], aes(DBH, slope.est))+geom_point(color = "white")+geom_errorbar(aes(ymin=slope.min, ymax = slope.max), color = 'white')+theme_black(base_size = 20)+ylab("Sensitivity to July PDSI")+xlab("Diameter at Breast Height (cm)")+stat_smooth(method = "lm", se = FALSE)
dev.off()
png(height = 4, width = 6, units = "in", res = 300, "outputs/JJA_pdsi_sensitivity_v_site_MAP.png")
ggplot(jja.sens.df[!jja.sens.df$site %in% c("PLE"),], aes(pr30yr, slope.est))+geom_point(color = "white")+geom_errorbar(aes(ymin=slope.min, ymax = slope.max), color = 'white')+theme_black(base_size = 20)+ylab("Sensitivity to July PDSI")+xlab("Mean Annual Precipitation (mm)")+stat_smooth(method = "lm", se = FALSE)
dev.off()
png(height = 4, width = 6, units = "in", res = 300, "outputs/JJA_pdsi_sensitivity_v_site_sand.png")
ggplot(jja.sens.df[!jja.sens.df$site %in% c("PLE"),], aes(sand, slope.est))+geom_point(color = "white")+geom_errorbar(aes(ymin=slope.min, ymax = slope.max), color = 'white')+theme_black(base_size = 20)+ylab("Sensitivity to July PDSI")+xlab("% sand ")+stat_smooth(method = "lm", se = FALSE)
dev.off()
# single-predictor fits (UNC excluded)
summary(lm(slope.est~sand, data =jja.sens.df[!jja.sens.df$site %in% "UNC",]))
summary(lm(slope.est~pr30yr, data =jja.sens.df[!jja.sens.df$site %in% "UNC",]))
summary(lm(slope.est~DBH, data =jja.sens.df[!jja.sens.df$site %in% "UNC",]))
summary(lm(slope.est~BA, data =jja.sens.df[!jja.sens.df$site %in% "UNC",]))
ggplot(jja.sens.df, aes( BA, slope.est))+geom_point()+geom_errorbar(aes(ymin=slope.min, ymax = slope.max))
# fit a gam on the slope estimate (linear terms only - no smooths)
gam.sens <- mgcv::gam(slope.est ~ pr30yr + DBH , data = jja.sens.df)
jja.sens.df$gam_ypred <- predict.gam(gam.sens, newdata = jja.sens.df)
sand <- lm(slope.est ~ pr30yr + DBH, data = jja.sens.df) # outside of UNCAS dunes, sensitivity depends on soil type
summary(gam.sens) # explains 58.7% of deviance:
summary(sand)
library(plot3D)
# predict 3d sensitivity:
# Re-definition of plot3dsensitivity.all (behaviourally identical to the
# earlier version in this file): 3-D scatter of drought sensitivity vs
# 30-yr precipitation and DBH for one class, with a fitted regression
# plane drawn via plot3D::scatter3D. Called for its plot side effect.
plot3dsensitivity.all <- function(sens.df, age, class, add ){
  # keep only the requested class and rows with a sensitivity estimate
  sub.df <- sens.df[sens.df[, c(age)] == class, ]
  sub.df <- sub.df[!is.na(sub.df$slope.est), ]
  # predictors and response for the plane sens = a*precip + b*dbh + d
  precip <- sub.df$pr30yr
  dbh <- sub.df$DBH
  sens <- sub.df$slope.est
  plane.fit <- lm(sens ~ precip + dbh)
  # regular 25 x 25 prediction grid spanning the observed ranges
  grid.lines <- 25
  precip.pred <- seq(min(precip), max(precip), length.out = grid.lines)
  dbh.pred <- seq(min(dbh), max(dbh), length.out = grid.lines)
  grid.xy <- expand.grid(precip = precip.pred, dbh = dbh.pred)
  plane.z <- matrix(predict(plane.fit, newdata = grid.xy),
                    nrow = grid.lines, ncol = grid.lines)
  # fitted values give the droplines from each point to the plane
  fitpoints <- predict(plane.fit)
  # scatter plot with regression plane; z range fixed for comparability
  scatter3D(precip, dbh, sens, pch = 18, cex = 2, colvar = sens,
            theta = 50, phi = 35, bty="u", lwd.panel= 2, space = 0.15, ticktype = "detailed",
            xlab = "\n\n\n\n Precip (mm/yr)", ylab = "\n\n\n\n DBH (cm)", zlab = "\n\n\n\n drought sensitivity", add= add ,
            surf = list(x = precip.pred, y = dbh.pred, z = plane.z,
                        facets = NA, fit = fitpoints), main = paste("Drought Sensitivity by climate"),
            zlim=c(0,0.06))
}
jja.sens.df$age <- "all"
png(height = 4, width = 7, units = 'in', res = 300, "outputs/full_JJApdsi_sens_3dplot.png")
plot3dsensitivity.all(jja.sens.df, "age", class = "all", add =FALSE)
dev.off()
# this model doesnt fit very well
ggplot(jja.sens.df, aes(gam_ypred, slope.est))+geom_point()
png('outputs/modeled_sensitivity_JJA_PDSI_age_DBH_climate.png')
ggplot(jja.sens.df, aes(gam_ypred, slope.est)) + geom_point(color = "white") + geom_abline(color = "red", linetype = "dashed")+theme_black(base_size = 20)+ylab("Observed Sensitivity to July PDSI")+xlab("Predicted Sensitivity to July PDSI")
dev.off()
###########################################################################################
# make plots for Modern and Past trees sensitivity to Jul PDSI
###########################################################################################
# preliminary plots suggest that higher precipitation and higher T places might be more sensitive to PDSI (though this is NS)
# Specify colors for Past/Modern trees and order the age factor (Past first)
# so boxplots and fills are consistent across all figures below.
ageColors <- c( "#009E73", "#D55E00")
#ageColors <- c( "blue", "#D55E00")
site.df.age$age <- factor(site.df.age$age, levels = c("Past", "Modern"))
names(ageColors) <- levels(site.df.age$age)
site.df.age.dry$age <- factor(site.df.age.dry$age, levels = c("Past", "Modern"))
names(ageColors) <- levels(site.df.age.dry$age)
site.df.age.wet$age <- factor(site.df.age.wet$age, levels = c("Past", "Modern"))
names(ageColors) <- levels(site.df.age.wet$age)
site.df.age.dry.id$age <- factor(site.df.age.dry.id$age, levels = c("Past", "Modern"))
names(ageColors) <- levels(site.df.age.dry.id$age)
# Boxplot of Past vs Modern sensitivity (all years), then a Welch t-test of
# the two group means, excluding the UNI site from both samples.
png(height = 6.5, width = 8, units = "in", res =300, "outputs/boxplot_Past_Modern_sens.png")
ggplot(site.df.age, aes(age, slope.est, fill = age))+geom_boxplot()+
theme_black(base_size = 25)+scale_fill_manual(values = ageColors)+ylab("Growth Sensitivity to Drought (PDSI)")+theme(legend.title = element_blank(), legend.position = c(0.8,0.9))
dev.off()
# find differences in means of the site.df.ages
t.out<- t.test(site.df.age[site.df.age$age %in% "Past" & !site.df.age$site %in% "UNI",]$slope.est, site.df.age[site.df.age$age %in% "Modern" & !site.df.age$site %in% "UNI",]$slope.est )
round(t.out$p.value, digits = 5)
# Boxplot of Past vs Modern drought sensitivity in dry years, excluding UNI.
png(height = 6.5, width = 8, units = "in", res =300,"outputs/boxplot_Past_Modern_sens_dry_0.25.png")
ggplot(site.df.age.dry[!site.df.age.dry$site %in% "UNI",], aes(age, slope.est, fill = age))+geom_boxplot()+
theme_black(base_size = 25)+scale_fill_manual(values = ageColors)+ylab("Growth Sensitivity to Drought (PDSI) \n in dry years")+theme(legend.title = element_blank(), legend.position = c(0.8,0.9))
dev.off()
# Welch t-test of Past vs Modern dry-year sensitivity, excluding UNI and AVO.
# BUGFIX: the site filter previously indexed site.df.age (a different data
# frame, so its logical vector was misaligned/recycled against
# site.df.age.dry); both subsets now use site.df.age.dry's own site column.
t.outdry<- t.test(site.df.age.dry[site.df.age.dry$age %in% "Past" & !site.df.age.dry$site %in% c("UNI","AVO"),]$slope.est, site.df.age.dry[site.df.age.dry$age %in% "Modern" & !site.df.age.dry$site %in% c("UNI","AVO"),]$slope.est )
round(t.outdry$p.value, digits = 5)
# Boxplot and t-test of Past vs Modern sensitivity in wet years.
png(height = 6.5, width = 8, units = "in", res =300,"outputs/boxplot_Past_Modern_sens_wet_0.25.png")
ggplot(site.df.age.wet, aes(age, slope.est, fill = age))+geom_boxplot()+
theme_black(base_size = 25)+scale_fill_manual(values = ageColors)+ylab("Growth Sensitivity to Drought (PDSI) \n in dry years")+theme(legend.title = element_blank(), legend.position = c(0.8,0.9))
dev.off()
t.outwet <- t.test(site.df.age.wet[site.df.age.wet$age %in% "Past",]$slope.est, site.df.age.wet[site.df.age.wet$age %in% "Modern",]$slope.est )
round(t.outwet$p.value, digits = 5)
# Same comparison using the per-tree-ID dry-year sensitivities, excluding UNI.
# BUGFIX: the UNI filter previously indexed site.df.age while subsetting
# site.df.age.dry.id (different data frames, misaligned rows); both the plot
# and the t-test now filter on site.df.age.dry.id's own site column.
png(height = 6.5, width = 8, units = "in", res =300,"outputs/boxplot_Past_Modern_sens_dry_0.25_id.png")
ggplot(site.df.age.dry.id[ !site.df.age.dry.id$site %in% "UNI",], aes(age, slope.est, fill = age))+geom_boxplot()+
theme_black(base_size = 25)+scale_fill_manual(values = ageColors)+ylab("Growth Sensitivity to Drought (PDSI) \n in dry years")+theme(legend.title = element_blank(), legend.position = c(0.8,0.9))+ylim(0,0.15)
dev.off()
t.outdryid <- t.test(site.df.age.dry.id[site.df.age.dry.id$age %in% "Past" & !site.df.age.dry.id$site %in% "UNI",]$slope.est, site.df.age.dry.id[site.df.age.dry.id$age %in% "Modern" & !site.df.age.dry.id$site %in% "UNI",]$slope.est )
round(t.outdryid$p.value, digits = 5)
# Recode stand type: AVO, ENG and UNI are forest sites in all three frames.
site.df.age.wet[site.df.age.wet$site %in% c("AVO", "ENG", "UNI"),]$Description <- "Forest"
site.df.age.dry[site.df.age.dry$site %in% c("AVO", "ENG", "UNI"),]$Description <- "Forest"
site.df.age.dry.id[site.df.age.dry.id$site %in% c("AVO", "ENG", "UNI"),]$Description <- "Forest"
# NOTE(review): this matches sites whose code is the empty string ""; if no
# such rows exist it is a silent no-op — confirm whether a site code was meant.
site.df.age.dry[site.df.age.dry$site %in% "",]$Description <- "Forest"
# Collapse mixed-species codes to a single species for faceting/t-tests.
site.df.age.wet[site.df.age.wet$species %in% "QUAL/QUMA",]$species <- "QUAL"
site.df.age.wet[site.df.age.wet$species %in% "QURA/QUVE",]$species <- "QURA"
site.df.age.dry[site.df.age.dry$species %in% "QUAL/QUMA",]$species <- "QUAL"
site.df.age.dry[site.df.age.dry$species %in% "QURA/QUVE",]$species <- "QURA"
site.df.age.dry.id[site.df.age.dry.id$species %in% "QUAL/QUMA",]$species <- "QUAL"
site.df.age.dry.id[site.df.age.dry.id$species %in% "QURA/QUVE",]$species <- "QURA"
# Wet-year sensitivity by stand type (Forest vs Savanna), with per-type t-tests.
png(height = 6.5, width = 9, units = "in", res =300,"outputs/boxplot_Past_Modern_sens_wet_0.25_by_stand_type.png")
ggplot(site.df.age.wet, aes(age, slope.est, fill = age))+geom_boxplot()+
theme_black(base_size = 20)+scale_fill_manual(values = ageColors)+ylab("Growth Sensitivity to Drought (PDSI) \n in dry years")+theme(legend.title = element_blank())+facet_wrap(~Description)
dev.off()
t.outwetsav <- t.test(site.df.age.wet[site.df.age.wet$age %in% "Past" & site.df.age.wet$Description %in% "Savanna",]$slope.est, site.df.age.wet[site.df.age.wet$age %in% "Modern" & site.df.age.wet$Description %in% "Savanna",]$slope.est )
round(t.outwetsav$p.value, digits = 5)
t.outwetfor <- t.test(site.df.age.wet[site.df.age.wet$age %in% "Past" & site.df.age.wet$Description %in% "Forest",]$slope.est, site.df.age.wet[site.df.age.wet$age %in% "Modern" & site.df.age.wet$Description %in% "Forest",]$slope.est )
round(t.outwetfor$p.value, digits = 5)
# Dry-year sensitivity by stand type, with per-type t-tests.
png(height = 6.5, width = 9, units = "in", res =300,"outputs/boxplot_Past_Modern_sens_dry_0.25_by_stand_type.png")
ggplot(site.df.age.dry, aes(age, slope.est, fill = age))+geom_boxplot()+
theme_black(base_size = 20)+scale_fill_manual(values = ageColors)+ylab("Growth Sensitivity to Drought (PDSI) \n in dry years")+theme(legend.title = element_blank())+facet_wrap(~Description)
dev.off()
# BUGFIX: these dry-year tests previously mixed data frames — the "Modern"
# savanna sample was drawn from site.df.age.wet, and the forest test filtered
# site.df.age.dry with logical vectors built from site.df.age.wet columns.
# All subsets below now consistently use site.df.age.dry.
t.outdrysav <- t.test(site.df.age.dry[site.df.age.dry$age %in% "Past" & site.df.age.dry$Description %in% "Savanna",]$slope.est, site.df.age.dry[site.df.age.dry$age %in% "Modern" & site.df.age.dry$Description %in% "Savanna",]$slope.est )
round(t.outdrysav$p.value, digits = 5)
t.outdryfor <- t.test(site.df.age.dry[site.df.age.dry$age %in% "Past" & site.df.age.dry$Description %in% "Forest",]$slope.est, site.df.age.dry[site.df.age.dry$age %in% "Modern" & site.df.age.dry$Description %in% "Forest",]$slope.est )
round(t.outdryfor$p.value, digits = 5)
# Same stand-type comparison using the per-tree-ID sensitivity estimates.
png(height = 6.5, width = 9, units = "in", res =300,"outputs/boxplot_Past_Modern_sens_dry_0.25_by_stand_type_id.png")
ggplot(site.df.age.dry.id, aes(age, slope.est, fill = age))+geom_boxplot()+
theme_black(base_size = 20)+scale_fill_manual(values = ageColors)+ylab("Growth Sensitivity to Drought (PDSI) \n in dry years")+theme(legend.title = element_blank())+facet_wrap(~Description)
dev.off()
# Welch t-tests by stand type (savanna excludes UNI; forest keeps all sites).
t.outdrysav.id <- t.test(site.df.age.dry.id[site.df.age.dry.id$age %in% "Past" & site.df.age.dry.id$Description %in% "Savanna" & !site.df.age.dry.id$site %in% "UNI",]$slope.est, site.df.age.dry.id[site.df.age.dry.id$age %in% "Modern" & site.df.age.dry.id$Description %in% "Savanna" & !site.df.age.dry.id$site %in% "UNI",]$slope.est )
round(t.outdrysav.id$p.value, digits = 5)
t.outdryfor.id <- t.test(site.df.age.dry.id[site.df.age.dry.id$age %in% "Past" & site.df.age.dry.id$Description %in% "Forest",]$slope.est, site.df.age.dry.id[site.df.age.dry.id$age %in% "Modern" & site.df.age.dry.id$Description %in% "Forest",]$slope.est )
round(t.outdryfor.id$p.value, digits = 5)
# Summary tables of mean sensitivity and tree counts, dropping missing slopes.
nonas <- site.df.age.dry.id[complete.cases(site.df.age.dry.id$slope.est),]
nonas %>% group_by(Description) %>% summarise(mean = mean(slope.est, na.rm = TRUE), n = n())
nonas %>% group_by( species) %>% summarise(mean = mean(slope.est, na.rm = TRUE), n = n())
site.slope.table <- nonas %>% group_by(site) %>% summarise(mean = mean(slope.est, na.rm = TRUE), n = n())
site.slope.table.age <- nonas %>% group_by(site, age) %>% summarise(mean = mean(slope.est, na.rm = TRUE), n = n())
write.csv(site.slope.table, "outputs/site_n_trees_slope_table.csv")
write.csv(site.slope.table.age, "outputs/site_n_trees_slope_table_age.csv")
#-------- sensitivity by species:
png(width = 12, height = 6, units = "in", res =300,"outputs/boxplot_Past_Modern_sens_dry_0.25_by_species.png")
ggplot(site.df.age.dry, aes(age, slope.est, fill = age))+geom_boxplot()+
theme_black(base_size = 20)+scale_fill_manual(values = ageColors)+ylab("Growth Sensitivity to Drought (PDSI) \n in dry years")+theme(legend.title = element_blank())+facet_wrap(~species)
dev.off()
# Per-species Welch t-tests of Past vs Modern dry-year sensitivity.
# BUGFIX: the "Past" filter previously indexed site.df.age.wet$age while
# subsetting site.df.age.dry (misaligned logical vector); all three tests now
# use site.df.age.dry's own age column for both samples.
t.outdrywhite <- t.test(site.df.age.dry[site.df.age.dry$age %in% "Past" & site.df.age.dry$species %in% "QUAL",]$slope.est, site.df.age.dry[site.df.age.dry$age %in% "Modern" & site.df.age.dry$species %in% "QUAL",]$slope.est )
t.outdryred <- t.test(site.df.age.dry[site.df.age.dry$age %in% "Past" & site.df.age.dry$species %in% "QURA",]$slope.est, site.df.age.dry[site.df.age.dry$age %in% "Modern" & site.df.age.dry$species %in% "QURA",]$slope.est )
t.outdrybur <- t.test(site.df.age.dry[site.df.age.dry$age %in% "Past" & site.df.age.dry$species %in% "QUMA",]$slope.est, site.df.age.dry[site.df.age.dry$age %in% "Modern" & site.df.age.dry$species %in% "QUMA",]$slope.est )
round(t.outdrybur$p.value, digits = 5)
# Per-species p-value labels (currently unused; geom_text call is commented out).
appends<- data.frame(x = 0.75, y = 0.06 ,label = c( as.character(round(t.outdrywhite$p.value, digits = 5)),
as.character(round(t.outdrybur$p.value, digits = 5)),
as.character(round(t.outdryred$p.value, digits = 5))),
color = c("QUAL", "QUMA", "QURA"))
# NOTE(review): this reuses the same filename as the plot above and
# overwrites it; only this second version (without p-value labels drawn)
# survives on disk — confirm which version is wanted.
png(width = 12, height = 6, units = "in", res =300,"outputs/boxplot_Past_Modern_sens_dry_0.25_by_species.png")
ggplot(site.df.age.dry, aes(age, slope.est, fill = age))+geom_boxplot()+
theme_black(base_size = 20)+
scale_fill_manual(values = ageColors)+ylab("Growth Sensitivity to Drought (PDSI) \n in dry years")+theme(legend.title = element_blank())+facet_wrap(~species)#+geom_text(data=appends,
#                                                                                                                                                   aes(x,y,label=label), inherit.aes=FALSE)
dev.off()
# Wet-year version of the per-species boxplot.
png(width = 12, height = 6, units = "in", res =300,"outputs/boxplot_Past_Modern_sens_wet_0.25_by_species.png")
ggplot(site.df.age.wet, aes(age, slope.est, fill = age))+geom_boxplot()+
theme_black(base_size = 20)+scale_fill_manual(values = ageColors)+ylab("Growth Sensitivity to Drought (PDSI) \n in dry years")+theme(legend.title = element_blank())+facet_wrap(~species)
dev.off()
# Sensitivity by species, estimated per tree ID.
png(width = 12, height = 6, units = "in", res =300,"outputs/boxplot_Past_Modern_sens_dry_0.25_by_species_ID.png")
ggplot(site.df.age.dry.id, aes(age, slope.est, fill = age))+geom_boxplot()+
theme_black(base_size = 20)+scale_fill_manual(values = ageColors)+ylab("Growth Sensitivity to Drought (PDSI) \n in dry years")+theme(legend.title = element_blank())+facet_wrap(~species)+ylim(0, 0.15)
dev.off()
# Per-species Welch t-tests on the per-tree-ID estimates (results reassign the
# t.outdry* objects computed earlier for the site-level estimates).
t.outdrywhite <- t.test(site.df.age.dry.id[site.df.age.dry.id$age %in% "Past" & site.df.age.dry.id$species %in% "QUAL",]$slope.est, site.df.age.dry.id[site.df.age.dry.id$age %in% "Modern" & site.df.age.dry.id$species %in% "QUAL",]$slope.est )
t.outdryred <- t.test(site.df.age.dry.id[site.df.age.dry.id$age %in% "Past" & site.df.age.dry.id$species %in% "QURA",]$slope.est, site.df.age.dry.id[site.df.age.dry.id$age %in% "Modern" & site.df.age.dry.id$species %in% "QURA",]$slope.est )
t.outdrybur <- t.test(site.df.age.dry.id[site.df.age.dry.id$age %in% "Past" & site.df.age.dry.id$species %in% "QUMA",]$slope.est, site.df.age.dry.id[site.df.age.dry.id$age %in% "Modern" & site.df.age.dry.id$species %in% "QUMA",]$slope.est )
round(t.outdrybur$p.value, digits = 5)
# Order sites for faceting (roughly the display order used below).
site.df.age.dry.id$site <- factor(site.df.age.dry.id$site, levels = c("BON", "GLL1", "GLL2", "GLA",
"GLL3", "UNC", "MOU", "HIC",
"GLL4","TOW", "AVO", "ENG",
"COR","STC", "PVC", "PLE", "UNI"))
# Per-site boxplots of the per-tree-ID estimates, split across two figures.
# NOTE(review): filenames say "wet_0.25" but the data plotted are the dry-year
# estimates (site.df.age.dry.id) — confirm intended naming.
png(width = 12, height = 4.5, units = "in", res = 300,"outputs/boxplot_Past_Modern_sens_wet_0.25_by_site_id_first.png")
ggplot(site.df.age.dry.id[!site.df.age.dry.id$site %in% c("UNI", NA, "GLL4","TOW", "AVO", "ENG",
"COR","STC", "PVC", "PLE"),], aes(age, slope.est, fill = age))+geom_boxplot()+
theme_black(base_size = 20)+scale_fill_manual(values = ageColors)+ylab("Growth Sensitivity to Drought (PDSI) \n in dry years")+theme(legend.title = element_blank(),strip.text = element_text(face="bold", size=9),
strip.background = element_rect( colour="black",size=0.01))+facet_wrap(~site, scales = "free_y", ncol = 4)#+ylim(-0.01, 0.15)
dev.off()
png(width = 12, height = 4.5, units = "in", res = 300,"outputs/boxplot_Past_Modern_sens_wet_0.25_by_site_id_second.png")
ggplot(site.df.age.dry.id[!site.df.age.dry.id$site %in% c("UNI", NA, "BON", "GLL1", "GLL2", "GLA",
"GLL3", "UNC", "MOU", "HIC"),], aes(age, slope.est, fill = age))+geom_boxplot()+
theme_black(base_size = 20)+scale_fill_manual(values = ageColors)+ylab("Growth Sensitivity to Drought (PDSI) \n in dry years")+theme(legend.title = element_blank(),strip.text = element_text(face="bold", size=9),
strip.background = element_rect( colour="black",size=0.01))+facet_wrap(~site, scales = "free_y", ncol = 4)#+ylim(-0.01, 0.15)
dev.off()
# Welch t-test of Past vs Modern dry-year sensitivity at each paired site,
# printing the site code and full test result for review.
pairs <- c( 'GLA' , 'GLL1', 'GLL2', 'GLL3', 'MOU' , 'UNC')
# seq_along() is safe if pairs is ever empty, unlike 1:length(pairs);
# the original also re-ran unique(pairs) on every iteration — the elements
# are already distinct, so indexing pairs directly is equivalent.
for(i in seq_along(pairs)){
sitei <- pairs[i]
testresults<- t.test(site.df.age.dry.id[site.df.age.dry.id$age %in% "Past" & site.df.age.dry.id$site %in% sitei,]$slope.est, site.df.age.dry.id[site.df.age.dry.id$age %in% "Modern" & site.df.age.dry.id$site %in% sitei,]$slope.est )
print(sitei)
print(testresults)
}
# Per-site bar plot of dry-year sensitivity with error bars.
# NOTE(review): filename says "wet_0.25" but site.df.age.dry is plotted —
# confirm intended naming.
png(width = 12, height = 6, units = "in", res = 300, "outputs/boxplot_Past_Modern_sens_wet_0.25_by_site.png")
ggplot(site.df.age.dry, aes(age, slope.est, fill = age))+geom_bar(stat = "identity", position = "dodge")+geom_errorbar(aes(ymin = slope.min, ymax = slope.max ), color = "grey", width = 0.2)+
theme_black(base_size = 20)+scale_fill_manual(values = ageColors)+ylab("Growth Sensitivity to Drought (PDSI) \n in dry years")+theme(legend.title = element_blank())+facet_wrap(~site)
dev.off()
# Interactive check of correlations by DBH class, then saved by-class boxplot.
ggplot(cor.jul.pdsi.age_dry.25.dbh[cor.jul.pdsi.age_dry.25.dbh$dbhclass %in% c("< 20", "20 - 40", "40 - 60", "60 - 80"),], aes(age, cor.est, fill = age))+geom_bar(stat="identity")+
theme_black(base_size = 20)+scale_fill_manual(values = ageColors)+ylab("Growth Sensitivity to Drought (PDSI) \n in dry years")+theme(legend.title = element_blank())+facet_wrap(~site)
png("outputs/boxplot_Past_Modern_sens_dry_0.25_bydbh_class.png")
ggplot(cor.jul.pdsi.age_dry.25.dbh[cor.jul.pdsi.age_dry.25.dbh$dbhclass %in% c("< 20", "20 - 40", "40 - 60", "60 - 80"),], aes(age, cor.est, fill = age))+geom_boxplot()+
theme_black(base_size = 20)+scale_fill_manual(values = ageColors)+ylab("Growth Sensitivity to Drought (PDSI) \n in dry years")+theme(legend.title = element_blank())+facet_wrap(~dbhclass)
dev.off()
# NOTE(review): this filename overwrites outputs/boxplot_Past_Modern_sens.png
# written earlier in this script from site.df.age — confirm which is wanted.
png("outputs/boxplot_Past_Modern_sens.png")
ggplot(sens.jul.pdsi.age_dry.25, aes(age, slope.est, fill = age))+geom_boxplot()+
theme_black(base_size = 20)+scale_fill_manual(values = ageColors)+ylab("Growth Sensitivity to Drought (PDSI)")+theme(legend.title = element_blank())
dev.off()
# Interactive exploratory scatterplots of sensitivity vs climate/soil/size.
ggplot(site.df.age.wet, aes(pr30yr, slope.est, color = age))+geom_point()+geom_errorbar(aes(ymin=slope.min, ymax = slope.max), width = 0.5)
ggplot(site.df.age.dry, aes(pr30yr, slope.est, color = age))+geom_point()+geom_errorbar(aes(ymin=slope.min, ymax = slope.max), width = 0.5)
ggplot(site.df.age, aes(pr30yr, slope.est, color = age))+geom_point()+geom_errorbar(aes(ymin=slope.min, ymax = slope.max), width = 0.5)
ggplot(site.df.age, aes(tm30yr, slope.est, color = age))+geom_point()+geom_errorbar(aes(ymin=slope.min, ymax = slope.max))+geom_errorbar(aes(ymin=slope.min, ymax = slope.max), width = 0.5)
ggplot(site.df.age, aes(sand, slope.est, color = age))+geom_point()+geom_errorbar(aes(ymin=slope.min, ymax = slope.max))+geom_errorbar(aes(ymin=slope.min, ymax = slope.max), width = 0.5)+stat_smooth(method = "lm")
ggplot(site.df.age, aes(DBH, slope.est, color = age))+geom_point()+geom_errorbar(aes(ymin=slope.min, ymax = slope.max))+geom_errorbar(aes(ymin=slope.min, ymax = slope.max), width = 0.5)+stat_smooth(method = "lm")
#ggplot(site.df.age, aes(CW_avg, slope.est, color = age))+geom_point()+geom_errorbar(aes(ymin=slope.min, ymax = slope.max))+geom_errorbar(aes(ymin=slope.min, ymax = slope.max), width = 0.5)+stat_smooth(method = "lm")
# Sensitivity vs site-average DBH, by age class, with lm trend lines.
png("outputs/sensitivity_v_siteDBH_age.png")
ggplot(site.df.age, aes(DBH, slope.est, color = age))+geom_point()+geom_errorbar(aes(ymin=slope.min, ymax = slope.max), width = 0.5)+scale_color_manual(values = ageColors)+stat_smooth(method = 'lm', se = FALSE)+theme_black(base_size = 20)+ylab("Growth Sensitivity to Drought (PDSI)")+xlab("Site Avg DBH")+theme(legend.title = element_blank())
dev.off()
ggplot(site.df.age.wet, aes(DBH, slope.est, color = age))+geom_point()+geom_errorbar(aes(ymin=slope.min, ymax = slope.max), width = 0.5)+scale_color_manual(values = ageColors)+stat_smooth(method = 'lm', se = FALSE)+theme_black(base_size = 20)+ylab("Growth Sensitivity to Drought (PDSI)")+xlab("Site Avg DBH")+theme(legend.title = element_blank())
# Model sensitivity as a function of precipitation, DBH and age class, then
# plot observed vs predicted values on a 1:1 line.
summary(lm(slope.est ~DBH + pr30yr + age, data = site.df.age))
gam.pr.dbh <- gam(slope.est ~ pr30yr+ DBH + age,data = site.df.age)
summary(gam.pr.dbh)
site.df.age$ypred <- predict(gam.pr.dbh, site.df.age)
summary(site.df.age)
png('outputs/modeled_sensitivity_v_DBH_age.png')
ggplot(site.df.age, aes(ypred, slope.est)) + geom_point(color = "white") + geom_abline(color = "red", linetype = "dashed")+theme_black(base_size = 20)+ylab("Observed Sensitivity to July PDSI")+xlab("Predicted Sensitivity to July PDSI")
dev.off()
# Sensitivity vs DBH by age class.
# BUGFIX: the x-axis label read "% Sand" although the x aesthetic is DBH;
# relabeled to match the plotted variable.
png("outputs/sensitivity_v_DBH_age.png")
ggplot(site.df.age, aes(DBH, slope.est, color = age))+geom_point()+geom_errorbar(aes(ymin=slope.min, ymax = slope.max), width = 0.5)+scale_color_manual(values = ageColors)+stat_smooth(method = 'lm', se = FALSE)+theme_black(base_size = 20)+ylab("Growth Sensitivity to Drought (PDSI)")+xlab("DBH (cm)")+theme(legend.title = element_blank())
dev.off()
# Sensitivity vs % sand, all years.
png("outputs/sensitivity_v_sand_age.png")
ggplot(site.df.age, aes(sand, slope.est, color = age))+geom_point()+geom_errorbar(aes(ymin=slope.min, ymax = slope.max), width = 0.5)+scale_color_manual(values = ageColors)+stat_smooth(method = 'lm', se = FALSE)+theme_black(base_size = 20)+ylab("Growth Sensitivity to Drought (PDSI)")+xlab("% Sand")+theme(legend.title = element_blank())
dev.off()
# Sensitivity vs % sand in dry years.
# BUGFIX: this figure previously reused "outputs/sensitivity_v_sand_age.png"
# and silently overwrote the all-years plot above; renamed with _dry_years.
png("outputs/sensitivity_v_sand_age_dry_years.png")
ggplot(site.df.age.dry, aes(sand, slope.est, color = age))+geom_point()+geom_errorbar(aes(ymin=slope.min, ymax = slope.max), width = 0.5)+scale_color_manual(values = ageColors)+stat_smooth(method = 'lm', se = FALSE)+theme_black(base_size = 20)+ylab("Growth Sensitivity to Drought (PDSI)")+xlab("% Sand")+theme(legend.title = element_blank())+ylim(0,0.2)
dev.off()
# Sensitivity vs % sand in wet years.
# BUGFIX: this figure plotted site.df.age.wet but was named "_dry_years"
# (colliding with the dry-year plot above); renamed with _wet_years.
png("outputs/sensitivity_v_sand_age_wet_years.png")
ggplot(site.df.age.wet, aes(sand, slope.est, color = age))+geom_point()+geom_errorbar(aes(ymin=slope.min, ymax = slope.max), width = 0.5)+scale_color_manual(values = ageColors)+stat_smooth(method = 'lm', se = FALSE)+theme_black(base_size = 20)+ylab("Growth Sensitivity to Drought (PDSI)")+xlab("% Sand")+theme(legend.title = element_blank())
dev.off()
# Sensitivity vs mean annual precipitation, all years and dry years.
png("outputs/sensitivity_v_MAP_age.png")
ggplot(site.df.age, aes(pr30yr, slope.est, color = age))+geom_point()+geom_errorbar(aes(ymin=slope.min, ymax = slope.max), width = 0.5)+scale_color_manual(values = ageColors)+stat_smooth(method = 'lm', se = FALSE)+theme_black(base_size = 20)+ylab("Growth Sensitivity to Drought (PDSI)")+xlab("Mean Annual Precipitation")+theme(legend.title = element_blank())
dev.off()
png("outputs/sensitivity_v_MAP_age_dry_years.png")
ggplot(site.df.age.dry, aes(pr30yr, slope.est, color = age))+geom_point()+geom_errorbar(aes(ymin=slope.min, ymax = slope.max), width = 0.5)+scale_color_manual(values = ageColors)+stat_smooth(method = 'lm', se = FALSE)+theme_black(base_size = 20)+ylab("Growth Sensitivity to Drought (PDSI)")+xlab("Mean Annual Precipitation")+theme(legend.title = element_blank())
dev.off()
# Sensitivity vs mean monthly temperature.
png("outputs/sensitivity_v_TMEAN_age.png")
ggplot(site.df.age, aes(tm30yr, slope.est, color = age))+geom_point()+geom_errorbar(aes(ymin=slope.min, ymax = slope.max), width = 0.1)+scale_color_manual(values = ageColors)+stat_smooth(method = 'lm', se = FALSE)+theme_black(base_size = 20)+ylab("Growth Sensitivity to Drought (PDSI)")+xlab("Mean Monthly Temperature (DegC)")+theme(legend.title = element_blank())
dev.off()
# Earlier exploratory models, kept for reference (deviance notes in comments).
#ggplot(cor.age.df[!cor.age.df$site %in% "UNC",], aes(sand, cor.est, color = age))+geom_point()+geom_errorbar(aes(ymin=ci.min, ymax = ci.max))+stat_smooth(method = "lm")
#sens.Modern <- gam(slope.est ~ pr30yr + tm30yr +sand , data = site.df.age[site.df.age$age=="Modern",])
#summary(sens.Modern) # explains 47.7% of deviance:
#sens.Past <- gam(slope.est ~ pr30yr + tm30yr +sand , data = sens.df[sens.df$age=="Past",])
#summary(sens.Past) # explains 90.5% of deviance:
##############################################################
# make preliminary plots for pre- and post-1950 sensitivity
###############################################################3
# Reverse the age factor so Pre-1950 plots first, and set period colors.
site.df.yr$age <- factor(site.df.yr$age,levels = rev(levels(site.df.yr$age)),ordered = TRUE)
yrColors <- c( "#009E73", "#D55E00")
names(yrColors) <- levels(site.df.yr$age)
#colScale <- scale_colour_manual(name = "grp",values = myColors)
# Boxplot of pre- vs post-1950 sensitivity.
png("outputs/boxplot_pre_post_sens.png")
ggplot(site.df.yr, aes(age, slope.est, fill = age))+geom_boxplot()+theme_black(base_size = 20)+scale_fill_manual(values = yrColors)+ylab("Growth Sensitivity to Drought (PDSI)")+theme(legend.title = element_blank())
dev.off()
# Interactive exploratory scatterplots vs climate covariates.
ggplot(site.df.yr, aes(pr30yr, slope.est, color = age))+geom_point()+geom_errorbar(aes(ymin=slope.min, ymax = slope.max), width = 0.5)
ggplot(site.df.yr, aes(tm30yr, slope.est, color = age))+geom_point()+geom_errorbar(aes(ymin=slope.min, ymax = slope.max), width = 0.5)
# Saved scatterplots of sensitivity vs sand, MAP, temperature and DBH.
png("outputs/sensitivity_v_sand_pre_post.png")
ggplot(site.df.yr, aes(sand, slope.est, color = age))+geom_point()+geom_errorbar(aes(ymin=slope.min, ymax = slope.max), width = 0.5)+stat_smooth(method = 'lm', se = FALSE)+scale_color_manual(values = yrColors)+theme_black(base_size = 20)+ylab("Growth Sensitivity to Drought (PDSI)")+xlab("% Sand")+theme(legend.title = element_blank())
dev.off()
png("outputs/sensitivity_v_MAP_pre_post.png")
ggplot(site.df.yr, aes(pr30yr, slope.est, color = age))+geom_point()+geom_errorbar(aes(ymin=slope.min, ymax = slope.max), width = 0.5)+stat_smooth(method = 'lm', se = FALSE)+scale_color_manual(values = yrColors)+theme_black(base_size = 20)+ylab("Growth Sensitivity to Drought (PDSI)")+xlab("Mean Annual Precipitation")+theme(legend.title = element_blank())
dev.off()
png("outputs/sensitivity_v_TMEAN_pre_post.png")
ggplot(site.df.yr, aes(tm30yr, slope.est, color = age))+geom_point()+geom_errorbar(aes(ymin=slope.min, ymax = slope.max), width = 0.05)+stat_smooth(method = 'lm', se = FALSE)+scale_color_manual(values = yrColors)+theme_black(base_size = 20)+ylab("Growth Sensitivity to Drought (PDSI)")+xlab("Mean Monthly Temperature (DegC)")+theme(legend.title = element_blank())
dev.off()
png("outputs/sensitivity_v_DBH_pre_post.png")
ggplot(site.df.yr, aes(DBH, slope.est, color = age))+geom_point()+geom_errorbar(aes(ymin=slope.min, ymax = slope.max), width = 0.05)+stat_smooth(method = 'lm', se = FALSE)+scale_color_manual(values = yrColors)+theme_black(base_size = 20)+ylab("Growth Sensitivity to Drought (PDSI)")+xlab("DBH (cm)")+theme(legend.title = element_blank())
dev.off()
# Joint look at sand/precip space, then simple linear models of sensitivity.
ggplot(site.df.yr, aes(sand, pr30yr,color = slope.est, shape = age))+geom_point()+geom_errorbar(aes(ymin=slope.min, ymax = slope.max), width = 0.5) + ylim(500, 1000)
summary(lm(slope.est ~ sand + age ,data = site.df.yr))
summary(lm(slope.est ~ sand + pr30yr + age ,data = site.df.age))
summary(lm(slope.est ~ pr30yr + age +DBH,data = site.df.age))
# Cast to one row per site with Past/Modern sensitivity in separate columns,
# then model the Modern sensitivity and the pooled data on precip + DBH.
reformed.df <- dcast(site.df.age[c("site", "age", "coords.x1", "coords.x2", 'slope.est', "DBH" , "pr30yr", "tm30yr",'sand')], coords.x1 + coords.x2+site+DBH+pr30yr+tm30yr+sand ~ age, mean, na.rm=TRUE, value.var = 'slope.est')
reformed.df$diff <- reformed.df$Past - reformed.df$Modern
sens.dif <- gam(Modern ~ pr30yr + DBH , data = reformed.df)
summary(sens.dif) #Deviance explained = 41.1%
gam.sens.age <- gam(slope.est ~ pr30yr + DBH , data = site.df.age)
summary(gam.sens.age)
#sens.post <- gam(slope.est ~ pr30yr + tm30yr +sand , data = yr.sens.df[yr.sens.df$age=="Post-1950",])
#summary(sens.post) # explains 36.8% of deviance:
sens.df <- site.df.age
#install.packages("plot3D")
library(plot3D)
# The function below takes the data of interest, fits a linear model
# lm(sensitivity ~ precip + DBH), and plots the fitted 3D surface.
# Fit a linear model of drought sensitivity (slope.est) on 30-yr mean
# precipitation (pr30yr) and tree diameter (DBH) for one class of trees,
# then draw a 3D scatterplot with the fitted regression surface
# (plot3D::scatter3D) and droplines from points to the surface.
#
# Arguments:
#   sens.df    data frame with columns pr30yr, DBH, slope.est and the
#              grouping column named by `age`
#   age        name of the grouping column to subset on (e.g. "age")
#   class      value of that column selecting the class to plot
#   col        point/surface color passed to scatter3D
#   add        logical; TRUE overlays onto an existing scatter3D plot
#   grid.lines prediction-grid resolution per axis (generalized from the
#              previously hard-coded 25; default preserves old behavior)
plot3dsensitivity <- function(sens.df, age, class, col, add, grid.lines = 25){
  # subset to the requested class and drop rows with missing sensitivity
  df <- sens.df[sens.df[, c(age)] == class, ]
  df <- df[!is.na(df$slope.est), ]
  x <- df$pr30yr
  y <- df$DBH
  z <- df$slope.est
  # fit the regression plane z = a*x + b*y + d
  fit <- lm(z ~ x + y)
  # predict on a regular x-y grid to draw the surface
  x.pred <- seq(min(x), max(x), length.out = grid.lines)
  y.pred <- seq(min(y), max(y), length.out = grid.lines)
  xy <- expand.grid(x = x.pred, y = y.pred)
  z.pred <- matrix(predict(fit, newdata = xy),
                   nrow = grid.lines, ncol = grid.lines)
  # fitted values at the observed points provide the droplines
  fitpoints <- predict(fit)
  # scatter plot with the regression plane overlaid
  scatter3D(x, y, z, pch = 18, cex = 2, col = col,
            theta = 50, phi = 25, bty = "u", lwd.panel = 2, space = 0.15, ticktype = "detailed",
            xlab = "\n\n\n\n Precip", ylab = "\n\n\n\n DBH (cm)", zlab = "\n\n\n\n drought sensitivity", add = add,
            surf = list(x = x.pred, y = y.pred, z = z.pred,
                        facets = NA, fit = fitpoints), main = paste("Drought Sensitivity by climate"),
            zlim = c(0, 0.1))
}
# Plot Past and Modern predictive surfaces on the same 3D plot.
# NOTE(review): the legend labels below read "Modern pre-1950"/"Modern
# post-1950" although the surfaces drawn are the Past and Modern classes —
# these labels look copy-pasted from the pre/post-1950 plot; confirm.
png(height = 5, width = 9, units = 'in', res= 300, 'outputs/sensitivity_surface3d_age.png')
plot3dsensitivity(site.df.age, "age","Past", "#009E73",FALSE)
plot3dsensitivity(site.df.age, "age","Modern", "#D55E00",TRUE)
legend(x = 0.5, y = 0 ,
legend = c(expression(atop("Modern pre-1950", "(low CO"[2]*")")), expression(atop("Modern post-1950", "(high CO"[2]*")"))),
col = c("#009E73",
"#D55E00"),
pch = c(18, 18),
bty = "n",
pt.cex = 2,
cex = 1.2,
text.col = "black",
horiz = F ,
inset = c(0.1, 0.1))
dev.off()
# Plot the pre- and post-1950 sensitivity surfaces for all trees.
yr.sens.df <- site.df.yr
png(height = 5, width = 9, units = 'in', res= 300,'outputs/sensitivity_surface3d_pre_post_1950_precip_DBH.png')
#sens.df, age, class, col, add
plot3dsensitivity(sens.df = site.df.yr, age = "age",class = "Pre-1950", col = "#009E73",add = FALSE)
plot3dsensitivity(site.df.yr, "age","Post-1950", "#D55E00",TRUE)
legend(x = 0.5, y = 0 ,
legend = c(expression(atop("All trees Pre-1950", "(low CO"[2]*")")), expression(atop("All trees Post-1950", "(high CO"[2]*")"))),
col = c("#009E73",
"#D55E00"),
pch = c(18, 18),
bty = "n",
pt.cex = 2,
cex = 1.2,
text.col = "black",
horiz = F ,
inset = c(0.1, 0.1))
dev.off()
###########################################################################################
# make plots for Modern and Past trees sensitivity to JJA PDSI
###########################################################################################
# preliminary plots suggest that higher precipitation and higher T places might be more sensitive to PDSI (though this is NS)
# Specify colors for Past/Modern trees and order the age factor (Past first)
# for the JJA-PDSI data frames.
# CLEANUP: this setup block was accidentally pasted three times in a row;
# the assignments are idempotent, so the duplicate copies were removed.
ageColors <- c( "#009E73", "#D55E00")
#ageColors <- c( "blue", "#D55E00")
jja.site.df.age$age <- factor(jja.site.df.age$age, levels = c("Past", "Modern"))
names(ageColors) <- levels(jja.site.df.age$age)
jja.site.df.age.dry$age <- factor(jja.site.df.age.dry$age, levels = c("Past", "Modern"))
names(ageColors) <- levels(jja.site.df.age.dry$age)
jja.site.df.age.wet$age <- factor(jja.site.df.age.wet$age, levels = c("Past", "Modern"))
names(ageColors) <- levels(jja.site.df.age.wet$age)
jja.site.df.age.dry.id$age <- factor(jja.site.df.age.dry.id$age, levels = c("Past", "Modern"))
names(ageColors) <- levels(jja.site.df.age.dry.id$age)
# ---- Past vs. Modern drought sensitivity: boxplots + Welch t-tests ----
# PLE is excluded throughout (outlier site).
png(height = 6.5, width = 8, units = "in", res =300, "outputs/boxplot_Past_Modern_sens.png")
ggplot(jja.site.df.age[!jja.site.df.age$site %in% "PLE",], aes(age, slope.est, fill = age))+geom_boxplot()+
  theme_black(base_size = 25)+scale_fill_manual(values = ageColors)+ylab("Growth Sensitivity to Drought (PDSI)")+theme(legend.title = element_blank(), legend.position = c(0.8,0.9))
dev.off()
# Welch two-sample t-test for a difference in mean sensitivity (all years):
t.out<- t.test(jja.site.df.age[jja.site.df.age$age %in% "Past" & !jja.site.df.age$site %in% "PLE",]$slope.est, jja.site.df.age[jja.site.df.age$age %in% "Modern" & !jja.site.df.age$site %in% "PLE",]$slope.est )
round(t.out$p.value, digits = 5)

# Dry years only (lowest 25% of PDSI):
png(height = 6.5, width = 8, units = "in", res =300,"outputs/boxplot_Past_Modern_sens_dry_0.25.png")
ggplot(jja.site.df.age.dry[!jja.site.df.age.dry$site %in% "PLE",], aes(age, slope.est, fill = age))+geom_boxplot()+
  theme_black(base_size = 25)+scale_fill_manual(values = ageColors)+ylab("Growth Sensitivity to Drought (PDSI) \n in dry years")+theme(legend.title = element_blank(), legend.position = c(0.8,0.9))
dev.off()
# BUGFIX: the PLE filter previously indexed jja.site.df.age (a different data
# frame with a different row order/length); filter on the frame being subset.
t.outdry<- t.test(jja.site.df.age.dry[jja.site.df.age.dry$age %in% "Past" & !jja.site.df.age.dry$site %in% c("PLE"),]$slope.est, jja.site.df.age.dry[jja.site.df.age.dry$age %in% "Modern" & !jja.site.df.age.dry$site %in% c("PLE"),]$slope.est )
round(t.outdry$p.value, digits = 5)

# Wet years only (NOTE(review): the y-axis label still says "dry years").
png(height = 6.5, width = 8, units = "in", res =300,"outputs/boxplot_Past_Modern_sens_wet_0.25.png")
ggplot(jja.site.df.age.wet[!jja.site.df.age.wet$site %in% "PLE",], aes(age, slope.est, fill = age))+geom_boxplot()+
  theme_black(base_size = 25)+scale_fill_manual(values = ageColors)+ylab("Growth Sensitivity to Drought (PDSI) \n in dry years")+theme(legend.title = element_blank(), legend.position = c(0.8,0.9))
dev.off()
t.outwet <- t.test(jja.site.df.age.wet[jja.site.df.age.wet$age %in% "Past" & !jja.site.df.age.wet$site %in% "PLE",]$slope.est, jja.site.df.age.wet[jja.site.df.age.wet$age %in% "Modern" & !jja.site.df.age.wet$site %in% "PLE",]$slope.est )
round(t.outwet$p.value, digits = 5)

# Dry years, sensitivities estimated per tree ID.
# BUGFIX: both the plot filter and the t-test previously filtered on
# jja.site.df.age$site instead of jja.site.df.age.dry.id$site.
png(height = 6.5, width = 8, units = "in", res =300,"outputs/boxplot_Past_Modern_sens_dry_0.25_id.png")
ggplot(jja.site.df.age.dry.id[!jja.site.df.age.dry.id$site %in% "PLE",], aes(age, slope.est, fill = age))+geom_boxplot()+
  theme_black(base_size = 25)+scale_fill_manual(values = ageColors)+ylab("Growth Sensitivity to Drought (PDSI) \n in dry years")+theme(legend.title = element_blank(), legend.position = c(0.8,0.9))+ylim(0,0.15)
dev.off()
t.outdryid <- t.test(jja.site.df.age.dry.id[jja.site.df.age.dry.id$age %in% "Past" & !jja.site.df.age.dry.id$site %in% "PLE",]$slope.est, jja.site.df.age.dry.id[jja.site.df.age.dry.id$age %in% "Modern" & !jja.site.df.age.dry.id$site %in% "PLE",]$slope.est )
round(t.outdryid$p.value, digits = 5)
# Rename the species column by position in each derived frame.
# NOTE(review): hard-coded column indices (27/28/28/29) are brittle -- confirm
# they still point at the species column if upstream columns change.
colnames(jja.site.df.age)[27] <- c("species")
colnames(jja.site.df.age.dry)[28] <- c("species")
colnames(jja.site.df.age.wet)[28] <- c("species")
colnames(jja.site.df.age.dry.id)[29] <- c("species")
# Reclassify the AVO/ENG/UNI sites as Forest stands.
jja.site.df.age.wet[jja.site.df.age.wet$site %in% c("AVO", "ENG", "UNI"),]$Description <- "Forest"
jja.site.df.age.dry[jja.site.df.age.dry$site %in% c("AVO", "ENG", "UNI"),]$Description <- "Forest"
jja.site.df.age.dry.id[jja.site.df.age.dry.id$site %in% c("AVO", "ENG", "UNI"),]$Description <- "Forest"
# NOTE(review): this matches rows whose site code is the empty string --
# presumably a data-entry artifact; confirm these rows exist and are Forest.
jja.site.df.age.dry[jja.site.df.age.dry$site %in% "",]$Description <- "Forest"
# Collapse mixed-species codes to a single representative species code.
jja.site.df.age.wet[jja.site.df.age.wet$species %in% "QUAL/QUMA",]$species <- "QUAL"
jja.site.df.age.wet[jja.site.df.age.wet$species %in% "QURA/QUVE",]$species <- "QURA"
jja.site.df.age.dry[jja.site.df.age.dry$species %in% "QUAL/QUMA",]$species <- "QUAL"
jja.site.df.age.dry[jja.site.df.age.dry$species %in% "QURA/QUVE",]$species <- "QURA"
jja.site.df.age.dry.id[jja.site.df.age.dry.id$species %in% "QUAL/QUMA",]$species <- "QUAL"
jja.site.df.age.dry.id[jja.site.df.age.dry.id$species %in% "QURA/QUVE",]$species <- "QURA"
# ---- Past vs. Modern sensitivity split by stand type (Savanna vs. Forest) ----
png(height = 6.5, width = 9, units = "in", res =300,"outputs/boxplot_Past_Modern_sens_wet_0.25_by_stand_type.png")
ggplot(jja.site.df.age.wet, aes(age, slope.est, fill = age))+geom_boxplot()+
  theme_black(base_size = 20)+scale_fill_manual(values = ageColors)+ylab("Growth Sensitivity to Drought (PDSI) \n in dry years")+theme(legend.title = element_blank())+facet_wrap(~Description)
dev.off()
# Wet-year Welch t-tests within each stand type:
t.outwetsav <- t.test(jja.site.df.age.wet[jja.site.df.age.wet$age %in% "Past" & jja.site.df.age.wet$Description %in% "Savanna",]$slope.est, jja.site.df.age.wet[jja.site.df.age.wet$age %in% "Modern" & jja.site.df.age.wet$Description %in% "Savanna",]$slope.est )
round(t.outwetsav$p.value, digits = 5)
t.outwetfor <- t.test(jja.site.df.age.wet[jja.site.df.age.wet$age %in% "Past" & jja.site.df.age.wet$Description %in% "Forest",]$slope.est, jja.site.df.age.wet[jja.site.df.age.wet$age %in% "Modern" & jja.site.df.age.wet$Description %in% "Forest",]$slope.est )
round(t.outwetfor$p.value, digits = 5)
png(height = 6.5, width = 9, units = "in", res =300,"outputs/boxplot_Past_Modern_sens_dry_0.25_by_stand_type.png")
ggplot(jja.site.df.age.dry[!jja.site.df.age.dry$site %in% "PLE",], aes(age, slope.est, fill = age))+geom_boxplot()+
  theme_black(base_size = 20)+scale_fill_manual(values = ageColors)+ylab("Growth Sensitivity to Drought (PDSI) \n in dry years")+theme(legend.title = element_blank())+facet_wrap(~Description)
dev.off()
# Dry-year Welch t-tests within each stand type.
# BUGFIX: these tests previously built logical masks from three different data
# frames (jja.site.df.age.wet / .dry / .dry.id) while subsetting .dry, so rows
# were selected by another frame's row order.  All masks now come from
# jja.site.df.age.dry, the frame actually being indexed.
t.outdrysav <- t.test(jja.site.df.age.dry[jja.site.df.age.dry$age %in% "Past" & jja.site.df.age.dry$Description %in% "Savanna" & !jja.site.df.age.dry$site %in% "PLE",]$slope.est, jja.site.df.age.dry[jja.site.df.age.dry$age %in% "Modern" & jja.site.df.age.dry$Description %in% "Savanna" & !jja.site.df.age.dry$site %in% "PLE",]$slope.est )
round(t.outdrysav$p.value, digits = 5)
t.outdryfor <- t.test(jja.site.df.age.dry[jja.site.df.age.dry$age %in% "Past" & jja.site.df.age.dry$Description %in% "Forest" & !jja.site.df.age.dry$site %in% "PLE",]$slope.est, jja.site.df.age.dry[jja.site.df.age.dry$age %in% "Modern" & jja.site.df.age.dry$Description %in% "Forest" & !jja.site.df.age.dry$site %in% "PLE",]$slope.est )
round(t.outdryfor$p.value, digits = 5)
# for the sensitivity estimated by id:
png(height = 6.5, width = 9, units = "in", res =300,"outputs/boxplot_Past_Modern_sens_dry_0.25_by_stand_type_id.png")
ggplot(jja.site.df.age.dry.id[!jja.site.df.age.dry.id$site %in% "PLE",], aes(age, slope.est, fill = age))+geom_boxplot()+
  theme_black(base_size = 20)+scale_fill_manual(values = ageColors)+ylab("Growth Sensitivity to Drought (PDSI) \n in dry years")+theme(legend.title = element_blank())+facet_wrap(~Description)
dev.off()
# Welch t-tests within stand type for the per-tree (id) sensitivities:
t.outdrysav.id <- t.test(jja.site.df.age.dry.id[jja.site.df.age.dry.id$age %in% "Past" & jja.site.df.age.dry.id$Description %in% "Savanna" & !jja.site.df.age.dry.id$site %in% "PLE",]$slope.est, jja.site.df.age.dry.id[jja.site.df.age.dry.id$age %in% "Modern" & jja.site.df.age.dry.id$Description %in% "Savanna" & !jja.site.df.age.dry.id$site %in% "PLE",]$slope.est )
round(t.outdrysav.id$p.value, digits = 5)
t.outdryfor.id <- t.test(jja.site.df.age.dry.id[jja.site.df.age.dry.id$age %in% "Past" & jja.site.df.age.dry.id$Description %in% "Forest",]$slope.est, jja.site.df.age.dry.id[jja.site.df.age.dry.id$age %in% "Modern" & jja.site.df.age.dry.id$Description %in% "Forest",]$slope.est )
round(t.outdryfor.id$p.value, digits = 5)
# Summary tables of mean sensitivity and sample size, after dropping rows with
# missing slope estimates.
nonas <- jja.site.df.age.dry.id[complete.cases(jja.site.df.age.dry.id$slope.est),]
nonas %>% group_by(Description) %>% summarise(mean = mean(slope.est, na.rm = TRUE), n = n())
nonas %>% group_by( species) %>% summarise(mean = mean(slope.est, na.rm = TRUE), n = n())
site.slope.table <- nonas %>% group_by(site) %>% summarise(mean = mean(slope.est, na.rm = TRUE), n = n())
site.slope.table.age <- nonas %>% group_by(site, age) %>% summarise(mean = mean(slope.est, na.rm = TRUE), n = n())
write.csv(site.slope.table, "outputs/site_n_trees_slope_table.csv")
write.csv(site.slope.table.age, "outputs/site_n_trees_slope_table_age.csv")
#-------- sensitivity by species --------
# NOTE(review): the same output filename is written twice below (the second png
# overwrites the first); confirm which version is intended.
png(width = 12, height = 6, units = "in", res =300,"outputs/boxplot_Past_Modern_sens_dry_0.25_by_species.png")
ggplot(jja.site.df.age.dry, aes(age, slope.est, fill = age))+geom_boxplot()+
  theme_black(base_size = 20)+scale_fill_manual(values = ageColors)+ylab("Growth Sensitivity to Drought (PDSI) \n in dry years")+theme(legend.title = element_blank())+facet_wrap(~species)
dev.off()
# Welch t-tests for Past vs. Modern within each species (dry years).
# BUGFIX: the "Past" masks previously used jja.site.df.age.wet$age to index
# jja.site.df.age.dry (a different data frame); all masks now come from the
# frame being subset.
t.outdrywhite <- t.test(jja.site.df.age.dry[jja.site.df.age.dry$age %in% "Past" & jja.site.df.age.dry$species %in% "QUAL",]$slope.est, jja.site.df.age.dry[jja.site.df.age.dry$age %in% "Modern" & jja.site.df.age.dry$species %in% "QUAL",]$slope.est )
t.outdryred <- t.test(jja.site.df.age.dry[jja.site.df.age.dry$age %in% "Past" & jja.site.df.age.dry$species %in% "QURA",]$slope.est, jja.site.df.age.dry[jja.site.df.age.dry$age %in% "Modern" & jja.site.df.age.dry$species %in% "QURA",]$slope.est )
t.outdrybur <- t.test(jja.site.df.age.dry[jja.site.df.age.dry$age %in% "Past" & jja.site.df.age.dry$species %in% "QUMA",]$slope.est, jja.site.df.age.dry[jja.site.df.age.dry$age %in% "Modern" & jja.site.df.age.dry$species %in% "QUMA",]$slope.est )
round(t.outdrybur$p.value, digits = 5)
# p-value annotations per species facet (currently unused -- geom_text is
# commented out below).
appends<- data.frame(x = 0.75, y = 0.06 ,label = c( as.character(round(t.outdrywhite$p.value, digits = 5)),
                                                    as.character(round(t.outdrybur$p.value, digits = 5)),
                                                    as.character(round(t.outdryred$p.value, digits = 5))),
                     color = c("QUAL", "QUMA", "QURA"))
png(width = 12, height = 6, units = "in", res =300,"outputs/boxplot_Past_Modern_sens_dry_0.25_by_species.png")
ggplot(jja.site.df.age.dry, aes(age, slope.est, fill = age))+geom_boxplot()+
  theme_black(base_size = 20)+
  scale_fill_manual(values = ageColors)+ylab("Growth Sensitivity to Drought (PDSI) \n in dry years")+theme(legend.title = element_blank())+facet_wrap(~species)#+geom_text(data=appends,
#              aes(x,y,label=label), inherit.aes=FALSE)
dev.off()
png(width = 12, height = 6, units = "in", res =300,"outputs/boxplot_Past_Modern_sens_wet_0.25_by_species.png")
ggplot(jja.site.df.age.wet, aes(age, slope.est, fill = age))+geom_boxplot()+
  theme_black(base_size = 20)+scale_fill_manual(values = ageColors)+ylab("Growth Sensitivity to Drought (PDSI) \n in dry years")+theme(legend.title = element_blank())+facet_wrap(~species)
dev.off()
# Sensitivity by species, estimated per tree ID (dry years, PLE excluded):
png(width = 12, height = 6, units = "in", res =300,"outputs/JJA_pdsi_boxplot_Past_Modern_sens_dry_0.25_by_species_ID.png")
ggplot(jja.site.df.age.dry.id[!jja.site.df.age.dry.id$site %in% "PLE",], aes(age, slope.est, fill = age))+geom_boxplot()+
  theme_black(base_size = 20)+scale_fill_manual(values = ageColors)+ylab("Growth Sensitivity to Drought (PDSI) \n in dry years")+theme(legend.title = element_blank())+facet_wrap(~species)+ylim(0, 0.15)
dev.off()
# Welch t-tests for Past vs. Modern within each species (per-tree estimates):
t.outdrywhite <- t.test(jja.site.df.age.dry.id[jja.site.df.age.dry.id$age %in% "Past" & jja.site.df.age.dry.id$species %in% "QUAL" & !jja.site.df.age.dry.id$site %in% "PLE",]$slope.est, jja.site.df.age.dry.id[jja.site.df.age.dry.id$age %in% "Modern" & jja.site.df.age.dry.id$species %in% "QUAL" & !jja.site.df.age.dry.id$site %in% "PLE",]$slope.est )
t.outdryred <- t.test(jja.site.df.age.dry.id[jja.site.df.age.dry.id$age %in% "Past" & jja.site.df.age.dry.id$species %in% "QURA" & !jja.site.df.age.dry.id$site %in% "PLE",]$slope.est, jja.site.df.age.dry.id[jja.site.df.age.dry.id$age %in% "Modern" & jja.site.df.age.dry.id$species %in% "QURA" & !jja.site.df.age.dry.id$site %in% "PLE",]$slope.est )
t.outdrybur <- t.test(jja.site.df.age.dry.id[jja.site.df.age.dry.id$age %in% "Past" & jja.site.df.age.dry.id$species %in% "QUMA" & !jja.site.df.age.dry.id$site %in% "PLE",]$slope.est, jja.site.df.age.dry.id[jja.site.df.age.dry.id$age %in% "Modern" & jja.site.df.age.dry.id$species %in% "QUMA" & !jja.site.df.age.dry.id$site %in% "PLE",]$slope.est )
round(t.outdrybur$p.value, digits = 5)
# Fix a plotting order for sites (presumably a climate/geographic gradient --
# TODO confirm the intended ordering criterion).
jja.site.df.age.dry.id$site <- factor(jja.site.df.age.dry.id$site, levels = c("BON", "GLL1", "GLL2", "GLA",
                                                                              "GLL3", "UNC", "MOU", "HIC",
                                                                              "GLL4","TOW", "AVO", "ENG",
                                                                              "COR","STC", "PVC", "PLE", "UNI"))
# ---- Per-site boxplots (tree-ID sensitivities, dry years), split across two
# figures of up to eight sites each ----
png(width = 12, height = 4.5, units = "in", res = 300,"outputs/JJA_pdsi_boxplot_Past_Modern_sens_dry_0.25_by_site_id_first.png")
ggplot(jja.site.df.age.dry.id[!jja.site.df.age.dry.id$site %in% c("UNI", NA, "GLL4","TOW", "AVO", "ENG",
                                                                  "COR","STC", "PVC", "PLE"),], aes(age, slope.est, fill = age))+geom_boxplot()+
  theme_black(base_size = 20)+scale_fill_manual(values = ageColors)+ylab("Growth Sensitivity to Drought (PDSI) \n in dry years")+theme(legend.title = element_blank(),strip.text = element_text(face="bold", size=9),
                                                                                                                                       strip.background = element_rect( colour="black",size=0.01))+facet_wrap(~site, scales = "free_y", ncol = 4)#+ylim(-0.01, 0.15)
dev.off()
png(width = 12, height = 4.5, units = "in", res = 300,"outputs/JJA_pdsi_boxplot_Past_Modern_sens_dry_0.25_by_site_id_second.png")
ggplot(jja.site.df.age.dry.id[!jja.site.df.age.dry.id$site %in% c("UNI", NA, "BON", "GLL1", "GLL2", "GLA",
                                                                  "GLL3", "UNC", "MOU", "HIC"),], aes(age, slope.est, fill = age))+geom_boxplot()+
  theme_black(base_size = 20)+scale_fill_manual(values = ageColors)+ylab("Growth Sensitivity to Drought (PDSI) \n in dry years")+theme(legend.title = element_blank(),strip.text = element_text(face="bold", size=9),
                                                                                                                                       strip.background = element_rect( colour="black",size=0.01))+facet_wrap(~site, scales = "free_y", ncol = 4)#+ylim(-0.01, 0.15)
dev.off()
# Sites with both Past and Modern trees: run a Welch t-test at each and print
# the result.  (Idiom fix: iterate over the vector directly instead of
# `for (i in 1:length(pairs))` with a redundant `unique(pairs)[i]` lookup.)
pairs <- c( 'GLA' , 'GLL1', 'GLL2', 'GLL3', 'MOU' , 'UNC')
for (sitei in pairs) {
  testresults <- t.test(jja.site.df.age.dry.id[jja.site.df.age.dry.id$age %in% "Past" & jja.site.df.age.dry.id$site %in% sitei,]$slope.est, jja.site.df.age.dry.id[jja.site.df.age.dry.id$age %in% "Modern" & jja.site.df.age.dry.id$site %in% sitei,]$slope.est )
  print(sitei)
  print(testresults)
}
# Per-site bar chart of dry-year sensitivity with error bars.
# NOTE(review): the filename says "boxplot" but geom_bar(stat="identity") is
# used here -- confirm intended geometry.
png(width = 12, height = 6, units = "in", res = 300, "outputs/JJA_pdsi_boxplot_Past_Modern_sens_dry_0.25_by_site.png")
ggplot(jja.site.df.age.dry, aes(age, slope.est, fill = age))+geom_bar(stat = "identity", position = "dodge")+geom_errorbar(aes(ymin = slope.min, ymax = slope.max ), color = "grey", width = 0.2)+
  theme_black(base_size = 20)+scale_fill_manual(values = ageColors)+ylab("Growth Sensitivity to Drought (PDSI) \n in dry years")+theme(legend.title = element_blank())+facet_wrap(~site, scales = "free_y")
dev.off()
# Exploratory: correlations (not slopes) by DBH class; only drawn to screen.
ggplot(cor.jul.pdsi.age_dry.25.dbh[cor.jul.pdsi.age_dry.25.dbh$dbhclass %in% c("< 20", "20 - 40", "40 - 60", "60 - 80"),], aes(age, cor.est, fill = age))+geom_bar(stat="identity")+
  theme_black(base_size = 20)+scale_fill_manual(values = ageColors)+ylab("Growth Sensitivity to Drought (PDSI) \n in dry years")+theme(legend.title = element_blank())+facet_wrap(~site)
png("outputs/JJA_pdsi_boxplot_Past_Modern_sens_dry_0.25_bydbh_class.png")
ggplot(cor.jul.pdsi.age_dry.25.dbh[cor.jul.pdsi.age_dry.25.dbh$dbhclass %in% c("< 20", "20 - 40", "40 - 60", "60 - 80"),], aes(age, cor.est, fill = age))+geom_boxplot()+
  theme_black(base_size = 20)+scale_fill_manual(values = ageColors)+ylab("Growth Sensitivity to Drought (PDSI) \n in dry years")+theme(legend.title = element_blank())+facet_wrap(~dbhclass)
dev.off()
png("outputs/JJA_pdsi_boxplot_Past_Modern_sens.png")
ggplot(sens.jul.pdsi.age_dry.25, aes(age, slope.est, fill = age))+geom_boxplot()+
  theme_black(base_size = 20)+scale_fill_manual(values = ageColors)+ylab("Growth Sensitivity to Drought (PDSI)")+theme(legend.title = element_blank())
dev.off()
# ---- Exploratory scatter plots of sensitivity vs. site covariates ----
# (drawn to the active device, not saved)
ggplot(jja.site.df.age.wet[!jja.site.df.age.wet$site %in% c("PLE", "AVO", "ENG"),], aes(pr30yr, slope.est, color = age))+geom_point()+geom_errorbar(aes(ymin=slope.min, ymax = slope.max), width = 0.5)
ggplot(jja.site.df.age.dry[!jja.site.df.age.dry$site %in% c( "PLE", "AVO" ,"ENG"),], aes(pr30yr, slope.est, color = site))+geom_point()+geom_errorbar(aes(ymin=slope.min, ymax = slope.max), width = 0.5)
ggplot(jja.site.df.age, aes(pr30yr, slope.est, color = age))+geom_point()+geom_errorbar(aes(ymin=slope.min, ymax = slope.max), width = 0.5)
ggplot(jja.site.df.age, aes(tm30yr, slope.est, color = age))+geom_point()+geom_errorbar(aes(ymin=slope.min, ymax = slope.max))+geom_errorbar(aes(ymin=slope.min, ymax = slope.max), width = 0.5)
ggplot(jja.site.df.age, aes(sand, slope.est, color = age))+geom_point()+geom_errorbar(aes(ymin=slope.min, ymax = slope.max))+geom_errorbar(aes(ymin=slope.min, ymax = slope.max), width = 0.5)+stat_smooth(method = "lm")
ggplot(jja.site.df.age, aes(DBH, slope.est, color = age))+geom_point()+geom_errorbar(aes(ymin=slope.min, ymax = slope.max))+geom_errorbar(aes(ymin=slope.min, ymax = slope.max), width = 0.5)+stat_smooth(method = "lm")
#ggplot(jja.site.df.age, aes(CW_avg, slope.est, color = age))+geom_point()+geom_errorbar(aes(ymin=slope.min, ymax = slope.max))+geom_errorbar(aes(ymin=slope.min, ymax = slope.max), width = 0.5)+stat_smooth(method = "lm")
# Saved figure: sensitivity vs. site-average DBH with per-age linear fits.
png("outputs/JJA_pdsi_sensitivity_v_siteDBH_age.png")
ggplot(jja.site.df.age, aes(DBH, slope.est, color = age))+geom_point()+geom_errorbar(aes(ymin=slope.min, ymax = slope.max), width = 0.5)+scale_color_manual(values = ageColors)+stat_smooth(method = 'lm', se = FALSE)+theme_black(base_size = 20)+ylab("Growth Sensitivity to Drought (PDSI)")+xlab("Site Avg DBH")+theme(legend.title = element_blank())
dev.off()
ggplot(jja.site.df.age[!jja.site.df.age$site %in% c("AVO", "PLE"),], aes(awc, slope.est, color = age))+geom_point()+geom_errorbar(aes(ymin=slope.min, ymax = slope.max), width = 0.005)+scale_color_manual(values = ageColors)+stat_smooth(method = 'lm', se = FALSE)+theme_black(base_size = 20)+ylab("Growth Sensitivity to Drought (PDSI)")+xlab("AWC")+theme(legend.title = element_blank())
ggplot(jja.sens.df[!jja.sens.df$site %in% c("AVO", "PLE"),], aes(pr30yr, slope.est))+geom_point()+geom_errorbar(aes(ymin=slope.min, ymax = slope.max), width = 0.005)+stat_smooth(method = 'lm', se = FALSE)+theme_black(base_size = 20)+ylab("Growth Sensitivity to Drought (PDSI)")+xlab("AWC")+theme(legend.title = element_blank())
summary(lm(slope.est ~ pr30yr, data = jja.sens.df[!jja.sens.df$site %in% c("AVO", "PLE") ,]))
ggplot(jja.site.df.age[!jja.site.df.age$site %in% c("AVO", "PLE"),], aes(coords.x1, slope.est, color = age))+geom_point()+geom_errorbar(aes(ymin=slope.min, ymax = slope.max), width = 0.005)+scale_color_manual(values = ageColors)+stat_smooth(method = 'lm', se = FALSE)+theme_black(base_size = 20)+ylab("Growth Sensitivity to Drought (PDSI)")+xlab("AWC")+theme(legend.title = element_blank())
summary(lm(slope.est ~ pr30yr, data = jja.site.df.age[!jja.site.df.age$site %in% c("AVO", "PLE") & jja.site.df.age$age %in% "Past",]))
ggplot(jja.site.df.age.wet, aes(DBH, slope.est, color = age))+geom_point()+geom_errorbar(aes(ymin=slope.min, ymax = slope.max), width = 0.5)+scale_color_manual(values = ageColors)+stat_smooth(method = 'lm', se = FALSE)+theme_black(base_size = 20)+ylab("Growth Sensitivity to Drought (PDSI)")+xlab("Site Avg DBH")+theme(legend.title = element_blank())
# Quick linear/GAM fits of sensitivity against soil and climate covariates.
summary(lm(slope.est ~awc + sand + age, data = jja.site.df.age))
gam.pr.dbh <- gam(slope.est ~ pr30yr+ DBH + age,data = jja.site.df.age[!jja.site.df.age$site %in% c("AVO", "PLE"),])
summary(gam.pr.dbh)
# Predicted sensitivity from the precip + DBH + age GAM, stored per row.
jja.site.df.age$ypred <- predict(gam.pr.dbh, jja.site.df.age)
summary(jja.site.df.age)
#png('outputs/JJA_pdsi_modeled_sensitivity_v_DBH_age.png')
#ggplot(jja.site.df.age, aes(ypred, slope.est)) + geom_point(color = "white") + geom_abline(color = "red", linetype = "dashed")+theme_black(base_size = 20)+ylab("Observed Sensitivity to July PDSI")+xlab("Predicted Sensitivity to July PDSI")
#dev.off()
# ---- Saved figures: sensitivity vs. DBH / sand / precip / temperature,
# colored by age class, with per-class linear fits (no CI bands) ----
png("outputs/JJA_pdsi_sensitivity_v_DBH_age.png")
ggplot(jja.site.df.age[!jja.site.df.age$site %in% c("PLE"),], aes(DBH, slope.est, color = age))+geom_point()+geom_errorbar(aes(ymin=slope.min, ymax = slope.max), width = 0.5)+scale_color_manual(values = ageColors)+stat_smooth(method = 'lm', se = FALSE)+theme_black(base_size = 20)+ylab("Growth Sensitivity to Drought (PDSI)")+xlab("Mean Diameter of Site")+theme(legend.title = element_blank())
dev.off()
png("outputs/JJA_pdsi_sensitivity_v_sand_age.png")
ggplot(jja.site.df.age[!jja.site.df.age$site %in% c("PLE"),] , aes(sand, slope.est, color = age))+geom_point()+geom_errorbar(aes(ymin=slope.min, ymax = slope.max), width = 0.5)+scale_color_manual(values = ageColors)+stat_smooth(method = 'lm', se = FALSE)+theme_black(base_size = 20)+ylab("Growth Sensitivity to Drought (PDSI)")+xlab("% Sand")+theme(legend.title = element_blank())
dev.off()
png("outputs/JJA_pdsi_dry_yrs_sensitivity_v_sand_age.png")
ggplot(jja.site.df.age.dry, aes(sand, slope.est, color = age))+geom_point()+geom_errorbar(aes(ymin=slope.min, ymax = slope.max), width = 0.5)+scale_color_manual(values = ageColors)+stat_smooth(method = 'lm', se = FALSE)+theme_black(base_size = 20)+ylab("Growth Sensitivity to Drought (PDSI)")+xlab("% Sand")+theme(legend.title = element_blank())+ylim(-0.5,0.5)
dev.off()
png("outputs/JJA_pdsi_sensitivity_v_sand_age_wet_years.png")
ggplot(jja.site.df.age.wet, aes(sand, slope.est, color = age))+geom_point()+geom_errorbar(aes(ymin=slope.min, ymax = slope.max), width = 0.5)+scale_color_manual(values = ageColors)+stat_smooth(method = 'lm', se = FALSE)+theme_black(base_size = 20)+ylab("Growth Sensitivity to Drought (PDSI)")+xlab("% Sand")+theme(legend.title = element_blank())
dev.off()
png("outputs/JJA_pdsi_sensitivity_v_MAP_age.png")
ggplot(jja.site.df.age, aes(pr30yr, slope.est, color = age))+geom_point()+geom_errorbar(aes(ymin=slope.min, ymax = slope.max), width = 0.5)+scale_color_manual(values = ageColors)+stat_smooth(method = 'lm', se = FALSE)+theme_black(base_size = 20)+ylab("Growth Sensitivity to Drought (PDSI)")+xlab("Mean Annual Precipitation")+theme(legend.title = element_blank())
dev.off()
png("outputs/JJA_pdsi_sensitivity_v_MAP_age_dry_years.png")
ggplot(jja.site.df.age.dry[!jja.site.df.age.dry$site %in% c("PLE","AVO", "ENG"),], aes(pr30yr, slope.est, color = age))+geom_point()+geom_errorbar(aes(ymin=slope.min, ymax = slope.max), width = 0.5)+scale_color_manual(values = ageColors)+stat_smooth(method = 'lm', se = FALSE)+theme_black(base_size = 20)+ylab("Growth Sensitivity to Drought (PDSI)")+xlab("Mean Annual Precipitation")+theme(legend.title = element_blank())
dev.off()
# NOTE(review): the row filter below indexes jja.site.df.age with a mask built
# from jja.site.df.age.dry$site -- likely a copy-paste slip; confirm.
png("outputs/JJA_pdsi_sensitivity_v_TMEAN_age.png")
ggplot(jja.site.df.age[!jja.site.df.age.dry$site %in% c("PLE","AVO", "ENG"),], aes(tm30yr, slope.est, color = age))+geom_point()+geom_errorbar(aes(ymin=slope.min, ymax = slope.max), width = 0.1)+scale_color_manual(values = ageColors)+stat_smooth(method = 'lm', se = FALSE)+theme_black(base_size = 20)+ylab("Growth Sensitivity to Drought (PDSI)")+xlab("Mean Monthly Temperature (DegC)")+theme(legend.title = element_blank())
dev.off()
#ggplot(jja.site.df.age.dry.id[!jja.site.df.age.dry.id$site %in% c("PLE","AVO", "ENG"),], aes(clay, slope.est, color = age))+geom_point()+geom_errorbar(aes(ymin=slope.min, ymax = slope.max), width = 0.1)+scale_color_manual(values = ageColors)+stat_smooth(method = 'lm', se = FALSE)+theme_black(base_size = 20)+ylab("Growth Sensitivity to Drought (PDSI)")+xlab("Mean Monthly Temperature (DegC)")+theme(legend.title = element_blank())
#ggplot(cor.age.df[!cor.age.df$site %in% "UNC",], aes(sand, cor.est, color = age))+geom_point()+geom_errorbar(aes(ymin=ci.min, ymax = ci.max))+stat_smooth(method = "lm")
#sens.Modern <- gam(slope.est ~ pr30yr + tm30yr +sand , data = jja.site.df.age[jja.site.df.age$age=="Modern",])
#summary(sens.Modern) # explains 47.7% of deviance:
#sens.Past <- gam(slope.est ~ pr30yr + tm30yr +sand , data = sens.df[sens.df$age=="Past",])
#summary(sens.Past) # explains 90.5% of deviance:
##############################################################
# make prelimnary plots for pre- and post- 1950
###############################################################3
# Reverse the year-class factor so Pre-1950 plots first, and name the palette.
jja.site.df.yr$age <- factor(jja.site.df.yr$age,levels = rev(levels(jja.site.df.yr$age)),ordered = TRUE)
yrColors <- c( "#009E73", "#D55E00")
names(yrColors) <- levels(jja.site.df.yr$age)
#colScale <- scale_colour_manual(name = "grp",values = myColors)
png("outputs/boxplot_pre_post_sens.png")
ggplot(jja.site.df.yr, aes(age, slope.est, fill = age))+geom_boxplot()+theme_black(base_size = 20)+scale_fill_manual(values = yrColors)+ylab("Growth Sensitivity to Drought (PDSI)")+theme(legend.title = element_blank())
dev.off()
# Exploratory (screen-only) scatter plots:
ggplot(jja.site.df.yr, aes(pr30yr, slope.est, color = age))+geom_point()+geom_errorbar(aes(ymin=slope.min, ymax = slope.max), width = 0.5)
ggplot(jja.site.df.yr, aes(tm30yr, slope.est, color = age))+geom_point()+geom_errorbar(aes(ymin=slope.min, ymax = slope.max), width = 0.5)
# Saved covariate figures for the pre/post-1950 split:
png("outputs/sensitivity_v_sand_pre_post.png")
ggplot(jja.site.df.yr, aes(sand, slope.est, color = age))+geom_point()+geom_errorbar(aes(ymin=slope.min, ymax = slope.max), width = 0.5)+stat_smooth(method = 'lm', se = FALSE)+scale_color_manual(values = yrColors)+theme_black(base_size = 20)+ylab("Growth Sensitivity to Drought (PDSI)")+xlab("% Sand")+theme(legend.title = element_blank())
dev.off()
png("outputs/sensitivity_v_MAP_pre_post.png")
ggplot(jja.site.df.yr, aes(pr30yr, slope.est, color = age))+geom_point()+geom_errorbar(aes(ymin=slope.min, ymax = slope.max), width = 0.5)+stat_smooth(method = 'lm', se = FALSE)+scale_color_manual(values = yrColors)+theme_black(base_size = 20)+ylab("Growth Sensitivity to Drought (PDSI)")+xlab("Mean Annual Precipitation")+theme(legend.title = element_blank())
dev.off()
png("outputs/sensitivity_v_TMEAN_pre_post.png")
ggplot(jja.site.df.yr, aes(tm30yr, slope.est, color = age))+geom_point()+geom_errorbar(aes(ymin=slope.min, ymax = slope.max), width = 0.05)+stat_smooth(method = 'lm', se = FALSE)+scale_color_manual(values = yrColors)+theme_black(base_size = 20)+ylab("Growth Sensitivity to Drought (PDSI)")+xlab("Mean Monthly Temperature (DegC)")+theme(legend.title = element_blank())
dev.off()
png("outputs/sensitivity_v_DBH_pre_post.png")
ggplot(jja.site.df.yr, aes(DBH, slope.est, color = age))+geom_point()+geom_errorbar(aes(ymin=slope.min, ymax = slope.max), width = 0.05)+stat_smooth(method = 'lm', se = FALSE)+scale_color_manual(values = yrColors)+theme_black(base_size = 20)+ylab("Growth Sensitivity to Drought (PDSI)")+xlab("DBH (cm)")+theme(legend.title = element_blank())
dev.off()
ggplot(jja.site.df.yr, aes(sand, pr30yr,color = slope.est, shape = age))+geom_point()+geom_errorbar(aes(ymin=slope.min, ymax = slope.max), width = 0.5) + ylim(500, 1000)
# Linear models of sensitivity against soil/climate/size covariates:
summary(lm(slope.est ~ sand + age ,data = jja.site.df.yr))
summary(lm(slope.est ~ sand + pr30yr + age ,data = jja.site.df.age))
summary(lm(slope.est ~ pr30yr + age +DBH,data = jja.site.df.age))
# Wide table: one row per site with Past and Modern sensitivities side by side,
# then the Past - Modern difference.
reformed.df <- dcast(jja.site.df.age[c("site", "age", "coords.x1", "coords.x2", 'slope.est', "DBH" , "pr30yr", "tm30yr",'sand')], coords.x1 + coords.x2+site+DBH+pr30yr+tm30yr+sand ~ age, mean, na.rm=TRUE, value.var = 'slope.est')
reformed.df$diff <- reformed.df$Past - reformed.df$Modern
sens.dif <- gam(Modern ~ pr30yr + DBH , data = reformed.df)
summary(sens.dif) #Deviance explained = 41.1%
gam.sens.age <- gam(slope.est ~ pr30yr + DBH , data = jja.site.df.age)
summary(gam.sens.age)
#sens.post <- gam(slope.est ~ pr30yr + tm30yr +sand , data = yr.sens.df[yr.sens.df$age=="Post-1950",])
#summary(sens.post) # explains 36.8% of deviance:
# Alias used by the 3D surface plotting below.
sens.df <- jja.site.df.age
#install.packages("plot3D")
library(plot3D)
# Helper defined below: subsets the data to one class of trees, fits a linear
# surface lm(sensitivity ~ precip + DBH), and draws a 3D scatter + plane.
#' Fit a linear drought-sensitivity surface (sensitivity ~ precip + DBH) for
#' one class of trees and draw it as a 3D scatter with a regression plane.
#'
#' @param sens.df data frame with columns pr30yr, DBH, slope.est plus the
#'   grouping column named by `age`.
#' @param age   name (string) of the grouping column in `sens.df`.
#' @param class value of the grouping column to plot (e.g. "Past", "Modern").
#' @param col   point/plane colour.
#' @param add   logical; add to an existing scatter3D plot (TRUE) or open a
#'   new one (FALSE).
#' @param zlim  z-axis limits; default c(0, 0.1) matches the previously
#'   hard-coded range, so existing callers are unaffected.
#' @return NULL invisibly; called for its plotting side effect
#'   (plot3D::scatter3D).
plot3dsensitivity <- function(sens.df, age, class, col, add, zlim = c(0, 0.1)){
  # Keep only the requested class and drop rows with missing sensitivity.
  df <- sens.df[sens.df[, c(age)] == class, ]
  df <- df[!is.na(df$slope.est), ]
  x <- df$pr30yr    # 30-yr precipitation normal
  y <- df$DBH       # diameter at breast height (cm)
  z <- df$slope.est # drought sensitivity (growth slope vs. PDSI)
  # Linear regression plane z = a*x + b*y + d.
  fit <- lm(z ~ x + y)
  # Evaluate the plane on a regular 25x25 grid spanning the data range.
  grid.lines <- 25
  x.pred <- seq(min(x), max(x), length.out = grid.lines)
  y.pred <- seq(min(y), max(y), length.out = grid.lines)
  xy <- expand.grid(x = x.pred, y = y.pred)
  z.pred <- matrix(predict(fit, newdata = xy),
                   nrow = grid.lines, ncol = grid.lines)
  # Fitted values give the droplines from each point to the surface.
  fitpoints <- predict(fit)
  # Scatter plot with the fitted regression plane overlaid.
  scatter3D(x, y, z, pch = 18, cex = 2, col = col,
            theta = 50, phi = 25, bty = "u", lwd.panel = 2, space = 0.15, ticktype = "detailed",
            xlab = "\n\n\n\n Precip", ylab = "\n\n\n\n DBH (cm)", zlab = "\n\n\n\n drought sensitivity", add = add,
            surf = list(x = x.pred, y = y.pred, z = z.pred,
                        facets = NA, fit = fitpoints),
            main = "Drought Sensitivity by climate",
            zlim = zlim)
  invisible(NULL)
}
# Plot the Past and Modern predictive surfaces on the same 3D plot.
png(height = 5, width = 9, units = 'in', res= 300, 'outputs/sensitivity_surface3d_age.png')
plot3dsensitivity(jja.site.df.age, "age","Past", "#009E73",FALSE)
plot3dsensitivity(jja.site.df.age, "age","Modern", "#D55E00",TRUE)
# BUGFIX: the legend previously read "Modern pre-1950"/"Modern post-1950"
# (copied from the year-split figure below) although the surfaces drawn here
# are the Past vs. Modern age classes.
legend(x = 0.5, y = 0 ,
       legend = c(expression(atop("Past trees", "(low CO"[2]*")")), expression(atop("Modern trees", "(high CO"[2]*")"))),
       col = c("#009E73",
               "#D55E00"),
       pch = c(18, 18),
       bty = "n",
       pt.cex = 2,
       cex = 1.2,
       text.col = "black",
       horiz = F ,
       inset = c(0.1, 0.1))
dev.off()
# Pre- vs. post-1950 sensitivity surfaces (all trees split by calendar year):
yr.sens.df <- jja.site.df.yr
png(height = 5, width = 9, units = 'in', res= 300,'outputs/sensitivity_surface3d_pre_post_1950_precip_DBH.png')
#sens.df, age, class, col, add
plot3dsensitivity(sens.df = jja.site.df.yr, age = "age",class = "Pre-1950", col = "#009E73",add = FALSE)
plot3dsensitivity(jja.site.df.yr, "age","Post-1950", "#D55E00",TRUE)
legend(x = 0.5, y = 0 ,
       legend = c(expression(atop("All trees Pre-1950", "(low CO"[2]*")")), expression(atop("All trees Post-1950", "(high CO"[2]*")"))),
       col = c("#009E73",
               "#D55E00"),
       pch = c(18, 18),
       bty = "n",
       pt.cex = 2,
       cex = 1.2,
       text.col = "black",
       horiz = F ,
       inset = c(0.1, 0.1))
dev.off()
#-----------------------------modeling drought sensitivity over space:
# GAM of site-level sensitivity as a function of precipitation normal and DBH.
gam.sens <- mgcv::gam(slope.est ~ pr30yr + DBH , data = jja.sens.df)
# BUGFIX: predict() was called with `data =`, which is swallowed by `...` and
# silently returns fitted values for the model frame (NA rows dropped), so the
# result could misalign with jja.sens.df.  `newdata =` predicts for every row.
jja.sens.df$gam_ypred <- predict(gam.sens, newdata = jja.sens.df)
# NOTE(review): `DBH*pi` puts the constant pi into the formula (interaction
# with a constant), and the fit is stored in a variable named `sand` -- almost
# certainly unintended.  Left as-is pending author confirmation.
sand <- lm(slope.est ~ pr30yr + DBH*pi, data = site.df[!site.df$site %in% "UNC",]) # outside of UNCAS dusnes, sesnsitivyt depends on soil type
summary(gam.sens) # explains 27.4% of deviance:
# get pr30yr for the whole region:
# PRISM 30-yr annual precipitation normals, reprojected to Great Lakes Albers
# (EPSG:3175).  `workingdir` is defined earlier in the script.
prism <- raster(paste0(workingdir,"PRISM_ppt_30yr_normal_4kmM2_all_bil/PRISM_ppt_30yr_normal_4kmM2_annual_bil.bil"))
prism.alb <- projectRaster(prism, crs='+init=epsg:3175')
# get FIA average DBH for each grid cell:
# NOTE(review): absolute user-specific paths -- not portable; move to a
# configurable data directory.
FIA <- read.csv('/Users/kah/Documents/bimodality/data/FIA_species_plot_parameters_paleongrid.csv')
speciesconversion <- read.csv('/Users/kah/Documents/bimodality/data/fia_conversion_v02-sgd.csv')
FIA.pal <- merge(FIA, speciesconversion, by = 'spcd' )
FIA.by.paleon <- dcast(FIA.pal, x + y+ cell+ plt_cn ~ PalEON, mean, na.rm=TRUE, value.var = 'dbh') #sum all species in common taxa in FIA grid cells
fia.melt <- melt(FIA.by.paleon, id.vars = c('x', 'y', 'cell', 'plt_cn', 'Var.5')) # melt the dataframe
#fia.by.cell <- dcast(fia.melt, x + y+ cell ~ variable, mean, na.rm=TRUE, value.var = 'value') # average species densities and total density within each grid cell
# Keep only the Oak mean DBH per grid cell and rename for prediction.
Oak.sites <- FIA.by.paleon[,c("x","y","cell", "Oak")]
colnames(Oak.sites) <- c("x", "y","cell", "DBH")
# extract pr30yr for all sites where we have FIA data:
Oak.sites$pr30yr <- raster::extract(prism.alb, Oak.sites[,c("x","y")])
# Predict the fitted sensitivity model (gam.sens, from above) across the
# regional grid and map it.
July_pdsi_sens_pred <- as.vector(predict(gam.sens, newdata = Oak.sites))
Oak.sites$July_pdsi_sens_pred <- July_pdsi_sens_pred
ggplot(Oak.sites, aes(x,y, fill = July_pdsi_sens_pred))+geom_raster()
# assume all forests have similar drought sensitivity as oaks:
FIA.pal <- merge(FIA, speciesconversion, by = 'spcd' )
FIA.by.paleon <- dcast(FIA.pal, x + y+ cell+ plt_cn ~ PalEON , mean, na.rm=TRUE, value.var = 'dbh') #sum all species in common taxa in FIA grid cells
fia.melt <- melt(FIA.by.paleon, id.vars = c('x', 'y', 'cell', 'plt_cn')) # melt the dataframe
fia.by.cell <- dcast(fia.melt, x + y+ cell ~ variable, sum, na.rm=TRUE, value.var = 'value') # average species densities and total density within each grid cell
fia.by.cell[fia.by.cell == 0] <- NA
fia.by.cell$DBH <- rowMeans(fia.by.cell[,4:length(fia.by.cell)], na.rm=TRUE)
ggplot(fia.by.cell, aes(x,y, fill = DBH))+geom_raster()
DBH_all <- fia.by.cell[,c("x", "y", "cell", "DBH")]
DBH_all$pr30yr <- raster::extract(prism.alb, DBH_all[,c("x","y")])
# now project gam for whole region
July_pdsi_sens_pred <- as.vector(predict.gam(gam.sens, newdata = DBH_all))
DBH_all$July_pdsi_sens_pred <- July_pdsi_sens_pred
ggplot(DBH_all, aes(x,y, fill = July_pdsi_sens_pred))+geom_raster()
write.csv(DBH_all, "outputs/DBH_modern_8km.csv")
# Scenario predictions: what would the sensitivity landscape look like if every
# tree were "Modern" (post-1950 response) vs. "Past" (pre-1950 response)?
# gam.pr.dbh is the age-aware gam fit earlier in this file -- it uses the `age`
# factor column added below.
Oak.Modern <- Oak.sites
Oak.Modern$age <- "Modern"
July_pdsi_Modern_sens_pred <- as.vector(predict.gam(gam.pr.dbh, newdata = Oak.Modern))
Oak.Modern$July_pdsi_Modern_sens_pred <- July_pdsi_Modern_sens_pred
# same grid, but treat every tree as a Past (pre-1950) tree:
Oak.Past <- Oak.Modern
Oak.Past$age <- "Past"
July_pdsi_Past_sens_pred <-as.vector(predict(gam.pr.dbh, newdata = Oak.Past))
Oak.Past$July_pdsi_Past_sens_pred <- July_pdsi_Past_sens_pred
# quick-look maps (Oak.Past carries both prediction columns because it was
# copied from Oak.Modern above)
ggplot(Oak.Past, aes(x, y, fill = July_pdsi_Past_sens_pred))+geom_raster()
ggplot(Oak.Past, aes(x, y, fill = July_pdsi_Modern_sens_pred))+geom_raster()
#Oak.Past$diff <- Oak.Past$July_pdsi_Past_sens_pred - Oak.Past$July_pdsi_Modern_sens_pred
#ggplot(Oak.Past, aes(x, y, fill = diff ))+geom_raster()
# Repeat both scenarios for the all-taxa DBH grid:
All.Modern <- DBH_all
All.Modern$age <- "Modern"
July_pdsi_Modern_sens_pred <- as.vector(predict.gam(gam.pr.dbh, newdata = All.Modern))
All.Modern$July_pdsi_Modern_sens_pred <- July_pdsi_Modern_sens_pred
# if all trees were Past:
All.Past <- All.Modern
All.Past$age <- "Past"
July_pdsi_Past_sens_pred <-as.vector(predict(gam.pr.dbh, newdata = All.Past))
All.Past$July_pdsi_Past_sens_pred <- July_pdsi_Past_sens_pred
# quick-look maps of inputs and both scenario predictions
ggplot(DBH_all, aes(x, y, fill = DBH))+geom_raster()
ggplot(All.Past, aes(x, y, fill = July_pdsi_Past_sens_pred))+geom_raster()
ggplot(All.Past, aes(x, y, fill = July_pdsi_Modern_sens_pred))+geom_raster()
ggplot(All.Past, aes(x, y, fill = July_pdsi_Past_sens_pred))+geom_raster()
ggplot(All.Past, aes(x, y, fill = DBH))+geom_raster()
# Build the state-outline overlay used by every map below: pull upper-Midwest
# state polygons, promote them to SpatialPoints, and reproject from NAD83
# lat/long to the Great Lakes Albers grid (EPSG:3175) used by the rasters.
all_states <- map_data("state")
states <- subset(all_states, region %in% c( "illinois", "minnesota", "wisconsin", "iowa", "south dakota",
                                            "north dakota", 'michigan', 'missouri', 'indiana') )
coordinates(states)<-~long+lat
class(states)
proj4string(states) <-CRS("+proj=longlat +datum=NAD83")
mapdata<-spTransform(states, CRS('+init=epsg:3175'))
mapdata<-data.frame(mapdata)
# yellow-to-red 5-step palette (colorbrewer YlOrRd) shared by all maps
red.pal <- c('#ffffb2',
             '#fecc5c',
             '#fd8d3c',
             '#f03b20',
             '#bd0026')
# Map drought sensitivity predicted for oaks (left panel of the output figure).
# The fill limits are shared across panels so colors are comparable.
sites.map <- ggplot()+ geom_raster(data=Oak.sites, aes(x=x, y=y, fill = July_pdsi_sens_pred))+
  labs(x="easting", y="northing", title="Oak Drought Sensitivity") + 
  scale_fill_gradientn(colours = red.pal, name ="Drought \n Sensitivity", limits = c(-0.03, 0.075))+
  coord_cartesian(xlim = c(-59495.64, 725903.4), ylim=c(68821.43, 1480021))
# add the state outlines and strip axes/gridlines for a clean panel
sites.map.oak <- sites.map +geom_polygon(data=data.frame(mapdata), aes(x=long, y=lat, group=group),
                                         colour = "darkgrey", fill = NA)+theme_bw() + theme_black(base_size = 20)+ 
  theme(axis.text = element_blank(),
        axis.ticks=element_blank(),
        axis.title = element_blank(),
        legend.key = element_rect(),
        #legend.background = element_rect(fill = "white"),
        panel.grid.major = element_blank(),
        panel.grid.minor = element_blank(),
        title = element_text(margin = margin(t = 0, r = 20, b = 10, l = 0)))
sites.map.oak
# Same map for the all-taxa DBH grid (right panel).
sites.map <- ggplot()+ geom_raster(data=DBH_all, aes(x=x, y=y, fill = July_pdsi_sens_pred))+
  labs(x="easting", y="northing", title="All Trees Drought Sensitivity") + 
  scale_fill_gradientn(colours = red.pal, name ="Drought \n Sensitivity", limits = c(-0.03, 0.075))+
  coord_cartesian(xlim = c(-59495.64, 725903.4), ylim=c(68821.43, 1480021))
sites.map.all <- sites.map +geom_polygon(data=data.frame(mapdata), aes(x=long, y=lat, group=group),
                                         colour = "darkgrey", fill = NA)+theme_bw() + theme_black(base_size = 20)+ 
  theme(axis.text = element_blank(),
        axis.ticks=element_blank(),
        axis.title = element_blank(),
        legend.key = element_rect(),
        #legend.background = element_rect(fill = "white"),
        panel.grid.major = element_blank(),
        panel.grid.minor = element_blank(),
        title = element_text(margin = margin(t = 0, r = 20, b = 10, l = 0)))
sites.map.all
# write the two panels side by side
png(width = 12, height = 6, units = "in", res = 300, "outputs/all_modern_drought_sens_predmaps.png")
grid.arrange(sites.map.oak, sites.map.all, ncol = 2)
dev.off()
# ----------------------------- Modern + Past comparison -----------------
# Oak sensitivity map under the Past (pre-1950) scenario:
sites.map <- ggplot()+ geom_raster(data=Oak.Past, aes(x=x, y=y, fill = July_pdsi_Past_sens_pred))+
  labs(x="easting", y="northing", title="Drought Sensitivity 1895-1950") + 
  scale_fill_gradientn(colours = red.pal, name ="Drought \n Sensitivity", limits = c(-0.03, 0.075))+
  coord_cartesian(xlim = c(-59495.64, 725903.4), ylim=c(68821.43, 1480021))
sites.map.Past <- sites.map +geom_polygon(data=data.frame(mapdata), aes(x=long, y=lat, group=group),
                                          colour = "darkgrey", fill = NA)+theme_bw() + theme_black(base_size = 20)+ 
  theme(axis.text = element_blank(),
        axis.ticks=element_blank(),
        axis.title = element_blank(),
        legend.key = element_rect(),
        #legend.background = element_rect(fill = "white"),
        panel.grid.major = element_blank(),
        panel.grid.minor = element_blank(),
        title = element_text(margin = margin(t = 0, r = 20, b = 10, l = 0)))
sites.map.Past
# quick DBH sanity-check plot (title reuses the sensitivity caption; fill is DBH)
ggplot()+ geom_raster(data=Oak.Past, aes(x=x, y=y, fill = DBH))+
  labs(x="easting", y="northing", title="Drought Sensitivity 1895-1950") + 
  scale_fill_gradientn(colours = red.pal, name ="Drought \n Sensitivity", limits = c(-0.03, 0.075))+
  coord_cartesian(xlim = c(-59495.64, 725903.4), ylim=c(68821.43, 1480021))
# Oak sensitivity map under the Modern (post-1950) scenario:
sites.map <- ggplot()+ geom_raster(data=Oak.Past, aes(x=x, y=y, fill = July_pdsi_Modern_sens_pred))+
  labs(x="easting", y="northing", title="Drought Sensitivity 1950-present") + 
  scale_fill_gradientn(colours = red.pal, name ="Drought \n Sensitivity", limits = c(-0.03, 0.075))+
  coord_cartesian(xlim = c(-59495.64, 725903.4), ylim=c(68821.43, 1480021))
sites.map.Modern <- sites.map +geom_polygon(data=data.frame(mapdata), aes(x=long, y=lat, group=group),
                                            colour = "darkgrey", fill = NA)+theme_bw() + theme_black(base_size = 20)+ 
  theme(axis.text = element_blank(),
        axis.ticks=element_blank(),
        axis.title = element_blank(),
        legend.key = element_rect(),
        #legend.background = element_rect(fill = "white"),
        panel.grid.major = element_blank(),
        panel.grid.minor = element_blank(),
        title = element_text(margin = margin(t = 0, r = 20, b = 10, l = 0)))
sites.map.Modern
# two-panel Past vs. Modern figure for oaks
png(width = 12, height = 6, units = "in", res = 300, "outputs/Oak_modern_past_drought_sens_predmaps.png")
grid.arrange(sites.map.Past, sites.map.Modern, ncol = 2)
dev.off()
# All-trees sensitivity map under the Past (pre-1950) scenario:
sites.map <- ggplot()+ geom_raster(data=All.Past, aes(x=x, y=y, fill = July_pdsi_Past_sens_pred))+
  labs(x="easting", y="northing", title="Drought Sensitivity 1895-1950") + 
  scale_fill_gradientn(colours = red.pal, name ="Drought \n Sensitivity", limits = c(-0.03, 0.075))+
  coord_cartesian(xlim = c(-59495.64, 725903.4), ylim=c(68821.43, 1480021))
all.map.Past <- sites.map + geom_polygon(data=data.frame(mapdata), aes(x=long, y=lat, group=group),
                                         colour = "darkgrey", fill = NA)+theme_bw() + theme_black(base_size = 20)+ 
  theme(axis.text = element_blank(),
        axis.ticks=element_blank(),
        axis.title = element_blank(),
        legend.key = element_rect(),
        #legend.background = element_rect(fill = "white"),
        panel.grid.major = element_blank(),
        panel.grid.minor = element_blank(),
        title = element_text(margin = margin(t = 0, r = 20, b = 10, l = 0)))
all.map.Past
# All-trees sensitivity map under the Modern (post-1950) scenario:
sites.map <- ggplot()+ geom_raster(data=All.Past, aes(x=x, y=y, fill = July_pdsi_Modern_sens_pred))+
  labs(x="easting", y="northing", title="Drought Sensitivity 1950-present") + 
  scale_fill_gradientn(colours = red.pal, name ="Drought \n Sensitivity", limits = c(-0.03, 0.075))+
  coord_cartesian(xlim = c(-59495.64, 725903.4), ylim=c(68821.43, 1480021))
all.map.Modern <- sites.map +geom_polygon(data=data.frame(mapdata), aes(x=long, y=lat, group=group),
                                          colour = "darkgrey", fill = NA)+theme_bw() + theme_black(base_size = 20)+ 
  theme(axis.text = element_blank(),
        axis.ticks=element_blank(),
        axis.title = element_blank(),
        legend.key = element_rect(),
        #legend.background = element_rect(fill = "white"),
        panel.grid.major = element_blank(),
        panel.grid.minor = element_blank(),
        title = element_text(margin = margin(t = 0, r = 20, b = 10, l = 0)))
all.map.Modern
# two-panel Past vs. Modern figure for all trees
png(width = 12, height = 6, units = "in", res = 300, "outputs/all_modern_past_drought_sens_predmaps.png")
grid.arrange(all.map.Past, all.map.Modern, ncol = 2)
dev.off()
###########################################################################
# # Plot age vs. mean growth (across trees)
# Read in all raw cleaned ring-width (.rwl) files.
# NOTE(review): absolute user path -- only works on the original machine.
files <- list.files("/Users/kah/Documents/TreeRings/cleanrwl/",pattern = ".rwl")
# read each rwl file and create an object named after the file, e.g. HICww.rwl,
# in the global environment (assign()); those names are used directly below
for (i in seq_along(files)) {
assign(paste(files[i]), read.rwl(paste0("/Users/kah/Documents/TreeRings/cleanrwl/",files[i])))
}
#list.rwls <- list(Hicww.rwl, STCww.rwl, Bon, Tow, Ple, Cor, Unc, Eng, Mou, GLL1, GLL2, GLL3, GLL4, GLL4,PVC)
#list.rwls <- list(HICww.rwl, STCww.rwl)
#age_agg_mean <- lapply(list.rwls, FUN = tree_age_agg_mean, sampleyear = 2015, site.code = "HIC", age1950 = 30,type = "RWI" )
#rwiorbai <- HICww.rwl
# use tree_age_agg.R with raw RWI: tree_age_agg_mean(rwl, sampleyear, site.code,
# age1950, type) aggregates mean growth by tree age for one site
source("R/tree_age_agg_mean.R")
Hic <- tree_age_agg_mean(rwiorbai = HICww.rwl, sampleyear = 2015, site.code= "HIC", age1950 = 30,type = "RWI")
Stc <- tree_age_agg_mean(STCww.rwl, 2015, "STC", 30,"RWI_Spline_detrended")
Bon <- tree_age_agg_mean(BONww.rwl, 2015, "BON", 30,"RWI_Spline_detrended")
Tow <- tree_age_agg_mean(TOWww.rwl, 2015, "TOW", 30,"RWI_Spline_detrended")
Ple <- tree_age_agg_mean(PLEww.rwl, 2015, "PLE", 30,"RWI_Spline_detrended")
Cor <- tree_age_agg_mean(CORww.rwl, 2016, "COR", 30,"RWI_Spline_detrended")
Unc <- tree_age_agg_mean(UNCww.rwl, 2016, "UNC", 30,"RWI_Spline_detrended")
Eng <- tree_age_agg_mean(ENGww.rwl, 2015, "ENG", 30,"RWI_Spline_detrended")
Mou <- tree_age_agg_mean(MOUww.rwl, 2015, "MOU", 30,"RWI_Spline_detrended")
GLL1 <- tree_age_agg_mean(GLL1ww.rwl, 2016, "GLL1", 30,"RWI_Spline_detrended")
GLL2 <- tree_age_agg_mean(GLL2ww.rwl, 2016, "GLL2", 30,"RWI_Spline_detrended")
GLL3 <- tree_age_agg_mean(GLL3ww.rwl, 2016, "GLL3", 30,"RWI_Spline_detrended")
GLL4 <- tree_age_agg_mean(GLL4ww.rwl, 2016, "GLL4", 30,"RWI_Spline_detrended")
# NOTE(review): PVC is labeled "GLL5" here but "PVC" in the pith calls below --
# confirm which site code is intended.
PVC <- tree_age_agg_mean(PVCww.rwl, 2016, "GLL5", 30,"RWI_Spline_detrended")
# now plot mean with STDEV
allsitesmean<- list(Hic, Stc, Bon, Tow, Ple, Cor, Unc, Eng, Mou, GLL1, GLL2, GLL3, GLL4, PVC)
# Scatter of mean RWI against age class for one site, with +/- 1 SD error
# bars; x axis fixed to 0-250 years so panels are comparable across sites.
# Expects columns Age, Mean, Std, site.  Returns a ggplot object.
plotmean.age <- function(df){
  p <- ggplot(df, aes(Age, Mean)) +
    geom_point() +
    geom_errorbar(aes(ymin = Mean - Std, ymax = Mean + Std), width = 0.01) +
    xlim(0, 250) +
    ggtitle(paste0(df$site, " Mean rwi by ageclass")) +
    theme_bw()
  p
}
# Build one panel per site and arrange them into a single 3-column figure.
mean.ages <- lapply(allsitesmean, plotmean.age)
png(width = 12, height = 12, units = "in", res = 300, "outputs/mean_age/mean_growth_vs_age.png")
do.call("grid.arrange", c(mean.ages, ncol = 3))
dev.off()
#------------ find the means for trees established before and after the cutoff:
# tree_age_agg_mean_class() (project helper) aggregates mean RWI per age class
# for one site; arguments mirror tree_age_agg_mean() above.
Hic.age <- tree_age_agg_mean_class(rwiorbai = HICww.rwl, sampleyear = 2015, site.code= "HIC", age1950 = 30,type = "RWI")
Stc.age <- tree_age_agg_mean_class(STCww.rwl, 2015, "STC", 30,"RWI_Spline_detrended")
Bon.age <- tree_age_agg_mean_class(BONww.rwl, 2015, "BON", 30,"RWI_Spline_detrended")
Tow.age <- tree_age_agg_mean_class(TOWww.rwl, 2015, "TOW", 30,"RWI_Spline_detrended")
Ple.age <- tree_age_agg_mean_class(PLEww.rwl, 2015, "PLE", 30,"RWI_Spline_detrended")
Cor.age <- tree_age_agg_mean_class(CORww.rwl, 2016, "COR", 30,"RWI_Spline_detrended")
Unc.age <- tree_age_agg_mean_class(UNCww.rwl, 2016, "UNC", 30,"RWI_Spline_detrended")
Eng.age <- tree_age_agg_mean_class(ENGww.rwl, 2015, "ENG", 30,"RWI_Spline_detrended")
Mou.age <- tree_age_agg_mean_class(MOUww.rwl, 2015, "MOU", 30,"RWI_Spline_detrended")
# BUG FIX: the GLL1-GLL4 and PVC calls all passed site.code = "MOU" (copy-paste
# error); each call now uses its own site code, matching the *.pith calls below.
GLL1.age <- tree_age_agg_mean_class(GLL1ww.rwl, 2016, "GLL1", 30,"RWI_Spline_detrended")
GLL2.age <- tree_age_agg_mean_class(GLL2ww.rwl, 2016, "GLL2", 30,"RWI_Spline_detrended")
GLL3.age <- tree_age_agg_mean_class(GLL3ww.rwl, 2016, "GLL3", 30,"RWI_Spline_detrended")
GLL4.age <- tree_age_agg_mean_class(GLL4ww.rwl, 2016, "GLL4", 30,"RWI_Spline_detrended")
PVC.age <- tree_age_agg_mean_class(PVCww.rwl, 2016, "PVC", 30,"RWI_Spline_detrended")
allsitesage<- list(Hic.age, Stc.age, Bon.age, Tow.age, Ple.age, Cor.age, Unc.age, Eng.age, Mou.age, GLL1.age, GLL2.age, GLL3.age, GLL4.age, PVC.age)
# now plot mean with STDEV
# made a function to plot out mean RWI vs. age
# Mean RWI vs. age, colored by age class, with +/- 1 SD error bars; the plot
# title comes from df$site.  `site` is accepted for call compatibility but is
# not used.  Returns a ggplot object.
rwi.age.class <- function(df, site){
  base_plot <- ggplot(df, aes(Age, Mean, color = Ageclass))
  base_plot +
    geom_point() +
    geom_errorbar(aes(ymin = Mean - Std, ymax = Mean + Std), width = .1) +
    ggtitle(df$site)
}
# One panel per site, arranged into a single 3-column figure.
allsites.ages <- lapply(allsitesage, rwi.age.class)
png(width = 12, height = 12, units = "in", res = 300, "outputs/mean_age/mean_growth_vs_age_by_ageclass.png")
do.call("grid.arrange", c(allsites.ages, ncol = 3))
dev.off()
#------------------Plot pith date vs. mean growth (within each tree)
# tree_pith_agg_mean() (project helper) aggregates mean growth per tree by its
# pith (establishment) date; same argument pattern as the calls above.
Hic.pith <- tree_pith_agg_mean(rwiorbai = HICww.rwl, sampleyear = 2015, site.code= "HIC", age1950 = 30,type = "RWI")
Stc.pith <- tree_pith_agg_mean(STCww.rwl, 2015, "STC", 30,"RWI_Spline_detrended")
Bon.pith <- tree_pith_agg_mean(BONww.rwl, 2015, "BON", 30,"RWI_Spline_detrended")
Tow.pith <- tree_pith_agg_mean(TOWww.rwl, 2015, "TOW", 30,"RWI_Spline_detrended")
Ple.pith <- tree_pith_agg_mean(PLEww.rwl, 2015, "PLE", 30,"RWI_Spline_detrended")
Cor.pith <- tree_pith_agg_mean(CORww.rwl, 2016, "COR", 30,"RWI_Spline_detrended")
Unc.pith <- tree_pith_agg_mean(UNCww.rwl, 2016, "UNC", 30,"RWI_Spline_detrended")
Eng.pith <- tree_pith_agg_mean(ENGww.rwl, 2015, "ENG", 30,"RWI_Spline_detrended")
Mou.pith <- tree_pith_agg_mean(MOUww.rwl, 2015, "MOU", 30,"RWI_Spline_detrended")
GLL1.pith <- tree_pith_agg_mean(GLL1ww.rwl, 2016, "GLL1", 30,"RWI_Spline_detrended")
GLL2.pith <- tree_pith_agg_mean(GLL2ww.rwl, 2016, "GLL2", 30,"RWI_Spline_detrended")
GLL3.pith <- tree_pith_agg_mean(GLL3ww.rwl, 2016, "GLL3", 30,"RWI_Spline_detrended")
GLL4.pith <- tree_pith_agg_mean(GLL4ww.rwl, 2016, "GLL4", 30,"RWI_Spline_detrended")
PVC.pith <- tree_pith_agg_mean(PVCww.rwl, 2016, "PVC", 30,"RWI_Spline_detrended")
# Mean RWI against pith (establishment) date, colored by age class, with
# +/- 1 SD error bars.  `site` is accepted for call compatibility but unused;
# the title comes from df$site.  Returns a ggplot object.
rwi.pith <- function(df, site){
  plot_obj <- ggplot(df, aes(Pith, Mean, color = ageclass)) +
    geom_point() +
    geom_errorbar(aes(ymin = Mean - Std, ymax = Mean + Std), width = .9) +
    ggtitle(df$site) +
    theme_bw() +
    ylab("Mean RWI (mm)") +
    xlab("Pith date")
  plot_obj
}
# One pith-date panel per site, arranged into a single 3-column figure.
allsites.pithplots <- lapply(allsitespith, rwi.pith)
png(width = 12, height = 12, units = "in", res = 300, "outputs/mean_age/mean_growth_vs_age_by_pithdate.png")
do.call("grid.arrange", c(allsites.pithplots, ncol = 3))
dev.off()
# left off here:
#------------------------Does growth climate response really vary by age???-----------------
# generate age classes and age-dependant climate response functions:
# use the "all" dataframe created on line 398
all <- det.age.clim.ghcn.df
summary(det.age.clim.ghcn.df$Age) # ages range from 0 to 246
# Build "lower - upper" interval labels for cut(): label.breaks(0, 100, 50)
# gives c("0 - 50", "50 - 100", "100 - 150").  As in the original, the labels
# intentionally run one interval past `end`.
label.breaks <- function(beg, end, splitby){
  lower <- seq(beg, end, by = splitby)
  upper <- lower + splitby
  paste(lower, "-", upper)
}
# Create age classes in 50-year bins (0-250); labels come from label.breaks().
# NOTE(review): the original comment said "by 25 years" but the code bins by 50.
all$agebreaks <- cut(all$Age, breaks = seq(0, 250, by = 50), labels = label.breaks(0,200,50))
X11(width = 12)
# quick look: RWI vs July PDSI with a linear fit, one facet per age class
ggplot(all, aes(Jul.pdsi, RWI))+geom_point()+stat_smooth(method = 'lm')+facet_grid(~agebreaks)
# 
# make a function to plot age based correlations with July PDSI climate of each site:
# Bar chart of the correlation between RWI and one climate variable within each
# tree-age class, for the site(s) in `site.names`.
#
# df         data frame with columns site, agebreaks, RWI, and the column named
#            by `clim`
# site.names character vector of site codes to keep
# clim       name of the climate column (e.g. "PDSI", "TMIN")
# Returns a ggplot bar chart of per-age-class correlations.
plot.cor.by.age.site <- function(df, site.names, clim){
  all <- df[df$site %in% site.names,]
  # Hoisted: the original recomputed unique(all$agebreaks) on every iteration.
  breaks <- unique(all$agebreaks)
  # Per-class OLS intercept/slope of RWI ~ clim (computed as in the original;
  # only the correlations below are plotted).
  coef.list <- list()
  for(i in seq_along(breaks)){   # seq_along is safe when there are no classes
    sub <- all[all$agebreaks %in% breaks[i],]
    lm.agebreak <- lm(sub$RWI ~ sub[, c(clim)])
    coef.list[[i]] <- lm.agebreak$coefficients
  }
  coef <- do.call(rbind, coef.list)
  coef.df <- data.frame(agebreaks = as.character(breaks),
                        intercept = coef[,1],
                        slope = coef[,2])
  # Pearson correlation of RWI with the climate variable for each age class.
  cor.list <- list()
  for(i in seq_along(breaks)){
    sub <- all[all$agebreaks %in% breaks[i],]
    cor.list[[i]] <- cor(sub$RWI, sub[, c(clim)])
  }
  cors <- do.call(rbind, cor.list)
  cors.df <- data.frame(agebreaks = as.character(breaks),
                        cor = cors[,1])
  # order age classes youngest to oldest on the x axis
  cors.df$agebreaks_f <- factor(cors.df$agebreaks, levels = c("0 - 50", "50 - 100", "100 - 150", "150 - 200", "200 - 250"))
  ggplot(cors.df, aes(agebreaks_f, cor))+geom_bar(stat= "identity")+theme_bw()+ylab(paste("correlation with", clim))+xlab("Tree Age Classes")+ggtitle(site.names)
}
# Per-site correlation-by-age-class plots against July PDSI:
plot.cor.by.age.site(df = all, site.names = "BON", clim = "PDSI")
plot.cor.by.age.site(df = all, site.names = "HIC", clim = "PDSI")
plot.cor.by.age.site(df = all, site.names = "STC", clim = "PDSI")
plot.cor.by.age.site(df = all, site.names = "COR", clim = "PDSI")
plot.cor.by.age.site(df = all, site.names = "UNC", clim = "PDSI")
plot.cor.by.age.site(df = all, site.names = "GLL1", clim = "PDSI")
plot.cor.by.age.site(df = all, site.names = "GLL2", clim = "PDSI")
plot.cor.by.age.site(df = all, site.names = "GLL3", clim = "PDSI")
plot.cor.by.age.site(df = all, site.names = "GLL4", clim = "PDSI")
plot.cor.by.age.site(df = all, site.names = "PLE", clim = "PDSI")
plot.cor.by.age.site(df = all, site.names = "PVC", clim = "PDSI")
plot.cor.by.age.site(df = all, site.names = "TOW", clim = "PDSI")
plot.cor.by.age.site(df = all, site.names = "MOU", clim = "PDSI")
plot.cor.by.age.site(df = all, site.names = "ENG", clim = "PDSI")
# Same, against minimum temperature:
plot.cor.by.age.site(df = all, site.names = "BON", clim = "TMIN")
plot.cor.by.age.site(df = all, site.names = "HIC", clim = "TMIN")
plot.cor.by.age.site(df = all, site.names = "STC", clim = "TMIN")
plot.cor.by.age.site(df = all, site.names = "COR", clim = "TMIN")
plot.cor.by.age.site(df = all, site.names = "UNC", clim = "TMIN")
plot.cor.by.age.site(df = all, site.names = "GLL1", clim = "TMIN")
plot.cor.by.age.site(df = all, site.names = "GLL2", clim = "TMIN")
plot.cor.by.age.site(df = all, site.names = "GLL3", clim = "TMIN")
plot.cor.by.age.site(df = all, site.names = "GLL4", clim = "TMIN")
plot.cor.by.age.site(df = all, site.names = "PLE", clim = "TMIN")
plot.cor.by.age.site(df = all, site.names = "PVC", clim = "TMIN")
plot.cor.by.age.site(df = all, site.names = "TOW", clim = "TMIN")
plot.cor.by.age.site(df = all, site.names = "MOU", clim = "TMIN")
plot.cor.by.age.site(df = all, site.names = "ENG", clim = "TMIN")
|
2ac511e3cda945f3148ad7496a73ae820e20a859
|
19504db9337ab899a58b0203e5ca73ffd9fc5e36
|
/run_analysis.R
|
ca641899dbf7e1c119a1871e8a9650dbc6ccdef3
|
[] |
no_license
|
jtarrou/Getting-and-Cleaning-Data-Final-Project
|
574c007b1314c82af28bf942979040a608569fb5
|
3c987aa6b2603aedb87bdb0de0e693885aebe6b9
|
refs/heads/master
| 2021-01-01T17:47:55.983691
| 2014-10-26T20:35:22
| 2014-10-26T20:35:22
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,376
|
r
|
run_analysis.R
|
# This is the final project for the Getting and Cleaning Data course
# load library data.table
library(data.table)
# The UCI HAR files are stored in a sub-directory named humanActivityRecog,
# so set the working directory there.
setwd("./humanActivityRecog")
# after calling dir() I begin reading in and looking at the data. I'll skip the looking at part here
# read in the activity set (labels), second column
activity_set <- read.table("./activity_labels.txt")[,2]
# read in the features set (measurement names), second column
features_set <- read.table("./features.txt")[, 2]
# logical mask selecting the features whose names contain mean or std
get_meanStd <- grepl("mean|std", features_set)
# Rename the variables for clarity: expand the terse UCI feature-name prefixes
# (f = frequency domain, t = time domain, Acc/Mag/Gyro abbreviations, etc.).
features_set <- gsub("^f", "freq", features_set)
features_set <- gsub("^t", "time", features_set)
features_set <- gsub("-mean", "mean", features_set)
features_set <- gsub("-std", "std_Dev", features_set)
features_set <- gsub("Acc", "accel", features_set)
features_set <- gsub("Mag", "mag", features_set)
features_set <- gsub("Body", "body", features_set)
features_set <- gsub("body.body", "body", features_set)   # collapse duplicated prefix
features_set <- gsub("angle.t", "angle-", features_set)
features_set <- gsub("Gyro", "gyro", features_set)
features_set <- gsub("Jerk", "jerk", features_set)
# read in the test set (measurements X, activity codes y, subject ids)
X_test_data <- read.table("./test/X_test.txt")
y_test_data <- read.table("./test/y_test.txt")
subject_test_data <- read.table("./test/subject_test.txt")
# read in the training set (same three-file layout)
X_train_data <- read.table("./train/X_train.txt")
y_train_data <- read.table("./train/y_train.txt")
subject_train_data <- read.table("./train/subject_train.txt")
# Apply the cleaned feature names as column names on both measurement tables.
names(X_test_data) <- features_set
names(X_train_data) <- features_set
# keep only the mean/std columns in the test set
X_test_data <- X_test_data[, get_meanStd]
# map activity codes to descriptive activity labels for the test set
y_test_data[,2] <- activity_set[y_test_data[,1]]
names(y_test_data) <- c("activity_number", "activity")
names(subject_test_data) <- "subj"
# merge the test data (subject + activity + measurements) into one table
test_data_set <- cbind(as.data.table(subject_test_data), y_test_data, X_test_data)
# keep only the mean/std columns in the training set
X_train_data <- X_train_data[, get_meanStd]
# map activity codes to descriptive activity labels for the training set
y_train_data[,2] <- activity_set[y_train_data[,1]]
names(y_train_data) <- c("activity_number", "activity")
names(subject_train_data) <- "subj"
# merge the training data into a single data table
training_data_set <- cbind(as.data.table(subject_train_data), y_train_data, X_train_data)
# stack test and training tables row-wise into one data set
testTrain_all <- rbind(test_data_set, training_data_set)
# load the reshape2 library so we can use melt and dcast
library(reshape2)
# id columns identify each observation; everything else is a measurement
exp_columns <- c("subj", "activity_number", "activity")
desired_data_columns <- setdiff(colnames(testTrain_all), exp_columns)
# melt to long form: one row per (subject, activity, variable, value)
total_data <- melt(testTrain_all, id = exp_columns, measure.vars = desired_data_columns)
# tidy data set: mean of each variable per subject and activity
average_data <- dcast(total_data, subj + activity ~ variable, mean)
# per instructions write data to file using write.table and row.names=FALSE
write.table(average_data, file = "tidy_data1110.txt", sep = "\t", row.names=FALSE)
|
5e80fc6d28d1404abee823758a953c2a8f339890
|
9027136fa37e33a2ac08eb64809fe2e27986f7aa
|
/Meetup4-TallerShiny-master/Ejemplo2/server.R
|
8d73e76ad7cec78b7426b825a17faa790409edc0
|
[] |
no_license
|
mecomontes/R-Programming-for-Data-Science
|
65f0397109d030a8960299efb998ef2de2b02e47
|
1914a0bef880bd347a1bd6f772ec66e413bb587a
|
refs/heads/main
| 2023-06-13T05:40:10.032174
| 2021-07-11T22:32:19
| 2021-07-11T22:32:19
| 385,065,149
| 1
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 896
|
r
|
server.R
|
# Shiny server: renders one gapminder bubble plot (GDP per capita vs. life
# expectancy, point size = population) for the year chosen in the UI, labels
# the selected country, and optionally colors points by continent.
server = function(input, output) {
  output$thisPlot <- renderPlot({
    # subset to the selected year and locate the selected country within it
    years <- gapminder[gapminder$year == input$var_year, ]
    idcountry <- which(years$country == input$var_country)
    # population range across ALL years, so bubble sizes are comparable between years
    rangepop <- range(gapminder$pop)
    p <- ggplot(years, aes(gdpPercap, lifeExp, size = pop)) +
      geom_point(alpha = 1/3) +
      ylim(20, 90) +
      # label the selected country at its own coordinates
      geom_text(aes(years$gdpPercap[idcountry], years$lifeExp[idcountry]),
                label = input$var_country, size = 8, color = "black") +
      # fixed x limits across years; log scale for GDP per capita
      scale_x_log10(limits = range(gapminder$gdpPercap)) +
      scale_size(guide = "none", range = c(1,20)*range(years$pop)/rangepop) +
      # NOTE(review): axis label string appears mojibake-encoded ("aรฑos" for
      # Spanish "anos"/years) -- file encoding should be checked.
      labs(x = "PIB", y = "Esperanza de vida al nacer (aรฑos)")
    # checkbox toggles continent coloring; renderPlot returns the last value
    if (!input$var_continent) print(p)
    if (input$var_continent) p + geom_point(aes(color = continent)) +
        scale_color_manual(values = c("orange","green","blue","red","brown"))
  })
}
|
445914208598b2b5fd2adcb4ca38a5264600a6ee
|
372e4db4c34fea50cf5997344940186ed8a7f603
|
/posgrado/clase-1/caret.R
|
052fb80d5fa53af13c7b8509eb5aba15407a2c82
|
[] |
no_license
|
martinezmelisapamela/r-learning
|
1bba6b01c97580fd67a08772313b640cb5fefe45
|
7cd55227e9287431f5c7ef92f818daf28441eedf
|
refs/heads/master
| 2020-08-28T06:13:50.337086
| 2019-10-27T04:09:20
| 2019-10-27T04:09:20
| 217,618,615
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 33
|
r
|
caret.R
|
library(caret)
# install ggplot2 (required by caret's plotting functions)
|
d87777fd23a987387301c1c139750964467079a9
|
774183c253e6eac37e7d98fabe5dd4786bbc0c37
|
/inst/doc/h_tabular.R
|
9eaaab1119ccb0495c57a5ea9781d3441a5d42eb
|
[] |
no_license
|
cran/heemod
|
fd6b6497b12c7dffcf11ba89a92f7d23c37f7be1
|
6dd0264b6656353c19c345a8be83718321c34023
|
refs/heads/master
| 2023-07-19T20:16:54.993409
| 2023-07-18T21:50:11
| 2023-07-18T22:30:57
| 48,670,398
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,616
|
r
|
h_tabular.R
|
## ---- echo=FALSE, include=FALSE-----------------------------------------------
library(heemod)
library(dplyr)
# Replace every NA in `x` with `char` (default: a single space).  Note that
# assigning a character replacement into a non-character vector coerces the
# whole vector to character, exactly as the original did.
format_na <- function(x, char = " ") {
  replace(x, is.na(x), char)
}
## ----echo = FALSE-------------------------------------------------------------
# Render the tabular model-definition CSVs shipped with heemod as kable tables
# (read_file is a heemod internal, hence the ::: access).
heemod:::read_file(system.file("tabular/thr/REFERENCE.csv", package = "heemod")) %>% 
  format_na %>% 
  knitr::kable()
## ----echo = FALSE-------------------------------------------------------------
heemod:::read_file(system.file("tabular/thr/THR_states.csv", package = "heemod")) %>% 
  format_na %>% 
  knitr::kable()
## ----echo = FALSE-------------------------------------------------------------
heemod:::read_file(system.file("tabular/thr/THR_transition_probs.csv", package = "heemod")) %>% 
  format_na %>% 
  knitr::kable()
## ----echo = FALSE-------------------------------------------------------------
heemod:::read_file(system.file("tabular/thr/THR_parameters.csv", package = "heemod")) %>% 
  format_na %>% 
  knitr::kable()
## ----echo = FALSE-------------------------------------------------------------
heemod:::read_file(system.file("tabular/thr/THR_options.csv", package = "heemod")) %>% 
  format_na %>% 
  knitr::kable(row.names = FALSE)
## -----------------------------------------------------------------------------
# Run the THR model defined entirely by the tabular files above.
result <- run_model_tabular(
  location = system.file("tabular/thr", package = "heemod")
)
## ---- fig.width = 6, fig.align='center'---------------------------------------
# Inspect the runs, cost-effectiveness plane (PSA), one-way sensitivity of cost
# for the "new" strategy (DSA), and the demographic analysis.
result$model_runs
plot(result$psa, 
     type = "ce")
plot(result$dsa,
     result = "cost",
     strategy = "new")
result$demographics
|
85dee07e2b0f7570a84d3249f093f667ecbe0c1f
|
5af4d49d7dd61a0977257b1aa832cdd569ea67f8
|
/man/ci.ICC3r.nointer.Rd
|
482deaa346ee07a5df885abb517d0494219fa8ee
|
[] |
no_license
|
cran/irrICC
|
19618b0f29d7f292c2f6878f5242e9cbc12f61ee
|
2da1f07f4ca40f7bd802adf52c786d7d3b46cc81
|
refs/heads/master
| 2020-07-30T23:40:19.174599
| 2019-09-23T14:00:02
| 2019-09-23T14:00:02
| 210,402,268
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 1,599
|
rd
|
ci.ICC3r.nointer.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/icc2x3.r
\name{ci.ICC3r.nointer}
\alias{ci.ICC3r.nointer}
\title{Confidence Interval of the ICC(3,1) under Model 3 without subject-rater interaction}
\usage{
ci.ICC3r.nointer(dfra, conflev = 0.95)
}
\arguments{
\item{dfra}{This is a data frame containing 3 columns or more. The first column contains subject numbers (there could be duplicates
if a subject was assigned multiple ratings) and each of the remaining columns is associated with a particular rater and contains its
numeric ratings.}
\item{conflev}{This is the optional confidence level associated with the confidence interval. If not specified, the default value
will be 0.95, which is the most commonly-used value in the literature.}
}
\value{
This function returns a vector containing the lower confidence bound (lcb) and the upper confidence bound (ucb).
}
\description{
This function computes the confidence interval associated with the Intraclass Correlation Coefficient (ICC) used as a measure
of inter-rater reliability, under the mixed factorial ANOVA model (Model 3) with no subject-rater interaction. This function computes
the lower and upper confidence bounds.
}
\examples{
#iccdata1 is a small dataset that comes with the package. Use it as follows:
library(irrICC)
iccdata1 #see what the iccdata1 dataset looks like
ci.ICC3r.nointer(iccdata1)
}
\references{
Gwet, K.L. (2014): \emph{Handbook of Inter-Rater Reliability - 4th ed.} chapter 10, section 10.3.1, equations
10.3.6 and 10.3.7, Advanced Analytics, LLC.
}
|
b3e062ac067a9315726d20d4d08897d927f79068
|
c6c90e3231c3c1cf15f87dbd28cf65eb97b0399a
|
/tests/testthat/test-get_elev_point.R
|
b53ca10ac72eaf0a45457262ad7d25452b5d8764
|
[] |
no_license
|
kristineccles/elevatr
|
84ae2fea0173852bfe5c50ca6dfdbcfe657fc585
|
87ff7f4e31e2b011105950af8509a9f35e841782
|
refs/heads/main
| 2023-03-22T01:40:08.393353
| 2021-03-05T00:59:02
| 2021-03-05T00:59:02
| 348,811,482
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,005
|
r
|
test-get_elev_point.R
|
# Test setup for elevatr::get_elev_point(); pt_df and sp_big are example
# datasets shipped with the package.
context("get_elev_point")
library(sp)
library(sf)
library(elevatr)
data("pt_df")
data("sp_big")
# spatial stack is unreliable on Solaris CRAN machines
skip_on_os(os = "solaris")
# Skip this whole test file on R 3.6.2.
# FIX: use the short-circuit scalar operator && in an `if` condition rather
# than the elementwise & (and since R 4.3, && errors on length > 1 operands,
# so the scalar form is also the future-proof one).
if(R.version$major == "3" && R.version$minor == "6.2"){
  skip("Skipping on R Version 3.6.2")
}
# CRS fixtures: WGS84 lat/long (EPSG:4326) and CONUS Albers (EPSG:5072)
ll_prj <- st_crs(4326)
aea_prj <- st_crs(5072)
# sp point fixtures in both projections, plus an sf copy
sp_sm <- SpatialPoints(coordinates(pt_df),CRS(SRS_string = ll_prj$wkt))
sp_sm_prj <- spTransform(sp_sm,CRS(SRS_string = aea_prj$wkt))
# out-of-range coordinates; NOTE(review): bad_sp is not used in the visible test
bad_sp <- SpatialPoints(coordinates(data.frame(x = 1000, y = 1000)),
                        CRS(SRS_string = ll_prj$wkt))
sf_sm <- st_as_sf(sp_sm)
# Exercises get_elev_point() against both elevation sources ("epqs" and "aws")
# and all supported input types, then checks return class, CRS preservation,
# and the elevation-units column.  Network-dependent, hence skip_on_cran().
test_that("get_elev_point returns correctly", {
  skip_on_cran()
  #skip_on_ci()
  epqs_df <- get_elev_point(locations = pt_df, prj = ll_prj, src = "epqs")
  epqs_sp <- get_elev_point(locations = sp_sm, src = "epqs")
  epqs_sf <- get_elev_point(locations = sf_sm, src = "epqs")
  epqs_sp_prj <- get_elev_point(locations = sp_sm_prj, src = "epqs")
  epqs_ft <- get_elev_point(locations = sp_sm, src = "epqs", units = "feet")
  epqs_m <- get_elev_point(locations = sp_sm, src = "epqs", units = "meters")
  epqs_df_aws <- get_elev_point(locations = pt_df, prj = ll_prj, src = "aws")
  epqs_sp_aws <- get_elev_point(locations = sp_sm, src = "aws")
  # NOTE(review): epqs_sp_aws_z and epqs_df/_df_aws are created but have no
  # expectations below -- they only verify the calls run without error.
  epqs_sp_aws_z <- get_elev_point(locations = sp_sm, src = "aws", z = 4)
  epqs_sf_aws <- get_elev_point(locations = sf_sm, src = "aws")
  epqs_ft_aws <- get_elev_point(locations = sp_sm, src = "aws", units = "feet")
  #class: sp input returns SpatialPointsDataFrame, sf input returns sf
  expect_is(epqs_df, "SpatialPointsDataFrame")
  expect_is(epqs_sp, "SpatialPointsDataFrame")
  expect_is(epqs_sp_prj, "SpatialPointsDataFrame")
  expect_is(epqs_sp_prj, "SpatialPointsDataFrame")
  expect_is(epqs_sf, "sf")
  #proj: the output CRS (WKT) must match the input CRS
  expect_equal(wkt(sp_sm),wkt(epqs_sp))
  expect_equal(wkt(sp_sm_prj),wkt(epqs_sp_prj))
  expect_equal(wkt(sp_sm),wkt(epqs_sp_aws))
  #units: the elev_units column reflects the requested units
  expect_equal(epqs_ft$elev_units[1],"feet")
  expect_equal(epqs_m$elev_units[1],"meters")
  expect_equal(epqs_ft_aws$elev_units[1],"feet")
  expect_equal(epqs_sf_aws$elev_units[1],"meters")
})
|
fe3008eb128708f673c1367f35741ed69e4284dd
|
e21cd094b8a840cec8e6d3e024c24e49201b794a
|
/DAOY_cell_line.R
|
bc69bcec383613caabed38f7355a88e90bc9c2bb
|
[] |
no_license
|
SiyiWanggou/CLIC1_in_Medulloblastoma
|
810185e9f3409f23f9f636a2aeb489b7fecc2c66
|
f3e91d52f0648beac3267af1f271c6340c7aa753
|
refs/heads/master
| 2021-06-19T12:11:16.975034
| 2021-04-14T11:05:03
| 2021-04-14T11:05:03
| 167,466,632
| 0
| 2
| null | null | null | null |
UTF-8
|
R
| false
| false
| 5,178
|
r
|
DAOY_cell_line.R
|
# DAOY_cell_line.R — differential expression of shCLIC1 vs shScr in the DAOY
# cell line: ballgown count import, DESeq2 at gene and transcript level, and a
# volcano plot of the gene-level results.
# NOTE(review): hard-coded setwd() calls to a local O: drive make this script
# non-portable; consider relative paths or here::here().
#-----shCLIC1_vs_shScr------
#Analysis at gene level
setwd("O:/Michelle_CLIC1/DAOY/CLIC1_RNAseq/Ballgawn")
library(ballgown)
# Sample sheet and ballgown object for the shCLIC1-vs-shScr comparison.
pheno_data<-read.csv("shCLIC1_vs_shScr_phenotype.csv")
bg<-ballgown(dataDir="shCLIC1_vs_shScr",samplePattern="CHE8695_",pData=pheno_data)
# Map gene IDs to gene symbols, dropping duplicate IDs.
# NOTE(review): the variable `names` masks base::names() for the rest of this
# script.
names<-data.frame(geneNames=ballgown::geneNames(bg),geneIDs=ballgown::geneIDs(bg))
names_unique<-names[!duplicated(names[c("geneIDs")]),]
setwd("O:/Michelle_CLIC1/DAOY/CLIC1_RNAseq/Ballgown_to_DEseq2/shCLIC1_vs_shScr")
write.table(names_unique,"geneNames.txt",row.names = FALSE,col.names = TRUE,sep="\t",quote=FALSE)
# Load the sample metadata and the gene count matrix for DESeq2.
coldata<-read.delim("shCLIC1_vs_shScr_phenotype.txt",header=T,row.names = "Sample")
cts<-read.delim("gene_count_matrix.txt",header=T,row.names="gene_id")
rownames(coldata) <- sub("fb", "", rownames(coldata))
# Sanity checks: sample names must match between metadata and counts
# (printed to console, not asserted).
all(rownames(coldata) %in% colnames(cts))
all(rownames(coldata) == colnames(cts))
library(DESeq2)
# Gene-level DESeq2: filter low-count genes (< 10 total reads), fit the model.
dds<-DESeqDataSetFromMatrix(countData=cts,colData=coldata,design= ~ Condition)
dds<-dds[rowSums(counts(dds))>=10,]
dds<-DESeq(dds)
res<-results(dds)
normalized_counts<-counts(dds,normalized=TRUE)
merged_file<-data.frame(normalized_counts,res)
# Persist statistics, normalized counts, and the combined table.
write.table(res,"DESeq2_results_statistics.txt",row.names = T,col.names = T,sep='\t',quote=FALSE)
write.table(normalized_counts,"DESeq2_normalized_counts.txt",row.names = T,col.names = T,sep='\t',quote=FALSE)
write.table(merged_file,"DESeq2_normalized_counts_and_statistics.txt",row.names = T,col.names = T,sep='\t',quote=FALSE)
# Re-attach gene symbols to the results table.
results<-read.delim("DESeq2_normalized_counts_and_statistics.txt",header=T,row.names = 1)
results$geneIDs<-row.names(results)
names<-read.delim("geneNames.txt",header=T)
c<-merge(names,results,by="geneIDs",all=FALSE)
write.table(c,"Gene_DESeq2_normalized_counts_and_statistics.txt",row.names = FALSE,col.names = T,sep='\t',quote=FALSE)
# Significant genes: padj < 0.05 and |log2FC| > 1.
c_sig<-subset(c,padj<0.05)
c_sig<-subset(c_sig,log2FoldChange > 1 | log2FoldChange < -1)
write.table(c_sig,"Results_significant_DE_Gene_level.txt",row.names = FALSE,col.names = T,sep='\t',quote=FALSE)
# Same workflow repeated at the transcript level.
#Analysis at transcripts level
setwd("O:/Michelle_CLIC1/DAOY/CLIC1_RNAseq/Ballgawn")
library(ballgown)
pheno_data<-read.csv("shCLIC1_vs_shScr_phenotype.csv")
bg<-ballgown(dataDir="shCLIC1_vs_shScr",samplePattern="CHE8695_",pData=pheno_data)
# Full transcript annotation table from ballgown (used to re-annotate results).
bg_table_transcripts=texpr(bg,'all')
setwd("O:/Michelle_CLIC1/DAOY/CLIC1_RNAseq/Ballgown_to_DEseq2/shCLIC1_vs_shScr")
coldata<-read.delim("shCLIC1_vs_shScr_phenotype.txt",header=T,row.names = "Sample")
cts<-read.delim("transcript_count_matrix.txt",header=T,row.names="transcript_id")
rownames(coldata) <- sub("fb", "", rownames(coldata))
all(rownames(coldata) %in% colnames(cts))
all(rownames(coldata) == colnames(cts))
library(DESeq2)
dds<-DESeqDataSetFromMatrix(countData=cts,colData=coldata,design= ~ Condition)
dds<-dds[rowSums(counts(dds))>=10,]
dds<-DESeq(dds)
res<-results(dds)
normalized_counts<-counts(dds,normalized=TRUE)
merged_file<-data.frame(normalized_counts,res)
merged_file$t_name<-row.names(merged_file)
# Join DESeq2 results with the ballgown transcript annotation by transcript name.
combined_results<-merge(merged_file,bg_table_transcripts,by="t_name",all=FALSE)
combined_results_sig<-subset(combined_results,padj<0.05)
combined_results_sig<-subset(combined_results_sig,log2FoldChange > 1 | log2FoldChange < -1)
write.table(combined_results_sig,"Results_significant_DE_transcripts_level.txt",col.names = TRUE,row.names = TRUE,sep="\t",quote=FALSE)
write.table(combined_results,"Results_transcripts_all.txt",col.names = TRUE,row.names = TRUE,sep="\t",quote=FALSE)
# Volcano plot of the gene-level results (padj < 0.1, |log2FC| >= 1 thresholds).
#Visualization of Volcano plot
setwd("O:/Michelle_CLIC1/DAOY/CLIC1_RNAseq/Ballgown_to_DEseq2/Visualization_of_shCLIC1_vs_shScr/")
a<-read.delim("Gene_DESeq2_normalized_counts_and_statistics.txt",header=T)
a<-na.omit(a)
library(ggplot2)
library(ggthemes)
library(Cairo)
# Classify each gene as up / down / unchanged for colouring.
a$threshold=as.factor(ifelse(a$padj<0.1 & abs(a$log2FoldChange) >= 1.0,
                             ifelse(a$log2FoldChange > 1.0,
                                    'Up Regulated in shCLIC1','Down Regulated in shCLIC1'),'None'))
# Render to PNG via Cairo at 300 dpi.
Cairo(file="Vocalno_plot_of_shCLIC1_vs_shScr.png",type="png",units="in",bg="white",width=8,height=6,pointsize=16,dpi=300)
ggplot(data=a, aes(x=log2FoldChange, y = -log10(padj), colour=threshold,fill=threshold)) +
  scale_color_manual(values=c("Green", "black","Red"))+
  geom_point(alpha=0.4, size=1.6) +
  xlim(c(-4, 4)) +
  theme_bw(base_size = 16, base_family = "Times") +
  geom_vline(xintercept=c(-1.0,1.0),lty=4,col="grey",lwd=0.6)+
  geom_hline(yintercept = -log10(0.1),lty=4,col="grey",lwd=0.6)+
  theme(legend.position="right",
        panel.grid=element_blank(),
        legend.title = element_blank(),
        legend.text= element_text(face="bold", color="black",family = "Times", size=16),
        plot.title = element_text(hjust = 0.5),
        axis.text.x = element_text(face="bold", color="black", size=16),
        axis.text.y = element_text(face="bold", color="black", size=16),
        axis.title.x = element_text(face="bold", color="black", size=16),
        axis.title.y = element_text(face="bold",color="black", size=16))+
  labs(x="Log2 (fold change) ",y="-Log10 (Adjust p-value)",title="shCLIC1_vs_shScr",size=16)
dev.off()
|
2539226f819de6f2ce6de822a6f9805c4dc7966e
|
7b22ee9d8575613eb3e2c6b1c9800a59f2453df5
|
/R/many_pal.R
|
bc3e13ffee0f676ccc3af24788bfbdc7005b9335
|
[
"Apache-2.0"
] |
permissive
|
witch-team/witchtools
|
b362c5faad85c46cc4dfd75e229e7327d1ac15b6
|
7731eb5220d64816d89dea5723b38671e50a8746
|
refs/heads/master
| 2023-08-03T11:00:41.120440
| 2023-07-28T16:24:48
| 2023-07-28T16:24:48
| 212,095,276
| 5
| 3
|
NOASSERTION
| 2023-09-13T21:37:07
| 2019-10-01T12:52:28
|
Assembly
|
UTF-8
|
R
| false
| false
| 4,099
|
r
|
many_pal.R
|
#' Many color palettes
#'
#' Creates thematic palettes for WITCH results and other
#'
#' @param component palette's component (`fuel`, `region`).
#' @param theme A palette theme (`witch-plot` (default)).
#' @param variant optional palette variant (currently unused by the function body).
#' @param include_names only return the corresponding named values.
#' @export
#'
many_pals <- function(component = NULL, variant = NULL, theme = NULL, include_names = NULL) {
  # Return a thematic colour palette for WITCH plots.
  #
  # component: which palette family to build ("fuel" or "region").
  # variant:   reserved for future variants (currently unused).
  # theme:     palette theme; defaults to "witch-plot".
  # include_names: if supplied, return only the value(s) for that name
  #   (selected with [[, so a single name is expected, as before).
  # Returns a named character vector of colours, a single colour string when
  # include_names is given, or NULL (with a warning) on invalid input.
  if (is.null(component)) {
    warning('component is NULL')
    return(NULL)
  }
  all.components <- c("fuel","region")
  if (!component %in% all.components) {
    warning(paste('component should be chosen among',
                  paste(all.components, collapse = ", ")))
    return(NULL)
  }
  if (is.null(theme)) {
    theme <- "witch-plot"
  }
  if (component == "region") {
    if (theme == "witch-plot") {
      # Start from a rainbow fallback over all known regions, then overlay the
      # hand-picked WITCH, ed57 (RICE50+) and witch34 colours.
      region_palette_specific <- setNames(rainbow(length(witch_regions)), witch_regions) #just in case have a fall back colour
      region_palette_witch <- c(usa="darkblue",Usa="darkblue",oldeuro="blue", neweuro="cornflowerblue",kosau="darkgreen",Kosau="darkgreen",cajaz="chartreuse4",Cajaz="chartreuse4",te="gold2",Te="gold2",mena="darkgoldenrod4",Mena="darkgoldenrod4",ssa="goldenrod",Ssa="goldenrod",sasia="darkorange2","South Asia"="darkorange2",china="deeppink3",PRC="deeppink3",easia="orangered",ESEAP="orangered",laca="#fbb714",Laca="#fbb714",india="#fbf003",India="#fbf003",europe="blue",Europe="blue",indonesia="lightsalmon3",Indonesia="lightsalmon3",Rest_of_World="grey48",chinaw="darkorange",chinac="darkorange2",chinae="darkorange4",italy="green",mexico="slateblue2",brazil="tomato4",canada="blueviolet",jpnkor="darkseagreen",oceania="forestgreen",southafrica="indianred3",seasia="orangered",World="black", "Global Pool"="black")
      #add ed57 region colors for RICE50+
      region_palette_ed57 <- c("arg" = "#000000","aus" = "#48d1cc","aut" = "#ae8000","bel" = "#800000","bgr" = "#003366","blt" = "#bf4040","bra" = "#ffd633","can" = "#6600cc","chl" = "#ffece6","chn" = "#ff531a","cor" = "#adebad","cro" = "#808080","dnk" = "#ff9933","egy" = "#0044cc","esp" = "#ffd6cc","fin" = "#00cccc","fra" = "#cc0000","gbr" = "#ffffdd","golf57" = "#33d6ff","grc" = "#00ffcc","hun" = "#9999ff","idn" = "#996633","irl" = "#ff4dff","ita" = "#ffff00","jpn" = "#006600","meme"= "#b32d00","mex" = "#ccff33","mys" = "#145252","nde" = "#00d900","nld" = "#c309bd","noan"= "#ffff99","noap"= "#ecf2f9","nor" = "#ff3399","oeu" = "#ffb3ff","osea"= "#008fb3","pol" = "#d6f5d6","prt" = "#003300","rcam"= "#4d1919","rcz" = "#00ffff","rfa" = "#deb887","ris" = "#000080","rjan57" = "#bf00ff","rom" = "#ff00ff","rsaf"= "#ff8000","rsam"= "#0000ff","rsas"= "#ccd6dd","rsl" = "#00ff00","rus" = "#66757f","slo" = "#ff3091","sui" = "#61a62f","swe" = "#cb1942","tha" = "#efff14","tur" = "#4b0082","ukr" = "#c198ff","usa" = "#ffcc00","vnm" = "#3377ff","zaf" = "#b3ccff")
      #Add witch34 region colors
      region_palette_witch34 <- c("bnl" = "#800000","northeu" = "#bf4040","balkan" = "#808080","easteu" = "#9999ff", "che"="#61a62f", "deu" = "#deb887", "rou" = "#ff00ff", "cze" = "#00ffff")
      region_palette <- replace(region_palette_specific, names(region_palette_witch), region_palette_witch)
      region_palette <- replace(region_palette, names(region_palette_ed57), region_palette_ed57)
      region_palette <- replace(region_palette, names(region_palette_witch34), region_palette_witch34)
      # BUGFIX: the palette used to be indexed with [[ unconditionally, which
      # errored when include_names was NULL. Return the full palette instead
      # (backward compatible: the old behaviour was an error).
      if (is.null(include_names)) {
        return(region_palette)
      }
      return(region_palette[[include_names]])
    }
    # Unknown theme for the region component: nothing to return.
    return(NULL)
  }
  if (component == "fuel") {
    default_cols <- c(coal = "#3e3e3e",
                      ngas = "#659AC5",
                      nuclear = "#8E61E8",
                      oil = "#663E28",
                      solar = "#FFE205",
                      wind = "#252C8F")
    if (is.null(include_names)) {
      return(default_cols)
    }
    # select names from include_names
    cols <- default_cols[[include_names]]
    # BUGFIX: the original condition `c("gas","natural_gas") %in% include_names`
    # had length 2, which is an error in if() since R 4.3; wrap it in any().
    # The alias-handling body is still a stub, as in the original.
    if (any(c("gas","natural_gas") %in% include_names)) {
      #cols = c(cols, )
    }
    # BUGFIX: the original returned the undefined object `pal`; return the
    # selected colours instead.
    return(cols)
  }
}
|
3d153cc5227cda4013be47518a03f722de0adfc8
|
21e4367e753a15daf36970717d296c5a3b0714ed
|
/man/install.CMake.Rd
|
00158f7ee6abe9b50d5fa27d28ef61743a63c68a
|
[] |
no_license
|
dnlbrky/installr
|
4ae890d017b8239b5c47f0e9885e95c50cf14eed
|
a288151f099937051a72c6e064a05ad6333f863c
|
refs/heads/master
| 2021-01-14T13:08:27.876299
| 2014-11-27T05:41:07
| 2014-11-27T05:41:07
| 27,207,639
| 2
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,058
|
rd
|
install.CMake.Rd
|
% Generated by roxygen2 (4.0.2): do not edit by hand
\name{install.CMake}
\alias{install.CMake}
\alias{install.cmake}
\title{Downloads and installs CMake for windows}
\usage{
install.CMake(URL = "http://www.cmake.org/cmake/resources/software.html", ...)
}
\arguments{
\item{URL}{the URL of the CMake download page.}
\item{...}{extra parameters to pass to
\link{install.URL}}
}
\value{
TRUE/FALSE - was the installation successful or not.
}
\description{
Allows the user to downloads and install the latest version
of CMake for Windows.
}
\details{
CMake is a family of tools designed to build, test and
package software. CMake is used to control the software
compilation process using simple platform and compiler
independent configuration files. CMake generates native
makefiles and workspaces that can be used in the compiler
environment of your choice.
}
\examples{
\dontrun{
install.CMake() # installs the latest version of CMake
}
}
\references{
\itemize{ \item CMake homepage:
\url{http://www.cmake.org/cmake/resources/software.html} }
}
|
3474f64fc647107a1e33f25df84b977b1a0b54c6
|
aded9f46e200628422c9d145cf3edd9546ff87a9
|
/MEPDataExplorer/ui.R
|
26c38f801ab3fe05611abd4415276f0594c11402
|
[
"MIT"
] |
permissive
|
MEP-LINCS/MEPDataExplorer
|
58a0aa7e262b5fbc0c3158f2f931b8312d3ed385
|
0bcc51fa3a7dd76dac8c9f74651547ed30767269
|
refs/heads/master
| 2020-04-06T07:05:51.059510
| 2019-10-29T00:04:35
| 2019-10-29T00:04:35
| 62,261,464
| 0
| 0
|
MIT
| 2019-10-29T00:04:36
| 2016-06-29T22:15:40
|
R
|
UTF-8
|
R
| false
| false
| 1,197
|
r
|
ui.R
|
# ui.R — Shiny UI for the MEP-LINCS Data Explorer.
library(shiny)
# BUGFIX: plotlyOutput() is used in the Scatter Plot tab below, but the plotly
# package was never attached, so building the UI failed with an undefined
# function. plotly is already an implicit dependency of this file.
library(plotly)
# Define UI for application that draws a histogram
shinyUI(fluidPage(
  # Inject the cookie-reading helper script into <head> exactly once.
  tags$head(
    singleton(
      includeScript("www/readCookie.js")
    )
  ),
  # Application title
  titlePanel("MEP-LINCS Data Explorer"),
  # Sidebar: cell-line selector, data-fetch button, and dynamically rendered
  # filter/plot controls (filled in by the server via uiOutput).
  sidebarLayout(
    sidebarPanel(
      helpText("Display measurements from MEPs."),
      selectInput("cell_line", label = 'Cell Line',
                  choices = c("MCF10A", "HMEC122L", "HMEC240L"), selected = "MCF10A"),
      # uiOutput('staining_set_ctrls'),
      actionButton("updateButton", "Get Data"),
      hr(),
      uiOutput('filterLigands'),
      uiOutput('filterECMp'),
      hr(),
      uiOutput('plotParams')
    ),
    # Main panel: one tab with a static box plot, one with an interactive
    # plotly scatter plot, each followed by an HTML info block.
    mainPanel(
      tabsetPanel(id="tabs",
                  tabPanel("Box Plot", value="box",
                           plotOutput("boxPlot"),
                           htmlOutput('boxPlotInfo')),
                  tabPanel("Scatter Plot", value="scatter",
                           plotlyOutput("scatterPlot"),
                           htmlOutput('scatterPlotInfo'))
      )
    )
  )))
|
ba330d46ebdf3e9f4b1fd82bf9c3c6067cf1a471
|
e305f58e445fb6e24aaebd5312ba713e84b0e597
|
/workshop1.R
|
a346215bbc38e40d3bfc91014be7b5962126d061
|
[] |
no_license
|
JoannaKvB/workshop1
|
04534e8a552347dcf7d9ae3b97ce352e4a16ee87
|
d888cdf65cae52c064d48eab7e644a69b1030daa
|
refs/heads/master
| 2021-07-16T21:07:21.631184
| 2017-10-24T18:42:25
| 2017-10-24T18:42:25
| 108,168,608
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,533
|
r
|
workshop1.R
|
# workshop1.R — introductory R workshop exercises: basic operators, vector
# subsetting, data frames, and descriptive statistics on paradise tree snake
# undulation frequencies. Unassigned expressions intentionally print to the
# console.
# NOTE(review): install.packages() inside a script re-installs on every run;
# normally these lines are executed once interactively.
install.packages("ape", dependencies=TRUE)
install.packages("binom", dependencies=TRUE)
install.packages("car", dependencies=TRUE)
install.packages("leaps", dependencies=TRUE)
install.packages("meta", dependencies=TRUE)
install.packages("pwr", dependencies=TRUE)
install.packages("visreg", dependencies=TRUE)
install.packages("lsmeans", dependencies=TRUE)
# Scalar assignment and arithmetic.
x <- 3
z <- "Wake up Neo"
y <- 2
z <- x * y
# Comparison operators (results print as TRUE/FALSE).
2 + 2 == 4 # Note double "==" for logical "is equal to"
3 <= 2 # less than or equal to
"A" > "a" # greater than
"Hi" != "hi" # not equal to (i.e., R is case sensitive)
# Vector creation and subsetting.
x <- c(1,2,333,65,45,-88,-72,8,92,46)
is.vector(x)
x[1:3] # 1:3 is a shortcut for c(1,2,3)
length(x) #answer is 10 because we have 10 numbers
x[length(x)] #gives you the last number in the vector
x > 0 #gives you T/F for every number in the vector
x[x > 0] #only gives you the positive numbers from the vector
which(x > 0) #gives you the positions of the positive numbers in the vector
# Element replacement (single index, multiple indices, and a missing value).
x[5] <- 0
x[c(2,6,10)] <- c(1,2,3)
x[2] <- NA
# Elementwise vector arithmetic and comparisons.
y <- c(5,-6,85,32,47,654,79,65,-14,1)
z <- x * y #multiplies the elements pairwise, position by position
z <- y - 2 * x
z <- x >= y # greater than or equal to
z <- x[abs(x) < abs(y)] # absolute values
##making a data frame = like a spreadsheet
mydata <- data.frame(x = x, y = y, stringsAsFactors = FALSE)
#delete a vector, now stored in the dataframe "mydata"
rm(x)
rm(y)
#different commands once working with dataframe
length(mydata) #answer is 2 because 2 columns
length(mydata$x) #answer is 10 as in line 26
#Paradise tree snake data
#side to side undulation
hertz <- c(0.9,1.4,1.2,1.2,1.3,2.0,1.4,1.6)
length(hertz)
hist(hertz, right = FALSE) #creates left-closed, right-open intervals
#hertz to radians/sec (1 hertz = 2pi radians)
radians <- hertz * (2 * pi)
mean(radians)
sd(radians) # = 2.035985
#mean
sum(hertz) / 8
mean(hertz)
# Standard deviation computed "by hand".
# NOTE(review): mean(step2) divides by n (8), while sd() divides by n-1, so
# step4 is the population SD and will not exactly equal sd(hertz).
#standard deviation
step1 <- hertz - 1.375
step2 <- step1 ^ 2
step3 <- mean(step2)
step4 <- sqrt(step3)
sd(hertz)
sort(hertz) ##puts in numerical order
median(hertz)
#standard error = standard deviation/sqrt sample size
sd(hertz) / sqrt(8)
sd(radians) / sqrt(8) # = 0.7198293
# Missing data: appending NA lengthens the vector but na.rm drops it from
# the statistics, so the effective sample size stays 8 (hence sqrt(8) below).
##Missing Data
length(radians)
radians[9] <- NA
length(radians)
mean(radians[1:8])
mean(radians, na.rm = TRUE) #same as above
sd(radians, na.rm = TRUE)
sd(radians, na.rm = TRUE) / sqrt(8) #sqrt(8), not sqrt(9): only 8 non-NA values remain
##ANOLIS LIZARDS on separate script file
|
6b460a015b6434ab6ed7ccd1520172c5565dd8d3
|
8d00160bd339b95b5fe31156964e3de4ff5a8440
|
/dplyr Tutorial/tutorial.R
|
acaec0ffd7d7ddc7db991345e478ef40f2004ab7
|
[] |
no_license
|
akl21/dataplus
|
497464f3321e0ea8cc8b827157c02455165188fd
|
53977b7a169cb62f7e8fa370fc7fd3f792de1c44
|
refs/heads/master
| 2021-01-21T18:38:22.900827
| 2017-05-26T19:16:57
| 2017-05-26T19:16:57
| 92,063,634
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,423
|
r
|
tutorial.R
|
# tutorial.R — dplyr walkthrough on the hflights dataset: each base-R
# construct is shown next to its dplyr equivalent. Unassigned expressions
# print their result to the console on purpose.
install.packages("dplyr")
install.packages("hflights")
suppressMessages(library(dplyr))
library(hflights)
data(hflights)
head(hflights)
# Wrap the data frame as a tbl for nicer printing.
# NOTE(review): tbl_df() is deprecated in modern dplyr in favour of
# as_tibble() — confirm the dplyr version this tutorial targets.
flights <- tbl_df(hflights)
# filter(): base-R bracket subsetting vs the dplyr verb.
jan1 <- flights[flights$Month == 1 & flights$DayofMonth == 1, ]
djan1 <- filter(flights, Month == 1, DayofMonth == 1)
filter(flights, UniqueCarrier == "AA"| UniqueCarrier == "UA")
filter(flights, UniqueCarrier %in% c("AA", "UA"))
# select(): choose columns by name, range, or matcher helpers.
flights[, c("DepTime","ArrTime", "FlightNum")]
select(flights, DepTime, ArrTime, FlightNum)
select(flights, Year:DayofMonth, contains("Taxi"), contains("Delay"))
# The %>% pipe chains verbs left to right.
flights %>%
  select(UniqueCarrier, DepDelay) %>%
  filter(DepDelay > 60) %>%
  as.data.frame()
# arrange(): sorting, ascending and descending.
flights[order(desc(flights$DepDelay)), c("UniqueCarrier", "DepDelay")]
flights %>%
  select(UniqueCarrier, DepDelay) %>%
  arrange(DepDelay)
flights %>%
  select(UniqueCarrier, DepDelay) %>%
  arrange(desc(DepDelay))
# mutate(): derive a new Speed column (miles per hour).
flights[, c("Distance", "AirTime", "Speed")]
flights %>%
  select(Distance, AirTime) %>%
  mutate(Speed = Distance/AirTime*60)
flights <- flights %>% mutate(Speed = Distance/AirTime*60)
# group_by() + summarize(): grouped aggregation vs aggregate()/tapply().
head(aggregate(ArrDelay ~ Dest, flights, mean))
with(flights, tapply(ArrDelay, Dest, mean, na.rm = TRUE))
flights %>%
  group_by(Dest) %>%
  summarize(avg_delay = mean(ArrDelay, na.rm = TRUE))
# summarize_each(): apply functions to several columns at once.
# NOTE(review): summarize_each()/funs() are superseded by across() in
# current dplyr.
flights %>%
  group_by(UniqueCarrier) %>%
  summarize_each(funs(mean), Cancelled, Diverted)
flights %>%
  group_by(UniqueCarrier) %>%
  summarize_each(funs(min(., na.rm = TRUE), max(., na.rm = TRUE)), matches("Delay"))
# Counting rows per group: n() vs the tally() shortcut.
flights %>%
  group_by(Month, DayofMonth) %>%
  summarize(flight_count = n()) %>%
  arrange(desc(flight_count))
flights %>%
  group_by(Month, DayofMonth) %>%
  tally(sort = TRUE)
# n_distinct(): count distinct planes per destination.
flights %>%
  group_by(Dest) %>%
  summarize(flight_count = n(), plane_count = n_distinct(TailNum))
# Cross-tabulate cancellations per destination.
flights %>%
  group_by(Dest) %>%
  select(Cancelled) %>%
  table() %>%
  head()
# Window functions: top two departure delays per carrier, two ways.
flights %>%
  group_by(UniqueCarrier) %>%
  select(Month, DayofMonth, DepDelay) %>%
  filter(min_rank(desc(DepDelay)) <= 2) %>%
  arrange(UniqueCarrier, desc(DepDelay))
flights %>%
  group_by(UniqueCarrier) %>%
  select(Month, DayofMonth, DepDelay) %>%
  top_n(2) %>%
  arrange(UniqueCarrier, desc(DepDelay))
# lag(): month-over-month change in flight counts, two ways.
flights %>%
  group_by(Month) %>%
  summarize(flight_count = n()) %>%
  mutate(change = flight_count - lag(flight_count))
flights %>%
  group_by(Month) %>%
  tally() %>%
  mutate(change = n - lag(n))
# Random sampling and a structure overview.
flights %>% sample_n(5)
flights %>% sample_frac(0.25, replace = TRUE)
glimpse(flights)
|
ddc35a283231b1cf2fa4a4ed830c33aadf6a6a75
|
c5f9eadcd6d4845cb0080e3f7bac23d0e29aeb9a
|
/plot4.R
|
a6639658fb711892a53bed9f6ea0010f5117c804
|
[] |
no_license
|
MikeRadford/datasciencecoursera
|
638a5b8e10c127821a80f73504c2df3fe5baceae
|
f2f885caa89d43563a8b154955cd2f367da58d53
|
refs/heads/master
| 2020-05-17T10:22:24.094292
| 2014-12-03T10:40:07
| 2014-12-03T10:40:07
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,283
|
r
|
plot4.R
|
# plot4.R — Exploratory Data Analysis course, assignment 1, plot 4: read the
# UCI household power consumption data, keep 2007-02-01..02, and draw a 2x2
# panel of base-graphics plots, saved as plot4.png.
#plot 4 for assignment 1, load data, subset, resolve datetime then plot
# Register a pseudo-class so read.table's colClasses can coerce the dd/mm/yyyy
# Date column directly to Date while reading; "?" marks missing values.
setClass("myDate")
setAs("character", "myDate", function(from) as.Date(from, format="%d/%m/%Y") )
power <- read.table("household_power_consumption.txt", sep=";", header=TRUE, colClasses = c("myDate","character","numeric","numeric","numeric","numeric","numeric","numeric","numeric"),na.strings=c("?"))
# Console sanity checks of the import.
summary(power)
str(power)
# Two-day window of interest.
power2 <- subset(power, Date >= "2007-02-01" & Date <= "2007-02-02")
# Combine Date and Time into a single DateTime (POSIXlt) for the x axes.
temp <- paste(power2$Date,power2$Time)
power2$DateTime <- strptime(temp, "%Y-%m-%d %H:%M:%S")
# 2x2 layout, panels filled column-wise (mfcol).
par(mfcol = c(2,2))
# Panel 1: global active power over time.
plot(power2$DateTime,power2$Global_active_power, type = "l", xlab = "", ylab = "Global Active Power (kilowatts)")
# Panel 2: the three sub-metering series overlaid, with a legend.
with (power2, plot(DateTime,Sub_metering_1, type="l", xlab="", ylab = "Energy sub metering"))
with (power2, lines(DateTime,Sub_metering_2, type="l", col = "red"))
with (power2, lines(DateTime,Sub_metering_3, type="l", col = "blue"))
legend("topright", lty=c(1,1,1) , bty = "n", col = c("black","red","blue"), legend = c("Sub_metering_1","Sub_metering_2","Sub_metering_3"))
# Panels 3 and 4: voltage and global reactive power.
with (power2, plot(DateTime,Voltage, type = "l", xlab = "datetime", ylab = "Voltage"))
with (power2, plot(DateTime,Global_reactive_power, type = "l", xlab = "datetime"))
# NOTE(review): dev.copy() duplicates the on-screen device; if the screen
# window is not ~480x480 the PNG layout can differ — opening png() before
# plotting would be more reproducible.
dev.copy(png,file="plot4.png")
dev.off()
|
f98d8fa05d17798aa4cb5acd633b0c722e104a54
|
521c53413b61538670b6c0766596ddf3418d528c
|
/EPAemssions.R
|
e668c5696baacf1435fcb3350fde91c246e7ca1d
|
[] |
no_license
|
amanguptag/DataScience
|
50eb6f35fce8f47501ab86cbf422a0a7e750d4c7
|
d68f215f27fb5007e11bc47350a8534d01d0429d
|
refs/heads/master
| 2021-12-05T10:32:49.618911
| 2015-06-27T00:05:01
| 2015-06-27T00:05:01
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 7,278
|
r
|
EPAemssions.R
|
#The overall goal of this assignment is to explore the National Emissions Inventory database and
#see what it say about fine particulate matter pollution in the United states over the 10-year period 1999-2008.
# zip file of data:https://d396qusza40orc.cloudfront.net/exdata%2Fdata%2FNEI_data.zip
#Note that when I unzipped this file, I could not see the files in the dir
library(dplyr)
library(ggplot2)
# load the emissions data. This file contains a data frame with all of the
# PM2.5 emissions data for 1999, 2002, 2005, and 2008. For each year, the table contains
# number of tons of PM2.5 emitted from a specific type of source for the entire year
NEI <- readRDS("summarySCC_PM25.rds")
#fips: A five-digit number (represented as a string) indicating the U.S. county
#SCC: The name of the source as indicated by a digit string (see source code classification table)
#Pollutant: A string indicating the pollutant
#Emissions: Amount of PM2.5 emitted, in tons
#type: The type of source (point, non-point, on-road, or non-road)
#year: The year of emissions recorded
# load the doc that maps from the SCC digit strings in the Emissions table
# to the actual name of the PM2.5 source.
SCC <- readRDS("Source_Classification_Code.rds")
#1. Have total emissions from PM2.5 decreased in the United States from 1999 to 2008? Using the base plotting system,
# make a plot showing the total PM2.5 emission from all sources for each of the years 1999, 2002, 2005, and 2008.
# Total emissions per year across all sources.
yearlyEm <- NEI %>%
  group_by(year) %>%
  summarise(n = n(), TotalEmit = sum(Emissions))
yearlyEm$TotalEmit # Here are the total emission values for each year
#Here's the right way to do this using ggplot2
# NOTE(review): qplot(..., geom = "histogram", stat = "identity") is a
# deprecated idiom in modern ggplot2 — geom_col()/geom_bar(stat="identity")
# is the current equivalent; confirm the ggplot2 version in use.
qplot(as.factor(year),TotalEmit,data=yearlyEm, geom= "histogram", stat="identity", xlab="Year", ylab="Total Emissions per Year (Tons)",
      main = "USA Yearly Emissions")
#Here's the dumb way
plot(yearlyEm$year,yearlyEm$TotalEmit, type = "o",xlab="Year", ylab="Total Emissions per Year (Tons)",
     main = "USA Yearly Emissions" )
dev.copy(png, file = "plot1.png", width=400, height=400) ## Copy my plot to a PNG file
dev.off()
#ANSWER= Yes, total emissions have been decreasing yearly.
#2. Have total emissions from PM2.5 decreased in the Baltimore City, Maryland (fips == "24510") from 1999 to 2008?
# Use the base plotting system to make a plot answering this question.
# Same aggregation, restricted to Baltimore's FIPS code.
BaltMD <- NEI %>%
  filter(fips == "24510") %>%
  group_by(year) %>%
  summarise(n = n(), TotalEmit = sum(Emissions))
#Here's the right way to do this using ggplot2
qplot(as.factor(year),TotalEmit,data=BaltMD, geom= "histogram", stat="identity", xlab="Year", ylab="Total Emissions per Year (Tons)",
      main = "Yearly Emissions in Baltimore, Maryland")
#Here's the dumb way
plot(BaltMD$year,BaltMD$TotalEmit, type = "o",xlab="Year", ylab="Total Emissions per Year (Tons)",
     main = "UYearly Emissions in Baltimore, Maryland" )
dev.copy(png, file = "plot2.png", width=400, height=400) ## Copy my plot to a PNG file
dev.off()
#ANSWER = The total emissions have decreased, however there was a large spike in 2005
#3. Of the four types of sources indicated by the type (point, nonpoint, onroad, nonroad) variable,
# which of these four sources have seen decreases in emissions from 1999-2008 for Baltimore City?
# Which have seen increases in emissions from 1999-2008? Use the ggplot2 plotting system to make a plot answer this question.
NEI$type <- as.factor(NEI$type) # turn type into a factor
# Baltimore totals broken down by year and source type.
yearlyType <- NEI %>%
  filter(fips == "24510") %>%
  group_by(year,type) %>%
  summarise(TotalEmit = sum(Emissions))
#plot
byType <- ggplot(yearlyType, aes(year, TotalEmit))
byType + geom_line(aes(color = type),size = 2, alpha = 1/2) + labs(title="Total Yearly Emissions\nin Baltimore by Type", x = "Year", y= "Total Emissions per Year (Tons)") + geom_point(size=2, shape=21, fill="white")
ggsave("TypeEmissions.png", width=3, height =3)
#ANSWER= All types except for 'point' saw a decrease in total emissions. Point type has seen an overall increase in total emissions
#4. Across the United States, how have emissions from coal combustion-related sources changed from 1999-2008?
coal <- SCC %>% filter(grepl("Coal",Short.Name)) # Get only the codes that correspond to coal
coal1<- left_join(NEI,coal) #join all x that match y. All observations from NEI that have SCC that matches 'coal'.
# NOTE(review): left_join keeps ALL NEI rows (non-coal rows get NAs); the
# subsequent sum therefore includes every NEI emission — an inner_join or
# semi_join may have been intended. Confirm against the expected totals.
coal2 <- coal1 %>%
  group_by(year) %>%
  summarise(TotalEmit = sum(Emissions))
pcoal <- ggplot(coal2, aes(year, TotalEmit))
pcoal + geom_line(color = "steelblue") + geom_point( size=4, shape=21,fill="white") + labs(title="Total Yearly Emissions\nfrom Coal Sources", x = "Year", y= "Total Emissions per Year (Tons)")
ggsave("CoalEmissions.png")
#ANSWER= Emissions from coal sources have been approximately cut in half
#5. How have emissions from motor vehicle sources changed from 1999-2008 in Baltimore City?
# "ON-ROAD" type is used as the proxy for motor vehicles.
vehics <- NEI %>%
  filter(type == "ON-ROAD" & fips == "24510") %>%
  group_by(year) %>%
  summarise(TotalEmit = sum(Emissions))
vplot <- ggplot(vehics, aes(year, TotalEmit))
vplot + geom_line(color = "steelblue") + geom_point( size=4, shape=21,fill="white") + labs(title="Yearly Emissions\nin BaltimoreMD\nfrom Motor Vehicles", x = "Year", y= "Total Emissions per Year (Tons)")
ggsave("VehicalEmissions.png")
#ANSWER = Emissions have decreased from motor vehicles
#6. Compare emissions from motor vehicle sources in Baltimore City with emissions from
# motor vehicle sources in Los Angeles County, California (fips == "06037").
# Which city has seen greater changes over time in motor vehicle emissions?
vehics2 <- NEI %>%
  filter(type == "ON-ROAD") %>%
  filter(fips == "24510" | fips == "06037") %>%
  mutate(fips = ifelse(fips == "24510", "Baltimore", "LA")) %>%
  group_by(year,fips) %>%
  summarise(TotalEmit = sum(Emissions))
compare <- ggplot(vehics2, aes(year,TotalEmit))
compare + geom_line(aes(color = fips)) + geom_point( size=4, shape=21,fill="white") + labs(title="Yearly Motor Vehicle Emissions\nBaltimore vs LA", x = "Year", y= "Total Emissions per Year (Tons)")
# NOTE(review): this filename has no extension — ggsave() infers the device
# from the extension and will error here; probably "TotalLAvsBalt.png" was
# intended.
ggsave("TotalLAvs")
#ANSWER= Baltimore has decreased emissions each reporting period. LA has had a net increase in emissions from
# 2000 to 2008
# Year-over-year change per city via lag() (grouped by city first so lag stays
# within each city's series).
vehics3 <- NEI %>%
  filter(type == "ON-ROAD") %>%
  filter(fips == "24510" | fips == "06037") %>%
  mutate(fips = ifelse(fips == "24510", "Baltimore", "LA")) %>%
  group_by(fips,year) %>%
  summarise(TotalEmit = sum(Emissions)) %>%
  mutate(change = (TotalEmit - lag(TotalEmit)))
compare1 <- ggplot(vehics3, aes(year,change))
compare1 + geom_line(aes(color = fips)) + geom_point( size=4, shape=21,fill="white") + labs(title="Yearly Motor Vehicle Emission Changes\nBaltimore vs LA", x = "Year", y= "Emission Change per Year (Tons)")
ggsave("ChangeLAvsBalt.png")
#Here's a method for making side by side box plot comparisons of LA vs Baltimore
#notice the facet_grid() instead of facet_wrap()
# NOTE(review): `plot_data` (and its `City` column) is never defined anywhere
# in this script, so this final example errors as written. It presumably
# resembles vehics2 with fips renamed to City — confirm before running.
ggplot(data = plot_data, aes(x = year, y = Emissions)) + geom_bar(aes(fill = year),stat = "identity") + guides(fill = F) + ggtitle('Comparison of Motor Vehicle Emissions in LA and Baltimore') + ylab(expression('PM'[2.5])) + xlab('Year') + theme(legend.position = 'none') + facet_grid(. ~ City) + geom_text(aes(label = round(Emissions, 0), size = 1, hjust = 0.5, vjust = -1))
|
5c9bb246e422410c708882872fd7d5d76645fe3a
|
b88beec8fd1154c55ccf80f86d9e2bd78c19ed2b
|
/newstock.R
|
b69b2849072ab4ca65829dd040c44faf746867a7
|
[] |
no_license
|
smartgamer/newstock
|
e177f2ecd54b49bc87a32533dd535678ecbb8428
|
e66a4c367e9d4a3167ec3e48302acf190581146b
|
refs/heads/master
| 2020-04-14T04:00:13.083749
| 2018-12-31T00:34:18
| 2018-12-31T00:34:18
| 163,623,108
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 277
|
r
|
newstock.R
|
# Load the news and market sample data sets and inspect their structure.

# Base-R read; keep strings as characters rather than factors.
news <- read.csv("data/news_sample.csv", stringsAsFactors = FALSE)
market <- read.csv("data/marketdata_sample.csv")
str(news)
head(news)

# Re-read with {rio}, which infers the reader from the file extension.
# BUG FIX: do not unconditionally call install.packages() in a script —
# install only when the package is actually missing.
if (!requireNamespace("rio", quietly = TRUE)) {
  install.packages("rio")
}
library(rio)
news <- import("data/news_sample.csv")
market <- import("data/marketdata_sample.csv")
str(news)
head(news)
|
8ce9a30234dbbe199f6842f1f5ab350e346e09da
|
17c4ce937227a1361a7bb9772c7549e6a4764f2e
|
/R/03_Analysis.R
|
dd18abae0baaf2995d2037c0e8996c577eb6f6c0
|
[
"LicenseRef-scancode-public-domain"
] |
permissive
|
pjbouchet/orcaella_eoo
|
ed48f834f5abf6dfed1d3867433a456a16de8363
|
ce6f200efcf51f4e037e3c66a80e210149272bca
|
refs/heads/master
| 2021-06-22T03:36:23.129408
| 2020-12-30T13:29:17
| 2020-12-30T13:29:17
| 167,148,229
| 2
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 11,081
|
r
|
03_Analysis.R
|
#' ------------------------------------------------------------------------
#' ------------------------------------------------------------------------
#'
#' Preliminary assessment of the regional conservation status of snubfin
#' dolphins (Orcaella heinsohni) in Western Australia
#'
#' Bouchet et al. (2020). Frontiers in Marine Science
#'
#' --- 03: ANALYSIS ---
#'
#' ------------------------------------------------------------------------
#' ------------------------------------------------------------------------
# Extract covariates ---------------------------------------------------------
# Attach environmental covariates (depth, distance to freshwater) to each
# dolphin sighting by sampling the rasters at the sighting coordinates.
# NOTE(review): dfresh is divided by 1000 — presumably converting metres to
# kilometres; confirm the raster's units.
# Dolphin sightings
snub <- snub %>%
  dplyr::mutate(
    depth = raster::extract(x = depth, snub[, c("longitude", "latitude")]),
    dfresh = raster::extract(x = dfresh, snub[, c("longitude", "latitude")]) / 1000
  )
# Effort points
effort.pts <- read.csv("data/effort_pts.csv")
# Dolphins/boats can only occur in a minimum depth of 1 m
# Depths are negative below sea level; cap any value shallower than -1 at -1.
snub$depth <- ifelse(snub$depth>=-1, -1, snub$depth)
effort.pts$depth <- ifelse(effort.pts$depth>=-1, -1, effort.pts$depth)
# Minor adjustments
# Manual covariate corrections for individual sightings — presumably points
# falling on land / outside raster coverage; TODO confirm rationale.
snub[snub$sighting_ID%in%c("obs_3085","obs_2927", "obs_3027", "obs_0361", "obs_0355"),]$depth <- -1
snub[snub$sighting_ID=="obs_0772",]$depth <- -15.7
snub[snub$sighting_ID=="obs_2829",]$depth <- -9.33
snub[snub$sighting_ID=="obs_2829",]$dfresh <- 55.113
#'---------------------------------------------
# Code to compute distance to freshwater inputs
#'---------------------------------------------
# This code is used to generate the dfresh raster imported earlier
# It requires the input raster to be up-scaled (x 35) for faster execution.
# Convert depth raster to a uniform surface of value 1
depth.surface <- depth
depth.surface[!is.na(depth.surface)] <- 1
# Upscale the raster by a factor of 35
depth.35 <- raster::aggregate(x = depth.surface, fact = 35)
# Create list objects to store transition matrices and related rasters
# This helps considerably speed up the code
list.rasters <- list()
list.conductance <- list()
lat.extent <- list()
# Extract the coordinates of each raster cell and sort by latitude (for efficiency)
env.df <- raster::as.data.frame(depth.35, xy = TRUE, na.rm = TRUE)
names(env.df) <- c('longitude', 'latitude', 'depth')
env.df <- env.df %>% dplyr::arrange(latitude)
# Create a safe version of the shortest_distance function, which will
# return NULL if any error arises
shortestdist_safe <- purrr::safely(shortest_distance)
# Run calculations for each raster cell
future::plan(multiprocess) # Parallel processing
dist.fresh <- furrr::future_map(
  .x = 1:nrow(env.df),
  .f = ~ {
    dat <- env.df[.x, ] # Extract each data.point
    # Least-cost distance only north of latitude -18.2 — presumably the
    # region where land obstructs straight-line paths; TODO confirm.
    if (dat$latitude <= -18.2) lcd <- FALSE else lcd <- TRUE
    # Convert to spatial object
    locs <- sp::SpatialPointsDataFrame(
      coords = cbind(dat$longitude, dat$latitude),
      data = dat, proj4string = CRSll
    )
    # Compute geodesic distance
    shortestdist_safe(
      least.cost = lcd,
      in.pts = locs,
      out.pts = rivers,
      closest.pts = 3,
      clip.width = 20000,
      cost.surface = depth.35
    )
  },
  .progress = TRUE
)
# Check whether any errors occurred and if so, identify where
lc.errors <- purrr::map(.x = dist.fresh, .f = "result") %>%
  purrr::map_dbl(., ~ as.numeric(is.null(.)))
errors.ind <- which(lc.errors == 1)
# If errors are found, replace values with straight line distances
straight.d <- furrr::future_map(.x = errors.ind,
                                .f = ~{
                                  dat <- env.df[.x, ]
                                  if(dat$latitude <= -18.2) lcd <- FALSE else lcd <- TRUE
                                  locs <- sp::SpatialPointsDataFrame(coords = cbind(dat$longitude, dat$latitude),
                                                                     data = dat, proj4string = CRSll)
                                  shortestdist_safe(least.cost = FALSE,
                                                    in.pts = locs,
                                                    out.pts = rivers,
                                                    closest.pts = 3,
                                                    clip.width = 20000,
                                                    cost.surface = depth.35)},
                                .progress = TRUE)
straight.d <- purrr::map(straight.d, 'result') %>% do.call(c, .)
dist.fresh.corrected <- dist.fresh
# NOTE(review): 1:length(errors.ind) iterates c(1, 0) when no errors were
# found; seq_along(errors.ind) would be safer.
for(i in 1:length(errors.ind)) dist.fresh.corrected[[errors.ind[i]]]$result <- straight.d[i]
distf.r <- purrr::map(dist.fresh.corrected, 'result') %>% do.call(c, .)
# One area near the top end of WA returned inconsistent results
# See the code in 05_Corrections for a fix.
# Bivariate kernel --------------------------------------------------------
# Build a 2D (dfresh x depth) kernel density for primary sightings,
# standardised by a matching kernel built from survey effort.
# Extract primary sightings
snub.primary <- snub %>% split(x = ., f = .$sighting_class)
snub.primary <- snub.primary$primary
#'---------------------------------------------
# Calculate plug-in bandwidth
#'---------------------------------------------
# NOTE(review): h.biv is computed here but not referenced below — the
# bkde2D() call uses hard-coded "adjusted" bandwidths instead; confirm the
# plug-in values are only used as a diagnostic.
h.biv <- purrr::map(
  .x = list(snub.primary, effort.pts),
  .f = ~ ks::Hpi(x = cbind(.x$dfresh, .x$depth))) %>%
  purrr::set_names(., nm = c("sightings", "effort")) %>%
  purrr::map(.x = ., .f = ~ {
    tmp <- .x
    colnames(tmp) <- c("dfresh", "depth")
    tmp
  })
#' ---------------------------------------------
# Generate 2D kernels (adjusted bandwidth values)
#' ---------------------------------------------
# .y indexes the bandwidth pair: sightings get c(1, 1.5), effort c(15, 25).
bivariate.kernel <- purrr::imap(
  .x = list(snub.primary, effort.pts),
  .f = ~ KernSmooth::bkde2D(
    x = cbind(.x$dfresh, .x$depth),
    bandwidth = list(c(1, 1.5), c(15, 25))[[.y]],
    range.x = list(c(0, 135), c(-110, -1)),
    gridsize = c(80, 95)
  )
)
# Convert to raster and crop
bivariate.r <- purrr::map(.x = bivariate.kernel, .f = ~ raster::raster(list(x = .x$x1, y = .x$x2, z = .x$fhat)))
bivariate.r <- purrr::map(
  .x = bivariate.r,
  .f = ~ raster::crop(x = .x, raster::extent(c(extent(.x)[1], 80, -95, extent(.x)[4])))
)
# Standardise by effort
# (sightings kernel divided by effort kernel, then normalised to sum to 1)
bivariate.r <- bivariate.r[[1]] / bivariate.r[[2]]
bivariate.r <- bivariate.r / sum(bivariate.r[])
# Rescale to 0-1 range
bivariate.r <- rescale_raster(bivariate.r)
plot(bivariate.r, col = pals::parula(100))
#'---------------------------------------------
# Back-transform to geographic space
#'---------------------------------------------
# Load upscaled rasters
depth35 <- raster::raster("gis/kimb_depth_x35.tif")
dfresh35 <- raster::raster("gis/kimb_dfresh_x35.tif")
# Rasters need to have the same extents
dfresh35 <- raster::resample(x = dfresh35, y = depth35)
# Stack rasters and convert to df
# dfresh converted with the same /1000 scaling used for the sightings above.
kimb.grid <- raster::stack(depth35, dfresh35) %>%
  raster::as.data.frame(., xy = TRUE, na.rm = TRUE) %>%
  tibble::as_tibble(.) %>%
  dplyr::rename(depth = kimb_depth_x35, dfresh = kimb_dfresh_x35) %>%
  dplyr::mutate(dfresh = dfresh/1000)
# Compute percent volume contours (PVC)
k90 <- pvc(k = 0.9, convert.to.poly = TRUE, smoothing = 25)
k50 <- pvc(k = 0.5, convert.to.poly = TRUE, smoothing = 25)
k25 <- pvc(k = 0.25, convert.to.poly = TRUE, smoothing = 25)
# What percentage of the IUCN range polygon does this represent?
iucn.range.wa <- raster::crop(x = iucn.range, y = raster::extent(k90$poly))
rgeos::gArea(spgeom = sp::spTransform(x = k90$poly, CRSobj = CRSKim))/rgeos::gArea(spgeom = sp::spTransform(x = iucn.range.wa, CRSobj = CRSKim))
#'---------------------------------------------
# Assign inclusion probabilities
#'---------------------------------------------
# Secondary sightings get an inclusion probability sampled from the
# standardised kernel; primary sightings are always included (prob = 1).
snub.dat <- snub
snub.dat$incl.prob <- raster::extract(bivariate.r, snub.dat[, c("dfresh", "depth")])
snub.dat$incl.prob <- ifelse(snub.dat$sighting_class == "secondary", snub.dat$incl.prob, 1)
boxplot(incl.prob~dataset_ID, data = droplevels(snub.dat[snub.dat$sighting_class=="secondary",]), ylim = c(0,1))
# Bootstrap --------------------------------------------------------
# Number of iterations
n.iter <- 1000
#'-------------------------------------------------
# Generate bootstrap resamples
#'-------------------------------------------------
snub.boot <- split(x = snub.dat, f = snub.dat$sighting_class)
snub.boot <- list(data = snub.boot)
# Primary sightings are resampled with replacement
snub.boot$boot$primary <- purrr::map(
  .x = 1:n.iter,
  .f = ~ dplyr::sample_n(
    tbl = snub.boot$data[[1]],
    size = nrow(snub.boot$data[[1]]),
    replace = TRUE
  )
)
# Secondary sightings are resampled without replacement
# Each column of select.mat is one Bernoulli draw per secondary sighting,
# using its kernel-derived inclusion probability.
select.mat <- matrix(nrow = nrow(snub.boot$data[[2]]), ncol = n.iter)
for (i in 1:n.iter) {
  select.mat[, i] <- purrr::map_int(
    .x = snub.boot$data[[2]]$incl.prob,
    .f = ~ rbinom(n = 1, size = 1, prob = .x)
  )
}
# NOTE(review): select.mat is integer 0/1, not logical. Numeric row indexing
# drops the 0s and selects row 1 once per 1 — i.e. the first sighting
# repeated — rather than keeping the sampled rows. The intent appears to be
# logical subsetting (as.logical(select.mat[, .x])); confirm and fix.
snub.boot$boot$secondary$prob <- purrr::map(.x = 1:n.iter, .f = ~ snub.boot$data[[2]][select.mat[, .x], ])
#' -------------------------------------------------
# Recombine datasets
#' -------------------------------------------------
# Each bootstrap replicate = resampled primary + probabilistically retained
# secondary sightings.
snub.boot$combined <- purrr::map(
  .x = 1:n.iter,
  .f = ~ rbind(
    snub.boot$boot$primary[[.x]],
    snub.boot$boot$secondary$prob[[.x]]
  )
)
# Extent of occurrence (EOO) --------------------------------------------------------
#' -------------------------------------------------
# EOO as a minimum convex polygon (MCP)
#' -------------------------------------------------
plan(multiprocess)
eoo.mcp <- calc.eoo(dataset.list = snub.boot$combined, convex.hull = TRUE)
bci(eoo.mcp)
# EOO relative to the IUCN range polygon (gArea is in m^2, hence /1e6 to km^2).
median(eoo.mcp)/(rgeos::gArea(spgeom = sp::spTransform(x = iucn.range, CRSobj = CRSKim))/1000000)
#'-------------------------------------------------
# EOO as an alpha-hull
#'-------------------------------------------------
# Define range of alpha values to test
alpha.values <- seq(0.1, 2, by = 0.1)
# Find lowest alpha corresponding to an alpha-hull with no hollow spaces
pb <- progress_estimated(1000)
alpha.param <- purrr::map_dbl(
  .x = snub.boot$combined,
  .f = ~ smallest_alpha(alphaval = alpha.values, df = .),
  .progress = TRUE
)
# Calculate alpha-EOO
plan(multiprocess)
eoo.alpha <- calc.eoo(dataset.list = snub.boot$combined, alphaval = alpha.param, convex.hull = FALSE)
bci(eoo.alpha)
# Area of occupancy (AOO) -------------------------------------------------
pb <- progress_estimated(1000)
# NOTE(review): purrr:::map_dbl uses `:::` (internal access) where `::`
# suffices; same for the map2_chr calls below.
aoo <- purrr:::map_dbl(
  .x = snub.boot$combined,
  .f = ~ calc.aoo(
    input.data = .,
    coordinate.system = CRSKim,
    Cell_size_AOO = 2,
    nbe.rep.rast.AOO = 50
  )
)
# Mean and 95% confidence interval
bci(aoo)
# Conservation status -------------------------------------------------
# Classify each bootstrap replicate against IUCN threat criteria using the
# paired EOO and AOO estimates.
# Using MCP-EOO
iucn.mcp <- eoo.mcp %>%
  purrr:::map2_chr(.x = ., .y = aoo, .f = ~classify.threat(EOO = .x, AOO = .y))
# Using alpha-hull EOO
iucn.alpha <- eoo.alpha %>%
  purrr:::map2_chr(.x = ., .y = aoo, .f = ~classify.threat(EOO = .x, AOO = .y))
table(iucn.mcp) %>% barplot(.)
table(iucn.alpha) %>% barplot(.)
|
b1f7bf81966c39a14fec623ac3a523ac264ca1d2
|
ca96ff81d10521464c60be347b4132f3e2edc40f
|
/man/IDRlsi.Rd
|
60c76398117ed83f4b2c282b955e3390e51350b1
|
[] |
no_license
|
tmrealphd/PF
|
751fbd86e41e081d4e41f9a8a08f4730a7544dc1
|
b1cef82f1f951dd5cc90c1c56d68aef43c4dfba0
|
refs/heads/master
| 2021-01-21T20:01:20.078591
| 2015-03-31T20:37:39
| 2015-03-31T20:37:39
| 37,874,379
| 0
| 0
| null | 2015-06-22T18:52:55
| 2015-06-22T18:52:55
| null |
UTF-8
|
R
| false
| false
| 2,569
|
rd
|
IDRlsi.Rd
|
\name{IDRlsi}
\alias{IDRlsi}
\title{IDR likelihood support interval.}
\usage{
IDRlsi(y, alpha = 0.05, k = 8, use.alpha = FALSE,
pf = TRUE, converge = 1e-08, rnd = 3, start = NULL,
trace.it = FALSE, iter.max = 24)
}
\arguments{
\item{y}{Data vector c(y1, n1, y2, n2) where y are the
positives, n are the total, and group 1 is compared to
group 2.}
\item{k}{Likelihood ratio criterion.}
\item{alpha}{Complement of the confidence level.}
\item{use.alpha}{Base choice of k on its relationship to
alpha?}
\item{pf}{Estimate \emph{IDR} or its complement
\emph{PF}?}
\item{trace.it}{Verbose tracking of the iterations?}
\item{iter.max}{Maximum number of iterations}
\item{converge}{Convergence criterion}
\item{rnd}{Number of digits for rounding. Affects display
only, not estimates.}
\item{start}{Optional starting value for the iterative estimation;
  defaults to \code{NULL}.}
}
\value{
A \code{\link{rrsi}} object with the following elements.
\item{estimate}{vector with point and interval estimate}
\item{estimator}{either \emph{PF} or \emph{IDR}}
\item{y}{data vector} \item{k}{Likelihood ratio
criterion} \item{rnd}{how many digits to round the
display} \item{alpha}{complement of confidence level}
}
\description{
Estimates likelihood support interval for the incidence
density ratio or prevented fraction based on it.
}
\details{
Estimates likelihood support interval for the incidence
density ratio based on orthogonal factoring of
reparameterized likelihood. The incidence density is the
number of cases per subject-time; its distribution is
assumed Poisson. \cr \cr Likelihood support intervals are
usually formed based on the desired likelihood ratio,
often 1/8 or 1/32. Under some conditions the log
likelihood ratio may follow the chi square distribution.
If so, then \eqn{\alpha=1-F(2log(k),1)}, where \eqn{F} is
a chi-square CDF. \code{RRsc()} will make the conversion
from \eqn{\alpha} to \emph{k} if \code{use.alpha = TRUE}.
\cr \cr The data may also be a matrix. In that case
\code{y} would be entered as \code{matrix(c(y1, n1 - y1,
y2, n2 - y2), 2, 2, byrow = TRUE)}.
}
\note{
Level tested: Low.
}
\examples{
IDRlsi(c(26, 204, 10, 205), pf = FALSE)
# 1/8 likelihood support interval for IDR
# corresponds to 95.858\% confidence
# (under certain assumptions)
# IDR
# IDR LL UL
# 2.61 1.26 5.88
}
\author{
David Siev \email{david.siev@aphis.usda.gov}
}
\references{
Royall R. \emph{Statistical Evidence: A Likelihood
Paradigm}. Chapman & Hall, Boca Raton, 1997. Section 7.2.
}
\seealso{
\code{\link{IDRsc}}
}
|
abd686dc8d38e2a7749def84d55f28c4ef62dc3e
|
d5fe74be8e38f09c63f3e9cef3093b4e673596da
|
/AstMex_Hypo/scripts/AstMex_Hypo_figure_Microglia_figure_script_V2.R
|
2b7488d9cab3b47a5dd0d1edfb76436162eaa606
|
[] |
no_license
|
wangchengww/Cavefish_Paper
|
38d5abd692c03920e4716f5507bcbc6fb071a0ea
|
2782e95e3e7da83c2fdd323af4f560439ac18310
|
refs/heads/master
| 2023-05-12T13:18:47.005476
| 2020-12-09T14:12:45
| 2020-12-09T14:12:45
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 4,384
|
r
|
AstMex_Hypo_figure_Microglia_figure_script_V2.R
|
library(Seurat)
library(stringr)
library(dplyr)
library(data.table)
library(purrr)
library(patchwork)
library(viridis)
# Load subsets
# NOTE(review): setwd() with an absolute path makes the script non-portable;
# consider project-relative paths.
setwd("/Volumes/BZ/Home/gizevo30/R_Projects/Cavefish_Paper/AstMex_Hypo")
hypo.ast <- readRDS("AstMex_63k.rds")
# Subset out the blood lineage cells
Idents(hypo.ast) <- "Subtype"
immune <- subset(hypo.ast, idents = c("Erythrocytes", "Tcells", "Bcells", "Mast_cells", "Neutrophils", "Macrophages", "Microglia"))
# Re-run the standard dimensionality-reduction workflow on the immune subset.
immune <- FindVariableFeatures(immune, selection.method = "mvp")
immune <- ScaleData(object = immune, features = VariableFeatures(immune))
immune <- RunPCA(object = immune, features = VariableFeatures(immune), npcs = 100, set.seed = 0)
# ElbowPlot(object = immune, ndims = 100) # 25 PCs looks good
immune <- RunTSNE(object = immune, reduction = "pca", dims = 1:25, tsne.method = "Rtsne", reduction.name = "tsne", reduction.key = "tsne_", seed.use = 1, check_duplicates = F)
saveRDS(immune, file = "AstMex_immune.rds")
## Find Markers
cell.types <- unique(immune@meta.data$morph_Subtype)
# NOTE(review): at this point Idents(immune) is still "Subtype" (set above),
# so this filters morph_Subtype levels using Subtype counts; cell.types is
# also never used afterwards — confirm intent.
cell.types <- cell.types[table(Idents(immune)) > 3]
Idents(immune) <- "morph_Subtype"
Idents(immune) <- factor(Idents(immune), levels = levels(immune@meta.data$morph_Subtype))
morph_subtype_markers <- FindAllMarkers(immune, max.cells.per.ident = 500, only.pos = T)
# Keep the top 3 markers per cluster by average log fold-change.
markers <- morph_subtype_markers %>% group_by(cluster) %>% top_n(3, avg_logFC)
## Make Plots
cols0 <- c("#FDE725FF", "#22A884FF")
cols3 <- c("#771155", "#AA4488", "#CC99BB", "#114477", "#4477AA", "#77AADD", "#117777", "#44AAAA", "#77CCCC", "#777711", "#AAAA44", "#DDDD77", "#774411", "#AA7744", "#DDAA77", "#771122", "#AA4455", "#DD7788")
immune.morph <- DimPlot(object = immune, group.by = "species", reduction = "tsne", pt.size = .25, label = FALSE, cols = cols0) + NoAxes() + theme(legend.position = c(0.8,0.9), legend.background = element_blank()) + guides(color = guide_legend(ncol = 1, override.aes = list(size = 2)))
immune.subtype <- DimPlot(object = immune, group.by = "Subtype", reduction = "tsne", pt.size = .25, label = TRUE) + NoLegend() + NoAxes()
immune.subcluster <- DimPlot(object = immune, group.by = "SubclusterType", reduction = "tsne", pt.size = .25, label = TRUE) + NoLegend() + NoAxes()
immune.orig <- DimPlot(object = immune, group.by = "orig.ident", reduction = "tsne", pt.size = .25, label = FALSE, cols = cols3) + NoAxes() + theme(legend.position = c(0.8,0.9), legend.background = element_blank()) + guides(color = guide_legend(ncol = 2, override.aes = list(size = 5))) + scale_colour_manual(values = cols3)
ccr9a <- FeaturePlot(immune, features = c("ccr9a"), reduction = "tsne", pt.size = .25) + NoAxes() + ggtitle("")
dot.plot <- DotPlot(immune, features = unique(markers$gene), group.by = "species_Subtype", scale.max = 150) + coord_flip() + theme(axis.text = element_text(size = 8), axis.text.x = element_text(angle = 90, hjust = 1, vjust = 0.5)) + scale_color_viridis(option = "A")
# Patchwork them together
tsnes <- (immune.subtype + immune.morph + ccr9a + plot_layout(nrow = 3))
tsnes / dot.plot + plot_layout(ncol = 2, widths = c(1.5,1), guides = "collect")
# ## Other gene lists, including from Peuss et. al.
#
# gen.genes <- c("pfn1", "cd74a", "npc2", "grn2", "cotl1", "pgd")
# myeloid.genes <- c("apoeb", "apoc1", "csf3a", "dusp2", "cxcr4b")
# lymphoid.genes <- c("cd28l", "sla2", "IGKC", "srgn", "rel", "p2ry11", "bric2", "ltb4r", "alox5ap")
#
#
# genes <- c("pfn1", "cd74a", "npc2", "grn2", "cotl1", "pgd", "apoeb", "apoc1", "csf3a", "dusp2", "cxcr4b", "cd28l", "sla2", "IGKC", "srgn", "rel", "p2ry11", "bric2", "ltb4r", "alox5ap")
#
# peuss.surface <- read.csv("~/Downloads/media-3_surface/Surface-Table 1.csv")
# peuss.pachon <- read.csv("~/Downloads/media-3_surface/Pachón-Table 1.csv")
#
# top.pachon <- peuss.pachon %>% group_by(Cluster) %>% top_n(2, Avgerage.logFC)
# top.surface <- peuss.surface %>% group_by(Cluster) %>% top_n(2, Avgerage.logFC)
# pachon.genes <- top.pachon$Gene[top.pachon$Gene %in% row.names(GetAssayData(immune))]
# surface.genes <- top.surface$Gene[top.surface$Gene %in% row.names(GetAssayData(immune))]
#
# DotPlot(object = immune, features = union(surface.genes, pachon.genes), group.by = "SubclusterType") + RotatedAxis() + NoLegend()
# DotPlot(object = immune, features = genes, group.by = "Subtype") + RotatedAxis() + NoLegend()
#
# print(top.pachon, n = 28)
# print(top.surface, n = 18)
|
ecfc355a49e26ff557c8c2cfe71c1975919bc4cb
|
b149df305dea721e1a1a55e6c3d4d700b7de36c2
|
/tests/testthat/test-images.R
|
922ac3d9e17b26cba474ae5dd52dfb4bf3be81c5
|
[
"Apache-2.0"
] |
permissive
|
pachadotdev/analogsea
|
f5302d21e4fa11680a12bb16bee3133ec78b064a
|
947d17175a0ac219a1ef7dae885688320aef59b3
|
refs/heads/main
| 2023-07-07T14:39:55.017718
| 2023-07-01T14:52:27
| 2023-07-01T14:52:27
| 20,135,875
| 21
| 3
|
Apache-2.0
| 2023-04-18T17:35:58
| 2014-05-24T17:54:43
|
R
|
UTF-8
|
R
| false
| false
| 1,272
|
r
|
test-images.R
|
# tests for images
# Integration tests for images(); they appear to hit the live API (hence
# skip_on_cran() on every test) — confirm credentials are expected in env.
context("images")
test_that("returns expected output for public images", {
  skip_on_cran()
  imgs <- images()
  expect_is(imgs, "list")
  expect_is(imgs[[1]], "image")
  expect_is(imgs[[1]]$id, "integer")
  expect_is(imgs[[1]]$name, "character")
  expect_true(imgs[[1]]$public)
})
test_that("fails well with wrong input type to private parameter", {
  skip_on_cran()
  expect_error(images(private = "af"), "is not TRUE")
})
test_that("works with type parameter", {
  skip_on_cran()
  imgs_dist <- images(type = "distribution")
  # Concatenate two pages of application images into one list.
  imgs_appl <- c(images(type = "application"), images(type = "application", page = 2))
  expect_is(imgs_dist, "list")
  expect_is(imgs_appl, "list")
  expect_is(imgs_dist[[1]], "image")
  expect_is(imgs_dist[[1]]$regions, "list")
  expect_is(imgs_dist[[1]]$regions[[1]], "character")
  expect_is(imgs_appl[[1]], "image")
  expect_is(imgs_appl[[1]]$regions, "list")
  expect_is(imgs_appl[[1]]$regions[[1]], "character")
})
test_that("public parameter is defunct", {
  skip_on_cran()
  expect_error(images(public = TRUE), "The parameter public has been removed, see private")
})
test_that("httr curl options work", {
  skip_on_cran()
  library("httr")
  # An absurdly small timeout should force a curl error.
  expect_error(images(config = timeout(seconds = 0.001)))
})
|
75e182376c75218fc6aa696fc839fa020ae87e97
|
2bd112c1f31fe903a310b716354314488f3b4e54
|
/bay_model_survive2.R
|
d0a32efb71d531906df56bccb86527c466867ff2
|
[] |
no_license
|
BlaineLandsborough/QMEE
|
6195a6becd7b131d1365a6db624129cfdfb129aa
|
2eeced9a6e0a63ddc1127ff756d37fc188878ebc
|
refs/heads/master
| 2020-04-15T19:17:18.041162
| 2019-05-02T16:09:08
| 2019-05-02T16:09:08
| 164,900,546
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,278
|
r
|
bay_model_survive2.R
|
## Attempt at logistic regression using Bayesian modelling, with survival as a
## binary response variable,to investigate difference in nestling survival at 60 days
##between treatment types. Survival variable (first_sur) coded as 1 for survived and 0
##for did not survive. Models did not converge when tested.
library(R2jags)
library(readr) ## BMB: readr is included in tidyverse
library(ggplot2)
library(lme4)
library(tidyverse)
survival <- read_csv("2018_chick_survival_v2.csv")
##GLM and GLMM
# Frequentist reference fits: plain logistic GLM, then a GLMM with a
# random intercept per Nest.
pukglm <- glm(first_sur ~ hatch_spread, data = survival, family = binomial)
summary(pukglm)
puksur <- glmer(first_sur ~ hatch_spread + (1|Nest),
                data = survival, family = binomial)
summary(puksur)
## BMB: why are you bounding logit(p[i]) between 1 and 15??
## BMB: shouldn't there be an intercept here?? otherwise you're assuming
## prob=0.5 when hatch_spread is zero ... (I guess this is b_first_sur,
## but you didn't use it in predictions
# NOTE(review): max(15, min(1, x)) always returns 15 — the bound is
# inverted; a symmetric clamp would be min(15, max(-15, x)).
model.code="
model{
  for (i in 1:N) {
    first_sur[i] ~ dbern(p[i])
    logit(p[i]) <- max(15,min(1,b_hatch_spread*hatch_spread[i]))
  }
  b_first_sur ~ dnorm(0,0.0001)
  b_hatch_spread ~ dnorm(0,0.0001)
}
"
## BMB: this isn't working at all because the max/min bounding is
## removing all of the information, so you're just reproducing the priors.
## BMB: did you write this from scratch? (I'm wondering about the bounding
## stuff) It's fine to build on others' code, but if you use sources,
## please cite them ...
## I'm a little alarmed that you didn't notice that your results don't
## make any sense?
writeLines(model.code,con="pukmodel.bug")
N <- nrow(survival)
bmod <- with(survival, jags(model.file = 'pukmodel.bug'
                            , parameters=c("b_first_sur","b_hatch_spread")
                            , data = list('first_sur' = first_sur, 'hatch_spread' =
                                            hatch_spread, 'N'=N)
                            , n.chains = 4
                            , inits=NULL
))
bayoutput <- bmod$BUGSoutput
library("emdbook")
pukmcmc <- as.mcmc.bugs(bayoutput)
print(bmod)
traceplot(bayoutput)
library("lattice")
bayoutput <- bmod$BUGSoutput
pukmcmc <- as.mcmc.bugs(bayoutput)
xyplot(pukmcmc,layout=c(2,3))
densityplot(pukmcmc,layout=c(2,2))
#second chain
## BMB: you don't need to run multiple chains by hand -- that's what
## n.chains does; if you wanted 8 chains you could just say n.chains=8
# Same model refit with tighter dnorm(0, 1) priors on both coefficients.
sec.code="
model{
  for (i in 1:N) {
    first_sur[i] ~ dbern(p[i])
    logit(p[i]) <- max(15,min(1,b_hatch_spread*hatch_spread[i]))
  }
  b_first_sur ~ dnorm(0,1)
  b_hatch_spread ~ dnorm(0,1)
}
"
writeLines(sec.code,con="pukmodel2.bug")
N <- nrow(survival)
bmod2 <- with(survival, jags(model.file = 'pukmodel2.bug'
                             , parameters=c("b_first_sur","b_hatch_spread")
                             , data = list('first_sur' = first_sur, 'hatch_spread' =
                                             hatch_spread, 'N'=N)
                             , n.chains = 4
                             , inits=NULL
))
bayoutput2 <- bmod2$BUGSoutput
combinedchains <- as.mcmc.list(bayoutput, bayoutput2)
plot(combinedchains)
## BMB: score 2.
|
617ee4f437bdbd6c9beea47f3ed4495a4ae66d2b
|
43b17584478c0360d0fdced151db43c35728837a
|
/R/gitlab_api.R
|
c57348ae33fe757cbbc6cf404005c1c966c6e2d7
|
[] |
no_license
|
cran/gitlabr
|
51357cc4c136b4d5125a1d39aec63ea62ef509d1
|
b8d64273024933804044ca8eeab18930a4611c55
|
refs/heads/master
| 2022-10-03T07:18:40.338952
| 2022-09-13T10:00:02
| 2022-09-13T10:00:02
| 48,080,948
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 9,487
|
r
|
gitlab_api.R
|
#' Request GitLab API
#'
#' This is {gitlabr}'s core function to talk to GitLab's server API via HTTP(S). Usually you will not
#' use this function directly too often, but either use {gitlabr}'s convenience wrappers or write your
#' own. See the {gitlabr} vignette for more information on this.
#'
#' @param req vector of characters that represents the call (e.g. `c("projects", project_id, "events")`)
#' @param api_root URL where the GitLab API to request resides (e.g. `https://gitlab.myserver.com/api/v3/`)
#' @param verb http verb to use for request in form of one of the `httr` functions
#' [httr::GET()], [httr::PUT()], [httr::POST()], [httr::DELETE()]
#' @param auto_format whether to format the returned object automatically to a flat data.frame
#' @param debug if TRUE API URL and query will be printed, defaults to FALSE
#' @param gitlab_con function to use for issuing API requests (e.g. as returned by
#' [gitlab_connection()]
#' @param page number of page of API response to get; if "all" (default), all pages
#' (up to max_page parameter!) are queried successively and combined.
#' @param max_page maximum number of pages to retrieve. Defaults to 10. This is an upper limit
#' to prevent {gitlabr} getting stuck in retrieving an unexpectedly high number of entries (e.g. of a
#' project list). It can be set to NA/Inf to retrieve all available pages without limit, but this
#' is recommended only under controlled circumstances.
#' @param enforce_api_root if multiple pages are requested, the API root URL is ensured
#' to be the same as in the original call for all calls using the "next page" URL returned
#' by GitLab. This makes sense for security and in cases where GitLab is behind a reverse proxy
#' and ignorant about its URL from external.
#' @param argname_verb name of the argument of the verb that fields and information are passed on to
#' @param ... named parameters to pass on to GitLab API (technically: modifies query parameters of request URL),
#' may include private_token and all other parameters as documented for the GitLab API
#'
#' @importFrom utils capture.output
#' @importFrom tibble tibble as_tibble
#' @importFrom magrittr %T>%
#' @importFrom dplyr bind_rows
#' @importFrom stringr str_replace_all str_replace
#' @export
#'
#' @return the response from the GitLab API, usually as a `tibble` and including all pages
#'
#' @details
#' `gitlab()` function allows to use any request of the GitLab API <https://docs.gitlab.com/ce/api/>.
#'
#' For instance, the API documentation shows how to create a new project in
#' <https://docs.gitlab.com/ce/api/projects.html#create-project>:
#'
#' - The verb is `POST`
#' - The request is `projects`
#' - Required attributes are `name` or `path` (if `name` not set)
#' - `default_branch` is an attribute that can be set if wanted
#'
#' The corresponding use of `gitlab()` is:
#'
#' ```
#' gitlab(
#' req = "projects",
#' verb = httr::POST,
#' name = "toto",
#' default_branch = "main"
#' )
#' ```
#'
#' Note: currently GitLab API v4 is supported. GitLab API v3 is no longer supported, but
#' you can give it a try.
#'
#' @examples \dontrun{
#' # Connect as a fixed user to a GitLab instance
#' set_gitlab_connection(
#' gitlab_url = "https://gitlab.com",
#' private_token = Sys.getenv("GITLAB_COM_TOKEN")
#' )
#'
#' # Use a simple request
#' gitlab(req = "projects")
#' # Use a combined request with extra parameters
#' gitlab(req = c("projects", 1234, "issues"),
#' state = "closed")
#' }
gitlab <- function(req,
                   api_root,
                   verb = httr::GET,
                   auto_format = TRUE,
                   debug = FALSE,
                   gitlab_con = "default",
                   page = "all",
                   max_page = 10,
                   enforce_api_root = TRUE,
                   argname_verb = if (identical(verb, httr::GET) |
                                      identical(verb, httr::DELETE)) { "query" } else { "body" },
                   ...) {
  # Resolve the connection: "default" means "use the globally registered
  # connection function, if one exists"; otherwise gitlab_con may itself
  # already be a function that wraps this one.
  if (!is.function(gitlab_con) &&
      gitlab_con == "default" &&
      !is.null(get_gitlab_connection())) {
    gitlab_con <- get_gitlab_connection()
  }
  if (!is.function(gitlab_con)) {
    # No connection function: issue the HTTP request directly.
    # Build the URL from api_root plus the slash-joined request path parts.
    url <- req %>%
      paste(collapse = "/") %>%
      prefix(api_root, "/") %T>%
      iff(debug, function(x) { print(paste(c("URL:", x, " "
                                             , "query:", paste(utils::capture.output(print((list(...)))), collapse = " "), " ", collapse = " "))); x })
    # Extract private token to put it in header (not in the query/body).
    l <- list(...)
    private_token <- l$private_token
    l <- within(l, rm(private_token))
    private_token_header <- httr::add_headers("PRIVATE-TOKEN" = private_token)
    # Fire the request; for an explicit page, add the page parameter.
    (if (page == "all") {l} else { c(page = page, l)}) %>%
      pipe_into(argname_verb, verb, url = url, private_token_header) %>%
      http_error_or_content() -> resp
    resp$ct %>%
      iff(auto_format, json_to_flat_df) %>% ## better would be to check MIME type
      iff(debug, print) -> resp$ct
    if (page == "all") {
      # Follow "next page" links until exhausted or max_page is reached.
      pages_retrieved <- 1L
      # BUG FIX: the loop condition previously required is.finite(max_page),
      # so max_page = NA/Inf stopped after the FIRST page instead of lifting
      # the limit as documented above. A non-finite max_page now means
      # "no limit".
      while (length(resp$nxt) > 0 &&
             (!is.finite(max_page) || pages_retrieved < max_page)) {
        nxt_resp <- resp$nxt %>%
          as.character() %>%
          iff(enforce_api_root, stringr::str_replace, "^.*/api/v\\d/", api_root) %>%
          httr::GET(private_token_header) %>%
          http_error_or_content()
        resp$nxt <- nxt_resp$nxt
        resp$ct <- bind_rows(resp$ct, nxt_resp$ct %>%
                               iff(auto_format, json_to_flat_df))
        pages_retrieved <- pages_retrieved + 1L
      }
    }
    return(resp$ct)
  } else {
    # A connection function is in use: forward only the arguments that were
    # explicitly supplied, so the connection's own defaults are respected.
    if (!missing(req)) {
      dot_args <- list(req = req)
    } else {
      dot_args <- list()
    }
    if (!missing(api_root)) {
      dot_args <- c(dot_args, api_root = api_root)
    }
    if (!missing(verb)) {
      dot_args <- c(dot_args, verb = verb)
    }
    if (!missing(auto_format)) {
      dot_args <- c(dot_args, auto_format = auto_format)
    }
    if (!missing(debug)) {
      dot_args <- c(dot_args, debug = debug)
    }
    if (!missing(page)) {
      dot_args <- c(dot_args, page = page)
    }
    if (!missing(max_page)) {
      dot_args <- c(dot_args, max_page = max_page)
    }
    # gitlab_con = "self" prevents infinite recursion when the connection
    # function itself calls gitlab().
    do.call(gitlab_con, c(dot_args, gitlab_con = "self", ...)) %>%
      iff(debug, print)
  }
}
# Apply an HTTP status handler to a response and, on success, return both
# the parsed body and the "next page" URL taken from the Link header.
#
# @param response an httr response object
# @param handle status handler; must raise on failure or return FALSE
# @param ... forwarded to httr::content()
# @return list(ct = parsed content, nxt = next-page URL or NULL), or an
#   invisible NULL when `handle` returned FALSE without raising.
http_error_or_content <- function(response,
                                  handle = httr::stop_for_status,
                                  ...) {
  handled <- handle(response)
  if (identical(handled, FALSE)) {
    return(invisible(NULL))
  }
  list(
    ct = httr::content(response, ...),
    nxt = get_next_link(httr::headers(response)$link)
  )
}
#' @importFrom stringr str_replace_all str_split
#' @noRd
# Parse an HTTP "Link" header into a tibble of (link, rel) pairs.
# Entries are comma-separated, e.g.: <url>; rel="next", <url>; rel="last".
#
# BUG FIX: `stringsAsFactors = FALSE` is a data.frame() argument; passed to
# tibble::tibble() it silently became an extra logical column named
# "stringsAsFactors". It has been removed (tibble never creates factors).
get_rel <- function(links) {
  strs <- links %>%
    stringr::str_split(",\\s+") %>%
    getElement(1)
  tibble::tibble(link = strs %>%
                   lapply(stringr::str_replace_all, "\\<(.+)\\>.*", "\\1") %>%
                   unlist(),
                 rel = strs %>%
                   lapply(stringr::str_replace_all, ".+rel=.(\\w+).", "\\1") %>%
                   unlist())
}
#' @importFrom dplyr filter
#' @noRd
# Extract the URL tagged rel="next" from a Link header value.
# Returns NULL when the header is absent; returns character(0) when the
# header exists but carries no "next" relation.
get_next_link <- function(links) {
  if (is.null(links)) {
    return(NULL)
  }
  rels <- get_rel(links)
  getElement(filter(rels, rel == "next"), "link")
}
# Test whether `l` is a list containing at least one list element.
#
# BUG FIX: a stray debugging line (`is.list(l[26]) && ...`) was left as the
# function's final (returned) expression, so the result depended only on
# element 26 of the input. The debug line has been removed.
is.nested.list <- function(l) {
  is.list(l) && any(unlist(lapply(l, is.list)))
}
# TRUE when the vector or list carries a names attribute, FALSE otherwise.
is_named <- function(v) {
  isFALSE(is.null(names(v)))
}
# Heuristic: does a parsed JSON list represent a single record (TRUE) or
# several records (FALSE)? A length-1 list, or a list with no list-valued
# elements, is a single record. Otherwise, sub-lists sharing element names
# with the first sub-list indicate multiple rows of the same record type.
is_single_row <- function(l) {
  has_sublist <- any(unlist(lapply(l, is.list)))
  if (length(l) == 1 || !has_sublist) {
    return(TRUE)
  }
  all_names <- lapply(l, names)
  shares_first <- lapply(all_names, function(nm) any(nm %in% all_names[[1]]))
  !any(unlist(shares_first))
}
# is_single_row <- function(l) {
# if (length(l) == 1 || !any(lapply(l, is.list) %>% unlist())) {
# return(TRUE)
# } else {
# the_lengths <- lapply(l, length) %>% unlist()
# u_length <- unique(the_lengths)
# if (length(u_length) == 1) {
# return(u_length == 1)
# } else {
# multi_cols <- which(the_lengths > 1) %>% unlist()
# return(all(lapply(l[multi_cols], is_named) %>% unlist() &
# !(lapply(l[multi_cols], is.nested.list) %>% unlist())))
# }
# }
# }
# Flatten every cell of a single parsed record (dropping inner names) and
# assemble the result into a one-row tibble; duplicate column names are
# repaired by tibble's "unique" strategy.
format_row <- function(row, ...) {
  cells <- lapply(row, unlist, use.names = FALSE, ...)
  tibble::as_tibble(cells, .name_repair = "unique")
}
#' @importFrom dplyr bind_rows
#' @noRd
# Convert a parsed JSON structure into a flat tibble: a single record is
# first wrapped in a list, then every record is recursively unlisted,
# turned into a one-row tibble via format_row(), and row-bound together.
json_to_flat_df <- function(l) {
  records <- iff(l, is_single_row, list)
  flattened <- lapply(records, unlist, recursive = TRUE)
  bind_rows(lapply(flattened, format_row))
}
# Invoke `fun` with the explicit arguments in `...` plus only those entries
# of `.dots` whose names appear in `.dots_allowed` (by default the formal
# arguments of gitlab(), minus "...", plus api_root/private_token).
# Disallowed entries of `.dots` are silently dropped.
call_filter_dots <- function(fun,
                             .dots = list(),
                             .dots_allowed = c(
                               setdiff(names(formals(gitlab)), "..."),
                               "api_root", "private_token"
                             ),
                             ...) {
  kept <- .dots[intersect(.dots_allowed, names(.dots))]
  do.call(fun, args = c(list(...), kept))
}
|
8312e59b54b044919ac5c29d7bb9a822dd6105d9
|
d493e240277aef7dac95ece4c699a2921218bf8c
|
/R/penepma_geo_hdr_cmt.R
|
dc5d2a13c4221a4b1da00a3882e0e019a55d6111
|
[
"MIT"
] |
permissive
|
jrminter/rpemepma
|
d4471fd39c7107a0555ac8e8bf7aead383ffde64
|
eb9db97bd436855f62f37bea59f4b1a7667cabac
|
refs/heads/master
| 2020-03-23T15:18:09.928804
| 2018-09-21T17:59:45
| 2018-09-21T17:59:45
| 141,736,740
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 354
|
r
|
penepma_geo_hdr_cmt.R
|
#' Create a header line with a comment
#'
#' A helper function to create .geo files
#'
#' @param str_cmt string. Example: "Cylindrical layers - distances all in cm"
#'
#' @return none. Exports by cat to console or file as set previously
#'
#' @export
penepma_geo_hdr_cmt <- function(str_cmt){
  # Emit a PENELOPE-style comment record: "C " followed by the text,
  # terminated by a newline, written to the active output sink.
  cat(sprintf("C %s\n", str_cmt))
}
|
a780a176435cc8a5880486759fbdec0feea84f5a
|
0c49c585d7429e964c5ed0b70075a07909d29ebe
|
/LWeinman data club script 03092020.R
|
94491fdfa6c8dc0db261268b65edfc1b7959ad72
|
[] |
no_license
|
migou0426/workshop-march-09
|
1831826db4eb5364e24fa9a6b1cf053b0c846548
|
9aa5d9c6e781726a5244767f90fbc272ce6ef6ff
|
refs/heads/master
| 2021-03-05T05:44:56.188110
| 2020-03-09T01:25:01
| 2020-03-09T01:25:01
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 6,523
|
r
|
LWeinman data club script 03092020.R
|
library(tidyverse)
###1. Get our data.
####dataset one. Need to do some data cleaning/filtering before we can use it.
nsf<-read.csv("nsf0607_spec.csv")
nsfnet<-filter(nsf, method=="net")
table(nsfnet$site)
nsfnet2<-unite(nsfnet, genus.species, genus, species)
nsfnet.no2007<-droplevels(filter(nsfnet2, year!=2007 & site_type!="reference" & site_type!="matrix"))
length(unique(nsfnet.no2007$site))
###dataset two. some data cleaning/filtering before we can use it.
cig<-read.csv("cig1115_spec.csv")
cig<-droplevels(filter(cig, round!="NULL" & year=="2013"))
cig2<-unite(cig, genus.species, genus, species)
cig3<-unite(cig2, plant_genus_sp, plant_genus, plant_species)
table(cig$site)
unique(cig3$site)
write.csv(cig3, "cig3 for dataclub.csv")
##2. Format data for analysis
##filter to get records for just one site in one dataset
site<-filter(cig3, site=="DR")
##each row in "site" corresponds to one insect specimen. The column "genus.species" indicates
#the species of the insect. The column "plant_genus_sp" indicates the flowers species that the specimen
#was visiting when the specimen was caught. We want to know how many visits each flower species
#at the site recieved from each insect species.
interactions<-site%>%
group_by(genus.species, plant_genus_sp)%>%
summarize(visits=length(uniqueID))
##the below code turns "interactions" into an insect species by plant species matrix
matrix<-spread(interactions, genus.species, as.numeric(visits))
View(matrix)
##Next need to do some formatting to get the matrix into shape for the analysis.
matrix[is.na(matrix)]<-0
matrix2<-data.matrix(matrix)
matrix3<-matrix2[,-1]
row.names(matrix3)<-c(as.matrix(matrix[,1]))
View(matrix3)
#### 3. Used published code (adapted from publicly available function "nestednodf") to get species-level nestedness values for each plant and pollinator species
###at our focal site.
#This first bit sorts our matrix by decreasing row and column fill (number of bees a plant interacts with, number of plants a bee interacts with)
comm<-matrix3
bin.comm <- ifelse(comm > 0, 1, 0)
rfill <- rowSums(bin.comm)
cfill <- colSums(bin.comm)
rgrad <- rowSums(comm)
cgrad <- colSums(comm)
rorder <- order(rfill, rgrad, decreasing = TRUE)
corder <- order(cfill, cgrad, decreasing = TRUE)
comm <- comm[rorder, corder]
rfill <- rfill[rorder]
cfill <- cfill[corder]
#This bit gets some info from our interaction matrix that will be used in the calculation of the WNODF values
#(number of rows and columns, and the "fill" of the matrix).
nr <- NROW(comm)
nc <- NCOL(comm)
fill <- sum(rfill)/prod(dim(comm))
#we then use those values to create two vectors of 0s, each the length of the number of
#unique pairs of rows and columns, respectively. The loops below will replace these zeros with the pairwise
##nestedness values of each pair of rows and each pair of columns, respectively. So "N.paired.rows" will
##ultimately become an object that stores pairwise nestedness of rows(aka pairwise nestedness of plant species),
#and the same for "N.paired.cols" for columns (aka pairwise nestedness of insect species)
N.paired.rows <- numeric(nr * (nr - 1)/2)
N.paired.cols <- numeric(nc * (nc - 1)/2)
###CALCULATE NESTEDNESS OF ALL PAIRS OF ROWS###
#The line of code below is just an empty data frame in which to store nestedness values of each pair of rows i and j,
#plus the total abundance of focal row species in the network.
thegoods.rows<-data.frame(value=NA, row.species.i=NA, row.species.j=NA, row.species.i.abundance=NA)
##this loops through each row (indexed by "i"),
#and calculates the proportion of interactions of each subsequent row "j"
#that are nested within the interactions of the focal row ("i").
counter <- 0
for (i in 1:(nr - 1)) {
#get the ith row. starts with the first row in the matrix.
first <- comm[i, ]
for (j in (i + 1):nr) {
counter <- counter + 1
#if the jth row has higher or equal fill to the ith row OR if the fill of either row is 0, move on to the next value of j
##(the nestedness value is 0)
if (rfill[i] <= rfill[j] || any(rfill[c(i, j)] == 0))
next
#otherwise, get the jth row and do the calculation. j starts with the first row AFTER row i.
second <- comm[j, ]
#this is the actual calculation.
N.paired.rows[counter] <-
sum(first - second > 0 & second > 0)/sum(second > 0)
#store that info in my empty data frame "thegoods.rows"
thegoods.rows<-rbind(
thegoods.rows,
c(N.paired.rows[counter], names(comm[,1][i]), names((comm[,1][j])), rgrad[rorder][i])
)
}
}
#get rid of the empty first row of our dataframe
thegoods.rows<-thegoods.rows[-1,]
####CALCULATE NESTEDNESS OF ALL PAIRS OF COLUMNS
##the same as above, but for each pair of columns.
counter <- 0
thegoods.cols<-data.frame(value=NA, column.species.i=NA, column.species.j=NA, col.species.i.abundance=NA)
for (i in 1:(nc - 1)) {
first <- comm[, i]
for (j in (i + 1):nc) {
counter <- counter + 1
if (cfill[i] <= cfill[j] || any(cfill[c(i, j)] == 0))
next
second <- comm[, j]
N.paired.cols[counter] <- sum(first - second > 0 & second > 0)/sum(second > 0)
thegoods.cols<-rbind(
thegoods.cols,
c(N.paired.cols[counter], names(comm[1,][i]), names((comm[1,][j])), cgrad[corder][i])
)
}
}
thegoods.cols<-thegoods.cols[-1,]
#### 4.
####so we now have all of the pairwise nestedness values for each pollinator (columns) and plant (rows).
### to come up with a nestedness value for each species, let's take the mean of their pairwise values.
thegoods.rows$value<-as.numeric(thegoods.rows$value)
thegoods.cols$value<-as.numeric(thegoods.cols$value)
rmeans<-thegoods.rows%>%
group_by(row.species.i, row.species.i.abundance)%>%
summarize(species.wnodf=mean(value))
rmeans$row.species.i.abundance<-as.numeric(rmeans$row.species.i.abundance)
cmeans<-thegoods.cols%>%
group_by(column.species.i, col.species.i.abundance)%>%
summarize(species.wnodf=mean(value))
cmeans$col.species.i.abundance<-as.numeric(cmeans$col.species.i.abundance)
###5. visualizations
#I can now look at the distribution of species-level nestedness values, and the plot species' nestedness against
##their abundance.
hist(cmeans$species.wnodf)
hist(rmeans$species.wnodf)
ggplot(rmeans, aes(x=row.species.i.abundance,y=species.wnodf ))+
geom_point()
ggplot(cmeans, aes(x=col.species.i.abundance,y=species.wnodf ))+
geom_point()
|
c921e554391d0d288474f5c064de67692b63139a
|
5ae342134b85d1b7a0059ce8695a6836d17f5a12
|
/deconv2two/scr/discreteANM_1.1.R
|
e92156d9e93cc533fb9b928794ec87d8701d1b8f
|
[] |
no_license
|
wenrurumon/non_negative_attribution_model
|
15d5e5b6b54b64cce0942e58c73fc653a37dd607
|
37780b43187697d0ec2f572796c8b91ebcef77f2
|
refs/heads/master
| 2020-06-17T14:10:17.332642
| 2019-10-28T10:10:35
| 2019-10-28T10:10:35
| 74,996,129
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 7,801
|
r
|
discreteANM_1.1.R
|
# Permutation test for the causal direction between two discrete variables
# under the discrete additive-noise model (ANM).
#
# Args:
#   X, Y                   : discrete samples of equal length
#   number_of_permutations : number of random permutations of Y (default 5000)
#   level                  : significance level for the direction decision
#   cycX, cycY             : 1 -> fit the cyclic ANM for that variable,
#                            0 -> fit the non-cyclic ANM
#
# Returns a list with the inferred direction (dir: 1 = X->Y, -1 = Y->X,
# 0 = undecided), the permutation p-value for "no causation" (P_no_causation),
# the fitted regression functions and the raw/permutation p-values.
permdANM <- function(X, Y, number_of_permutations = 5000, level = 0.05,
                     cycX = 1, cycY = 1) {
  # Fit the ANM in both directions on the observed data.
  # display = FALSE is passed explicitly so the verbose branch is never hit.
  output <- fit_both_dir_discrete(X, cycX = cycX, Y, cycY = cycY,
                                  level = level, display = FALSE)
  P_X2Y <- output$p_val_fw
  P_Y2X <- output$p_val_bw
  diff_estimated <- P_Y2X - P_X2Y
  abs_diff <- abs(diff_estimated)
  # Build the permutation null distribution of the p-value difference.
  # Preallocate the result vectors instead of growing them with c() each
  # iteration (the original append pattern is O(n^2) in copies).
  perm_X2Y <- numeric(number_of_permutations)
  perm_Y2X <- numeric(number_of_permutations)
  sizeY <- length(Y)
  for (i in seq_len(number_of_permutations)) {
    Y_perm <- sample(Y, sizeY)
    perm_output <- fit_both_dir_discrete(X, cycX = cycX, Y = Y_perm,
                                         cycY = cycY, level = level,
                                         display = FALSE)
    perm_X2Y[i] <- perm_output$p_val_fw
    perm_Y2X[i] <- perm_output$p_val_bw
  }
  perm_diff <- perm_Y2X - perm_X2Y
  abs_perm_diff <- abs(perm_diff)
  # One-sided tail probabilities of the observed difference; ties are counted
  # with weight 1/2 (mid-p correction), as in the original implementation.
  Top_Y2X <- (sum(perm_diff > diff_estimated) +
                .5 * sum(perm_diff == diff_estimated)) / number_of_permutations
  Top_X2Y <- 1 - Top_Y2X
  dir <- ifelse(Top_Y2X < level, -1, ifelse(Top_X2Y < level, 1, 0))
  # Two-sided permutation p-value for "no preferred causal direction".
  Pc <- (sum(abs_perm_diff > abs_diff) +
           .5 * sum(abs_perm_diff == abs_diff)) / number_of_permutations
  perm_P_X2Y <- (sum(perm_X2Y < P_X2Y) +
                   .5 * sum(perm_X2Y == P_X2Y)) / number_of_permutations
  perm_P_Y2X <- (sum(perm_Y2X < P_Y2X) +
                   .5 * sum(perm_Y2X == P_Y2X)) / number_of_permutations
  list(dir = dir, P_no_causation = Pc, p_val_ind = output$p_val_ind,
       fct_fw = output$fct_fw, fct_bw = output$fct_bw,
       P_X2Y = P_X2Y, P_Y2X = P_Y2X,
       diff_percent_X2Y = Top_X2Y, diff_percent_Y2X = Top_Y2X,
       perm_P_X2Y = perm_P_X2Y, perm_P_Y2X = perm_P_Y2X)
}
########################### source script ##################################
# Fit the discrete ANM in both directions (X -> Y and Y -> X) and test
# independence of the residuals in each direction.
#
# Args:
#   X, Y    : discrete samples of equal length
#   cycX    : 1 -> cyclic ANM when regressing X on Y, 0 -> non-cyclic
#   cycY    : 1 -> cyclic ANM when regressing Y on X, 0 -> non-cyclic
#   level   : significance level used for the decision summary
#   display : single logical; TRUE prints a verbal summary of the decision.
#             BUG FIX: the previous default c(FALSE, TRUE) made
#             `if (display == TRUE)` compare against a length-2 vector, which
#             is an error in R >= 4.2 (and silently used only the first
#             element before that). The default is now the scalar FALSE.
#
# Returns a list with the fitted functions (fct_fw, fct_bw), the residual
# independence p-values per direction (p_val_fw, p_val_bw) and the p-value
# of the chi-squared independence test between X and Y (p_val_ind).
fit_both_dir_discrete <- function(X, cycX, Y, cycY, level, display = FALSE) {
  # Forward direction: regress Y on X with the requested model variant.
  if (cycY == 0) {
    fit.output <- fit_discrete(X, Y, level)
    fct_fw <- fit.output$fct
    p_val_fw <- fit.output$p_val
  } else if (cycY == 1) {
    fit.output <- fit_discrete_cyclic(X, Y, level)
    fct_fw <- fit.output$fct
    p_val_fw <- fit.output$p_val
  }
  # Backward direction: regress X on Y.
  if (cycX == 0) {
    fit.output <- fit_discrete(Y, X, level)
    fct_bw <- fit.output$fct
    p_val_bw <- fit.output$p_val
  } else if (cycX == 1) {
    fit.output <- fit_discrete_cyclic(Y, X, level)
    fct_bw <- fit.output$fct
    p_val_bw <- fit.output$p_val
  }
  options(warn = -1)
  # Independence of X and Y themselves; a constant variable is treated as
  # independent of the other (p-value 1) since chisq.test would fail on it.
  p_val_ind <- ifelse(length(unique(Y)) == 1 | length(unique(X)) == 1, 1,
                      chisq.test(Y, X, correct = FALSE)$p.value)
  if (isTRUE(display)) {
    # p_val_fw
    if (p_val_fw > level) {
      cat("fct_fw", fct_fw, "\n")
      cat("ANM could be fitted in the direction X->Y using fct_fw. \n")
    }
    # p_val_bw
    if (p_val_bw > level) {
      cat("fct_bw", fct_bw, "\n")
      cat("ANM could be fitted in the direction Y->X using fct_bw. \n")
    }
    if (p_val_bw > level & p_val_fw < level) {
      cat("Only one ANM could be fit. The method infers Y->X. \n")
    } else if (p_val_bw < level & p_val_fw > level) {
      cat("Only one ANM could be fit. The method infers X->Y. \n")
    } else if (p_val_bw < level & p_val_fw < level) {
      cat("No ANM could be fit. The method does not know the causal direction. \n")
    } else {
      cat("Both ANM could be fit. The method does not know the causal direction. \n")
    }
    # are X and Y independent?
    if (p_val_ind > level) {
      cat("But note that X and Y are considered to be independent anyway. (Thus no causal relation) \n")
    }
  }
  options(warn = 0)
  list(fct_fw = fct_fw, fct_bw = fct_bw, p_val_fw = p_val_fw,
       p_val_bw = p_val_bw, p_val_ind = p_val_ind)
}
#####################
# cyclic
# Fit a cyclic discrete ANM regressing Y on X.
# The regression function fct maps each distinct X value to a Y value; the
# residuals eps are taken modulo the Y range (cyclic noise). The function is
# accepted when the residuals are independent of X (chi-squared test).
#
# Args:
#   X, Y  : discrete samples of equal length
#   level : significance level for the residual-independence test
#
# Returns list(fct, p_val): the fitted function (one value per unique X,
# in the row order of the contingency table) and the independence p-value.
#
# NOTE(review): relies on descr::CrossTable (non-stdlib) and temporarily
# disables warnings for the chi-squared tests on sparse tables.
fit_discrete_cyclic<-function(X,Y,level){
options(warn=-1)
require(descr)
# parameter
# num_iter: max number of greedy refinement sweeps over the X values
# num_pos_fct: number of alternative candidate Y values tried per X value
num_iter=10
num_pos_fct=min(max(Y)-min(Y),10)
# rescaling
# X_new takes values from 1 ... X_new_max
# Y_values are everything between Y_min and Y_max
X_values=unique(X)
Y_values=seq(min(Y),max(Y),by=1)
# Degenerate case: a constant X or constant Y range admits a trivial fit.
if (length(X_values)==1|length(Y_values)==1){
fct=rep(1,length(X_values))*Y_values[1]
p_val=1
}else{
# Contingency table of X vs Y; each Y_value is appended once (paired with
# an NA in X) so every possible Y level appears as a column.
p<-CrossTable(c(X,rep(NA,length(Y_values))),c(Y,Y_values),prop.chisq = FALSE)$t
fct=c()
cand=list()
# Initial fit: for each X value pick the most frequent Y (the mode of its
# row), after smoothing the counts so nearby columns are slightly favored.
for (i in 1:length(X_values)){
b=order(p[i,])
for (k in 1:ncol(p)){
# +1 for the modal column, +1/(2*distance) for all others.
p[i,k]=ifelse(k==b[length(b)],p[i,k]+1,p[i,k]+1/(2*abs(k-b[length(b)])))
}
b=order(p[i,])
# cand[[i]] ranks the Y candidates for this X value (best last).
cand[[i]]=b
fct=c(fct,Y_values[b[length(b)]])
}
# Map each X observation to its row index in the table.
X_new=X
for (i in 1:nrow(p)){
X_new[X==rownames(p)[i]]=i
}
yhat=fct[X_new]
# Cyclic residuals: differences wrap around the Y range.
eps=(Y-yhat)%%(max(Y)-min(Y)+1)
p_val=ifelse((length(unique(eps))==1),1,chisq.test(eps,X,correct=FALSE)$p.value)
# correct=TRUE as default; if correct=FALSE, completely consistant to original MATLAB scripts
# Greedy refinement: while the residuals still depend on X, sweep the X
# values in random order and, for each, try the next-best Y candidates,
# keeping the choice that maximizes the independence p-value.
i=0
while(p_val<level & i<num_iter){
for (j_new in sample.int(length(X_values))){
pos_fct=list()
p_val_comp<-p_val_comp2<-c()
for (j in 1:(num_pos_fct+1)){
# Candidate j replaces fct[j_new] by the j-th best Y value for j_new.
pos_fct[[j]]=fct
pos_fct[[j]][j_new]=Y_values[cand[[j_new]][length(cand[[j_new]])-(j-1)]]
yhat=pos_fct[[j]][X_new]
eps=(Y-yhat)%%(max(Y)-min(Y)+1)
if (length(unique(eps))==1){
# Constant residuals: perfectly independent of X.
p_val_comp=c(p_val_comp,1)
p_val_comp2=c(p_val_comp2,0)
}else{
chi_sq=chisq.test(eps,X,correct=FALSE)
p_val_comp=c(p_val_comp,chi_sq$p.value)
p_val_comp2=c(p_val_comp2,chi_sq$statistic)
}
}
aa=max(p_val_comp)
j_max=which(p_val_comp==aa)
# When all p-values are tiny, fall back to the smallest test statistic.
if (aa<exp(-3)){
j_max=which(p_val_comp2==min(p_val_comp2))
}
fct=pos_fct[[min(j_max)]]
yhat=fct[X_new]
eps=(Y-yhat)%%(max(Y)-min(Y)+1)
p_val=ifelse((length(unique(eps))==1),1,chisq.test(eps,X,correct=FALSE)$p.value)
}
i=i+1
}
}
options(warn=0)
list(fct=fct,p_val=p_val)
}
###################
# non_cyclic
# Fit a non-cyclic discrete ANM regressing Y on X.
# Same greedy procedure as fit_discrete_cyclic, but residuals are plain
# differences Y - yhat (no modulo), and the fitted function is re-centered
# at the end by the rounded mean residual.
#
# Args:
#   X, Y  : discrete samples of equal length
#   level : significance level for the residual-independence test
#
# Returns list(fct, p_val): the fitted function (one value per unique X,
# in the row order of the contingency table) and the independence p-value.
#
# NOTE(review): relies on descr::CrossTable (non-stdlib) and temporarily
# disables warnings for the chi-squared tests on sparse tables.
fit_discrete<-function(X,Y,level){
options(warn=-1)
require(descr)
# parameter
# num_iter: max number of greedy refinement sweeps over the X values
# num_pos_fct: number of alternative candidate Y values tried per X value
num_iter=10
num_pos_fct=min(max(Y)-min(Y),20)
# rescaling
# X_new takes values from 1 ... X_new_max
# Y_values are everything between Y_min and Y_max
X_values=unique(X)
Y_values=seq(min(Y),max(Y),by=1)
# Degenerate case: a constant X or constant Y range admits a trivial fit.
if (length(X_values)==1|length(Y_values)==1){
fct=rep(1,length(X_values))*Y_values[1]
p_val=1
}else{
# Contingency table of X vs Y; each Y_value is appended once (paired with
# an NA in X) so every possible Y level appears as a column.
p<-CrossTable(c(X,rep(NA,length(Y_values))),c(Y,Y_values),prop.chisq = FALSE)$t
fct=c()
cand=list()
# Initial fit: for each X value pick the most frequent Y (the mode of its
# row), after smoothing the counts so nearby columns are slightly favored.
for (i in 1:length(X_values)){
b=order(p[i,])
for (k in 1:ncol(p)){
# +1 for the modal column, +1/(2*distance) for all others.
p[i,k]=ifelse(k==b[length(b)],p[i,k]+1,p[i,k]+1/(2*abs(k-b[length(b)])))
}
b=order(p[i,])
# cand[[i]] ranks the Y candidates for this X value (best last).
cand[[i]]=b
fct=c(fct,Y_values[b[length(b)]])
}
# the following script more convenient compared to MATLAB
# Map each X observation to its row index in the table.
X_new=X
for (i in 1:nrow(p)){
X_new[X==rownames(p)[i]]=i
}
yhat=fct[X_new]
eps=Y-yhat
if (length(unique(eps))==1){
# Constant residuals mean Y is a deterministic function of X.
cat("Warning!!there is a deterministic relation between X and Y \n")
p_val=1
}else{
p_val=chisq.test(eps,X,correct=FALSE)$p.value
}
# Greedy refinement: while the residuals still depend on X, sweep the X
# values in random order and, for each, try the next-best Y candidates,
# keeping the choice that maximizes the independence p-value.
i=0
while(p_val<level & i<num_iter){
for (j_new in sample.int(length(X_values))){
pos_fct=list()
p_val_comp<-p_val_comp2<-c()
for (j in 1:(num_pos_fct+1)){
# Candidate j replaces fct[j_new] by the j-th best Y value for j_new.
pos_fct[[j]]=fct
pos_fct[[j]][j_new]=Y_values[cand[[j_new]][length(cand[[j_new]])-(j-1)]]
yhat=pos_fct[[j]][X_new]
eps=Y-yhat
if (length(unique(eps))==1){
# Constant residuals: perfectly independent of X.
p_val_comp=c(p_val_comp,1)
p_val_comp2=c(p_val_comp2,0)
}else{
chi_sq=chisq.test(eps,X,correct=FALSE)
p_val_comp=c(p_val_comp,chi_sq$p.value)
p_val_comp2=c(p_val_comp2,chi_sq$statistic)
}
}
aa=max(p_val_comp)
j_max=which(p_val_comp==aa)
# When all p-values are tiny, fall back to the smallest test statistic.
if (aa<exp(-3)){
j_max=which(p_val_comp2==min(p_val_comp2))
}
fct=pos_fct[[min(j_max)]]
yhat=fct[X_new]
eps=Y-yhat
p_val=ifelse((length(unique(eps))==1),1,chisq.test(eps,X,correct=FALSE)$p.value)
}
i=i+1
}
# Re-center the fitted function by the rounded mean residual.
fct=fct+round(mean(eps))
}
options(warn=0)
list(fct=fct,p_val=p_val)
}
|
890f95dc25fe2472522ce4ee17d5f6f4e8ad2dbb
|
517733befd7596013a64255d8d597259e39603e8
|
/sejmRP/man/votes_match_deputies_ids.Rd
|
6531cd15d1c05efe04cc727c266eedc6b22fdd4f
|
[] |
no_license
|
mi2-warsaw/sejmRP
|
5740a66d39da6fcb7ebf6fcb1171d53cdb44d5cc
|
4c7a629d375be9c22add22d3b65a1f425746a3a9
|
refs/heads/master
| 2020-05-21T04:29:07.272159
| 2017-09-06T21:31:06
| 2017-09-06T21:31:06
| 40,239,403
| 20
| 4
| null | 2017-09-07T17:32:18
| 2015-08-05T10:29:34
|
HTML
|
UTF-8
|
R
| false
| true
| 1,739
|
rd
|
votes_match_deputies_ids.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/votes_match_deputies_ids.R
\name{votes_match_deputies_ids}
\alias{votes_match_deputies_ids}
\title{Matching deputies to theirs' ids}
\usage{
votes_match_deputies_ids(dbname, user, password, host, page,
nr_term_of_office = 8, windows = .Platform$OS.type == 'windows')
}
\arguments{
\item{dbname}{name of database}
\item{user}{name of user}
\item{password}{password of database}
\item{host}{name of host}
\item{page}{club's voting's results page}
\item{nr_term_of_office}{number of term of office of Polish Diet; default: 8}
\item{windows}{information of used operation system;
default: .Platform$OS.type == 'windows'}
}
\value{
data frame with three columns: deputy, vote, id
}
\description{
Function \code{votes_match_deputies_ids} matches deputies from voting's results
page to theirs' ids from \emph{deputies} table.
}
\details{
Function \code{votes_match_deputies_ids} matches deputies from voting's results
page to theirs' ids from \emph{deputies} table. The result of this function is
a data frame with deputies' data, ids and votes. Because of encoding issue
on Windows operation system, you need to select if you use Windows.
Example of page with voting's results of PO club:
http://www.sejm.gov.pl/Sejm7.nsf/agent.xsp?
symbol=klubglos&IdGlosowania=37494&KodKlubu=PO
}
\note{
All information is stored in PostgreSQL database.
}
\examples{
\dontrun{
page <- paste0('http://www.sejm.gov.pl/Sejm7.nsf/agent.xsp?',
'symbol=klubglos&IdGlosowania=37494&KodKlubu=PO')
votes_match_deputies_ids(dbname, user, password, host, page, 7, TRUE)
votes_match_deputies_ids(dbname, user, password, host, page, 7, FALSE)}
}
\author{
Piotr Smuda
}
|
4057b80386670de783bd501e633db2832e66ac44
|
5fcc3f8421fa41dbb443204d206961ab18b1d45e
|
/man/savePNG.Rd
|
6505929259a44c7c9c8c3a70e014e9c40f6a9e02
|
[
"MIT"
] |
permissive
|
fengweijp/RCyjs
|
192f369e1024661686bc10b19578587824660f1c
|
0f22b40382b63f4882d7204b54b650bbfbb59333
|
refs/heads/master
| 2021-10-26T16:10:46.523267
| 2019-04-13T18:38:52
| 2019-04-13T18:38:52
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 797
|
rd
|
savePNG.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/RCyjs-class.R
\docType{methods}
\name{savePNG,RCyjs-method}
\alias{savePNG,RCyjs-method}
\alias{savePNG}
\title{savePNG}
\usage{
\S4method{savePNG}{RCyjs}(obj, filename)
}
\arguments{
\item{obj}{an RCyjs instance}
\item{filename}{a character string}
}
\value{
no return value
}
\description{
\code{savePNG} write current cytoscape view, at current resolution, to a PNG file.
}
\examples{
if(interactive()){
rcy <- RCyjs(title="layouts", graph=createTestGraph(nodeCount=20, edgeCount=20))
style.filename <- system.file(package="RCyjs", "extdata", "sampleStyle1.js");
loadStyleFile(rcy, style.filename)
layout(rcy, "cose")
fit(rcy)
filename <- tempfile(fileext=".png")
savePNG(rcy, filename)
}
}
|
93bd2731465dc4a594200a78ee85a2f2bd5a8e8f
|
2a7e77565c33e6b5d92ce6702b4a5fd96f80d7d0
|
/fuzzedpackages/Rfast/man/matrnorm.Rd
|
f7783abca9571a532fe8a50f26a6afe551e2f222
|
[] |
no_license
|
akhikolla/testpackages
|
62ccaeed866e2194652b65e7360987b3b20df7e7
|
01259c3543febc89955ea5b79f3a08d3afe57e95
|
refs/heads/master
| 2023-02-18T03:50:28.288006
| 2021-01-18T13:23:32
| 2021-01-18T13:23:32
| 329,981,898
| 7
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,391
|
rd
|
matrnorm.Rd
|
\name{Generates random values from a normal and puts them in a matrix}
\alias{matrnorm}
\title{
Generates random values from a normal and puts them in a matrix
}
\description{
Generates random values from a normal and puts them in a matrix.
}
\usage{
matrnorm(n, p)
}
\arguments{
\item{n}{
The sample size, the number of rows the matrix will have.
}
\item{p}{
The dimensionality of the data, the number of columns of the matrix.
}
}
\details{
How many times did you have to simulate data from a (standard) normal distribution in order to test
something? For example, in order to see the speed of \code{\link{logistic_only}}, one needs to generate
a matrix with predictor variables. The same is true for other similar functions. In \code{\link{sftests}},
one would like to examine the type I error of this test under the null hypothesis.
By using the Ziggurat method of generating standard normal variates, this function is really fast when you
want to generate big matrices.
}
\value{
An n x p matrix with data simulated from a standard normal distribution.
}
\author{
Michail Tsagris
R implementation and documentation: Michail Tsagris <mtsagris@yahoo.gr>
}
%\note{
%% ~~further notes~~
%}
\seealso{
\code{\link{rvmf}, \link{Rnorm}, \link{rmvnorm}, \link{rvonmises}
}
}
\examples{
x <- matrnorm(100, 100)
}
|
60c8cd1eca9405037836eec549c2122f48e176f7
|
9a56f5d195315a0b297e99c89da8470913052e53
|
/fin_opt.R
|
d6613c98265956f1bf246974acafb06433c70027
|
[] |
no_license
|
mollyrubin/spring1orange6
|
f546f1bb333507682c6982149fb05a1c8a1ccb1b
|
573e3fbef5a97fd2b65d1192ef077796d2cc4534
|
refs/heads/master
| 2020-04-15T15:09:28.682701
| 2019-02-25T01:42:56
| 2019-02-25T01:42:56
| 164,782,538
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 14,649
|
r
|
fin_opt.R
|
#----------------------------------#
# Financial and Opt HW #
#----------------------------------#
if(!require('graphics'))install.packages('graphics')
if(!require('quantmod'))install.packages('quantmod')
if(!require('TTR'))install.packages('TTR')
if(!require('ks'))install.packages('ks')
if(!require('scales'))install.packages('scales')
if(!require('forecast'))install.packages('forecast')
if(!require('aTSA'))install.packages('aTSA')
if(!require('ccgarch'))install.packages('ccgarch')
if(!require('fGarch'))install.packages('fGarch')
if(!require('rugarch'))install.packages('rugarch')
if(!require('stringr'))install.packages('stringr')
if(!require('tidyverse'))install.packages('tidyverse')
if(!require('quadprog'))install.packages('quadprog')
library(graphics)
library(quantmod)
library(TTR)
library(ks)
library(scales)
library(forecast)
library(aTSA)
library(ccgarch)
library(fGarch)
library(rugarch)
library(stringr)
library(tidyverse)
library(quadprog)
#************************************************************************
# Part 1
#************************************************************************
#################################################
#STEP 1: GATHER STOCK DATA FOR FULL DIJA PORTFOLIO
#################################################
# Load Stock Data
tickers = c("MMM", "AXP", "AAPL", "BA", "CAT",
"CVX", "CSCO", "KO", "DIS", "DWDP",
"XOM", "GS", "HD", "IBM", "INTC",
"JNJ", "JPM", "MCD", "MRK", "MSFT",
"NKE", "PFE", "PG", "TRV", "UTX",
"UNH", "VZ", "V", "WMT", "WBA", "DJI")
getSymbols(tickers)
# replace missing values for V with NA
nas <- rep(NA, length(MMM$MMM.Close) - length(V$V.Close))
new_v <- c(nas, V$V.Close)
# create dataframe of all stocks
stocks <- data.frame(
MMM = MMM$MMM.Close,
AXP = AXP$AXP.Close,
AAPL = AAPL$AAPL.Close,
BA = BA$BA.Close,
CAT = CAT$CAT.Close,
CVX = CVX$CVX.Close,
CSCO = CSCO$CSCO.Close,
KO = KO$KO.Close,
DIS = DIS$DIS.Close,
DWDP = DWDP$DWDP.Close,
XOM = XOM$XOM.Close,
GS = GS$GS.Close,
HD = HD$HD.Close,
IBM = IBM$IBM.Close,
INTC = INTC$INTC.Close,
JNJ = JNJ$JNJ.Close,
JPM = JPM$JPM.Close,
MCD = MCD$MCD.Close,
MRK = MRK$MRK.Close,
MSFT = MSFT$MSFT.Close,
NKE = NKE$NKE.Close,
PFE = PFE$PFE.Close,
PG = PG$PG.Close,
TRV = TRV$TRV.Close,
UTX = UTX$UTX.Close,
UNH = UNH$UNH.Close,
VZ = VZ$VZ.Close,
WMT = WMT$WMT.Close,
WBA = WBA$WBA.Close,
V.Close = new_v)
#ROC = Rate of Change calculation, appending returns for each stock to main dataset
# looping through each stock
for(stock_name in names(stocks)){
# getting rid of .close in stock name
new_name <- str_split(stock_name, "[.]")[[1]][1]
# adding _r to represent a return
new_name <- paste0(new_name, "_r")
# calculating return and adding returns to dataframe
stocks[new_name] <- ROC( stocks[, stock_name])
}
# write stock values and returns to csv
write.csv(stocks, file = "../../../fin_opt_project/stocks.csv")
#####################################################
#STEP 2: Rank stocks by most significant arch effects
#####################################################
# creating empty list and vectors to add in test results
arch_effects <- list()
resid_r2 <- numeric()
resid_test_stat <- numeric()
resid_p <- numeric()
stock_names <- character()
# loop through all columns in the dataframe
i <- 1
for(stock_name in names(stocks)){
# only test for arch effects on the returns
if( str_sub( stock_name, nchar(stock_name) - 1) == "_r"){
# create new name that gets rid of _r
new_name <- str_sub(stock_name, 1, nchar(stock_name) - 2)
# get rid of leading and trailing NA's
stock_ts <- na.trim( as.vector( stocks[, stock_name] ) )
# create a time period variable
ts <- 1:length(stock_ts)
# OLS model for the returns over time
lin_mod <- lm(stock_ts ~ ts)
# squaring residuals from the model
squared_resids <- lin_mod$residuals ^ 2
# lag the residuals one period
lag_resids <- squared_resids[-length(squared_resids)]
squared_resids <- squared_resids[-1]
# modeling residuals with 1 lag residuals as requested
resid_mod <- lm(squared_resids ~ lag_resids)
# getting R squared from the lagged residuals model
r_square <- summary(resid_mod)$r.squared
# calculating LM test stat
test_stat <- r_square * ( length( squared_resids ) )
# getting p-value of LM test stat from chi sqaured distribution
p_value <- pchisq(test_stat, 1, lower.tail = F)
# adding test stat and p-values to vectors
resid_r2[i] <- r_square
resid_test_stat[i] <- test_stat
resid_p[i] <- p_value
stock_names[i] <- new_name
# adding arch effects result to list
arch_effects[[new_name]] <- arch.test( arima(stock_ts[-1], order = c(0,0,0)), output = F )
i = i + 1
}
}
# creating dataframe of the arch effects
arch_effects_df <- data.frame(
stock = stock_names,
resid_r2 = resid_r2,
test_stat = resid_test_stat,
p_value = resid_p
)
# sorting in descending order by LM test statistic
arch_effects_df <- arch_effects_df %>% arrange(desc(test_stat))
# getting the top 5 most significant
top_5_stocks <- as.character(arch_effects_df$stock[1:5])
# creating vector to index stocks df for only returns
# true if _r false if not
returns <- vector(length = ncol(stocks))
index <- 1
for(stock_name in names(stocks)){
returns[index] <- str_sub( stock_name, nchar(stock_name) - 1) == "_r"
index <- index + 1
}
# subsetting stocks df to only get returns
stock_returns <- stocks[, returns]
# creating vector to index stock_returns for only top 5 most significant stocks
# true if top 5 false if not
top_5 <- vector(length = ncol(stock_returns))
index <- 1
for(stock_name in names(stock_returns)){
current_name <- str_sub(stock_name, 1, nchar(stock_name) - 2)
top_5[index] <- current_name %in% top_5_stocks
index <- index + 1
}
# subsetting stocks df to only have top 5 most significant stocks
top_5_returns <- stock_returns[, top_5]
#####################################################
#STEP 3: Model top 5 stocks and pic best model by AIC
#####################################################
# creating empty dataframe to include the AIC of the 4 possible models for all 5 stocks
model_evalutation <- data.frame(
stock_name = top_5_stocks,
garch_norm_aic = numeric( length = length(top_5_stocks)),
garch_t_aic = numeric(length = length(top_5_stocks)),
q_garch_norm_aic = numeric(length = length(top_5_stocks)),
q_garch_t_aic = numeric(length = length(top_5_stocks)),
jb_test_p = numeric(length = length(top_5_stocks)),
stringsAsFactors = F
)
# looping through all 5 stocks
for(stock in names(top_5_returns) ){
# getting rid of _r
name <- str_sub(stock, 1, nchar(stock) - 2)
# formal test for normality
jb_test_temp <- jb.test( top_5_returns[-1, stock])
# normal garch model fit
garch_norm <- garchFit(formula = ~ garch(1,1), data=top_5_returns[-1, stock],
cond.dist = "norm", include.mean = FALSE)
# t garch model fit
garch_t <- garchFit(formula = ~ garch(1,1), data=top_5_returns[-1, stock],
cond.dist = "std", include.mean = FALSE)
# skewed normal garch model fit
q_garch_norm <- garchFit(formula= ~ garch(1,1), data=top_5_returns[-1, stock],
cond.dist="snorm", include.mean = FALSE)
# skewed t garch model fit
q_garch_t <- garchFit(formula= ~ garch(1,1), data=top_5_returns[-1, stock],
cond.dist="sstd", include.mean = FALSE)
# adding AIC & jb.test p-value to the model_evaluation df
model_evalutation[ model_evalutation$stock_name == name, "garch_norm_aic"] <- garch_norm@fit$ics[1]
model_evalutation[ model_evalutation$stock_name == name, "garch_t_aic"] <- garch_t@fit$ics[1]
model_evalutation[ model_evalutation$stock_name == name, "q_garch_norm_aic"] <- q_garch_norm@fit$ics[1]
model_evalutation[ model_evalutation$stock_name == name, "q_garch_t_aic"] <- q_garch_t@fit$ics[1]
model_evalutation[ model_evalutation$stock_name == name, "jb_test_p"] <- jb_test_temp[2]
}
#####################################################
#STEP 3: Forecast next 5 days of volatility
#####################################################
# determining best model for each stock
possible_models <- names(model_evalutation)[2:5]
for(i in 1:nrow(model_evalutation)){
model_scores <- model_evalutation[i, 2:5]
best_mod_index <- min(model_scores) == model_scores
best_model <- possible_models[best_mod_index]
model_evalutation[i, "best_model"] <- str_sub(best_model, 1, nchar(best_model) - 4)
}
model_predictions <- data.frame(
date = 1:5
)
model_parameters <- data.frame(
stock_name = character( length(top_5_stocks)),
omega = numeric( length(top_5_stocks)),
alpha1 = numeric( length(top_5_stocks)),
beta1 = numeric( length(top_5_stocks)),
stringsAsFactors = F
)
for(i in 1:nrow(model_evalutation)){
current_stock <- as.character(model_evalutation[i, "stock_name"])
current_stock_r <- paste0(current_stock, "_r")
best_model <- model_evalutation[i, "best_model"]
if(best_model == "garch_norm"){
model_dist <- "norm"
} else if(best_model == "garch_t"){
model_dist <- "std"
} else if(best_model == "q_garch_norm"){
model_dist <- "snorm"
} else if(best_model == "q_garch_t"){
model_dist <- "sstd"
} else {
model_dist <- "norm"
print("some error occured")
}
current_stock_pred <- paste0(current_stock, "_preds")
print(current_stock_pred)
model_fit <- garchFit(formula = ~ garch(1,1), data=top_5_returns[-1, current_stock_r],
cond.dist = model_dist, include.mean = FALSE)
preds <- predict(model_fit, n.ahead = 5)
model_predictions[, current_stock_pred] <- preds$standardDeviation ^ 2
model_parameters[i, "stock_name" ] <- current_stock
model_parameters[i, "omega"] <- model_fit@fit$coef[1]
model_parameters[i, "alpha1"] <- model_fit@fit$coef[2]
model_parameters[i, "beta1"] <- model_fit@fit$coef[3]
}
# sorting by largest shock
sorted_alphas <- model_parameters %>% arrange(desc(alpha1))
# sorting by longest shock
sorted_betas <- model_parameters %>% arrange(desc(beta1))
# looking at the stocks with top 5 arch effects
View(arch_effects_df)
# looking at model evaluation stats
View(model_evalutation)
# model parameters sorted by alpha
View(sorted_alphas)
# model paramters sorted by beta
View(sorted_betas)
#************************************************************************
# Part 2
#************************************************************************
#################################################
#STEP 1: Portfolio Optimization
#################################################
# removing first row of na's
top_5_returns1 <- top_5_returns[-1,]
# getting rid of date from model predictions df
variance_preds <- model_predictions[, -1]
historical_median_returns <- sapply(top_5_returns1, median)
predicted_variance <- sapply(variance_preds, median)
historical_cov <- cov(top_5_returns1)
# replacing historical variance with predicted variance
cov_matrix <- historical_cov
diag(cov_matrix) <- predicted_variance
#-----------------------------------------------------------
# using LaBarr's Code
#-----------------------------------------------------------
f <- function(x) x[1]*cov_matrix[1,1]*x[1] + x[1]*cov_matrix[1,2]*x[2] + x[1]*cov_matrix[1,3]*x[3] + x[1]*cov_matrix[1,4]*x[4] + x[1]*cov_matrix[1,5]*x[5] +
x[2]*cov_matrix[2,1]*x[1] + x[2]*cov_matrix[2,2]*x[2] + x[2]*cov_matrix[2,3]*x[3] + x[2]*cov_matrix[2,4]*x[4] + x[2]*cov_matrix[2,5]*x[5] +
x[3]*cov_matrix[3,1]*x[1] + x[3]*cov_matrix[3,2]*x[2] + x[3]*cov_matrix[3,3]*x[3] + x[3]*cov_matrix[3,4]*x[4] + x[3]*cov_matrix[3,5]*x[5] +
x[4]*cov_matrix[4,1]*x[1] + x[4]*cov_matrix[4,2]*x[2] + x[4]*cov_matrix[4,3]*x[3] + x[4]*cov_matrix[4,4]*x[4] + x[4]*cov_matrix[4,5]*x[5] +
x[5]*cov_matrix[5,1]*x[1] + x[5]*cov_matrix[5,2]*x[2] + x[5]*cov_matrix[5,3]*x[3] + x[5]*cov_matrix[5,4]*x[4] + x[5]*cov_matrix[5,5]*x[5]
theta <- c(0.96,0.01,0.01,0.01,0.005)
ui <- rbind(c(1,0,0,0,0),
c(0,1,0,0,0),
c(0,0,1,0,0),
c(0,0,0,1,0),
c(0,0,0,0,1),
c(-1,-1,-1,-1,-1),
c(1,1,1,1,1),
c(historical_median_returns))
ci <- c(0,
0,
0,
0,
0,
-1,
0.99,
0.0005) # 5.04% Annual Return Spread to Daily #
port_opt <- constrOptim(theta = theta, f = f, ui = ui, ci = ci, grad = NULL)
port_weights_h <- port_opt$par
port_var_h <- port_opt$value
names(port_weights_h) <- names(historical_median_returns)
final_h <- round(port_weights_h*100,2)
#-----------------------------------------------------------
# using Simmon's Code
#-----------------------------------------------------------
mean.vec <- historical_median_returns
#cov.vec <- cov_matrix
cov.vec <- cov(top_5_returns1)
Dmat <- 2*cov.vec
dvec <- rep(0,5)
Amat <- t(matrix(c(1,1,1,1,1,mean.vec),nrow=2,byrow=T))
bvec <- c(1,0.0005)
meq <- 1
ln.model <- solve.QP(Dmat,dvec,Amat,bvec,meq)
ln.names <- names(historical_median_returns)
names(ln.model$solution)=ln.names
ln.model$solution
ln.model$value
ln.model$solution
ln.model$value
################################
#Efficient Frontier
################################
param=seq(0.0001,0.0007, by=0.000001)
eff.front.weight=matrix(nrow=length(param),ncol=length(mean.vec))
eff.front.return=vector(length=length(param))
eff.front.risk=param
for (i in 1:length(param)){
bvec=c(1,param[i])
ln.model=solve.QP(Dmat,dvec,Amat,bvec,meq)
eff.front.return[i]=sum(ln.model$solution*mean.vec)
eff.front.risk[i]=sqrt(ln.model$value)
eff.front.weight[i,]=ln.model$solution
}
plot(eff.front.risk,eff.front.return,type='l')
|
895a64fc18c3a74984579678ff87281d82c033e4
|
d85a556a2693c0ac9c4dcc3a908c3c921e95835d
|
/rcode/ivano_src_avtime.R
|
23186beb124e3466957bad9767b9569b9f435c09
|
[] |
no_license
|
ndvietleti/ieee_052017
|
8e2c7275ff839e25c6c9914abfeb7fee9e0438fb
|
6c78fcc86659352d0a61697a668707b5bf6d193e
|
refs/heads/master
| 2021-01-20T16:38:51.695044
| 2017-05-10T09:38:35
| 2017-05-10T09:38:35
| 90,846,165
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,689
|
r
|
ivano_src_avtime.R
|
# Per-day analysis of inter-event times: for each of 15 days, read 144
# ten-minute session files, compute the mean inter-event interval per file,
# normalize, estimate a density, and fit a two-component gamma mixture.
library(data.table)
library(doParallel)
library(minpack.lm)
library(ggplot2)
library(scales)
library(pracma)
require(mixtools)
fdate <- c(20170131, 20170201, 20170202, 20170203, 20170204,
           20170205, 20170206, 20170207, 20170208, 20170209,
           20170210, 20170211, 20170212, 20170213, 20170214)
n = 15
for (i in 1:n) {
  print(paste0('date ',i,': ',fdate[i]))
  fyear <- substr(fdate[i],1,4)
  fmonth <- substr(fdate[i],5,6)
  fday <- substr(fdate[i], 7, 8)
  # 144 timestamps = one per 10-minute window, formatted HHMM.
  times <- gsub(":", "", substr(seq(ISOdatetime(fyear,fmonth,fday,0,0,0), ISOdatetime(fyear,fmonth,fday,23,50,0), length.out=144), 12, 16))
  timestamp <- paste0(fdate[i],"_",times)
  infile <- paste0("../data_ses/src_ses/D",substr(timestamp, 1, 8) ,"/ses_D", timestamp, ".txt", sep="")
  fnum <- 144
  avtime <- numeric(fnum)
  for (j in 1:fnum) {
    if (file.size(infile[j])>0){
      data <- fread(infile[j])
      # Mean gap between consecutive event times in column V1.
      var1 <- diff(data$V1)
      avtime[j] <- mean(var1)
    } else{
      # Empty file: carry forward the previous window's value.
      # NOTE(review): this errors if the very first file (j == 1) is empty.
      avtime[j] <- avtime[j-1]
    }
  }
  # Normalized event rate relative to the daily mean interval.
  nvar <- mean(avtime)/avtime
  #co <- sqrt(var(nvar))/mean(nvar)
  #print(paste0('Coef. of var: ',round(co, 3)))
  p <- density(nvar,from = 0, to = 3, n=300)
  # Renormalize the kernel density so it integrates to 1 on [0, 3].
  p$y <- p$y/trapz(p$x,p$y)
  out <- data.frame(x=p$x,y=p$y)
  write.table(out,file = paste0('graph/2/data/date/',fdate[i],'.txt'), col.names = F, row.names = F)
  # Accumulate per-day densities column-wise for later averaging.
  if (i==1) {
    nvar1 <- nvar
    pax <- p$x
    pa <- p$y
  } else {
    pa <- cbind(pa,p$y)
    nvar1 <- cbind(nvar1,nvar)
  }
  if (i==1){
    pl <- ggplot()
    pl <- pl+geom_point(data=out,aes(x=x,y=y),shape=1, color="red",size=3)+
      scale_x_continuous(limits = c(0.01,3))+
      scale_y_log10(breaks = trans_breaks("log10", function(x) 10^x, n = 4),
                    labels = trans_format("log10", math_format(10^.x)),
                    limits=c(1e-2, 2))+
      theme_bw()+
      theme(panel.grid.major=element_blank(), panel.grid.minor=element_blank(),
            legend.position="none", text=element_text(size=20), axis.text=element_text(size=20))+
      ylab(bquote(paste(~bar(beta), " ", ~italic(p), "(", ~beta, ")", sep=""))) +
      #ylab(bquote(paste(~bar(beta), " ", ~italic(p), "(", ~beta, ")", sep="")))+
      xlab(bquote(paste(~beta, "/", ~bar(beta))))+
      geom_text(aes(label = "", x = 1.5, y = 8), size=8, parse = TRUE)
  } else {
    # Subsequent days: just overlay the day's density points.
    pl <- pl+geom_point(data=out,aes(x=x,y=y),shape=1, color="red",size=3)
  }
}
# Average density across the n days.
pv <- rowSums(pa, na.rm = FALSE, dims = 1)/n
outv <- data.frame(xv = pax, yv=pv)
write.table(outv,file = 'graph/2/data/other/ep_av.txt', col.names = F, row.names = F)
pl <- pl+geom_line(data=outv, aes(x=xv,y=yv),color='green',size=1)
# Hand-tuned mixture parameters (weights l1/l2, gamma shapes a* and rates b*).
# l1 <- 0.545
# a1 <- 5.7814509
# b1 <- 6.7849861
# l2 <- 0.525
# a2 <- 26
# b2 <- 15.45
l1 <- 0.545
a1 <- 5.6
b1 <- 6.9
l2 <- 0.525
a2 <- 26
b2 <- 15.45
# NOTE(review): l1 + l2 = 1.07, so the hand-tuned mixture is not normalized.
pf1 <- l1*dgamma(pax, a1, b1)
outf1 <- data.frame(xf1 = pax, pf1 = pf1)
write.table(outf1,file = 'graph/2/data/other/g1.txt', col.names = F, row.names = F)
pl <- pl+geom_line(data=outf1, aes(x = xf1, y = pf1), linetype = 1, color = "black", size=1)
pf2 <- l2*dgamma(pax,a2,b2)
outf2 <- data.frame(xf2 = pax, pf2 = pf2)
write.table(outf2,file = 'graph/2/data/other/g2.txt', col.names = F, row.names = F)
pl <- pl+geom_line(data=outf2, aes(x = xf2, y = pf2), linetype = 1, color = "black", size=1)
pf <- pf1+pf2
outf <- data.frame(xf=pax,pf=pf)
write.table(outf,file = 'graph/2/data/other/gg.txt', col.names = F, row.names = F)
# NOTE(review): double assignment "pl <- pl <-" is harmless but likely a typo.
pl <- pl <- pl+geom_line(data=outf, aes(x = xf, y = pf), linetype = "dashed", color = "blue", size=1)
print(pl)
# Refit the two-gamma mixture to the averaged density by nonlinear least squares.
fit <- nls(pv~(d1*dgamma(pax,p1,p2)+(1-d1)*dgamma(pax,p3,p4)), start=list(d1=0.56,p1=5.8,p2=6.8,p3=26.7,p4=15.45))
print(coef(fit))
print(coef(fit))
|
f6985783ab6e4e4960e59056aa42edad1690cfe0
|
4b9d5f14103211ebea69ce5e2cb866d97ec39f0c
|
/data/plot-entregas.R
|
e768212a76b14ba72d1b01d90291b4ac8a792eb3
|
[] |
no_license
|
JJ/IV
|
737a7c8813cd50ab695df27a566cc4be42cbf32c
|
fdd7c655a8ae001c47357b90c69070a6a321ff34
|
refs/heads/master
| 2023-08-16T12:42:11.904810
| 2023-08-12T10:13:40
| 2023-08-12T10:13:40
| 12,357,637
| 61
| 99
| null | 2023-02-03T18:34:42
| 2013-08-25T10:50:04
|
Perl
|
UTF-8
|
R
| false
| false
| 479
|
r
|
plot-entregas.R
|
library(ggplot2)
library(dplyr)
library(ggthemes)
# Grades/deliveries per course and milestone, 2015-2020; one row per milestone.
data <- read.csv("notas-suspensos-entregas-2015-2020.csv")
data$Curso <- as.factor(data$Curso)
# Missing counts mean zero occurrences of that category.
data[is.na(data)] <- 0
data$Aprobados <- data$Cuantos - data$Suspensos
# Passing share per course, relative to that course's peak submission count.
data <- data %>% group_by(Curso) %>% mutate(Porcentaje = Aprobados/max(Cuantos))
ggplot( data,aes(x=Hito,y=Porcentaje,color=Curso,group=Curso)) + geom_line() + geom_point() + theme_solarized()
ggsave("notas-suspensos-entregas-2015-2020.png", width=8, height=6)
|
401cdf97b563e80513d195d4ce8f42fce7530134
|
e9625dba422720f326d8ae5cee767a6ec36dc0dd
|
/cv-02-01.R
|
6430aa91c983e49af0df3256aff4ce87800023a0
|
[] |
no_license
|
spetrovi/MV011
|
8e1fd39714475f63e51368a81da89e1fef2d5335
|
61722580c7c6e9277fb3c17295e230e7e8776bc6
|
refs/heads/master
| 2020-03-17T04:01:52.575528
| 2018-05-14T07:31:18
| 2018-05-14T07:31:18
| 133,259,832
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,037
|
r
|
cv-02-01.R
|
# load the "prob" library
library (prob)
# Example 1
# with numeric values
n <- 4
mince <- tosscoin (n)
# this created a so-called data.frame = a data table (matrix) with named columns
mince
# dimensions
dim (mince)
nrow (mince)
ncol (mince)
# column (variable) names
names (mince)
# names can be changed, e.g.
# NOTE(review): mince has n = 4 columns, but only 3 names are assigned here,
# so the 4th column name becomes NA -- confirm this is intentional.
names (mince) <- c ("prvni", "druha", "treti")
mince
# besides classic bracket indexing, columns can also be referenced by name: variable$name
mince$prvni
mince$treti
# structure of the variable
str (mince)
# we can see that the outcomes are H = head and T = tail
# they are so-called factors
# internally a numeric value is stored, but the number is only a code; it has no meaning as a numeric value
as.numeric (mince$treti)
# create the probability space
S <- probspace (mince)
# inspect the result
S
str (S)
names (S)
# again a data table: each row is one elementary outcome, with an added probability column
# size of the sample space Omega
nrow (S)
# event A = all heads (H)
A <- subset (S, isin (S, rep ("H", n)))
A
nrow (A)
nrow (A) / nrow (S)
Prob (A)
# event Bk = exactly k heads, i.e. k times H and (n-k) times T
B0 <- subset (S, isin (S, rep ("T", 4)))
B1 <- subset (S, isin (S, c ("H", "T", "T", "T")))
B2 <- subset (S, isin (S, c ("H", "H", "T", "T")))
B3 <- subset (S, isin (S, c ("H", "H", "H", "T")))
B4 <- A
B0
B1
B2
B3
B4
Prob (B0)
Prob (B1)
Prob (B2)
Prob (B3)
Prob (B4)
psti <- c (Prob (B0), Prob (B1), Prob (B2), Prob (B3), Prob (B4))
# check that the probabilities sum to 1
sum (psti)
# draw a bar chart
names (psti) <- seq (0, 4, by = 1)
barplot (psti, xlab = "pocet licu", ylab = "pravdepodobnost")
# Further exercises to solve on your own (in class or as homework):
# Repeat the task for a larger number of tosses n
# Determine the sets of elementary outcomes favorable to the following events and their probabilities:
# event Ck = at least k heads
# event Dk = at most k heads
|
040e6476afb726d4618ae48b9599a95247ac7f4d
|
7b072a9b73414dbaeb09e0ff6fefac717c7b9eb5
|
/scripts/SelectTranscriptsForPCR.R
|
87ce555a63a284c213648c0bb155e1144f4e1286
|
[] |
no_license
|
EugeniaRadulescu/Isoform_BrainSpan
|
b8be74d791bd1644f38aa5a4c1ded472944d4abc
|
c77bb9205f0a60182c5b7e96dea529e40c717e9a
|
refs/heads/master
| 2023-06-17T08:05:10.093729
| 2021-07-11T22:22:29
| 2021-07-11T22:22:29
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 4,730
|
r
|
SelectTranscriptsForPCR.R
|
library(tidyverse)
# Isoform-level TPM matrix (rows = transcripts, cols = samples) and metadata.
iso_tpm <- readRDS("data/iso_tpm_filter.rds")
metadata <- read_tsv("data/metadata.tsv")
annotations <- read_csv("data/source/annotation.transcript.ensg75.txt")[, -1]
# Sample IDs: female frontal-lobe samples at period 6 (~22 pcw).
female_22pcw_frontal_lobe <- metadata %>%
  filter(Regioncode %in% c("OFC", "DFC", "VFC", "MFC", "M1C")) %>%
  filter(Period == 6) %>%
  filter(Sex == "F") %>%
  pull(Sample)
# Sample IDs: female frontal-cortex samples at period 13 (adult).
female_27y_frontal_cortex <- metadata %>%
  filter(Regioncode %in% c("OFC", "DFC", "VFC", "MFC", "M1C")) %>%
  filter(Period == 13) %>%
  filter(Sex == "F") %>%
  pull(Sample)
# Mean TPM per transcript within each age group.
female_22pcw_frontal_lobe_tpm <- rowMeans(iso_tpm[, female_22pcw_frontal_lobe])
female_27y_frontal_cortex_tpm <- rowMeans(iso_tpm[, female_27y_frontal_cortex])
isoforms <- rownames(iso_tpm)
# log2 fold change fetal vs adult; small pseudocount avoids division by zero.
delta_iso <- log2(female_22pcw_frontal_lobe_tpm[isoforms]/(female_27y_frontal_cortex_tpm+0.0001))
# Rank transcripts by absolute fold change; drop those unexpressed in either group.
isoforms_for_qpcr <- tibble(
  ensembl_transcript_id = isoforms
) %>%
  left_join(
    dplyr::select(annotations, ensembl_transcript_id, ensembl_gene_id),
    by = "ensembl_transcript_id"
  ) %>%
  mutate(
    female_22pcw_frontal_lobe_tpm = female_22pcw_frontal_lobe_tpm,
    female_27y_frontal_cortex_tpm = female_27y_frontal_cortex_tpm,
    log2fc = delta_iso
  ) %>%
  filter(female_22pcw_frontal_lobe_tpm != 0) %>%
  filter(female_27y_frontal_cortex_tpm != 0) %>%
  mutate(abs_log2fc = abs(log2fc)) %>%
  arrange(desc(abs_log2fc))
write_csv(isoforms_for_qpcr, "data/isoforms_for_qpcr.csv")
library(biomaRt)
# GRCh37 Ensembl mart; fetch exon coordinates for all annotated transcripts.
mart <- useMart(biomart = "ensembl", dataset = "hsapiens_gene_ensembl", host = "GRCh37.ensembl.org")
exons <- getBM(
  attributes = c(
    "ensembl_gene_id", "ensembl_transcript_id", "transcript_biotype",
    "start_position", "end_position",
    "ensembl_exon_id", "exon_chrom_start", "exon_chrom_end", "strand"
  ),
  filters = c("ensembl_transcript_id"),
  values = read.csv("data/source/annotation.transcript.ensg75.txt")$ensembl_transcript_id,
  mart = mart
)
calculate_overlap <- function(start1, end1, start2, end2) {
  # Length of the overlap between intervals [start1, end1] and
  # [start2, end2]; 0 when the intervals are disjoint.
  right_edge <- min(c(end1, end2))
  left_edge <- max(c(start1, start2))
  max(0, right_edge - left_edge)
}
# Restrict candidates to protein-coding transcripts, with gene coordinates.
isoforms_for_qpcr <- isoforms_for_qpcr %>%
  left_join(
    dplyr::select(exons, ensembl_transcript_id, transcript_biotype, ensembl_gene_id, start_position, end_position)
  ) %>%
  distinct() %>%
  filter(transcript_biotype == "protein_coding")
# For each candidate transcript, look for one exon whose coordinates overlap
# no other exon -- a region unique enough to design a qPCR primer against.
tx_unique_region <- c()
ex_unique_region <- c()
ln_unique_region <- c()
for (row in 1:nrow(isoforms_for_qpcr)) {
  tx <- pull(isoforms_for_qpcr[row, ], ensembl_transcript_id)
  gn <- unique(pull(filter(exons, ensembl_transcript_id == tx), ensembl_gene_id))
  print(tx)
  my_exons <- exons %>%
    filter(ensembl_transcript_id == tx) %>%
    pull(ensembl_exon_id)
  # Other transcripts of the same gene; skip genes with no annotated transcripts.
  other_tx <- annotations %>%
    filter(ensembl_gene_id == gn) %>%
    pull(ensembl_transcript_id) %>%
    unique()
  if (length(other_tx) < 1) { next }
  for (my_exon in my_exons) {
    my_exon_start <- exons %>%
      filter(ensembl_exon_id == my_exon) %>%
      pull(exon_chrom_start) %>%
      unique()
    my_exon_end <- exons %>%
      filter(ensembl_exon_id == my_exon) %>%
      pull(exon_chrom_end) %>%
      unique()
    message(my_exon_start, ":", my_exon_end)
    # Count exons (genome-wide, any gene) whose span overlaps this exon's
    # start or end coordinate.
    found_exons <- exons %>%
      filter(ensembl_exon_id != my_exon) %>%
      filter(
        (exon_chrom_start <= my_exon_start & my_exon_start <= exon_chrom_end) |
          (exon_chrom_start <= my_exon_end & my_exon_end <= exon_chrom_end)
      ) %>%
      nrow()
    # No overlapping exon at all: record it and move to the next transcript.
    if (found_exons == 0) {
      message("Found transcript: ", tx)
      tx_unique_region <- c(tx_unique_region, tx)
      ex_unique_region <- c(ex_unique_region, my_exon)
      ln_unique_region <- c(ln_unique_region, my_exon_end - my_exon_start)
      break
    }
  }
}
unique_regions <- tibble(
  ensembl_transcript_id = tx_unique_region,
  ensembl_exon_id = ex_unique_region,
  unique_region_length = ln_unique_region
)
# Fetch the sequence of each unique exon for primer design.
exon_seq <- getBM(
  attributes = c("ensembl_exon_id", "gene_exon", "exon_chrom_start", "exon_chrom_end", "rank"),
  filters = c("ensembl_exon_id"),
  mart = mart,
  values = unique_regions$ensembl_exon_id
)
# Keep unique regions long enough (> 200 bp) for a qPCR amplicon.
unique_regions_seq <- left_join(unique_regions, exon_seq, by = "ensembl_exon_id") %>%
  left_join(isoforms_for_qpcr, by = "ensembl_transcript_id") %>%
  filter(unique_region_length > 200)
write_csv(unique_regions_seq, "data/TranscriptsWithUniqueExons.csv")
# NOTE(review): this re-read indexes the numeric TPM vector by transcript ID;
# that only works if the vector still carries rownames -- confirm intent.
unique_regions_seq <- read_csv("data/TranscriptsWithUniqueExons.csv") %>%
  mutate(female_22pcw_frontal_lobe_tpm = female_22pcw_frontal_lobe_tpm[ensembl_transcript_id])
|
c99f6427c5760f9e4a4e1e3b74ed84449bece480
|
d3d27b9b50aeb63ff885709817fb4ebf68eb5bc7
|
/man/spider.Rd
|
927c680a289829c003d0b87f4b3197ef32f9bae6
|
[] |
no_license
|
roliveros-ramos/colorful
|
64e1c3cb1b95fa0645fd37491597ac9723cbeab8
|
999e7bd880dba3556a341639773e25a9ff071112
|
refs/heads/master
| 2020-03-22T08:00:01.856282
| 2019-11-20T22:24:56
| 2019-11-20T22:24:56
| 139,738,215
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 453
|
rd
|
spider.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/colorful-main.R
\name{spider}
\alias{spider}
\title{Add a spider web to a spider plot.}
\usage{
spider(x, ylim, type = "b", col = 1, fill = FALSE, border = NULL,
theta = 0, pch = 19, lwd = 1, lty = 1, cex = NULL,
density = NULL, angle = 45, alpha = 1, rmin = NULL,
clockwise = FALSE, ...)
}
\arguments{
\item{...}{}
}
\description{
Add a spider web to a spider plot.
}
|
eb0fd7183c6518df41b4960bf853954e877ed96d
|
c1dbb14d5536e88ca7ea1bd8001d883f3ed8020b
|
/devel/lotvol_popmodel.R
|
7ecc78ecbfe8bc2d22b827c785c491f16c23f62d
|
[] |
no_license
|
atredennick/community_synchrony
|
493ad3d5dc97c4160994469fcf36678870de24e4
|
d01b584bc504583435db678a8c8b22cfa5ff6285
|
refs/heads/master
| 2020-05-21T19:03:40.687803
| 2017-03-10T14:30:26
| 2017-03-10T14:30:26
| 34,754,749
| 1
| 1
| null | 2016-11-22T13:52:18
| 2015-04-28T20:40:03
|
R
|
UTF-8
|
R
| false
| false
| 2,515
|
r
|
lotvol_popmodel.R
|
## lotvol_popmodel.R: script to simulate population dynamics using
## Lotka-Volterra model with environmental variability. Uses simulated
## time series to calculate the coefficient of variation of total community
## biomass with and without asynchronous environmental responses. Model assumes
## interspecific competition is absent.
##
## Author: Andrew Tredennick (atredenn@gmail.com)
## Date created: November 14, 2016
# NOTE(review): rm(list = ls()) wipes the caller's workspace; acceptable for a
# stand-alone analysis script, but do not source this file from other code.
rm(list=ls(all.names = TRUE))
set.seed(1234567)
####
#### Libraries ----------------------------------------------------------------
####
library(plyr)
library(reshape2)
library(mvtnorm)
library(synchrony)
####
#### Lotka-Volterra Model with Environmental Stochasticity --------------------
####
update_pop <- function(r, Nnow, K, env, sig_env){
  # One-step per-capita Lotka-Volterra growth (no interspecific competition).
  #   r       : intrinsic growth rates, one entry per species
  #   Nnow    : current abundances
  #   K       : carrying capacities
  #   env     : environmental deviates for this time step
  #   sig_env : species-specific environmental sensitivities
  # Returns the realized per-capita growth rates as a numeric vector.
  #
  # Bug fix: `r` was accepted but never used in the original loop; it now
  # scales the density-dependent term, matching the standard Lotka-Volterra
  # form r * (1 - N/K). With r = rep(1, nspp), as used in the simulation
  # below, the result is unchanged. The elementwise loop is also replaced by
  # an equivalent vectorized expression.
  r * (1 - Nnow / K) + env * sig_env
}
####
#### Function to Generate Environmental Responses -----------------------------
####
get_env <- function(sigE, rho, nTime, num_spp) {
  # Draw `nTime` rows of correlated multivariate-normal environmental
  # deviates for `num_spp` species: variance sigE on the diagonal,
  # covariance rho * sigE off the diagonal.
  #
  # Bug fix: the covariance matrix was filled with rep(rho*sigE, num_spp*2),
  # which supplies the required num_spp^2 values only when num_spp == 2;
  # for more species matrix() would recycle with a warning. Filling the
  # matrix directly is correct for any num_spp and identical for the
  # num_spp == 2 case used in this script.
  varcov <- matrix(rho * sigE, nrow = num_spp, ncol = num_spp)
  diag(varcov) <- sigE
  e <- rmvnorm(n = nTime, mean = rep(0, num_spp), sigma = varcov)
  return(e)
}
####
#### Simulate the Model -------------------------------------------------------
####
years_to_sim <- 2000
nspp <- 2
env_variance <- 1
# Grid of environmental-response correlations: -1 = perfectly asynchronous
# species responses, +1 = perfectly synchronous.
rho <- seq(-1,1,by=0.05)
r <- rep(1, nspp)
K <- rep(1000, nspp)
sig_env <- rep(0.1, nspp)
cv_outs <- numeric(length(rho))
env_synch <- numeric(length(rho))
for(j in 1:length(rho)){
  fluct_env <- get_env(sigE = env_variance, rho = rho[j], nTime = years_to_sim, num_spp = nspp)
  N <- matrix(data = NA, nrow = years_to_sim, ncol = nspp)
  N[1,] <- 1
  rsaves <- matrix(data = NA, nrow = years_to_sim-1, ncol = nspp)
  for(t in 2:years_to_sim){
    rnows <- update_pop(r, N[t-1,], K, env = fluct_env[t,], sig_env)
    N[t,] <- N[t-1,] + N[t-1,]*rnows
    rsaves[t-1, ] <- rnows
  }
  matplot(N, type="l")
  # CV of total community biomass after a 500-step burn-in.
  # NOTE(review): the 2000 here is hard-coded; keep in sync with years_to_sim.
  cv <- sd(rowSums(N[500:2000,])) / mean(rowSums(N[500:2000,]))
  cv_outs[j] <- cv
  # Community-wide synchrony of realized growth rates over the same window.
  env_synch[j] <- as.numeric(community.sync(rsaves[500:1999,])[[1]])
}
plot(env_synch, cv_outs, frame.plot = FALSE, pch=19,
     xlab="Synchrony of Growth Rates", xlim=c(0,1),
     ylab="CV of Total Community Biomass")
cbind(env_synch, cv_outs)
# summary(lm(cv_outs~env_synch))
# abline(lm(cv_outs~env_synch), col="red")
# text(0.2,0.08,labels = paste("slope =",round(coef(lm(cv_outs~env_synch))[2],2)))
|
8715bd5e0b85b779a143aeb322fe5d2031a8d050
|
0d2e5781e412519e2f31fdc7c084c2c3f66e2494
|
/ASSIGNMENT/Part one/Clean Air Temp.R
|
b8db93477ea5d73565e3e49051c9ef2f9e1b66bf
|
[] |
no_license
|
guanxuyi/R
|
1ff27493d612d8bf9a79ec90532c4ddc495a83b4
|
9f0a32619c6d25007905c08efc598f562ecdc679
|
refs/heads/master
| 2021-04-09T10:26:01.930621
| 2018-03-15T18:44:24
| 2018-03-15T18:44:24
| 125,398,955
| 0
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 237
|
r
|
Clean Air Temp.R
|
library(tidyverse)
# Quick data-quality check: list the distinct values of each column of the
# air-temperature table.
Air.temp <- read.csv("E:/AIR.TEMPERATURES.csv", header = TRUE)
unique(Air.temp[,"DURATION"])
# Bug fix: the original piped into print(n>30, na.print = "") -- `n` is
# undefined here, and the positional `n>30` would be matched to print's
# `digits` argument, so the line errored at run time. Printing with blank
# NA display preserves the apparent intent.
unique(Air.temp[,"MIN"]) %>% print(na.print = "")
unique(Air.temp[,"TIME"])
unique(Air.temp[,"MAX"])
|
b374cd2b8ed99d706072b6737cfec86bc9bc4328
|
8df9f89767dd7ac4a0c4319e72c852fb5edb88af
|
/Chapter_09/figures/figures9.R
|
febecae4e3d606d700f1ae4de165bb06d6354552
|
[] |
no_license
|
jsgro/MDAuR
|
74709084ffcb46903a994b0b2360683641cda195
|
1462f8c18730beb3c49f7e8f620610986d8f4829
|
refs/heads/master
| 2023-05-08T06:39:15.250038
| 2021-05-27T09:42:17
| 2021-05-27T09:42:17
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 7,405
|
r
|
figures9.R
|
###################################################################
# Co-expression networks
## figure 9.1
library(qpgraph)
# Expression matrix of significant genes (rows = samples or genes per file).
furin.data<-read.csv("furin_significant_genes.csv",row.names=1)
# Pairwise Pearson correlations; keep only those significant at p <= 0.05.
pcc <- qpPCC(furin.data)
pcc$Rsign<-pcc$R
pcc$Rsign[pcc$P>0.05]<-NA
# Non-rejection rates at increasing conditioning set sizes q.
nrr.q1 <- qpNrr(as.matrix(furin.data),q=1)
nrr.q3 <- qpNrr(as.matrix(furin.data),q=3)
nrr.q5 <- qpNrr(as.matrix(furin.data),q=5)
nrr.q7 <- qpNrr(as.matrix(furin.data),q=7)
tiff("figure9_1.tif",width=5,height=5,units="in",res=1200)
par(mfrow=c(3,2),mai=c(0.42,0.42,0.42,0.12))
plot(density(as.numeric(pcc$R),na.rm=T),
     main="Pearson correlation\nAll",xlab="",ylab="")
mtext("A", side = 3, line = 0, adj = 0, cex = 1)
# NOTE(review): xlab="" here is passed to density(), not plot(); likely a
# misplaced argument.
plot(density(as.numeric(pcc$Rsign),na.rm=T,xlab=""),
     main="Pearson correlation\nSignificants",xlab="",ylab="")
mtext("B", side = 3, line = 0, adj = 0, cex = 1)
plot(density(as.numeric(nrr.q1),na.rm=T),
     main="Non-rejection rate\nq=1",xlab="",ylab="")
mtext("C", side = 3, line = 0, adj = 0, cex = 1)
plot(density(as.numeric(nrr.q3),na.rm=T),
     main="Non-rejection rate\nq=3",xlab="",ylab="")
mtext("D", side = 3, line = 0, adj = 0, cex = 1)
plot(density(as.numeric(nrr.q5),na.rm=T),
     main="Non-rejection rate\nq=5",xlab="",ylab="")
mtext("E", side = 3, line = 0, adj = 0, cex = 1)
plot(density(as.numeric(nrr.q7),na.rm=T),
     main="Non-rejection rate\nq=7",xlab="",ylab="")
mtext("F", side = 3, line = 0, adj = 0, cex = 1)
dev.off()
## figure 9.2
# Same densities on common axes for visual comparison (|r| vs reversed NRR).
tiff("figure9_2.tif",width=5,height=5,units="in",res=1200)
par(mfrow=c(2,2),mai=c(0.52,0.42,0.52,0.12))
plot(density(as.numeric(abs(pcc$Rsign)),na.rm=T),
     main="Pearson correlation\nSignificants",ylim=c(0,6))
plot(density(as.numeric(nrr.q1),na.rm=T),
     main="Non-rejection rate\nq=1",xlim=c(1,0),ylim=c(0,6))
plot(density(as.numeric(nrr.q3),na.rm=T),
     main="Non-rejection rate\nq=3",xlim=c(1,0),ylim=c(0,6))
plot(density(as.numeric(nrr.q5),na.rm=T),
     main="Non-rejection rate\nq=5",xlim=c(1,0),ylim=c(0,6))
dev.off()
## figure 9.3
tiff("figure9_3.tif",width=5,height=5,units="in",res=1200)
par(mfrow=c(2,2),mai=c(0.52,0.42,0.52,0.12))
qpGraphDensity(nrr.q1, title="q=1", breaks=10)
qpGraphDensity(nrr.q3, title="q=3", breaks=10)
qpGraphDensity(nrr.q5, title="q=5", breaks=10)
qpGraphDensity(nrr.q7, title="q=7", breaks=10)
dev.off()
## figure 9.4
# Build a graphNEL network from the q=3 NRR matrix: connect gene pairs whose
# non-rejection rate falls below a fixed threshold.
gm <- as.matrix(nrr.q3)
thres <- 0.1
my.nodes <- row.names(gm)
edL <- vector("list", length=length(my.nodes))
names(edL) <- my.nodes
for(i in 1:length(my.nodes)){
  edL[[i]] <- list(edges=names(which(gm[i,]<thres)), weights=gm[i,which(gm[i,]<thres)])
}
library(graph)
g <- graphNEL(nodes=my.nodes, edgeL=edL)
# deprecated code
# pcc100 <- qpGraph(as(1-abs(pcc$Rsign),"dspMatrix"),
#                   topPairs=100,
#                   return.type="graphNEL")
# Pearson-correlation network: keep the topnum strongest |r| entries.
# (topnum = 200 matrix entries = 100 symmetric pairs.)
gmpc100 <- as.matrix(abs(pcc$Rsign))
gmpc100[gmpc100==1] <- NA
topnum <- 200
pc100vals <- as.vector(gmpc100)
pc100vals <- sort(pc100vals[!is.na(pc100vals)],decreasing = T)
thres <- pc100vals[topnum]
my.nodes.pc100 <- row.names(gmpc100)
edLpc100 <- vector("list", length=length(my.nodes.pc100))
names(edLpc100) <- my.nodes.pc100
for(i in 1:length(my.nodes.pc100)){
  edLpc100[[i]] <- list(edges=names(which(gmpc100[i,]>=thres)),
                        weights=gmpc100[i,which(gmpc100[i,]>=thres)])
}
pcc100 <- graphNEL(nodes=my.nodes.pc100, edgeL=edLpc100)
# deprecated code:
# qpg100 <- qpGraph(nrr.q3,
#                   topPairs=100,
#                   return.type="graphNEL")
# q-order graph: keep the topnum smallest non-rejection rates.
gmpg100 <- as.matrix(nrr.q3)
gmpg100[gmpg100==1] <- NA
topnum <- 200
pg100vals <- as.vector(gmpg100)
pg100vals <- sort(pg100vals[!is.na(pg100vals)])
thres <- pg100vals[topnum]
my.nodes.pg100 <- row.names(gmpg100)
edLpg100 <- vector("list", length=length(my.nodes.pg100))
names(edLpg100) <- my.nodes.pg100
for(i in 1:length(my.nodes.pg100)){
  edLpg100[[i]] <- list(edges=names(which(gmpg100[i,]<=thres)),
                        weights=gmpg100[i,which(gmpg100[i,]<=thres)])
}
qpg100 <- graphNEL(nodes=my.nodes.pg100, edgeL=edLpg100)
tiff("figure9_4.tif",width=5,height=5,units="in",res=1200)
par(mfrow=c(2,1))
qpPlotNetwork(qpg100,minimumSizeConnComp=4)
mtext("A", side = 3, line = 0, adj = 0.01, cex = 1, padj = 1)
qpPlotNetwork(pcc100,minimumSizeConnComp=4)
mtext("B", side = 3, line = 0, adj = 0.01, cex = 1)
#par(mfrow=c(1,1))
dev.off()
# Figure 9.5: the neighbourhood (boundary) of the Lhx2 vertex in each network.
tiff("figure9_5.tif",width=5,height=5,units="in",res=1200)
par(mfrow=c(2,1))
qpPlotNetwork(qpg100,vertexSubset="Lhx2", boundary=TRUE)
mtext("A", side = 3, line = 0, adj = 0.01, cex = 1, padj = 1)
qpPlotNetwork(pcc100,vertexSubset="Lhx2", boundary=TRUE)
mtext("B", side = 3, line = 0, adj = 0.01, cex = 1)
dev.off()
# NOTE(review): rm(list = ls()) clears the whole workspace between sections;
# fine for a figure-generation script, but do not source this from other code.
rm(list = ls())
#################################################################
# Master regulators
# figure 9.6
library(Biobase)
# Loads ExpressionSet `eset.tf` into the workspace.
load("eset_tf.rda")
tiff("figure9_6.tif",width=5,height=5,units="in",res=1200)
par(mfrow=c(1,2),mai=c(0.52,0.52,0.72,0.12))
plot(density(exprs(eset.tf)),
     main="Raw gene expression",xlab="",ylab="",cex.main=0.9)
mtext("A", side = 3, line = 0, adj = 0, cex = 1)
plot(density(log2(exprs(eset.tf)),na.rm=T),
     main="Logarithm\nof gene expression",xlab="",ylab="",cex.main=0.9)
mtext("B", side = 3, line = 0, adj = 0, cex = 1)
dev.off()
# figure 9.7
library(RTN)
annot2<-read.csv("annot.csv",row.names=1)
# Probe IDs of the four transcription factors of interest.
target.tf.probes<-c("1341_at","40511_at","41504_s_at","33592_at")
names(target.tf.probes)<-c("SPI1","GATA3","MAF","ZBTB7B")
# Build and filter the transcriptional network: permutation test, bootstrap
# consensus, then DPI filtering of indirect edges.
tf.rtni<-new("TNI",
             gexp=exprs(eset.tf),
             transcriptionFactors=target.tf.probes)
tf.rtni<-tni.preprocess(tf.rtni, gexpIDs=annot2)
tf.rtni<-tni.permutation(tf.rtni, estimator='kendall', pValueCutoff=0.03)
tf.rtni<-tni.bootstrap(tf.rtni, estimator='kendall', consensus=95)
tf.rtni<-tni.dpi.filter(tf.rtni)
tiff("figure9_7.tif",width=5,height=5,units="in",res=1200)
par(mfrow=c(1,2),mai=c(0.52,0.52,0.72,0.12))
g<-tni.graph(tf.rtni)
#V(g)$color <- "black"
plot(g,vertex.shape="none",vertex.label.color="black",vertex.label.cex=0.5)
mtext("A", side = 3, line = 0, adj = 0.5, cex = 1)
# Relabel vertices with gene symbols instead of probe IDs.
V(g)$label<-as.character(annot2[annot2$PROBEID %in% V(g)$name, "SYMBOL"])
plot(g,vertex.shape="none",vertex.label.color="black",vertex.label.cex=0.5)
mtext("B", side = 3, line = 0, adj = 0.5, cex = 1)
dev.off()
rm(list = ls())
#################################################################
# GeneAnswers demo
library(GeneAnswers)
furin.genes<-read.csv("furin_significant_gids.csv")
names(furin.genes)[1]<-"GeneID"
# Input: Entrez IDs with log2 fold change of activated KO vs WT.
furin.input<-data.frame("Entrez Gene ID"=furin.genes$Gene, fold.change=log2(furin.genes$Activated.KO/furin.genes$Activated.WT))
genAns<-geneAnswersBuilder(furin.input,
                           'org.Mm.eg.db',
                           categoryType='KEGG',
                           known=T,
                           geneExpressionProfile=furin.genes)
genAnsRead<-geneAnswersReadable(genAns)
geneAnswersChartPlots(genAnsRead,
                      chartType='pieChart',
                      newWindow=F,cex=0.6)
geneAnswersConceptNet(genAnsRead,
                      colorValueColumn='fold.change',
                      centroidSize='pvalue',output='interactive')
geneAnswersConceptNet(genAnsRead,
                      colorValueColumn='fold.change',
                      centroidSize='geneNum',output='interactive')
tiff("figure9_9_color.tif",width=5,height=5,units="in",res=1200)
geneAnswersHeatmap(genAns, catTerm=TRUE, geneSymbol=TRUE)
dev.off()
|
6aae04d6e1d9847dbf69cb0a6a4106b26af977aa
|
d2ac85674d6812fe3f606094bae82ea089659609
|
/NCANDA/Scripts/01_EWComposites.R
|
3fce8fd023ed0dcebd61d8eebe7196fb8d14366e
|
[] |
no_license
|
LabNeuroCogDevel/R03Behavioral
|
2a98e71917b1f35a4affe08298e32f9100df3b93
|
f743b316ac00aa3381eb72ae08c47b3c87891ebf
|
refs/heads/master
| 2020-09-23T07:19:38.313210
| 2019-12-05T22:19:06
| 2019-12-05T22:19:06
| 225,437,014
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,119
|
r
|
01_EWComposites.R
|
library(dplyr)
library(tidyr)
library(lubridate)
library(ggplot2)
library(lsmeans)
library(mgcv)
library(itsadug)
library(lme4)
library(lsmeans)
library(stats)
library(psych)
library(LNCDR)
library(FactoMineR)
library(corrplot)
library(mgcv)
############
compositecols <- function(cols, data){
  # Z-score composite of data[, cols]: standardize each column, sum across
  # rows (ignoring NAs), then standardize the row sums. Kept as a separate
  # function to increase flexibility.
  standardized <- scale(data[, cols])
  scale(base::rowSums(standardized, na.rm = TRUE))
}
########
# Scored NCANDA cognitive measures (outliers already removed upstream).
coglongdata<-read.csv("/Users/brendenclemmens/Desktop/Projects/R03_behavioral/NCANDA/Data/btc_NCANDAscoredmeasures_20191115.outlierremoved.csv")
####vars############
####################
###Exclude crystalized intelligence measures from composites
# Accuracy-domain variables (CNP accuracy scores plus latent delay discounting).
accvars<-c("cnp_cpf_ifac_tot","cnp_cpw_iwrd_tot","cnp_spcptnl_scpt_tp","cnp_sfnb2_sfnb_mcr","cnp_pmat24a_pmat24_a_cr","cnp_cpfd_dfac_tot","cnp_cpwd_dwrd_tot",
          "cnp_shortvolt_svt","cnp_er40d_er40_cr","cnp_pcet_pcet_acc2","cnp_medf36_medf36_a","cnp_pvrt_pvrt_pc","cnp_svdelay_svt_ld","latentdd")
# Latency-domain variables (CNP reaction times, Stroop, latent groove).
latvars<-c("cnp_cpf_ifac_rtc","cnp_cpw_iwrd_rtc","cnp_spcptnl_scpt_tprt","cnp_sfnb2_sfnb_mrtc","cnp_pmat24a_pmat24_a_rtcr","cnp_cpfd_dfac_rtc",
           "cnp_cpwd_dwrd_rtc","cnp_shortvolt_svtcrt","cnp_er40d_er40_crt","cnp_pcet_pcetrtcr","cnp_medf36_medf36_t","cnp_pvrt_pvrtrtcr","cnp_svdelay_svtldrtc","stroop_total_mean","latentgroove")
allfactorvars<-c(accvars,latvars)
####################
###composites#######
# Equal-weight z-score composites per domain.
coglongdata$Latencycomposite<-compositecols(latvars,coglongdata)
coglongdata$Accuracycomposite<-compositecols(accvars,coglongdata)
###factors#######
# Single-factor ML factor scores per domain (median-imputed, tenBerge scores).
latencyfa<-psych::fa(coglongdata[,latvars],nfactors=1,fm="ml",missing=TRUE,impute="median",scores="tenBerge")
coglongdata$Latencyfactorscore<-latencyfa$scores
accuracyfa<-psych::fa(coglongdata[,accvars],nfactors=1,fm="ml",missing=TRUE,impute="median",scores="tenBerge")
coglongdata$Accuracyfactorscores<-accuracyfa$scores
write.csv(coglongdata,"/Users/brendenclemmens/Desktop/Projects/R03_behavioral/NCANDA/Data/btc_NCANDAscoredmeasures_20191115.outlierremoved.compositeacclat.csv")
|
5301f82b302b92e559948fabcf745803051df25a
|
c43c705a3eabcd6706d40290015d087613a1e3e0
|
/New.R
|
1955f44df0b796b0d6690b3d9f4da548c7e8a945
|
[] |
no_license
|
Shubham619/Twitter-analysis
|
eb73fb6bb1ace347a239d7173dd0bb9e1f5aa130
|
f69f696b172be7395d3c30baabcdcc32f7934597
|
refs/heads/master
| 2020-05-22T05:54:07.198435
| 2019-07-07T16:23:50
| 2019-07-07T16:23:50
| 186,243,013
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 4,258
|
r
|
New.R
|
# Install any missing packages from the required list, then attach them all.
packs<-c("slam","topicmodels","tm","wordcloud","twitteR","RYandexTranslate","textcat","syuzhet")
instd_packs<-packs %in% installed.packages()
for(i in 1:length(instd_packs)){
  if(instd_packs[i]==FALSE){
    install.packages(packs[i])
  }
}
for(i in packs){
  library(i,character.only = TRUE)
}
# Twitter API credentials are entered interactively at run time.
# SECURITY(review): live API keys/tokens were previously committed here in
# comments; they have been redacted and should be revoked and rotated.
consumer_key<-readline("Enter the consumer_key")
consumer_secret<-readline("Enter the consumer_secret")
access_token<-readline("Enter the access_token")
access_secret<-readline("Enter the access_secret")
setup_twitter_oauth(consumer_key, consumer_secret, access_token, access_secret)
# Search parameters supplied by the user; Topic is prefixed with '#'.
n1=as.integer(readline("Enter the number of observations you want to process"))
start_date<-as.Date.character(readline("Enter the date from when you want the data(YYYY-MM-DD)"))
until_date<- as.Date.character(readline("Enter the date upto when you want the data(YYYY-MM-DD)"))
Topic<-gsub(" ","",paste("#", as.character(readline("Enter the topic name which to "))))
tw = twitteR::searchTwitter(as.character(Topic), n = n1, since = as.character(start_date),until = as.character(until_date),lang = "en")
tweets.df = twitteR::twListToDF(tw)
cleanL <- function (x = tweets.df){
  # Strip noise from the `text` column of a tweets data frame: HTML entities,
  # retweet markers, user mentions, punctuation, digits, URLs, runs of
  # whitespace, newlines, and non-ASCII characters. Returns the data frame
  # with the cleaned `text` column.
  #
  # Bug fix: the argument was previously ignored and the body read and
  # returned the global `tweets.df`. The function now cleans `x`, which
  # defaults to `tweets.df`, so the existing zero-argument call site behaves
  # exactly as before. (The original also applied the "&" substitution
  # twice; once is sufficient.)
  x$text = gsub("&", "", x$text)
  x$text = gsub("(RT|via)((?:\\b\\W*@\\w+)+)", "", x$text)
  x$text = gsub("@\\w+", "", x$text)
  x$text = gsub("[[:punct:]]", "", x$text)
  x$text = gsub("[[:digit:]]", "", x$text)
  x$text = gsub("http\\w+", "", x$text)
  x$text = gsub("[ \t]{2,}", "", x$text)
  x$text = gsub("^\\s+|\\s+$", "", x$text)
  x$text = gsub("\n"," ",x$text)
  # Drop any remaining non-ASCII characters entirely.
  x$text <- iconv(x$text, "UTF-8", "ASCII", sub="")
  return(x)
}
clean_content<-cleanL()
# NOTE(review): `clean_content` is never used below -- sentiment scoring and
# the word cloud run on the raw `tweets.df$text`. Confirm whether the cleaned
# text was meant to feed get_nrc_sentiment().
emotions<-get_nrc_sentiment(tweets.df$text)
emo_bar<-colSums(emotions)
emo_sum<- data.frame(count=emo_bar,emotion=names(emo_bar))
# Order emotion factor levels by descending count so the bar chart is sorted.
emo_sum$emotion = factor(emo_sum$emotion, levels=emo_sum$emotion[order(emo_sum$count, decreasing = TRUE)])
# Visualize the emotions from NRC sentiments
library(plotly)
p <- plot_ly(emo_sum, x=~emotion, y=~count, type="bar", color=~emotion) %>%
  layout(xaxis=list(title=""), showlegend=FALSE,
         title="Emotion Type for hashtag: #narendrea modi")
api_create(p,filename="Sentimentanalysis")
# Create comparison word cloud data: one mega-document per emotion, built
# from all tweets scoring positive on that emotion.
wordcloud_tweet = c(
  paste(tweets.df$text[emotions$anger > 0], collapse=" "),
  paste(tweets.df$text[emotions$anticipation > 0], collapse=" "),
  paste(tweets.df$text[emotions$disgust > 0], collapse=" "),
  paste(tweets.df$text[emotions$fear > 0], collapse=" "),
  paste(tweets.df$text[emotions$joy > 0], collapse=" "),
  paste(tweets.df$text[emotions$sadness > 0], collapse=" "),
  paste(tweets.df$text[emotions$surprise > 0], collapse=" "),
  paste(tweets.df$text[emotions$trust > 0], collapse=" ")
)
# create corpus
corpus = Corpus(VectorSource(wordcloud_tweet))
# remove punctuation, convert every word in lower case and remove stop words
corpus = tm_map(corpus, tolower)
corpus = tm_map(corpus, removePunctuation)
corpus = tm_map(corpus, removeWords, c(stopwords("english")))
corpus = tm_map(corpus, stemDocument)
# create document term matrix
tdm = TermDocumentMatrix(corpus)
# convert as matrix
tdm = as.matrix(tdm)
# keep only terms shorter than 11 characters so labels fit in the cloud
tdmnew <- tdm[nchar(rownames(tdm)) < 11,]
# column name binding
colnames(tdm) = c('anger', 'anticipation', 'disgust', 'fear', 'joy', 'sadness', 'surprise', 'trust')
colnames(tdmnew) <- colnames(tdm)
comparison.cloud(tdmnew, random.order=FALSE,
                 colors = c("#00B2FF", "red", "#FF0099", "#6600CC", "green", "orange", "blue", "brown"),
                 title.size=1, max.words=250, scale=c(2.5, 0.4),rot.per=0.4)
|
02fdbfed8b0b939cf3274aa1805f02a699be6ec6
|
30f442869626f8130e5e0a32a9de668861173399
|
/R/variable_importance.R
|
3fa055c9627a51b13b806c1c0f508eac7d9db499
|
[
"MIT"
] |
permissive
|
ehsanx/rcf
|
20a6546df6f9c5f3e149507ecd9f1fb2c7dd21a0
|
cc704a0259ce69d1e0ebec8bd2b7243478d4f00c
|
refs/heads/master
| 2023-05-10T07:14:19.557476
| 2021-05-28T14:14:13
| 2021-05-28T14:14:13
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,667
|
r
|
variable_importance.R
|
#' Computes variable importance measures for a causal forest
#'
#' Importance of a covariate is its depth-weighted share of splits across all
#' trees: splits at depth k are weighted by k^-d, considering depths 1..n.
#' Covariates never split on receive importance 0.
#'
#' @param cf output of the rcf causal_forest function: a list of trees, each
#'   carrying a `var_importance` data frame with `depth` and `var` columns
#' @param covariates names of predictors used in training the causal forest as character vector
#' @param n maximum tree depth of splits to be considered for variable importance computation
#' @param d decay parameter controlling weighting of splits at different depths for variable importance computation
#' @return a data frame with columns `variable` (character) and `importance`
#'   (numeric), sorted by decreasing importance
#' @export
variable_importance <- function(cf, covariates, n, d){
  # Pool per-tree split records and drop root rows (depth 0).
  # (Filtering directly avoids the `-which()` footgun when no depth-0 rows exist.)
  imp_data <- lapply(cf, function(i) i[["var_importance"]])
  imp_data <- do.call(rbind, imp_data)
  imp_data <- imp_data[imp_data$depth != 0, , drop = FALSE]
  # Total number of splits observed at each depth, across all variables.
  total_splits <- as.data.frame(table(imp_data$depth))
  colnames(total_splits) <- c("depth", "total_splits")
  var_grouped <- base::split(imp_data, imp_data$var)
  imp <- lapply(var_grouped, function(i) as.data.frame(table(i$depth)))
  imp <- lapply(imp, setNames, c("depth", "n_splits"))
  imp <- lapply(imp, function(i) merge(i, total_splits, by = "depth"))
  # Bug fix: `depth` is a factor here, and as.numeric() on a factor yields
  # level codes, not the depth values; convert via character instead.
  imp <- lapply(imp, function(i) i[as.numeric(as.character(i$depth)) <= n, , drop = FALSE])
  imp <- lapply(imp, function(i) {
    depths <- as.numeric(as.character(i$depth))
    sum((i$n_splits / i$total_splits) * depths^-d) / sum(c(1:n)^-d)
  })
  # Build a proper data frame. (The previous cbind() coerced importance to
  # character, which made the final "descending" sort lexicographic.)
  imp <- data.frame(variable = names(imp),
                    importance = as.numeric(unlist(imp)),
                    stringsAsFactors = FALSE)
  # Bug fix: the original referenced an undefined global `vars`; the
  # `covariates` argument is the intended full set of candidate predictors.
  var_not_split <- setdiff(covariates, imp$variable)
  var_not_split <- data.frame(variable = var_not_split,
                              importance = rep(0, length(var_not_split)),
                              stringsAsFactors = FALSE)
  imp <- rbind(imp, var_not_split)
  # Sort by decreasing importance using base R (drops the dplyr dependency
  # and the unqualified `desc`); reset row names as dplyr::arrange did.
  imp <- imp[order(imp$importance, decreasing = TRUE), , drop = FALSE]
  rownames(imp) <- NULL
  return(imp)
}
|
fb0f6d6e45d06661d68672eeb91d5b91b97dd661
|
69ed15a883dfbc2d67023d436dbb4cb9742b3970
|
/man/rollingCorMatrix.Rd
|
71e901a15df2b722a55736d23ed29575b7f92172
|
[] |
no_license
|
joh4n/JGTools
|
57f163463b107028509243260e80f7f05f847dd5
|
7418f924665c03791e758da7bc3112cd6b2022d9
|
refs/heads/master
| 2021-06-20T08:26:36.857149
| 2017-06-17T13:35:56
| 2017-06-17T13:35:56
| 77,140,153
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 744
|
rd
|
rollingCorMatrix.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/rollingCorrMatrix.R
\name{rollingCorMatrix}
\alias{rollingCorMatrix}
\title{Calculates the rolling correlation matrix and returns the unique correlations ranked}
\usage{
rollingCorMatrix(df, removeConstantColumns = T, tolerance = 0.6,
window = 100)
}
\arguments{
\item{df}{data frame}
\item{removeConstantColumns}{removes columns in df which are constant}
\item{tolerance}{the lowest accepted correlation}
\item{window}{the window of which the correlation should be calculated}
}
\value{
a data frame
}
\description{
Calculates the rolling correlation matrix and returns the unique correlations ranked
}
\author{
Johan Gudmundsson, \email{jgu@blackwoodseven.com}
}
|
d14693c576041ffbf1b9074ae4daa9eb8795e03c
|
6ff4577459aec8c589bab40625301f7eefc82e73
|
/R/ebayes-helpers.R
|
347647aa1ca0931a0d608d5f06b5cefdffdc4da1
|
[] |
no_license
|
lagzxadr/MAST
|
f1cb34efdb42d2c4eb2b6383eff02193a8e69409
|
a079646898349315a676b56b6a77ca7dd17ec449
|
refs/heads/master
| 2021-04-27T16:27:16.229846
| 2017-12-22T16:19:32
| 2017-12-22T16:19:32
| 122,302,743
| 1
| 0
| null | 2018-02-21T06:59:29
| 2018-02-21T06:59:29
| null |
UTF-8
|
R
| false
| false
| 4,718
|
r
|
ebayes-helpers.R
|
## Likelihood functions and other helpers for shrunken dispersion estimates for zlm
## rNg: residual Ng: Ng -p, where p is the dimension of the model
## SSg: residual sum of squares
## Factory for the marginal hyper-likelihood of the variance hyperparameters
## (a0, b0), given per-gene residual degrees of freedom `rNg` and residual
## sums of squares `SSg`. With deriv=FALSE the returned closure evaluates the
## summed log-likelihood at theta = c(a0, b0); with deriv=TRUE it evaluates
## the score (gradient) vector instead.
getMarginalHyperLikelihood <- function(rNg, SSg, deriv=FALSE){
    if(deriv){
        function(theta){
            stopifnot(names(theta)==c('a0', 'b0'))
            a0 <- theta['a0']
            b0 <- theta['b0']
            ## per-gene partial derivatives w.r.t. a0 and b0
            dA <- digamma(rNg/2+a0)-digamma(a0)-log(1+SSg/(2*b0))
            dB <- (a0*SSg-rNg*b0)/(SSg*b0+2*b0^2)
            c(a0=sum(dA), b0=sum(dB))
        }
    } else {
        function(theta){
            stopifnot(names(theta)==c('a0', 'b0'))
            a0 <- theta['a0']
            b0 <- theta['b0']
            ## per-gene marginal log-likelihood contributions
            ll <- -lbeta(rNg/2, a0)-rNg/2*log(b0)-log(1+SSg/(2*b0))*(rNg/2+a0)
            sum(ll)
        }
    }
}
## Method-of-moments starting values for the hyperparameters (a0, b0).
## Matches the first and second moments of SSg/rNg; a0 is found by
## minimising the squared moment condition on (0, 10).
## NOTE: the objective can have multiple roots, so a global optimiser (or a
## good starting value for the subsequent MLE) may be needed in general.
solveMoM <- function(rNg, SSg){
    ratio_mean <- mean(SSg/rNg)
    ratio_mean2 <- mean(SSg^2/(rNg*(rNg+2)))
    ## squared moment-matching objective in a0
    objective <- function(a0){
        (2*(a0-1)^2*ratio_mean^2 - ratio_mean2^2*((a0-2)*(a0-4)))^2
    }
    a0 <- optimize(objective, c(0, 10))$minimum
    c(a0, (a0-1)*ratio_mean)
}
##' @importFrom plyr aaply
## For each gene (column of the expression matrix), fit the design matrix
## `mm` by QR decomposition to the positively expressed cells only and
## return the residual sum of squares (SSg) and residual degrees of
## freedom (rNg). Genes for which the fit fails yield NA for both.
getSSg_rNg <- function(sca, mm){
aaply(exprs(sca), 2, function(y){
SSg <- NA
rNg <- NA
try({
## restrict to cells with positive expression (continuous component)
pos <- y>0
yp <- y[pos]
mp <- mm[pos,]
QR <- qr(mp)
resid <- qr.resid(QR, yp)
SSg <- crossprod(resid)
rNg <- length(yp)-QR$rank
}, silent=TRUE)
## NAs propagate when the try() block failed (e.g. too few positive cells)
return(c(SSg=SSg, rNg=rNg))
})
}
##' Estimate hyperparameters for hierarchical variance model for continuous component
##'
##' \code{ebayesControl} is a named list with (optional) components 'method' (one of 'MOM' or 'MLE') and 'model' (one of 'H0' or 'H1')
##' method MOM uses a method-of-moments estimator, while MLE using the marginal likelihood.
##' H0 model estimates the precisions using the intercept alone in each gene, while H1 fits the full model specified by \code{formula}
##' @param sca \code{SingleCellAssay}
##' @param ebayesControl list with (optional) components 'method', 'model'. See details.
##' @param Formula a formula (using variables in \code{colData(sca)} used when \code{model='H1'}.
##' @param truncate Genes with sample precisions exceeding this value are discarded when estimating the hyper parameters
##' @return \code{numeric} of length two, giving the hyperparameters in terms of a variance (\code{v}) and prior observations (\code{df}), inside a \code{structure}, with component \code{hess}, giving the Fisher Information of the hyperparameters.
ebayes <- function(sca, ebayesControl, Formula, truncate=Inf){
## Empirical bayes method
## Merge user-supplied control list with defaults (MLE under H0).
defaultCtl <- list(method='MLE', model='H0')
if (is.null(ebayesControl)){
ebayesControl <- list()
nms <- ''
} else{
nms <- names(ebayesControl)
}
missingControl <- setdiff(names(defaultCtl), nms)
ebayesControl[missingControl] <- defaultCtl[missingControl]
method <- match.arg(ebayesControl[['method']], c('MOM', 'MLE'))
model <- match.arg(ebayesControl[['model']], c('H0', 'H1'))
## Treat zeros as missing so only positive expression enters the
## variance computations below.
ee <- exprs(sca)
ee[ee==0] <- NA
if(model == 'H0'){
## H0: per-gene intercept-only fit — center each gene and use the
## residual sums of squares directly.
ee <- scale(ee, scale=FALSE, center=TRUE)
## Global variance
rNg <- colSums(!is.na(ee), na.rm=TRUE)-1
SSg <- colSums(ee^2, na.rm=TRUE)
## Keep genes with positive df and sample precision below `truncate`.
valid <- rNg>0 & rNg/SSg < truncate
rNg <- rNg[valid]
SSg <- SSg[valid]
} else if(model == 'H1'){
## H1: fit the full design per gene and take residual SS / df from there.
mm <- model.matrix(Formula, colData(sca))
allfits <- getSSg_rNg(sca, mm)
valid <- apply(!is.na(allfits), 1, all) & allfits[, 'rNg']/allfits[, 'SSg']<truncate
valid[is.na(valid)] <- FALSE
SSg <- allfits[valid,'SSg']
rNg <- allfits[valid, 'rNg']
}
if(method == 'MLE'){
## Maximise the marginal likelihood in (a0, b0) with analytic gradient;
## hessian=TRUE so the Fisher Information can be attached to the result.
fn <- getMarginalHyperLikelihood(rNg, SSg, deriv=FALSE)
grad <- getMarginalHyperLikelihood(rNg, SSg, deriv=TRUE)
O <- optim(c(a0=1, b0=1), fn, gr=grad, method='L-BFGS', lower=.001, upper=Inf, control=list(fnscale=-1), hessian=TRUE)
if(O$convergence!=0) stop('Hyper parameter estimation might have failed', O$message)
#O <- optim(c(a0=1, b0=1), fn, method='L-BFGS', lower=.001, upper=Inf, control=list(fnscale=-1))
th <- O$par
} else if(method == 'MOM'){
th <- solveMoM(rNg, SSg)
O <- list(hessian=NA)
}
## Reparameterise (a0, b0) as prior variance v = b0/a0 and prior
## observations df = 2*a0, clamped at 0.
v <- max(th['b0']/th['a0'], 0)
df <- max(2*th['a0'], 0)
structure(c(v=v, df=df), hess=O$hessian)
}
|
3c4ac9970ff0b33e1a30ce9b0cecf318d0171a4a
|
51d06a904af41f52ca8afa780fa53f16e1625364
|
/R/knitr.R
|
4fbea88aada76d41e1cdbe498482bd5a067e9e5f
|
[
"MIT"
] |
permissive
|
thomasp85/cpp11
|
055f279b25fc2112be90e0e6d0e04af5d134ab6c
|
d8edde92819a650e65bdc70ca48ebebacc631c0d
|
refs/heads/master
| 2022-10-22T02:32:27.617484
| 2020-06-14T01:30:39
| 2020-06-14T03:21:01
| 272,389,144
| 2
| 0
|
NOASSERTION
| 2020-06-15T08:55:24
| 2020-06-15T08:55:23
| null |
UTF-8
|
R
| false
| false
| 374
|
r
|
knitr.R
|
# knitr language engine for `cpp11` chunks: when the chunk is evaluated,
# compile and source the chunk body into knitr's evaluation environment,
# then render the chunk output using knitr's built-in C++ highlighting.
eng_cpp11 <- function(options) {
  code <- options$code
  if (options$eval) {
    source_cpp(code = code, env = knitr::knit_global(), clean = TRUE, quiet = FALSE)
  }
  # Reuse the stock "cpp" engine purely for syntax highlighting of the source.
  options$engine <- "cpp"
  knitr::engine_output(options, code, "")
}
# Package load hook: register the `cpp11` knitr engine, but only when
# knitr is installed (it is an optional dependency).
.onLoad <- function(libname, pkgname) {
  if (!requireNamespace("knitr", quietly = TRUE)) {
    return(invisible(NULL))
  }
  knitr::knit_engines$set(cpp11 = eng_cpp11)
}
|
7f428e6c28071a3d4a60bd434e6c9ad1df58bf75
|
01e37cc7e663340b47bda5d0094ccfe94f49ef4b
|
/cachematrix.R
|
f057c2976b6eeda69c8c2e079a081892354e2206
|
[] |
no_license
|
NamLQ/ProgrammingAssignment2
|
9c683463072c1817f86590896fc03abcf85eca3b
|
98949a3b64508f2dff1199349a1b3403c00b0a16
|
refs/heads/master
| 2020-02-26T16:02:09.240287
| 2014-11-25T10:40:15
| 2014-11-25T10:40:15
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,519
|
r
|
cachematrix.R
|
## Caching the Inverse of a Matrix
# comments are based on Bill Hilton at
# https://class.coursera.org/rprog-009/forum/thread?thread_id=457
## This function creates a special "matrix" object that can cache its inverse
## Constructor for a matrix wrapper that can cache its inverse.
## Returns a list of accessor functions ("methods") closing over the
## stored matrix `x` and its cached inverse; the cache starts empty (NULL)
## and is invalidated whenever a new matrix is set.
makeCacheMatrix <- function(x = matrix()) {
    cached_inverse <- NULL          # no inverse cached yet

    set <- function(y) {            # replace the stored matrix ...
        x <<- y
        cached_inverse <<- NULL     # ... and drop any stale inverse
    }
    get <- function() {             # return the stored matrix
        x
    }
    setinvert <- function(solve) {  # cache a freshly computed inverse
        cached_inverse <<- solve
    }
    getinvert <- function() {       # NULL until an inverse has been cached
        cached_inverse
    }

    ## Expose the closures so callers (e.g. cacheSolve) can access them.
    list(set = set,
         get = get,
         setinvert = setinvert,
         getinvert = getinvert)
}
## Return the inverse of the special "matrix" object created by
## makeCacheMatrix, computing it at most once: a previously cached inverse
## is reused (with a message), otherwise the inverse is computed with
## solve() and stored back into the object for future calls.
cacheSolve <- function(x, ...) {
    cached <- x$getinvert()
    if (!is.null(cached)) {
        ## Cache hit — skip the computation entirely.
        message("getting cached data")
        return(cached)
    }
    ## Cache miss: compute, store, and return the inverse.
    inverse <- solve(x$get(), ...)
    x$setinvert(inverse)
    inverse
}
|
36e9b379353a88d001a8a12d5c07105c3804e19a
|
aa2a544ee1dbdc89b96ea937b3370884e604f7bd
|
/man/eval.results.partitions.Rd
|
456104f02edcb7764075f5050a777a67cac26ea4
|
[] |
no_license
|
jamiemkass/ENMeval
|
dae21510cf7978ff7a6c446b98db310a86afa2a8
|
199bf0181716b25ea5033be16ed8c6efadcfbd95
|
refs/heads/master
| 2023-08-15T03:42:15.250740
| 2023-01-09T10:47:05
| 2023-01-09T10:47:05
| 29,864,043
| 16
| 13
| null | 2023-06-21T14:31:07
| 2015-01-26T14:18:11
|
R
|
UTF-8
|
R
| false
| true
| 478
|
rd
|
eval.results.partitions.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/classes.R
\name{eval.results.partitions}
\alias{eval.results.partitions}
\alias{eval.results.partitions,ENMevaluation-method}
\title{eval.results.partitions generic for ENMevaluation object}
\usage{
eval.results.partitions(x)
\S4method{eval.results.partitions}{ENMevaluation}(x)
}
\arguments{
\item{x}{ENMevaluation object}
}
\description{
eval.results.partitions generic for ENMevaluation object
}
|
0acacde2686e00e487eba684904590da62368cfb
|
4a54731f78aa6f4e15e7935cbe3302c252cf2b18
|
/man/dirmult.summary.Rd
|
1e8bc36724d7c7a007eaa45b09d55953e335ee11
|
[] |
no_license
|
cran/dirmult
|
fa12784039f207e5b196f97ab7d54b17f49a7e8a
|
d2074d38fbcd58bbdef041b015f2aa92a8fcb689
|
refs/heads/master
| 2022-06-09T21:32:28.537908
| 2022-03-21T09:30:02
| 2022-03-21T09:30:02
| 17,695,517
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,255
|
rd
|
dirmult.summary.Rd
|
\name{dirmult.summary}
\alias{dirmult.summary}
\concept{Genetics}
\concept{Overdispersion}
\concept{Dirichlet-multinomial}
\title{Summary table of parameter estimates from dirmult}
\description{
Produces a summary table based on the estimated parameters from
\code{\link{dirmult}}. The table contains MLE estimates and standard
errors together with method of moment (MoM) estimates and standard
errors based on MoM estimates from 'Weir and Hill (2002)'.
}
\usage{dirmult.summary(data, fit, expectedFIM=FALSE)}
\arguments{
\item{data}{A matrix or table with counts. Rows represent subpopulations
and columns the different categories of the data. Zero rows or columns
are automatically removed.}
\item{fit}{Output from \code{dirmult} used on the same data table as
above.}
\item{expectedFIM}{Logical. Determines whether the observed or expected
Fisher Information Matrix should be used. For speed use observed (i.e.
FALSE) - for accuracy (and theoretical support) use expected (i.e. TRUE).}
}
\value{
Summary table with estimates and standard errors for \eqn{\pi}{pi}
and \eqn{\theta}{theta}.
}
\seealso{
\code{\link{dirmult}}
}
\examples{
data(us)
fit <- dirmult(us[[1]],epsilon=10^(-4),trace=FALSE)
dirmult.summary(us[[1]],fit)
}
|
b82806e482df716b77b2ec1bd3523bf487980afc
|
2078076176e1da24fa13f2f9ee9cec25697a23da
|
/scripts/Federicos script.r
|
42ff710c77a1be8e342978a643fe3bb1e70b814e
|
[] |
no_license
|
julianadf/Paper-3-Corridor-or-barrier
|
84db00f5c74df9ee834037b45e73037017c296d7
|
f13f2d4abc3bee633e2ab2676bef1896269952d0
|
refs/heads/main
| 2023-02-09T20:28:57.733625
| 2021-01-11T08:32:33
| 2021-01-11T08:32:33
| 312,519,562
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 14,611
|
r
|
Federicos script.r
|
##### Riva, Acorn and Nielsen 2018 - Narrow anthropogenic corridors direct the movement of a generalist boreal butterfly
##### coded with R version 3.3.3, using Rstudio 1.0.136
##### 0) Install required packages
# NOTE(review): install-if-missing via require(); fine for a one-off script,
# but library() with explicit installation is preferred for reproducibility.
if(!require(readr)){install.packages("readr")}
if(!require(lme4)){install.packages("lme4")}
if(!require(sjPlot)){install.packages("sjPlot")}
if(!require(gplots)){install.packages("gplots")}
if(!require(glmmTMB)){install.packages("glmmTMB")}
##### 1) Import the data
# NOTE(review): absolute Windows path — adjust to your local copy of the data.
experimental_releases <- read_csv("C:/Riva_et_al_Narrow_anthropogenic_corridors.csv")
##### 2) Prepare the data
# convert data to dataframe
data.frame_release <- data.frame(experimental_releases)
# encode as factors some of the columns in the dataset
# NOTE(review): these conversions modify `experimental_releases` only;
# `data.frame_release` was copied above and the subsets below are derived
# from that copy, so they are unaffected — verify this is intended.
experimental_releases$atreat <- as.factor(experimental_releases$atreat)
experimental_releases$persistent_movement <- as.numeric(experimental_releases$persistent_movement)
experimental_releases$immediate_movement_ew <- as.numeric(experimental_releases$immediate_movement_ew)
experimental_releases$immediate_movement_ns <- as.numeric(experimental_releases$immediate_movement_ns)
experimental_releases$immediate_movement_corridor <- as.numeric(experimental_releases$immediate_movement_corridor)
# subset data: corridors, wellpads, controls
subset_corridor <- subset.data.frame(data.frame_release, type == 'corridor')
subset_wellpad <- subset.data.frame(data.frame_release, type == 'wellpad')
subset_control <- subset.data.frame(data.frame_release, type == 'control')
subset_corridor8 <- subset(subset_corridor, treat == 'corridor_8')
subset_corridor4 <- subset(subset_corridor, treat == 'corridor_4')
subset_control4 <- subset(subset_control, treat =='control_4')
subset_control8 <- subset(subset_control, treat =='control_8')
subset_wellpad4 <- subset(subset_wellpad, treat =='wellpad_4')
subset_wellpad8 <- subset(subset_wellpad, treat =='wellpad_8')
# subset 4-m and 8-m wide arenas
subset_4 <- rbind(subset_corridor4, subset_control4, subset_wellpad4)
subset_8 <- rbind(subset_corridor8, subset_control8, subset_wellpad8)
##### 3) Modeling; see supplementary materials for information on analyses
###### Legend: atreat= arenas categories (controls: forest and clearing vs. corridors, at 4-m and 8-m arena size)
##### before each GLMM model (random effect on release arena), a simple GLM withouth the random effect is provided for comparison. The estimates of the two models are always very similar (variance and st.dev of random effect = 0; run for comparison)
##### 4-m scale
# a) probability of immediate movement in east-west direction
# modelEW4 <- glm(immediate_movement_ew ~ atreat + corridor_direction , family = binomial("logit"), subset_4)
modelEW4 <- glmer(immediate_movement_ew ~ atreat + corridor_direction + (1|arena_id) , family = binomial("logit"), subset_4)
summary(modelEW4)
sjp.glmer(modelEW4, type = "fe")
sjp.glmer(modelEW4, y.offset = .4)
# number of parameter
attributes(logLik(modelEW4))
# confidence interval (95%)
confint(modelEW4)
# predict model, transform back to real values (e.g. log link-> exp()) and return the standard error
predict(modelEW4, type="response", se.fit = TRUE)
# b) probability of immediate movement in north-south direction
# modelNS4 <- glm(immediate_movement_ns ~ atreat + corridor_direction , family = binomial("logit"), subset_4)
modelNS4 <- glmer(immediate_movement_ns ~ atreat + corridor_direction + (1|arena_id) , family = binomial("logit"), subset_4)
summary(modelNS4)
sjp.glmer(modelNS4, type = "fe")
attributes(logLik(modelNS4))
confint(modelNS4)
predict(modelNS4, type="response", se.fit = TRUE)
# c) probability of persistence in directional movement after 12 m
# model3F4 <- glm(persistent_movement ~ atreat, family = binomial, subset_4)
model3F4 <- glmer(persistent_movement ~ atreat + (1|arena_id), family = binomial, subset_4)
summary(model3F4)
sjp.glmer(model3F4, type = "fe")
attributes(logLik(model3F4))
confint(model3F4)
predict(model3F4, type="response", se.fit = TRUE)
##### 8-m scale
# a) probability of immediate movement in east-west direction
# modelEW8 <- glm(immediate_movement_ew ~ atreat + corridor_direction , family = binomial, subset_8)
modelEW8 <- glmer(immediate_movement_ew ~ atreat + corridor_direction + (1|arena_id), family = binomial, subset_8)
summary(modelEW8)
sjp.glmer(modelEW8, type = "fe")
attributes(logLik(modelEW8))
confint(modelEW8)
predict(modelEW8, type="response", se.fit = TRUE)
# b) probability of immediate movement in north-south direction
# modelNS8 <- glm(immediate_movement_ns ~ atreat + corridor_direction, family = binomial, subset_8)
modelNS8 <- glmer(immediate_movement_ns ~ atreat + corridor_direction + (1|arena_id), family = binomial, subset_8)
summary(modelNS8)
sjp.glmer(modelNS8, type = "fe")
attributes(logLik(modelNS8))
confint(modelNS8)
predict(modelNS8, type="response", se.fit = TRUE)
# c) probability of persistence in directional movement after 12 m
# model3F8 <- glm(persistent_movement ~ atreat, family = binomial, subset_8)
model3F8 <- glmer(persistent_movement ~ atreat + (1|arena_id), family = binomial, subset_8)
summary(model3F8)
sjp.glmer(model3F8, type = "fe")
attributes(logLik(model3F8))
confint(model3F8)
predict(model3F8, type="response", se.fit = TRUE)
### plotting expected vs observed probability of (1) immediate movement in east-west direction, (2) immediate movement in north-south direction, and (3) persistent movement in initial direction, in forest, clearing and corridor arenas.
### because intervals of confidence in forest and clearing always overlap with the expected probability under the null hypothesis of random movement, we then focused only on corridors.
# Dashed ablines mark the expected probability under random movement.
plotmeans( subset_4$immediate_movement_ew ~ subset_4$treat_and_direction2, main= ("Immediate east-west movement (4-m scale)"), ylab = "% of occurrence", ylim = c(0.1, 0.9), ci.label=F, digits = 2, barwidth = 2, connect= F, use.t=F, pch=1, barcol = "darkgreen")+ abline(h = 0.5, untf = FALSE, lty=4)
plotmeans( subset_8$immediate_movement_ew ~ subset_8$treat_and_direction2, main= ("Immediate east-west movement (8-m scale)"), ylab = "% of occurrence", ylim = c(0.1, 0.9), ci.label=F, digits = 2, barwidth = 2, connect= F, use.t=F, pch=1, barcol = "darkgreen")+ abline(h = 0.5, untf = FALSE, lty=4)
plotmeans( subset_4$immediate_movement_ns ~ subset_4$treat_and_direction2, main= ("Immediate north-south movement (4-m scale)"), ylab = "% of occurrence", ylim = c(0.15, 0.90), ci.label=F, digits = 2, barwidth = 2, connect= F, use.t=F, pch=1, barcol = "darkgreen" )+ abline(h = 0.5, untf = FALSE, lty=4)
plotmeans( subset_8$immediate_movement_ns ~ subset_8$treat_and_direction2, main= ("Immediate north-south movement (8-m scale)"), ylab = "% of occurrence", ylim = c(0.15, 0.90), ci.label=F, digits = 2, barwidth = 2, connect= F, use.t=F, pch=1, barcol = "darkgreen" )+ abline(h = 0.5, untf = FALSE, lty=4)
plotmeans( subset_4$persistent_movement ~ subset_4$atreat, main= ("Persistence in directional movement (4-m scale)"), ylab = "% of occurrence", ylim = c(0, 0.4), ci.label=F, digits = 2, barwidth = 2, connect= F, use.t=F, pch=1, barcol = "darkgreen" )+ abline(h = 0.09, untf = FALSE, lty=4)
plotmeans( subset_8$persistent_movement ~ subset_8$atreat, main= ("Persistence in directional movement (8-m scale)"), ylab = "% of occurrence", ylim = c(0, 0.4), ci.label=F, digits = 2, barwidth = 2, connect= F, use.t=F, pch=1, barcol = "darkgreen" )+ abline(h = 0.155, untf = FALSE, lty=4)
##### assessing the effects of butterfly sex, corridor characteristics (width and direction), forest height (used as a proxy of forest density), and the interaction of forest height and corridor width
# subset corridors
# testing the effect of date (date_ord), time of release (time.n), temperature (temp), and position of the releaser (sq_corner_code) on immediate and persistent arctic fritillary movements
summary(glmer(immediate_movement_corridor ~ (1|arena_id) + date_ord, family = binomial, subset_corridor))
summary(glmer(immediate_movement_corridor ~ (1|arena_id) + time.n, family = binomial, subset_corridor))
summary(glmer(immediate_movement_corridor ~ (1|arena_id) + temp, family = binomial, subset_corridor))
summary(glmer(immediate_movement_corridor ~ (1|arena_id) + sq_corner_code, family = binomial, subset_corridor))
summary(glmer(persistent_movement ~ (1|arena_id) + date_ord, family = binomial, subset_corridor))
summary(glmer(persistent_movement ~ (1|arena_id) + time.n, family = binomial, subset_corridor))
summary(glmer(persistent_movement ~ (1|arena_id) + temp, family = binomial, subset_corridor))
summary(glmer(persistent_movement ~ (1|arena_id) + sq_corner_code, family = binomial, subset_corridor))
# probability of immediate movement in corridor
# modelimmediate <- glm(immediate_movement_corridor ~ atreat , family = binomial, subset_corridor)
modelimmediate <- glmer(immediate_movement_corridor ~ atreat + (1|arena_id), family = binomial, subset_corridor)
summary(modelimmediate)
attributes(logLik(modelimmediate))
confint(modelimmediate)
predict(modelimmediate, type="response", se.fit = TRUE)
## plot used to create Fig. 2 in the paper (random effect variance is 0)
plotmeans( subset_corridor$immediate_movement_corridor ~ subset_corridor$atreat, main= ("Immediate movement in corridor direction"), ylab = "Probability of event", ylim = c(0.4, 0.8), ci.label=F, digits = 2, barwidth = 2, connect= F, use.t=F, pch=1, barcol = "darkgreen")+ abline(h = 0.5, untf = FALSE, lty=4)
##### testing for the effect of the other covariates, with no support for effect of
model1 <- glmer(immediate_movement_corridor ~ atreat + canopy_height+ (1|arena_id), family = binomial, subset_corridor)
model2 <- glmer(immediate_movement_corridor ~ atreat * canopy_height+ (1|arena_id), family = binomial, subset_corridor)
model3 <- glmer(immediate_movement_corridor ~ atreat + sex + (1|arena_id), family = binomial, subset_corridor)
model4 <- glmer(immediate_movement_corridor ~ atreat + corridor_direction + (1|arena_id), family = binomial, subset_corridor)
model5 <- glmer(immediate_movement_corridor ~ atreat * canopy_height + corridor_direction + sex + (1|arena_id), family = binomial, subset_corridor)
summary(model1)
summary(model2)
summary(model3)
summary(model4)
summary(model5)
confint(model5)
# probability of persistent movement in corridor
# modelpersist <- glm(persistent_movement ~ atreat, family = binomial, subset_corridor)
modelpersist <- glmer(persistent_movement ~ atreat + (1|arena_id), family = binomial, subset_corridor)
summary(modelpersist)
attributes(logLik(modelpersist))
confint(modelpersist)
predict(modelpersist, type="response", se.fit = TRUE)
## plot used to create Fig. 2 in the paper
plotmeans( subset_corridor$persistent_movement ~ subset_corridor$atreat, main= ("Persistent movement in corridor direction"), ylab = "Probability of event", ylim = c(0, 0.4), ci.label=F, digits = 2, barwidth = 2, connect= F, use.t=F, pch=1, barcol = "darkgreen")+ abline(h = 0.09, untf = FALSE, lty=4)+ abline(h = 0.155, untf = FALSE, lty=4)
##### testing for the effect of the other covariates, with no support for effect of
model11 <- glmer(persistent_movement ~ atreat + canopy_height + (1|arena_id), family = binomial, subset_corridor)
model12 <- glmer(persistent_movement ~ atreat * canopy_height + (1|arena_id), family = binomial, subset_corridor)
model13 <- glmer(persistent_movement ~ atreat + sex + (1|arena_id), family = binomial, subset_corridor)
model14 <- glmer(persistent_movement ~ atreat + corridor_direction + (1|arena_id), family = binomial, subset_corridor)
model15 <- glmer(persistent_movement ~ atreat * canopy_height + corridor_direction + sex+ (1|arena_id), family = binomial, subset_corridor)
summary(model11)
summary(model12)
summary(model13)
summary(model14)
summary(model15)
confint(model15)
##### 4) Chi-square tests between expected and observed distributions; see supplementary material for further information on how the expected probabilities were calculated
## Legend: narrow= 4-m arenas; large= 8-m arenas; F= forest; CL= clearing; CO= corridor; COEW: corridor oriented on east-west; CONS: corridor oriented on north-south
## number of butterfly selecting the immediate east-west direction vs. not (results are equal for north-south)
narrowF <- c(49, 57)
chisq.test(narrowF, p = c(1/2, 1/2))
narrowCL <- c(49, 50)
chisq.test(narrowCL, p = c(1/2, 1/2))
narrowCOEW <- c(55, 22)
chisq.test(narrowCOEW, p = c(1/2, 1/2))
narrowCONS <- c(32, 56)
chisq.test(narrowCONS, p = c(1/2, 1/2))
largeF <- c(49, 57)
chisq.test(largeF, p = c(1/2, 1/2))
largeCL <- c(50, 49)
chisq.test(largeCL, p = c(1/2, 1/2))
largeCOEW <- c(54, 26)
chisq.test(largeCOEW, p = c(1/2, 1/2))
largeCONS <- c(29, 60)
chisq.test(largeCONS, p = c(1/2, 1/2))
## number of butterflies selecting the immediate corridor direction at the release vs. not (independent of corridor orientation)
corridorsallsmall <- c(111, 54)
chisq.test(corridorsallsmall, p = c(0.5, 0.5))
corridorsalllarge <- c (113, 56)
chisq.test(corridorsalllarge, p = c(0.5, 0.5))
## number of butterflies passing the 12-m mark vs. not
## (expected probabilities 0.09 / 0.155 come from the supplementary material)
narrowF12 <- c(8, 98)
chisq.test(narrowF12, p = c(0.09, 0.91))
narrowCL12 <- c(10, 89)
chisq.test(narrowCL12, p = c(0.09, 0.91))
narrowCO12 <- c(27, 138)
chisq.test(narrowCO12, p = c(0.09, 0.91))
largeF12 <- c(12, 94)
chisq.test(largeF12, p = c(0.155, 0.845))
largeCL12 <- c(14, 85)
chisq.test(largeCL12, p = c(0.155, 0.845))
largeCO12 <- c(44, 125)
chisq.test(largeCO12, p = c(0.155, 0.845))
|
759adea69c2bb2a67aa8a8ce1d485a558e749dea
|
71abde1c9025f7ab6d13f074192101dbab41c32d
|
/R Codes/r-tutorial-src/rtutor-pt3-c26.R
|
31b9134a0fb1bf1f8492369b32609623a1c9e3f5
|
[
"MIT"
] |
permissive
|
hejibo/Psychological-Statistics
|
844ce22f8b70a860ba033b279b2f8f1823459062
|
2e245228f0e9d599ffaa50d01f41e3cdfbd3b17a
|
refs/heads/master
| 2021-05-09T10:14:53.475249
| 2018-05-01T01:27:00
| 2018-05-01T01:27:00
| 118,957,025
| 1
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 4,591
|
r
|
rtutor-pt3-c26.R
|
###########################################################
#
# Copyright (C) 2012 by Chi Yau
# All rights reserved
#
# http://www.r-tutor.com
#
################################
# c26-s01
# Bayesian simple linear regression of Old Faithful eruption duration on
# (mean-centered) waiting time, fitted via OpenBUGS through R2OpenBUGS.
# NOTE(review): the model() functions below are written to be serialised by
# write.model(); they are never called directly in R.
model <- function() {
# Priors
alpha ~ dnorm(0, 0.001)
beta ~ dnorm(0, 0.001)
tau ~ dgamma(0.001, 0.001)
# Likelihood
for (i in 1:n) {
# y[i] ~ dt(mu[i], tau, 2)
y[i] ~ dnorm(mu[i], tau)
mu[i] <- alpha + beta*x[i]
}
}
# Center the predictor so the intercept is the mean eruption duration.
waiting <- faithful$waiting
x.m <- mean(waiting)
x <- waiting - x.m
y <- faithful$eruptions
n <- length(waiting)
data <- list("x", "y", "n")
params <- c("alpha", "beta", "mu")
inits <- function() {
list(alpha=0, beta=0, tau=1)
}
library(R2OpenBUGS)
model.file <- file.path(tempdir(),
"model.txt")
write.model(model, model.file)
out <- bugs(data, inits, params,
model.file, n.iter=5000)
# Convergence check: all Rhat values below 1.1.
all(out$summary[,"Rhat"] < 1.1)
# fitting the model
cbind(unlist(out$mean[
c("alpha", "beta")]))
# credible intervals
out$summary[c("alpha", "beta"),
c("2.5%", "97.5%")]
# Frequentist fit for comparison with the posterior means above.
faithful.lm <- lm(y ~ x)
cbind(coefficients(faithful.lm))
summary(faithful.lm)
################################
# c26-s02
# Prediction at waiting time 80, done by appending an NA response so BUGS
# imputes (predicts) it along with the fitted values.
model <- function() {
# Priors
alpha ~ dnorm(0, 0.001)
beta ~ dnorm(0, 0.001)
tau ~ dgamma(0.001, 0.001)
# Likelihood
for (i in 1:n) {
# y[i] ~ dt(mu[i], tau, 2)
y[i] ~ dnorm(mu[i], tau)
mu[i] <- alpha + beta*x[i]
}
}
waiting <- faithful$waiting
x.m <- mean(waiting)
x <- waiting - x.m
y <- faithful$eruptions
x0 <- 80 - x.m
n <- length(x)
data <- list(
x=c(x0, x),
y=c(NA, y),
n=n+1)
params <- c("mu", "y")
inits <- function() {
list(alpha=0, beta=0, tau=1)
}
library(R2OpenBUGS)
model.file <- file.path(tempdir(),
"model.txt")
write.model(model, model.file)
out <- bugs(data, inits, params,
model.file, n.iter=5000)
all(out$summary[,"Rhat"] < 1.1)
# prediction
out$mean$y
cbind(c(mu=out$mean$mu[1], y=out$mean$y))
# credible intervals
out$summary[c("mu[1]", "y[1]"),
c("2.5%", "97.5%")]
# frequentist prediction
faithful.lm <- lm(y ~ x)
newdata <- data.frame(x=x0)
predict(faithful.lm, newdata,
interval="confidence")
predict(faithful.lm, newdata,
interval="predict")
# Alternative
# Same prediction, but computed inside the model as derived nodes
# (mu0, y0) instead of via an NA placeholder in the data.
model <- function() {
# Priors
alpha ~ dnorm(0, 0.001)
beta ~ dnorm(0, 0.001)
tau ~ dgamma(0.001, 0.001)
# Likelihood
for (i in 1:n) {
y[i] ~ dnorm(mu[i], tau)
mu[i] <- alpha + beta*x[i]
}
# Prediction
y0 ~ dnorm(mu0, tau)
mu0 <- alpha + beta*x0
}
waiting <- faithful$waiting
x.m <- mean(waiting)
x0 <- 80 - x.m
x <- waiting - x.m
y <- faithful$eruptions
n <- length(x)
data <- list("x", "y", "n", "x0")
params <- c("y0", "mu0")
inits <- function() {
list(alpha=0, beta=0, tau=1)
}
library(R2OpenBUGS)
model.file <- file.path(tempdir(), "model.txt")
write.model(model, model.file)
out <- bugs(data, inits, params, model.file, n.iter=5000)
all(out$summary[,"Rhat"] < 1.1)
# prediction
cbind(unlist(
out$mean[c("mu0", "y0")]))
# credible intervals
out$summary[c("mu0", "y0"),
c("2.5%", "97.5%")]
################################
# c26-s03
# Residual diagnostics: compute standardized residuals as derived nodes
# and inspect them against waiting time and a normal Q-Q plot.
model <- function() {
# Priors
alpha ~ dnorm(0, 0.001)
beta ~ dnorm(0, 0.001)
tau ~ dgamma(0.001, 0.001)
sigma <- 1/sqrt(tau)
for (i in 1:n) {
# Likelihood
y[i] ~ dnorm(mu[i], tau)
mu[i] <- alpha + beta*x[i]
# Derived
stdres[i] <- (y[i]-mu[i])/sigma
}
}
waiting <- faithful$waiting
x.m <- mean(waiting)
x <- waiting - x.m
y <- faithful$eruptions
n <- length(x)
data <- list("x", "y", "n")
params <- c("alpha", "beta", "mu", "stdres")
inits <- function() {
list(alpha=0, beta=0, tau=1)
}
library(R2OpenBUGS)
model.file <- file.path(tempdir(),
"model.txt")
write.model(model, model.file)
out <- bugs(data, inits, params,
model.file, n.iter=5000)
all(out$summary[,"Rhat"] < 1.1)
eruption.stdres <- out$mean$stdres
plot(faithful$waiting, eruption.stdres,
ylab="Standardized Residuals",
xlab="Waiting Time",
main="Old Faithful Eruptions")
abline(0, 0) # the horizon
qqnorm(eruption.stdres,
ylab="Standardized Residuals",
xlab="Normal Scores",
main="Old Faithful Eruptions")
qqline(eruption.stdres)
|
0ebb390036345ec3caf8563d5cd5d2d1c7cac853
|
2bec5a52ce1fb3266e72f8fbeb5226b025584a16
|
/immer/R/immer_score_person_adjusted.R
|
74b1002983ddb990b8551e7b4bacde6d5ec6793e
|
[] |
no_license
|
akhikolla/InformationHouse
|
4e45b11df18dee47519e917fcf0a869a77661fce
|
c0daab1e3f2827fd08aa5c31127fadae3f001948
|
refs/heads/master
| 2023-02-12T19:00:20.752555
| 2020-12-31T20:59:23
| 2020-12-31T20:59:23
| 325,589,503
| 9
| 2
| null | null | null | null |
UTF-8
|
R
| false
| false
| 335
|
r
|
immer_score_person_adjusted.R
|
## File Name: immer_score_person_adjusted.R
## File Version: 0.04

## Shift perfect-zero and perfect-maximum person sum scores inward by `eps`
## (0 -> eps, max_pers -> max_pers - eps) so downstream estimation avoids
## infinite person parameter estimates; all other scores pass through.
immer_score_person_adjusted <- function( sum_score, max_pers, eps)
{
    adjusted <- sum_score
    adjusted[ which( sum_score == 0 ) ] <- eps
    adjusted[ which( sum_score == max_pers ) ] <- max_pers - eps
    return(adjusted)
}
|
9c36c261711af0fa48f791ba1d329ec3ce43be64
|
1aa29b4acc9c39bb9fa0cc0567e4447918e126a4
|
/ui.R
|
ed2b093c80f3105a47a3c8e73c3a563c2b1a20c6
|
[
"MIT"
] |
permissive
|
nkousiadis/student_performance_app
|
1818d2fc1b0f58345917461646d66311ed86b2a9
|
a477b0714166db964a7beeb10b6056090ae55dcc
|
refs/heads/master
| 2020-03-28T07:37:17.908071
| 2018-11-16T15:11:34
| 2018-11-16T15:11:34
| 147,913,517
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,941
|
r
|
ui.R
|
library(shiny)

# UI for the student performance monitoring app: a sidebar holding the
# data-source and plot controls, and a main panel with the plot itself.

# Upload controls, shown only when the user chooses to upload files.
upload_controls <- conditionalPanel(
  condition = "input.data_type == 'Upload file'",
  fileInput(inputId = "input_percentiles",
            label = "Choose percentile (csv file)",
            accept = ".csv",
            buttonLabel = "Browse",
            placeholder = "No file selected"),
  fileInput(inputId = "input_student_performance",
            label = "Upload student's performance (csv file)",
            accept = ".csv",
            buttonLabel = "Browse",
            placeholder = "No file selected"))

# Sidebar: data source choice, upload inputs, and plot options.
controls <- sidebarPanel(
  tags$style(".well {background-color:#bfdcf2;}"),
  radioButtons(inputId = "data_type",
               label = "Upload files or use toy data?",
               choices = c("Upload file", "Toy data"),
               selected = "Upload file",
               inline = TRUE),
  upload_controls,
  checkboxInput("show_percentiles",
                "Show percentiles"),
  numericInput("input_goal",
               "Set performance goal",
               value = NA),
  checkboxInput("show_smooth_line",
                "Show performance progress smooth line"),
  textInput("plot_title",
            "Fill in plot's title"),
  textInput("performance_metric",
            "Fill in the y-axis label (Performance's measurement unit)"),
  textInput("period_unit",
            "Fill in the x-axis label (Period's unit)"),
  downloadButton("export_plot",
                 "Export plot"),
  width = 3)

# Main panel: the rendered performance plot.
plot_area <- mainPanel(
  plotOutput("plot1",
             height = "600px"))

shinyUI(fluidPage(
  titlePanel(h3("Student performance monitoring")),
  sidebarLayout(controls, plot_area)
))
|
487f69144519f9ef1ea437ff85e6cc30a9944c61
|
fde6257c1dd48fb58f74cdf84b91d656f00bf7f1
|
/man/npn_download_magnitude_phenometrics.Rd
|
1b739df894005866d734330dfffa2ff1dada1191
|
[
"MIT"
] |
permissive
|
tufangxu/rnpn
|
c366fe385d738e5de0b48bc287198e5a7b168922
|
b8c0271e9a55c865135fcea8a633b877afb8575f
|
refs/heads/master
| 2020-03-29T04:10:13.770308
| 2018-05-14T17:49:36
| 2018-05-14T17:49:36
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 5,616
|
rd
|
npn_download_magnitude_phenometrics.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/npn_data_download.R
\name{npn_download_magnitude_phenometrics}
\alias{npn_download_magnitude_phenometrics}
\title{Download Magnitude Phenometrics}
\usage{
npn_download_magnitude_phenometrics(request_source, years,
period_frequency = 30, coords = NULL, individual_ids = NULL,
species_ids = NULL, station_ids = NULL, species_types = NULL,
network_ids = NULL, states = NULL, phenophase_ids = NULL,
functional_types = NULL, additional_fields = NULL, climate_data = FALSE,
ip_address = NULL, email = NULL, download_path = NULL)
}
\arguments{
\item{request_source}{Required field, string. Self-identify who is making requests to the data service}
\item{years}{Required field, list of strings. Specify the years to include in the search, e.g. c('2013','2014'). You must specify at least one year.}
\item{period_frequency}{Required field, integer. The integer value specifies the number of days by which to delineate the period of time specified by the
start_date and end_date, i.e. a value of 7 will delineate the period of time weekly. Any remainder days are grouped into the final delineation.
This parameter, while typically an int, also allows for a "special" string value, "months" to be passed in. Specifying this parameter as "months" will
delineate the period of time by the calendar months regardless of how many days are in each month. Defaults to 30 if omitted.}
\item{coords}{List of float values, used to specify a bounding box as a search parameter, e.g. c ( lower_left_lat, lower_left_long,upper_right,lat,upper_right_long )}
\item{species_ids}{List of unique IDs for searching based on species, e.g. c ( 3, 34, 35 )}
\item{station_ids}{List of unique IDs for searching based on site location, e.g. c ( 5, 9, ... )}
\item{species_types}{List of unique species type names for searching based on species types, e.g. c ( "Decidious", "Evergreen" )}
\item{network_ids}{List of unique IDs for searching based on parter group/network, e.g. ( 500, 300, ... )}
\item{states}{List of US postal states to be used as search params, e.g. c ( "AZ", "IL" )}
\item{phenophase_ids}{List of unique IDs for searching based on phenophase, e.g. c ( 323, 324, ... )}
\item{functional_types}{List of unique functional type names, e.g. c ( "Birds" )}
\item{additional_fields}{List of additional fields to be included in the search results, e.g. ( "Station_Name", "Plant_Nickname" )}
\item{climate_data}{Boolean value indicating that all climate variables should be included in additional_fields}
\item{ip_address}{Optional field, string. IP Address of user requesting data. Used for generating data reports}
\item{email}{Optional field, string. Email of user requesting data.}
\item{download_path}{Optional file path to which search results should be re-directed for later use.}
}
\value{
Data table of all status records returned as per the search parameters. Null if output directed to file.
}
\description{
This function allows for a parameterized search of all magnitude phenometrics in the USA-NPN database, returning all records as per the search results in a
data table. Data fetched from NPN services is returned as raw JSON before being channeled into a data table. Optionally results can be directed to an output file in
which case raw JSON is saved to file; in that case, data is also streamed to file which allows for more easily handling of the data if the search otherwise
returns more data than can be handled at once in memory.
}
\details{
This data type includes various measures of the extent to which a phenophase for a plant or animal species is expressed across multiple individuals and sites
over a user-selected set of time intervals. Each row provides up to eight calculated measures summarized weekly, bi-weekly, monthly or over a custom time interval.
These measures include approaches to evaluate the shape of an annual activity curve, including the total number of "yes" records and the proportion of "yes"
records relative to the total number of status records over the course of a calendar year for a region of interest. They also include several approaches for
standardizing animal abundances by observer effort over time and space (e.g. mean active bird individuals per hour). See the Metadata window for more information.
Most search parameters are optional, however, failing to provide even a single search parameter will return all results in the database. Request_Source
must be provided. This is a self-identifying string, telling the service who is asking for the data or from where the request is being made. It is recommended
you provide your name or organization name. If the call to this function is acting as an intermediary for a client, then you may also optionally provide
a user email and/or IP address for usage data reporting later.
Additional fields provides the ability to specify more, non-critical fields to include in the search results. A complete list of additional fields can be found in
the NPN service's companion documentation
https://docs.google.com/document/d/1yNjupricKOAXn6tY1sI7-EwkcfwdGUZ7lxYv7fcPjO8/edit#heading=h.df3zspopwq98
Metadata on all fields can be found in the following Excel sheet:
http://www.usanpn.org/files/metadata/magnitude_phenometrics_datafield_descriptions.xlsx
}
\examples{
\dontrun{
Download all saguaro data for 2013
npn_download_magnitude_phenometrics(request_source="Your Name or Org Here", start_date='2013-01-01', end_date='2013-12-31', species_id=c(210),
download_path="saguaro_data_2013.json")
}
}
|
4c6efabfdbe3ef4168630cef164a616e31c6da2f
|
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
|
/data/genthat_extracted_code/conjoint/examples/caBTL.Rd.R
|
930f33005e1affaf92495cfb1e560a583926ef4b
|
[] |
no_license
|
surayaaramli/typeRrh
|
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
|
66e6996f31961bc8b9aafe1a6a6098327b66bf71
|
refs/heads/master
| 2023-05-05T04:05:31.617869
| 2019-04-25T22:10:06
| 2019-04-25T22:10:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 670
|
r
|
caBTL.Rd.R
|
# Extracted examples for conjoint::caBTL, which estimates the participation
# (market share) of simulation profiles. All examples require the 'conjoint'
# package and its bundled datasets (tea, chocolate, journey).
library(conjoint)
### Name: caBTL
### Title: Function caBTL estimates participation (market share) of
### simulation profiles
### Aliases: caBTL
### Keywords: multivariate
### ** Examples
# Example 1: market shares of the tea simulation profiles.
library(conjoint)
data(tea)
simutil<-caBTL(tsimp,tpref,tprof)
print("Percentage participation of profiles: ", quote=FALSE)
print(simutil)
# Example 2: same analysis for the chocolate data.
library(conjoint)
data(chocolate)
simutil<-caBTL(csimp,cpref,cprof)
print("Percentage participation of profiles:", quote=FALSE)
print(simutil)
# Example 3: all simulation methods at once for the chocolate data.
library(conjoint)
data(chocolate)
ShowAllSimulations(csimp,cpref,cprof)
# Example 4: journey data variant, left disabled in the extracted example.
#library(conjoint)
#data(journey)
#ShowAllSimulations(jsimp,jpref,jprof)
|
b0c3c861b2c7a338b1ba5a5768d3c171607f197e
|
607d3cbb96e05c489cd5e9e939488d0f9de59e82
|
/man/convertListTocompData.Rd
|
be064fa05ade0f33f042a409c1c10ccaf1d0a280
|
[] |
no_license
|
csoneson/compcodeR
|
95fa5f8867af7fc8c034dacffa91642a5a4506d0
|
e7b809e889789bf5e9b627f8a136cb4089fc5f78
|
refs/heads/devel
| 2023-07-06T13:36:51.779149
| 2023-07-03T14:21:36
| 2023-07-03T14:21:36
| 18,625,797
| 9
| 3
| null | 2023-07-03T14:14:37
| 2014-04-10T06:03:04
|
HTML
|
UTF-8
|
R
| false
| true
| 844
|
rd
|
convertListTocompData.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/AllClasses.R
\name{convertListTocompData}
\alias{convertListTocompData}
\title{Convert a list with data and results to a \code{compData} object}
\usage{
convertListTocompData(inp.list)
}
\arguments{
\item{inp.list}{A list with data and results, e.g. generated by \code{compcodeR} version 0.1.0.}
}
\description{
Given a list with data and results (resulting e.g. from \code{compcodeR} version 0.1.0), convert it to a \code{compData} object.
}
\examples{
convertListTocompData(list(count.matrix = matrix(round(1000*runif(4000)), 1000),
sample.annotations = data.frame(condition = c(1,1,2,2)),
info.parameters = list(dataset = "mydata",
uID = "123456")))
}
\author{
Charlotte Soneson
}
|
0a52943ce0dba1b44b3152d3b3a625a01c65c765
|
ecb0587cffdc6eaf4854722495a143f6258b417e
|
/Final.R
|
1556ba376c014ce858d76b8c9bfb2d063b52d97a
|
[] |
no_license
|
benjdreier/Math-23c-Final
|
abbc0d3fe323aaa849150e4257bd462487edbce2
|
412cf04f0e9783930814ef907166095e0812117f
|
refs/heads/master
| 2020-05-04T23:55:04.437812
| 2019-05-14T20:00:23
| 2019-05-14T20:00:23
| 179,559,220
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 36,349
|
r
|
Final.R
|
#Title: Math 23c Final Project
#Written by: Ben Dreier and Michael Cheng
#Date: May 14, 2019
#Sources: Scripts from Paul Bamberg, data from Cori Tucker-Price
#PLEASE DO NOT DISTRIBUTE THIS DATASET -- MY PROCTOR PLANS TO PUBLISH THIS IN A BOOK AND DOES NOT WANT THIS DATA RELEASED PUBLICLY
#Have a great summer! =)
#Master outline/grading checklist
#DOES THE DATASET MEET THE REQUIRED DATASET STANDARDS??
#1- dataframe: yes, we have a .csv file with columns and rows
#2- yes, name, street name, city, former church, and hometown are all categorical columns, and several logical columns are made throughout this data analysis
#3- yes, we used a python script to add two numerical columns: 1st, distance of hometown from LA; 2nd, population of home state in 1940
#4-yes, there are over 3000 rows representing over 3000 church members
#DOES THE DATASET MEET THE REQUIRED GRAPHICAL DISPLAY STANDARDS?
#1- yes, see section 5 for bar plot
#2- yes, see section 2 for histograms (also in other places)
#3- yes, see section 7 for a bad attempt to fit the curve
#4- yes, see contingency table in section 1
#DOES THE DATASET MEET THE REQUIRED ANALYSIS STANDARDS?
#1- yes, see permutation test in sections 2, 3
#2- yes, p-values are used throughout this project, including in sections 2 and 3
#3- yes, see contingency table in section 1
#4- yes, see section 2 for a comparison
#WHAT BONUS POINTS HAVE WE ACHIEVED?
#2- Yes, our dataset has over 3000 individuals, a very large dataset. If you look in Section 2, we actually take samples from the population there.
#3- See one-page document on ethical issues related to collection of data in attached files.
#4 - See one-page document on ethical issues related to final results in attached files.
#5- See section 6 for a heat map with markers
#8- See Section 1 for convincing demonstration of a relationship that might not have been statistically significant but turns out to be so, also in Section 2
#9- See Section 4 or 5; state population may have been statistically significant but in fact does not seem to correlate with anything
#10- See Section 5 for the definition of a function to extract church denominations or the .py script and usage of an API to gather information
#11- See section 6 for a heat map made with ggplot and ggmaps
#12- See Section 2 for a permutation test that works better than classical methods
#14- See Section 4 for a use of linear regression
#15- See Section 4 for calculation and display of a logistic regression curve
#16- See Section 4 for an appropriate use of correlation
#18- See section 2 for use of theoretical knowledge of sampling distributions with the CLT analysis
#19- See Section 1 for pie charts, or see Section 6 for a heat map
#20- See Section 2 for calculation of a confidence interval
#22- Ben and Michael are two people; the team has exactly two members! =)
#Note that this is organized by section.
#Also, section 5 has some of the most interesting analysis because it compares statistics by church denomination. highly suggest you go there! :)
# SECTION 0: LOAD THE DATA FILE AND SET UP PACKAGES --------------------------
# install.packages("ggplot2")  # install once before first use
library("ggplot2") # required for Section 6 (ggmap builds on ggplot2)
# Only install these once -- consider commenting the next three lines out
# after the first run so the script does not re-install every time.
install.packages('ggmap')
install.packages("leaflet")
install.packages("geojsonio")
library(ggmap) # map plotting; loads ggplot2 under the hood
library(leaflet) # interactive maps
library(geojson) # NOTE(review): "geojsonio" is installed above but "geojson" is loaded here -- confirm which package is intended
citation("ggmap")
# Load the membership table: one row per church member.
M <- read.csv("MembershipEdited.csv"); head(M) # the raw file had no numeric columns; a Python script (distancegetter.py) added two: distance of each hometown from LA, and the 1940 population of the home state
# The file also carries a column with each member's home state. With 3,000+
# rows the dataset is large enough to treat as a population from which
# samples are drawn (see Section 2).
# Seven members (<0.3% of the data) whose hometowns returned no data from
# the geocoding API were removed beforehand (Ritta Penden, Susan Fleming,
# John Hunt, George H. Jones, Wilfred Wein, Coxdelia Marshall, Melvin
# Johnson); this should not meaningfully affect the analysis.
# SECTION 1: HOMETOWN vs CONVERT STATUS (CONTINGENCY TABLE) ------------------
# Question: is having a hometown other than Los Angeles associated with
# being a convert from another (or no) religion?
# Initial hypothesis: migrants might join the church seeking community and
# then convert, while LA natives already had a community.

# Counts by hometown and by convert status.
HomeLA <- which(M$Hometown == "Los Angeles, CA"); HomeLA # members whose hometown is LA
length(HomeLA) # 1,232 members from Los Angeles
HomeNotLA <- which(M$Hometown != "Los Angeles, CA"); HomeNotLA # members from elsewhere
length(HomeNotLA) # 1,814 members not from LA
ConvertsA <-which(M$Former.Church == "Convert"); ConvertsA # all converts
length(ConvertsA) # 427 converts
NotConverts <-which(M$Former.Church != "Convert"); NotConverts # all non-converts
length(NotConverts) # 2,619 non-converts

# Logical columns for the contingency table.
LA <- M$Hometown == "Los Angeles, CA"
Converts <- M$Former.Church == "Convert"
HomeR <- data.frame(LA, Converts) # data frame of just the two logical columns

# Observed contingency table (REQUIRED GRAPHICAL DISPLAY 4):
# 1,811 non-LA non-converts; 3 non-LA converts; 808 LA non-converts;
# 424 LA converts.
tbl <- table(HomeR$LA,HomeR$Converts); tbl

# Compare with the table expected under independence.
tbl # observed table
Expected <- outer(rowSums(tbl), colSums(tbl))/sum(tbl); Expected # expected counts under independence; clearly different, especially the non-LA convert cell
# Chi-squared test of independence.
chisq.test(HomeR$LA,HomeR$Converts)
# p < 2.2e-16, so independence is rejected: an LA hometown is strongly
# associated with being a convert -- the opposite of the hypothesis above.
# Nearly all converts (424 of 427) were LA natives, even though migrants
# make up 1,814 of the 3,046 members. One reading: WWII-era migrants were
# already churched (migration ran on church-based social networks), while
# the church drew in unchurched locals. (BONUS POINT 8: a relationship that
# was not apparent beforehand turns out to be strongly significant.)

# Pie charts of the breakdowns (BONUS POINT 19). Counts here include the
# members excluded from the distance analysis.
# Converts vs non-converts.
slices <- c(427, 2626) 
lbls <- c("Converts to Christianity", "Non-Converts")
pct <- round(slices/sum(slices)*100, 2)
lbls <- paste(lbls, slices, pct) # append counts and percents to labels
lbls <- paste(lbls,"%",sep="") # add % sign
pie(slices,labels = lbls, col=rainbow(length(lbls)),
    main="Converts and Non-Converts at the Church")
# Converts/non-converts split by local vs migrant.
slices <- c(424, 3, 808, 1818) 
lbls <- c("Convert Locals", "Convert Migrants", "Non-Convert Locals", "Non-Convert Migrants")
pct <- round(slices/sum(slices)*100, 2)
lbls <- paste(lbls, slices, pct) # append counts and percents to labels
lbls <- paste(lbls,"%",sep="") # add % sign
pie(slices,labels = lbls, col=rainbow(length(lbls)),
    main="Converts, Non-Converts, Migrants, and Locals at the Church")
# Locals vs migrants.
slices <- c(1232, 1821) 
lbls <- c("Locals", "Migrants")
pct <- round(slices/sum(slices)*100, 2)
lbls <- paste(lbls, slices, pct) # append counts and percents to labels
lbls <- paste(lbls,"%",sep="") # add % sign
pie(slices,labels = lbls, col=rainbow(length(lbls)),
    main="Locals and Migrants at the Church")
# END SECTION 1
# SECTION 2: DISTANCE MIGRATED, CONVERTS vs EVERYONE (CLT + PERMUTATION) -----
# Compare the mean distance migrated by converts against the whole
# membership and test whether the gap could have arisen by chance.
km <- M$Distance/1000; km # distances in km (raw column is in meters)
mu <- mean(km); mu # population mean distance, 1580.57 km (LA natives count as 0)
sigma <- sd(km); sigma # population sd, 1513.49 km
migD <- sum(km * (M$Former.Church == "Convert"))/sum((M$Former.Church == "Convert")); migD # mean distance for converts: 7.26 km

# Approach 1: CLT. For a sample of size n from the population, the sample
# mean is ~ Normal(mu, sigma/sqrt(n)).
n = 424 # sample size matched to the number of converts
curve(dnorm(x, mu, sigma/sqrt(n)), from = 0, to = 2500)
abline(v = migD, col = "red") # converts' mean sits on the far left tail
pnorm(migD, mu, sigma/sqrt(n), lower.tail = FALSE) # P(sample mean > migD) is essentially 1

# One random sample of size 424 and a 95% CI around its mean
# (BONUS POINTS 2, 20).
mean <- mean(sample(M$Distance/1000, 424)); mean # NOTE(review): this shadows base::mean until reassigned -- consider renaming
mean <- 1491.96 # freeze one observed sample mean so the interval below is reproducible
l <- (mean + qnorm(0.025) * (sigma/sqrt(424))); l # lower bound, 1347.90 km
u <- (mean - qnorm(0.025) * (sigma/sqrt(424))); u # upper bound, 1636.02 km
# The converts' actual mean (7.26 km) lies far outside [l, u], so the gap
# almost certainly did not arise by chance.

# Shade the area above the observed converts' mean.
xpoints <- c(migD,seq(migD,2500,1),2500) # polygon x-coordinates
ypoints <- c(0,dnorm(seq(migD,2500,1), mu, sigma/sqrt(n)),0)
curve(dnorm(x, mu, sigma/sqrt(n)), from = 0, to = 2500)
abline(v = migD, col = "red")
graphics::polygon(xpoints,ypoints,col="skyblue") # nearly the whole curve is shaded: almost any random sample mean would exceed the converts' mean

# Approach 2: standardize to N(0,1), treating sigma as known.
Z = (migD-mu)/(sigma/sqrt(n)); Z
# Probability of a sample mean at least this extreme.
PValue <- pnorm(Z, lower.tail = FALSE); PValue # ~1, same conclusion as above

# Same idea with the sample sd S, giving a Student t statistic.
S <- sd(km); S
t = (migD-mu)/(S/sqrt(n)); t
PValue <- pt(t, df = n-1, lower.tail = FALSE); PValue
# p ~ 1: a randomly drawn sample mean would almost surely exceed the
# converts' observed mean.
curve(dnorm(x, mu, S/sqrt(n)), from = 0, to = 2500)
abline(v = migD, col = "red") # observed converts' mean, far left tail
# For n this large the t distribution is essentially standard normal.
t = (migD-mu)/(sigma/sqrt(n)); t
PValue <- pt(t, df = n-1, lower.tail = FALSE); PValue # about the same p-value

# Student-t 95% confidence interval for the mean distance (BONUS POINT 20).
L <- mean(km) + qt(0.025, n-1) * sd(km)/sqrt(n); L # lower bound, 1436.10
H <- mean(km) - qt(0.025, n-1) * sd(km)/sqrt(n); H # upper bound, 1725.05
# The converts' mean of 7.26 km is far outside [1436.10, 1725.05]: converts
# migrated much shorter distances, and the difference is significant
# (BONUS POINT 8, quantifying Section 1's finding).

# Permutation test of the converts / non-converts difference in means.
isConvert <- M$Former.Church == "Convert"; isConvert
N <- 10000
diffs <- numeric(N)
for(i in 1:N){
  Samp <- sample(isConvert); Samp # permuted convert labels
  disConverts <- sum((km) * (Samp == TRUE))/sum(Samp == TRUE); disConverts
  dOthers <- sum((km) * (Samp == FALSE))/sum(Samp == FALSE); dOthers
  diffs[i] = disConverts - dOthers # symmetric around 0 under the null
}
mean(diffs) # near zero, as expected under the null
hist(diffs, breaks = "FD", xlim = c(-2000,500), xlab = "Observed Differences", main = "Histogram of Observed Differences") 
other <- sum(km * (M$Former.Church != "Convert"))/sum((M$Former.Church != "Convert")); other # mean distance for non-converts
observed <- migD - other ; observed # observed difference, about -1829 km
abline(v = observed, col = "red") # far outside the permutation distribution
pvalue <- (sum(diffs >= observed)+1)/(N+1); pvalue # ~1: essentially every permuted difference exceeds the observed one, so the gap did not arise by chance (BONUS POINT 12; REQUIRED ANALYSIS 1, 3)
# END SECTION 2
# SECTION 3: PERMUTATION TEST -- BAPTISTS AND DISTANCE MIGRATED --------------
# Is having been Baptist (former church name contains "Baptist") associated
# with the distance a member traveled to Los Angeles?
Churches <- (M$Former.Church); Churches # every member's former church
Denoms <- c("Baptist", "Methodist", "AME", "Episcopal", "CME", "Presbyterian", "Catholic") # denomination keywords to look for inside church names
# Function to get which churches contain which denomination strings
# For each church name in ch, build a string that concatenates every
# denomination keyword from dn found in it, space-separated and with a
# leading space (e.g. " Baptist"). If grepl() yields NA for a church name
# (missing value), scanning stops for that entry and whatever matched so
# far is kept.
whichContain <- function(ch, dn){
  found <- character(length(ch))
  for (pos in seq_along(ch)) {
    label <- ""
    for (d in dn) {
      hit <- grepl(d, ch[pos])
      # NA church name: abandon the remaining keywords for this entry
      if (is.na(hit)) break
      if (hit) label <- paste(label, d)
    }
    found[pos] <- label
  }
  found
}
WhichDenoms <- whichContain(Churches, Denoms) # denomination keyword(s) matched in each church name
isBaptist <- WhichDenoms == " Baptist"; isBaptist # TRUE when "Baptist" was the only keyword matched (note the leading space produced by whichContain)
sum(isBaptist) # 1,185 Baptists out of 3,046 members
km <- M$Distance / 1000 # distances in km
mean(km) # mean distance for all members, 1580.57 km
median(km) # median distance for all members, 2137 km
# Observed difference in mean distance: Baptists vs everyone else.
dBaptists <- sum((km) * (isBaptist == TRUE))/sum(isBaptist == TRUE); dBaptists # 2028.802 km for Baptists
dOthers <- sum((km) * (isBaptist == FALSE))/sum(isBaptist == FALSE); dOthers # 1295.159 km for non-Baptists
observed <- dBaptists - dOthers; observed # Baptists traveled ~733.64 km farther on average
# Permutation test: shuffle the Baptist labels N times and recompute the
# difference in means each time.
N <- 10000
diffs <- numeric(N)
for(i in 1:N){
  Samp <- sample(isBaptist); Samp # permuted isBaptist column
  dBaptists <- sum((km) * (Samp == TRUE))/sum(Samp == TRUE); dBaptists
  dOthers <- sum((km) * (Samp == FALSE))/sum(Samp == FALSE)
  diffs[i] = dBaptists - dOthers # symmetric around 0 under the null
}
mean(diffs) # near zero, as expected under the null
hist(diffs, breaks = "FD", xlim = c(-500,900), main = "Histogram of Observed Differences", xlab = "Observed Differences") 
abline(v = observed, col = "red") # observed difference far beyond the permutation distribution
pvalue <- (sum(diffs >= observed)+1)/(N+1); pvalue # ~0.0001 < 0.05: the Baptist / non-Baptist gap in distance traveled is statistically significant, not chance
# END SECTION 3
#SECTION 4: Is there a relationship between distance traveled and state population?
#BONUS POINTS: 9, 14, 15, 16
#Let's try to analyze our only two numeric columns, distance traveled and state population
#Is there some kind of relationship between traveling from a farther away state and coming from a larger or smaller state?
km <- M$Distance/1000; km #convert our distances from meters to km
plot(km ~ M$State.Population, col = "blue", xlab = "State population in 1940", ylab = "Distance traveled") #here is a scatter plot comparing distance migrated with state population
mod <- lm(km ~ M$State.Population); mod #we found the regression line; apparently Distance Traveled to LA = -1.407*10^4 * (State population) + 2.404*10^3
#BONUS POINT 14! used linear regression
abline(mod, col = "green") #now let's add the regression line
#according to this regression line, the larger a migrant's state population, the less distance they traveled to LA
#So migrants from smaller states were far more likely to travel longer distances to LA; perhaps reflecting lower populations in the Northeast/Midwest in 1940?
#However, this relationship might not be statistically significant.
summary(mod) #Multiple R squared is 0.05, adjusted R squared is 0.05
#Evidently, our R squared value is about 0.05, indicating a very inaccurate regression line. Our regression line only explains about 5% of the variability of the response data around its mean, indicating a very weak correlation. distance traveled and state population in 1940 don't seem to be correlated well with each other, but let's use correlation to verify!
#BONUS POINT 16! used correlation here.
#Now let's look at the correlation
res <- cor(km, M$State.Population); res
round(res,2) #our correlation is -0.22. Since our correlation is negative, an increase in state population predicts a decrease the distance traveled. However, again this is a very weak correlation. There may not be a real relationship between the variables
#BONUS POINT 15! logistic regression
#let's try to use a logistic regression to model Distance Traveled as a function of State Population. Note that we need to normalize these variables between 0 and 1, so I divided each variable by its maximimum value in the dataset
pop <- M$State.Population/max(M$State.Population)
k <- km/max(km)
plot(pop, k, xlab = "State population in 1940", ylab = "Distance Traveled") #here's another plot
#Start with minus the log of the likelihood function from Paul's code
# Negative log-likelihood for a logistic model of normalized distance (k)
# as a function of normalized state population (pop). Both `pop` and `k`
# are globals defined just above; mle() minimizes this over alpha/beta.
MLL <- function(alpha, beta) {
  eta <- alpha + beta * pop          # linear predictor
  p1 <- exp(eta) / (1 + exp(eta))    # modeled P(k = 1)
  p0 <- 1 / (1 + exp(eta))           # modeled P(k = 0)
  -sum(log(p1) * k + log(p0) * (1 - k))
}
#R has a function that will maximize this function of alpha and beta
#install.packages("stats4") #needs to be run at most once
library(stats4)
#mle() minimizes the negative log-likelihood MLL defined just above
results<-mle(MLL, start = list(alpha = 0, beta = 0)) #an initial guess is required
results@coef #alpha = -0.158, beta = -1.725 are the parameters for our logistic regression curve
#overlay the fitted logistic curve on the plot of pop vs. k drawn earlier
curve( exp(results@coef[1]+results@coef[2]*x)/ (1+exp(results@coef[1]+results@coef[2]*x)),col = "blue", add=TRUE) #the blue line is the logistic regression curve
#The logistic regression curve does not look terrible, but considering how low our correlation was earlier it is unlikely that there is a substantial correlation between distance traveled and state population in 1940
#BONUS POINT 9! Evidently, there could have been a relationship between state population and distance traveled, but our analysis indicates that there is likely no significant relationship after all. Therefore, this relationship turns out to be statistically insignificant.
#END SECTION 4
#SECTION 5: Distances traveled and population of state by church denomination
#another example of bonus point 9
#Continuing from Section 3, let's analyze the average distances traveled for all major church denominations
#the following code creates a column that is True if an individual was a certain denomination before joining the People's Independent Church of Christ, false otherwise (defined as their church having a certain word in it)
Churches <- (M$Former.Church); Churches #column of all individuals churches
Denoms <- c("Baptist", "Methodist", "AME", "Episcopal", "CME", "Presbyterian", "Catholic") # Enumerate some words pointing to church denomination
# Function to get which churches contain which denomination strings
#BONUS POINT 10: Professional looking software engineering, defining functions
# For each church name in `ch`, build a string listing every denomination
# keyword from `dn` that appears in it; each hit is appended with a leading
# space (so a lone Baptist match yields " Baptist"). Entries with no hits
# stay "". Returns a character vector parallel to `ch`.
whichContain <- function(ch, dn) {
  denoms <- rep("", length(ch))
  for (idx in seq_along(ch)) {
    church <- ch[idx]
    for (denom in dn) {
      hit <- grepl(denom, church)
      # An NA match (e.g. a missing church name) aborts the keyword scan
      # for this entry, matching the original behavior.
      if (is.na(hit)) break
      if (hit) {
        denoms[idx] <- paste(denoms[idx], denom)
      }
    }
  }
  denoms
}
#Flag each individual's single-denomination membership (exact one-keyword match,
#hence the leading space produced by whichContain's paste())
WhichDenoms <- whichContain(Churches, Denoms)
isBaptist <- WhichDenoms == " Baptist"; isBaptist #true if an individual is Baptist
isMethodist <- WhichDenoms == " Methodist"; isMethodist
isAME <- WhichDenoms == " AME"; isAME
isEpiscopal <- WhichDenoms == " Episcopal"; isEpiscopal
isCME <- WhichDenoms == " CME"; isCME
isPresbyterian <- WhichDenoms == " Presbyterian"; isPresbyterian
isCatholic <- WhichDenoms == " Catholic"; isCatholic
#Notice that I added the individuals who were deleted at the very beginning (since their hometowns couldn't be traced) back in for the purposes of counting as we have their former churches
#NOTE(review): the +1/+2 corrections below are hand counts for those re-added rows -- verify against the raw data
numBap <- sum(isBaptist) + 1; numBap #so we have 1,186 baptists; out of 3,046 individuals this is a lot
numMeth <- sum(isMethodist); numMeth #123 Methodists
numAME <- sum(isAME)+2; numAME #324 AMEs/ African Methodists
numEp <- sum(isEpiscopal)+2; numEp #43 Episcopals
numCME <- sum(isCME); numCME #70 CMEs
numPres <-sum(isPresbyterian); numPres #25 Presbyterians
numCat <- sum(isCatholic); numCat #62 Catholics
numC <- sum(M$Former.Church == "Convert"); numC #427 Converts (same method as Section 1)
#So our algorithm accounts for 2,255 of the 3,046 individuals in our dataset. This is pretty good for just a word search!
other <- 3046 - sum(numBap, numMeth, numAME, numEp, numCME, numPres, numCat, numC) +2; other #793 other denominations
#Let's make a vector of all the individuals' denominations and graph them
des <- c(rep("Baptist", numBap), rep("Methodist", numMeth), rep("AME", numAME), rep("Episcopal", numEp), rep("CME", numCME), rep("Presbyterian", numPres), rep("Catholic", numCat), rep("Convert", numC), rep("Other", other)); des
table(des) #look at our cute little table!
barplot(table(des), col = "pink", xlab = "Denomination", ylab = "Number of Individuals", main = "Denominations of Church Membership") #note: you may need to resize your window to get all of the labels to show
#THIS BARPLOT IS REQUIRED GRAPHICAL DISPLAY 1
#(BONUS POINT 19 AGAIN): let's make a pie chart of the denominations! :)
# Pie Chart for Denominations
slices <- c(numBap, numMeth, numAME, numEp, numCME, numPres, numCat, numC, other)
lbls <- c("Baptist", "Methodist", "AME", "Episcopal", "CME", "Presbyterian", "Catholic", "Convert to Christianity", "Other")
pct <- round(slices/sum(slices)*100, 2)
lbls <- paste(lbls, slices, pct) # add percents to labels
lbls <- paste(lbls,"%",sep="") # ad % to labels
pie(slices,labels = lbls, col=rainbow(length(lbls)),
main="Denominations at the Church")
Converts <- M$Former.Church == "Convert"
#Now let's look at average distance traveled by church denomination, remembering that 0 km = from LA
disB <- sum(isBaptist * km)/sum(isBaptist); disB #2028.80 km for Baptists
disM <- sum(isMethodist * km)/sum(isMethodist); disM #2581.766 km for Methodists
disA <- sum(isAME * km)/sum(isAME); disA #2087.33 km for AME
disE <- sum(isEpiscopal * km)/sum(isEpiscopal); disE #2234.66 km for Episcopal
disCM <- sum(isCME * km)/sum(isCME); disCM #1894.95 km for CME
disP <- sum(isPresbyterian * km)/sum(isPresbyterian); disP #2041.20 km for Presbyterians
disCA <- sum(isCatholic * km)/sum(isCatholic); disCA #802.37 km for Catholics
disC <- sum(Converts * km)/sum(Converts); disC #7.26 km for Converts
disO <- sum((!isBaptist & !isMethodist & !isAME & !isEpiscopal & !isCME & !isPresbyterian & !isCatholic & !Converts) * km)/ sum((!isBaptist & !isMethodist & !isAME & !isEpiscopal & !isCME & !isPresbyterian & !isCatholic & !Converts)); disO #1381.13 km for others
#Let's make a graph of the distances. For the purposes of making a barplot, I'm rounding the distances to the nearest whole number
#NOTE(review): rep("x", times = <distance>) replicates each label ~2000 times just so
#table()+barplot() draws bar heights equal to the distances -- wasteful; a named
#numeric vector passed straight to barplot() would achieve the same picture.
dis <- c(rep("Baptist", disB), rep("Methodist", disM), rep("AME", disA), rep("Episcopal", disE), rep("CME", disCM), rep("Presbyterian", disP), rep("Catholic", disCA), rep("Convert", disC), rep("Other", disO)); dis
table(dis) #look at our cute little table!
barplot(table(dis), col = "orange", xlab = "Denomination", ylab = "Average Distance Traveled to LA (km)", main = "Average Distance Traveled to LA (km) by Denomination") #note: you may need to resize your window to get all of the labels to show
#We knew about Converts from Section 1, but notice how Catholics did not migrate very far either compared to the other denominations
#Finally, let's look at average home state population by denomination
pB <- sum(isBaptist * M$State.Population)/sum(isBaptist); pB #5476799 for Baptists
pM <- sum(isMethodist * M$State.Population)/sum(isMethodist); pM #5072736 for Methodists
pA <- sum(isAME * M$State.Population)/sum(isAME); pA #5060682 for AME
pE <- sum(isEpiscopal * M$State.Population)/sum(isEpiscopal); pE #6310050 for Episcopal
pCM <- sum(isCME * M$State.Population)/sum(isCME); pCM #5022214 for CME
pP <- sum(isPresbyterian * M$State.Population)/sum(isPresbyterian); pP #4982980 for Presbyterians
pCA <- sum(isCatholic * M$State.Population)/sum(isCatholic); pCA #68705221 for Catholics
pC <- sum(Converts * M$State.Population)/sum(Converts); pC #6906233 for Converts
pO <- sum((!isBaptist & !isMethodist & !isAME & !isEpiscopal & !isCME & !isPresbyterian & !isCatholic & !Converts) * M$State.Population)/ sum((!isBaptist & !isMethodist & !isAME & !isEpiscopal & !isCME & !isPresbyterian & !isCatholic & !Converts)); pO #6270838 for others
#Let's make a graph of the average home state populations
#NOTE(review): same rep()-by-count trick, but here the counts are state populations
#in the millions -- this materializes tens of millions of strings and may exhaust
#memory; strongly consider barplot(c(Baptist = pB, ...)) instead.
p <- c(rep("Baptist", pB), rep("Methodist", pM), rep("AME", pA), rep("Episcopal", pE), rep("CME", pCM), rep("Presbyterian", pP), rep("Catholic", pCA), rep("Convert", pC), rep("Other", pO)); p
table(p) #look at our cute little table!
barplot(table(p), col = "green", xlab = "Denomination", ylab = "Average Home State Population", main = "Average Home State Population by Denomination") #note: you may need to resize your window to get all of the labels to show
#It doesn't look like there are many clear differences by denomination
#BONUS POINT 9: here's another relationship that might have been statistically significant, but are not so
#END SECTION 5
#BEGIN SECTION 6
#This section makes a cool map of the population distribution! :)
#Make sure that you have downloaded ALL of the data files
#BONUS POINTS: 5, 10, 11, 19
# Let's try to map
# First, go through and extract
# Iterate through hometowns and get their latitude and longitude
# This takes a while and requires a key, try to only run it once if at all
# You can also just use the backup file I created below
# Key removed for security; if you need to run this part of the code yourself, I can provide a key
#register_google(key="_KEY_", write=TRUE)
##Skip This, just use the file backup "locations2.csv">##
#locs <- {}
#for(i in seq(1, length(M$Hometown))){
# town <- toString(M$Hometown[i])
#loc <- geocode(town)
#locs <- rbind(locs, loc)
#}
#write.csv(locs, "locations2.csv")
##Skip This <##
#Instead of running this again, just load from a file backup I made
#locations2.csv must sit in the working directory -- rows parallel to M
locs_file <- read.csv("locations2.csv")
# Now we can map the counts
# Get state outlines
states <- geojsonio::geojson_read("us-states.json", what="sp")
#Let's make a quick table of all the States counts
table(sort(M$State))
#notice the top states of migration: 1. California (1364) 2. Texas (501) 3. Illinois (159) 4. Louisiana (131). Missouri (115)
# Before mapping, Count occurrences of each state in the states object
# Doing it this way ensures that each count corresponds to the correct state
#NOTE(review): counts is grown with c() inside the loop and 1:length() is used --
#consider preallocating and seq_along(states$name) for robustness.
counts = {}
for(i in 1:length(states$name)){
curr_state <- toString(states$name[i])
state_count <- length(M$State[M$State == curr_state])
counts = c(counts, state_count)
}
# Put these counts in the states object used for visualization
states$count <- counts
# Now we can decide how to break up the bins
barplot((sort(log10(counts+1), decreasing=TRUE)), main = "Distribution of log(number of people from each state)", ylab = "log(number of people from each state)", col= "blue", xlab = "states ordered in order of decreasing number of people") #notice that this is a graph of the log of the number of individuals with hometowns of each state
# The log of counts has a nice linear shape, so we'll base bins off of that
bins <- c(0, 10^seq(0, 3.5, 0.5)) #log-spaced bin edges: 0, 1, ~3, 10, ~32, ...
pal <- colorBin("YlOrRd", domain = states$count, bins = floor(bins))
m <- leaflet(states)
m <- addTiles(m)
m <- addPolygons(m, fillColor = ~pal(counts), weight=1, color="white", fillOpacity = 0.7)
m <- addLegend(m, "bottomright", pal=pal, values=counts)
m
#LOOK AT OUR PRETTY MAP in the viewer! (screenshot is also attached in the 1 page handout)
#BONUS POINT 11: THIS MAP IS GORGEOUS, THANKS TO GGPLOT!! :)
#BONUS POINTS 5, 19: This map is not found in any of the textbook or class scripts
# If we want to plot every individual location:
addCircleMarkers(m, lng=locs_file$lon, lat=locs_file$lat, label=M$Former.Church, radius=1)
#BONUS POINTS 5, 19: This map with location markers is not found in any of the textbook or class scripts
#END SECTION 6
#BEGIN SECTION 7
#Let's try to fit the distances of migrants to a probability density function
#NOTE(review): `dist` shadows the base stats::dist function for the rest of the session
dist <- M$Distance[M$Distance>0]; dist #get all the distances of the migrants
mu <- mean(dist); mu #mean
sigma <- sd(dist); sigma #standard deviation
hist(dist, probability = TRUE, breaks = 100, xlab = "Distance", main = "Distances traveled from hometown for migrants") #make a histogram
curve(dnorm(x, mu, sigma), add = TRUE, col = "blue") #evidently, the distances of migrants don't really follow a normal distribution neatly, so our distances of migrants probably doesn't follow a normal distribution
#REQUIRED ANALYSIS 3^ this is a probability density function overlaid on a histogram
#END SECTION 7
|
f2a9102eca6fcaa6c34a0cca5380487b42209e36
|
092bb1455f7f78c48c5f1083f570f710f4e23f23
|
/cachematrix.R
|
6002b514df0f2935ea17733d59789ab1e2cfc86a
|
[] |
no_license
|
petersharp21/ProgrammingAssignment2
|
f25470b1dfac5e9b89c2d9aa80330510098590b4
|
851c24e66d1cf6a4b41a10e83b4bed7a9a12335e
|
refs/heads/master
| 2021-01-15T09:43:41.489301
| 2015-10-25T01:03:39
| 2015-10-25T01:03:39
| 44,890,286
| 0
| 0
| null | 2015-10-25T00:20:56
| 2015-10-25T00:20:53
| null |
UTF-8
|
R
| false
| false
| 1,872
|
r
|
cachematrix.R
|
## Put comments here that give an overall description of what your
## functions do
##==============================================================================
## The purpose of this script is to calculate the inverse of a given matrix
## utilizing R's powerful concept of storing an object in a different environment
##==============================================================================
## Write a short comment describing this function
##==================================================================================
## The function makeCacheMatrix creates a matrix and stores it in cache. It contains
## four other functions: set, get, getinverse, and setinverse. Description of each
## one can be found below.
##==================================================================================
## Create a "cache matrix": a list of closures that wrap a matrix `x`
## together with a cached value of its inverse (NULL until computed).
##   set(y)        -- replace the stored matrix and invalidate the cache
##   get()         -- return the stored matrix
##   setinverse(v) -- store a computed inverse in the cache
##   getinverse()  -- return the cached inverse, or NULL if not yet set
makeCacheMatrix <- function(x = matrix()) {
  i <- NULL
  set <- function(y) { ## changes the matrix stored in the main function
    x <<- y
    i <<- NULL  # a new matrix invalidates any previously cached inverse
  }
  get <- function() x # returns the matrix stored in the main function
  setinverse <- function(inverse) i <<- inverse ## stores the value of the input in a variable "i"
  getinverse <- function() i
  ## BUG FIX: the returned list previously named `setinverse` twice and
  ## omitted `set`, so callers could never replace the matrix via cm$set().
  list(set = set, get = get,
       setinverse = setinverse,
       getinverse = getinverse)
}
## Write a short comment describing this function
##==========================================================================================
## The function cacheSolve calculates the inverse of the matrix created with makeCacheMatrix
##==========================================================================================
## Compute (or fetch from cache) the inverse of the matrix wrapped by a
## makeCacheMatrix object `x`. Extra arguments are forwarded to solve().
## Emits a message when the cached value is reused.
cacheSolve <- function(x, ...) {
  ## Return a matrix that is the inverse of 'x'
  cached <- x$getinverse()
  if (!is.null(cached)) {
    message("getting cached data")
    return(cached)
  }
  inv <- solve(x$get(), ...)
  x$setinverse(inv)  # remember the result for subsequent calls
  inv
}
|
1bad79c4ed728de9bad04cde4923cd7037c8c2f5
|
1110482aa19ebe4daf081847f509d4ca49c3293e
|
/enemy_scraping.R
|
1e93c833e9ce1f217f4db57f7050151421f2f9aa
|
[] |
no_license
|
rpodcast/megatable
|
67862dad4a26908a95afe1fafc1f69d54661c2d4
|
da908053a8e26b6d7a51b7319bb445c734cb5d15
|
refs/heads/main
| 2023-09-03T13:04:30.850855
| 2021-11-14T04:18:09
| 2021-11-14T04:18:09
| 427,549,119
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 10,455
|
r
|
enemy_scraping.R
|
library(tidyverse)
library(janitor)
library(webshot)
library(robotstxt)
library(rvest)
# create utility functions for processing enemy / boss data fields
# Parse a raw attack-damage field such as "3 (contact); 2 (shot)" into a
# list (one entry per input element) holding the numeric damage values
# (d_points, as character) and the parenthesized damage types (d_type).
parse_attack_damage <- function(x) {
  # split by ; into a list, trimming surrounding whitespace from each piece
  x1 <- purrr::map(stringr::str_split(x, ";"), ~stringr::str_trim(.x))
  # BUG FIX: this line was written `x2 >- purrr::map(...)`, which parsed as a
  # comparison of the undefined `x2` instead of an assignment; `<-` restores it.
  x2 <- purrr::map(x1, ~{
    # extract the numeric damage value
    d_points <- stringr::str_extract(.x, "(\\d)+")
    # extract text between parentheses, e.g. "(contact)" -> "contact"
    # (namespaced with stringr:: for consistency with the other calls)
    d_type <- stringr::str_extract_all(.x, "(?<=\\().+?(?=\\))")
    list(d_points = d_points, d_type = d_type)
  })
  return(x2)
}
# set up data frame for keeping results
# One row per mainline game (1-11); enemy_url is the wiki page listing that
# game's enemies, and the *_table_index columns record which table on the
# page holds enemies vs. bosses.
megaman_df <- tibble::tibble(
game_index = c(1:11),
enemy_url = glue::glue("http://megaman.wikia.com/wiki/List_of_Mega_Man_{index}_Enemies", index = game_index),
enemy_table_index = rep(3, 11),
boss_table_index = rep(4, 11)
)
# XPaths into the wiki page body for the boss and enemy tables respectively
boss_table_xpath <- '//*[@id="mw-content-text"]/div[1]/table[4]'
enemy_table_xpath <- '//*[@id="mw-content-text"]/div[1]/table[3]'
# For every game page, scrape the boss table and pull out pieces of interest:
# the boss name (x_name), the link to its portrait (x_pic), and a stats table
# (x_data2) reduced to the health/attack/special-weapon/weakness rows.
# FIXES: removed a leftover browser() debugging call that halted execution on
# every iteration, removed a dead `x_data$X1` expression whose value was
# discarded, and corrected the "heath_points" -> "health_points" label typo.
# NOTE(review): the inner function still returns the raw node set `tmp`, as
# the original did; x_name/x_pic/x_data2 are computed but not yet returned.
megaman_df2 <- megaman_df %>%
  mutate(boss_data = purrr::map(enemy_url, ~{
    tmp <- read_html(.x) %>%
      html_elements(., xpath = boss_table_xpath) %>%
      html_elements(., "table") %>%
      html_elements(., "tbody")
    # boss name: the second <a> in the first row's cell holds the text
    x_name <- tmp %>%
      html_elements(., xpath = "tr[1]/td") %>%
      html_elements("a") %>%
      html_text() %>%
      .[2]
    # portrait link: the first <a>'s href attribute
    x_pic <- tmp %>%
      html_elements(., xpath = "tr[1]/td") %>%
      html_elements("a") %>%
      html_attr("href") %>%
      .[1]
    # stats table lives in the second row
    x_data <- tmp %>%
      html_elements(., xpath = "tr[2]/td") %>%
      html_elements(., "table") %>%
      html_table() %>%
      .[[1]]
    # obtain rows for health points, attack damage, and special weapon
    x_data2 <- x_data %>%
      filter(X1 %in% c("Health Points:", "Attack Damage:", "Special Weapon:", "Weakness:")) %>%
      mutate(X1 = case_when(
        X1 == "Health Points:" ~ "health_points",
        X1 == "Attack Damage:" ~ "attack_damage",
        X1 == "Special Weapon:" ~ "special_weapon",
        TRUE ~ "weakness"
      ))
    return(tmp)
  }))
# mm1 robots masters table
# //*[@id="mw-content-text"]/div[1]/table[4]
# mm2 robots masters table
# //*[@id="mw-content-text"]/div[1]/table[4]
# mm6 robots masters table
# //*[@id="mw-content-text"]/div[1]/table[4]
# mm7 enemies table
# //*[@id="mw-content-text"]/div[1]/table[2]
# mm7 robot master table
# //*[@id="mw-content-text"]/div[1]/table[4]
# mm8 robot master table
# //*[@id="mw-content-text"]/div[1]/table[4]
# mm9
# //*[@id="mw-content-text"]/div[1]/table[4]
# mm10
# //*[@id="mw-content-text"]/div[1]/table[4]
# mm11
# //*[@id="mw-content-text"]/div[1]/table[4]
# BUG FIX: the url assignment below was commented out, so read_html(url) ran
# against an undefined variable. Restore the Mega Man 7 enemies page URL.
url <- "http://megaman.wikia.com/wiki/List_of_Mega_Man_7_Enemies"
enemy_chart_content <- read_html(url)
# let's isolate element 63 which corresponds to the acid drop enemy
# why is table[2] skipped? Looks like that one is invisible
# xpath for name of enemy acid drop: //*[@id="mw-content-text"]/table[3]/tbody/tr[2]/td/table[1]/tbody/tr[1]/td
# xpath for name of enemy bubble bat: //*[@id="mw-content-text"]/table[3]/tbody/tr[2]/td/table[3]/tbody/tr[1]/td
# xpath for name of enemy big fish: //*[@id="mw-content-text"]/table[3]/tbody/tr[2]/td/table[4]/tbody/tr[1]/td
# xpath for name of enemy blocky: //*[@id="mw-content-text"]/table[3]/tbody/tr[2]/td/table[5]/tbody/tr[1]/td
# xpath for data of enemy acid drop: //*[@id="mw-content-text"]/table[3]/tbody/tr[2]/td/table[1]/tbody/tr[2]/td/table
# xpath for data of enemy bubble bat: //*[@id="mw-content-text"]/table[3]/tbody/tr[2]/td/table[3]/tbody/tr[2]/td/table
# xpath for data of enemy big fish: //*[@id="mw-content-text"]/table[3]/tbody/tr[2]/td/table[4]/tbody/tr[2]/td/table
# xpath for hazards table: //*[@id="mw-content-text"]/table[4]
# xpath for sub-bosses table: //*[@id="mw-content-text"]/table[5]
# xpath for robot masters table: //*[@id="mw-content-text"]/table[6]
# xpath for wily bosses table: //*[@id="mw-content-text"]/table[7]
# try to dynamically determine how many rows are in the table
# first try to determine what is different in 'valid' rows
# looks like the non-valid tables have an attribute class='hiddenStructure'
#//*[@id="mw-content-text"]/div[1]/table[3]/tbody/tr[2]/td/table[1]
#//*[@id="mw-content-text"]/div[1]/table[3]/tbody/tr[2]/td
# this grabs all of the "tables" in the big table of enemies in megaman 2
# XPath to the cell that wraps every enemy sub-table on the Mega Man 2 page;
# drilling into its <table>/<tbody> children yields one node per enemy.
table_xpath <- '//*[@id="mw-content-text"]/div[1]/table[3]/tbody/tr[2]/td'
enemy_tables <- html_nodes(enemy_chart_content, xpath = table_xpath) %>%
html_nodes(., "table") %>%
html_nodes(., "tbody")
# //*[@id="mw-content-text"]/div[1]/table[3]/tbody/tr[2]/td
# //*[@id="mw-content-text"]/div[1]/table[3]/tbody/tr[2]/td/table[1]/tbody
# //*[@id="mw-content-text"]/div[1]/table[3]/tbody/tr[2]/td/table[1]/tbody/tr[1]
# enemy pic
# //*[@id="mw-content-text"]/div[1]/table[3]/tbody/tr[2]/td/table[1]/tbody/tr[1]/td
# #mw-content-text > div.mw-parser-output > table:nth-child(11) > tbody > tr:nth-child(2) > td > table:nth-child(1) > tbody > tr:nth-child(1) > td
# enemy stats
# //*[@id="mw-content-text"]/div[1]/table[3]/tbody/tr[2]/td/table[1]/tbody/tr[2]/td
# #mw-content-text > div.mw-parser-output > table:nth-child(11) > tbody > tr:nth-child(2) > td > table:nth-child(1) > tbody > tr:nth-child(2) > td
# this blcok gets enemy name and image
# need href attributes for link to image and then enemy name
# Pull the enemy's display name and the link to its picture out of the first
# row (tr[1]/td) of one enemy sub-table node. The anchors in that cell are
# laid out so that the first <a> wraps the image (href = picture link) and
# the second <a> carries the enemy name as text.
get_enemy_pic <- function(enemy_table) {
  anchors <- html_elements(html_elements(enemy_table, xpath = "tr[1]/td"), "a")
  nm <- html_text(anchors)[2]          # enemy name
  pic <- html_attr(anchors, "href")[1] # picture link
  list(enemy_name = nm, enemy_piclink = pic)
}
# Smoke-test the helper on the first enemy sub-table
get_enemy_pic(enemy_tables[[1]])
# this block gets enemy stats
# Extract the stats table held in the enemy sub-table's second row and tidy
# the label column X1: strip the trailing colons, then snake_case the labels
# via janitor::make_clean_names. Returns the cleaned data frame.
get_enemy_stats <- function(enemy_table) {
  stats_nodes <- html_elements(html_elements(enemy_table, xpath = "tr[2]/td"), "table")
  stats <- html_table(stats_nodes)[[1]]
  stats$X1 <- make_clean_names(stringr::str_replace_all(stats$X1, ":", ""))
  stats
}
# Smoke-test the stats helper on the first enemy sub-table
get_enemy_stats(enemy_tables[[1]])
# Manual replay of the same extraction, without the helper
enemy_tables[[1]] %>%
html_elements(., xpath = "tr[2]/td") %>%
html_elements(., "table") %>%
html_table()
purrr::map(enemy_tables, ~{
html_nodes(.x, "table")
# html_nodes(., "tbody")
})
# create tibble to host enemy content
#NOTE(review): `enemy_nodeset` is referenced inside map_lgl below but is only
#defined further down (after the custom_path experiments) -- as written this
#pipeline errors unless the file is run out of order; confirm intent.
enemy_df <- tibble::tibble(
tbl_index = 1:length(enemy_tables)
) %>%
mutate(valid_tables = purrr::map_lgl(tbl_index, ~{
html_nodes(enemy_tables[.x], "table")
is.na(html_attr(enemy_nodeset[.x], "class"))
})) %>%
filter(valid_tables)
custom_path <- '//*[@id="mw-content-text"]/div[1]/table[3]/tbody/tr[2]/td/table[1]'
html_node(enemy_chart_content, xpath = custom_path) %>% html_attrs(.)
custom_path <- '//*[@id="mw-content-text"]/div[1]/table[3]/tbody/tr[2]/td/table[34]'
html_node(enemy_chart_content, xpath = custom_path) %>% html_attrs(.)
# AHA! All of the valid rows will not have any attribute of class defined
# //*[@id="mw-content-text"]/div[1]/table[3]/tbody/tr[2]/td/table[1]
# //*[@id="mw-content-text"]/div[1]/table[3]/tbody/tr[2]/td/table[2]
custom_path <- '//*[@id="mw-content-text"]/div[1]/table[3]/tbody/tr[2]/td/table[1]'
html_nodes(enemy_chart_content, xpath = custom_path) %>%
html_nodes(., "tbody") %>%
html_nodes(., "tr") %>%
html_nodes(., "td") %>%
html_text()
#NOTE(review): the next line uses the magrittr dot `.` outside of any pipe,
#so it has no left-hand-side value -- likely a stranded fragment of the pipe above.
map_chr(., ~html_attr(.x, "class"))
enemy_nodeset <- html_nodes(enemy_chart_content, xpath = custom_path)
enemy_nodeset[2] %>%
html_nodes(., "tr") %>%
html_nodes(., "td") %>%
html_nodes(., "table") %>%
html_nodes(., "tr") %>%
html_text()
# experiment with selecting based on xpath
custom_path <- '//*[@id="mw-content-text"]/div[1]/table[3]'
enemy_table2 <- html_nodes(enemy_chart_content, xpath = custom_path) %>%
html_nodes(., "tr") %>%
html_nodes(., "td") %>%
html_nodes(., "table") %>%
html_nodes(., "tr")
enemy_table2
#map(., ~html_table(.x, fill = TRUE, header = FALSE))
# KEY TIP: need to remove "tbody" from the xpaths obtained from chrome inspector
# the following gets the data associated with acid drop
custom_path <- '//*[@id="mw-content-text"]/table[3]/tr[2]/td/table[1]/tr[2]/td/table'
html_node(enemy_chart_content, xpath = custom_path) %>% html_table(.)
custom_path <- '//*[@id="mw-content-text"]/table[3]/tr[2]/td/table[33]/tr[2]/td/table'
html_node(enemy_chart_content, xpath = custom_path) %>% html_table(.)
# the following gets the name associated with the enemy as well as the link associated with it
custom_path <- '//*[@id="mw-content-text"]/table[3]/tr[2]/td/table[1]/tr[1]/td'
html_node(enemy_chart_content, xpath = custom_path) %>% html_node("b") %>% html_node("a") %>% html_attr("href")
html_node(enemy_chart_content, xpath = custom_path) %>% html_node("b") %>% html_node("a") %>% html_attr("title")
html_node(enemy_chart_content, xpath = custom_path) %>% html_node("table") %>% html_attrs(.)
# I used selector gadget to select any table and then got rid of the top one
# however this seems to have unintended consequences
css_sel <- "table:nth-child(2)"
css_sel <- "table"
# I took the second element as that corresponds to the enemies table or so it seems
# taking the td elements gives us all of the entries for "Fly Boy"
enemy_table <- html_nodes(enemy_chart_content, css_sel) %>%
#.[2] %>%
html_nodes("td")
# the first of the above td elements gives the name of the enemy
# the second of the above td elemtns is actually the sub table with the data we want
blah <- map(enemy_table, ~html_text(.x))
blah2 <- map(enemy_table, ~html_node(.x, "table"))
#element 63 corresponds to the Acid Drop enemy on this page
acid_raw <- blah2[63]
acid1 <- map(acid_raw, ~html_nodes(.x, "tr"))
acid2 <- map(acid1, ~html_nodes(.x, "td"))
acid3 <- map(acid2, ~html_nodes(.x, "table"))
acid4 <- map(acid3, ~html_table(.x, fill = FALSE, header = FALSE))
map(acid_raw, ~html_text(.x))
# a lot of these will have class "xml_missing" so we can flag which ones
# inspired by https://stackoverflow.com/questions/42135192/web-scraping-using-r-error-in-bind-rows-x-id
keep <- map_lgl(blah2, ~class(.) != "xml_missing")
blah3 <- map(blah2[keep], ~html_node(.x, "tr"))
keep <- map_lgl(blah3, ~class(.) != "xml_missing")
blah4 <- map(blah3[keep], ~html_table(.x, fill = TRUE, header = FALSE))
# here let's see about grabbing that data portion as a data frame
# SUCCESS!
enemy_table[[2]] %>%
html_node("table") %>%
html_table(fill = FALSE, header = FALSE)
|
15b0852bd35bc8439f77f37dbf9acb637ec51eb0
|
13e35292b29a1b7ab97d29709a71310f3d711429
|
/Code.R
|
acfa41c94c946d7d5c41c4090605c90b734e62b1
|
[] |
no_license
|
kshitijsankesara/Cleaning-and-Transforming-Data-using-R
|
a00baf0bd209b5824e7606c7fa9ddb6c145ff06d
|
c8278416dc60e39f5da010221ed83b8e1614f16b
|
refs/heads/master
| 2022-10-06T01:36:05.570890
| 2020-06-08T17:59:07
| 2020-06-08T17:59:07
| 266,429,976
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 6,390
|
r
|
Code.R
|
#Load the raw permit data (absolute Windows path -- TODO make portable)
permit <- read.csv("C:/Users/sofia/OneDrive/Desktop/iConsult/Permit Data/Permit Data.csv")
View(permit)
#Deleting serial number column
permit <- permit[,-1]
#Removing timestamp from application date
#(drops everything after the first space, leaving just the YYYY-MM-DD part)
permit$application_date <- sub(' .*', '', permit$application_date)
#Removing timestamp from issue date
permit$issue_date <- sub(' .*', '', permit$issue_date)
#Deleting permit application type id
permit <- permit[,-4]
#Removing application number before 2013
#(string comparison works here because the dates are zero-padded ISO format)
permit <- permit[(permit$application_date > "2012-12-31"), ]
#Removing dirty application number
#It is possible that there are different patterns of dirty data in new datafiles every month
#We can just keep adding these patterns in the below code
permit$application_number <- sub('.*\\.', '', permit$application_number)
#This will remove the decimal in the application number
#Removing Voided status type
permit <- permit[permit$status_type_name != "Voided", ]
#Replacing the NA values in the Issue Date with 1900-01-01
#(1900-01-01 is the sentinel for "not yet issued" throughout this script)
permit$issue_date[is.na(permit$issue_date)] <- "1900-01-01"
#Creating a new column DUE Date based on the time frame provided
permit["Due Date"] <- NA
permit$application_date <- as.Date(permit$application_date, format = "%Y-%m-%d")
#Each permit type gets a fixed review window (in days) added to its application date;
#anything unlisted falls through to the raw issue_date.
permit$`Due Date` <- ifelse(
permit$permit_type_description == "ROW Road Cut",
as.character(permit$application_date + 42),
ifelse(
permit$permit_type_description == "ROW Curb Cut",
as.character(permit$application_date + 21),
ifelse(
permit$permit_type_description == "Commercial Renovation / Remodel",
as.character(permit$application_date + 42),
ifelse(
#NOTE(review): "Residentail" looks misspelled -- confirm it matches the
#actual category string in the raw data before "fixing" it.
permit$permit_type_description == "Residentail Remodel",
as.character(permit$application_date + 14),
ifelse(
permit$permit_type_description == "New Commercial Construction",
as.character(permit$application_date + 56),
ifelse(
permit$permit_type_description == "New 1-2 Family Home",
as.character(permit$application_date + 28),
as.character(permit$issue_date)
)
)
)
)
)
)
#Created Is On Time Column
permit["Is On Time"] <- NA
#NOTE(review): the assignments inside the ifelse branches are redundant (ifelse
#just returns the value); plain "YES"/"NO" literals would behave identically.
permit$`Is On Time` <- ifelse(
permit$issue_date <= permit$`Due Date`,
permit$`Is On Time` <- "YES",
permit$`Is On Time` <- "NO"
)
#Permits still carrying the 1900-01-01 sentinel have not been issued yet
index <- permit$issue_date == "1900-01-01"
permit$`Is On Time`[index] <- "In Progress"
#Deleting the NA values from Neighborhood
permit$Neighborhood <- as.character(permit$Neighborhood)
permit$Neighborhood[is.na(permit$Neighborhood)] <- "Unknown"
permit <- permit[!(permit$Neighborhood == "Unknown"),]
#Creating a new column Days_for_Issue
permit["Days_for_Issue"] <- NA
permit$application_date <- as.Date(permit$application_date)
permit$issue_date <- as.Date(permit$issue_date)
#difference of two Dates yields a difftime in days
permit$Days_for_Issue <- permit$issue_date - permit$application_date
#Changing status type to Issued for all applications which had the Issue date but the status type was not issued
index2 <- permit$issue_date != "1900-01-01"
permit$status_type_name[index2] <- "Issued"
#Deleted the records which had Issue Date before the Application Date
permit <- subset(permit, issue_date == "1900-01-01" | application_date <= issue_date)
View(permit)
#Replacing negative values with -1 in permit data
#(negative Days_for_Issue comes from the 1900-01-01 sentinel rows; clamp to -1)
permit$Days_for_Issue[permit$Days_for_Issue<0] <- -1
View(permit)
#Permit Reviewers
Reviewers <- read.csv("C:/Users/sofia/OneDrive/Desktop/iConsult/Permit Data/Permit Reviewers.csv")
#Creating a new column Reviewer Name
Reviewers$ReviewerName <- paste(Reviewers$first_name, Reviewers$last_name)
#paste of two empty names yields a single space, which flags a missing reviewer
Reviewers$ReviewerName[which(Reviewers$ReviewerName == " ")] <- "Unknown"
View(Reviewers)
#Removing duplicate rows
#NOTE(review): install.packages() inside an analysis script reinstalls on every
#run -- better to install once interactively and only library() here.
install.packages("dplyr", repos = "http://cran.us.r-project.org")
library("dplyr")
permit <- permit[!duplicated(permit$application_number),]
View(permit)
#Removing dirty application number
Reviewers$application_number <- sub('.*\\.', '', Reviewers$application_number)
#Merging permit and reviewers data
PermitReviewers <- merge(Reviewers, permit, by="application_number", all=TRUE)
View(PermitReviewers)
#Removing extra columns
PermitReviewers <- PermitReviewers[,-2:-6]
PermitReviewers <- na.omit(PermitReviewers)
#Permit Approvers
Approvers <- read.csv("C:/Users/sofia/OneDrive/Desktop/iConsult/Permit Data/Permit Approvals.csv")
colnames(Approvers)[3] <- "application_number"
View(Approvers)
#Merging permit and Approvals
#NOTE(review): loading plyr after dplyr masks several dplyr verbs (e.g.
#summarise, mutate) for the rest of the session -- load plyr first, or call
#plyr::join() without attaching the package.
install.packages("plyr", repos = "http://cran.us.r-project.org")
library(plyr)
PermitApprovers <- join(Approvers, permit, by="application_number", type='right', match='all')
PermitApprovers <- na.omit(PermitApprovers)
PermitApprovers <- PermitApprovers[,-2:-4]
View(PermitApprovers)
#Alternate Permit Type
AlternatePermits <- read.csv("C:/Users/sofia/OneDrive/Desktop/iConsult/Permit Data/Alternative Permit Types.csv")
View(AlternatePermits)
#Changing date format and converting it from character to date
#AlternatePermits$application_date <- strftime(strptime(AlternatePermits$application_date,"%d/%m/%y"), "%m/%d/%Y")
AlternatePermits$issue_date <- strftime(strptime(AlternatePermits$issue_date,"%Y-%m-%d"), "%m/%d/%Y")
AlternatePermits$issue_date <- as.Date(AlternatePermits$issue_date, "%m/%d/%Y")
AlternatePermits$application_date <- as.Date(AlternatePermits$application_date, "%m/%d/%Y")
#Creating new column Days for Issue
AlternatePermits$Days_for_Issue <- NA
AlternatePermits$Days_for_Issue <- AlternatePermits$issue_date - AlternatePermits$application_date
# Changing column names
#Prefix the shared column names so the later merged tables stay unambiguous
colnames(Reviewers)[2] <- "r_application_number"
colnames(Reviewers)[1] <- "r_permit_type_description"
colnames(Reviewers)[3] <- "r_application_date"
colnames(Approvers)[3] <- "a_application_number"
colnames(Approvers)[2] <- "a_permit_type_description"
colnames(Approvers)[1] <- "a_application_date"
colnames(AlternatePermits)[3] <- "alt_application_number"
colnames(AlternatePermits)[1] <- "alt_application_date"
colnames(PermitReviewers)[1] <- "pr_application_number"
colnames(PermitApprovers)[1] <- "pa_application_number"
colnames(PermitApprovers)[7] <- "pa_permit_type_description"
colnames(PermitApprovers)[5] <- "pa_application_date"
|
e8f9451faa99c52667309c61ad2dde125e383755
|
29d87698c80e23cad4d31dafad48fee6a4e899fb
|
/R/makeRaster_AhdiAK_noQ3_hyb09c_sigma.R
|
867ac5dd62a6feff036c74d4eb526305dd7fb77a
|
[] |
no_license
|
fostergeotech/Vs30_NZ
|
56459df71b8d0148bf89cfe548a78b5f707c69bf
|
2760af63199f48ed326e370ccfd9ec8a78891aa2
|
refs/heads/master
| 2020-04-10T15:42:52.496050
| 2020-01-16T22:39:57
| 2020-01-16T22:39:57
| 161,119,831
| 0
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 621
|
r
|
makeRaster_AhdiAK_noQ3_hyb09c_sigma.R
|
# makeRaster_AhdiAK_noQ3_hyb09c_sigma.R
#
# Creates a sigma raster
# Substitutes each geology category in the AhdiGeoCats raster with the
# model standard deviation from AhdiAK_noQ3_hyb09c_lookup().
# NOTE(review): rm(list=ls()) clears the whole workspace -- this script is
# intended to be run standalone, not source()'d from other code.
rm(list=ls())
library(raster)
source("R/MODEL_AhdiAK_noQ3_hyb09c.R")
Ahdi <- raster("~/big_noDB/models/AhdiGeoCats.tif")
# 00_ICE and 00_WATER are not included in AhdiAK_noQ3_hyb09c_lookup(),
# so I add them manually - thus the first two NA entries:
subsTable <- data.frame(groupNum = seq(1,17),
sigmaVals = c(NA, NA, AhdiAK_noQ3_hyb09c_lookup()$stDv_AhdiAK_noQ3))
# substitute category numbers with sigma values, then write out as GTiff
# (overwrite=T should ideally be spelled overwrite=TRUE)
hybSigma <- subs(x = Ahdi, y = subsTable)
writeRaster(hybSigma, "~/big_noDB/models/sig_NZGD00_allNZ_AhdiAK_noQ3_hyb09c.tif", format="GTiff", overwrite=T)
|
bbfa03dbb26aed3a1a0d5b1787a916dbb86443ab
|
b3357449a175852145c9099327649df50046d0c4
|
/src/recommender/R/recommender-package.R
|
177f055df35a4fc3ac45c04d55f9475df81fdee7
|
[] |
no_license
|
savchukndr/temat14
|
acb2e5c28beee21c67c7deca942b4edca34ca6b5
|
3fb63c9c2128de0784047be72d9daf2090bf098c
|
refs/heads/master
| 2021-05-14T17:21:29.412546
| 2018-01-15T20:55:35
| 2018-01-15T20:55:35
| 116,044,592
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 364
|
r
|
recommender-package.R
|
#' Simple recommendation library
#'
#' @description
#' A library implementing collaborative filtering methods to make recommendations.
#'
#' @details
#' Contains two functions: \code{\link{ub_collaborative_filtering}} for user-based collaborative filtering
#' and \code{\link{ib_collaborative_filtering}} for item-based collaborative filtering.
#' @name recommender
NULL
|
0e32f44b5911070b4382b03df3f9204487d4049e
|
a01984c90baa149120fe852ea44888a23d9f6007
|
/man/buildStrataDFSpatial.Rd
|
6e76966f46804a308b46015ee9cfa2037b595635
|
[] |
no_license
|
cran/SamplingStrata
|
b724c3b41d35582f96d9b67afbd907211ce973ea
|
9b1b6084fd9c9f55313ccfbf558e6e834579c80d
|
refs/heads/master
| 2022-11-24T04:14:05.098505
| 2022-11-15T20:50:06
| 2022-11-15T20:50:06
| 17,693,638
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,154
|
rd
|
buildStrataDFSpatial.Rd
|
\name{buildStrataDFSpatial}
\Rdversion{1.3}
\alias{buildStrataDFSpatial}
\title{
Builds the "strata" dataframe containing information on target variables Y's
distributions in the different strata, starting from sample data or from a frame
}
\description{
This function allows to build the information regarding strata in the population required as
an input by the algorithm of Bethel for the optimal allocation.
In order to estimate means and standard deviations for target variables Y's, we need data coming from:
(1) a previous round of the survey whose sample we want to plan;
(2) sample data from a survey with variables that are proxy to the ones we are interested to;
(3) a frame containing values of Y's variables (or proxy variables) for all the population.
In all cases, each unit in the dataset must contain auxiliary information (X's variables)
and also target variables Y's (or proxy variables) values: under these conditions it is possible
to build the dataframe "strata", containing information on the distribution of Y's in the different strata
(namely, means and standard deviations), together with information on strata (total population,
if it is to be censused or not, the cost per single interview).
If the information is contained in a sample dataset, a variable named WEIGHT is expected to be
present. In case of a frame, no such variable is given, and the function will define a WEIGHT variable
for each unit, whose value is always '1'.
Missing values for each Y variable will not be taken into account in the computation of means and standard
deviations (in any case, NA's can be present in the dataset).
The dataframe "strata" is written to an external file (tab delimited, extension "txt"), and will be
used as an input by the function "optimizeStrata".
}
\usage{
buildStrataDFSpatial(dataset,
fitting=c(1),
range=c(0),
kappa=3,
progress=FALSE,
verbose=FALSE)
}
\arguments{
\item{dataset}{
This is the name of the dataframe containing the sample data, or the frame data.
It is strictly required that auxiliary information is organised in variables named
as X1, X2, ... , Xm (there should be at least one of them) and the target variables
are denoted by Y1, Y2, ... , Yn.
In addition, in case of sample data, a variable named 'WEIGHT' must be present in the dataframe,
containing the weights associated with each sampling unit
}
\item{fitting}{
Fitting of the model(s).
Default is 1.
}
\item{range}{
Maximum range for spatial autocorrelation
}
\item{kappa}{
Factor used in evaluating spatial autocorrelation.
Default is 3.
}
\item{progress}{
If set to TRUE, a progress bar is visualised during the execution. Default is FALSE.
}
\item{verbose}{
If set to TRUE, information is given about the number of strata generated. Default is FALSE.
}
}
\value{
A dataframe containing strata
}
\author{
Giulio Barcaroli
}
\examples{
\dontrun{
strata <- buildStrataDFSpatial(dataset=frame,range=800)
}
}
\keyword{ survey }
|
4ac063c3b78310e252a1afc874eeb03bb851d991
|
0e6d3ed19aa2ef50bf4e4bd164cb3383c106a84f
|
/GWAS/misc_gwas_traits/aterido_psa_process_data_and_project.R
|
b1810a99addfea99e5b6ed8c4afab870b84cd9df
|
[
"MIT"
] |
permissive
|
ollyburren/basis_paper
|
4cdefd86a8811efb0bbeeae6975b804f6f7639a6
|
7393390c1b1f5b673049202d293994704cebeafb
|
refs/heads/master
| 2020-03-29T22:06:03.889635
| 2019-10-23T17:06:15
| 2019-10-23T17:06:15
| 150,402,845
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,895
|
r
|
aterido_psa_process_data_and_project.R
|
## Aterido PsA GWAS: align summary statistics to the basis SNP manifest,
## shrink the log odds ratios, and project both cohorts onto the basis.
library(annotSnpStats)
library(rtracklayer)
SNP_MANIFEST <-'/home/ob219/share/as_basis/GWAS/snp_manifest/gwas_june_19_w_vitiligo.tab'
SHRINKAGE_FILE <- '/home/ob219/share/as_basis/GWAS/support/ss_shrinkage_gwas_0619.RDS'
BASIS_FILE <- '/home/ob219/share/as_basis/GWAS/support/ss_basis_gwas_0619.RDS'
OUT_FILE <- '/home/ob219/share/as_basis/GWAS/psa_aterido/psa_aterido_0619.RDS'
SRC_OUT_DIR <- '/home/ob219/share/as_basis/GWAS/for_fdr'
psa.DT <- fread("/home/ob219/share/Data/GWAS-summary/psa-aterido/psa_Aterido.csv")
## SNP identifier: chromosome:position
psa.DT[,pid:=paste(CHR,POS,sep=':')]
## Remove every copy of any duplicated pid.
## (fixed: the original tested pid against a whole data.table,
## `psa.DT[duplicated(pid),]`, which never matched; the pid column is needed)
psa.DT <- psa.DT[!pid %in% psa.DT[duplicated(pid), pid],]
psa.DT[,id:=1:.N]
## note OR are with respect to A1
man.DT <- fread(SNP_MANIFEST)
## North American (ORN/PN) and Spanish (ORS/PS) cohorts, restricted to manifest SNPs
am.DT <- merge(psa.DT[,.(pid,a1=A1,a2=A2,or=ORN,p.value=PN)],man.DT,by='pid')
span.DT <- merge(psa.DT[,.(pid,a1=A1,a2=A2,or=ORS,p.value=PS)],man.DT,by='pid')
##alignment will be the same for both studies
alleles <- data.table(pid=am.DT$pid,al.x = paste(am.DT$ref_a1,am.DT$ref_a2,sep='/'),al.y=paste(am.DT$a1,am.DT$a2,sep='/'))
## classify allele alignment; identical allele strings default to 'match'
align.class <- rep('match',nrow(alleles))
idx<-which(alleles$al.x!=alleles$al.y)
x.alleles <- alleles[idx,]$al.x
names(x.alleles)<-alleles[idx,]$pid
y.alleles <- alleles[idx,]$al.y
names(y.alleles)<-names(x.alleles)
align.class[idx] <- g.class(x.alleles,y.alleles)
print(table(align.class))
alleles[,g.class:=align.class]
## Drop SNPs whose alleles cannot be reconciled with the manifest.
## (fixed: the original subtracted rows from an undefined object `M`;
## the impossible pids must be removed from both cohort tables)
idx<-which(alleles$g.class=='impossible')
if(length(idx) >0){
  bad.pid <- alleles$pid[idx]
  am.DT <- am.DT[!pid %in% bad.pid,]
  span.DT <- span.DT[!pid %in% bad.pid,]
  alleles <- alleles[-idx,]
}
am.DT <- merge(am.DT,alleles[,.(pid,g.class)],by='pid',all.x=TRUE)
span.DT <- merge(span.DT,alleles[,.(pid,g.class)],by='pid',all.x=TRUE)
## so here alleles match we need to flip as we want wrt to a2
am.DT[g.class=='match',or:=1/or]
span.DT[g.class=='match',or:=1/or]
am.DT[,trait:='na_psa']
span.DT[,trait:='span_psa']
sDT <- readRDS(SHRINKAGE_FILE)
stmp<-sDT[,.(pid,ws_emp_shrinkage)]
## Project each cohort onto the basis and stack the projections.
res <- lapply(list(N=am.DT,S=span.DT),function(M){
  tmp <- merge(M,stmp,by='pid',all.y=TRUE)
  tra <- tmp[!is.na(trait),]$trait %>% unique
  pfile <- file.path(SRC_OUT_DIR,sprintf("%s_source.RDS",tra))
  saveRDS(tmp[,.(pid,or,p.value,ws_emp_shrinkage)],file=pfile)
  ## shrunk metric = shrinkage weight * log odds ratio
  tmp$metric <- tmp[['ws_emp_shrinkage']] * log(tmp$or)
  tmp[,trait:=tra]
  ## where snp is missing make it zero
  tmp[is.na(metric),metric:=0]
  saveRDS(tmp,file=sprintf('%s/%s_source.RDS',SRC_OUT_DIR,tra))
  ## one row per trait, one column per SNP, ordered to match the basis rotation
  B <- dcast(tmp,pid ~ trait,value.var='metric')
  snames <- B[,1]$pid
  mat.emp <- as.matrix(B[,-1]) %>% t()
  colnames(mat.emp) <- snames
  pc.emp <- readRDS(BASIS_FILE)
  if(!identical(colnames(mat.emp),rownames(pc.emp$rotation)))
    stop("Something wrong basis and projection matrix don't match")
  all.proj <- predict(pc.emp,newdata=mat.emp)
}) %>% do.call('rbind',.)
saveRDS(res,file=OUT_FILE)
|
4a61384f784b1cda3d66929a00851047677419ed
|
ea1191378907a7857c86b8628a5720074ae8bd7e
|
/UVAapp_v0.2/server/landmarks/server_landmarks_table.R
|
2ea101edd353ed8455f97ec840f74a94b6cd1044
|
[] |
no_license
|
simongonzalez/uva
|
cc10e0badaa97f6bf75aabd9045d8ea16ecbeb11
|
5458c0935ce0ed5830a7e6305735a4d85ff95e76
|
refs/heads/master
| 2022-12-03T04:10:16.986146
| 2022-11-23T00:44:20
| 2022-11-23T00:44:20
| 211,875,233
| 4
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 14,626
|
r
|
server_landmarks_table.R
|
#creates the reactive value to create the landmark table
# Reactive: one row per token (first point, x coordinate only) with the
# point/coord/pixel/mm columns dropped; also writes the result to a CSV.
prep_landmark_table <- reactive({
#if plot data is null
if(is.null(all_plot_data$d))
return()
#creates a vector with the names of the columns to be dropped
drops <- c('point', 'coord', 'pixel', 'mm')
#imports the plot data
#subsets data to the first point
#subsets data to the x points
#drops the columns in the vector
df <- all_plot_data$d %>%
filter(point == 1) %>%
filter(coord == 'x') %>%
dplyr::select(-one_of(drops))
# NOTE(review): side effect -- writes relative to the app's working
# directory; assumes ./workingFiles exists at run time (confirm)
write.csv(df, paste0('./workingFiles/df_landmark.csv'), row.names = F)
#returns the dataframe
return(df)
})
#creates the landmark table display
# both the landmark and gridline tables render the same reactive data
output$landmark_table = output$gridline_table = DT::renderDataTable(
prep_landmark_table(),
options = list(lengthChange = T, dom = 'tip', scrollX = TRUE),
rownames = FALSE, style = "bootstrap"
)
#This section controls the data management
#sets the dataframe and their values
# `vals` holds the app's mutable state; vals$Data is the summary table with
# one row per speaker/segment/repetition/frame token.
vals <- reactiveValues()
# re-derive vals$Data whenever the imported file changes
observe({
if(!is.null(importFiles()[[1]])){
dat <- as.data.table(importFiles()[[1]])
# keep one row per token: first point, x coordinate only
dat_short <- dat %>%
filter(point == 1) %>%
filter(coord == 'x') %>%
dplyr::select(speaker,segment,repetition,frame)
vals$Data <- dat_short
}
})
# Main data table: adds a checkbox column ("Select") and per-row
# Delete/Modify buttons ("Actions") as raw HTML (hence escape = F).
# Checkbox values are "Row<i>" and button ids are "delete_<i>" / "modify_<i>"
# so the row index can be recovered from client-side events.
output$Main_table <- renderDataTable({
DT <- vals$Data
DT[["Select"]] <- paste0('<input type="checkbox" name="row_selected" value="Row',1:nrow(vals$Data),'"><br>')
DT[["Actions"]] <-
paste0('
<div class="btn-group" role="group" aria-label="Basic example">
<button type="button" class="btn btn-secondary delete" id=delete_',1:nrow(vals$Data),'>Delete</button>
<button type="button" class="btn btn-secondary modify"id=modify_',1:nrow(vals$Data),'>Modify</button>
</div>
')
datatable(DT,
escape=F, style = "bootstrap")}
)
# Delete every table row whose checkbox is ticked when the header
# delete button is pressed.
observeEvent(input$Del_row_head, {
  # checkbox values look like "Row<i>"; strip the prefix to get the indices
  selected_rows <- as.numeric(gsub("Row", "", input$checked_rows))
  # drop the selected rows from the backing table
  vals$Data <- vals$Data[-selected_rows]
})
#Visualisation
# Gather the full contour data for every checked row, reshape it to wide
# (x/y columns), and stash it in vals$plotvalues for the comparison plot.
observeEvent(input$Compare_row_head,{
#gets the index of the rows
row_to_del <- as.numeric(gsub("Row","",input$checked_rows))
#gets the total number of rows to be compared
number_brands <- length(row_to_del)
#gets the plotting values
#get the index of the rows
tmp_dat_short <- vals$Data
tmp_dat_short <- tmp_dat_short[row_to_del]
# accumulator for the selected tokens' raw contour rows
plot_dat <- data.frame(matrix(nrow = 0, ncol = ncol(importFiles()[[1]])))
names(plot_dat) <- names(importFiles()[[1]])
names(plot_dat)[7] <- 'measurement'
tpm_incoming_data <- as.data.table(importFiles()[[1]])
names(tpm_incoming_data)[7] <- 'measurement'
# NOTE(review): the nested loops walk every observed
# speaker/segment/repetition/frame combination in the selection and grow
# plot_dat with rbind -- effectively an inner join on those four keys,
# O(n^2) as written; candidate for a keyed merge.
for(speaker_i in unique(tmp_dat_short$speaker)){
speaker_df <- tmp_dat_short[tmp_dat_short$speaker == speaker_i,]
for(segment_i in unique(speaker_df$segment)){
segment_df <- speaker_df[speaker_df$segment == segment_i,]
for(repetition_i in unique(segment_df$repetition)){
repetition_df <- segment_df[segment_df$repetition == repetition_i,]
for(frame_i in unique(repetition_df$frame)){
iterated_df <- tpm_incoming_data[tpm_incoming_data$speaker == speaker_i &
tpm_incoming_data$segment == segment_i &
tpm_incoming_data$repetition == repetition_i &
tpm_incoming_data$frame == frame_i,]
names(iterated_df) <- names(plot_dat)
plot_dat <- rbind(plot_dat, iterated_df)
}
}
}
}
# wide format: one column per coordinate (x and y)
plot_wide <- spread(plot_dat, coord, measurement)
#creates baseline values
vals$plotvalues <- as.data.table(plot_wide)
#as factor
# factors so ggplot treats them as discrete grouping variables
vals$plotvalues[,speaker:=as.factor(speaker)]
vals$plotvalues[,segment:=as.factor(segment)]
vals$plotvalues[,repetition:=as.factor(repetition)]
vals$plotvalues[,frame:=as.factor(frame)]
#shows the plot in a UI modal window
showModal(plotvalues_modal)
}
)
# Modal dialog that shows the comparison plot for the selected rows.
plotvalues_modal <- modalDialog(
  fluidPage(
    h3(strong("Contours for selected tokens"), align = "center"),
    plotOutput('table_view_plot')
  ),
  # size of the modal window, large in this case
  # (fixed: the original `size <- "l"` assigned a stray global variable and
  # passed "l" as extra UI content instead of setting the modal size;
  # the sibling modal_modify below correctly uses `size = "l"`)
  size = "l"
)
# Smoothed tongue-contour plot for the tokens selected in the main table.
# The grouping/colour aesthetic depends on how many distinct segments,
# repetitions and frames are selected. This reproduces the original
# 8-branch table (which duplicated the whole ggplot call per branch):
#   1 segment,  1 rep,  1 frame  -> group/colour by frame
#   1 segment, >1 rep,  1 frame  -> group/colour by repetition
#   1 segment,        >1 frame   -> group by repetition x frame, colour by frame
#   >1 segment, 1 rep,  1 frame  -> group/colour by segment
#   >1 segment, 1 rep, >1 frame  -> group by segment x frame, colour by segment
#   >1 segment, >1 rep           -> group by segment x repetition, colour by segment
output$table_view_plot <- renderPlot({
  dat <- vals$plotvalues
  n_segment    <- length(unique(dat$segment))
  n_repetition <- length(unique(dat$repetition))
  n_frame      <- length(unique(dat$frame))
  # NOTE(review): as in the original, `p` is only created for single-speaker
  # selections; a multi-speaker selection errors ("object 'p' not found").
  if (length(unique(dat$speaker)) == 1) {
    if (n_segment == 1) {
      if (n_frame > 1) {
        mapping <- aes(x = point, y = y, group = interaction(repetition, frame), colour = frame)
      } else if (n_repetition > 1) {
        mapping <- aes(x = point, y = y, group = repetition, colour = repetition)
      } else {
        mapping <- aes(x = point, y = y, group = frame, colour = frame)
      }
    } else {
      if (n_repetition > 1) {
        mapping <- aes(x = point, y = y, group = interaction(segment, repetition), colour = segment)
      } else if (n_frame > 1) {
        mapping <- aes(x = point, y = y, group = interaction(segment, frame), colour = segment)
      } else {
        mapping <- aes(x = point, y = y, group = segment, colour = segment)
      }
    }
    p <- ggplot(dat, mapping) +
      geom_line(formula = y ~ x, stat = 'smooth', method = 'loess') +
      scale_x_continuous() +
      labs(x = 'Tongue Advancement', y = 'Tongue Height')
    # when main_invert_y is unchecked the y axis is reversed
    if (!input$main_invert_y) {
      p <- p + scale_y_reverse()
    }
  }
  p <- p + theme_minimal()
  print(p)
})
##Managing in row deletion / modification
# Modal for editing a single row: shows the current values plus a row of
# HTML input fields, and a Save button whose click gathers every .new_input
# value client-side and relays the list to Shiny as input$newValue.
modal_modify <- modalDialog(
fluidPage(
h3(strong("Row modification"),align="center"),
hr(),
#row selected
dataTableOutput('row_modif'),
#action button to save the changes
actionButton("save_changes","Save changes"),
# JS: collect all .new_input values and send them to input$newValue
tags$script(HTML("$(document).on('click', '#save_changes', function () {
var list_value=[]
for (i = 0; i < $( '.new_input' ).length; i++)
{
list_value.push($( '.new_input' )[i].value)
}
Shiny.onInputChange('newValue', list_value)
});"))
),
size="l"
)
#checks the last clicked button
# Dispatch for the per-row buttons: element ids look like "delete_<i>" or
# "modify_<i>" (%like% is data.table's grepl wrapper).
observeEvent(input$lastClick,
{
if (input$lastClickId%like%"delete")
{
#if the user clicks the delete button
#gets the row index
row_to_del=as.numeric(gsub("delete_","",input$lastClickId))
#deletes it from the dataset
vals$Data=vals$Data[-row_to_del]
}
else if (input$lastClickId%like%"modify")
{
#if the user clicks the modify button
#open a modal window
showModal(modal_modify)
}
}
)
#modifying the dataset
# Renders the two-row table shown in the modify modal: the selected row's
# current values, and a row of HTML inputs (class "new_input") whose type
# (number/text) matches each column's type.
output$row_modif <- renderDataTable({
#selected row to modify (index recovered from the "modify_<i>" button id)
selected_row <- as.numeric(gsub("modify_","",input$lastClickId))
#gets the old row values
old_row <- vals$Data[selected_row]
#creates a list to store the new values
row_change <- list()
#iterates through all the columns
for (i in colnames(old_row))
{
if (is.numeric(vals$Data[[i]]))
{
#if the column value is numeric
row_change[[i]] <- paste0('<input class="new_input" type="number" id=new_',i,'><br>')
}
else
#if the column value is a character
row_change[[i]] <- paste0('<input class="new_input" type="text" id=new_',i,'><br>')
}
#converts the list values to a datatable class
row_change <- as.data.table(row_change)
#sets the names to the column names of the original data
setnames(row_change,colnames(old_row))
#adds the row to the original data
DT <- rbind(old_row,row_change)
rownames(DT) <- c("Current values","New values")
DT
},escape=F,options=list(dom='t',ordering=F)
)
#if new values are entered
# Coerce each submitted value back to numeric where possible, rebuild the
# row as a one-row data frame, and overwrite the edited row in vals$Data.
observeEvent(input$newValue,
{
newValue=lapply(input$newValue, function(col) {
# numeric-looking strings become numbers; everything else stays as-is
if (suppressWarnings(all(!is.na(as.numeric(as.character(col)))))) {
as.numeric(as.character(col))
} else {
col
}
})
DF <- data.frame(lapply(newValue, function(x) t(data.frame(x))))
colnames(DF) <- colnames(vals$Data)
# the edited row index comes from the clicked "modify_<i>" button id
vals$Data[as.numeric(gsub("modify_","",input$lastClickId))] <- DF
}
)
|
d275fd0be4f91bdb16fae4f6abc215477e27b7be
|
f7b05054901f8506886fd2fac0a30c5cdb9a8752
|
/Football/2014Football.R
|
3161e548c25b64333fa698d800de8b55e9f30629
|
[] |
no_license
|
tmulc18/MulcSeniorThesis
|
a92da8eb3a5991a3f4be250978f90cfb367178d8
|
ab2e0b7ae654f92468eae9b902945e5711ac2e44
|
refs/heads/master
| 2021-01-21T13:04:48.994813
| 2016-05-08T02:27:37
| 2016-05-08T02:27:37
| 44,121,851
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 556
|
r
|
2014Football.R
|
# Build and plot a football schedule graph from a two-column edge list,
# then overlay walktrap community detection.
# NOTE(review): setwd() with an absolute personal path makes the script
# non-portable; prefer running from the project root.
setwd('/home/mulctv/Rose_Classes/MathThesis/')
library(igraph)
dat<-read.csv('clean.csv',header=TRUE)
colnames(dat)<-NULL
# construction 1: via a character edge-list matrix
el=as.matrix(dat)
el[,1]=as.character(el[,1])
el[,2]=as.character(el[,2])
g=graph.edgelist(el,directed=FALSE)
# collapse repeated match-ups into single edges; keep self-loops
g = simplify(g, remove.multiple = TRUE, remove.loops = FALSE)
plot(g)
#OR
# construction 2: equivalent, straight from the data frame
g2=graph.data.frame(dat,directed=FALSE)
g2 = simplify(g2, remove.multiple = TRUE, remove.loops = FALSE)
plot(g2)
# walktrap communities drawn on a Kamada-Kawai layout
plot(cluster_walktrap(g2),g2, mark.groups=NULL,layout=layout.kamada.kawai,vertex.label.cex=.4,vertex.size=7,margin=c(-.1,-.4,-.1,-.4))
|
96922107cbc2b5ab424f623fe054cd1c6f371fe8
|
594b92e3d4a43afb8de69164926f62defe853b8c
|
/script1.R
|
7f51ad2ba496572ea4d039ed1ac8e1df5d6fd541
|
[] |
no_license
|
vulkin/cleaningdata
|
4b797739f1f34150f738cc4e8c5ceaeb1ec47d69
|
63ffa99fa79b72e19e1093cbd2ccf9e0dc1a48b0
|
refs/heads/master
| 2020-05-19T19:53:08.132797
| 2015-07-06T10:07:47
| 2015-07-06T10:07:47
| 35,217,846
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,201
|
r
|
script1.R
|
## Read and merge the UCI HAR test/train data, keep the mean/std columns,
## and summarise each measurement per subject and activity.
##reading subject ids for test data
subject_test<-read.table("./data/test/subject_test.txt")
##reading test data
testset<-read.table("./data/test/x_test.txt")
##reading acitivty ids for test data
activity<-read.table("./data/test/y_test.txt")
##reading feature names (kept as character, not factors)
features<-read.table("./data/features.txt",colClasses="character")
##replacing column names of test data with feature names
## (vectorised; replaces the original element-by-element loop over 1:561)
names(testset) <- features[, 2]
##combining test data columns: subject id, activity id, measurements
test<-cbind(subject_test,activity,testset)
##reading subject ids for train data
subject_train<-read.table("./data/train/subject_train.txt")
##reading training data
trainset<-read.table("./data/train/x_train.txt")
##replacing training data column names with feature names
names(trainset) <- features[, 2]
##reading activity ids for training data
activitytrain<-read.table("./data/train/y_train.txt")
## combining training data columns
train<-cbind(subject_train,activitytrain,trainset)
## combining test and training data set
data<-rbind(test,train)
library(dplyr)
## converting to tbl
initial<-tbl_df(data)
##making all the column names valid and unique
validnames<-make.names(names=names(initial),unique=TRUE,allow_= TRUE)
names(initial)<-validnames
names(initial)[1]<- "subjectid"
names(initial)[2]<- "activityid"
##extracting the columns with "mean" or "std" in their names
step2<-select(initial,subjectid,activityid,contains("mean"),contains("std"))
## grouping data acc to subject and acitivity ids
step3<-group_by(step2,subjectid,activityid)
## sorting data acc to subject ids followed by acitivty ids
step3<-arrange(step3,subjectid,activityid)
## converting acitvity id into factor and replacing labels with actual acitvity labels
step3$activityid<-as.factor(step3$activityid)
## NOTE(review): loading plyr after dplyr masks several dplyr verbs;
## only revalue() is needed from plyr here.
library(plyr)
step3$activityid<-revalue(step3$activityid,c("1"="WALKING","2"="WALKING_UPSTAIRS","3"="WALKING_DOWNSTAIRS","4"="SITTING","5"="STANDING","6"="LAYING"))
## again grouping acc to subject and acivity ids
grouped<-group_by(step3,subjectid,activityid)
## summarising each measurement column per group
## (fixed: the original call was missing its closing parenthesis;
## summarise_each()/funs() are deprecated in modern dplyr but kept for
## compatibility with the rest of the script)
step5<-summarise_each(grouped,funs(mean))
|
e8670b4b660b0b6e41d6cf1bf5b487f288dfb54b
|
f63a9c1887ec71cae6d65f88c33ddc99f3fded4a
|
/R/mongo_index.R
|
7ea2bf2f0869129c2c47ba2bb1ef357dbde702ea
|
[] |
no_license
|
agnaldodasilva/rmongodb
|
41b337c42b4b6e1fb41b9ad2949fab1e6a850fb0
|
8eb2bca2d9c88f542832d1bcb6ccd209fdfc676c
|
refs/heads/master
| 2020-08-07T15:39:41.703738
| 2016-03-21T10:36:28
| 2016-03-21T10:36:28
| 213,510,405
| 1
| 0
| null | 2019-10-08T00:10:59
| 2019-10-08T00:10:59
| null |
UTF-8
|
R
| false
| false
| 6,451
|
r
|
mongo_index.R
|
#' mongo.index.create flag constant - unique keys
#'
#' \code{\link{mongo.index.create}()} flag constant - unique keys (no
#' duplicates). Corresponds to MongoDB's \code{unique} index option.
#'
#'
#' @return 1L
#' @export mongo.index.unique
mongo.index.unique <- 1L
#' mongo.index.create flag constant - drop duplicate keys
#'
#' \code{\link{mongo.index.create}()} flag constant - drop duplicate keys.
#' Corresponds to MongoDB's (deprecated) \code{dropDups} index option.
#'
#'
#' @return 4L
#' @export mongo.index.drop.dups
mongo.index.drop.dups <- 4L
#' mongo.index.create flag constant - background
#'
#' \code{\link{mongo.index.create}()} flag constant - background.
#' Corresponds to MongoDB's \code{background} index build option.
#'
#'
#' @return 8L
#' @export mongo.index.background
mongo.index.background <- 8L
#' mongo.index.create flag constant - sparse
#'
#' \code{\link{mongo.index.create}()} flag constant - sparse.
#' Corresponds to MongoDB's \code{sparse} index option.
#'
#'
#' @return 16L
#' @export mongo.index.sparse
mongo.index.sparse <- 16L
#' Add an index to a collection
#'
#' Add an index to a collection.
#'
#' See \url{http://www.mongodb.org/display/DOCS/Indexes}.
#'
#'
#' @param mongo (\link{mongo}) A mongo connection object.
#' @param ns (string) The namespace of the collection to which to add an index.
#' @param key An object enumerating the fields in order which are to
#' participate in the index. This object may be a vector of strings listing the
#' key fields or a \link{mongo.bson} object containing the key fields in the
#' desired order.
#'
#' Alternately, \code{key} may be a list which will be converted to a
#' mongo.bson object by \code{\link{mongo.bson.from.list}()}.
#'
#' Alternately, \code{key} may be a valid JSON character string which will be converted to a
#' mongo.bson object by \code{\link{mongo.bson.from.JSON}()}.
#' @param options (integer vector) Optional flags governing the operation:
#' \itemize{ \item\code{\link{mongo.index.unique}}
#' \item\code{\link{mongo.index.drop.dups}}
#' \item\code{\link{mongo.index.background}}
#' \item\code{\link{mongo.index.sparse}} }
#' @return NULL if successful; otherwise, a \link{mongo.bson} object describing
#' the error.\cr \code{\link{mongo.get.server.err}()} or
#' \code{\link{mongo.get.server.err.string}()} may alternately be called in
#' this case instead of examining the returned object.
#' @seealso \code{\link{mongo.find}},\cr \code{\link{mongo.find.one}},\cr
#' \code{\link{mongo.insert}},\cr \code{\link{mongo.update}},\cr
#' \code{\link{mongo.remove}},\cr \link{mongo},\cr \link{mongo.bson}.
#' @examples
#'
#' mongo <- mongo.create()
#' if (mongo.is.connected(mongo)) {
#'     # Add a city index to collection people in database test
#'     b <- mongo.index.create(mongo, "test.people", '{"city":1}')
#'     if (!is.null(b)) {
#'         print(b)
#'         stop("Server error")
#'     }
#'
#'     # Add an index to collection people in database test
#'     # which will speed up queries of age followed by name
#'     b <- mongo.index.create(mongo, "test.people", c("age", "name"))
#'
#'     buf <- mongo.bson.buffer.create()
#'     mongo.bson.buffer.append(buf, "age", 1L)
#'     mongo.bson.buffer.append(buf, "name", 1L)
#'     key <- mongo.bson.from.buffer(buf)
#'
#'     # add an index using an alternate method of specifying the key fields
#'     b <- mongo.index.create(mongo, "test.people", key)
#'
#'     # create an index using list of that enumerates the key fields
#'     b <- mongo.index.create(mongo, "test.cars", list(make=1L, model=1L))
#' }
#'
#' @export mongo.index.create
mongo.index.create <- function(mongo, ns, key, options=0L) {
  # Fail fast when there is no live connection.
  if (!mongo.is.connected(mongo))
    stop("No mongoDB connection!")
  # Normalise `key` to a form the C layer accepts:
  #  * mongo.bson objects and plain character vectors of field names pass
  #    through unchanged
  #  * lists are converted with mongo.bson.from.list()
  #  * character input that validates as JSON is parsed with
  #    mongo.bson.from.JSON()
  # (inherits() replaces the fragile `class(x) == "..."` comparisons and the
  # no-op `key <- key` branches of the original.)
  if (inherits(key, "list")) {
    key <- mongo.bson.from.list(key)
  } else if (inherits(key, "character") && validate(I(key))) {
    key <- mongo.bson.from.JSON(key)
  }
  .Call(".mongo.index.create", mongo, ns, key, options)
}
#' Add a time to live (TTL) index to a collection
#'
#' Add a time to live (TTL) index to a collection
#'
#' See \url{http://docs.mongodb.org/manual/tutorial/expire-data}.
#'
#'
#' @param mongo (\link{mongo}) A mongo connection object.
#' @param ns (string) The namespace of the collection to add a TTL index to.
#' @param key (\link{mongo.bson}) The desired field(s) to use as the basis for expiration time. The field should be of type 'Date'.
#'
#' Alternately, \code{key} may be a list which will be converted to a
#' mongo.bson object by \code{\link{mongo.bson.from.list}()}.
#'
#' Alternately, \code{key} may be a valid JSON character string which will be converted to a
#' mongo.bson object by \code{\link{mongo.bson.from.JSON}()}.
#'
#' @param expireAfterSeconds (Numeric or Integer) The time in seconds after which records should be removed.
#'
#' @param index_name (string) The name of the index to be created.
#'
#' @return NULL if the command failed. \code{\link{mongo.get.err}()} may be
#' MONGO_COMMAND_FAILED.
#'
#' (\link{mongo.bson}) The server's response if successful.
#'
#' @seealso \code{\link{mongo.index.create}}
#' @examples
#' mongo <- mongo.create()
#' if (mongo.is.connected(mongo)) {
#'   for (i in 1:10) mongo.insert(mongo, ns = 'test.testTTL', b = list(a = i, last_updated = i))
#'   res_bson <- mongo.index.TTLcreate (mongo, ns = 'test.testTTL', key = list(last_updated = 1),
#'       expireAfterSeconds = 3600, index_name = 'last_updated_1')
#'   print(res_bson);
#'   mongo.drop(mongo, ns = 'test.testTTL')
#' }
#' mongo.destroy(mongo);
#' @export mongo.index.TTLcreate
mongo.index.TTLcreate <- function(mongo, ns, key, expireAfterSeconds, index_name = NULL) {
  # Split "db.collection" into its two parts.
  parsed_ns <- mongo.parse.ns(ns)
  # Refuse to proceed without a live connection.
  if (!mongo.is.connected(mongo))
    stop("No mongoDB connection!")
  # Normalise the key specification to a plain list of field -> direction.
  key_fields <- mongo.list.from.argument(key)
  # Default index name: the key field names joined with dots.
  if (!is.character(index_name)) {
    index_name <- paste(names(unlist(key_fields)), collapse = '.')
  }
  # Assemble the createIndexes command document (field order preserved).
  index_spec <- list(
    name = index_name,
    expireAfterSeconds = expireAfterSeconds,
    key = key_fields
  )
  command_bson <- mongo.bson.from.list(
    list(createIndexes = parsed_ns$collection, indexes = list(index_spec))
  )
  result <- mongo.command(mongo, db = parsed_ns$db, command_bson)
  if (is.null(result)) {
    warning("Probably index was not created (syntax error), try to see last error: mongo.get.err(), mongo.get.last.err()")
  }
  result
}
|
bddcb5ce21d0dc9e15b5f71690b565162cdeacfe
|
f56054d4a2426922877d8fed45f56d36137f623d
|
/extras/CodeToRunLocal.R
|
e1b03b0d6d6228427e260ee45266a6180402215c
|
[
"Apache-2.0"
] |
permissive
|
ohdsi-studies/Covid19EstimationHydroxychloroquine2
|
2bfc4e3c36acc69bf77c9410772fd33f8a802048
|
9d5ef0eef55802a9c1832538981c48d656ea27c6
|
refs/heads/master
| 2022-12-12T12:11:44.632990
| 2020-07-24T15:18:27
| 2020-07-24T15:18:27
| 268,395,526
| 1
| 1
| null | 2020-08-29T21:02:19
| 2020-06-01T01:17:40
|
R
|
UTF-8
|
R
| false
| false
| 8,549
|
r
|
CodeToRunLocal.R
|
# CodeToRunLocal.R -- local driver for the HCQ estimation study.
# Workflow: run this setup, then EXACTLY ONE database-settings section
# below, then the "Run" section.
library(Covid19EstimationHydroxychloroquine2)
options(fftempdir = "S:/FFTemp")  # scratch space for ff's large temp files
maxCores <- parallel::detectCores()
studyFolder <- "G:/StudyResults/Covid19EstimationHcqPsychInfluenza"
# Loads server/port/email credentials into environment variables.
source("S:/MiscCode/SetEnvironmentVariables.R")
connectionDetails <- DatabaseConnector::createConnectionDetails(dbms = "pdw",
                                                                server = Sys.getenv("server"),
                                                                user = NULL,
                                                                password = NULL,
                                                                port = Sys.getenv("port"))
# Settings used by OhdsiRTools::runAndNotify() to email completion status.
mailSettings <- list(from = Sys.getenv("emailAddress"),
                     to = c(Sys.getenv("emailAddress")),
                     smtp = list(host.name = Sys.getenv("emailHost"), port = 25,
                                 user.name = Sys.getenv("emailAddress"),
                                 passwd = Sys.getenv("emailPassword"), ssl = FALSE),
                     authenticate = FALSE,
                     send = TRUE)
# Database settings ------------------------------------------------------------
# Run exactly ONE section; each overwrites databaseId/databaseName/
# outputFolder (and, for the first six, the CDM schema and cohort table).
# NOTE(review): the last five sections set no cdmDatabaseSchema or cohort
# table -- presumably those sites run remotely and only their results
# folders are used locally; confirm before executing against them.
# CCAE settings ---------------------------------------------------------------- done
databaseId <- "CCAE"
databaseName <- "CCAE"
databaseDescription <- "CCAE"
cdmDatabaseSchema <- "CDM_IBM_CCAE_V1103.dbo"
outputFolder <- file.path(studyFolder, databaseId)
cohortDatabaseSchema = "scratch.dbo"
cohortTable = "covid19_hcq_psych_ccae"
# Optum DOD settings ----------------------------------------------------------- done
databaseId <- "Clinformatics"
databaseName <- "Clinformatics"
databaseDescription <- "Clinformatics"
cdmDatabaseSchema = "CDM_OPTUM_EXTENDED_DOD_V1107.dbo"
outputFolder <- file.path(studyFolder, databaseId)
cohortDatabaseSchema <- "scratch.dbo"
cohortTable <- "covid19_hcq_psych_optum"
# CPRD settings ---------------------------------------------------------------- done
databaseId <- "CPRD"
databaseName <- "CPRD"
databaseDescription <- "CPRD"
cdmDatabaseSchema = "CDM_CPRD_V1102.dbo"
outputFolder <- file.path(studyFolder, databaseId)
cohortDatabaseSchema <- "scratch.dbo"
cohortTable <- "covid19_hcq_psych_cprd"
# MDCD settings ---------------------------------------------------------------- done
databaseId <- "MDCD"
databaseName <- "MDCD"
databaseDescription <- "MDCD"
cdmDatabaseSchema = "CDM_IBM_MDCD_V1105.dbo"
outputFolder <- file.path(studyFolder, databaseId)
cohortDatabaseSchema <- "scratch.dbo"
cohortTable <- "covid19_hcq_psych_mdcd"
# MDCR settings ---------------------------------------------------------------- done
databaseId <- "MDCR"
databaseName <- "MDCR"
databaseDescription <- "MDCR"
cdmDatabaseSchema = "CDM_IBM_MDCR_V1104.dbo"
outputFolder <- file.path(studyFolder, databaseName)
cohortDatabaseSchema <- "scratch.dbo"
cohortTable <- "covid19_hcq_psych_mdcr"
# PanTher ---------------------------------------------------------------------- done
databaseId <- "OptumEHR"
databaseName <- "OptumEHR"
databaseDescription <- "OptumEHR"
cdmDatabaseSchema = "CDM_OPTUM_PANTHER_V1109.dbo"
outputFolder <- file.path(studyFolder, databaseName)
cohortDatabaseSchema <- "scratch.dbo"
cohortTable <- "covid19_hcq_psych_panther"
# DAGermany --------------------------------------------------------------------
databaseId <- "DAGermany"
databaseName <- "DAGermany"
outputFolder <- file.path(studyFolder, databaseName)
# VA ---------------------------------------------------------------------------
databaseId <- "VA"
databaseName <- "VA"
outputFolder <- file.path(studyFolder, databaseName)
# IMRD -------------------------------------------------------------------------
databaseId <- "IMRD"
databaseName <- "IMRD"
outputFolder <- file.path(studyFolder, databaseName)
# AmbEMR -----------------------------------------------------------------------
databaseId <- "AmbEMR"
databaseName <- "AmbEMR"
outputFolder <- file.path(studyFolder, databaseName)
# OpenClaims -------------------------------------------------------------------
databaseId <- "OpenClaims"
databaseName <- "OpenClaims"
outputFolder <- file.path(studyFolder, databaseName)
# Run --------------------------------------------------------------------------
# Executes the full study pipeline for the database selected above and
# emails a notification (via mailSettings) when it finishes or errors.
OhdsiRTools::runAndNotify(expression = {
  execute(connectionDetails = connectionDetails,
          cdmDatabaseSchema = cdmDatabaseSchema,
          cohortDatabaseSchema = cohortDatabaseSchema,
          cohortTable = cohortTable,
          oracleTempSchema = NULL,
          outputFolder = outputFolder,
          databaseId = databaseId,
          databaseName = databaseName,
          databaseDescription = databaseDescription,
          createCohorts = TRUE,
          synthesizePositiveControls = FALSE,
          runAnalyses = TRUE,
          runDiagnostics = FALSE,
          packageResults = TRUE,
          maxCores = maxCores)
}, mailSettings = mailSettings, label = paste0("Covid19EstimationHydroxychloroquine2 ", databaseId), stopOnWarning = FALSE)
# Post-processing ---------------------------------------------------------------
# Unpack the packaged results of the current database for the Shiny
# evidence explorer.
resultsZipFile <- file.path(outputFolder, "export", paste0("Results", databaseId, ".zip"))
dataFolder <- file.path(outputFolder, "shinyData")
prepareForEvidenceExplorer(resultsZipFile = resultsZipFile, dataFolder = dataFolder)
# Harmonise database ids that contain spaces in externally delivered results.
renameDatabaseIds(outputFolder = file.path(studyFolder, "OpenClaims"), oldDatabaseId = "Open Claims", newDatabaseId = "OpenClaims")
renameDatabaseIds(outputFolder = file.path(studyFolder, "DAGermany"), oldDatabaseId = "DA Germany", newDatabaseId = "DAGermany")
# Meta-analysis across all available sites (commented-out sites had no
# results at the time this was run).
doMetaAnalysis(studyFolder = studyFolder,
               outputFolders = c(file.path(studyFolder, "CCAE"),
                                 file.path(studyFolder, "Clinformatics"),
                                 file.path(studyFolder, "CPRD"),
                                 file.path(studyFolder, "MDCD"),
                                 file.path(studyFolder, "MDCR"),
                                 #file.path(studyFolder, "JMDC"),
                                 file.path(studyFolder, "OptumEHR"),
                                 file.path(studyFolder, "DAGermany"),
                                 #file.path(studyFolder, "VA"),
                                 file.path(studyFolder, "IMRD"),
                                 file.path(studyFolder, "OpenClaims"),
                                 file.path(studyFolder, "AmbEMR")
                                 #file.path(studyFolder, "SIDIAP"),
                                 #file.path(studyFolder, "IPCI")
               ),
               maOutputFolder = file.path(studyFolder, "MetaAnalysis"),
               maxCores = maxCores)
# Collect every site's shinyData files (plus the meta-analysis) into one
# folder for the combined evidence explorer.
fullShinyDataFolder <- file.path(studyFolder, "shinyData")
if (!file.exists(fullShinyDataFolder)) {
  dir.create(fullShinyDataFolder)
}
file.copy(from = c(list.files(file.path(studyFolder, "CCAE", "shinyData"), full.names = TRUE),
                   list.files(file.path(studyFolder, "Clinformatics", "shinyData"), full.names = TRUE),
                   list.files(file.path(studyFolder, "CPRD", "shinyData"), full.names = TRUE),
                   list.files(file.path(studyFolder, "MDCD", "shinyData"), full.names = TRUE),
                   list.files(file.path(studyFolder, "MDCR", "shinyData"), full.names = TRUE),
                   # list.files(file.path(studyFolder, "JMDC", "shinyData"), full.names = TRUE),
                   list.files(file.path(studyFolder, "OptumEHR", "shinyData"), full.names = TRUE),
                   list.files(file.path(studyFolder, "DAGermany", "shinyData"), full.names = TRUE),
                   # list.files(file.path(studyFolder, "VA", "shinyData"), full.names = TRUE),
                   list.files(file.path(studyFolder, "IMRD", "shinyData"), full.names = TRUE),
                   list.files(file.path(studyFolder, "OpenClaims", "shinyData"), full.names = TRUE),
                   list.files(file.path(studyFolder, "AmbEMR", "shinyData"), full.names = TRUE),
                   # list.files(file.path(studyFolder, "SIDIAP", "shinyData"), full.names = TRUE),
                   # list.files(file.path(studyFolder, "IPCI", "shinyData"), full.names = TRUE),
                   list.files(file.path(studyFolder, "MetaAnalysis", "shinyData"), full.names = TRUE)),
          to = fullShinyDataFolder,
          overwrite = TRUE)
# Pre-merge and clean, then launch the local evidence explorer.
premergeCleanShinyData(fullShinyDataFolder = fullShinyDataFolder,
                       premergedCleanShinyDataFolder = file.path(studyFolder, "premergedCleanShinyData"))
premergedCleanShinyData <- file.path(studyFolder, "premergedCleanShinyData")
launchEvidenceExplorer(dataFolder = premergedCleanShinyData, blind = FALSE, launch.browser = FALSE)
|
3cc46a18b25685c69b2575fb7f7c7fd98c9af3ba
|
7a666f42ac91b2542aac48a6975fc04d936e8bcc
|
/week4-quiz.R
|
84d2be347226da00ef9a86671c3a0c906b9c71d5
|
[] |
no_license
|
FernandoRoque/coursera-datascience-capstone
|
7f6f7db9344f5f928b6efb460340e0fb3c03239f
|
6ac06a40a79980ca2684449ebf447be11f208aaf
|
refs/heads/master
| 2021-05-31T11:20:13.222092
| 2016-04-24T08:57:41
| 2016-04-24T08:57:41
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,113
|
r
|
week4-quiz.R
|
# week4-quiz.R -- answer the week-4 quiz by predicting the next word of
# each sentence with the Markov-chain model defined in
# week4-MarkovChain.R (which provides markovChainModel,
# predictFollowingWord and preprocessInputText).
source("./week4-MarkovChain.R")

# Print the next-word predictions for each quiz question.
week4 <- function() {
  questions <- c(
    "When you breathe, I want to be the air for you. I'll be there for you, I'd live and I'd",
    "Guy at my table's wife got up to go to the bathroom and I asked about dessert and he started telling me about his",
    "I'd give anything to see arctic monkeys this",
    "Talking to your mom has the same effect as a hug and helps reduce your",
    "When you were in Holland you were like 1 inch away from me but you hadn't time to take a",
    "I'd just like all of these questions answered, a presentation of evidence, and a jury to settle the",
    "I can't deal with unsymetrical things. I can't even hold an uneven number of bags of groceries in each",
    "Every inch of you is perfect from the bottom to the",
    "Iโm thankful my childhood was filled with imagination and bruises from playing",
    "I like how the same people are in almost all of Adam Sandler's")
  # Iterate over the elements directly: safe for an empty vector,
  # unlike the original 1:length(questions) pattern.
  for (question in questions) {
    predictions <- predictFollowingWord(markovChainModel, preprocessInputText(question))
    print(predictions)
  }
}

week4()
|
7a924d9f0fe890c16439640822012bb3d1b953d2
|
474c7d9d9aa0731a69460f8d7db866e5f8641f5a
|
/R/rows.R
|
38101b7171a1d5a646065a614c9f92b0feb4f230
|
[] |
no_license
|
jengelaere/shinydreal
|
c75b6745eb9e25640b3300e69974d0ac3c6e5015
|
7a8f4d962690c2e8283ab97d91a1c4f7b9249e0d
|
refs/heads/master
| 2023-04-03T13:25:28.604936
| 2019-12-10T10:46:59
| 2019-12-10T10:46:59
| 360,187,324
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,449
|
r
|
rows.R
|
#' Page body elements
#'
#' `dr_panel` is a panel for use inside navbarPage and sidebarPage
#' layouts. `dr_row` creates a new row in the UI, `dr_col(width = X)`
#' creates a column of width X. `dr_col_*` 12, 6, 4 and 3 create
#' elements of width 12, 6, 4 and 3 respectively.
#'
#' @param id ID, for `dr_panel`
#' @param width column width, for `dr_col`.
#' @param ... element(s) to insert
#'
#' @return An HTML tag list.
#' @export
#' @importFrom htmltools tags
#' @rdname bodyelements
#'
dr_panel <- function(id, ...){
  # A <div class="panel"> wrapper carrying the given id.
  tags$div(
    id = id,
    class = "panel",
    ...
  )
}
#' @export
#' @importFrom htmltools tags
#' @rdname bodyelements
dr_row <- function(...) {
  # Wrap the supplied elements in a Bootstrap grid row.
  tags$div(class = "row", ...)
}
#' @export
#' @importFrom htmltools tags
#' @rdname bodyelements
dr_col <- function(width, ...) {
  # Bootstrap column spanning `width` grid units at the lg breakpoint.
  col_class <- sprintf("col-lg-%s", width)
  tags$div(class = col_class, ...)
}
#' @export
#' @importFrom htmltools tags
#' @rdname bodyelements
dr_col_12 <- function(...) {
  # Convenience wrapper: full-width (12/12) column.
  dr_col(width = 12, ...)
}
#' @export
#' @importFrom htmltools tags
#' @rdname bodyelements
dr_col_6 <- function(...) {
  # Convenience wrapper: half-width (6/12) column.
  dr_col(width = 6, ...)
}
#' @export
#' @importFrom htmltools tags
#' @rdname bodyelements
dr_col_4 <- function(...) {
  # Convenience wrapper: third-width (4/12) column.
  dr_col(width = 4, ...)
}
#' @export
#' @importFrom htmltools tags
#' @rdname bodyelements
dr_col_3 <- function(...) {
  # Convenience wrapper: quarter-width (3/12) column.
  dr_col(width = 3, ...)
}
|
5517e7c98c64b4ff3a89938dc33dd513f5a788cf
|
177821a1018c289ee071d8d84a3884d7523755dc
|
/man/bfmPlot.Rd
|
4aa9aeace8d7f01e65cf2a7239f7c37048b802e4
|
[] |
no_license
|
bendv/bfastPlot
|
4ce50d03cdffd0e05c4fc401f9c7390af6861b4d
|
d2668f76e553d770a968c9e7043b5e583ec1f60f
|
refs/heads/master
| 2021-01-02T09:20:47.639752
| 2016-01-06T18:17:24
| 2016-01-06T18:17:24
| 25,979,600
| 1
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 4,143
|
rd
|
bfmPlot.Rd
|
% Generated by roxygen2 (4.0.2): do not edit by hand
\name{bfmPlot}
\alias{bfmPlot}
\title{ggplot bfastmonitor}
\usage{
bfmPlot(bfm, plotlabs = NULL, ncols = 1, rescale = 1, ylab = "response",
displayMagn = FALSE, magn_ypos = 0.3, magn_xoffset = -0.45,
magn_digits = 3, displayTrend = TRUE, displayResiduals = c("none",
"all", "monperiod", "history"), type = "irregular")
}
\arguments{
\item{bfm}{Object of type \code{bfastmonitor} or a \code{list} of such objects}
\item{plotlabs}{Character. Optional: vector of facet plot labels. These should correspond to \code{bfm}}
\item{ncols}{Numeric. Number of columns in plot}
\item{rescale}{Numeric. Factor by which to rescale data}
\item{ylab}{Character. y-axis label}
\item{displayMagn}{Logical. Display magnitude on plot? See \code{\link{bfastmonitor}} for more information}
\item{magn_ypos}{Numeric. Vertical position of magnitude label on plot (relative to y-range)}
\item{magn_xoffset}{Numeric. Horizontal offset of magnitude label relative to the start of the monitoring period (vertical black line)}
\item{magn_digits}{Numeric. Number of digits to round magnitude}
\item{displayResiduals}{Character. Section of the plot where residuals should be highlighted. Defaults to "none" - no residuals highlighted.}
\item{type}{Character. Type of time series. Can be either "irregular" (Landsat-type) or "16-day" (MODIS-type). See \code{\link{bfastts}} for more information.}
\item{displayTrend}{Logical. Display the trend component of the history model as a dotted blue line?}
}
\value{
ggplot object (see \code{\link{ggplot}}).
}
\description{
Generate a ggplot object from a (list of) bfastmonitor object(s)
}
\examples{
# adapted from help page of bfastmonitor
library(bfast)
library(ggplot2)
NDVIa <- as.ts(zoo(som$NDVI.a, som$Time))
plot(NDVIa)
## apply the bfast monitor function on the data
## start of the monitoring period is c(2010, 13)
## and the ROC method is used as a method to automatically identify a stable history
mona1 <- bfastmonitor(NDVIa, start = c(2010, 13), formula = response ~ harmon, order = 3)
class(mona1)
# regular plot
plot(mona1)
# ggplot of the same
p <- bfmPlot(mona1)
p
## the advantage of ggplot is that it is object-based
## additional layers can simply be added
# change to black/white background
p2 <- p + theme_bw()
p2
## combine several bfastmonitor objects into one facet plot
mona2 <- bfastmonitor(NDVIa, start = c(2010, 13), formula = response~harmon, order=2)
mona3 <- bfastmonitor(NDVIa, start = c(2010, 13), formula = response~harmon, order=1)
p3 <- bfmPlot(list(mona1, mona2, mona3), plotlabs = c("order = 3", "order = 2", "order = 1")) + theme_bw()
p3
# it's not necessary to show the trend when there is none
p4 <- bfmPlot(list(mona1, mona2, mona3),
plotlabs = c("order = 3", "order = 2", "order = 1"), displayTrend = FALSE) + theme_bw()
p4
# compare land cover time series
data(tura_ts1) # cropland pixel
data(tura_ts2) # forest pixel
data(tura_ts3) # conversion of forest to cropland
x <- list(tura_ts1, tura_ts2, tura_ts3)
y <- lapply(x, FUN=function(z) bfastts(z, dates = time2date(time(z)), type = "irregular"))
bfm <- lapply(y, FUN=function(z) bfastmonitor(z, start = c(2008, 1), formula = response~harmon, order = 1, history = "all"))
p5 <- bfmPlot(bfm, displayResiduals = "monperiod", plotlabs = c("cropland", "forest", "forest to cropland"), displayTrend = FALSE) + theme_bw()
p5 <- p5 + labs(y = "NDVI")
p5
# sequential monitoring periods for forest disturbance monitoring
# convert to 'regular' bfast time series
x <- bfastts(tura_ts3, dates = time2date(time(tura_ts1)), type = "irregular")
years <- c(2005:2009)
bfm <- lapply(years, FUN=function(z) bfastmonitor(window(x, end = c(z + 1, 1)), start = c(z, 1), history = "all", formula = response ~ harmon, order = 1))
## returns a list of bfastmonitor objects
# show all results with change magnitudes for each monitoring period
# also show residuals in the monitoring period only
p6 <- bfmPlot(bfm, plotlabs = years, displayTrend = FALSE, displayMagn = TRUE, displayResiduals = "monperiod") + theme_bw()
p6
}
\author{
Ben DeVries
}
\seealso{
\code{\link{bfmPredict}}
}
|
bf5a894cfe0026d77d07bee1cb6ec3c9ec3d4041
|
68743ad37cb9ef70b5b18b3420d14b43dbe8dd11
|
/tests/testthat/test_tpca.R
|
a2d34795263cdc789882f397fa203094a6aeb164
|
[] |
no_license
|
Tveten/tpca
|
b1f153373a5bb5121055299b3982e38552062f7a
|
ed6a46e41863846f420adedaa163ccf82feac9b1
|
refs/heads/master
| 2021-06-16T11:46:34.812149
| 2021-06-09T12:38:30
| 2021-06-09T12:38:30
| 157,405,783
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,609
|
r
|
test_tpca.R
|
context('tpca and tpca_helpers')

test_that('tpca returns sensible output', {
  # Small random covariance/correlation matrix; n_sim kept low so the
  # test runs quickly.
  N <- 10
  n_sim <- 10^2
  cor_mat <- rcov_mat(N, N/2)
  cutoffs <- c(0, 0.5, 0.8, 0.9, 0.99, 1)
  for (j in seq_along(cutoffs)) {
    tpca_obj <- tpca(cor_mat, cutoff = cutoffs[j], n_sim = n_sim)
    # cutoff = 0 should retain a single axis; cutoff = 1 all N axes.
    if (j == 1) expect_equal(length(tpca_obj$which_axes), 1)
    if (j == length(cutoffs)) expect_equal(length(tpca_obj$which_axes), N)
    # Simulated divergences: one row per dimension, one column per draw.
    expect_true(all(dim(tpca_obj$divergence_sim) == c(N, n_sim)))
    # No element of the returned object may be NULL, NA or NaN.
    for (i in seq_along(tpca_obj)) {
      expect_true(!any(is.na(tpca_obj[[i]])))
      expect_true(!is.null(tpca_obj[[i]]))
      expect_true(!any(is.nan(tpca_obj[[i]])))
    }
  }
})
# Assert that two vectors are elementwise equal.
# NOTE(review): all(x == y) recycles silently when lengths differ and
# yields NA when either vector contains NA -- confirm call sites only
# pass equal-length, NA-free vectors.
expect_identical_vectors <- function(x, y) {
  expect_true(all(x == y))
}
test_that('which_dims_cor return correct dimensions', {
  # For a matrix with K0 correlated dimensions, which_dims_cor() is
  # expected to return the first K0 indices (0 when there are none).
  N <- 10
  K0 <- c(0, 2, 5, 10)
  cor_mats <- lapply(K0, rcor_mat, N = N)
  which_dims_list <- lapply(cor_mats, which_dims_cor)
  expected_output <- list(0, 1:2, 1:5, 1:10)
  Map(expect_identical_vectors, which_dims_list, expected_output)
})
test_that('which_axes returns correctly', {
  prop_max <- c(0.01, 0.04, 0.05, 0.1, 0.3, 0.5)
  keep_prop <- c(0, 0.5, 0.9, 0.93, 1)
  max_axes <- c(1, 2, 5)
  # Expected axes, listed row-wise: one row per keep_prop value, one
  # column per max_axes value. Descending ranges (6:5 = c(6, 5))
  # presumably reflect axes ordered by decreasing prop_max -- confirm
  # against the which_axes() implementation.
  expect_list <- list(6, 6, 6,
                      6, 6, 6,
                      6, 6:5, 6:4,
                      6, 6:5, 6:3,
                      6, 6:5, 6:2)
  for (i in seq_along(keep_prop)) {
    for (j in seq_along(max_axes)) {
      # Map (i, j) onto the flat row-major expect_list index.
      expect_identical_vectors(which_axes(prop_max, keep_prop[i], max_axes[j]),
                               expect_list[[(i - 1) * length(max_axes) + j]])
    }
  }
})
|
93fc7158541c26d1a85aacd26d382404685d5dfa
|
6b79bcbef25d7755f0abbbbcac7548f5566ab1e8
|
/script/chap13_1_DecisionTree.R
|
9f018871a24b832cf07a306c30592168584fa10f
|
[] |
no_license
|
Joo-seoyeong/R
|
5abb83c999a06893f38f0b0d93bb8f48a4e5a7f9
|
74aae57f8156cc0e5e70a4f28525dc05a911d37b
|
refs/heads/master
| 2020-12-10T08:43:29.480617
| 2020-01-13T08:38:12
| 2020-01-13T08:38:12
| 233,547,588
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,082
|
r
|
chap13_1_DecisionTree.R
|
# chap13_1_DecisionTree -- decision-tree classification demo (rpart).
# Install the required package.
# NOTE(review): install.packages() on every run is teaching-script style.
install.packages("rpart")
library(rpart)
# Package for visualising the fitted tree.
install.packages("rpart.plot")
library(rpart.plot)

# 1. dataset (train/test split): iris, 70% training
idx <- sample(nrow(iris),nrow(iris)*0.7)
train <- iris[idx,]
test <- iris[-idx,]
names(iris)

# 2. classification model: Species predicted from all other columns
model <- rpart(Species~., data=train)
model

# visualise the classification tree
rpart.plot(model)
# [variable importance] most influential variable: "Petal.Length"

# 3. model evaluation
y_pred <- predict(model,test) # predicted class probabilities
y_pred
y_pred <- predict(model, test, type="class") # predicted class labels
y_pred

y_true <- test$Species

# confusion matrix
table(y_true, y_pred)
# y_pred
# y_true       setosa versicolor virginica
# setosa         15          0         0
# versicolor      0         16         1
# virginica       0          2        11

# NOTE(review): the counts below come from one particular random split;
# re-running sample() above will change them.
acc <- (15+16+11)/ nrow(test)
acc # 0.9333333 (classification accuracy)
#################################
#### Titanic classification #####
#################################
setwd("c:/Rwork/data")
titanic3 <- read.csv("titanic3.csv")
str(titanic3) # 'data.frame': 1309 obs. of 14 variables

# titanic3.csv variable description
# 'data.frame': 1309 obs. of 14 variables:
# 1.pclass   : passenger class, stored as 1, 2, 3
# 2.survived : survival status. survived=1, dead=0
# 3.name     : name (excluded)
# 4.sex      : gender. female, male
# 5.age      : age
# 6.sibsp    : number of siblings/spouses aboard
# 7.parch    : number of parents/children aboard
#              (BUG FIX: this line was split so that a stray non-comment
#              fragment would error at runtime; folded back into the comment)
# 8.ticket   : ticket number (excluded)
# 9.fare     : ticket fare
# 10.cabin   : cabin number (excluded)
# 11.embarked: port of embarkation. C(Cherbourg), Q(Queenstown), S(Southampton)
# 12.boat    : (excluded) Factor w/ 28 levels "","1","10","11",..: 13 4 1 1 1 14 3 1 28 1 ...
# 13.body    : (excluded) int NA NA NA 135 NA NA NA NA NA 22 ...
# 14.home.dest: (excluded)

# int -> Factor (categorical target)
titanic3$survived <- factor(titanic3$survived, levels=c(0,1))
table(titanic3$survived)
# 0 1
# 809 500
809 / 1309 # death rate = 0.618029 (62%)

# create subset: drop the excluded columns above
titanic <- titanic3[-c(3,8,10,12,13,14)]
str(titanic) # 'data.frame': 1309 obs. of 8 variables
# $ survived: Factor w/ 2 levels "0","1"

# train/test split, 80% training
idx <- sample(nrow(titanic), nrow(titanic)*0.8)
train <- titanic[idx,]
test <- titanic[-idx,]

model <- rpart(survived~., data=train)
model
rpart.plot(model)

y_pred <- predict(model, test, type="class")
y_true <- test$survived
table(y_true, y_pred)
# y_pred
# y_true   0   1
# 0      139  21
# 1       36  66

# NOTE(review): the hard-coded counts below come from one particular
# random split; re-running sample() will change them.
acc <- (139+66)/nrow(test)
acc # 0.7824427

table(test$survived) # observed survival counts in the test set
# 0 1
# 160 102

# precision: of those predicted survived, how many truly survived
precision <- 66/(21+66)
precision # 0.7586207

# recall: of those who truly survived, how many were predicted survived
recall <- 66/(36+66)
recall # 0.6470588

# F1 score: harmonic mean of precision and recall
f1_score <- 2 * (( precision*recall) / (precision+recall))
f1_score # 0.6984127
|
853361e2a5090f61ba651354907e3e0803427e4a
|
26f56f97516a683cac7243d36773e3f02d41f169
|
/work/cecere/R/lineas/KernelGadget.R
|
4e97bb4857d3aa0439ff91d7e9b85b2b2b77eb33
|
[] |
no_license
|
marialda27/mhd-disks
|
5855b85bee1a6e0342efe5302cff0d123e492b69
|
ba94e66221ab079140435524c071bd25b6fb5824
|
refs/heads/master
| 2020-03-09T13:12:15.394725
| 2018-04-12T22:45:35
| 2018-04-12T22:45:35
| 128,804,649
| 0
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 418
|
r
|
KernelGadget.R
|
#' Cubic-spline SPH smoothing kernel (Gadget form), in 3D.
#'
#' @param dist Scalar distance from the particle centre (>= 0).
#' @param hsml Smoothing length; the kernel has compact support on [0, hsml].
#' @return Kernel value W(dist, hsml); 0 outside the support.
KernelGadget <- function(dist, hsml)
{
  # 3D normalisation of the cubic spline (~ 8 / pi).
  # NOTE(review): the original used 3.1415 rather than pi; kept so
  # results stay bit-identical -- confirm whether full precision is wanted.
  # (Unused constants COEF_1/COEF_2/COEF_5 from the original were removed.)
  NORM <- 8.0/3.1415

  # Outside the compact support the kernel vanishes.
  if (dist > hsml) {
    return(0.0)
  }

  hinv <- 1./hsml
  hinv3 <- hinv^3          # 1/h^3 dimensional prefactor (was recomputed with **)
  u <- dist*hinv           # normalised distance in [0, 1]

  if (u < 0.5) {
    # Inner branch: W ~ 1 + 6 (u - 1) u^2
    fac <- hinv3*(1.0 + 6.0 * (u - 1.0)*u*u)*NORM
  } else {
    # Outer branch: W ~ 2 (1 - u)^3
    fac <- hinv3 * 2.0 * (1. - u) * (1. - u) * (1. - u) * NORM
  }
  fac
}
|
4ce43e48d172aa37923ec8f0008c84426ccb22e0
|
62e1665efcbd67bc0de0d9be749d5d2b222c80ce
|
/man/placesvg.Rd
|
02ecceeb7e8a72eedf91fe8051da4379fc8bc859
|
[] |
no_license
|
sewouter/StratigrapheR
|
25e669143eeb73051e79e0b4cb490e6060ed0d4b
|
2d19b6cc5dbbb4bade454ad83b61842d2f8871e1
|
refs/heads/main
| 2021-09-28T00:26:51.110494
| 2021-09-24T12:23:45
| 2021-09-24T12:23:45
| 341,558,856
| 3
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 1,936
|
rd
|
placesvg.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/placesvg.R
\name{placesvg}
\alias{placesvg}
\title{Draws a pointsvg object}
\usage{
placesvg(
object,
forget = NULL,
front = NULL,
back = NULL,
standard = FALSE,
keep.ratio = FALSE,
col = NA,
border = "black",
density = NULL,
angle = 45,
lwd = par("lwd"),
lty = par("lty"),
scol = border,
slty = lty,
slwd = lwd
)
}
\arguments{
\item{object}{a pointsvg object (svg object imported as data frame
using \code{\link{pointsvg}}).}
\item{forget}{the elements that should be discarded, by their id
or index (i.e. name or number of appearance).}
\item{front, back}{the elements to be put in front and back position,
by their id or index (i.e. name or number of appearance). By default the
order is the one of the original .svg file.}
\item{standard}{whether to standardise (centre to (0,0), rescale so
that extreme points are at -1 and 1) or not (T or F)}
\item{keep.ratio}{if the object is to be standardised, whether to
keep the x/y ratio (T or F)}
\item{col}{the polygons' background color. If density is specified with
a positive value this gives the color of the shading lines.}
\item{border}{the lines color.}
\item{density}{the density of shading lines, in lines per inch. The
default value of NULL means that no shading lines are drawn.}
\item{angle}{the slope of shading lines, given as an angle in degrees
(counter-clockwise)}
\item{lty, lwd}{the border line type and width, see ?par for details.}
\item{scol, slty, slwd}{the colour, type and width of the shading lines.}
}
\description{
Draws a svg object imported as data frame using
\code{\link{pointsvg}}, with its importation coordinates (or with
standardisation).
}
\examples{
object <- example.ammonite
plot(c(-2,2), c(-2,2), type = "n")
placesvg(object, lty = 1,density = 20, angle = 45)
}
\seealso{
\code{\link{centresvg}} and \code{\link{framesvg}}
}
|
5ea4c79731bccdccb7105a3bb3ce0c7f6fe22f58
|
578866d0d9cbc74c557a18795d3ae0d602658119
|
/data_preparation/normalizeData.R
|
7d44f4f9dcb06681bf9903e4599ea80bc1c8e0d8
|
[] |
no_license
|
PerryXDeng/wheatyeeters
|
0cd31c3b38605509616eb6ba7499fc972971aeb4
|
08ea065f67f4b4354711386d47455f0e3f7d7d12
|
refs/heads/master
| 2020-05-03T05:23:56.310882
| 2019-05-05T00:29:50
| 2019-05-05T00:29:50
| 178,446,733
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,646
|
r
|
normalizeData.R
|
# normalizeData.R -- normalise the user-reported wellness metrics.
# For every player, each wellness variable is transformed with
# bestNormalize (standardize = TRUE) and the combined result is written
# to cleaned/time_series_normalized_wellness.csv.
source("readData.R")
library(tidyverse)
library(bestNormalize)

wellnessData <- readWellnessData()
playerIds <- unique(wellnessData$PlayerID)
cat("Number of Players: ", length(playerIds), sep="")

# Accumulators for the combined normalised series.
# NOTE(review): grown with c() inside the loop (O(n^2)); acceptable at
# this data size, but preallocation would scale better.
normPlayerIDs <- c()
normDate <- c()
normFatigue <- c()
normSoreness <- c()
normDesire <- c()
normIrritability <- c()
normSleepHours <- c()
normSleepQuality <- c()

for (id in playerIds)
{
  userTibble <- subset(wellnessData, PlayerID == id)
  # Skip players without any wellness records.
  if (length(userTibble$Fatigue) > 0)
  {
    print(id)
    print(length(userTibble$Fatigue))

    fatigueNormalized <- bestNormalize(userTibble$Fatigue, standardize = TRUE)
    fatNorm <- predict(fatigueNormalized)
    print(fatigueNormalized)
    # BUG FIX: the original also fitted bestNormalize on SleepHours here
    # and then called predict() on the *fatigue* model; both results were
    # unused (SleepHours is normalised below), so the dead pair was removed.
    soreness <- bestNormalize(userTibble$Soreness, standardize = TRUE)
    sorenessNorm <- predict(soreness)
    desire <- bestNormalize(userTibble$Desire, standardize = TRUE)
    desireNorm <- predict(desire)
    irritability <- bestNormalize(userTibble$Irritability, standardize = TRUE)
    irritabilityNorm <- predict(irritability)
    sleepHours <- bestNormalize(userTibble$SleepHours, standardize = TRUE)
    sleepHoursNorm <- predict(sleepHours)
    sleepQuality <- bestNormalize(userTibble$SleepQuality, standardize = TRUE)
    sleepQualityNorm <- predict(sleepQuality)

    normPlayerIDs <- c(normPlayerIDs, userTibble$PlayerID)
    normDate <- c(normDate, userTibble$TimeSinceAugFirst)
    normSoreness <- c(normSoreness, sorenessNorm)
    normFatigue <- c(normFatigue, fatNorm)
    normDesire <- c(normDesire, desireNorm)
    normIrritability <- c(normIrritability, irritabilityNorm)
    normSleepHours <- c(normSleepHours, sleepHoursNorm)
    normSleepQuality <- c(normSleepQuality, sleepQualityNorm)
  }
}

normalWellnessData <- tibble(TimeSinceAugFirst = normDate, playerID = normPlayerIDs, normSoreness = normSoreness,
                             normFatigue = normFatigue, normDesire = normDesire, normIrritability = normIrritability,
                             normSleepHours = normSleepHours, normSleepQuality = normSleepQuality)
write.csv(normalWellnessData, "cleaned/time_series_normalized_wellness.csv")

# Quick visual sanity check of two normalised variables.
# BUG FIX: removed a bare plot() call (errors with no arguments) and
# fixed the misspelled print(fagigueNormalized), which referenced an
# undefined name.
plot(normDesire, normSoreness)
print(fatigueNormalized)  # bestNormalize fit for the last player processed
|
1c33756c90d3529e0354c799353f89a1f95b6d92
|
271484f5245719ccdabd34f3779d128a6042b3b2
|
/plot1.R
|
bef31c7467d4bfc56e34c2b21184f61250f10ac0
|
[] |
no_license
|
BogdanTarus/ExData_Plotting1
|
6c48b9e2b8506abc90239fe95fc265688ec27683
|
2a72f797f1d4b13745946553f1b4e3d5f276358d
|
refs/heads/master
| 2021-05-30T09:09:57.905941
| 2015-12-13T14:53:07
| 2015-12-13T14:53:07
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 550
|
r
|
plot1.R
|
# plot1.R -- histogram of Global Active Power over 2007-02-01..02,
# saved to plot1.png (480 x 480).

# read data file
dataInput <- "../01_inputData/household_power_consumption.txt"
data <- read.table(dataInput, header=TRUE, sep=";", stringsAsFactors=FALSE, dec=".")

# select data from 1/2/2007 to 2/2/2007 (dates are stored as d/m/yyyy strings)
dataTwoDays <- data[data$Date %in% c("1/2/2007", "2/2/2007") ,]

# prepare the data to be plotted
# NOTE(review): non-numeric entries (e.g. "?" missing markers, if present
# in these two days) become NA here with a coercion warning -- confirm.
dataGlobalActivePower <- as.numeric(dataTwoDays$Global_active_power)

# make the png plot
png("plot1.png", width=480, height=480)
hist(dataGlobalActivePower, col="red", xlab="Global Active Power (kilowatts)", main="Global Active Power")
dev.off()
|
cc09a106308d9a1d9fa3208429d0e98d330daa02
|
e459bd9f284d18273e577385b9b255260681efa5
|
/6_kmeans(6th(.R
|
86df72dc6620f6a61c95c51376108efb338fa8f9
|
[
"MIT"
] |
permissive
|
tenzink692/DSR-1BM16CS143
|
41106a5841ddc218b747c08d75a8c1a810f3766d
|
5631a8cbad564d3bf171b607d2e0fb9a1eae6472
|
refs/heads/master
| 2020-08-22T14:49:27.996053
| 2019-11-12T16:50:48
| 2019-11-12T16:50:48
| 216,364,727
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,173
|
r
|
6_kmeans(6th(.R
|
path="/Users/tenzinkunsang/Documents/tenkun"
setwd(path)
library(cluster)
library(ggplot2)
#library(plyr)
#library(lattice)
#library(graphics)
x<-c(185,170,168,179,182,188)
y<-c(72,56,60,68,72,77)
#below code is for iris dataset read from iris.csv
k <- read.csv("iris.csv")
k
k1 <- k[,1:2]
k1
clsample<-data.frame(x,y)
clsample
dim(clsample)
clsample1<-data.matrix(clsample)
clsample1
cldata<-clsample1[,1:2]
cldata
#Elbow Curve
wss<-vector(mode="numeric",length=6)
wss
#wss<-(nrow(clsample1)-1)*sum(apply(clsample1,2,var))
#wss
#for(i in 1:6) {
# wss[i]=sum(kmeans(cldata,centers=i,nstart=25)$withinss)
#}
km=kmeans(cldata,2,nstart=10)
km
km$cluster
km$centers
km$withinss
km$betweenss
km$totss
km1=kmeans(k1,2)
km1
#Visualizing clusters
plot(cldata[km$cluster ==1,],col="red",xlim=c(min(cldata[,1]),max(cldata[,1])),ylim=c(min(cldata[,2]),max(cldata[,2])))
points(cldata[km$cluster == 2,],col="blue")
#below code is for iris data read from iris.csv file
plot(k1[km1$cluster ==1,],col="red",xlim=c(min(k1[,1]),max(k1[,1])),ylim=c(min(k1[,2]),max(k1[,2])))
points(k1[km1$cluster == 2,],col="blue")
plot.new()
#plot(k1$sepal.width~k1$sepal.length,col=km1$cluster)
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.