content (large_string, lengths 0–6.46M) | path (large_string, lengths 3–331) | license_type (large_string, 2 classes) | repo_name (large_string, lengths 5–125) | language (large_string, 1 class) | is_vendor (bool, 2 classes) | is_generated (bool, 2 classes) | length_bytes (int64, 4–6.46M) | extension (large_string, 75 classes)
|---|---|---|---|---|---|---|---|---|
## code to prepare `met_paris` dataset goes here
library(data.table)
library(stationaRy)
library(lubridate)
# stations <- get_station_metadata()
# Le Bourget: 071500-99999
met_paris <- get_met_data(
station_id = "071500-99999",
years = 2020
)
setDT(met_paris)
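# aggregate hourly observations to daily means, keyed by month name and date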
met_paris <- met_paris[, list(
temp = mean(temp, na.rm = TRUE),
rh = mean(rh, na.rm = TRUE)
), by = list(
month = month.name[month(time)],
date = as_date(time)
)]
# met_paris <- met_paris[, list(data = list(.SD)), by = month]
met_paris_nest <- met_paris[, list(
temp = list(.SD[, list(date, temp)]),
rh = list(.SD[, list(date, rh)])
), by = month]
# Use plain data.frames (rather than data.tables) in the exported dataset
met_paris <- as.data.frame(met_paris_nest)
met_paris$temp <- lapply(met_paris$temp, as.data.frame)
met_paris$rh <- lapply(met_paris$rh, as.data.frame)
usethis::use_data(met_paris)
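# note: use_data() errors if data/met_paris.rda already exists; re-running
# this script requires usethis::use_data(met_paris, overwrite = TRUE)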
# Test --------------------------------------------------------------------
library(toastui)
library(apexcharter)
datagrid(met_paris) %>%
grid_complex_header(
"Le Bourget meteorological data" = names(met_paris_nest)
) %>%
grid_columns(
vars = "month", width = 150
) %>%
grid_sparkline(
column = "temp",
renderer = function(data) {
apex(data, aes(date, temp), type = "area") %>%
ax_chart(sparkline = list(enabled = TRUE)) %>%
ax_yaxis(min = -5, max = 30)
}
) %>%
grid_sparkline(
column = "rh",
renderer = function(data) {
apex(data, aes(date, rh), type = "column") %>%
ax_chart(sparkline = list(enabled = TRUE)) %>%
ax_yaxis(min = 0, max = 100)
}
)
library(highcharter)
datagrid(met_paris_nest) %>%
grid_complex_header(
"Le Bourget climate data" = names(met_paris_nest)
) %>%
grid_columns(
vars = "month", width = 150
) %>%
grid_sparkline(
column = "temp",
renderer = function(data) {
hchart(data, type = "area", hcaes(date, temp)) %>%
hc_add_theme(hc_theme_sparkline()) %>%
hc_yAxis(min = -5, max = 30)
}
) %>%
grid_sparkline(
column = "rh",
renderer = function(data) {
hchart(data, type = "column", hcaes(date, rh)) %>%
hc_add_theme(hc_theme_sparkline()) %>%
hc_yAxis(min = 0, max = 100)
}
)
| /data-raw/met-paris.R | permissive | jeanantoinedasilva/toastui | R | false | false | 2,202 | r |
% Generated by roxygen2 (4.0.2): do not edit by hand
\docType{data}
\name{country.map}
\alias{country.map}
\title{A world map}
\usage{
data(country.map)
}
\description{
This data.frame corresponds to version 2.0.0 of the "Admin 0 - Countries" map from naturalearthdata.com
The data.frame was modified by removing columns with non-ASCII characters. Also,
I added a column called "region" which is the all-lowercase version of the
column "sovereignt".
}
\details{
Note that due to the resolution of the map (1:110m, or 1 cm=1,100 km), small countries are not
represented on this map. See ?country.names for a list of all countries represented on the map.
}
\examples{
\dontrun{
# render the map with ggplot2
library(ggplot2)
data(country.map)
ggplot(country.map, aes(long, lat, group=group)) + geom_polygon()
}
}
\references{
Taken from http://www.naturalearthdata.com/downloads/110m-cultural-vectors/
}
| /man/country.map.Rd | no_license | cran/choroplethrMaps | R | false | true | 909 | rd |
# Do Monte Carlo experiments for 4 sites that are roughly bivariate LN (high PPCC, low M.Skew, M.Kurt around 8)
# I have already calculated the parameters for each site based on B bootstrap replicates
# Parameters are averaged over all B bootstrap replicates
# Parameters are LN3 estimators (except the mean of O, mu_O, which uses the product moment estimator)
# Barber, Lamontagne, Vogel
# A Monte Carlo experiment evaluating estimators of Efficiency
# Nash-Sutcliffe Efficiency (NSE), Kling-Gupta Efficiency (KGE), Pool et al. (2018) Estimator (POE)
# Introducing new estimator of efficiency: Barber-Lamontagne Efficiency (LBE)
# 2.11.2019
rm(list = ls()) # Remove everything in global environment
graphics.off() # Close all open plots
setwd("C:\\Users\\jlamon02\\Dropbox\\Caitline_Code\\ForJon\\")
source(file="C:\\Users\\jlamon02\\Dropbox\\Caitline_Code\\ForJon\\functions3BT.R", local=FALSE, echo=FALSE, print.eval=FALSE)
S <- read.csv(file='c:/Users/jlamon02/Dropbox/Caitline_Code/ForJon/S1.csv',header=TRUE,sep=",")
O <- read.csv(file='c:/Users/jlamon02/Dropbox/Caitline_Code/ForJon/O1.csv',header=TRUE,sep=",")
period <- read.csv(file='c:/Users/jlamon02/Dropbox/Caitline_Code/ForJon/period1.csv',header=TRUE,sep=",")
mu_O_month <- matrix(nrow=1, ncol=12)
Co_month <- matrix(nrow=1, ncol=12)
delta_month <- matrix(nrow=1, ncol=12)
theta_month <- matrix(nrow=1, ncol=12)
rho_month <- matrix(nrow=1, ncol=12)
tau_O_month<- matrix(nrow=1, ncol=12)
tau_S_month<- matrix(nrow=1, ncol=12)
mu_u_month<- matrix(nrow=1, ncol=12)
mu_v_month<- matrix(nrow=1, ncol=12)
sd_u_month<- matrix(nrow=1, ncol=12)
sd_v_month<- matrix(nrow=1, ncol=12)
rho_log_month<- matrix(nrow=1, ncol=12)
# mixture moment estimators
mu_mix_O_month<- matrix(nrow=1, ncol=12)
var_mix_O_month<- matrix(nrow=1, ncol=12)
mu_mix_S_month<- matrix(nrow=1, ncol=12)
var_mix_S_month<- matrix(nrow=1, ncol=12)
mu_mix_SO_month<- matrix(nrow=1, ncol=12)
mu_u_mix<- matrix(nrow=1, ncol=12)
mu_v_mix<- matrix(nrow=1, ncol=12)
tau_O_mix<- matrix(nrow=1, ncol=12)
tau_S_mix<- matrix(nrow=1, ncol=12)
sd_u_mix<- matrix(nrow=1, ncol=12)
sd_v_mix<- matrix(nrow=1, ncol=12)
mu_mix_O_month_MC<- matrix(nrow=1, ncol=12)
var_mix_O_month_MC_PART<- matrix(nrow=1, ncol=12)
mu_mix_S_month_MC<- matrix(nrow=1, ncol=12)
var_mix_S_month_MC_PART<- matrix(nrow=1, ncol=12)
mu_mix_SO_month_MC<- matrix(nrow=1, ncol=12)
rho_mix<- matrix(nrow=1, ncol=12)
mu_mix_O_MC <- matrix(nrow=1, ncol=1)
var_mix_O_MC <- matrix(nrow=1, ncol=1)
mu_mix_S_MC <- matrix(nrow=1, ncol=1)
var_mix_S_MC <- matrix(nrow=1, ncol=1)
mu_mix_SO_MC <- matrix(nrow=1, ncol=1)
theta_mix_MC <- matrix(nrow=1, ncol=1)
delta_mix_MC <- matrix(nrow=1, ncol=1)
Co_mix_MC <- matrix(nrow=1, ncol=1)
r_mix_MC <- matrix(nrow=1, ncol=1)
LBE_mix_MC <- matrix(nrow=1, ncol=1)
LBEprime_mix_MC <- matrix(nrow=1, ncol=1)
mu_mix_O <- matrix(nrow=1, ncol=1)
var_mix_O <- matrix(nrow=1, ncol=1)
mu_mix_S <- matrix(nrow=1, ncol=1)
var_mix_S <- matrix(nrow=1, ncol=1)
mu_mix_SO <- matrix(nrow=1, ncol=1)
theta_mix <- matrix(nrow=1, ncol=1)
delta_mix <- matrix(nrow=1, ncol=1)
Co_mix <- matrix(nrow=1, ncol=1)
Cs_mix <- matrix(nrow=1, ncol=1)
r_mix <- matrix(nrow=1, ncol=1)
LBE_mix <- matrix(nrow=1, ncol=1)
LBEprime_mix <- matrix(nrow=1, ncol=1)
rho <- matrix(nrow=1, ncol=1)
LN2count_month <- 0 # counter for months where a negative tau forces an LN2 fit (used below)
allData <- cbind(O,S,period)
colnames(allData, do.NULL = FALSE)
colnames(allData) <- c("O","S","month")
for (m in 1:12){ # For calculations based on monthly data
oneSite_month <- subset(allData, allData$month== m) # Pull data for individual site's month
LN3params_month <- parms(oneSite_month[,1:2])
mu_O_month[m] <- LN3params_month[1] # real space mean
rho_month[m] <- LN3params_month[2]
Co_month[m] <- LN3params_month[3]
delta_month[m] <- LN3params_month[4]
theta_month[m] <- LN3params_month[5]
tau_O_month[m] <- LN3params_month[6]
tau_S_month[m] <- LN3params_month[7]
mu_u_month[m] <- LN3params_month[8]
mu_v_month[m] <- LN3params_month[9]
sd_u_month[m] <- LN3params_month[10]
sd_v_month[m] <- LN3params_month[11]
rho_log_month[m] <- LN3params_month[12]
# Per Vogel's suggestion (11/1/2019) if tau values are negative set tau to zero. This means fitting LN2 at these sites.
if (tau_O_month[m]<0 | tau_S_month[m]<0){
tau_O_month[m] <- 0
tau_S_month[m] <- 0
LN2count_month <- LN2count_month + 1
}
# mixture moment estimators
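  # For a 3-parameter lognormal X = tau + exp(Z), Z ~ N(mu, sd^2):
  #   E[X] = tau + exp(mu + sd^2/2)
  #   Var[X] = exp(2*mu + sd^2) * (exp(sd^2) - 1)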
mu_mix_O_month[m] <- tau_O_month[m]+exp(mu_u_month[m]+sd_u_month[m]^2/2)
var_mix_O_month[m] <- (exp(2*mu_u_month[m]+sd_u_month[m]^2)*(exp(sd_u_month[m]^2)-1))
mu_mix_S_month[m] <- tau_S_month[m]+exp(mu_v_month[m]+sd_v_month[m]^2/2)
var_mix_S_month[m] <- (exp(2*mu_v_month[m]+sd_v_month[m]^2)*(exp(sd_v_month[m]^2)-1))
mu_mix_SO_month[m] <- (mu_mix_S_month[m]*mu_mix_O_month[m]+rho_month[m]*sqrt(var_mix_S_month[m])*sqrt(var_mix_O_month[m]))
}
# mixture moments from RAW data
mu_mix_O <- 1/12*sum(mu_mix_O_month)
var_mix_O <- 1/12*sum(var_mix_O_month+mu_mix_O_month^2)-mu_mix_O^2
mu_mix_S <- 1/12*sum(mu_mix_S_month)
var_mix_S <- 1/12*sum(var_mix_S_month+mu_mix_S_month^2)-mu_mix_S^2
mu_mix_SO <- 1/12*sum(mu_mix_SO_month)
theta_mix <- sqrt(var_mix_S)/sqrt(var_mix_O)
delta_mix <- 1-mu_mix_S/mu_mix_O
Co_mix <- sqrt(var_mix_O)/mu_mix_O
Cs_mix <- sqrt(var_mix_S)/mu_mix_S
#r1_mix[i] <- (1/nrow(oneSite)*sum(oneSite[,3]*oneSite[,4])-mu_mix_O[i]*mu_mix_S[i])/(sqrt(var_mix_O[i]*var_mix_S[i]))
r_mix <- (mu_mix_SO-mu_mix_O*mu_mix_S)/(sqrt(var_mix_O*var_mix_S))
LBE_mix <- 2*theta_mix*r_mix-theta_mix^2-delta_mix^2/Co_mix^2
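# Note: LBEprime below has the same form as the Kling-Gupta Efficiency,
# KGE = 1 - sqrt((r-1)^2 + (alpha-1)^2 + (beta-1)^2)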
LBEprime_mix <- 1-sqrt(delta_mix^2+(theta_mix-1)^2+(r_mix-1)^2)
| /LBEm.R | permissive | DominikMann/Efficiency | R | false | false | 5,690 | r |
## Exploratory Data Analysis
## Project 1
## plot4.R
## December 6, 2014
##
## read data
a <- read.table("household_power_consumption.txt",header=TRUE, sep=";", na.strings="?", colClasses=c("character", "character", "numeric", "numeric", "numeric", "numeric", "numeric", "numeric", "numeric"))
a<-(a[a$Date =="1/2/2007" | a$Date =="2/2/2007",])
a$Date <- strptime(paste(a$Date, a$Time), "%d/%m/%Y %H:%M:%S")
## plotting
par(mar=c(4.1,4.1, 1.1 ,2.1), mfrow=c(2,2), cex.lab=0.8, cex.axis=0.8) # default is c(5.1, 4.1, 4.1, 2.1)
#Plot1
with(a, plot(Date,Global_active_power, type="l", xlab="", ylab="Global Active Power"))
#Plot2
with(a, plot(Date,Voltage, type="l", xlab="datetime", ylab="Voltage"))
#Plot3
with(a, plot(Date,Sub_metering_1, type="n", xlab="", ylab="Energy sub metering"))
with(a, points(Date,Sub_metering_1, type="l", col = "black"))
with(a, points(Date,Sub_metering_2, type="l", col = "red"))
with(a, points(Date,Sub_metering_3, type="l", col = "blue"))
legend("topright", legend=c("Sub_metering_1 ","Sub_metering_2 ","Sub_metering_3 "), lty=1, col=c("black","red", "blue"), cex=0.8, bty="n")
#Plot4
with(a, plot(Date,Global_reactive_power, type="l", xlab="datetime", ylab="Global_reactive_power"))
# the code that creates the PNG file
dev.copy(png, file = "plot4.png",width = 480, height = 480, units = "px")
dev.off()
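# (dev.copy() re-renders the on-screen plot to the PNG device; opening png()
# before plotting and closing with dev.off() would write the file directly)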
| /plot4.R | no_license | mark-gao/datasciencecoursera | R | false | false | 1,359 | r |
## -----------------------------------------------------------------------------
set.seed(1)
expit <- function(x) exp(x)/(1+exp(x))
n <- 10000
C1 <- rnorm(n, mean = 1, sd = 0.5)
C1_error <- C1 + rnorm(n, 0, 0.05)
C2 <- rbinom(n, 1, 0.6)
A <- rbinom(n, 1, expit(0.2 + 0.5*C1 + 0.1*C2))
mc <- matrix(c(0.9,0.1,0.1,0.9), nrow = 2)
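# mc is the misclassification matrix for the binary exposure: each column
# gives P(observed value | true value), so 90% of A values are recorded
# correctly and 10% are flipped in the loop below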
A_error <- A
for (j in 1:2) {
A_error[which(A_error == c(0,1)[j])] <-
sample(x = c(0,1), size = length(which(A_error == c(0,1)[j])),
prob = mc[, j], replace = TRUE)
}
M <- rbinom(n, 1, expit(1 + 2*A + 1.5*C1 + 0.8*C2))
Y <- rbinom(n, 1, expit(-3 - 0.4*A - 1.2*M + 0.5*A*M + 0.3*C1 - 0.6*C2))
data <- data.frame(A, A_error, M, Y, C1, C1_error, C2)
## -----------------------------------------------------------------------------
library(CMAverse)
cmdag(outcome = "Y", exposure = "A", mediator = "M",
basec = c("C1", "C2"), postc = NULL, node = TRUE, text_col = "white")
## ----message=F,warning=F,results='hide'---------------------------------------
res_naive_cont <- cmest(data = data, model = "rb", outcome = "Y", exposure = "A",
mediator = "M", basec = c("C1_error", "C2"), EMint = TRUE,
mreg = list("logistic"), yreg = "logistic",
astar = 0, a = 1, mval = list(1), yref = 1,
estimation = "paramfunc", inference = "delta")
## ----message=F,warning=F------------------------------------------------------
summary(res_naive_cont)
## ----message=F,warning=F,results='hide'---------------------------------------
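## Regression calibration ("rc") replaces the error-prone continuous
## covariate with its conditional expectation; SIMEX ("simex", used further
## below) adds simulated extra error and extrapolates back to the zero-error
## case, and also handles misclassified categorical variables.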
res_rc_cont <- cmsens(object = res_naive_cont, sens = "me", MEmethod = "rc",
MEvariable = "C1_error", MEvartype = "con", MEerror = 0.05)
## ----message=F,warning=F------------------------------------------------------
summary(res_rc_cont)
## ----message=F,warning=F,results='hide'---------------------------------------
res_simex_cont <- cmsens(object = res_naive_cont, sens = "me", MEmethod = "simex",
MEvariable = "C1_error", MEvartype = "con", MEerror = 0.05)
## ----message=F,warning=F------------------------------------------------------
summary(res_simex_cont)
## ----message=F,warning=F,results='hide'---------------------------------------
res_naive_cat <- cmest(data = data, model = "rb", outcome = "Y", exposure = "A_error",
mediator = "M", basec = c("C1", "C2"), EMint = TRUE,
mreg = list("logistic"), yreg = "logistic",
astar = 0, a = 1, mval = list(1), yref = 1,
estimation = "paramfunc", inference = "delta")
## ----message=F,warning=F------------------------------------------------------
summary(res_naive_cat)
## ----message=F,warning=F,results='hide'---------------------------------------
res_simex_cat <- cmsens(object = res_naive_cat, sens = "me", MEmethod = "simex",
MEvariable = "A_error", MEvartype = "cat", MEerror = list(mc))
## ----message=F,warning=F------------------------------------------------------
summary(res_simex_cat)
## ----message=F,warning=F,results='hide'---------------------------------------
res_true <- cmest(data = data, model = "rb", outcome = "Y", exposure = "A",
mediator = "M", basec = c("C1", "C2"), EMint = TRUE,
mreg = list("logistic"), yreg = "logistic",
astar = 0, a = 1, mval = list(1), yref = 1,
estimation = "paramfunc", inference = "delta")
## ----message=F,warning=F------------------------------------------------------
summary(res_true)
| /vignettes/measurement_error.R | no_license | david5ive/CMAverse | R | false | false | 3,635 | r |
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/cytar-producciones.R
\name{CYTARProductoAutorYear}
\alias{CYTARProductoAutorYear}
\title{CYTARProductoAutorYear}
\description{
CYTARProductoAutorYear
}
\author{
kenarab
}
\section{Super class}{
\code{\link[CYTAR:CYTARDatasource]{CYTAR::CYTARDatasource}} -> \code{CYTARProductoAutorYear}
}
\section{Methods}{
\subsection{Public methods}{
\itemize{
\item \href{#method-new}{\code{CYTARProductoAutorYear$new()}}
\item \href{#method-consolidate}{\code{CYTARProductoAutorYear$consolidate()}}
\item \href{#method-clone}{\code{CYTARProductoAutorYear$clone()}}
}
}
\if{html}{
\out{<details open ><summary>Inherited methods</summary>}
\itemize{
\item \out{<span class="pkg-link" data-pkg="CYTAR" data-topic="CYTARDatasource" data-id="loadData">}\href{../../CYTAR/html/CYTARDatasource.html#method-loadData}{\code{CYTAR::CYTARDatasource$loadData()}}\out{</span>}
}
\out{</details>}
}
\if{html}{\out{<hr>}}
\if{html}{\out{<a id="method-new"></a>}}
\if{latex}{\out{\hypertarget{method-new}{}}}
\subsection{Method \code{new()}}{
\subsection{Usage}{
\if{html}{\out{<div class="r">}}\preformatted{CYTARProductoAutorYear$new(data.url, year)}\if{html}{\out{</div>}}
}
}
\if{html}{\out{<hr>}}
\if{html}{\out{<a id="method-consolidate"></a>}}
\if{latex}{\out{\hypertarget{method-consolidate}{}}}
\subsection{Method \code{consolidate()}}{
\subsection{Usage}{
\if{html}{\out{<div class="r">}}\preformatted{CYTARProductoAutorYear$consolidate()}\if{html}{\out{</div>}}
}
}
\if{html}{\out{<hr>}}
\if{html}{\out{<a id="method-clone"></a>}}
\if{latex}{\out{\hypertarget{method-clone}{}}}
\subsection{Method \code{clone()}}{
The objects of this class are cloneable with this method.
\subsection{Usage}{
\if{html}{\out{<div class="r">}}\preformatted{CYTARProductoAutorYear$clone(deep = FALSE)}\if{html}{\out{</div>}}
}
\subsection{Arguments}{
\if{html}{\out{<div class="arguments">}}
\describe{
\item{\code{deep}}{Whether to make a deep clone.}
}
\if{html}{\out{</div>}}
}
}
}
| /man/CYTARProductoAutorYear.Rd | no_license | rOpenStats/CYTAR | R | false | true | 2,053 | rd |
#!/usr/bin/env Rscript
library(data.table)
#############
# FUNCTIONS #
#############
GenerateMessage <- function(message.text) {
message(paste0("[ ", date(), " ]: ", message.text))
}
ParsePopMap <- function(popmap) {
my_pops <- fread(popmap,
header = FALSE,
col.names = c("sample", "population"))
my_pops[, sort(unique(sample))]
}
FindSampleResultsFiles <- function(stacks_dir, samples) {
my_samples <- samples
names(my_samples) <- my_samples
my_sample_files <- lapply(my_samples, function(x)
list.files(stacks_dir,
pattern = paste0(x, "\\.")))
my_parsed_files <- lapply(my_sample_files, function(x)
data.table(
tag_file = grep("tags.tsv.gz$", x, value = TRUE),
alleles_file = grep("alleles.tsv.gz$", x, value = TRUE),
snp_file = grep("snps.tsv.gz$", x, value = TRUE)))
  rbindlist(my_parsed_files, idcol = "sample")
}
ParseIndividualLoci <- function(stacks_dir, tag_file) {
# Number of assembled loci:
# for i in *.tags.tsv.gz; do zcat $i | cut -f 3 | tail -n 1; done
GenerateMessage(paste0("Reading ",
stacks_dir,
"/",
tag_file))
my_tags <- fread(paste0("zgrep -v '^#' ", stacks_dir, "/", tag_file),
header = FALSE,
sep = "\t")
my_tags[, length(unique(V2))]
}
ParseIndividualPolymorphicLoci <- function(stacks_dir, allele_file) {
# Number of polymorphic loci:
# for i in *.alleles.tsv.gz; do zcat $i | grep -v "^#" | cut -f 3 | sort | uniq | wc -l; done
GenerateMessage(paste0("Reading ",
stacks_dir,
"/",
allele_file))
my_alleles <- fread(paste0("zgrep -v '^#' ", stacks_dir, "/", allele_file),
header = FALSE,
sep = "\t")
my_alleles[, length(unique(V2))]
}
ParseIndividualSNPs <- function(stacks_dir, snp_file) {
# Number of SNPs:
# for i in *.snps.tsv.gz; do zcat $i | grep -v "^#" | cut -f 5 | grep -c "E"; done
GenerateMessage(paste0("Reading ",
stacks_dir,
"/",
snp_file))
my_snps <- fread(paste0("zgrep -v '^#' ", stacks_dir, "/", snp_file),
header = FALSE,
sep = "\t")
dim(my_snps[V4 == "E"])[1]
}
ParsePopulationsStats <- function(stacks_dir){
# Number of loci
# cat batch_1.haplotypes.tsv | sed '1d' | wc -l
# Reads every locus regardless of population (and pop map specified) don't
# have to provide 'defaultpop'
# Polymorphic loci
# cat batch_1.sumstats.tsv | grep -v "^#" | cut -f 2 | sort -n | uniq | wc -l
# Only takes the locus ID but by sorting them means no need to specify a
# particular pop with a popmap
# SNPs
# cat batch_1.sumstats.tsv | grep -v "^#" | cut -f 2,5 | sort -n | uniq | wc -l
# Takes the locus ID and the SNP column position and works regardless of
# which pop the locus was found in
# check sumstats exists
if (length(list.files(stacks_dir,
pattern = "populations.sumstats.tsv")) == 0) {
stop(paste("populations.sumstats.tsv not found in", stacks_dir))
}
# check if haplotypes exists
if (length(list.files(stacks_dir,
pattern = "populations.haplotypes.tsv")) == 0) {
stop(paste("populations.haplotypes.tsv not found in", stacks_dir))
}
# parse hapstats
GenerateMessage(paste0("Reading ",
stacks_dir,
"/populations.haplotypes.tsv"))
my_hapstats <- fread(paste0("grep -v '^#' ",
stacks_dir,
"/populations.haplotypes.tsv"))
my_loci <- my_hapstats[, length(unique(V1))]
# parse sumstats
GenerateMessage(paste0("Reading ",
stacks_dir,
"/populations.sumstats.tsv"))
my_sumstats <- fread(paste0("grep -v '^#' ",
stacks_dir,
"/populations.sumstats.tsv"))
my_polymorphic_loci <- my_sumstats[, length(unique(V1))]
my_snps <- dim(unique(my_sumstats, by = c("V1", "V4")))[1]
# return stats
data.table(
assembled_loci = my_loci,
polymorphic_loci = my_polymorphic_loci,
snps = my_snps
)
}
###########
# GLOBALS #
###########
popmap <- snakemake@input[["map"]]
stacks_dir <- snakemake@params[["stats_dir"]]
output_pop_stats <- snakemake@output[["pop_stats"]]
output_sample_stats <- snakemake@output[["sample_stats"]]
log_file <- snakemake@log[["log"]]
########
# MAIN #
########
# set log
log <- file(log_file, open = "wt")
sink(log, type = "message")
sink(log, append = TRUE, type = "output")
# get the populations summary
population_stats <- ParsePopulationsStats(stacks_dir)
# get a list of samples
all_samples <- ParsePopMap(popmap)
# parse the file locations
sample_files <- FindSampleResultsFiles(stacks_dir, all_samples)
# run the counts
sample_stats <- sample_files[
, .(assembled_loci =
ParseIndividualLoci(stacks_dir = stacks_dir,
tag_file = tag_file),
polymorphic_loci =
ParseIndividualPolymorphicLoci(stacks_dir = stacks_dir,
allele_file = alleles_file),
snps = ParseIndividualSNPs(stacks_dir = stacks_dir,
snp_file = snp_file)),
by = sample]
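# sample_stats holds per-sample counts of assembled loci, polymorphic loci
# and SNPs, mirroring the population-level summary above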
# write output
fwrite(population_stats, output_pop_stats)
fwrite(sample_stats, output_sample_stats)
# write session info
sessionInfo()
| /stacks_parameters/src/parse_stacks_output.R | no_license | TomHarrop/stacks_parameters | R | false | false | 5,848 | r |
\name{womensrole}
\alias{womensrole}
\docType{data}
\title{ Womens Role in Society }
\description{
Data from a survey from 1974 / 1975 asking both female and male
responders about their opinion on the statement: Women
should take care of running their homes and leave running the
country up to men.
}
\usage{data("womensrole")}
\format{
A data frame with 42 observations on the following 4 variables.
\describe{
\item{\code{education}}{years of education.}
\item{\code{sex}}{a factor with levels \code{Male} and \code{Female}.}
\item{\code{agree}}{number of subjects in agreement with the statement.}
\item{\code{disagree}}{number of subjects in disagreement with the
statement.}
}
}
\details{
The data are from Haberman (1973) and also given in
Collett (2003). The questions here are whether the response of men and women
differ.
}
\source{
S. J. Haberman (1973), The analysis of residuals in cross-classified
tables. \emph{Biometrics}, \bold{29}, 205--220.
D. Collett (2003), \emph{Modelling Binary Data}. Chapman and Hall / CRC,
London. 2nd edition.
}
\examples{
data("womensrole", package = "HSAUR")
summary(subset(womensrole, sex == "Female"))
summary(subset(womensrole, sex == "Male"))
}
\keyword{datasets}
| /man/womensrole.Rd | no_license | cran/HSAUR | R | false | false | 1,312 | rd |
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/get_education_data.R
\name{get_education_data}
\alias{get_education_data}
\title{Obtain data from the Urban Institute Education Data Portal API}
\usage{
get_education_data(
level = NULL,
source = NULL,
topic = NULL,
subtopic = NULL,
by = NULL,
filters = NULL,
add_labels = FALSE,
csv = FALSE,
verbose = TRUE
)
}
\arguments{
\item{level}{API data level to query}
\item{source}{API data source to query}
\item{topic}{API data topic to query}
\item{subtopic}{Optional 'list' of grouping parameters to pass to an API call}
\item{by}{DEPRECATED in favor of `subtopic`}
\item{filters}{Optional 'list' of query values to filter an API call}
\item{add_labels}{Add variable labels (when applicable)? Defaults to FALSE.}
\item{csv}{Download the full csv file? Defaults to FALSE.}
\item{verbose}{Print messages and warnings? Defaults to TRUE.}
}
\value{
A `data.frame` of education data
}
\description{
Obtain data from the Urban Institute Education Data Portal API
}
| /man/get_education_data.Rd | permissive | UrbanInstitute/education-data-package-r | R | false | true | 1,061 | rd |
datapath <- Sys.getenv("MGMTPLANE_DATA")
codepath <- Sys.getenv("MGMTPLANE_CODE")
datafile <- "all_metrics_1m_nomissing.csv"
source(paste(codepath,'analyze/read_metrics.R',sep="/"))
lastmonth <- metrics[which(metrics$Month=="2014-12"),]
counts <- table(lastmonth$NumRoles)
hasrole <- names(lastmonth)[grep("HasRole", names(lastmonth))]
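# fix the order of (and restrict to) the six roles plotted below; this
# overrides the grep above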
hasrole <- c('HasRoleSwitch', 'HasRoleRouter', 'HasRoleL3Switch', 'HasRoleFirewall', 'HasRoleApplicationSwitch', 'HasRoleLoadBalancer')
counts <- colSums(lastmonth[,hasrole])
counts <- counts/nrow(lastmonth)
lbls <- sub("HasRole", "", hasrole)
lbls <- sub("LoadBalancer", "LoadBal", lbls)
lbls <- sub("ApplicationSwitch", "ADC", lbls)
plotfile <- paste(datapath,'plots','hasrole_distribution.pdf',sep="/")
pdf(plotfile, height=3, width=3)
par(mar=c(4,3.5,1,0), mgp=c(2,0.3,0))
xtics <- barplot(counts, ylab='Fraction of Networks', xlab='', ylim=c(0,1), xaxt='n', yaxt='n',
cex.lab=1.4)
axis(2,las=2,cex.axis=1.4,tck=0.03)
text(xtics+0.5, -0.03, labels=lbls, cex=1.4, srt=45, xpd=TRUE, pos=2)
#mtext("Role", side=1, line=4, cex=1.25)
box(which = "plot", lty = "solid")
dev.off()
| /plots/hasrole_distribution.R | no_license | agember/mpa | R | false | false | 1,125 | r |
#### 1. Import Data
library(GGally)
library(tidyverse)
library(PerformanceAnalytics)
setwd("/Users/yeonghyeon/Documents/GitHub/COVID-19/201116_Permutation_Test")
coef <- read.csv("coef_result.csv")
outlier_countries <- c("Andorra", "Aruba", "Benin", "French_Polynesia",
"Kyrgyzstan", "Kuwait", "Mongolia", "Niger",
"Sao_Tome_and_Principe", "Seychelles")
exclude_countries <- c("Albania", "Argentina", "Cameroon", "Colombia",
"Dominican_Republic", "India", "Indonesia", "Kosovo",
"Moldova", "Mozambique", "Namibia", "Puero_Rico",
"uzbekistan", "Venezuela", "Zambia")
coef <- filter(read.csv("coef_result.csv"), !X %in% outlier_countries)[, -1]
coef <- filter(read.csv("coef_result.csv"), !X %in% exclude_countries)[, -1]
log_coef <- log(coef)
log_coef_st <- scale(log(coef))
chart.Correlation(coef[, c(1:6)], histogram = TRUE, pch=19)
chart.Correlation(log_coef[, c(1:6)], histogram = TRUE, pch=19)
#### 2. Correlation between Segments / Models
## Compare coefficients within segment 1
chart.Correlation(coef[, c(2,3,8,9)], histogram = TRUE, pch=19)
chart.Correlation(log_coef[, c(2,3,8,9)], histogram = TRUE, pch=19)
chart.Correlation(log_coef_st[, c(2,3,8,9)], histogram = TRUE, pch=19)
# b1_Logi vs b1_Gom and c1_Logi vs c1_Gom are each highly correlated -> permutation test
## Compare coefficients within segment 2
chart.Correlation(coef[, c(5,6,11,12)], histogram = TRUE, pch=19)
chart.Correlation(log_coef[, c(5,6,11,12)], histogram = TRUE, pch=19)
chart.Correlation(log_coef_st[, c(5,6,11,12)], histogram = TRUE, pch=19)
# As in segment 1, coefficients b and c of the Logistic and Gompertz models are each highly correlated.
# However, the correlation between b and c within the Logistic model is surprisingly high ->
## Compare Logistic coefficients across segments
chart.Correlation(coef[, c(2,5,3,6)], histogram = TRUE, pch=19)
chart.Correlation(log_coef[, c(2,5,3,6)], histogram = TRUE, pch=19)
chart.Correlation(log_coef_st[, c(2,5,3,6)], histogram = TRUE, pch=19)
# In the Logistic comparison, coefficients b and c are highly correlated in segment 2.
## Compare Gompertz coefficients across segments
chart.Correlation(coef[, c(8,11,9,12)], histogram = TRUE, pch=19)
chart.Correlation(log_coef[, c(8,11,9,12)], histogram = TRUE, pch=19)
chart.Correlation(log_coef_st[, c(8,11,9,12)], histogram = TRUE, pch=19)
# In the Gompertz comparison, coefficients b and c are highly correlated in segment 2.
#### 3. Permutation Test
### Compare correlation coefficients
chart.Correlation(log_coef[, c(1,4,2,5,3,6)])
### Compare coefficients by segment
## a1_Logi vs a2_Logi
set.seed(1)
chart.Correlation(log_coef[, c(1,4)], histogram = TRUE, pch=19)
a_logi <- coef[, c(1, 4)] %>% filter(!is.na(a2_Logi), !is.nan(a1_Logi))
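# (the raw-scale version above is immediately overwritten; the test uses
# log-scale coefficients)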
a_logi <- log_coef[, c(1, 4)] %>% filter(!is.na(a2_Logi), !is.nan(a1_Logi))
attach(a_logi)
boxplot(a_logi, main="Boxplots for Logistic coef. a between two segments")
paired_t_stat <- t.test(a1_Logi, a2_Logi, paired=TRUE)$statistic
# Paired t-test
paired_t_perm <- paired.perm.test(a_logi, n.perm=1000)
hist(paired_t_perm, xlim=c(-20,20),
main="Paired Permutation Test for a1_Logi & a2_Logi")
abline(v=abs(paired_t_stat),lty=2,col=2)
pvalue=mean(abs(paired_t_perm)>=abs(paired_t_stat)); pvalue
## b1_Logi vs b2_Logi
set.seed(1)
chart.Correlation(log_coef[, c(2,5)], histogram = TRUE, pch=19)
b_logi <- log_coef[, c(2, 5)] %>% filter(!is.na(b2_Logi), !is.nan(b1_Logi))
attach(b_logi)
boxplot(b_logi, main="Boxplots for Logistic coef. b between two segments")
paired_t_stat <- t.test(b1_Logi, b2_Logi, paired=TRUE)$statistic
# Paired t-test
paired_t_perm <- paired.perm.test(b_logi, n.perm=1000)
hist(paired_t_perm, xlim=c(-20,20),
main="Paired Permutation Test for b1_Logi & b2_Logi")
abline(v=abs(paired_t_stat),lty=2,col=2)
pvalue=mean(abs(paired_t_perm)>=abs(paired_t_stat)); pvalue
## c1_Logi vs c2_Logi
set.seed(1)
chart.Correlation(log_coef[, c(3,6)], histogram = TRUE, pch=19)
c_logi <- log_coef[, c(3, 6)] %>% filter(!is.na(c2_Logi), !is.nan(c1_Logi))
attach(c_logi)
boxplot(c_logi, main="Boxplots for Logistic coef. c between two segments")
paired_t_stat <- t.test(c1_Logi, c2_Logi, paired=TRUE)$statistic
# Paired t-test
paired_t_perm <- paired.perm.test(c_logi, n.perm=1000)
hist(paired_t_perm, xlim=c(-20,20),
main="Paired Permutation Test for c1_Logi & c2_Logi")
abline(v=abs(paired_t_stat),lty=2,col=2)
pvalue=mean(abs(paired_t_perm)>=abs(paired_t_stat)); pvalue
### a1_Gom vs a2_Gom
set.seed(1)
chart.Correlation(log_coef[, c(7,10)], histogram = TRUE, pch=19)
a_Gom <- log_coef[, c(7, 10)] %>% filter(!is.na(a2_Gom), !is.nan(a1_Gom))
attach(a_Gom)
boxplot(a_Gom, main="Boxplots for Gompertz coef. a between two segments")
paired_t_stat <- t.test(a1_Gom, a2_Gom, paired=TRUE)$statistic
# Paired t-test
paired_t_perm <- paired.perm.test(a_Gom, n.perm=1000)
hist(paired_t_perm, xlim=c(-20,20),
main="Paired Permutation Test for a1_Gom & a2_Gom")
abline(v=abs(paired_t_stat),lty=2,col=2, lwd=2)
pvalue=mean(abs(paired_t_perm)>=abs(paired_t_stat)); pvalue
### b1_Gom vs b2_Gom
set.seed(1)
chart.Correlation(log_coef[, c(8,11)], histogram = TRUE, pch=19)
b_Gom <- log_coef[, c(8, 11)] %>% filter(!is.na(b2_Gom), !is.nan(b1_Gom))
attach(b_Gom)
boxplot(b_Gom, main="Boxplots for Gompertz coef. b between two segments")
paired_t_stat <- t.test(b1_Gom, b2_Gom, paired=TRUE)$statistic
# Paired t-test
paired_t_perm <- paired.perm.test(b_Gom, n.perm=1000)
hist(paired_t_perm, xlim=c(-20,20),
main="Paired Permutation Test for b1_Gom & b2_Gom")
abline(v=abs(paired_t_stat),lty=2,col=2, lwd=2)
pvalue=mean(abs(paired_t_perm)>=abs(paired_t_stat)); pvalue
### c1_Gom vs c2_Gom
set.seed(1)
chart.Correlation(log_coef[, c(9,12)], histogram = TRUE, pch=19)
c_Gom <- log_coef[, c(9, 12)] %>% filter(!is.na(c2_Gom), !is.nan(c1_Gom))
attach(c_Gom)
boxplot(c_Gom, main="Boxplots for Gompertz coef. c between two segments")
paired_t_stat <- t.test(c1_Gom, c2_Gom, paired=TRUE)$statistic
# Paired t-test
paired_t_perm <- paired.perm.test(c_Gom, n.perm=1000)
hist(paired_t_perm, xlim=c(-20,20),
main="Paired Permutation Test for c1_Gom & c2_Gom")
abline(v=abs(paired_t_stat),lty=2,col=2, lwd=2)
pvalue=mean(abs(paired_t_perm)>=abs(paired_t_stat)); pvalue
| /200921 Segmented Logistic Model, Categorization, state analysis using derivative/201116_Permutation_Test/201116_Permutation_Test.R | no_license | YeonghyeonKO/COVID-19 | R | false | false | 6,320 | r |
## =========================================
## Demo 1-04
## < Global >
## =========================================
# Load packages
library(shiny)
library(teadashboard)
library(glue)
# Record reactivity
options(shiny.reactlog=TRUE)
# View reactivity
# reactlogShow()
# reactlogReset()
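# Usage sketch (added; assumes the reactlog package is installed, which
# shiny's reactlogShow()/reactlogReset() delegate to):
#   1. source this global.R and run the demo app,
#   2. interact with the app to generate reactive events,
#   3. call shiny::reactlogShow() afterwards (or press Ctrl+F3 / Cmd+F3
#      in the browser while the app is running) to inspect the graph.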
| /Exercises/Demo/App1.04_DemoReactivity/global.r | no_license | TEA-Analytics/ShinyWorkshop | R | false | false | 309 | r |
# Unroot a Newick tree with ape (the repo layout suggests this prepares input
# for PAML/codeml, which expects unrooted trees)
library(ape)
testtree <- read.tree("3010_5.txt")
unrooted_tr <- unroot(testtree)
write.tree(unrooted_tr, file="3010_5_unrooted.txt") | /codeml_files/newick_trees_processed/3010_5/rinput.R | no_license | DaniBoo/cyanobacteria_project | R | false | false | 135 | r |
## Matrix inversion is usually costly, so the two functions below cache the
## inverse of a matrix instead of recomputing it on every call.
## makeCacheMatrix() creates a special "matrix" object that can cache its
## inverse: a list of getter/setter closures over the matrix x and its
## cached inverse s.
makeCacheMatrix <- function(x = matrix()) {
s <- NULL
set <- function(y) {
x <<- y
s <<- NULL
}
get <- function() x
setsolve <- function(solve) s <<- solve
getsolve <- function() s
list(set = set, get = get, setsolve = setsolve, getsolve = getsolve)
}
## cacheSolve() computes the inverse of the special "matrix" returned by
## makeCacheMatrix() above. If the inverse has already been calculated (and
## the matrix has not changed), it retrieves the inverse from the cache
## instead of recomputing it.
cacheSolve <- function(x, ...) {
## Return a matrix that is the inverse of 'x'
s <- x$getsolve()
if(!is.null(s)) {
message("getting cached data")
return(s)
}
data <-x$get()
s <- solve(data, ...)
x$setsolve(s)
s
}
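## Usage sketch (illustrative addition, not part of the original file):
# m <- makeCacheMatrix(matrix(c(2, 0, 0, 2), 2, 2))
# cacheSolve(m)   # computes and caches the inverse: diag(0.5, 2)
# cacheSolve(m)   # prints "getting cached data" and reuses the cached inverse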
| /cachematrix.R | no_license | pkk82/ProgrammingAssignment2 | R | false | false | 998 | r |
library(shiny)
library(shinyBS)
library(ggplot2)
library(shinyProc)
# flog.threshold() and the DEBUG constant come from futile.logger; load it
# explicitly in case shinyProc does not attach it (assumption: it may already
# be a dependency of shinyProc)
library(futile.logger)
flog.threshold(DEBUG)
ui <- fluidPage(
titlePanel("Titles for plot"),
plotTitlesUI(id = "PlotTitle"),
sidebarLayout(
sidebarPanel(
),
mainPanel(
plotOutput("plot", dblclick = "spTitle")
)
)
)
server <- function(input, output, session) {
titles = callModule(makePlotTitles, "PlotTitle")
  # Open/close the titles modal when the plot is double-clicked
  # (wired to dblclick = "spTitle" on the plotOutput below)
  observe({
    req(input$spTitle)
    req(titles$modalId)
    toggleModal(session, titles$modalId)
  })
output$plot = renderPlot({
p = qplot(mpg, wt, data = mtcars, colour = I("red"))
titles$call(p)
})
}
shinyApp(ui = ui, server = server)
| /inst/examples/plotTitle/app.R | permissive | zzawadz/shinyProc | R | false | false | 688 | r |
\name{BGMtest}
\alias{BGMtest}
\title{Tests the five Berry, Golder and Milton (2012) interactive hypotheses}
\description{This function tests the five hypotheses that Berry, Golder and Milton identify as important when two quantitative variables are interacted in a linear model.}
\usage{
BGMtest(obj, vars, digits = 3, level = 0.05, two.sided=T)
}
\arguments{
\item{obj}{An object of class \code{lm}.}
\item{vars}{A vector of two variable names giving the two quantitative variables involved in the interaction. These variables must be involved in one, and only one, interaction. }
\item{digits}{Number of digits to be printed in the summary.}
\item{level}{Type I error rate for the tests.}
\item{two.sided}{Logical indicating whether the tests should be two-sided (if \code{TRUE}, the default) or one-sided (if \code{FALSE}).}
}
\value{
A matrix giving five t-tests.
}
\author{Dave Armstrong (UW-Milwaukee, Department of Political Science)}
\examples{
library(car)
data(Duncan)
mod <- lm(prestige ~ income*education + type, data=Duncan)
BGMtest(mod, c("income", "education"))
} | /man/BGMtest.Rd | no_license | TotallyBullshit/damisc | R | false | false | 1,091 | rd |
#################################################
input_all <- read.csv("data/eta_n0.5-0.5_phi_ninf-pinf.csv")
input_train <- read.csv("data/training_sample-05_05.csv")
#chosen_sample_id <- c(3177, 12115, 12612, 14010, 28518, 29804)
layers <- c(1:10)
break.hists = 100
x.columns <- c("x_0", "x_1", "x_2", "x_3", "x_4", "x_5", "x_6", "x_7", "x_8", "x_9")
y.columns <- c("y_0", "y_1", "y_2", "y_3", "y_4", "y_5", "y_6", "y_7", "y_8", "y_9")
rgb.color <- list( rgb(255, 0, 0, maxColorValue=255), # red
rgb( 0, 100, 0, maxColorValue=255), # dark green
rgb( 0, 0, 255, maxColorValue=255), # blue
rgb(255, 255, 0, maxColorValue=255), # yellow
rgb( 0, 128, 128, maxColorValue=255), # teal
rgb(255, 0, 255, maxColorValue=255), # magenta
rgb(128, 0, 0, maxColorValue=255), # maroon
rgb(128, 128, 0, maxColorValue=255), # olive
rgb(119, 135, 153, maxColorValue=255), # light state gray
rgb( 0, 255, 255, maxColorValue=255) )# aqua
h.scale = 0.7
###############################################
px <- input_all[, "px"]
py <- input_all[, "py"]
pt <- sqrt( px^2 + py^2 )
sample_id <- input_train[, "X"]
x <- input_train[, x.columns]
y <- input_train[, y.columns]
library(pracma)
library(latex2exp)
library(scales)
x.c <- list()
y.c <- list()
x.fit <- list()
y.fit <- list()
x0.fit <- list()
y0.fit <- list()
fit.cos <- list()
fit.sin <- list()
fit.xy.dist <- list()
fit.circle <- list()
r.fit <- c()
message("* Starting fits...")
for( r in 1:nrow(input_train) ){
if( r %% 100 == 0 ) cat(".")
x.c[[r]] <- unlist( x[r,], use.names=FALSE)
y.c[[r]] <- unlist( y[r,], use.names=FALSE)
fit.circle[[r]] <- circlefit(x.c[[r]], y.c[[r]])
x0.fit[[r]] <- fit.circle[[r]][1]
y0.fit[[r]] <- fit.circle[[r]][2]
fit.xy.dist[[r]] <- sqrt( (x.c[[r]] - x0.fit[[r]])^2 +
(y.c[[r]] - y0.fit[[r]])^2 )
fit.cos[[r]] <- (x.c[[r]] - x0.fit[[r]]) / fit.xy.dist[[r]]
fit.sin[[r]] <- (y.c[[r]] - y0.fit[[r]]) / fit.xy.dist[[r]]
r.fit <- c(r.fit, fit.circle[[r]][3])
x.fit[[r]] <- x0.fit[[r]] + r.fit[r]*fit.cos[[r]]
y.fit[[r]] <- y0.fit[[r]] + r.fit[r]*fit.sin[[r]]
}
cat("\n")
pt.train <- pt[sample_id]
message("* Radius results:")
print(summary(r.fit))
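# Physics cross-check (added note, not in the original script): for a charged
# particle in a uniform axial magnetic field, pT [GeV/c] = 0.299792458 * B[T] * R[m],
# so the slope of the pT-vs-R fits below estimates roughly 0.3*B. The field B
# is not given in this script, hence the hypothetical helper:
pt_from_R <- function(R_m, B_tesla) 0.299792458 * B_tesla * R_m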
# Deliberate breakpoint: comment out this stop() to run the plotting sections below
stop("******** Stop ************")
#####################################################
r.fit1000 <- (r.fit/1000)
png("momentum/fit_2020_06_20/pt_vs_R_high.R.png",
units="px", width=1600, height=1600, res=250)
plot(r.fit1000[r.fit1000 > 0.1 & r.fit1000 < 1000],
pt.train[r.fit1000 > 0.1 & r.fit1000 < 1000],
xlab="Radius of Track Curvature [m]", ylab="Transverse Momentum [GeV/c]",
main="21k Tracks",
xlim=c(0.1,1000),
#ylim=c(0,6),
col="blue", pch=1)
pt_R.fit <- lm( pt.train[r.fit1000 > 0.1 & r.fit1000 < 1000] ~
(r.fit1000[r.fit1000 > 0.1 & r.fit1000 < 1000]) )
abline( pt_R.fit, col="red", lwd=2 )
grid(col="gray48")
legend( "topright", legend=c("Training Sample",
sprintf("pT(R) = %.2f + %.2f.R",
pt_R.fit$coefficients[1],
pt_R.fit$coefficients[2])),
lwd=c(NA, 2), pch=c(1, NA), col=c("blue", "red"), border=NA, bg="white" )
dev.off()
#####################################################
png("momentum/fit_2020_06_20/hist_R.png",
units="px", width=1600, height=1600, res=250)
hist(r.fit[r.fit < 10000 & r.fit > 100],
breaks=c(seq(100, 10000, 100)),
xlab="Radius of Track Curvature [mm]",
main="21k Tracks",
col="red")
dev.off()
message("*****************************")
#####################################################
#png("momentum/fit_2020_06_20/hist_pt_1-4.png",
# units="px", width=1600, height=1600, res=250)
#pt.hist <- hist(pt.train, breaks=1000)
#plot(pt.hist$mids[pt.hist$counts > 0], pt.hist$counts[pt.hist$counts > 0],
# xlim=c(1,4),
# #ylim=c(1, max(pt.hist$counts)),
# xlab="Transverse Momentum [GeV/c]", ylab="Frequency",
# main="21k Tracks",
# col="blue", type="h", lwd=2, log="")
#dev.off()
#####################################################
x.diff <- list()
x.diff.hist <- list()
x.diff.min = -0.4
x.diff.max = 0.8
x.diff.breaks = c(seq(x.diff.min,x.diff.max,0.02))
png("momentum/fit_2020_06_20/hist_x_difference.png",
units="px", width=1600, height=1600, res=250)
for( l in layers ){
  # unlist() concatenates tracks row by row (10 layer values per track), so
  # seq(l, 10*n, 10) selects layer l across all n tracks; the same pattern is
  # used for y.diff below
x.diff[[l]] <- unlist(x.fit[1:nrow(input_train)],
use.names=F)[seq(l,nrow(input_train)*length(layers),
length(layers))] -
unlist(x.c[1:nrow(input_train)],
use.names=F)[seq(l,nrow(input_train)*length(layers),
length(layers))]
x.diff.hist[[l]] <- hist( x.diff[[l]][(x.diff[[l]] < x.diff.max) &
(x.diff[[l]] > x.diff.min)],
breaks=x.diff.breaks, plot=F )
if( l == 1 ){
plot( x.diff.hist[[l]]$mids, x.diff.hist[[l]]$counts,
xlab=TeX("$(x_{Fitted} - x_{Real}) \\; \\[mm\\]$"),
ylab="Frequency",
main="21k Tracks",
ylim=c(0,5000), lwd=2, col=rgb.color[[l]], type="l" )
}
else{
lines( x.diff.hist[[l]]$mids, x.diff.hist[[l]]$counts,
lwd=2, col=rgb.color[[l]], type="l" )
}
}
sd.x.diff <- c()
for( l in layers ) { sd.x.diff <- c(sd.x.diff, sd(x.diff[[l]])) }
bw.x.diff <- c()
for( l in layers ) { bw.x.diff <- c(bw.x.diff, bw.nrd(x.diff[[l]])) }
legend( "topright", legend=sprintf( "Layer %2d, (sd, BW) = (%2.0f, %1.3f) mm",
layers, sd.x.diff, bw.x.diff ),
fill=c( alpha(rgb.color, 1) ), border=NA, bty="n" )
dev.off()
#####################################################
y.diff <- list()
y.diff.hist <- list()
y.diff.min = -0.4
y.diff.max = 0.8
y.diff.breaks = c(seq(y.diff.min,y.diff.max,0.02))
png("momentum/fit_2020_06_20/hist_y_difference.png",
units="px", width=1600, height=1600, res=250)
for( l in layers ){
y.diff[[l]] <- unlist(y.fit[1:nrow(input_train)],
use.names=F)[seq(l,nrow(input_train)*length(layers),
length(layers))] -
unlist(y.c[1:nrow(input_train)],
use.names=F)[seq(l,nrow(input_train)*length(layers),
length(layers))]
y.diff.hist[[l]] <- hist( y.diff[[l]][(y.diff[[l]] < y.diff.max) &
(y.diff[[l]] > y.diff.min)],
breaks=y.diff.breaks, plot=F )
if( l == 1 ){
plot( y.diff.hist[[l]]$mids, y.diff.hist[[l]]$counts,
xlab=TeX("$(y_{Fitted} - y_{Real}) \\; \\[mm\\]$"),
ylab="Frequency",
main="21k Tracks",
ylim=c(0,5000), lwd=2, col=rgb.color[[l]], type="l")
}
else{
lines( y.diff.hist[[l]]$mids, y.diff.hist[[l]]$counts,
lwd=2, col=rgb.color[[l]], type="l")
}
}
sd.y.diff <- c()
for( l in layers ) { sd.y.diff <- c(sd.y.diff, sd(y.diff[[l]])) }
bw.y.diff <- c()
for( l in layers ) { bw.y.diff <- c(bw.y.diff, bw.nrd(y.diff[[l]])) }
legend( "topright", legend=sprintf( "Layer %2d, (sd, BW) = (%2.0f, %1.3f) mm",
layers, sd.y.diff, bw.y.diff ),
fill=c( alpha(rgb.color, 1) ), border=NA, bty="n" )
dev.off()
#####################################################
xy.diff <- list()
xy.diff.hist <- list()
xy.diff.xmax = 1
xy.diff.breaks = c(seq(0,xy.diff.xmax,0.04))
png("momentum/fit_2020_06_20/hist_xy_difference.png",
units="px", width=1600, height=1600, res=250)
for( l in layers ){
xy.diff[[l]] <- sqrt( x.diff[[l]]^2 + y.diff[[l]]^2 )
xy.diff.hist[[l]] <- hist( xy.diff[[l]][xy.diff[[l]] < xy.diff.xmax],
breaks=xy.diff.breaks, plot=F )
if( l == 1 ){
plot( xy.diff.hist[[l]]$mids,
xy.diff.hist[[l]]$counts,
xlab=TeX("$(Hit_{Fitted} - Hit_{Real}) \\; \\[mm\\]$"),
ylab="Frequency",
xlim=c(min(xy.diff.breaks),max(xy.diff.breaks)),
ylim=c(0,12000),
main=TeX("Distance between $Hit_{Fitted}$ and $Hit_{Real}$ in XY Plane"),
col=alpha(rgb.color[[l]], 1), lwd=2, type="l" )
}
else{
lines(xy.diff.hist[[l]]$mids,
xy.diff.hist[[l]]$counts,
col=alpha(rgb.color[[l]], 1),
lwd=2, type="l")
}
}
sd.xy.diff <- c()
for( l in layers ) sd.xy.diff <- c(sd.xy.diff, sd(xy.diff[[l]]))
bw.xy.diff <- c()
for( l in layers ) bw.xy.diff <- c(bw.xy.diff, bw.nrd(xy.diff[[l]]))
legend( "topright", legend=sprintf( "Layer %2d, (sd, BW) = (%2.0f, %1.3f) mm",
layers, sd.xy.diff, bw.xy.diff ),
fill=c( alpha(rgb.color, 1) ), border=NA, bty="n" )
dev.off()
#####################################################
count.diff <- data.frame()
#hit.diff <- seq(0.05,60,0.05)
hit.diff <- c(seq(0.05,60,0.05), seq(70, 650, 10))
png("momentum/fit_2020_06_20/count_fits_outside_diff.png",
units="px", width=1600, height=1600, res=250)
for( l in 1:length(layers) ){
for( h.d in 1:length(hit.diff) ){
count.diff[l,h.d] <- length(which(xy.diff[[layers[l]]] > hit.diff[h.d]))
}
if( l == 1 ){
plot( hit.diff[count.diff[l,] > 0],
unlist(count.diff[l,][count.diff[l,] > 0],use.names=F),
xlab=TeX("$(Hit_{Fitted} - Hit_{Real}) \\; \\[mm\\]$"),
ylab="Frequency",
xlim=c(min(hit.diff),max(hit.diff)),
ylim=c(0,21000),
main=TeX("Number of Fits Out of Difference $(Hit_{Fitted} - Hit_{Real})$"),
col=alpha(rgb.color[[l]], 1), lwd=2, type="l", log="x" )
}
else{
lines(hit.diff[count.diff[l,] > 0],
unlist(count.diff[l,][count.diff[l,] > 0],use.names=F),
col=alpha(rgb.color[[l]], 1), lwd=2, type="l", log="x")
}
}
grid(col="gray48")
legend( "topright", legend=sprintf( "Layer %2d", layers, sd.xy.diff, bw.xy.diff ),
fill=c( alpha(rgb.color, 1) ), border=NA, bg="white")
dev.off()
#####################################################
message("******************************************")
dist.2d <- c(0.05,0.1,0.2,0.5,1,2,5,10,20,30)
cat("D \t"); for( d in dist.2d ){ cat(d, "\t")}; cat("\n")
cat("\n")
for( l in layers ){
cat("L ", l, "\t")
for( d in dist.2d ){
cat(length(which(xy.diff[[layers[l]]] > d)), "\t")
}
cat("\n")
}
message("******************************************")
#####################################################
#### (i.e, > 30 cm)
# which(xy.diff[[layers[1]]] > 370)
# 5136 11006 12482 17474
# which(xy.diff[[layers[2]]] > 370)
# 12482
# which(xy.diff[[layers[9]]] > 370)
# 5136 6208 10364 17474
# which(xy.diff[[layers[10]]] > 370)
# 5136 6208 10364 11006 12482 17474 19030 20070 20422 20986
#
# train ID
# 5136 6208 10364 11006 12482 17474 19030 20070 20422 20986
# pt
# 69.188822 3.172348 3.939546 1.510150 1.778773 45.963099 1.234339
# 1.475285 2.279276 1.361427
####
#### high.diff <- c( 1548, 1641, 7689, 8697, 9954, 12088,
#### 13339, 15209, 16472, 20367, 20632)
high.diff <- c( 5136, 6208, 10364, 11006, 12482,
17474, 19030, 20070, 20422, 20986)
png("momentum/fit_2020_06_20/fit.circle_high.diff.2.png",
units="px", width=1600, height=1600, res=250)
plot(unlist(x.c, use.names=F), unlist(y.c, use.names=F),
xlab="x (mm)", ylab="y (mm)",
main=TeX("Fitting Tracks with ($Hit_{Fitted} - Hit_{Real}$) > 30 mm"),
col="grey", pch=".")
for( c in 1:length(high.diff) ){
points( x.c[[high.diff[c]]], y.c[[high.diff[c]]],
col="red", lty=1, pch=20, type="b" )
points(x.fit[[high.diff[c]]], y.fit[[high.diff[c]]],
col="blue",pch=1)
lines( x.fit[[high.diff[c]]], y.fit[[high.diff[c]]],
col="blue",lty=2,type="l")
}
legend( "topleft", legend=c("Chosen Tracks", "Hits from Fit", "Circular Fit"),
lty = c(1, NA, 2), col = c("red", "blue", "blue"), pch = c(20, 1, NA) )
dev.off()
#####################################################
# sample(which( pt.train > 10 ), 10)
high.pt <- c(1547, 4490, 5523, 7679, 11366, 13316, 14366, 15184, 16079, 17866, 20627)
png("momentum/fit_2020_06_20/fit.circle_high.pt.png",
units="px", width=1600, height=1600, res=250)
plot(unlist(x.c, use.names=F), unlist(y.c, use.names=F),
xlab="x (mm)", ylab="y (mm)",
main=TeX("Fitting Tracks ($p_{T}$ > 10 GeV/c)"),
col="grey", pch=".")
for( c in 1:length(high.pt) ){
points( x.c[[high.pt[c]]], y.c[[high.pt[c]]],
col="red", pch=20, lty=1, type="b" )
points(x.fit[[high.pt[c]]], y.fit[[high.pt[c]]],
col="blue", pch=1)
lines( x.fit[[high.pt[c]]], y.fit[[high.pt[c]]],
col="blue", lty=2, type="l" )
}
legend( "bottomleft", legend=c("Chosen Tracks", "Hits from Fit", "Circular Fit"),
lty = c(1, NA, 2), col = c("red", "blue", "blue"), pch = c(20, 1, NA) )
dev.off()
#####################################################
message("***********************************")
cat("Layer\t") ; for( l in layers ){ cat(l, "\t") } ; cat("\n")
cat("Track ID\n")
for( i in 1:length(high.pt) ){
cat(high.pt[i], "\t")
for( l in layers ) cat(sprintf("%.1f", xy.diff[[l]][high.pt[i]]), "\t")
cat("\n")
}
message("***********************************")
#####################################################
sum.diff.all <- c()
xy.diff.all <- unlist(xy.diff,use.names=F)
for( r in 1:nrow(input_train) ){
if( (r %% 100) == 0 ) cat(".")
sum.diff = sum( xy.diff.all[seq(r,length(xy.diff.all),nrow(input_train))] )
# lower.pt <- c(lower.pt, which( xy.diff[[layers[l]]] < 10 ))
sum.diff.all <- c(sum.diff.all, sum.diff)
}
cat("\n")
png("momentum/fit_2020_06_20/sum.all.diff_0-20_mm.png",
units="px", width=1600, height=1600, res=250)
hist( sum.diff.all, breaks=10000,
xlim=c(0,10),
ylim=c(0,4000),
xlab=TeX("$\\sum_{Layers}(Hit_{Fitted} - Hit_{Real}) \\; \\[mm\\]$"),
ylab="Frequency",
main="21k Tracks", col="blue", border=NA )
dev.off()
#####################################################
sum.diff.cut = 2
lower.pt <- c(which(sum.diff.all < sum.diff.cut))
png("momentum/fit_2020_06_20/pt_vs_R_all.pT_and_lower.pT.png",
units="px", width=1600, height=1600, res=250)
plot(r.fit1000, pt.train,
xlab="Radius of Track Curvature [m]", ylab="Transverse Momentum [GeV/c]",
main=TeX(sprintf("Before and After $\\sum_{Layers}(Hit_{Fitted} - Hit_{Real})$ < %d mm", sum.diff.cut)),
xlim=c(0,100),
#ylim=c(0.5,3),
col="blue", pch=1)
points(r.fit1000[lower.pt], pt.train[lower.pt], col="green", pch=1)
pt_R.fit.lower.pt <- lm( pt.train[lower.pt] ~ (r.fit1000[lower.pt]) )
abline( pt_R.fit.lower.pt, col="red", lwd=2 )
grid(col="gray48")
legend( "topleft", legend=c("Training Sample", TeX(sprintf("$\\sum_{Layers}(Hit_{Fitted} - Hit_{Real})$ < %d mm", sum.diff.cut)),
sprintf("pT(R) = %.1f + %.1f.R",
pt_R.fit.lower.pt$coefficients[1],
pt_R.fit.lower.pt$coefficients[2])),
lwd=c(NA,NA,2), pch=c(1,1,NA), col=c("blue","green","red") )
dev.off()
#####################################################
#png("momentum/fit_2020_06_20/pt_vs_R_lower.pT.png",
# units="px", width=1600, height=1600, res=250)
#plot(r.fit1000[lower.pt], pt.train[lower.pt],
# xlab="Radius of Track Curvature [m]", ylab="Transverse Momentum [GeV/c]",
# main="10k Tracks",
# xlim=c(0.5,5.5),
# ylim=c(0.5,3),
# col="forestgreen", pch=1)
#pt_R.fit.lower.pt <- lm( pt.train[lower.pt] ~ (r.fit1000[lower.pt]) )
#abline( pt_R.fit.lower.pt, col="red", lwd=2 )
#grid(col="gray48")
#legend( "topleft", legend=c(TeX(sprintf("$\\sum_{Layers}(Hit_{Fitted} - Hit_{Real})$ < %d mm", sum.diff.cut)),
# sprintf("pT(R) = %.1f + %.1f.R",
# pt_R.fit.lower.pt$coefficients[1],
# pt_R.fit.lower.pt$coefficients[2])),
# lwd=c(NA,2), pch=c(1,NA), col=c("forestgreen","red"), border=NA, bg="white" )
#dev.off()
#
#####################################################
r.breaks <- seq(100, 10000, 100)
png("momentum/fit_2020_06_20/hist_R_lower.pT.png",
units="px", width=1600, height=1600, res=250)
hist(r.fit[(r.fit > min(r.breaks)) & (r.fit < max(r.breaks))],
breaks=r.breaks,
xlab="Radius of Track Curvature [mm]",
xlim=c(100,10000),
#ylim=c(0,1500),
main=TeX("Before and After Cut in $\\sum_{Layers}(Hit_{Fitted} - Hit_{Real})$"),
col=alpha(rgb.color[[1]], 1))
#r.fit.low.pt.hist <- hist(r.fit[lower.pt], plot=F)
# Subset by track first, then range-filter (the original applied lower.pt to
# the already range-filtered vector, which mis-aligns the indices)
r.fit.low <- r.fit[lower.pt]
hist(r.fit.low[(r.fit.low > min(r.breaks)) & (r.fit.low < max(r.breaks))],
     breaks=r.breaks,
     col=alpha(rgb.color[[3]], 0.7), add=TRUE)
legend( "topright", legend=c("21k Tracks", "15k Tracks"),
fill=c(alpha(rgb.color[[1]], 1), alpha(rgb.color[[3]], 0.7)) )
dev.off()
#####################################################
png("momentum/fit_2020_06_20/hist_all.pt_and_low.pt.png",
units="px", width=1600, height=1600, res=250)
pt.hist <- hist(pt.train, breaks=1000, plot=F)
pt.breaks <- pt.hist$breaks
pt.hist <- hist(pt.train, breaks=pt.breaks, plot=F)
plot(pt.hist$mids[pt.hist$counts > 0], pt.hist$counts[pt.hist$counts > 0],
xlab="Transverse Momentum [GeV/c]", ylab="Frequency",
main=TeX("Before and After Cut in $\\sum_{Layers}(Hit_{Fitted} - Hit_{Real})$"),
col="red", type="h", lwd=5, log="xy")
pt.hist.low.pt <- hist(pt.train[lower.pt], breaks=pt.breaks, plot=F)
points(pt.hist.low.pt$mids[pt.hist.low.pt$counts >0],
pt.hist.low.pt$counts[pt.hist.low.pt$counts >0],
col="blue", type="h", lwd=5, lty=1)
legend( "topright", legend=c("21k Tracks",
TeX(sprintf("$\\sum_{Layers}(Hit_{Fitted} - Hit_{Real})$ < %d mm", sum.diff.cut))), col=c("red","blue"), lwd=5, lty=c(1,1) )
dev.off()
#####################################################
####
####
####
| /get_radius_pracma.lib.R | no_license | AngeloSantos/TrackML | R | false | false | 18,185 | r |
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/codondiffR.R
\docType{package}
\name{codondiffR}
\alias{codondiffR}
\title{codondiffR: inter-taxon comparison of codon usage}
\description{
An R package for the comparative analysis of codon usage in a set of
user-defined sequences with those in reference taxa. Allows the calculation
and visualisation of codon usage metrics and statistical analysis of
differences in these metrics between taxa.
}
| /man/codondiffR.Rd | permissive | adamd3/codondiffR | R | false | true | 477 | rd |
################################################################################################################
# #
# Code by Eva Maire (emg.maire@gmail.com). Last update on 2020-03-27 #
# This code provides results of analysis used in Maire, E., D'agata, S., Aliaume, C., Mouillot, D., #
# Darling, D., Ramahery, V., Ranaivoson, R., Randriamanantsoa, B., Tianarisoa, T., Santisy, A., & #
# Cinner J. (2020). Disentangling the complex roles of markets on coral reefs in northwest Madagascar. #
# Ecology and Society. #
# #
################################################################################################################
#loading required libraries
require(visreg)
require(mgcv)
require(ggplot2)
require(dplyr)
require(MuMIn)
require(FactoMineR)
require(factoextra)
require(gridExtra)
# setting working directory
my_path<-"" # <= folder with RData
setwd(my_path)
#############################################################################################################
# Figure 2 - Partial effects of each socioeconomic covariate predicting log fish biomass in the model
# while considering the other predictor variables are held constant. Relationships between fish biomass and
# travel time from the nearest community (a),
# travel time from the nearest market (b),
# human population size (c) and
# management (d) for reefs where fishing is permitted (orange) and prohibited (green).
#Load fish biomass data
load("fish_biomass.RData")
# Step 1 - we first performe a Principal Components Analysis (PCA) using a set of six reef habitat and environmental variables
# (depth, weekly average SST and primary productivity, reef complexity, percent cover of macroalgae and live hard coral) to
# describe similarities between our ecological sites while dealing with multicollinearity.
# Six reef habitat and environmental variables
Pred <- c("Depth","Live_Hard_Coral","Macroalgae","Complexity","MeanChl","MeanTemp")
Ncol=vector(length=length(Pred))
for (i in 1:length(Pred))
{
Ncol[i]=which(colnames(fish_biomass)==Pred[i])
}
Ncol
env=fish_biomass[,Ncol]
head(env)
# Run the PCA once, with unit scaling (the original called PCA() twice and
# discarded the first result)
res.pca <- PCA(env, scale.unit = TRUE, graph = FALSE)
# Appendix 1 - Associations between environmental and benthic conditions of reefs through a Principal Component
# Analysis and corresponding loadings.
pca_S1 <- fviz_pca_var(res.pca, col.var = "contrib",
gradient.cols = c("#00AFBB", "#E7B800", "#FC4E07"),midpoint=15,
repel = T,title="") + theme_minimal()
pca_S1
#Loadings: FactoMineR stores variable coordinates as loading * sqrt(eigenvalue),
#so dividing each column by sqrt(eig) recovers the loadings
loadings <- sweep(res.pca$var$coord,2,sqrt(res.pca$eig[1:ncol(res.pca$var$coord),1]),FUN="/")
loadings_S1 <- tableGrob(round(loadings,2),theme = ttheme_minimal(base_size = 15))
# 2-panel Plot
grid.arrange(pca_S1,loadings_S1) # Figure_S1
# We only retaine the first two components (representing 56% of the total variance, see Figure S1)
# that mix abiotic and benthic conditions as environmental covariates for further analysis.
pc1 <- res.pca$ind$coord[,1]
pc2 <- res.pca$ind$coord[,2]
fish_biomass$pc1 <- pc1
fish_biomass$pc2 <- pc2
# To explore how proximity to markets and communities affect reef fish biomass beyond ecological and human population size effects,
# we build Generalized Additive Models (GAMs) considering the two environmental covariates provided by PCA (pc1 et pc2),
# human population size, travel time from human settlements and markets, and management.
mod<-gam(log_fish_biomass~s(pc1,bs="cr",k=3)+s(pc2,bs="cr",k=3)+s(human_pop,bs="cr",k=3)+
management+s(tt_village,bs="cr",k=3)+s(tt_market,bs="cr",k=3)
+s(tt_village, bs = "cr", by = management, k = 3, m = 1)
,data=fish_biomass,na.action="na.fail")
AICc(mod) #AICc = 14.9
summary(mod) #R-sq (adj) = 0.83
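# The GAM above was fitted with na.action="na.fail", which is exactly what
# MuMIn::dredge() requires. A sketch of how an all-subsets comparison could be
# run from here (added illustration -- the dredge() call below is not part of
# the original analysis):
# dd <- MuMIn::dredge(mod, rank = "AICc")
# head(dd)                                  # candidate models ranked by AICc
# best <- MuMIn::get.models(dd, subset = 1)[[1]]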
# Step 2 - Each partial effect plot needs to be limited to the range of the data
#Travel time from the nearest village
visreg_village <- visreg(mod,"tt_village",by="management",overlay=T,ylim=c(2,4),band=T,plot=F)
pred <- mod$fitted.values
ttv <- fish_biomass$tt_village
management <- fish_biomass$management
# Pad the observed values with NAs so they can share a data frame with the
# longer visreg prediction grid, split by management regime
nb_perm <- length(which(fish_biomass$management=="fishing_permitted"))
nb_pro <- length(which(fish_biomass$management=="fishing_prohibited"))
nafperm <- rep(NA,length(which(visreg_village$fit$management=="fishing_permitted"))-nb_perm)
nafpro <- rep(NA,length(which(visreg_village$fit$management=="fishing_prohibited"))-nb_pro)
pred_bm_fperm <- c(pred[which(management=="fishing_permitted")],nafperm)
pred_bm_fpro <- c(pred[which(management=="fishing_prohibited")],nafpro)
ttv_fperm <- c(ttv[which(management=="fishing_permitted")],nafperm)
ttv_fpro <- c(ttv[which(management=="fishing_prohibited")],nafpro)
datpro <- subset(visreg_village$fit, visreg_village$fit$management == "fishing_prohibited")
datpro <- datpro[,c("tt_village","visregFit","visregLwr","visregUpr")]
colnames(datpro) <- c("tt_village","fitpro","lowpro","highpro")
datperm <- subset(visreg_village$fit, visreg_village$fit$management == "fishing_permitted")
datperm <- datperm[,c("visregFit","visregLwr","visregUpr")]
colnames(datperm) <- c("fitperm","lowperm","highperm")
datvillage <- cbind(datpro,datperm,pred_bm_fperm,ttv_fperm,pred_bm_fpro,ttv_fpro)
# Truncate the fitted curve for fished reefs beyond the range of their own data
delv <- which(datvillage$tt_village > max(datvillage$ttv_fperm,na.rm=T) )
datvillage[delv,c("highperm","lowperm","fitperm")]<-NA
#Plot
white_theme<-theme(axis.text=element_text(colour="black",size=16),
axis.title=element_text(size=18),
axis.ticks=element_line(colour="black"),
panel.grid.minor=element_blank(),
panel.background=element_rect(fill="white",colour="black"),
legend.justification=c(1,0),legend.position=c(.95, .05),line= element_blank(),
plot.background=element_rect(fill="transparent",colour=NA))
pvillage <- ggplot(datvillage)+
geom_line(aes(tt_village,fitpro),colour="#5ab4ac",size=1)+
geom_ribbon(aes(x=tt_village,ymax=highpro,ymin=lowpro),fill="#5ab4ac",alpha=0.5)+
geom_line(aes(tt_village,fitperm),colour="#d8b365",size=1)+
geom_ribbon(aes(x=tt_village,ymax=highperm,ymin=lowperm),fill="#d8b365",alpha=0.5)+
scale_x_continuous("Travel time from community (h)")+
scale_y_continuous("Log fish biomass (kg/ha)",limits=c(1.72,3.66),breaks=c(1.88,2.2,2.48,2.7,2.9,3,3.18,3.3,3.48,3.6),
labels=c(75,150,300,500,800,1000,1500,2000,3000,4000))+
white_theme+
annotate("text", x = 0, y=3.66, label = "a", size=7,fontface =2)+
#legend
annotate("text", x = 3.29, y=1.75, label = "Fishing permitted",size=5)+
annotate("text", x = 3.31, y=1.85, label = "Fishing prohibited",size=5)+
geom_segment(aes(x = 2, y = 1.75, xend = 2.4, yend = 1.75), size=2, col = "#d8b365")+
geom_segment(aes(x = 2, y = 1.85, xend = 2.4 , yend = 1.85), size=2, col = "#5ab4ac")
#Human population size
visreg_pop <- visreg(mod,"human_pop",by="management",overlay=T,ylim=c(1,4),band=T,plot=F)
pop <- fish_biomass$human_pop
pop_fperm <- c(pop[which(management=="fishing_permitted")],nafperm)
pop_fpro <- c(pop[which(management=="fishing_prohibited")],nafpro)
datpro <- subset(visreg_pop$fit, visreg_pop$fit$management == "fishing_prohibited")
datpro <- datpro[,c("human_pop","visregFit","visregLwr","visregUpr")]
colnames(datpro) <- c("human_pop","fitpro","lowpro","highpro")
datperm <- subset(visreg_pop$fit, visreg_pop$fit$management == "fishing_permitted")
datperm <- datperm[,c("visregFit","visregLwr","visregUpr")]
colnames(datperm) <- c("fitperm","lowperm","highperm")
datpop <- cbind(datpro,datperm,pred_bm_fperm,pop_fperm,pred_bm_fpro,pop_fpro)
del <- which(datpop$human_pop > max(datpop$pop_fpro,na.rm=T) )
datpop[del,c("highpro","lowpro","fitpro")]<-NA
#Plot
ppop <- ggplot(datpop)+
geom_line(aes(human_pop,fitpro),colour="#5ab4ac",size=1)+
geom_ribbon(aes(x=human_pop,ymax=highpro,ymin=lowpro),fill="#5ab4ac",alpha=0.5)+
geom_line(aes(human_pop,fitperm),colour="#d8b365",size=1)+
geom_ribbon(aes(x=human_pop,ymax=highperm,ymin=lowperm),fill="#d8b365",alpha=0.5)+
scale_x_continuous("Log human population size",limits=c(0,4))+
scale_y_continuous("Log fish biomass (kg/ha)",limits=c(1.72,3.66),breaks=c(1.88,2.2,2.48,2.7,2.9,3,3.18,3.3,3.48,3.6),
labels=c(75,150,300,500,800,1000,1500,2000,3000,4000))+
white_theme+
annotate("text", x = 0, y=3.66, label = "c", size=7,fontface =2)
#Travel time from the nearest market
visreg_market <- visreg(mod,"tt_market",by="management",overlay=T,ylim=c(1,4),band=T,plot=F)
ttm <- fish_biomass$tt_market
ttm_fperm <- c(ttm[which(management=="fishing_permitted")],nafperm)
ttm_fpro <- c(ttm[which(management=="fishing_prohibited")],nafpro)
datpro <- subset(visreg_market$fit, visreg_market$fit$management == "fishing_prohibited")
datpro <- datpro[,c("tt_market","visregFit","visregLwr","visregUpr")]
colnames(datpro) <- c("tt_market","fitpro","lowpro","highpro")
datperm <- subset(visreg_market$fit, visreg_market$fit$management == "fishing_permitted")
datperm <- datperm[,c("visregFit","visregLwr","visregUpr")]
colnames(datperm) <- c("fitperm","lowperm","highperm")
datmarket <- cbind(datpro,datperm,pred_bm_fperm,ttm_fperm,pred_bm_fpro,ttm_fpro)
delm <- which(datmarket$tt_market > max(datmarket$ttm_fperm,na.rm=T) )
datmarket[delm,c("highperm","lowperm","fitperm")]<-NA
# Plot
pmarket <- ggplot(datmarket)+
geom_line(aes(tt_market,fitpro),colour="#5ab4ac",size=1)+
geom_ribbon(aes(x=tt_market,ymax=highpro,ymin=lowpro),fill="#5ab4ac",alpha=0.5)+
geom_line(aes(tt_market,fitperm),colour="#d8b365",size=1)+
geom_ribbon(aes(x=tt_market,ymax=highperm,ymin=lowperm),fill="#d8b365",alpha=0.5)+
scale_x_continuous("Travel time from market (h)",limits=c(1,10),breaks=c(1,2,3,4,5,6,7,8,9,10),
labels=c(1,2,3,4,5,6,7,8,9,10))+
scale_y_continuous("Log fish biomass (kg/ha)",limits=c(1.72,3.66),breaks=c(1.88,2.2,2.48,2.7,2.9,3,3.18,3.3,3.48,3.6),
labels=c(75,150,300,500,800,1000,1500,2000,3000,4000))+
white_theme+
annotate("text", x = 1, y=3.66, label = "b", size=7,fontface =2)
#Management
visreg_management <- visreg(mod,"management",ylim=c(1,4),band=T,plot=F)
ttm_fperm <- c(ttm[which(management=="fishing_permitted")],nafperm)
ttm_fpro <- c(ttm[which(management=="fishing_prohibited")],nafpro)
datpro <- subset(visreg_management$fit, visreg_management$fit$management == "fishing_prohibited")
datpro <- datpro[,c("visregFit","visregLwr","visregUpr")]
colnames(datpro) <- c("fitpro","lowpro","highpro")
datperm <- subset(visreg_management$fit, visreg_management$fit$management == "fishing_permitted")
datperm <- datperm[,c("visregFit","visregLwr","visregUpr")]
colnames(datperm) <- c("fitperm","lowperm","highperm")
x1 <- c(1,2.5)
y1 <- c(datperm$lowperm,datpro$lowpro)
x2 <- c(2,3.5)
y2 <- c(datperm$highperm,datpro$highpro)
type=c("Fishing permitted","Fishing prohibited")
d <- data.frame(y1,y2,x1,x2,type)
#Plot
white_theme2<-theme(axis.text=element_text(colour="black",size=16),
axis.title=element_text(size=18),
axis.ticks=element_line(colour="black"),
panel.grid.minor=element_blank(),
panel.background=element_rect(fill="white",colour="black"),
legend.position='none',
plot.background=element_rect(fill="transparent",colour=NA))
pmanagement <- ggplot()+
geom_rect(data = d, mapping=aes(xmin=x1, xmax=x2, ymin=y1, ymax=y2,fill=type), alpha=0.5) +
scale_fill_manual(values = alpha(c("Fishing permitted"= "#d8b365","Fishing prohibited" = "#5ab4ac"), 0.5))+
geom_segment(aes(x = 1, y = fitperm, xend = 2, yend = fitperm, colour = "segment"),col="#d8b365",size=2, data = datperm)+
geom_segment(aes(x = 2.5, y = fitpro, xend = 3.5, yend = fitpro, colour = "segment"),col="#5ab4ac",size=2, data = datpro)+
scale_x_continuous("Management",limits=c(1,3.5),breaks=c(1.5,3),
labels=c("Fishing permitted","Fishing prohibited"))+
scale_y_continuous("Log fish biomass (kg/ha)",limits=c(1.72,3.66),breaks=c(1.88,2.2,2.48,2.7,2.9,3,3.18,3.3,3.48,3.6),
labels=c(75,150,300,500,800,1000,1500,2000,3000,4000))+
white_theme2+
annotate("text", x = 1, y=3.66, label = "d", size=7,fontface =2)
#Multiplot function
multiplot <- function(..., plotlist=NULL, file, cols=1, layout=NULL) {
library(grid)
# Make a list from the ... arguments and plotlist
plots <- c(list(...), plotlist)
numPlots = length(plots)
# If layout is NULL, then use 'cols' to determine layout
if (is.null(layout)) {
# Make the panel
# ncol: Number of columns of plots
# nrow: Number of rows needed, calculated from # of cols
layout <- matrix(seq(1, cols * ceiling(numPlots/cols)),
ncol = cols, nrow = ceiling(numPlots/cols))
}
if (numPlots==1) {
print(plots[[1]])
} else {
# Set up the page
grid.newpage()
pushViewport(viewport(layout = grid.layout(nrow(layout), ncol(layout))))
# Make each plot, in the correct location
for (i in 1:numPlots) {
# Get the i,j matrix positions of the regions that contain this subplot
matchidx <- as.data.frame(which(layout == i, arr.ind = TRUE))
print(plots[[i]], vp = viewport(layout.pos.row = matchidx$row,
layout.pos.col = matchidx$col))
}
}
} # end of the function
# 4-panel plot
#jpeg("Partial_effects_Maire_et_al_2020.jpeg",res=300, width=3000, height=3000)
multiplot(pvillage, ppop, pmarket, pmanagement, cols=2)
#graphics.off()
rm(list=ls())
#############################################################################################################
#############################################################################################################
# Figure 3 - Associations between market proximity and (a) socioeconomic characteristics of communities
# through a Principal Components Analysis and (b) selling strategies.
# (a)
load("coastal_communities.RData")
#PCA
res.pca <- PCA(coastal_communities[,-13], graph = F, quanti.sup=12) #Management as supplementary variable and remove the identity of Market for PCA
colvar <- c("#66c2a5","#66c2a5",
"#fc8d62","#fc8d62","#fc8d62","#fc8d62",
"#8da0cb","#8da0cb","#8da0cb","#8da0cb",
"black")
var <- c("aComposition","aComposition",
"cScale","cScale","cScale","cScale",
"bTechnique","bTechnique","bTechnique","bTechnique",
"dMarket access")
themepca <-theme(legend.title = element_text(size=15),
legend.text=element_text(size=14),
axis.text.x = element_text(size=14),
axis.text.y = element_text(size=14),
axis.title.x =element_text(size=14),
axis.title.y =element_text(size=14))
p <- fviz_pca_biplot(res.pca,
#Indivdiduals
pointshape = 21, pointsize=3, repel=T, col.ind = "black", fill.ind = coastal_communities$Market, mean.point = F,
#Variables
col.var = var, addEllipses=F, ellipse.level=0.90,fill.var = var, col.quanti.sup = "black",arrowsize=1,
labelsize=4,title="", legend.title = list(fill = "Nearest market", color = "Effects") ) + themepca
pca <- p + scale_color_manual(labels = c("Composition","Technique","Scale","Market access"), values = c("#1b9e77", "#d95f02","#7570b3","#e7298a"))+
scale_fill_manual(labels=c("Ambanja","Ambilobe","Hell Ville"),values=c("#f7f7f7","#252525","#969696"))
# (b)
load("selling_strategies.RData")
# Boxplot stats with whiskers at the 5th/95th percentiles (rather than the
# ggplot default of 1.5*IQR); used via stat_summary(fun.data = quantiles_95)
quantiles_95 <- function(x) {
r <- quantile(x, probs=c(0.05, 0.25, 0.5, 0.75, 0.95))
names(r) <- c("ymin", "lower", "middle", "upper", "ymax")
r
}
white_theme<-theme(axis.text=element_text(colour="black",size=14),
axis.title=element_text(size=15),
axis.ticks=element_line(colour="black"),
panel.grid.minor=element_blank(),
panel.background=element_rect(fill="white",colour="black"),
legend.justification=c(1,0),legend.position=c(.95, .05),line= element_blank(),
plot.background=element_rect(fill="transparent",colour=NA))
midd <- arrange(selling_strategies, factor(selling, levels = c("Middlemen & market","Middlemen","Community only")))
middlemen <- ggplot(midd, aes(x=selling, y=travel_time_market)) +
stat_summary(fun.data = quantiles_95, geom="boxplot",fill="#8C9091")+
scale_y_continuous("Travel time from market (h)")+
scale_x_discrete("") +
coord_flip() +
white_theme
# 2-panel plot
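# multiplot() is defined again because rm(list=ls()) above cleared the workspace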
multiplot <- function(..., plotlist=NULL, file, cols=1, layout=NULL) {
library(grid)
# Make a list from the ... arguments and plotlist
plots <- c(list(...), plotlist)
numPlots = length(plots)
# If layout is NULL, then use 'cols' to determine layout
if (is.null(layout)) {
# Make the panel
# ncol: Number of columns of plots
# nrow: Number of rows needed, calculated from # of cols
layout <- matrix(seq(1, cols * ceiling(numPlots/cols)),
ncol = cols, nrow = ceiling(numPlots/cols))
}
if (numPlots==1) {
print(plots[[1]])
} else {
# Set up the page
grid.newpage()
pushViewport(viewport(layout = grid.layout(nrow(layout), ncol(layout))))
# Make each plot, in the correct location
for (i in 1:numPlots) {
# Get the i,j matrix positions of the regions that contain this subplot
matchidx <- as.data.frame(which(layout == i, arr.ind = TRUE))
print(plots[[i]], vp = viewport(layout.pos.row = matchidx$row,
layout.pos.col = matchidx$col))
}
}
} # end of the function
multiplot(pca, middlemen, cols=2)
#############################################################################################################
# Appendices
#############################################################################################################
# Appendix 2 - Correlogram showing correlations between the ten socioeconomic indicators measured in coastal
# communities and market access.
require(corrgram)
require(corrplot)
ccor <- coastal_communities[,-c(12,13)] #Delete Management and Market
cor_mat <- cor(ccor)
cor.mtest <- function(mat, ...) {
mat <- as.matrix(mat)
n <- ncol(mat)
p.mat<- matrix(NA, n, n)
diag(p.mat) <- 0
for (i in 1:(n - 1)) {
for (j in (i + 1):n) {
tmp <- cor.test(mat[, i], mat[, j], ...)
p.mat[i, j] <- p.mat[j, i] <- tmp$p.value
}
}
colnames(p.mat) <- rownames(p.mat) <- colnames(mat)
p.mat
}
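# additional arguments in '...' are passed through to cor.test(), e.g.
# cor.mtest(ccor, method = "spearman") for rank-based p-values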
# matrix of p-values for the pairwise correlations
p.mat <- cor.mtest(ccor)
col <- colorRampPalette(c("#BB4444", "#EE9988", "#FFFFFF", "#77AADD", "#4477AA"))
corrplot(cor_mat, method="color", col=col(200),
type="upper",
addCoef.col = "black", # Add coefficient of correlation
tl.col="black", tl.srt=45, tl.cex = 0.9, number.cex = .7, #Text label color and rotation
# Combine with significance
p.mat = p.mat, sig.level = 1, insig = "blank",
# hide correlation coefficient on the principal diagonal
diag=FALSE
)
#############################################################################################################
# Appendix 3 - Scores (cos2) of (a) each variable and (b) each community included in the PCA linking
# market access and social characteristics of coastal communities.
res.pca <- PCA(coastal_communities[,-13], graph = F, quanti.sup=12) #Management as supplementary variable and remove the identity of Market for PCA
var <- fviz_cos2(res.pca, choice = "var", labelsize=3, axes = 1:2,
title="")+theme_minimal()+geom_hline(yintercept=0.4)
var2 <- ggpubr::ggpar(var,
title = "",
xlab = "Variables", ylab = "Cos2",
ggtheme = theme_minimal(), palette = "jco",
cex.lab=2,ylim=c(0,1),font.tickslab=c(10),font.xtickslab=8)
var3 <- var2 + theme(axis.text.x = element_text(angle = 45, hjust = 1))
comm <- fviz_cos2(res.pca, choice = "ind", labelsize=3, axes = 1:2,
title="")+theme_minimal()+geom_hline(yintercept=0.4)
comm2 <- ggpubr::ggpar(comm,
title = "",
xlab = "Communities", ylab = "Cos2",
ggtheme = theme_minimal(),
palette = "jco",
cex.lab=2,ylim=c(0,1),font.tickslab=c(10),font.xtickslab=8)
comm3 <- comm2 + theme(axis.text.x = element_text(angle = 45, hjust = 1))
multiplot(var3, comm3, cols=2)
#############################################################################################################
# Appendix 4 - Associations between market proximity and the selling of fish catches
sold <- arrange(selling_strategies, factor(selling, levels = c("Community only","Middlemen & market","Middlemen")))
prop_fish_sold <- ggplot(aes(x=selling, y=fish_sold),data=sold) +
stat_summary(fun.data = quantiles_95, geom="boxplot",fill="#8C9091")+
  scale_y_continuous("% of fish sold",limits=c(65,95),breaks=c(65,70,75,80,85,90,95))+
scale_x_discrete("") +
coord_flip() +
white_theme
plot(prop_fish_sold)
#############################################################################################################
#End of script
#
| /Maire_et_al_2020.R | no_license | EvaMaire/Markets_NWMadagascar | R | false | false | 21,830 | r |
require(RJSONIO)

# Round-trip a JSON string: parse it into an R object, then serialize it back.
run <- function(jsonObj) {
  o <- fromJSON(jsonObj)
  toJSON(o)
}
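# Example call (hypothetical input):
# run('{"a": 1, "b": [2, 3]}')  # returns the same data re-serialized by RJSONIO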
| /examples/ex11.R | permissive | ffittschen/node-rio | R | false | false | 88 | r |
library(DOvalidation)
### Name: K.epa
### Title: Epanechnikov Kernel
### Aliases: K.epa
### Keywords: distribution
### ** Examples
curve(K.epa,-1.5,1.5,main="Epanechnikov kernel",ylab="K(u)",xlab="u")
# The left one-sided kernel: restrict the support to u < 0 and double the height
# so it still integrates to 1
K.epa.left<-function(u) return(2*K.epa(u)*(u<0))
curve(K.epa.left,-1.5,1.5,main="Left one-sided Epanechnikov kernel",ylab="K(u)",xlab="u")
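# K.epa should implement the Epanechnikov kernel K(u) = 0.75*(1 - u^2) on [-1, 1]
# (0 elsewhere). A quick sanity check that both versions integrate to 1:
# integrate(K.epa, -1, 1)       # ~ 1
# integrate(K.epa.left, -1, 0)  # ~ 1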
| /data/genthat_extracted_code/DOvalidation/examples/K.epa.Rd.R | no_license | surayaaramli/typeRrh | R | false | false | 367 | r |
#' The function to impute ordered categorical variables
#'
#' The function uses the proportional odds logistic regression (polr) approach,
#' implemented in \code{mice}.
#' @param y_imp A vector with the variable to impute.
#' @param X_imp A data.frame with the fixed effects variables.
#' @param rounding_degrees A numeric vector with the presumed rounding degrees.
#' @return An n x 1 data.frame with the original and imputed values as a factor.
imp_orderedcat_single <- function(y_imp, X_imp, rounding_degrees = c(1, 10, 100, 1000)){
categories <- levels(y_imp)
# ----------------------------- preparing the X data ------------------
# remove excessive variables
X_imp <- cleanup(X_imp)
# standardize X
X_imp_stand <- stand(X_imp, rounding_degrees = rounding_degrees)
  #the missing indicator marks which values of y are missing.
missind <- is.na(y_imp)
#starting model
ph <- sample_imp(y_imp)[, 1]
tmp_0_all <- data.frame(target = ph)
xnames_0 <- paste("X", 1:ncol(X_imp_stand), sep = "")
tmp_0_all[xnames_0] <- X_imp_stand
tmp_0_sub <- tmp_0_all[!missind, , drop = FALSE]
reg_1_all <- MASS::polr(target ~ 1 + ., data = tmp_0_all, method = "probit")
reg_1_sub <- MASS::polr(target ~ 1 + ., data = tmp_0_sub, method = "probit")
X_model_matrix_1_all <- stats::model.matrix(reg_1_all)
xnames_1 <- paste("X", 1:ncol(X_model_matrix_1_all), sep = "")
#remove unneeded variables
xnames_2 <- xnames_1[!is.na(stats::coefficients(reg_1_sub))]
tmp_2_all <- data.frame(target = as.factor(y_imp)) # mice needs the variable as a factor
tmp_2_all[, xnames_2] <- X_model_matrix_1_all[, !is.na(stats::coefficients(reg_1_sub)),
drop = FALSE]
everything <- mice::mice(data = tmp_2_all, m = 1,
method = "polr",
predictorMatrix = (1 - diag(1, ncol(tmp_2_all))),
visitSequence = (1:ncol(tmp_2_all))[apply(is.na(tmp_2_all),2,any)],
post = vector("character", length = ncol(tmp_2_all)),
defaultMethod = "polr",
maxit = 10,
diagnostics = TRUE,
printFlag = FALSE,
seed = NA,
imputationMethod = NULL,
defaultImputationMethod = NULL,
data.init = NULL)
#Initialising the returning vector
y_ret <- data.frame(y_ret = y_imp)
y_ret[missind, 1] <- everything$imp[[1]][, 1]
return(y_ret)
}
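# Minimal usage sketch (hypothetical data; assumes the package's internal helpers
# cleanup(), stand() and sample_imp() are available, e.g. when run inside hmi):
# set.seed(1)
# X <- data.frame(x1 = rnorm(100), x2 = runif(100))
# y <- cut(rnorm(100), breaks = 3, labels = c("low", "mid", "high"),
#          ordered_result = TRUE)
# y[sample(100, 20)] <- NA                          # introduce missing values
# imputed <- imp_orderedcat_single(y_imp = y, X_imp = X)
# table(imputed$y_ret, useNA = "ifany")             # no NAs should remain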
| /R/hmi_imp_catordered_single_2017-08-02.R | no_license | matthiasspeidel/hmi | R | false | false | 2,454 | r |
/CM274-Introduccion_a_la_Estadistica_y_Probabilidades/Estadistica_Descriptiva/Tabla_Frecuencias_Estadistica.R | no_license | fmorenovr/ComputerScience_UNI | R | false | false | 4,386 | r | ||
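# NOTE: no library() calls here -- this script appears to assume packages attached
# by an earlier script in the project (inferred from the functions used below):
# library(tidyverse)  # ggplot2, dplyr, tidyr::expand_grid(), forcats::fct_rev()
# library(tidybayes)  # epred_draws(), stat_lineribbon()
# library(patchwork)  # '|' / '/' plot composition
# (here, glue and pdftools are called with '::', so they only need to be installed)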
theme_clean <- function() {
theme_minimal(base_family = "sans", base_size = 12) +
theme(
plot.title = element_text(size = rel(1), margin = margin(0,0,0,0,"cm")),
plot.background = element_rect(fill = "white", color = NA),
panel.background = element_blank(),
panel.border = element_rect(fill = NA, color = "black", size = .5),
panel.grid = element_blank(),
panel.spacing = unit(.5, "lines"),
axis.ticks = element_line(size = 0.5, color = "black"),
axis.ticks.length = unit(.2, 'cm'),
strip.text = element_text(hjust = 0.5),
strip.background = element_rect(color = NA, fill = "white"),
legend.margin = margin(0,0,0,0,'cm'),
legend.position = "none"
)
}
theme_set(theme_clean())
# # use for Class 1 models
# df_analysis_bkt0 <- df_analysis_bkt0 %>%
# filter(trout_class %in% c("CLASS I","CLASS II")) %>% droplevels()
# df_analysis_bnt0 <- df_analysis_bnt0 %>%
# filter(trout_class %in% c("CLASS I","CLASS II")) %>% droplevels()
# pred1 %>%
# ggplot(aes(x = mean.tmax_summer, y = .epred,
# group = latitude_f)) +
# stat_lineribbon(.width = c(0.5, 0.8)) +
# scale_fill_brewer(palette = "Reds") +
# facet_wrap(vars(latitude_f), scales = "free_y") +
# labs(x = "Max summer temperature",
# y = expression(Density~(fish~km^{-1})),
# fill = "CI", title = "(a) Brook Trout - summer max temperature") +
# coord_cartesian(clip = "off", ylim = c(0,1300))
# Summer temp ----------------------------------------
nd.1 <- expand_grid(
total_effort = 1,year_s = 0,
reach_id = levels(df_analysis_bkt0$reach_id)[[1]],
gradient = 0, stream_order = 0,
latitude_s = c(-1,0.5,1.5),
mean.tmax_summer = seq(-4,4, length.out = 200),
mean.tmax_autumn = 0,
mean.tmax_winter = 0,
mean.tmax_spring = 0,
total.prcp_summer =0,
total.prcp_autumn = 0,
total.prcp_winter = 0,
total.prcp_spring = 0
)
nd.2 <- expand_grid(
total_effort = 1,year_s = 0,
reach_id = levels(df_analysis_bnt0$reach_id)[[1]],
gradient = 0, stream_order = 0,
latitude_s = c(-1,0.5,1.5),
mean.tmax_summer = seq(-4,4, length.out = 200),
mean.tmax_autumn = 0,
mean.tmax_winter = 0,
mean.tmax_spring = 0,
total.prcp_summer =0,
total.prcp_autumn = 0,
total.prcp_winter = 0,
total.prcp_spring = 0
)
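# All covariates are on the standardized scale, so 0 = sample mean; the three
# latitude_s values (-1, 0.5, 1.5) correspond to the South/Mid/North WI labels
# assigned in the recode() calls below.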
pred1 <- bkt.mod %>%
epred_draws(newdata = nd.1) %>%
mutate(species="Brook Trout",
latitude_f = as.factor(latitude_s)) %>%
mutate(latitude_f = recode(
latitude_f, "-1" = "South WI", "0.5" = "Mid WI", "1.5" = "North WI"))
pred2 <- bnt.mod %>%
epred_draws(newdata = nd.2) %>%
mutate(species="Brown Trout",
latitude_f = as.factor(latitude_s)) %>%
mutate(latitude_f = recode(
latitude_f, "-1" = "South WI", "0.5" = "Mid WI", "1.5" = "North WI"))
p.bkt.temp.su.x <- pred1 %>%
ggplot(aes(x = mean.tmax_summer, y = .epred/1.609,
group = latitude_f)) +
stat_lineribbon(.width = c(0.5, 0.8)) +
scale_fill_brewer(palette = "Reds") +
facet_wrap(vars(fct_rev(latitude_f)), scales = "free_y", nrow=3, ncol=1) +
labs(x = "Max summer temperature",
y = expression(Recruitment~strength~(YOY~km^{-1})),
fill = "CI")
p.bnt.temp.su.x <- pred2 %>%
ggplot(aes(x = mean.tmax_summer, y = .epred/1.609,
group = latitude_f)) +
stat_lineribbon(.width = c(0.5, 0.8)) +
scale_fill_brewer(palette = "Reds") +
facet_wrap(vars(fct_rev(latitude_f)), scales = "free_y", nrow=3, ncol=1) +
labs(x = "Max summer temperature",
y = expression(Recruitment~strength~(YOY~km^{-1})),
fill = "CI")
# Fall temp ----------------------------------------
nd.1 <- expand_grid(
total_effort = 1,year_s = 0,
reach_id = levels(df_analysis_bkt0$reach_id)[[1]],
gradient = 0, stream_order = 0,
latitude_s = c(-1,0.5,1.5),
mean.tmax_summer = 0,
mean.tmax_autumn = seq(-4,4, length.out = 200),
mean.tmax_winter = 0,
mean.tmax_spring = 0,
total.prcp_summer =0,
total.prcp_autumn = 0,
total.prcp_winter = 0,
total.prcp_spring = 0
)
nd.2 <- expand_grid(
total_effort = 1,year_s = 0,
reach_id = levels(df_analysis_bnt0$reach_id)[[1]],
gradient = 0, stream_order = 0,
latitude_s = c(-1,0.5,1.5),
mean.tmax_summer = 0,
mean.tmax_autumn = seq(-4,4, length.out = 200),
mean.tmax_winter = 0,
mean.tmax_spring = 0,
total.prcp_summer =0,
total.prcp_autumn = 0,
total.prcp_winter = 0,
total.prcp_spring = 0
)
pred3 <- bkt.mod %>%
epred_draws(newdata = nd.1) %>%
mutate(species="Brook Trout",
latitude_f = as.factor(latitude_s)) %>%
mutate(latitude_f = recode(
latitude_f, "-1" = "South WI", "0.5" = "Mid WI", "1.5" = "North WI"))
pred4 <- bnt.mod %>%
epred_draws(newdata = nd.2) %>%
mutate(species="Brown Trout",
latitude_f = as.factor(latitude_s)) %>%
mutate(latitude_f = recode(
latitude_f, "-1" = "South WI", "0.5" = "Mid WI", "1.5" = "North WI"))
p.bkt.temp.au.x <- pred3 %>%
ggplot(aes(x = mean.tmax_autumn, y = .epred/1.609,
group = latitude_f)) +
stat_lineribbon(.width = c(0.5, 0.8)) +
scale_fill_brewer(palette = "Reds") +
facet_wrap(vars(fct_rev(latitude_f)), scales = "free_y", nrow=3, ncol=1) +
labs(x = "Max Autumn temperature",
y = expression(Recruitment~strength~(YOY~km^{-1})),
fill = "CI")
p.bnt.temp.au.x <- pred4 %>%
ggplot(aes(x = mean.tmax_autumn, y = .epred/1.609,
group = latitude_f)) +
  stat_lineribbon(.width = c(0.5, 0.8)) +
scale_fill_brewer(palette = "Reds") +
facet_wrap(vars(fct_rev(latitude_f)), scales = "free_y", nrow=3, ncol=1) +
labs(x = "Max Autumn temperature",
y = expression(Recruitment~strength~(YOY~km^{-1})),
fill = "CI")
# p.temp.au.int <-
# p.bkt.temp.au.x / p.bnt.temp.au.x +
# plot_layout(guides = "collect") &
# theme(legend.position='right',
# plot.margin = margin(.1,.1,.1,.1, unit = 'cm'),
# panel.background = element_rect(color = NA))
#
# ggsave(here("output","figs","temp_main_au_interact.png"),
# p.temp.au.int,
# device=agg_png, res=300, height = 6.5, width = 11)
# Winter temp ----------------------------------------
nd.1 <- expand_grid(
total_effort = 1,year_s = 0,
reach_id = levels(df_analysis_bkt0$reach_id)[[1]],
gradient = 0, stream_order = 0,
latitude_s = c(-1,0.5,1.5),
mean.tmax_summer = 0,
mean.tmax_autumn = 0,
mean.tmax_winter = seq(-4,4, length.out = 200),
mean.tmax_spring = 0,
total.prcp_summer =0,
total.prcp_autumn = 0,
total.prcp_winter = 0,
total.prcp_spring = 0
)
nd.2 <- expand_grid(
total_effort = 1,year_s = 0,
reach_id = levels(df_analysis_bnt0$reach_id)[[1]],
gradient = 0, stream_order = 0,
latitude_s = c(-1,0.5,1.5),
mean.tmax_summer = 0,
mean.tmax_autumn = 0,
mean.tmax_winter = seq(-4,4, length.out = 200),
mean.tmax_spring = 0,
total.prcp_summer =0,
total.prcp_autumn = 0,
total.prcp_winter = 0,
total.prcp_spring = 0
)
pred5 <- bkt.mod %>%
epred_draws(newdata = nd.1) %>%
mutate(species="Brook Trout",
latitude_f = as.factor(latitude_s)) %>%
mutate(latitude_f = recode(
latitude_f, "-1" = "South WI", "0.5" = "Mid WI", "1.5" = "North WI"))
pred6 <- bnt.mod %>%
epred_draws(newdata = nd.2) %>%
mutate(species="Brown Trout",
latitude_f = as.factor(latitude_s)) %>%
mutate(latitude_f = recode(
latitude_f, "-1" = "South WI", "0.5" = "Mid WI", "1.5" = "North WI"))
p.bkt.temp.wi.x <- pred5 %>%
ggplot(aes(x = mean.tmax_winter, y = .epred/1.609,
group = latitude_f)) +
stat_lineribbon(.width = c(0.5, 0.8)) +
scale_fill_brewer(palette = "Reds") +
facet_wrap(vars(fct_rev(latitude_f)), scales = "free_y", nrow=3, ncol=1) +
labs(x = "Max winter temperature",
y = expression(Recruitment~strength~(YOY~km^{-1})),
fill = "CI")
p.bnt.temp.wi.x <- pred6 %>%
ggplot(aes(x = mean.tmax_winter, y = .epred/1.609,
group = latitude_f)) +
stat_lineribbon(.width = c(0.5, 0.8)) +
scale_fill_brewer(palette = "Reds") +
facet_wrap(vars(fct_rev(latitude_f)), scales = "free_y", nrow=3, ncol=1) +
labs(x = "Max winter temperature",
y = expression(Recruitment~strength~(YOY~km^{-1})),
fill = "CI")
# p.temp.wi.int <-
# p.bkt.temp.wi.x / p.bnt.temp.wi.x +
# plot_layout(guides = "collect") &
# theme(legend.position='right',
# plot.margin = margin(.1,.1,.1,.1, unit = 'cm'),
# panel.background = element_rect(color = NA))
# ggsave(here("output","figs","temp_main_wi_interact.png"),
# p.temp.wi.int,
# device=agg_png, res=300, height = 6.5, width = 11)
# Spring temp ----------------------------------------
nd.1 <- expand_grid(
total_effort = 1,year_s = 0,
reach_id = levels(df_analysis_bkt0$reach_id)[[1]],
gradient = 0, stream_order = 0,
latitude_s = c(-1,0.5,1.5),
mean.tmax_summer = 0,
mean.tmax_autumn = 0,
mean.tmax_winter = 0,
mean.tmax_spring = seq(-4,4, length.out = 200),
total.prcp_summer =0,
total.prcp_autumn = 0,
total.prcp_winter = 0,
total.prcp_spring = 0
)
nd.2 <- expand_grid(
total_effort = 1,year_s = 0,
reach_id = levels(df_analysis_bnt0$reach_id)[[1]],
gradient = 0, stream_order = 0,
latitude_s = c(-1,0.5,1.5),
mean.tmax_summer = 0,
mean.tmax_autumn = 0,
mean.tmax_winter = 0,
mean.tmax_spring = seq(-4,4, length.out = 200),
total.prcp_summer =0,
total.prcp_autumn = 0,
total.prcp_winter = 0,
total.prcp_spring = 0
)
pred7 <- bkt.mod %>%
epred_draws(newdata = nd.1) %>%
mutate(species="Brook Trout",
latitude_f = as.factor(latitude_s)) %>%
mutate(latitude_f = recode(
latitude_f, "-1" = "South WI", "0.5" = "Mid WI", "1.5" = "North WI"))
pred8 <- bnt.mod %>%
epred_draws(newdata = nd.2) %>%
mutate(species="Brown Trout",
latitude_f = as.factor(latitude_s)) %>%
mutate(latitude_f = recode(
latitude_f, "-1" = "South WI", "0.5" = "Mid WI", "1.5" = "North WI"))
p.bkt.temp.sp.x <- pred7 %>%
ggplot(aes(x = mean.tmax_spring, y = .epred/1.609,
group = latitude_f)) +
stat_lineribbon(.width = c(0.5, 0.8)) +
scale_fill_brewer(palette = "Reds") +
facet_wrap(vars(fct_rev(latitude_f)), scales = "free_y", nrow=3, ncol=1) +
labs(x = "Max spring temperature",
y = expression(Recruitment~strength~(YOY~km^{-1})),
fill = "CI")
p.bnt.temp.sp.x <- pred8 %>%
ggplot(aes(x = mean.tmax_spring, y = .epred/1.609,
group = latitude_f)) +
stat_lineribbon(.width = c(0.5, 0.8)) +
scale_fill_brewer(palette = "Reds") +
facet_wrap(vars(fct_rev(latitude_f)), scales = "free_y", nrow=3, ncol=1) +
labs(x = "Max spring temperature",
y = expression(Recruitment~strength~(YOY~km^{-1})),
fill = "CI")
# p.temp.sp.int <-
# p.bkt.temp.sp.x / p.bnt.temp.sp.x +
# plot_layout(guides = "collect") &
# theme(legend.position='right',
# plot.margin = margin(.1,.1,.1,.1, unit = 'cm'),
# panel.background = element_rect(color = NA))
# ggsave(here("output","figs","temp_main_sp_interact.png"),
# p.temp.sp.int,
# device=agg_png, res=300, height = 6.5, width = 11)
# Summer rain ----------------------------------------
nd.1 <- expand_grid(
total_effort = 1,year_s = 0,
reach_id = levels(df_analysis_bkt0$reach_id)[[1]],
gradient = 0, stream_order = 0,
latitude_s = c(-1,0.5,1.5),
mean.tmax_summer = 0,
mean.tmax_autumn = 0,
mean.tmax_winter = 0,
mean.tmax_spring = 0,
total.prcp_summer = seq(-4,5, length.out = 200),
total.prcp_autumn = 0,
total.prcp_winter = 0,
total.prcp_spring = 0
)
nd.2 <- expand_grid(
total_effort = 1,year_s = 0,
reach_id = levels(df_analysis_bnt0$reach_id)[[1]],
gradient = 0, stream_order = 0,
latitude_s = c(-1,0.5,1.5),
mean.tmax_summer = 0,
mean.tmax_autumn = 0,
mean.tmax_winter = 0,
mean.tmax_spring = 0,
total.prcp_summer = seq(-4,5, length.out = 200),
total.prcp_autumn = 0,
total.prcp_winter = 0,
total.prcp_spring = 0
)
pred9 <- bkt.mod %>%
epred_draws(newdata = nd.1) %>%
mutate(species="Brook Trout",
latitude_f = as.factor(latitude_s)) %>%
mutate(latitude_f = recode(
latitude_f, "-1" = "South WI", "0.5" = "Mid WI", "1.5" = "North WI"))
pred10 <- bnt.mod %>%
epred_draws(newdata = nd.2) %>%
mutate(species="Brown Trout",
latitude_f = as.factor(latitude_s)) %>%
mutate(latitude_f = recode(
latitude_f, "-1" = "South WI", "0.5" = "Mid WI", "1.5" = "North WI"))
p.bkt.rain.su.x <- pred9 %>%
ggplot(aes(x = total.prcp_summer, y = .epred/1.609,
group = latitude_f)) +
stat_lineribbon(.width = c(0.5, 0.8)) +
scale_fill_brewer(palette = "Blues") +
facet_wrap(vars(fct_rev(latitude_f)), scales = "free_y", nrow=3, ncol=1) +
labs(x = "Summer precipitation",
y = expression(Recruitment~strength~(YOY~km^{-1})),
fill = "CI")
p.bnt.rain.su.x <- pred10 %>%
ggplot(aes(x = total.prcp_summer, y = .epred/1.609,
group = latitude_f)) +
stat_lineribbon(.width = c(0.5, 0.8)) +
scale_fill_brewer(palette = "Blues") +
facet_wrap(vars(fct_rev(latitude_f)), scales = "free_y", nrow=3, ncol=1) +
labs(x = "Summer precipitation",
y = expression(Recruitment~strength~(YOY~km^{-1})),
fill = "CI")
# p.rain.su.panel <-
# p.bkt.rain.su.x / p.bnt.rain.su.x +
# plot_layout(guides = "collect") &
# theme(legend.position='right',
# plot.margin = margin(.1,.1,.1,.1, unit = 'cm'),
# panel.background = element_rect(color = NA))
# ggsave(here("output","figs","epred_rain_su_x.png"),
# p.rain.panel,
# device=agg_png, res=300, height = 6.5, width = 11)
# Fall rain ----------------------------------------
nd.1 <- expand_grid(
total_effort = 1,year_s = 0,
reach_id = levels(df_analysis_bkt0$reach_id)[[1]],
gradient = 0, stream_order = 0,
latitude_s = c(-1,0.5,1.5),
mean.tmax_summer = 0,
mean.tmax_autumn = 0,
mean.tmax_winter = 0,
mean.tmax_spring = 0,
total.prcp_summer = 0,
total.prcp_autumn = seq(-4,5, length.out = 200),
total.prcp_winter = 0,
total.prcp_spring = 0
)
nd.2 <- expand_grid(
total_effort = 1,year_s = 0,
reach_id = levels(df_analysis_bnt0$reach_id)[[1]],
gradient = 0, stream_order = 0,
latitude_s = c(-1,0.5,1.5),
mean.tmax_summer = 0,
mean.tmax_autumn = 0,
mean.tmax_winter = 0,
mean.tmax_spring = 0,
total.prcp_summer = 0,
total.prcp_autumn = seq(-4,5, length.out = 200),
total.prcp_winter = 0,
total.prcp_spring = 0
)
pred11 <- bkt.mod %>%
epred_draws(newdata = nd.1) %>%
mutate(species="Brook Trout",
latitude_f = as.factor(latitude_s)) %>%
mutate(latitude_f = recode(
latitude_f, "-1" = "South WI", "0.5" = "Mid WI", "1.5" = "North WI"))
pred12 <- bnt.mod %>%
epred_draws(newdata = nd.2) %>%
mutate(species="Brown Trout",
latitude_f = as.factor(latitude_s)) %>%
mutate(latitude_f = recode(
latitude_f, "-1" = "South WI", "0.5" = "Mid WI", "1.5" = "North WI"))
p.bkt.rain.au.x <- pred11 %>%
ggplot(aes(x = total.prcp_autumn, y = .epred/1.609,
group = latitude_f)) +
stat_lineribbon(.width = c(0.5, 0.8)) +
scale_fill_brewer(palette = "Blues") +
facet_wrap(vars(fct_rev(latitude_f)), scales = "free_y", nrow=3, ncol=1) +
labs(x = "Autumn precipitation",
y = expression(Recruitment~strength~(YOY~km^{-1})),
fill = "CI")
p.bnt.rain.au.x <- pred12 %>%
ggplot(aes(x = total.prcp_autumn, y = .epred/1.609,
group = latitude_f)) +
stat_lineribbon(.width = c(0.5, 0.8)) +
scale_fill_brewer(palette = "Blues") +
facet_wrap(vars(fct_rev(latitude_f)), scales = "free_y", nrow=3, ncol=1) +
labs(x = "Autumn precipitation",
y = expression(Recruitment~strength~(YOY~km^{-1})),
fill = "CI")
# p.rain.au.panel <-
# p.bkt.rain.au.x / p.bnt.rain.au.x +
# plot_layout(guides = "collect") &
# theme(legend.position='right',
# plot.margin = margin(.1,.1,.1,.1, unit = 'cm'),
# panel.background = element_rect(color = NA))
# ggsave(here("output","figs","epred_rain_au_x.png"),
# p.rain.au.panel,
# device=agg_png, res=300, height = 6.5, width = 11)
# Winter rain ----------------------------------------
nd.1 <- expand_grid(
total_effort = 1,year_s = 0,
reach_id = levels(df_analysis_bkt0$reach_id)[[1]],
gradient = 0, stream_order = 0,
latitude_s = c(-1,0.5,1.5),
mean.tmax_summer = 0,
mean.tmax_autumn = 0,
mean.tmax_winter = 0,
mean.tmax_spring = 0,
total.prcp_summer =0,
total.prcp_autumn = 0,
total.prcp_winter = seq(-4,5, length.out = 200),
total.prcp_spring = 0
)
nd.2 <- expand_grid(
total_effort = 1,year_s = 0,
reach_id = levels(df_analysis_bnt0$reach_id)[[1]],
gradient = 0, stream_order = 0,
latitude_s = c(-1,0.5,1.5),
mean.tmax_summer = 0,
mean.tmax_autumn = 0,
mean.tmax_winter = 0,
mean.tmax_spring = 0,
total.prcp_summer =0,
total.prcp_autumn = 0,
total.prcp_winter = seq(-4,5, length.out = 200),
total.prcp_spring = 0
)
pred13 <- bkt.mod %>%
epred_draws(newdata = nd.1) %>%
mutate(species="Brook Trout",
latitude_f = as.factor(latitude_s)) %>%
mutate(latitude_f = recode(
latitude_f, "-1" = "South WI", "0.5" = "Mid WI", "1.5" = "North WI"))
pred14 <- bnt.mod %>%
epred_draws(newdata = nd.2) %>%
mutate(species="Brown Trout",
latitude_f = as.factor(latitude_s)) %>%
mutate(latitude_f = recode(
latitude_f, "-1" = "South WI", "0.5" = "Mid WI", "1.5" = "North WI"))
p.bkt.rain.wi.x <- pred13 %>%
ggplot(aes(x = total.prcp_winter, y = .epred/1.609,
group = latitude_f)) +
stat_lineribbon(.width = c(0.5, 0.8)) +
scale_fill_brewer(palette = "Blues") +
facet_wrap(vars(fct_rev(latitude_f)), scales = "free_y", nrow=3, ncol=1) +
labs(x = "Winter precipitation",
y = expression(Recruitment~strength~(YOY~km^{-1})),
fill = "CI")
p.bnt.rain.wi.x <- pred14 %>%
ggplot(aes(x = total.prcp_winter, y = .epred/1.609,
group = latitude_f)) +
stat_lineribbon(.width = c(0.5, 0.8)) +
scale_fill_brewer(palette = "Blues") +
facet_wrap(vars(fct_rev(latitude_f)), scales = "free_y", nrow=3, ncol=1) +
labs(x = "Winter precipitation",
y = expression(Recruitment~strength~(YOY~km^{-1})),
fill = "CI")
# p.rain.wi.panel <-
# p.bkt.rain.wi.x / p.bnt.rain.wi.x +
# plot_layout(guides = "collect") &
# theme(legend.position='right',
# plot.margin = margin(.1,.1,.1,.1, unit = 'cm'),
# panel.background = element_rect(color = NA))
# ggsave(here("output","figs","epred_rain_wi_x.png"),
# p.rain.wi.panel,
# device=agg_png, res=300, height = 6.5, width = 11)
# Spring rain ----------------------------------------
nd.1 <- expand_grid(
total_effort = 1,year_s = 0,
reach_id = levels(df_analysis_bkt0$reach_id)[[1]],
gradient = 0, stream_order = 0,latitude_s = c(-1,0.5,1.5),
mean.tmax_summer = 0, mean.tmax_autumn = 0,
mean.tmax_winter = 0, mean.tmax_spring = 0,
total.prcp_summer = 0, total.prcp_autumn = 0, total.prcp_winter = 0,
total.prcp_spring = seq(-4,5, length.out = 200)
)
nd.2 <- expand_grid(
total_effort = 1,year_s = 0,
reach_id = levels(df_analysis_bnt0$reach_id)[[1]],
gradient = 0, stream_order = 0,latitude_s = c(-1,0.5,1.5),
mean.tmax_summer = 0, mean.tmax_autumn = 0,
mean.tmax_winter = 0, mean.tmax_spring = 0,
total.prcp_summer = 0, total.prcp_autumn = 0, total.prcp_winter = 0,
total.prcp_spring = seq(-4,5, length.out = 200)
)
pred15 <- bkt.mod %>%
epred_draws(newdata = nd.1) %>%
mutate(species="Brook Trout",
latitude_f = as.factor(latitude_s)) %>%
mutate(latitude_f = recode(
latitude_f, "-1" = "South WI", "0.5" = "Mid WI", "1.5" = "North WI"))
pred16 <- bnt.mod %>%
epred_draws(newdata = nd.2) %>%
mutate(species="Brown Trout",
latitude_f = as.factor(latitude_s)) %>%
mutate(latitude_f = recode(
latitude_f, "-1" = "South WI", "0.5" = "Mid WI", "1.5" = "North WI"))
p.bkt.rain.sp.x <- pred15 %>%
ggplot(aes(x = total.prcp_spring, y = .epred/1.609,
group = latitude_f)) +
stat_lineribbon(.width = c(0.5, 0.8)) +
scale_fill_brewer(palette = "Blues") +
facet_wrap(vars(fct_rev(latitude_f)), scales = "free_y", nrow=3, ncol=1) +
labs(x = "Spring precipitation",
y = expression(Recruitment~strength~(YOY~km^{-1})),
fill = "CI")
p.bnt.rain.sp.x <- pred16 %>%
ggplot(aes(x = total.prcp_spring, y = .epred/1.609,
group = latitude_f)) +
stat_lineribbon(.width = c(0.5, 0.8)) +
scale_fill_brewer(palette = "Blues") +
facet_wrap(vars(fct_rev(latitude_f)), scales = "free_y", nrow=3, ncol=1) +
labs(x = "Spring precipitation",
y = expression(Recruitment~strength~(YOY~km^{-1})),
fill = "CI")
# p.rain.sp.panel <-
# p.bkt.rain.sp.x / p.bnt.rain.sp.x +
# plot_layout(guides = "collect") &
# theme(legend.position='right',
# plot.margin = margin(.1,.1,.1,.1, unit = 'cm'),
# panel.background = element_rect(color = NA))
# ggsave(here("output","figs","epred_rain_sp_x.png"),
# p.rain.sp.panel,
# device=agg_png, res=300, height = 6.5, width = 11)
# Plots ============================================
# Temp ---------------------------------
# p.bkt.temp.su.x
# p.bkt.temp.au.x
# p.bkt.temp.wi.x
# p.bkt.temp.sp.x
#
# p.bnt.temp.su.x
# p.bnt.temp.au.x
# p.bnt.temp.wi.x
# p.bnt.temp.sp.x
p.temp.x <-
(p.bkt.temp.su.x |
(p.bkt.temp.au.x + theme(axis.title.y = element_blank())) |
(p.bkt.temp.wi.x + theme(axis.title.y = element_blank())) |
(p.bkt.temp.sp.x + theme(axis.title.y = element_blank()))
)/
(p.bnt.temp.su.x |
(p.bnt.temp.au.x + theme(axis.title.y = element_blank())) |
(p.bnt.temp.wi.x + theme(axis.title.y = element_blank())) |
(p.bnt.temp.sp.x + theme(axis.title.y = element_blank()))
)
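# patchwork syntax: '|' places panels side by side, '/' stacks the two species
# rows; theme(axis.title.y = element_blank()) blanks the duplicated y-axis titles
# on the inner columns.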
# save plot
path <- here::here("output","figs1","fig4_temp_panel_km")
ggsave(
glue::glue("{path}.pdf"),
plot = p.temp.x,
width = 10,
height = 12,
device = cairo_pdf
)
# manually add panel letters then convert
pdftools::pdf_convert(
pdf = glue::glue("{path}.pdf"),
filenames = glue::glue("{path}.png"),
format = "png",
dpi = 600
)
# ggsave(here("output","figs","epred_x_temp.png"),p.temp.x,
# device=agg_png, res=300, height = 12, width = 10)
# ggsave(here("output","figs","epred_x_temp.pdf"),p.temp.x,
# device=cairo_pdf, height = 12, width = 10)
# a <- p.bkt.temp.su.x | (p.bnt.temp.su.x + theme(axis.title.y = element_blank()))
#
# ggsave(here("output","figs","epred_x_temp_su.png"), a,
# device=agg_png, res=600, height = 6.5, width = 7)
#
# b <- p.bkt.temp.sp.x | (p.bnt.temp.sp.x + theme(axis.title.y = element_blank()))
#
# ggsave(here("output","figs","epred_x_temp_sp.png"), b,
# device=agg_png, res=600, height = 6.5, width = 7)
# Rain -----------------------
# p.bkt.rain.su.x
# p.bkt.rain.au.x
# p.bkt.rain.wi.x
# p.bkt.rain.sp.x
#
# p.bnt.rain.su.x
# p.bnt.rain.au.x
# p.bnt.rain.wi.x
# p.bnt.rain.sp.x
p.rain.x <-
(p.bkt.rain.su.x |
(p.bkt.rain.au.x + theme(axis.title.y = element_blank())) |
(p.bkt.rain.wi.x + theme(axis.title.y = element_blank())) |
(p.bkt.rain.sp.x + theme(axis.title.y = element_blank()))
) /
(p.bnt.rain.su.x |
(p.bnt.rain.au.x + theme(axis.title.y = element_blank())) |
(p.bnt.rain.wi.x + theme(axis.title.y = element_blank())) |
(p.bnt.rain.sp.x + theme(axis.title.y = element_blank()))
)
# save plot
path <- here::here("output","figs1","fig5_rain_panel_km")
ggsave(
glue::glue("{path}.pdf"),
plot = p.rain.x,
width = 10,
height = 12,
device = cairo_pdf
)
# manually add fish images then convert
pdftools::pdf_convert(
pdf = glue::glue("{path}.pdf"),
filenames = glue::glue("{path}.png"),
format = "png",
dpi = 600
)
# ggsave(here("output","figs","epred_x_rain.png"), p.rain.x,
# device=agg_png, res=300, height = 12, width = 10)
# ggsave(here("output","figs","epred_x_rain.pdf"), p.rain.x,
# device=cairo_pdf, height = 12, width = 10)
# a <- p.bkt.rain.su.x | (p.bnt.rain.su.x + theme(axis.title.y = element_blank()))
#
# ggsave(here("output","figs","epred_x_rain_su.png"), a,
# device=agg_png, res=600, height = 6.5, width = 7)
#
# b <- p.bkt.rain.wi.x | (p.bnt.rain.wi.x + theme(axis.title.y = element_blank()))
#
# ggsave(here("output","figs","epred_x_rain_wi.png"), b,
# device=agg_png, res=600, height = 6.5, width = 7)
#
# c <- p.bkt.rain.sp.x | (p.bnt.rain.sp.x + theme(axis.title.y = element_blank()))
#
# ggsave(here("output","figs","epred_x_rain_sp.png"), c,
# device=agg_png, res=600, height = 6.5, width = 7)
| /R/63_brms_plots_interactions.R | no_license | bmait101/swass | R | false | false | 24,948 | r |
theme_clean <- function() {
theme_minimal(base_family = "sans", base_size = 12) +
theme(
plot.title = element_text(size = rel(1), margin = margin(0,0,0,0,"cm")),
plot.background = element_rect(fill = "white", color = NA),
panel.background = element_blank(),
panel.border = element_rect(fill = NA, color = "black", size = .5),
panel.grid = element_blank(),
panel.spacing = unit(.5, "lines"),
axis.ticks = element_line(size = 0.5, color = "black"),
axis.ticks.length = unit(.2, 'cm'),
strip.text = element_text(hjust = 0.5),
strip.background = element_rect(color = NA, fill = "white"),
legend.margin = margin(0,0,0,0,'cm'),
legend.position = "none"
)
}
theme_set(theme_clean())
# # use for Class 1 models
# df_analysis_bkt0 <- df_analysis_bkt0 %>%
# filter(trout_class %in% c("CLASS I","CLASS II")) %>% droplevels()
# df_analysis_bnt0 <- df_analysis_bnt0 %>%
# filter(trout_class %in% c("CLASS I","CLASS II")) %>% droplevels()
# pred1 %>%
# ggplot(aes(x = mean.tmax_summer, y = .epred,
# group = latitude_f)) +
# stat_lineribbon(.width = c(0.5, 0.8)) +
# scale_fill_brewer(palette = "Reds") +
# facet_wrap(vars(latitude_f), scales = "free_y") +
# labs(x = "Max summer temperature",
# y = expression(Density~(fish~km^{-1})),
# fill = "CI", title = "(a) Brook Trout - summer max temperature") +
# coord_cartesian(clip = "off", ylim = c(0,1300))
# Summer temp ----------------------------------------
nd.1 <- expand_grid(
total_effort = 1,year_s = 0,
reach_id = levels(df_analysis_bkt0$reach_id)[[1]],
gradient = 0, stream_order = 0,
latitude_s = c(-1,0.5,1.5),
mean.tmax_summer = seq(-4,4, length.out = 200),
mean.tmax_autumn = 0,
mean.tmax_winter = 0,
mean.tmax_spring = 0,
total.prcp_summer =0,
total.prcp_autumn = 0,
total.prcp_winter = 0,
total.prcp_spring = 0
)
nd.2 <- expand_grid(
total_effort = 1,year_s = 0,
reach_id = levels(df_analysis_bnt0$reach_id)[[1]],
gradient = 0, stream_order = 0,
latitude_s = c(-1,0.5,1.5),
mean.tmax_summer = seq(-4,4, length.out = 200),
mean.tmax_autumn = 0,
mean.tmax_winter = 0,
mean.tmax_spring = 0,
total.prcp_summer =0,
total.prcp_autumn = 0,
total.prcp_winter = 0,
total.prcp_spring = 0
)
pred1 <- bkt.mod %>%
epred_draws(newdata = nd.1) %>%
mutate(species="Brook Trout",
latitude_f = as.factor(latitude_s)) %>%
mutate(latitude_f = recode(
latitude_f, "-1" = "South WI", "0.5" = "Mid WI", "1.5" = "North WI"))
pred2 <- bnt.mod %>%
epred_draws(newdata = nd.2) %>%
mutate(species="Brown Trout",
latitude_f = as.factor(latitude_s)) %>%
mutate(latitude_f = recode(
latitude_f, "-1" = "South WI", "0.5" = "Mid WI", "1.5" = "North WI"))
p.bkt.temp.su.x <- pred1 %>%
ggplot(aes(x = mean.tmax_summer, y = .epred/1.609,
group = latitude_f)) +
stat_lineribbon(.width = c(0.5, 0.8)) +
scale_fill_brewer(palette = "Reds") +
facet_wrap(vars(fct_rev(latitude_f)), scales = "free_y", nrow=3, ncol=1) +
labs(x = "Max summer temperature",
y = expression(Recruitment~strength~(YOY~km^{-1})),
fill = "CI")
p.bnt.temp.su.x <- pred2 %>%
ggplot(aes(x = mean.tmax_summer, y = .epred/1.609,
group = latitude_f)) +
stat_lineribbon(.width = c(0.5, 0.8)) +
scale_fill_brewer(palette = "Reds") +
facet_wrap(vars(fct_rev(latitude_f)), scales = "free_y", nrow=3, ncol=1) +
labs(x = "Max summer temperature",
y = expression(Recruitment~strength~(YOY~km^{-1})),
fill = "CI")
# Fall temp ----------------------------------------
nd.1 <- expand_grid(
total_effort = 1,year_s = 0,
reach_id = levels(df_analysis_bkt0$reach_id)[[1]],
gradient = 0, stream_order = 0,
latitude_s = c(-1,0.5,1.5),
mean.tmax_summer = 0,
mean.tmax_autumn = seq(-4,4, length.out = 200),
mean.tmax_winter = 0,
mean.tmax_spring = 0,
total.prcp_summer =0,
total.prcp_autumn = 0,
total.prcp_winter = 0,
total.prcp_spring = 0
)
nd.2 <- expand_grid(
total_effort = 1,year_s = 0,
reach_id = levels(df_analysis_bnt0$reach_id)[[1]],
gradient = 0, stream_order = 0,
latitude_s = c(-1,0.5,1.5),
mean.tmax_summer = 0,
mean.tmax_autumn = seq(-4,4, length.out = 200),
mean.tmax_winter = 0,
mean.tmax_spring = 0,
total.prcp_summer =0,
total.prcp_autumn = 0,
total.prcp_winter = 0,
total.prcp_spring = 0
)
pred3 <- bkt.mod %>%
epred_draws(newdata = nd.1) %>%
mutate(species="Brook Trout",
latitude_f = as.factor(latitude_s)) %>%
mutate(latitude_f = recode(
latitude_f, "-1" = "South WI", "0.5" = "Mid WI", "1.5" = "North WI"))
pred4 <- bnt.mod %>%
epred_draws(newdata = nd.2) %>%
mutate(species="Brown Trout",
latitude_f = as.factor(latitude_s)) %>%
mutate(latitude_f = recode(
latitude_f, "-1" = "South WI", "0.5" = "Mid WI", "1.5" = "North WI"))
p.bkt.temp.au.x <- pred3 %>%
ggplot(aes(x = mean.tmax_autumn, y = .epred/1.609,
group = latitude_f)) +
stat_lineribbon(.width = c(0.5, 0.8)) +
scale_fill_brewer(palette = "Reds") +
facet_wrap(vars(fct_rev(latitude_f)), scales = "free_y", nrow=3, ncol=1) +
labs(x = "Max Autumn temperature",
y = expression(Recruitment~strength~(YOY~km^{-1})),
fill = "CI")
p.bnt.temp.au.x <- pred4 %>%
ggplot(aes(x = mean.tmax_autumn, y = .epred/1.609,
group = latitude_f)) +
stat_lineribbon(.width = c(0.5, 0.8)) +
scale_fill_brewer(palette = "Reds") +
facet_wrap(vars(fct_rev(latitude_f)), scales = "free_y", nrow=3, ncol=1) +
labs(x = "Max Autumn temperature",
y = expression(Recruitment~strength~(YOY~km^{-1})),
fill = "CI")
# p.temp.au.int <-
# p.bkt.temp.au.x / p.bnt.temp.au.x +
# plot_layout(guides = "collect") &
# theme(legend.position='right',
# plot.margin = margin(.1,.1,.1,.1, unit = 'cm'),
# panel.background = element_rect(color = NA))
#
# ggsave(here("output","figs","temp_main_au_interact.png"),
# p.temp.au.int,
# device=agg_png, res=300, height = 6.5, width = 11)
# Winter temp ----------------------------------------
nd.1 <- expand_grid(
total_effort = 1,year_s = 0,
reach_id = levels(df_analysis_bkt0$reach_id)[[1]],
gradient = 0, stream_order = 0,
latitude_s = c(-1,0.5,1.5),
mean.tmax_summer = 0,
mean.tmax_autumn = 0,
mean.tmax_winter = seq(-4,4, length.out = 200),
mean.tmax_spring = 0,
total.prcp_summer =0,
total.prcp_autumn = 0,
total.prcp_winter = 0,
total.prcp_spring = 0
)
nd.2 <- expand_grid(
total_effort = 1,year_s = 0,
reach_id = levels(df_analysis_bnt0$reach_id)[[1]],
gradient = 0, stream_order = 0,
latitude_s = c(-1,0.5,1.5),
mean.tmax_summer = 0,
mean.tmax_autumn = 0,
mean.tmax_winter = seq(-4,4, length.out = 200),
mean.tmax_spring = 0,
total.prcp_summer =0,
total.prcp_autumn = 0,
total.prcp_winter = 0,
total.prcp_spring = 0
)
pred5 <- bkt.mod %>%
epred_draws(newdata = nd.1) %>%
mutate(species="Brook Trout",
latitude_f = as.factor(latitude_s)) %>%
mutate(latitude_f = recode(
latitude_f, "-1" = "South WI", "0.5" = "Mid WI", "1.5" = "North WI"))
pred6 <- bnt.mod %>%
epred_draws(newdata = nd.2) %>%
mutate(species="Brown Trout",
latitude_f = as.factor(latitude_s)) %>%
mutate(latitude_f = recode(
latitude_f, "-1" = "South WI", "0.5" = "Mid WI", "1.5" = "North WI"))
p.bkt.temp.wi.x <- pred5 %>%
ggplot(aes(x = mean.tmax_winter, y = .epred/1.609,
group = latitude_f)) +
stat_lineribbon(.width = c(0.5, 0.8)) +
scale_fill_brewer(palette = "Reds") +
facet_wrap(vars(fct_rev(latitude_f)), scales = "free_y", nrow=3, ncol=1) +
labs(x = "Max winter temperature",
y = expression(Recruitment~strength~(YOY~km^{-1})),
fill = "CI")
p.bnt.temp.wi.x <- pred6 %>%
ggplot(aes(x = mean.tmax_winter, y = .epred/1.609,
group = latitude_f)) +
stat_lineribbon(.width = c(0.5, 0.8)) +
scale_fill_brewer(palette = "Reds") +
facet_wrap(vars(fct_rev(latitude_f)), scales = "free_y", nrow=3, ncol=1) +
labs(x = "Max winter temperature",
y = expression(Recruitment~strength~(YOY~km^{-1})),
fill = "CI")
# p.temp.wi.int <-
# p.bkt.temp.wi.x / p.bnt.temp.wi.x +
# plot_layout(guides = "collect") &
# theme(legend.position='right',
# plot.margin = margin(.1,.1,.1,.1, unit = 'cm'),
# panel.background = element_rect(color = NA))
# ggsave(here("output","figs","temp_main_wi_interact.png"),
# p.temp.wi.int,
# device=agg_png, res=300, height = 6.5, width = 11)
# Spring temp ----------------------------------------
nd.1 <- expand_grid(
total_effort = 1,year_s = 0,
reach_id = levels(df_analysis_bkt0$reach_id)[[1]],
gradient = 0, stream_order = 0,
latitude_s = c(-1,0.5,1.5),
mean.tmax_summer = 0,
mean.tmax_autumn = 0,
mean.tmax_winter = 0,
mean.tmax_spring = seq(-4,4, length.out = 200),
total.prcp_summer =0,
total.prcp_autumn = 0,
total.prcp_winter = 0,
total.prcp_spring = 0
)
nd.2 <- expand_grid(
total_effort = 1,year_s = 0,
reach_id = levels(df_analysis_bnt0$reach_id)[[1]],
gradient = 0, stream_order = 0,
latitude_s = c(-1,0.5,1.5),
mean.tmax_summer = 0,
mean.tmax_autumn = 0,
mean.tmax_winter = 0,
mean.tmax_spring = seq(-4,4, length.out = 200),
total.prcp_summer =0,
total.prcp_autumn = 0,
total.prcp_winter = 0,
total.prcp_spring = 0
)
pred7 <- bkt.mod %>%
epred_draws(newdata = nd.1) %>%
mutate(species="Brook Trout",
latitude_f = as.factor(latitude_s)) %>%
mutate(latitude_f = recode(
latitude_f, "-1" = "South WI", "0.5" = "Mid WI", "1.5" = "North WI"))
pred8 <- bnt.mod %>%
epred_draws(newdata = nd.2) %>%
mutate(species="Brown Trout",
latitude_f = as.factor(latitude_s)) %>%
mutate(latitude_f = recode(
latitude_f, "-1" = "South WI", "0.5" = "Mid WI", "1.5" = "North WI"))
p.bkt.temp.sp.x <- pred7 %>%
ggplot(aes(x = mean.tmax_spring, y = .epred/1.609,
group = latitude_f)) +
stat_lineribbon(.width = c(0.5, 0.8)) +
scale_fill_brewer(palette = "Reds") +
facet_wrap(vars(fct_rev(latitude_f)), scales = "free_y", nrow=3, ncol=1) +
labs(x = "Max spring temperature",
y = expression(Recruitment~strength~(YOY~km^{-1})),
fill = "CI")
p.bnt.temp.sp.x <- pred8 %>%
ggplot(aes(x = mean.tmax_spring, y = .epred/1.609,
group = latitude_f)) +
stat_lineribbon(.width = c(0.5, 0.8)) +
scale_fill_brewer(palette = "Reds") +
facet_wrap(vars(fct_rev(latitude_f)), scales = "free_y", nrow=3, ncol=1) +
labs(x = "Max spring temperature",
y = expression(Recruitment~strength~(YOY~km^{-1})),
fill = "CI")
# p.temp.sp.int <-
# p.bkt.temp.sp.x / p.bnt.temp.sp.x +
# plot_layout(guides = "collect") &
# theme(legend.position='right',
# plot.margin = margin(.1,.1,.1,.1, unit = 'cm'),
# panel.background = element_rect(color = NA))
# ggsave(here("output","figs","temp_main_sp_interact.png"),
# p.temp.sp.int,
# device=agg_png, res=300, height = 6.5, width = 11)
# Summer rain ----------------------------------------
nd.1 <- expand_grid(
total_effort = 1,year_s = 0,
reach_id = levels(df_analysis_bkt0$reach_id)[[1]],
gradient = 0, stream_order = 0,
latitude_s = c(-1,0.5,1.5),
mean.tmax_summer = 0,
mean.tmax_autumn = 0,
mean.tmax_winter = 0,
mean.tmax_spring = 0,
total.prcp_summer = seq(-4,5, length.out = 200),
total.prcp_autumn = 0,
total.prcp_winter = 0,
total.prcp_spring = 0
)
nd.2 <- expand_grid(
total_effort = 1,year_s = 0,
reach_id = levels(df_analysis_bnt0$reach_id)[[1]],
gradient = 0, stream_order = 0,
latitude_s = c(-1,0.5,1.5),
mean.tmax_summer = 0,
mean.tmax_autumn = 0,
mean.tmax_winter = 0,
mean.tmax_spring = 0,
total.prcp_summer = seq(-4,5, length.out = 200),
total.prcp_autumn = 0,
total.prcp_winter = 0,
total.prcp_spring = 0
)
pred9 <- bkt.mod %>%
epred_draws(newdata = nd.1) %>%
mutate(species="Brook Trout",
latitude_f = as.factor(latitude_s)) %>%
mutate(latitude_f = recode(
latitude_f, "-1" = "South WI", "0.5" = "Mid WI", "1.5" = "North WI"))
pred10 <- bnt.mod %>%
epred_draws(newdata = nd.2) %>%
mutate(species="Brown Trout",
latitude_f = as.factor(latitude_s)) %>%
mutate(latitude_f = recode(
latitude_f, "-1" = "South WI", "0.5" = "Mid WI", "1.5" = "North WI"))
p.bkt.rain.su.x <- pred9 %>%
ggplot(aes(x = total.prcp_summer, y = .epred/1.609,
group = latitude_f)) +
stat_lineribbon(.width = c(0.5, 0.8)) +
scale_fill_brewer(palette = "Blues") +
facet_wrap(vars(fct_rev(latitude_f)), scales = "free_y", nrow=3, ncol=1) +
labs(x = "Summer precipitation",
y = expression(Recruitment~strength~(YOY~km^{-1})),
fill = "CI")
p.bnt.rain.su.x <- pred10 %>%
ggplot(aes(x = total.prcp_summer, y = .epred/1.609,
group = latitude_f)) +
stat_lineribbon(.width = c(0.5, 0.8)) +
scale_fill_brewer(palette = "Blues") +
facet_wrap(vars(fct_rev(latitude_f)), scales = "free_y", nrow=3, ncol=1) +
labs(x = "Summer precipitation",
y = expression(Recruitment~strength~(YOY~km^{-1})),
fill = "CI")
# p.rain.su.panel <-
# p.bkt.rain.su.x / p.bnt.rain.su.x +
# plot_layout(guides = "collect") &
# theme(legend.position='right',
# plot.margin = margin(.1,.1,.1,.1, unit = 'cm'),
# panel.background = element_rect(color = NA))
# ggsave(here("output","figs","epred_rain_su_x.png"),
# p.rain.su.panel,
# device=agg_png, res=300, height = 6.5, width = 11)
# Fall rain ----------------------------------------
nd.1 <- expand_grid(
total_effort = 1,year_s = 0,
reach_id = levels(df_analysis_bkt0$reach_id)[[1]],
gradient = 0, stream_order = 0,
latitude_s = c(-1,0.5,1.5),
mean.tmax_summer = 0,
mean.tmax_autumn = 0,
mean.tmax_winter = 0,
mean.tmax_spring = 0,
total.prcp_summer = 0,
total.prcp_autumn = seq(-4,5, length.out = 200),
total.prcp_winter = 0,
total.prcp_spring = 0
)
nd.2 <- expand_grid(
total_effort = 1,year_s = 0,
reach_id = levels(df_analysis_bnt0$reach_id)[[1]],
gradient = 0, stream_order = 0,
latitude_s = c(-1,0.5,1.5),
mean.tmax_summer = 0,
mean.tmax_autumn = 0,
mean.tmax_winter = 0,
mean.tmax_spring = 0,
total.prcp_summer = 0,
total.prcp_autumn = seq(-4,5, length.out = 200),
total.prcp_winter = 0,
total.prcp_spring = 0
)
pred11 <- bkt.mod %>%
epred_draws(newdata = nd.1) %>%
mutate(species="Brook Trout",
latitude_f = as.factor(latitude_s)) %>%
mutate(latitude_f = recode(
latitude_f, "-1" = "South WI", "0.5" = "Mid WI", "1.5" = "North WI"))
pred12 <- bnt.mod %>%
epred_draws(newdata = nd.2) %>%
mutate(species="Brown Trout",
latitude_f = as.factor(latitude_s)) %>%
mutate(latitude_f = recode(
latitude_f, "-1" = "South WI", "0.5" = "Mid WI", "1.5" = "North WI"))
p.bkt.rain.au.x <- pred11 %>%
ggplot(aes(x = total.prcp_autumn, y = .epred/1.609,
group = latitude_f)) +
stat_lineribbon(.width = c(0.5, 0.8)) +
scale_fill_brewer(palette = "Blues") +
facet_wrap(vars(fct_rev(latitude_f)), scales = "free_y", nrow=3, ncol=1) +
labs(x = "Autumn precipitation",
y = expression(Recruitment~strength~(YOY~km^{-1})),
fill = "CI")
p.bnt.rain.au.x <- pred12 %>%
ggplot(aes(x = total.prcp_autumn, y = .epred/1.609,
group = latitude_f)) +
stat_lineribbon(.width = c(0.5, 0.8)) +
scale_fill_brewer(palette = "Blues") +
facet_wrap(vars(fct_rev(latitude_f)), scales = "free_y", nrow=3, ncol=1) +
labs(x = "Autumn precipitation",
y = expression(Recruitment~strength~(YOY~km^{-1})),
fill = "CI")
# p.rain.au.panel <-
# p.bkt.rain.au.x / p.bnt.rain.au.x +
# plot_layout(guides = "collect") &
# theme(legend.position='right',
# plot.margin = margin(.1,.1,.1,.1, unit = 'cm'),
# panel.background = element_rect(color = NA))
# ggsave(here("output","figs","epred_rain_au_x.png"),
# p.rain.au.panel,
# device=agg_png, res=300, height = 6.5, width = 11)
# Winter rain ----------------------------------------
nd.1 <- expand_grid(
total_effort = 1,year_s = 0,
reach_id = levels(df_analysis_bkt0$reach_id)[[1]],
gradient = 0, stream_order = 0,
latitude_s = c(-1,0.5,1.5),
mean.tmax_summer = 0,
mean.tmax_autumn = 0,
mean.tmax_winter = 0,
mean.tmax_spring = 0,
total.prcp_summer =0,
total.prcp_autumn = 0,
total.prcp_winter = seq(-4,5, length.out = 200),
total.prcp_spring = 0
)
nd.2 <- expand_grid(
total_effort = 1,year_s = 0,
reach_id = levels(df_analysis_bnt0$reach_id)[[1]],
gradient = 0, stream_order = 0,
latitude_s = c(-1,0.5,1.5),
mean.tmax_summer = 0,
mean.tmax_autumn = 0,
mean.tmax_winter = 0,
mean.tmax_spring = 0,
total.prcp_summer =0,
total.prcp_autumn = 0,
total.prcp_winter = seq(-4,5, length.out = 200),
total.prcp_spring = 0
)
pred13 <- bkt.mod %>%
epred_draws(newdata = nd.1) %>%
mutate(species="Brook Trout",
latitude_f = as.factor(latitude_s)) %>%
mutate(latitude_f = recode(
latitude_f, "-1" = "South WI", "0.5" = "Mid WI", "1.5" = "North WI"))
pred14 <- bnt.mod %>%
epred_draws(newdata = nd.2) %>%
mutate(species="Brown Trout",
latitude_f = as.factor(latitude_s)) %>%
mutate(latitude_f = recode(
latitude_f, "-1" = "South WI", "0.5" = "Mid WI", "1.5" = "North WI"))
p.bkt.rain.wi.x <- pred13 %>%
ggplot(aes(x = total.prcp_winter, y = .epred/1.609,
group = latitude_f)) +
stat_lineribbon(.width = c(0.5, 0.8)) +
scale_fill_brewer(palette = "Blues") +
facet_wrap(vars(fct_rev(latitude_f)), scales = "free_y", nrow=3, ncol=1) +
labs(x = "Winter precipitation",
y = expression(Recruitment~strength~(YOY~km^{-1})),
fill = "CI")
p.bnt.rain.wi.x <- pred14 %>%
ggplot(aes(x = total.prcp_winter, y = .epred/1.609,
group = latitude_f)) +
stat_lineribbon(.width = c(0.5, 0.8)) +
scale_fill_brewer(palette = "Blues") +
facet_wrap(vars(fct_rev(latitude_f)), scales = "free_y", nrow=3, ncol=1) +
labs(x = "Winter precipitation",
y = expression(Recruitment~strength~(YOY~km^{-1})),
fill = "CI")
# p.rain.wi.panel <-
# p.bkt.rain.wi.x / p.bnt.rain.wi.x +
# plot_layout(guides = "collect") &
# theme(legend.position='right',
# plot.margin = margin(.1,.1,.1,.1, unit = 'cm'),
# panel.background = element_rect(color = NA))
# ggsave(here("output","figs","epred_rain_wi_x.png"),
# p.rain.wi.panel,
# device=agg_png, res=300, height = 6.5, width = 11)
# Spring rain ----------------------------------------
nd.1 <- expand_grid(
total_effort = 1,year_s = 0,
reach_id = levels(df_analysis_bkt0$reach_id)[[1]],
gradient = 0, stream_order = 0,latitude_s = c(-1,0.5,1.5),
mean.tmax_summer = 0, mean.tmax_autumn = 0,
mean.tmax_winter = 0, mean.tmax_spring = 0,
total.prcp_summer = 0, total.prcp_autumn = 0, total.prcp_winter = 0,
total.prcp_spring = seq(-4,5, length.out = 200)
)
nd.2 <- expand_grid(
total_effort = 1,year_s = 0,
reach_id = levels(df_analysis_bnt0$reach_id)[[1]],
gradient = 0, stream_order = 0,latitude_s = c(-1,0.5,1.5),
mean.tmax_summer = 0, mean.tmax_autumn = 0,
mean.tmax_winter = 0, mean.tmax_spring = 0,
total.prcp_summer = 0, total.prcp_autumn = 0, total.prcp_winter = 0,
total.prcp_spring = seq(-4,5, length.out = 200)
)
pred15 <- bkt.mod %>%
epred_draws(newdata = nd.1) %>%
mutate(species="Brook Trout",
latitude_f = as.factor(latitude_s)) %>%
mutate(latitude_f = recode(
latitude_f, "-1" = "South WI", "0.5" = "Mid WI", "1.5" = "North WI"))
pred16 <- bnt.mod %>%
epred_draws(newdata = nd.2) %>%
mutate(species="Brown Trout",
latitude_f = as.factor(latitude_s)) %>%
mutate(latitude_f = recode(
latitude_f, "-1" = "South WI", "0.5" = "Mid WI", "1.5" = "North WI"))
p.bkt.rain.sp.x <- pred15 %>%
ggplot(aes(x = total.prcp_spring, y = .epred/1.609,
group = latitude_f)) +
stat_lineribbon(.width = c(0.5, 0.8)) +
scale_fill_brewer(palette = "Blues") +
facet_wrap(vars(fct_rev(latitude_f)), scales = "free_y", nrow=3, ncol=1) +
labs(x = "Spring precipitation",
y = expression(Recruitment~strength~(YOY~km^{-1})),
fill = "CI")
p.bnt.rain.sp.x <- pred16 %>%
ggplot(aes(x = total.prcp_spring, y = .epred/1.609,
group = latitude_f)) +
stat_lineribbon(.width = c(0.5, 0.8)) +
scale_fill_brewer(palette = "Blues") +
facet_wrap(vars(fct_rev(latitude_f)), scales = "free_y", nrow=3, ncol=1) +
labs(x = "Spring precipitation",
y = expression(Recruitment~strength~(YOY~km^{-1})),
fill = "CI")
# p.rain.sp.panel <-
# p.bkt.rain.sp.x / p.bnt.rain.sp.x +
# plot_layout(guides = "collect") &
# theme(legend.position='right',
# plot.margin = margin(.1,.1,.1,.1, unit = 'cm'),
# panel.background = element_rect(color = NA))
# ggsave(here("output","figs","epred_rain_sp_x.png"),
# p.rain.sp.panel,
# device=agg_png, res=300, height = 6.5, width = 11)
# Plots ============================================
# Temp ---------------------------------
# p.bkt.temp.su.x
# p.bkt.temp.au.x
# p.bkt.temp.wi.x
# p.bkt.temp.sp.x
#
# p.bnt.temp.su.x
# p.bnt.temp.au.x
# p.bnt.temp.wi.x
# p.bnt.temp.sp.x
p.temp.x <-
(p.bkt.temp.su.x |
(p.bkt.temp.au.x + theme(axis.title.y = element_blank())) |
(p.bkt.temp.wi.x + theme(axis.title.y = element_blank())) |
(p.bkt.temp.sp.x + theme(axis.title.y = element_blank()))
)/
(p.bnt.temp.su.x |
(p.bnt.temp.au.x + theme(axis.title.y = element_blank())) |
(p.bnt.temp.wi.x + theme(axis.title.y = element_blank())) |
(p.bnt.temp.sp.x + theme(axis.title.y = element_blank()))
)
# save plot
path <- here::here("output","figs1","fig4_temp_panel_km")
ggsave(
glue::glue("{path}.pdf"),
plot = p.temp.x,
width = 10,
height = 12,
device = cairo_pdf
)
# manually add panel letters then convert
pdftools::pdf_convert(
pdf = glue::glue("{path}.pdf"),
filenames = glue::glue("{path}.png"),
format = "png",
dpi = 600
)
# ggsave(here("output","figs","epred_x_temp.png"),p.temp.x,
# device=agg_png, res=300, height = 12, width = 10)
# ggsave(here("output","figs","epred_x_temp.pdf"),p.temp.x,
# device=cairo_pdf, height = 12, width = 10)
# a <- p.bkt.temp.su.x | (p.bnt.temp.su.x + theme(axis.title.y = element_blank()))
#
# ggsave(here("output","figs","epred_x_temp_su.png"), a,
# device=agg_png, res=600, height = 6.5, width = 7)
#
# b <- p.bkt.temp.sp.x | (p.bnt.temp.sp.x + theme(axis.title.y = element_blank()))
#
# ggsave(here("output","figs","epred_x_temp_sp.png"), b,
# device=agg_png, res=600, height = 6.5, width = 7)
# Rain -----------------------
# p.bkt.rain.su.x
# p.bkt.rain.au.x
# p.bkt.rain.wi.x
# p.bkt.rain.sp.x
#
# p.bnt.rain.su.x
# p.bnt.rain.au.x
# p.bnt.rain.wi.x
# p.bnt.rain.sp.x
p.rain.x <-
(p.bkt.rain.su.x |
(p.bkt.rain.au.x + theme(axis.title.y = element_blank())) |
(p.bkt.rain.wi.x + theme(axis.title.y = element_blank())) |
(p.bkt.rain.sp.x + theme(axis.title.y = element_blank()))
) /
(p.bnt.rain.su.x |
(p.bnt.rain.au.x + theme(axis.title.y = element_blank())) |
(p.bnt.rain.wi.x + theme(axis.title.y = element_blank())) |
(p.bnt.rain.sp.x + theme(axis.title.y = element_blank()))
)
# save plot
path <- here::here("output","figs1","fig5_rain_panel_km")
ggsave(
glue::glue("{path}.pdf"),
plot = p.rain.x,
width = 10,
height = 12,
device = cairo_pdf
)
# manually add fish images then convert
pdftools::pdf_convert(
pdf = glue::glue("{path}.pdf"),
filenames = glue::glue("{path}.png"),
format = "png",
dpi = 600
)
# ggsave(here("output","figs","epred_x_rain.png"), p.rain.x,
# device=agg_png, res=300, height = 12, width = 10)
# ggsave(here("output","figs","epred_x_rain.pdf"), p.rain.x,
# device=cairo_pdf, height = 12, width = 10)
# a <- p.bkt.rain.su.x | (p.bnt.rain.su.x + theme(axis.title.y = element_blank()))
#
# ggsave(here("output","figs","epred_x_rain_su.png"), a,
# device=agg_png, res=600, height = 6.5, width = 7)
#
# b <- p.bkt.rain.wi.x | (p.bnt.rain.wi.x + theme(axis.title.y = element_blank()))
#
# ggsave(here("output","figs","epred_x_rain_wi.png"), b,
# device=agg_png, res=600, height = 6.5, width = 7)
#
# c <- p.bkt.rain.sp.x | (p.bnt.rain.sp.x + theme(axis.title.y = element_blank()))
#
# ggsave(here("output","figs","epred_x_rain_sp.png"), c,
# device=agg_png, res=600, height = 6.5, width = 7)
#Source file: /Sentiment Analysis.R (repo: ghrush/Basic-R-Programming-for-Data-Science)
#SENTIMENT ANALYSIS
#Download positive and negative words from:
#https://www.cs.uic.edu/~liub/FBS/sentiment-analysis.html
#Identify the positive and negative word files.
pos <- "positive-words.txt"
neg <- "negative-words.txt"
#Read in words from both files, separating each word.
p <- scan(pos, character(0), sep = "\n")
n <- scan(neg, character(0), sep = "\n")
#Examine first 10 words in the positive words list.
head(p, 10) #Contains header information.
#Remove header information from both lists.
p <- p[-1:-34]
n <- n[-1:-34]
#Examine first 10 words in both the positive
#and negative words lists.
head(p, 10)
head(n, 10)
#Calculate the total number of words. Use data imported
#and sorted with the file 'Analyzing Unstructured Data (Natural Language Text).R'.
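#If that script has not been run, a minimal stand-in for wordCounts can be
#built from any plain-text file (hypothetical file name; this fallback is an
#assumption, not part of the original workflow):
if (!exists("wordCounts")) {
  txt <- scan("document.txt", character(0), sep = "\n")
  tokens <- tolower(unlist(strsplit(txt, "[^A-Za-z']+")))
  wordCounts <- table(tokens[tokens != ""])
}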
totalWords <- sum(wordCounts)
#Create a vector of the target document's words.
words <- names(wordCounts)
#Produce a vector containing indices of positive word matches.
matched <- match(words, p, nomatch = 0)
#Examine the first 30 indices from the matching positive words vector.
head(matched, 30)
#Get a count of all the matching positive words.
mCounts <- wordCounts[which(matched != 0)]
length(mCounts)
#Create a separate list of positive words and a sum of their counts.
mWords <- names(mCounts)
nPos <- sum(mCounts)
#Do the same for negative words as done for positive words.
matched <- match(words, n, nomatch = 0)
nCounts <- wordCounts[which(matched != 0)]
nWords <- names(nCounts)
nNeg <- sum(nCounts)
#Calculate the proportion of word tokens that are positive or negative,
#using totalWords = sum(wordCounts) from above (total tokens, not unique words).
ratioPos <- nPos/totalWords
ratioPos
ratioNeg <- nNeg/totalWords
ratioNeg
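#A simple net-sentiment score from the quantities above (a sketch, not part
#of the original exercise; assumes at least one matched word):
sentimentScore <- (nPos - nNeg) / (nPos + nNeg)
sentimentScore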
#Source file: /ggplot2-1/Titanic-ggplot2-part1.R (repo: blackbiz/SpringboardMiniProjects)
# titanic is available in your workspace
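# Assumes ggplot2 is attached and a 'titanic' data frame is already loaded,
# e.g. library(ggplot2); titanic <- read.csv("titanic.csv") (hypothetical path)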
# 1 - Check the structure of titanic
str(titanic)
# 2 - Use ggplot() for the first instruction
ggplot(titanic, aes(x = Pclass, fill = Sex)) +
geom_bar(position = "dodge")
# 3 - Plot 2, add facet_grid() layer
ggplot(titanic, aes(x = Pclass, fill = Sex)) +
geom_bar(position = "dodge") +
facet_grid(. ~ Survived)
if (.Platform$OS.type=="windows"){
setwd("C:/Users/seanmh/Dropbox/Projects/MSB_Kew_ash_Tree_Seed/")
setwd("C:/Users/shoban/Dropbox/Projects/MSB_Kew_ash_Tree_Seed/") }
if (.Platform$OS.type=="unix") {
setwd("/home/user/Dropbox/Projects/MSB_Kew_ash_Tree_Seed/")
#setwd("/media/sean/Windows7_OS/Users/seanmh/Dropbox/Projects/MSB_Kew_ash_Tree_Seed/")
}
library(adegenet); library(diveRsity)
library(parallel)
#library(foreach);
#library(doMC); registerDoMC() #OR..
#library(doParallel); cl<-makeCluster(8); registerDoParallel(cl)
source("Simulations_and_Code/src/sample_funcs.R")
colMax <- function(data) sapply(data, max, na.rm = TRUE)
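#note: sapply() maps over the columns of a data.frame but over the individual
#elements of a matrix, so colMax() as written expects data.frame-like input
#(for a matrix, apply(data, 2, max, na.rm = TRUE) is the analogue)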
thresh_freq_H<- 0.20; thresh_freq_L<- 0.05
thisgrid<-"large"
if (thisgrid=="small"){
scenarios_ran<-list.dirs(path = "./Scenarios_Nov_28_2016/10rep_10mark", full.names = TRUE,recursive=F)
SIZE_OF_GRID<-209; NUM_GRID_ROWS<-19; N_REGIONS<-15
#first_row_region<-c(1,3,5,7,9,11,13,15,17); last_row_region<-c(2,4,6,8,10,12,14,16,19)
first_row_region<- c(1,3,5,7,8,9,10,11,12,13,14,15,16,17,18)
last_row_region<- c(2,4,6,7,8,9,10,11,12,13,14,15,16,17,19)
}
if (thisgrid=="medium"){
scenarios_ran<-list.dirs(path = "./Scenarios_Nov_22_medium", full.names = TRUE,recursive=F)
SIZE_OF_GRID<-1215; NUM_GRID_ROWS<-45; N_REGIONS<-15
first_row_region<- c(1,3,6,9,12,15,18,21,24,27,30,33,36,39,42)
last_row_region<- c(2,5,8,11,14,17,20,23,26,29,32,35,38,41,45)
}
if (thisgrid=="large"){
scenarios_ran<-list.dirs(path = "./Scenarios_Nov_22_large", full.names = TRUE,recursive=F)
SIZE_OF_GRID<-4717; NUM_GRID_ROWS<-89; N_REGIONS<-18
first_row_region<- c(1,6,11,16,21,26,31,36,41,46,51,56,61,66,71,76,81,86)
last_row_region<- c(5,10,15,20,25,30,35,40,45,50,55,60,65,70,75,80,85,89)
}
scenarios_ran<-list.dirs(path = "./Simulations_and_Code/test2018", full.names = TRUE,recursive=F)
#Set up file path where scenario is
num_scen<-length(scenarios_ran)
num_reps<-1000
#set up results
#Rows: 1485 sampling combinations (9 strategies x 15 population counts x 11 tree counts);
#the last 5 rows are MSB, NTSB, 10*MSB, 10*NTSB, TOTAL
summ_results<-array(dim=c(1485+4+1,8,num_scen,num_reps))
colnames(summ_results)<-c("total plants","total populations","G", "GLF", "GR", "RC", "LC", "LR")
type_samp<-c("random","each fr diff reg", "only N 2 rows", "center 2 rows", "only S 2 rows",
"core", "edge", "focus S", "focus N")
rownames(summ_results)<-c(rep(type_samp,each=165),"MSB","NTSB","MSB10","NTSB10","TOTAL")
######################################
#---FILE CHECK AND FILE CONVERSION---#
######################################
#for (scen in 1:length(scenarios_ran)){
# #Check for and remove genind
# gen_files<-dir(path=scenarios_ran[scen],pattern="gen")
# if (length(gen_files)!=0) file.remove(file.path(scenarios_ran[scen],gen_files))
# #convert to genind
# reps_ran_arp<-list.files(scenarios_ran[scen], pattern="arp")
# arp_file_list<-file.path(scenarios_ran[scen],reps_ran_arp,sep="")
# if (.Platform$OS.type=="unix") arp_file_list<-substr(arp_file_list,1,nchar(arp_file_list)-1)
# mclapply(arp_file_list,arp2gen,mc.cores=16)
# #foreach (nrep = 1:length(reps_ran_arp)) %dopar% { arp2gen(arp_file_list[nrep]) } #alternative MC loop
#}
##############################
#---DEFINE REGIONAL MAKEUP---#
#---AND WHERE MSB SAMPLED----#
##############################
scen<-1
region_makeup<-set.regions(scenarios_ran[scen], SIZE_OF_GRID, NUM_GRID_ROWS, N_REGIONS,
first_row_region, last_row_region)
file_MSB_all<-"Data_from_simon/sampled_so_far/2018/sampled_locations_all_2018.csv"
file_NTSB<-"Data_from_simon/sampled_so_far/2018/sampled_locations_NTSB_2018.csv"
MSB_locations<-MSB.samp(get.uk.grid(scenarios_ran[scen], SIZE_OF_GRID, NUM_GRID_ROWS), file_MSB_all)
MSB_locations<-MSB_locations[MSB_locations!=0]
MSB_plant_nums<-read.csv(file_MSB_all)[,4]*10
MSB_plant_nums[MSB_plant_nums>=250]<-240
NTSB_locations<-MSB.samp(get.uk.grid(scenarios_ran[scen], SIZE_OF_GRID, NUM_GRID_ROWS), file_NTSB)
NTSB_locations<-NTSB_locations[NTSB_locations!=0]
NTSB_plant_nums<-read.csv(file_NTSB)[,4]*10
NTSB_plant_nums[NTSB_plant_nums>=250]<-240
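#(assumption: column 4 of the CSVs records plant counts in tens, hence the
#*10 rescaling above, with per-population samples capped at 240 plants)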
#####################
#---MAIN ANALYSIS---#
#####################
for (scen in 1:length(scenarios_ran)){
reps_ran_gen<-list.files(scenarios_ran[scen], pattern="gen")
for (nrep in 1:num_reps){
print(scenarios_ran[scen])
#make a genind (by individual) and genpop (by population)
temp_file_name<-file.path(scenarios_ran[scen],reps_ran_gen[nrep]) #file.path() inserts "/" itself, so no trailing-separator cleanup is needed
UK_genind<-read.genepop(temp_file_name,ncode=3)
UK_genpop<-genind2genpop(UK_genind)
#--NUMBER OF POPULATIONS, INDIVIDUALS, REGIONS, REGIONAL MAKEUP--#
n_pops<-length(levels(UK_genind@pop))
n_total_indivs<- length(UK_genind@tab[,1])
n_ind_p_pop<-table(UK_genind@pop)
allele_freqs<-colSums(UK_genpop@tab)/(n_total_indivs*2)
#######################
#---#DETERMINE WHAT ALLELES FALL IN WHAT CATEGORIES---#
#######################
allele_cat<-get.allele.cat(UK_genpop, region_makeup, N_REGIONS, n_ind_p_pop)
glob_com<-allele_cat[[1]]; glob_lowfr<-allele_cat[[2]]; glob_rare<-allele_cat[[3]]
reg_com_int<-allele_cat[[4]]; loc_com_int<-allele_cat[[5]]; loc_rare<-allele_cat[[6]]
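#categories returned by get.allele.cat(): globally common, globally low-frequency,
#globally rare, regionally common, locally common, locally rare
#(matching columns G/GLF/GR/RC/LC/LR of summ_results)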
#######################
#--SUBSAMPLE SECTION
#######################
### LIST OF POPULATIONS SAMPLED
#sample certain number of populations, and individuals, and by region
center_pops_vect<-read.csv("Grids/center_edge/center_pops.txt",sep=",",header=F)[[1]]
edge_pops_vect<-read.csv("Grids/center_edge/edge_pops.txt",sep=",",header=F)[[1]]
type_samp<-c("random","each fr diff reg", "only N 2 rows", "center 2 rows", "only S 2 rows",
"core", "edge", "focus S", "focus N")
l_plt_smp<-length(c(2,seq(5,50, by=5)))
n_pops_to_samp<- rep(c(rep(2,l_plt_smp),rep(5,l_plt_smp),rep(10,l_plt_smp),rep(15,l_plt_smp),
rep(20,l_plt_smp),rep(25,l_plt_smp),rep(30,l_plt_smp),rep(35,l_plt_smp),
rep(40,l_plt_smp),rep(45,l_plt_smp),rep(50,l_plt_smp),rep(55,l_plt_smp),
rep(60,l_plt_smp),rep(65,l_plt_smp),rep(70,l_plt_smp)),length(type_samp)) #rep(n_pops,l_plt_smp),
N_SAMPS_P_POP<-as.list(rep(c(2,seq(5,50, by=5)),(length(type_samp)*length(unique(n_pops_to_samp)))))
POPS_TO_SAMP<-list(); this_slot<-1
for (t in 1:length(type_samp)) for (p in 1:length(unique(n_pops_to_samp))) {
REPLC_N <- (p > 6) #sample with replacement for the larger population counts
if (t==1) the_pops<-sample(1:n_pops, unique(n_pops_to_samp)[p])
#t=2 makes list of 1 pop per region then samples n_pops from that
if (t==2) the_pops<-sample(sapply(region_makeup,sample,3), unique(n_pops_to_samp)[p],replace=REPLC_N)
#t=3,4,5 takes the top, middle, & end of the grid, unlists, then samples n_pops from that
if (t==3) the_pops<-sample(unlist(region_makeup[1:2]),unique(n_pops_to_samp)[p],replace=REPLC_N)
if (t==4) {
temp_mid<-length(region_makeup)/2
the_pops<-sample(unlist(region_makeup[(temp_mid-1):(temp_mid+1)]),unique(n_pops_to_samp)[p])
}
if (t==5) the_pops<-sample(unlist(tail(region_makeup)[4:5]),unique(n_pops_to_samp)[p])
if (t==6) the_pops<-sample(center_pops_vect, unique(n_pops_to_samp)[p])
if (t==7) the_pops<-sample(edge_pops_vect, unique(n_pops_to_samp)[p])
if (t==8) {
num_in_focus<-floor(0.75*unique(n_pops_to_samp)[p])
the_pops<-c(sample(unlist(tail(region_makeup)[4:5]),num_in_focus),
sample(1:n_pops, (unique(n_pops_to_samp)[p]-num_in_focus)))
}
if (t==9) {
num_in_focus<-floor(0.75*unique(n_pops_to_samp)[p])
the_pops<-c(sample(unlist(region_makeup[1:2]),num_in_focus, replace=REPLC_N),
sample(1:n_pops, (unique(n_pops_to_samp)[p]-num_in_focus)))
}
for (x in 1:l_plt_smp) { POPS_TO_SAMP[[this_slot]]<-the_pops; this_slot=this_slot+1}
}
for (i in 1:length(POPS_TO_SAMP))
N_SAMPS_P_POP[[i]]<-unlist(rep(N_SAMPS_P_POP[i],length(POPS_TO_SAMP[[i]])))
#This checks the above code- just swap out "34" for larger numbers
#temp_ind<-vector(length=15)
#for (i in 1:15) temp_ind[i]<-(which(UK_grid==POPS_TO_SAMP[[34]][i],arr.ind=T)[1])
#sort(temp_ind)
#This is for the MSB samples
POPS_TO_SAMP[[this_slot]]<-MSB_locations; N_SAMPS_P_POP[[this_slot]]<-MSB_plant_nums/10
N_SAMPS_P_POP[[this_slot]][N_SAMPS_P_POP[[this_slot]][]==1]<-2; this_slot=this_slot+1
POPS_TO_SAMP[[this_slot]]<-NTSB_locations; N_SAMPS_P_POP[[this_slot]]<-NTSB_plant_nums/10
N_SAMPS_P_POP[[this_slot]][N_SAMPS_P_POP[[this_slot]][]==1]<-2; this_slot=this_slot+1
POPS_TO_SAMP[[this_slot]]<-MSB_locations; N_SAMPS_P_POP[[this_slot]]<-MSB_plant_nums
this_slot=this_slot+1
POPS_TO_SAMP[[this_slot]]<-NTSB_locations; N_SAMPS_P_POP[[this_slot]]<-NTSB_plant_nums
UK_genind_sep<-seppop(UK_genind)
for (samp in 1:length(POPS_TO_SAMP)){
alleles<-matrix(nrow=length(POPS_TO_SAMP[[samp]]),ncol=length(allele_freqs))
alleles<-sample.pop(UK_genind_sep,POPS_TO_SAMP[[samp]],N_SAMPS_P_POP[[samp]])
summ_results[samp,1,scen,nrep]<-sum(N_SAMPS_P_POP[[samp]]); summ_results[samp,2,scen,nrep]<-length(N_SAMPS_P_POP[[samp]])
#NOW SEE WHAT IS CAUGHT
#tally the copies of each allele captured across the pooled sample
#(a single colSums() pass suffices; the totals do not vary by population)
caught_glob<-colSums(alleles,na.rm=T)
caught_glob_lowfr<-caught_glob[glob_lowfr]
caught_glob_rare<-caught_glob[glob_rare]
caught_reg_com<-caught_glob[reg_com_int]
caught_loc_com<-caught_glob[loc_com_int]
caught_loc_rare<-caught_glob[loc_rare]
#as long as its not missed (0) it counts as being caught (could try other minima)
got_G<-sum(caught_glob>=1); got_GLF<-sum(caught_glob_lowfr>=1); got_GR<-sum(caught_glob_rare>=1)
got_RC<-sum(caught_reg_com>=1); got_LC<-sum(caught_loc_com>=1); got_LR<-sum(caught_loc_rare>=1)
summ_results[samp,3,scen,nrep]<-got_G; summ_results[samp,4,scen,nrep]<-got_GLF
summ_results[samp,5,scen,nrep]<-got_GR; summ_results[samp,6,scen,nrep]<-got_RC
summ_results[samp,7,scen,nrep]<-got_LC; summ_results[samp,8,scen,nrep]<-got_LR
}
summ_results[1490,3:8,scen,nrep]<-c(length(allele_freqs),length(glob_lowfr),length(glob_rare),
length(reg_com_int),length(loc_com_int),length(loc_rare))
}
#difference between the scenarios_ran
#apply(summ_results[,,1,1:10]-summ_results[,,2,1:10],c(1,2),mean)
#apply(summ_results[1089:1092,,1,1:num_reps],c(1,2),mean)
}
save(summ_results,file="summ_results_mina_2_14_18_MSB2.Rdata")
#####################################
# RESULTS #
#####################################
#FOR THE CHOSEN SCENARIO
if (.Platform$OS.type=="windows"){
setwd("C:/Users/shoban.DESKTOP-DLPV5IJ/Dropbox/Projects/MSB_Kew_ash_Tree_Seed/")
setwd("C:/Users/shoban/Dropbox/Projects/MSB_Kew_ash_Tree_Seed/") }
if (.Platform$OS.type=="unix") setwd("/home/user/Dropbox/Projects/MSB_Kew_ash_Tree_Seed/")
load(file="summ_results_2_14_18_MSB2.Rdata")
load(file="summ_results_mina_2_16_18_MSB2.Rdata")
type_samp<-c("random","each fr diff reg", "only N 2 rows", "center 2 rows", "only S 2 rows",
"core", "edge", "focus S", "focus N")
type_allele<-c("global", "global low frequency", "global rare", "regional", "local common", "local rare")
num_samp_strat<-length(summ_results[,1,1,1]); num_mom_pop_comb<-11*15
num_trees<-c(2,seq(5,50,5))
num_reps<-1000
###########################################################
# COMPARE SPATIAL STRATEGIES GRAPH #
###########################################################
#The first question is about ranking of the large scale spatial strategies relative to each other
#This uses 'matplot' to plot the different sampling strategies
total_exists<-apply(summ_results[num_samp_strat,3:8,,1:num_reps],1,mean)
pdf(file="compare_spatial.pdf",width=14,height=11)
par(mfrow=c(3,2),oma=c(4,3,4,3),mar=c(3,3,2,2))
#this will show 2 populations (lines 1:11), 20 populations (line 45) and 45 populations (line 100)
#If I wanted 5 populations sampled I would use 12
#and we will focus on allele category 3 (global) and 7 (locally common)
#FOR SUPPLEMENTAL
for (start_pop in c(1,45,100)){
#we need to get, for example, 2 populations for each of the sampling strategies (all nine of them)
#each spatial sampling strategy has 121 (or 11 times 11) slots for all tree/pop combinations
#so essentially need slots 1:11, then (1+121):(11+121) etc. etc. The next three lines get that list
start_each_samp<-seq(start_pop,num_samp_strat-5,by=num_mom_pop_comb)
for (i in 1:(length(num_trees)-1)) start_each_samp<-c(start_each_samp,start_each_samp+1) #duplicates are dropped by sort(unique()) on the next line
some_samps<-sort(unique(start_each_samp))
#For the two allele categories of focus, get the results for that list, make a matrix with
#the spatial strategies as columns and adding more individuals as rows, then plot this
for (i in c(3,7)) {
select_samp<-apply(summ_results[some_samps,i,,1:num_reps],1,mean,na.rm=T)
matplot(matrix(select_samp/total_exists[i-2],nrow=length(num_trees)),type="l",lwd=2,
col=c(rep("black",2),rep("red",3),rep("blue",5)), lty=c(1,2,1,2,3,1,2,3,4,5),
ylab="",xaxt="n",xlab="",cex.axis=1.8)
axis(4,labels=F); axis(1,labels=num_trees, at=1:11,cex.axis=1.8) }
mtext(side=4,line=2,paste(summ_results[start_pop,2,1,1]," populations sampled"),cex=1.4) }
mtext(side=1,line=1.3,"number of trees sampled per population",outer=T,cex=1.4)
mtext(side=2, line=1.3,"proportion of alleles preserved",outer=T,cex=1.4)
mtext(side=3," global alleles locally common alleles",outer=T,cex=2.1)
legend(6,.8,legend=type_samp, col=c(rep("black",2),rep("red",3),rep("blue",5)), lty=c(1,2,1,2,3,1,2,3,4,5), cex=1.3,bg="white", lwd=2)
dev.off()
#as above but only the global alleles, reusing 'some_samps' from the final
#pass of the loop above (the largest population count plotted there)
#FOR MAIN MANUSCRIPT
pdf(file="compare_spatial_global_40.pdf",width=9,height=7)
select_samp<-apply(summ_results[some_samps,3,,1:num_reps],1,mean,na.rm=T)
matplot(matrix(select_samp/total_exists[1],nrow=length(num_trees)),type="l",lwd=2.5,
col=c(rep("black",2),rep("red",3),rep("blue",5)), lty=c(1,2,1,2,3,1,2,3,4,5),
ylab="proportion of alleles captured",xaxt="n",xlab="number of trees sampled",
cex=1.5,cex.axis=1.5,cex.lab=1.5)
legend(6,.71,legend=type_samp, col=c(rep("black",2),rep("red",3),rep("blue",5)), lty=c(1,2,1,2,3,1,2,3,4,5), cex=1.3,bg="white", lwd=2.5)
axis(1,at=1:11,labels=(c(2,seq(5,50, by=5))), cex.axis=1.5)
dev.off()
###########################################################
# COMPARE SPATIAL STRATEGIES TABLE #
###########################################################
#This analysis is to rank the spatial strategies
#NOT REALLY USED IN PAPER- THE PLOT TELLS ALL THAT WE NEED
library("tidyr"); library("reshape2")
#melt and dcast will swap L and R, so have to fix order
total_exists<-apply(summ_results[num_samp_strat,3:8,,1:100],1,mean)
total_exists_ord<-total_exists; total_exists_ord[4:5]<-total_exists[5:6]; total_exists_ord[6]<-total_exists[4]
#first rbind the reps together and cbind a column to add sample strategies to each row
bound_results<-as.data.frame(summ_results[,,1,1])
for (i in 2:100) bound_results<-rbind(bound_results,summ_results[,,1,i])
labels_one_rep<-as.factor(c(rep(type_samp,each=num_mom_pop_comb),"MSB","NTSB","MSB10","NTSB10","TOTAL"))
bound_results<-cbind(rep(labels_one_rep,100),bound_results)
colnames(bound_results)<-c("strat","plants","pops","G","GLF","GR","RC","LC","LR")
melted_results<-gather(bound_results,allcat,alleles,G,GLF,GR,RC,LC,LR,-pops,-plants,-strat)
#first get MSB results
MSB_results<-melted_results[which(melted_results$strat=="MSB"),]; means_MSB<-dcast(MSB_results,strat~allcat,mean)
means_MSB[2:7]/total_exists_ord
#remove MSB and total for now
melted_results<-melted_results[-which(melted_results$strat=="MSB"),]; melted_results<-melted_results[-which(melted_results$strat=="MSB10"),]
melted_results<-melted_results[-which(melted_results$strat=="NTSB"),]; melted_results<-melted_results[-which(melted_results$strat=="NTSB10"),]
melted_results<-melted_results[-which(melted_results$strat=="TOTAL"),]
means_all<-dcast(melted_results,strat~allcat,mean)
ss_ranking<-function(num_plants,num_pops){
spec_mean<-dcast(melted_results[melted_results$plants==num_plants&melted_results$pops==num_pops,],strat~allcat,mean) #50/ 50
rownames(spec_mean)<-spec_mean[,1]; spec_mean<-spec_mean[,-1]
#print(rownames(t(t(spec_mean)/total_exists_ord)[order(spec_mean[,4]),])[5:9])
#identify the best by ranking
write.table(cbind(rownames(t(t(spec_mean)/total_exists_ord)[order(spec_mean[,1]),])[5:9],
t(t(spec_mean)/total_exists_ord)[order(spec_mean[,1]),][5:9]),
file="ss_ranking2.csv", append=T)
write.table( t(t(spec_mean)/total_exists_ord), file="ss_ranking.csv", append=T)
}
#do this for low, medium, high sampling, more pops, more individuals see if ranking changes
temp_plants<-c("2500","625","200","50","250","250"); temp_pops<-c("50","25","10","5","5","50")
for (i in 1:length(temp_plants)) ss_ranking(temp_plants[i],temp_pops[i])
#determine which are significantly different than each other
anov_res<-aov(alleles~strat+plants+pops+allcat,data=melted_results)
anov_res<-aov(alleles~strat,data=melted_results)
Tuk_res<-TukeyHSD(anov_res); Tuk_res$strat[order(Tuk_res$strat[,4]),]
#Interestingly the edge and core don't significantly differ from each other; nearly all the rest differ
barplot(means_all[,2]/total_exists_ord[1],names.arg=means_all[,1],las=2)
###########################################################
# MSB vs. POTENTIAL SAMPLING #
###########################################################
#Compare MSB to other samplings, see which of the strategies did better at proportion of alleles
#and/ or how many samples does it take to beat MSB, with a diff strategy (i.e. north)?
summ_results_ex<-summ_results
means_caught<-apply(summ_results_ex[1:1490,,1,1:1000],c(1,2),mean)
means_caught[1490,1:2]<-1
prop_caught<-t(t(means_caught)/means_caught[1490,])
write.csv(prop_caught,file="prop_caught.csv")
#which sampling strategies could have beaten the MSB in terms of choice of populations
which_beat_MSB<-prop_caught[which(prop_caught[,3]>prop_caught[1487,3]),]
write.csv(which_beat_MSB,"which_beat_MSB_G.csv")
which_beat_MSB<-prop_caught[which(prop_caught[,7]>prop_caught[1487,7]),]
write.csv(which_beat_MSB,"which_beat_MSB_LC.csv")
#to get as much as they did, if only sampling in the south, they would have had to sample
#very hard to capture all the alleles!
pdf(file="local_vs_global_accum.pdf")
plot(prop_caught[1:1093,3],prop_caught[1:1093,7],xlim=c(0,1),ylim=c(0,1), xlab="locally common alleles", ylab="all alleles")
abline(0,1,col="green",lwd=2)
dev.off()
prop_caught[as.numeric(which(prop_caught[,3]>prop_caught[,7])),1:2]
#Get global alleles "faster" for small collections, and local alleles faster with big collections-
#this means it is easier to get all the locally common alleles because the accumulation of global ones slows down
###########################################################
# COMPARE TREES VS. POP'NS #
###########################################################
#The question is it better to sample more trees or more populations can partly be answered with a graph
#This makes two plots, one for number of trees on the X axis and one for number of populations on the X axis
#With additional lines being the other variable (populations and trees)
#The following loop will do this for every allele type
for (A in 3:8){
pdf(paste(A,"trees_vs_popns.pdf"), height=5,width=8)
all_mean<-apply(summ_results[,,1,1:1000],c(1,2),mean)
n_pops_samp<- c(2,seq(5,70,by=5)); n_trees_samp<- c(2,seq(5,50,by=5))
all_mean_glob<-matrix(all_mean[1:num_mom_pop_comb,A],nrow=11,dimnames=list(n_trees_samp,n_pops_samp))/all_mean[num_samp_strat,A]
#plots of adding more trees and more populations
par(mfrow=c(1,2),mar=c(5,5,2,1))
plot(all_mean_glob[1,]~colnames(all_mean_glob),type="l",ylim=c(0,1),xlim=c(-4,50),xlab="number of populations",ylab="proportion of genetic variation")
for (t in 1:11) lines(all_mean_glob[t,]~colnames(all_mean_glob))
for (pop_index in c(1:4,6,11)) text(-3,all_mean_glob[pop_index,1],n_pops_samp[pop_index],cex=1.1)
axis(4, at= c(0,.2,.4,.6,.8,1), labels=F)
#plot(all_mean_glob[1,]~colnames(all_mean_glob),type="l",ylim=c(0.4,.9),xlab="number of populations",ylab="proportion of genetic variation")
#for (t in 2:10) lines(all_mean_glob[t,]~colnames(all_mean_glob))
par(mar=c(5,2,2,4))
plot(all_mean_glob[,1]~rownames(all_mean_glob),type="l",ylim=c(0,1),xlim=c(-5,50),xlab="number of trees",yaxt="n")
for (t in 1:11) lines(all_mean_glob[,t]~rownames(all_mean_glob))
for (mom_index in c(1:4,6,11)) text(-3,all_mean_glob[1,mom_index],n_trees_samp[mom_index])
axis(4, at= c(0,.2,.4,.6,.8,1), labels=T); axis(2, at= c(0,.2,.4,.6,.8,1), labels=F)
dev.off()
}
#So the first black bar is 5 trees/ 35 populations, the third red bar is 10 populations/ 35 trees
#Second black bar is 10 trees/ 35 populations, the fourth red bar is 15 populations/ 35 trees
#########################################################################
# GET THE DIMINISHING RETURNS POINT (PLATEAU 1%) #
#########################################################################
#ALSO CALL IT THE STOPPING POINT
#this looks at more trees or more pops
#it will take in the all_mean (all alleles means) matrix
#first for trees it will go through the all_mean matrix and calculate the difference between row r+1 and r
#then will divide this by the number of trees sampled from r+1 to r e.g. divide by 5 or 10 trees
#then this is stored in tree_diff
#then, for every sampling group (2 to 50 trees, 11 levels), we find the first diff less than thresh
#we do skip the first minimum sampling (2) because it is the diff from the last sampling (50)
#to look at other allele categories just replace the 3]<thresh below with other categories
thresh<-0.001
#first get the proportions
all_mean[,3:8]<-t(t(all_mean[,3:8])/(all_mean[1490,3:8])) #divide by the TOTAL row (row 1490, as constructed above)
#add a column that is number of plants sampled
all_mean<-cbind(all_mean,all_mean[,1]/all_mean[,2])
diff<-all_mean[1:1489,]
#calculate the difference from the next closest sampling
for (i in 1:length(diff[,1])) for (c in 3:8) diff[i,c]<-(all_mean[i+1,c]-all_mean[i,c])/(all_mean[i+1,9]-all_mean[i,9])
#mean(diff[diff[,3]<0.005,9]) #this is wrong
#Note that all_mean is sorted by number of populations so we can look at difference as we add more trees
#The 99 is 9 sampling spatial strategies * 11 tree possibilities
b<-0; plateau_tree<-vector(length=99)
#Go through each set of 11, identify which of those is less than thresh, take the minimum of that
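#(index note: 1+i*10+i equals 1+11*i, the first row of the i-th block of 11
# tree-sampling levels; which() then scans rows 2..11 of that block)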
for (i in 0:98) plateau_tree[i+1]<-diff[1+i*10+i+min(which(diff[(1+i*10+i+1):(11+i*10+i),3]<thresh)),9]
mean(plateau_tree,na.rm=T)
#0.001 tree = 28.1; 0.005 = 11.9; 0.01 = 7.22
#Reorder all_mean by strategy (rowname), then by trees per population (column 9)
all_mean_temp<-all_mean
all_mean_temp<-all_mean_temp[order(rownames(all_mean_temp),all_mean_temp[,9]),]
diff<-all_mean_temp[1:1489,]
#calculate the difference from the next closest sampling
for (i in 1:length(diff[,1])) for (c in 3:8) diff[i,c]<-(all_mean_temp[i+1,c]-all_mean_temp[i,c])/(all_mean_temp[i+1,1]-all_mean_temp[i,1]) #numerator and denominator both taken from the reordered matrix
#so we can look at difference as we add more populations
#The 135 is 9 sampling spatial strategies * 15 population-count levels
b<-0; plateau_pop<-vector(length=135)
for (i in 0:134) plateau_pop[i+1]<-diff[1+i*10+i+min(which(diff[(1+i*10+i+1):(11+i*10+i),3]<thresh)),2]
mean(plateau_pop,na.rm=T)
#0.005 = 17.1 populations, 0.001 = 20.4 populations
##########
#JUNKYARD
##########
#separate data into single populations
#UK_genind_sep<-lapply(seppop(UK_genind), function(x) x[sample(1:nrow(x$tab), 10)])
#this will look at what is captured in a given population
#sum(as.vector(colSums(UK_genind_sep[[39]]@tab)[glob_com])==0)
#sum(as.vector(colSums(UK_genind_sep[[39]]@tab)[glob_lowfr])==0)
#sum(as.vector(colSums(UK_genind_sep[[39]]@tab)[glob_rare])==0)
#but really I want what is captured in all sampled populations
#pooled_data<-repool(UK_genind_sep)
#sum(as.vector(colSums(pooled_data_north@tab)[glob_com])==0)
#this doesn't work to repool, because it makes new allele counts
#colSums lists all alleles and the number of populations (N) having 0 copies of that allele
#we then pull out the indices (which) of alleles for which N = number of populations - 1
#(only one pop'n does not have 0 copies)
these<-as.vector(which(colSums(UK_genpop@tab<12)==(n_pops-1)))
#then take each of these alleles and sort the counts per population- how many counts are in that pop'n
sapply(these, function(x) sort(UK_genpop@tab[,x]))
big_enough<-table_counts[39,]>=30
these<-as.vector(which(colSums(UK_genpop@tab<3)==(n_pops-1)))
sapply(these, function(x) sort(UK_genpop@tab[,x]))
#CYCLE THROUGH THIS FROM 0 TO ABOUT 14, record the population and the allele, its count in that pop, and count in next pop
sum(as.vector(colSums(north_genpop@tab)[glob_lowfr])==0)
| /ash_allele_analysis_2018.R | no_license | smhoban/UK_ash | R | false | false | 25,420 | r | if (.Platform$OS.type=="windows"){
setwd("C:/Users/seanmh/Dropbox/Projects/MSB_Kew_ash_Tree_Seed/")
setwd("C:/Users/shoban/Dropbox/Projects/MSB_Kew_ash_Tree_Seed/") }
if (.Platform$OS.type=="unix") {
setwd("/home/user/Dropbox/Projects/MSB_Kew_ash_Tree_Seed/")
#setwd("/media/sean/Windows7_OS/Users/seanmh/Dropbox/Projects/MSB_Kew_ash_Tree_Seed/")
}
library(adegenet); library(diveRsity)
library(parallel)
#library(foreach);
#library(doMC); registerDoMC() #OR..
#library(doParallel); cl<-makeCluster(8); registerDoParallel(cl)
source("Simulations_and_Code/src/sample_funcs.R")
colMax <- function(data) sapply(data, max, na.rm = TRUE)
thresh_freq_H<- 0.20; thresh_freq_L<- 0.05
thisgrid<-"large"
if (thisgrid=="small"){
scenarios_ran<-list.dirs(path = "./Scenarios_Nov_28_2016/10rep_10mark", full.names = TRUE,recursive=F)
SIZE_OF_GRID<-209; NUM_GRID_ROWS<-19; N_REGIONS<-15
#first_row_region<-c(1,3,5,7,9,11,13,15,17); last_row_region<-c(2,4,6,8,10,12,14,16,19)
first_row_region<- c(1,3,5,7,8,9,10,11,12,13,14,15,16,17,18)
last_row_region<- c(2,4,6,7,8,9,10,11,12,13,14,15,16,17,19)
}
if (thisgrid=="medium"){
scenarios_ran<-list.dirs(path = "./Scenarios_Nov_22_medium", full.names = TRUE,recursive=F)
SIZE_OF_GRID<-1215; NUM_GRID_ROWS<-45; N_REGIONS<-15
first_row_region<- c(1,3,6,9,12,15,18,21,24,27,30,33,36,39,42)
last_row_region<- c(2,5,8,11,14,17,20,23,26,29,32,35,38,41,45)
}
if (thisgrid=="large"){
scenarios_ran<-list.dirs(path = "./Scenarios_Nov_22_large", full.names = TRUE,recursive=F)
SIZE_OF_GRID<-4717; NUM_GRID_ROWS<-89; N_REGIONS<-18
first_row_region<- c(1,6,11,16,21,26,31,36,41,46,51,56,61,66,71,76,81,86)
last_row_region<- c(5,10,15,20,25,30,35,40,45,50,55,60,65,70,75,80,85,89)
}
scenarios_ran<-list.dirs(path = "./Simulations_and_Code/test2018", full.names = TRUE,recursive=F)
#Set up file path where scenario is
num_scen<-length(scenarios_ran)
num_reps<-1000
#set up results
#Last 4 are MSB, NTSB, 10*MSB, 10*NTSB, TOTAL
summ_results<-array(dim=c(1485+4+1,8,num_scen,num_reps))
colnames(summ_results)<-c("total plants","total populations","G", "GLF", "GR", "RC", "LC", "LR")
type_samp<-c("random","each fr diff reg", "only N 2 rows", "center 2 rows", "only S 2 rows",
"core", "edge", "focus S", "focus N")
rownames(summ_results)<-c(rep(type_samp,each=165),"MSB","NTSB","MSB10","NTSB10","TOTAL")
######################################
#---FILE CHECK AND FILE CONVERSION---#
######################################
#for (scen in 1:length(scenarios_ran)){
# #Check for and remove genind
# gen_files<-dir(path=scenarios_ran[scen],pattern="gen")
# if (length(gen_files)!=0) file.remove(file.path(scenarios_ran[scen],gen_files))
# #convert to genind
# reps_ran_arp<-list.files(scenarios_ran[scen], pattern="arp")
# arp_file_list<-file.path(scenarios_ran[scen],reps_ran_arp,sep="")
# if (.Platform$OS.type=="unix") arp_file_list<-substr(arp_file_list,1,nchar(arp_file_list)-1)
# mclapply(arp_file_list,arp2gen,mc.cores=16)
# #foreach (nrep = 1:length(reps_ran_arp)) %dopar% { arp2gen(arp_file_list[nrep]) } #alternative MC loop
#}
##############################
#---DEFINE REGIONAL MAKEUP---#
#---AND WHERE MSB SAMPLED----#
##############################
scen<-1
region_makeup<-set.regions(scenarios_ran[scen], SIZE_OF_GRID, NUM_GRID_ROWS, N_REGIONS,
first_row_region, last_row_region)
file_MSB_all<-"Data_from_simon/sampled_so_far/2018/sampled_locations_all_2018.csv"
file_NTSB<-"Data_from_simon/sampled_so_far/2018/sampled_locations_NTSB_2018.csv"
MSB_locations<-MSB.samp(get.uk.grid(scenarios_ran[scen], SIZE_OF_GRID, NUM_GRID_ROWS), file_MSB_all)
MSB_locations<-MSB_locations[MSB_locations!=0]
MSB_plant_nums<-read.csv(file_MSB_all)[,4]*10
MSB_plant_nums[MSB_plant_nums>=250]<-240
NTSB_locations<-MSB.samp(get.uk.grid(scenarios_ran[scen], SIZE_OF_GRID, NUM_GRID_ROWS), file_NTSB)
NTSB_locations<-NTSB_locations[NTSB_locations!=0]
NTSB_plant_nums<-read.csv(file_NTSB)[,4]*10
NTSB_plant_nums[NTSB_plant_nums>=250]<-240
#####################
#---MAIN ANALYSIS---#
#####################
for (scen in 1:length(scenarios_ran)){
reps_ran_gen<-list.files(scenarios_ran[scen], pattern="gen")
for (nrep in 1:num_reps){
print(scenarios_ran[scen])
#make a genind (by individual) and genpop (by population)
temp_file_name<-file.path(scenarios_ran[scen],reps_ran_gen[nrep],sep="")
if (.Platform$OS.type=="unix") temp_file_name<-substr(temp_file_name,1,nchar(temp_file_name)-1)
UK_genind<-read.genepop(temp_file_name,ncode=3)
UK_genpop<-genind2genpop(UK_genind)
#--NUMBER OF POPULATIONS, INDIVIDUALS, REGIONS, REGIONAL MAKEUP--#
n_pops<-length(levels(UK_genind@pop))
n_total_indivs<- length(UK_genind@tab[,1])
n_ind_p_pop<-table(UK_genind@pop)
allele_freqs<-colSums(UK_genpop@tab)/(n_total_indivs*2)
#######################
#---#DETERMINE WHAT ALLELES FALL IN WHAT CATEGORIES---#
#######################
allele_cat<-get.allele.cat(UK_genpop, region_makeup, N_REGIONS, n_ind_p_pop)
glob_com<-allele_cat[[1]]; glob_lowfr<-allele_cat[[2]]; glob_rare<-allele_cat[[3]]
reg_com_int<-allele_cat[[4]]; loc_com_int<-allele_cat[[5]]; loc_rare<-allele_cat[[6]]
#######################
#--SUBSAMPLE SECTION
#######################
### LIST OF POPULATIONS SAMPLED
#sample certain number of populations, and individuals, and by region
center_pops_vect<-read.csv("Grids/center_edge/center_pops.txt",sep=",",header=F)[[1]]
edge_pops_vect<-read.csv("Grids/center_edge/edge_pops.txt",sep=",",header=F)[[1]]
type_samp<-c("random","each fr diff reg", "only N 2 rows", "center 2 rows", "only S 2 rows",
"core", "edge", "focus S", "focus N")
l_plt_smp<-length(c(2,seq(5,50, by=5)))
n_pops_to_samp<- rep(c(rep(2,l_plt_smp),rep(5,l_plt_smp),rep(10,l_plt_smp),rep(15,l_plt_smp),
rep(20,l_plt_smp),rep(25,l_plt_smp),rep(30,l_plt_smp),rep(35,l_plt_smp),
rep(40,l_plt_smp),rep(45,l_plt_smp),rep(50,l_plt_smp),rep(55,l_plt_smp),
rep(60,l_plt_smp),rep(65,l_plt_smp),rep(70,l_plt_smp)),length(type_samp)) #rep(n_pops,l_plt_smp),
N_SAMPS_P_POP<-as.list(rep(c(2,seq(5,50, by=5)),(length(type_samp)*length(unique(n_pops_to_samp)))))
POPS_TO_SAMP<-list(); this_slot<-1
for (t in 1:length(type_samp)) for (p in 1:length(unique(n_pops_to_samp))) {
if (p>6) REPLC_N=T; if (p<=6) REPLC_N=F
if (t==1) the_pops<-sample(1:n_pops, unique(n_pops_to_samp)[p])
#t=2 makes list of 1 pop per region then samples n_pops from that
if (t==2) the_pops<-sample(sapply(region_makeup,sample,3), unique(n_pops_to_samp)[p],replace=REPLC_N)
#t=3,4,5 takes the top, middle, & end of the grid, unlists, then samples n_pops from that
if (t==3) the_pops<-sample(unlist(region_makeup[1:2]),unique(n_pops_to_samp)[p],replace=REPLC_N)
if (t==4) {
temp_mid<-length(region_makeup)/2
the_pops<-sample(unlist(region_makeup[(temp_mid-1):(temp_mid+1)]),unique(n_pops_to_samp)[p])
}
if (t==5) the_pops<-sample(unlist(tail(region_makeup)[4:5]),unique(n_pops_to_samp)[p])
if (t==6) the_pops<-sample(center_pops_vect, unique(n_pops_to_samp)[p])
if (t==7) the_pops<-sample(edge_pops_vect, unique(n_pops_to_samp)[p])
if (t==8) {
num_in_focus<-floor(0.75*unique(n_pops_to_samp)[p])
the_pops<-c(sample(unlist(tail(region_makeup)[4:5]),num_in_focus),
sample(1:n_pops, (unique(n_pops_to_samp)[p]-num_in_focus)))
}
if (t==9) {
num_in_focus<-floor(0.75*unique(n_pops_to_samp)[p])
the_pops<-c(sample(unlist(region_makeup[1:2]),num_in_focus, replace=REPLC_N),
sample(1:n_pops, (unique(n_pops_to_samp)[p]-num_in_focus)))
}
for (x in 1:l_plt_smp) { POPS_TO_SAMP[[this_slot]]<-the_pops; this_slot=this_slot+1}
}
for (i in 1:length(POPS_TO_SAMP))
N_SAMPS_P_POP[[i]]<-unlist(rep(N_SAMPS_P_POP[i],length(POPS_TO_SAMP[[i]])))
#This checks the above code- just swap out "34" for larger numbers
#temp_ind<-vector(length=15)
#for (i in 1:15) temp_ind[i]<-(which(UK_grid==POPS_TO_SAMP[[34]][i],arr.ind=T)[1])
#sort(temp_ind)
#This is for the MSB samples
POPS_TO_SAMP[[this_slot]]<-MSB_locations; N_SAMPS_P_POP[[this_slot]]<-MSB_plant_nums/10
N_SAMPS_P_POP[[this_slot]][N_SAMPS_P_POP[[this_slot]][]==1]<-2; this_slot=this_slot+1
POPS_TO_SAMP[[this_slot]]<-NTSB_locations; N_SAMPS_P_POP[[this_slot]]<-NTSB_plant_nums/10
N_SAMPS_P_POP[[this_slot]][N_SAMPS_P_POP[[this_slot]][]==1]<-2; this_slot=this_slot+1
POPS_TO_SAMP[[this_slot]]<-MSB_locations; N_SAMPS_P_POP[[this_slot]]<-MSB_plant_nums
this_slot=this_slot+1
POPS_TO_SAMP[[this_slot]]<-NTSB_locations; N_SAMPS_P_POP[[this_slot]]<-NTSB_plant_nums
UK_genind_sep<-seppop(UK_genind)
for (samp in 1:length(POPS_TO_SAMP)){
alleles<-matrix(nrow=length(POPS_TO_SAMP[[samp]]),ncol=length(allele_freqs))
alleles<-sample.pop(UK_genind_sep,POPS_TO_SAMP[[samp]],N_SAMPS_P_POP[[samp]])
summ_results[samp,1,scen,nrep]<-sum(N_SAMPS_P_POP[[samp]]); summ_results[samp,2,scen,nrep]<-length(N_SAMPS_P_POP[[samp]])
#NOW SEE WHAT IS CAUGHT
#this will record each time an allele is in one of our sampled populations
for (p in 1:n_pops){
caught_glob<-colSums(alleles,na.rm=T)
caught_glob_lowfr<-colSums(alleles,na.rm=T)[glob_lowfr]
caught_glob_rare<-colSums(alleles,na.rm=T)[glob_rare]
caught_reg_com<-colSums(alleles,na.rm=T)[reg_com_int]
caught_loc_com<-colSums(alleles,na.rm=T)[loc_com_int]
caught_loc_rare<-colSums(alleles,na.rm=T)[loc_rare]
}
#as long as its not missed (0) it counts as being caught (could try other minima)
got_G<-sum(caught_glob>=1); got_GLF<-sum(caught_glob_lowfr>=1); got_GR<-sum(caught_glob_rare>=1)
got_RC<-sum(caught_reg_com>=1); got_LC<-sum(caught_loc_com>=1); got_LR<-sum(caught_loc_rare>=1)
summ_results[samp,3,scen,nrep]<-got_G; summ_results[samp,4,scen,nrep]<-got_GLF
summ_results[samp,5,scen,nrep]<-got_GR; summ_results[samp,6,scen,nrep]<-got_RC
summ_results[samp,7,scen,nrep]<-got_LC; summ_results[samp,8,scen,nrep]<-got_LR
}
summ_results[1490,3:8,scen,nrep]<-c(length(allele_freqs),length(glob_lowfr),length(glob_rare),
length(reg_com_int),length(loc_com_int),length(loc_rare))
}
#difference between the scenarios_ran
#apply(summ_results[,,1,1:10]-summ_results[,,2,1:10],c(1,2),mean)
#apply(summ_results[1089:1092,,1,1:num_reps],c(1,2),mean)
}
save(summ_results,file="summ_results_mina_2_14_18_MSB2.Rdata")
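## Sketch (not part of the original analysis): the capture rule above counts an
## allele as caught once at least one sampled copy exists; a helper like this
## would make the "other minima" idea mentioned in the comments easy to test:
n_caught <- function(allele_counts, min_copies = 1) sum(allele_counts >= min_copies, na.rm = TRUE)
## e.g. n_caught(caught_glob, min_copies = 2) for a stricter capture criterion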
#####################################
# RESULTS #
#####################################
#FOR THE CHOSEN SCENARIO
if (.Platform$OS.type=="windows"){
setwd("C:/Users/shoban.DESKTOP-DLPV5IJ/Dropbox/Projects/MSB_Kew_ash_Tree_Seed/")
setwd("C:/Users/shoban/Dropbox/Projects/MSB_Kew_ash_Tree_Seed/") }
if (.Platform$OS.type=="unix") setwd("/home/user/Dropbox/Projects/MSB_Kew_ash_Tree_Seed/")
load(file="summ_results_2_14_18_MSB2.Rdata")
load(file="summ_results_mina_2_16_18_MSB2.Rdata")
type_samp<-c("random","each fr diff reg", "only N 2 rows", "center 2 rows", "only S 2 rows",
"core", "edge", "focus S", "focus N")
type_allele<-c("global", "global low frequency", "global rare", "regional", "local common", "local rare")
num_samp_strat<-length(summ_results[,1,1,1]); num_mom_pop_comb<-11*15
num_trees<-c(2,seq(5,50,5))
num_reps<-1000
###########################################################
# COMPARE SPATIAL STRATEGIES GRAPH #
###########################################################
#The first question is about ranking of the large scale spatial strategies relative to each other
#This uses 'matplot' to plot the different sampling strategies
total_exists<-apply(summ_results[num_samp_strat,3:8,,1:num_reps],1,mean)
pdf(file="compare_spatial.pdf",width=14,height=11)
par(mfrow=c(3,2),oma=c(4,3,4,3),mar=c(3,3,2,2))
#this will show 2 populations (lines 1:11), 20 populations (line 45) and 45 populations (line 100)
#If I wanted 5 populations sampled I would use 12
#and we will focus on allele category 3 (global) and 7 (locally common)
#FOR SUPPLEMENTAL
for (start_pop in c(1,45,100)){
#we need to get, for example, 2 populations for each of the sampling strategies (all nine of them)
#each spatial sampling strategy has 165 (11 tree levels times 15 population levels) slots for all tree/pop combinations
#so essentially need slots 1:11, then (1+165):(11+165) etc. etc. The next three lines get that list
start_each_samp<-seq(start_pop,num_samp_strat-5,by=num_mom_pop_comb)
for (i in 1:(length(num_trees)-1)) start_each_samp<-c(start_each_samp,start_each_samp+1) #duplicates are dropped on the next line
some_samps<-sort(unique(start_each_samp))
#For the two allele categories of focus, get the results for that list, make a matrix with
#the spatial strategies as columns and adding more individuals as rows, then plot this
for (i in c(3,7)) {
select_samp<-apply(summ_results[some_samps,i,,1:num_reps],1,mean,na.rm=T)
matplot(matrix(select_samp/total_exists[i-2],nrow=length(num_trees)),type="l",lwd=2,
col=c(rep("black",2),rep("red",3),rep("blue",5)), lty=c(1,2,1,2,3,1,2,3,4,5),
ylab="",xaxt="n",xlab="",cex.axis=1.8)
axis(4,labels=F); axis(1,labels=num_trees, at=1:11,cex.axis=1.8) }
mtext(side=4,line=2,paste(summ_results[start_pop,2,1,1]," populations sampled"),cex=1.4) }
mtext(side=1,line=1.3,"number of trees sampled per population",outer=T,cex=1.4)
mtext(side=2, line=1.3,"proportion of alleles preserved",outer=T,cex=1.4)
mtext(side=3," global alleles locally common alleles",outer=T,cex=2.1)
legend(6,.8,legend=type_samp, col=c(rep("black",2),rep("red",3),rep("blue",5)), lty=c(1,2,1,2,3,1,2,3,4,5), cex=1.3,bg="white", lwd=2)
dev.off()
#this is as above but just the global alleles, reusing some_samps from the last loop iteration (45 populations)
#FOR MAIN MANUSCRIPT
pdf(file="compare_spatial_global_40.pdf",width=9,height=7)
select_samp<-apply(summ_results[some_samps,3,,1:num_reps],1,mean,na.rm=T)
matplot(matrix(select_samp/total_exists[1],nrow=length(num_trees)),type="l",lwd=2.5,
col=c(rep("black",2),rep("red",3),rep("blue",5)), lty=c(1,2,1,2,3,1,2,3,4,5),
ylab="proportion of alleles captured",xaxt="n",xlab="number of trees sampled",
cex=1.5,cex.axis=1.5,cex.lab=1.5)
legend(6,.71,legend=type_samp, col=c(rep("black",2),rep("red",3),rep("blue",5)), lty=c(1,2,1,2,3,1,2,3,4,5), cex=1.3,bg="white", lwd=2.5)
axis(1,at=1:11,labels=(c(2,seq(5,50, by=5))), cex.axis=1.5)
dev.off()
###########################################################
# COMPARE SPATIAL STRATEGIES TABLE #
###########################################################
#This analysis is to rank the spatial strategies
#NOT REALLY USED IN PAPER- THE PLOT TELLS ALL THAT WE NEED
library("tidyr"); library("reshape2")
#melt and dcast will swap L and R, so have to fix order
total_exists<-apply(summ_results[num_samp_strat,3:8,,1:100],1,mean)
total_exists_ord<-total_exists; total_exists_ord[4:5]<-total_exists[5:6]; total_exists_ord[6]<-total_exists[4]
#first rbind the reps together and cbind a column to add sample strategies to each row
bound_results<-as.data.frame(summ_results[,,1,1])
for (i in 2:100) bound_results<-rbind(bound_results,summ_results[,,1,i])
labels_one_rep<-as.factor(c(rep(type_samp,each=num_mom_pop_comb),"MSB","NTSB","MSB10","NTSB10","TOTAL"))
bound_results<-cbind(rep(labels_one_rep,100),bound_results)
colnames(bound_results)<-c("strat","plants","pops","G","GLF","GR","RC","LC","LR")
melted_results<-gather(bound_results,allcat,alleles,G,GLF,GR,RC,LC,LR,-pops,-plants,-strat)
#first get MSB results
MSB_results<-melted_results[which(melted_results$strat=="MSB"),]; means_MSB<-dcast(MSB_results,strat~allcat,mean)
means_MSB[2:7]/total_exists_ord
#remove MSB and total for now
melted_results<-melted_results[-which(melted_results$strat=="MSB"),]; melted_results<-melted_results[-which(melted_results$strat=="MSB10"),]
melted_results<-melted_results[-which(melted_results$strat=="NTSB"),]; melted_results<-melted_results[-which(melted_results$strat=="NTSB10"),]
melted_results<-melted_results[-which(melted_results$strat=="TOTAL"),]
means_all<-dcast(melted_results,strat~allcat,mean)
ss_ranking<-function(num_plants,num_pops){
spec_mean<-dcast(melted_results[melted_results$plants==num_plants&melted_results$pops==num_pops,],strat~allcat,mean) #50/ 50
rownames(spec_mean)<-spec_mean[,1]; spec_mean<-spec_mean[,-1]
#print(rownames(t(t(spec_mean)/total_exists_ord)[order(spec_mean[,4]),])[5:9])
#identify the best by ranking
write.table(cbind(rownames(t(t(spec_mean)/total_exists_ord)[order(spec_mean[,1]),])[5:9],
t(t(spec_mean)/total_exists_ord)[order(spec_mean[,1]),][5:9]),
file="ss_ranking2.csv", append=T)
write.table( t(t(spec_mean)/total_exists_ord), file="ss_ranking.csv", append=T)
}
#do this for low, medium, high sampling, more pops, more individuals see if ranking changes
temp_plants<-c("2500","625","200","50","250","250"); temp_pops<-c("50","25","10","5","5","50")
for (i in 1:length(temp_plants)) ss_ranking(temp_plants[i],temp_pops[i])
#determine which are significantly different than each other
anov_res<-aov(alleles~strat+plants+pops+allcat,data=melted_results)
anov_res<-aov(alleles~strat,data=melted_results)
Tuk_res<-TukeyHSD(anov_res); Tuk_res$strat[order(Tuk_res$strat[,4]),]
#Interestingly the edge and core don't significantly differ from each other; nearly all the rest differ
barplot(means_all[,2]/total_exists_ord[1],names.arg=means_all[,1],las=2)
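## sketch: show only the strategy pairs whose Tukey-adjusted p-value is below
## 0.05 (column 4 of Tuk_res$strat holds the adjusted p-values)
tuk_tab <- as.data.frame(Tuk_res$strat)
tuk_tab[tuk_tab[, 4] < 0.05, ]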
###########################################################
# MSB vs. POTENTIAL SAMPLING #
###########################################################
#Compare MSB to the other samplings: which strategies did better on proportion of alleles captured,
#and/or how many samples would it take to beat the MSB with a different strategy (e.g. sampling only the north)?
summ_results_ex<-summ_results
means_caught<-apply(summ_results_ex[1:1490,,1,1:1000],c(1,2),mean)
means_caught[1490,1:2]<-1
prop_caught<-t(t(means_caught)/means_caught[1490,])
write.csv(prop_caught,file="prop_caught.csv")
#which sampling strategies could have beaten the MSB in terms of choice of populations
which_beat_MSB<-prop_caught[which(prop_caught[,3]>prop_caught[1487,3]),]
write.csv(which_beat_MSB,"which_beat_MSB_G.csv")
which_beat_MSB<-prop_caught[which(prop_caught[,7]>prop_caught[1487,7]),]
write.csv(which_beat_MSB,"which_beat_MSB_LC.csv")
#to get as much as they did while only sampling in the south, they would have had to sample much harder-
#it is very hard to capture all the alleles!
pdf(file="local_vs_global_accum.pdf")
plot(prop_caught[1:1093,3],prop_caught[1:1093,7],xlim=c(0,1),ylim=c(0,1), xlab="all alleles", ylab="locally common alleles")
abline(0,1,col="green",lwd=2)
dev.off()
prop_caught[as.numeric(which(prop_caught[,3]>prop_caught[,7])),1:2]
#Global alleles accumulate "faster" for small collections, and local alleles faster with big collections-
#this means it is easier to eventually get all the locally common alleles, because the accumulation of global ones slows down
###########################################################
# COMPARE TREES VS. POP'NS #
###########################################################
#The question is it better to sample more trees or more populations can partly be answered with a graph
#This makes two plots, one for number of trees on the X axis and one for number of populations on the X axis
#With additional lines being the other variable (populations and trees)
#The following loop will do this for every allele type
for (A in 3:8){
pdf(paste(A,"trees_vs_popns.pdf"), height=5,width=8)
all_mean<-apply(summ_results[,,1,1:1000],c(1,2),mean)
n_pops_samp<- c(2,seq(5,70,by=5)); n_trees_samp<- c(2,seq(5,50,by=5))
all_mean_glob<-matrix(all_mean[1:num_mom_pop_comb,A],nrow=11,dimnames=list(n_trees_samp,n_pops_samp))/all_mean[num_samp_strat,A]
#plots of adding more trees and more populations
par(mfrow=c(1,2),mar=c(5,5,2,1))
plot(all_mean_glob[1,]~colnames(all_mean_glob),type="l",ylim=c(0,1),xlim=c(-4,50),xlab="number of populations",ylab="proportion of genetic variation")
for (t in 1:11) lines(all_mean_glob[t,]~colnames(all_mean_glob))
for (pop_index in c(1:4,6,11)) text(-3,all_mean_glob[pop_index,1],n_pops_samp[pop_index],cex=1.1)
axis(4, at= c(0,.2,.4,.6,.8,1), labels=F)
#plot(all_mean_glob[1,]~colnames(all_mean_glob),type="l",ylim=c(0.4,.9),xlab="number of populations",ylab="proportion of genetic variation")
#for (t in 2:10) lines(all_mean_glob[t,]~colnames(all_mean_glob))
par(mar=c(5,2,2,4))
plot(all_mean_glob[,1]~rownames(all_mean_glob),type="l",ylim=c(0,1),xlim=c(-5,50),xlab="number of trees",yaxt="n")
for (t in 1:11) lines(all_mean_glob[,t]~rownames(all_mean_glob))
for (mom_index in c(1:4,6,11)) text(-3,all_mean_glob[1,mom_index],n_trees_samp[mom_index])
axis(4, at= c(0,.2,.4,.6,.8,1), labels=T); axis(2, at= c(0,.2,.4,.6,.8,1), labels=F)
dev.off()
}
#So the first black bar is 5 trees/ 35 populations, the third red bar is 10 populations/ 35 trees
#Second black bar is 10 trees/ 35 populations, the fourth red bar is 15 populations/ 35 trees
#########################################################################
# GET THE DIMINISHING RETURNS POINT (PLATEAU 1%) #
#########################################################################
#ALSO CALL IT THE STOPPING POINT
#this looks at more trees or more pops
#it will take in the all_mean (all alleles means) matrix
#first for trees it will go through the all_mean matrix and calculate the difference between row r+1 and r
#then will divide this by the number of trees sampled from r+1 to r e.g. divide by 5 or 10 trees
#then this is stored in tree_diff
#then we determine for every sampling group (2 to 50 trees, 11 types) we find the first diff less than thresh
#we do skip the first minimum sampling (2) because it is the diff from the last sampling (50)
#to look at other allele categories just replace the 3]<thresh below with other categories
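## A compact restatement of the stopping rule described above (a sketch; the
## loops below are the original implementation): the first sample size whose
## marginal gain per extra unit sampled falls below `thresh`
first_plateau <- function(gain_per_unit, sizes, thresh = 0.001) {
  hit <- which(gain_per_unit < thresh)
  if (length(hit) == 0) return(NA)
  sizes[min(hit)]
}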
thresh<-0.001
#first get the proportions
all_mean[,3:8]<-t(t(all_mean[,3:8])/(all_mean[1489,3:8]))
#add a column that is number of plants sampled
all_mean<-cbind(all_mean,all_mean[,1]/all_mean[,2])
diff<-all_mean[1:1489,]
#calculate the difference from the next closest sampling
for (i in 1:length(diff[,1])) for (c in 3:8) diff[i,c]<-(all_mean[i+1,c]-all_mean[i,c])/(all_mean[i+1,9]-all_mean[i,9])
#mean(diff[diff[,3]<0.005,9]) #this is wrong
#Note that all_mean is sorted by number of populations so we can look at difference as we add more trees
#The 99 is 9 sampling spatial strategies * 11 tree possibilities
b<-0; plateau_tree<-vector(length=99)
#Go through each set of 11, identify which of those is less than thresh, take the minimum of that
for (i in 0:98) plateau_tree[i+1]<-diff[1+i*10+i+min(which(diff[(1+i*10+i+1):(11+i*10+i),3]<thresh)),9]
mean(plateau_tree,na.rm=T)
#0.001 tree = 28.1; 0.005 = 11.9; 0.01 = 7.22
#Reorder all_mean by number of trees
all_mean_temp<-all_mean
all_mean_temp<-all_mean_temp[order(rownames(all_mean_temp),all_mean_temp[,9]),]
diff<-all_mean_temp[1:1489,]
#calculate the difference from the next closest sampling
for (i in 1:length(diff[,1])) for (c in 3:8) diff[i,c]<-(all_mean_temp[i+1,c]-all_mean_temp[i,c])/(all_mean[i+1,1]-all_mean[i,1])
#so we can look at difference as we add more populations
#The 135 is 9 sampling spatial strategies * 15 population possibilities
b<-0; plateau_pop<-vector(length=135)
for (i in 0:134) plateau_pop[i+1]<-diff[1+i*10+i+min(which(diff[(1+i*10+i+1):(11+i*10+i),3]<thresh)),2]
mean(plateau_pop,na.rm=T)
#0.005 = 17.1 populations, 0.001 = 20.4 populations
##########
#JUNKYARD
##########
#separate data into single populations
#UK_genind_sep<-lapply(seppop(UK_genind), function(x) x[sample(1:nrow(x$tab), 10)])
#this will look at what is captured in a given population
#sum(as.vector(colSums(UK_genind_sep[[39]]@tab)[glob_com])==0)
#sum(as.vector(colSums(UK_genind_sep[[39]]@tab)[glob_lowfr])==0)
#sum(as.vector(colSums(UK_genind_sep[[39]]@tab)[glob_rare])==0)
#but really I want what is captured in all sampled populations
#pooled_data<-repool(UK_genind_sep)
#sum(as.vector(colSums(pooled_data_north@tab)[glob_com])==0)
#this doesn't work to repool, because it makes new allele counts
#colSums lists all alleles and the number of populations (N) having 0 copies of that allele
#we then pull out the indices (which) of alleles for which N = number of populations - 1
#(only one pop'n does not have 0 copies)
these<-as.vector(which(colSums(UK_genpop@tab<12)==(n_pops-1)))
#then take each of these alleles and sort the counts per population- how many counts are in that pop'n
sapply(these, function(x) sort(UK_genpop@tab[,x]))
big_enough<-table_counts[39,]>=30
these<-as.vector(which(colSums(UK_genpop@tab<3)==(n_pops-1)))
sapply(these, function(x) sort(UK_genpop@tab[,x]))
#CYCLE THROUGH THIS FROM 0 TO ABOUT 14, record the population and the allele, its count in that pop, and count in next pop
sum(as.vector(colSums(north_genpop@tab)[glob_lowfr])==0)
|
library(dplyr)   # for %>%, arrange, rename, select
library(stringr) # for str_c
df_output <- df_output %>% arrange(Codelist_Code, Code)
df_output_2 <- df_output %>% rename(ct.name=Codelist_Name, ct.submission_value=CodelistId, ct.code=Codelist_Code, t.code=Code,
t.submission_value=CDISC_Submission_Value, t.label=NCI_Preferred_Term)
df_output_3 <- df_output_2 %>% select(ct.name, ct.submission_value, ct.code, t.code, t.submission_value, t.label)
write.csv(df_output_3, file=str_c(output_path, "/", output_name, ".csv"), row.names=F, na='""', quote=T)
| /program/QC/output.R | permissive | nnh/ptosh-ct-update | R | false | false | 503 | r |
# characterize stages ==> nrem, rem, wake, undef
# merge undef --> closest neighbor (here or later?)
# use change point analysis to find change points
# get prevalent state between changepoints --> label
# merge --> 5 min rem, 15 nrem, 5 min wake (except first rem)
# party!
# graph
# compute stats
# compute
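#
# Sketch of the change-point step above (hypothetical; assumes a `stages`
# vector of epoch labels and the 'changepoint' package, neither of which is
# part of this plan yet):
# library(changepoint)
# cpts <- cpt.meanvar(as.numeric(factor(stages)), method = "PELT")@cpts
# bounds <- c(0, cpts)
# labels <- sapply(seq_along(cpts), function(k) {
#   seg <- stages[(bounds[k] + 1):bounds[k + 1]]
#   names(which.max(table(seg)))  # prevalent state between change points
# })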
| /dev/scripts/nrem_cycles.R | permissive | pmanko/sleep.cycle.tools | R | false | false | 308 | r |
library(RSclient);
Args <-commandArgs(TRUE);
#inputF1= Args[1];#input the CP h5 file;
inputF2= Args[1];#input the transcripts file list
#inputF3= Args[3];#input the model;
output1= Args[2];#output the prediction coding potential for transcripts
c <- RSconnect(); # open a connection to the local Rserve (note: this 'c' shadows base::c)
RSeval(c, sprintf('rnafeature2(\'%s\', \'%s\');', inputF2, output1));
RSclose(c);
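# Usage sketch (assumed invocation; a local Rserve must already be running
# with rnafeature2() defined on the server side):
#   Rscript RNAfeature_client.R transcripts.fa coding_potential.txt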
| /script/dat/RNAfeature_client.R | no_license | lulab/RNAfinder | R | false | false | 347 | r |
# Directories
setwd("directory")
mydata<- read.csv("mly532.csv")
attach(mydata)
weatherarima <- ts(mydata$maxtp, start = c(1941,11), frequency = 12)
plot(weatherarima,type="l",ylab="Temperature in Celsius")
title("Maximum Air Temperature - Dublin")
# Load libraries
library(MASS)
library(tseries)
library(forecast)
# Plot and convert to ln format
lnweather=log(mydata$maxtp[1:741])
lnweather
# ACF, PACF and Dickey-Fuller Test
acf(lnweather, lag.max=20)
pacf(lnweather, lag.max=20)
adf.test(lnweather)
# Time series and seasonality
weatherarima <- ts(lnweather, start = c(1941,11), frequency = 12)
plot(weatherarima,type="l")
title("Maximum Air Temperature - Dublin")
components <- decompose(weatherarima)
components
plot(components)
# ARIMA
fitlnweather<-auto.arima(weatherarima, trace=TRUE, test="kpss", ic="bic")
fitlnweather
confint(fitlnweather)
plot(weatherarima,type='l')
title('Maximum Air Temperature - Dublin')
exp(lnweather)
# Forecasted Values From ARIMA
forecastedvalues_ln=forecast(fitlnweather,h=186)
forecastedvalues_ln
plot(forecastedvalues_ln)
forecastedvaluesextracted=as.numeric(forecastedvalues_ln$mean)
finalforecastvalues=exp(forecastedvaluesextracted)
finalforecastvalues
# Percentage Error
df<-data.frame(mydata$maxtp[742:927],finalforecastvalues)
col_headings<-c("Actual Weather","Forecasted Weather")
names(df)<-col_headings
attach(df)
percentage_error=((df$`Actual Weather`-df$`Forecasted Weather`)/(df$`Actual Weather`))
percentage_error
mean(percentage_error)
percentage_error=data.frame(abs(percentage_error))
accuracy=data.frame(percentage_error[percentage_error$abs.percentage_error. < 0.1,])
frequency=as.data.frame(table(accuracy))
sum(frequency$Freq)/186
hist(percentage_error$abs.percentage_error.,main="Histogram")
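## The percentage-error block above is repeated verbatim for each model below;
## a small helper like this (a sketch, not part of the original script) avoids
## the copy-paste. `tol` is the 10% band used above.
forecast_accuracy <- function(actual, forecasted, tol = 0.1) {
  pct_err <- (actual - forecasted) / actual
  c(mean_error = mean(pct_err), share_within_tol = mean(abs(pct_err) < tol))
}
## e.g. forecast_accuracy(mydata$maxtp[742:927], finalforecastvalues)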
# Ljung-Box
Box.test(fitlnweather$resid, lag=5, type="Ljung-Box")
Box.test(fitlnweather$resid, lag=10, type="Ljung-Box")
Box.test(fitlnweather$resid, lag=15, type="Ljung-Box")
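## one-call alternative (a sketch): residual ACF plot plus a Ljung-Box test
## checkresiduals(fitlnweather)  # from the forecast package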
# Simple exponential smoothing with additive errors
fit1 <- ets(weatherarima)
fit1
forecastedvalues_ets=forecast(fit1,h=186)
forecastedvalues_ets
plot(forecastedvalues_ets)
forecastedvaluesextractedets=as.numeric(forecastedvalues_ets$mean)
finalforecastvaluesets=exp(forecastedvaluesextractedets)
finalforecastvaluesets
# Percentage Error
df<-data.frame(mydata$maxtp[742:927],finalforecastvaluesets)
col_headings<-c("Actual Weather","Forecasted Weather")
names(df)<-col_headings
attach(df)
percentage_error=((df$`Actual Weather`-df$`Forecasted Weather`)/(df$`Actual Weather`))
percentage_error
mean(percentage_error)
percentage_error=data.frame(abs(percentage_error))
accuracy=data.frame(percentage_error[percentage_error$abs.percentage_error. < 0.1,])
frequency=as.data.frame(table(accuracy))
sum(frequency$Freq)/186
# ARIMA(1,0,0) benchmark (no seasonal terms are specified)
fit.1<-Arima(weatherarima, order = c(1,0,0))
# Forecasted Values From ARIMA
forecastedvalues_s=forecast(fit.1,h=186)
forecastedvalues_s
plot(forecastedvalues_s)
forecastedvaluesextracted=as.numeric(forecastedvalues_s$mean)
finalforecastvaluesseason=exp(forecastedvaluesextracted)
finalforecastvaluesseason
# Percentage Error
df<-data.frame(mydata$maxtp[742:927],finalforecastvaluesseason)
col_headings<-c("Actual Weather","Forecasted Weather")
names(df)<-col_headings
attach(df)
percentage_error=((df$`Actual Weather`-df$`Forecasted Weather`)/(df$`Actual Weather`))
percentage_error
mean(percentage_error)
percentage_error=data.frame(abs(percentage_error))
accuracy=data.frame(percentage_error[percentage_error$abs.percentage_error. < 0.1,])
frequency=as.data.frame(table(accuracy))
sum(frequency$Freq)/186
| /arima_weather.R | no_license | vjeevankumar/arima-model-statsmodels-python | R | false | false | 3,580 | r |
# plot results
# trial with 2013-upper river release - best model = 2 trends
# 2013-upper river release - best model = 2 trends
# 2013-mid river release - best model = 4 trends
# 2016-mid river release - best model = 4 trends
# ordination
# from cutom_Dmatrix_for_DFA.R
library(vegan)
ordiplot((mod3$Estimates$Z), choices=c(1,2))#, type="text", display="fish.ID")
arrows(0,0,mod3$Estimates$Z[,1],mod3$Estimates$Z[,2])#,Z.rot[,4],Z.rot[,5],Z.rot[,6],Z.rot[,7],Z.rot[,8],Z.rot[,9])
text(mod3$Estimates$Z[,1],mod3$Estimates$Z[,2], row.names(dat.z), col=grps_13up$mo_arrive) # fish ID and month of arrival to estuary
# for more than one trend
#ZmatFactorGen(Data=dat.z,NumStates=2) #all done internally
#ZmatGen(Data=dat.z,NumStates=2)
#H.inv = varimax(mod2$Estimates$Z)$rotmat
#Z.rot = mod2$Estimates$Z %*% H.inv #maximum variance explained
#trends.rot = solve(H.inv) %*% t(mod2$Estimates$u)
N.ts = dim(dat.z)[1]
TT = dim(dat.z)[2]
N.trends=4
minZ = 0
ylims = c(-1.1*max(abs(mod4$Estimates$Z)), 1.1*max(abs(mod4$Estimates$Z)))
Z.rot = mod4$Estimates$Z
row.names(Z.rot) = row.names(dat.z)
par(mfrow=c(4,1))
# loadings
for(i in 1:N.trends) {
plot(c(1:N.ts)[abs(Z.rot[,i])>minZ], as.vector(Z.rot[abs(Z.rot[,i])>minZ,i]),
type="h", lwd=4, xlab="", ylab="", xaxt="n", ylim=ylims, xlim=c(0,N.ts+1),
col=as.numeric(grps_16up$Route))
for(j in 1:N.ts) {
if(Z.rot[j,i] > minZ) {text(j, -0.05, labels = row.names(Z.rot)[j], srt=90, adj=1, cex=0.9, col="black")}
abline(h=0, lwd=1, col="gray")
} # end j loop
mtext(paste("Factor loadings on trend",i,sep=" "),side=3,line=.5)
} # end i loop
# getting fish IDs
#png(filename = "2013_upper_loadings.png", width = 8, height = 8, units = "in", pointsize = 12, bg = "white", res = 350)
#png(filename = "2013_mid_loadings.png", width = 8, height = 8, units = "in", pointsize = 12, bg = "white", res = 350)
#png(filename = "2016_upper_loadings.png", width = 8, height = 8, units = "in", pointsize = 12, bg = "white", res = 350)
#png(filename = "2017_upper_loadings.png", width = 8, height = 12, units = "in", pointsize = 12, bg = "white", res = 350)
# adding color for route
colors<-c("#800000", "#a9a9a9", "#000075", "#4363d8", "#f58231")
Route<-c("SacRiver", "cendel", "SacRS", "both", "Yolo_Bypass")
col.rt<-data.frame(colors, Route)
grps<-read.csv("JSATS_CV_4DFA.csv")
grps<-merge(grps, col.rt, by="Route")
grps$colors<-as.character(grps$colors)
# double check order is the same between dataframes (after merging color)
FishID<-rownames(dat.z)
ID<-data.frame(FishID,1)
grps_16up<-grps_16up[order(grps_16up[,9], FishID),]
# final loading figure
png(filename = "2016_up_loadings.png", width = 8, height = 12, units = "in", pointsize = 12, bg = "white", res = 350)
par(mfrow=c(4,1))
for(i in 1:N.trends) {
plot(c(1:N.ts)[abs(Z.rot[,i])>minZ], as.vector(Z.rot[abs(Z.rot[,i])>minZ,i]),
type="h", lwd=4, xlab="", ylab="", xaxt="n", ylim=ylims, xlim=c(0,N.ts+1),
col=grps_16up$colors)
for(j in 1:N.ts) {
#browser()
if(Z.rot[j,i] > minZ) {text(j, -0.05, labels = row.names(Z.rot)[j], srt=90, adj=1, cex=0.5, col="black")}
abline(h=0, lwd=1, col="gray")
} # end j loop
mtext(paste("Factor loadings on trend",i,sep=" "),side=3,line=.5)
} # end i loop
dev.off()
# trends
#png(filename = "2013_upper_trends.png", width = 8, height = 8, units = "in", pointsize = 12, bg = "white", res = 350)
#png(filename = "2013_mid_trends.png", width = 8, height = 8, units = "in", pointsize = 12, bg = "white", res = 350)
#png(filename = "2016_upper_trends.png", width = 8, height = 8, units = "in", pointsize = 12, bg = "white", res = 350)
#png(filename = "2017_upper_trends.png", width = 8, height = 12, units = "in", pointsize = 12, bg = "white", res = 350)
png(filename = "2016_up_trends.png", width = 8, height = 12, units = "in", pointsize = 12, bg = "white", res = 350)
par(mfrow=c(4,1))
for(i in 1:dim(t(mod4$Estimates$u))[2]) {
# set up plot area
plot(t(mod4$Estimates$u)[,i],
ylim=c(-1.1,1.1)*max(abs(t(mod4$Estimates$u))),
type="n", lwd=2, bty="L",
xlab="", ylab="", xaxt="n", yaxt="n")
# draw zero-line
abline(h=0, col="gray")
# plot trend line
par(new=TRUE)
plot(t(mod4$Estimates$u)[,i],
ylim=c(-1.1,1.1)*max(abs(t(mod4$Estimates$u))),
type="l", lwd=2, bty="L",
xlab="", ylab="", xaxt="n")
# add panel labels
mtext(paste("Trend",i,sep=" "), side=3, line=0.5)
axis(1, at = seq(0, dim(dat.z)[2], by = 12)) # writes days on x-axis, one tick every 12 steps
} # end i loop (trends)
dev.off()
| /archive/PlotDFA.R | no_license | goertler/acoustic-telemetry-synthesis | R | false | false | 4,495 | r |
##
count_group <- function(dataset, ...) {
dataset %>%
group_by(...) %>%
summarize(count = n()) %>%
mutate(count_prop = count / sum(count)) %>%
arrange(-count)
}
pull_count <- function(dataset) {
dataset %>%
summarize(count = n()) %>%
pull(count)
}
pull_count_unique_people <- function(dataset) {
dataset %>%
distinct(Person.PersonId) %>%
pull_count()
}
focus_role_columns <- function(role_tibble, ...) {
role_tibble %>%
select(Person.PersonId, StartDate, EndDate, NameEn, OrganizationLongEn, ToBeStyledAsEn, ...) %>%
arrange(Person.PersonId, StartDate, EndDate)
}
## find details on a person
lookup_person <- function(PersonId, ...) {
parliamentarians %>%
filter(Person.PersonId == PersonId) %>%
select(Person.DisplayName, ...)
}
party_colour_mappings = tribble(
~party_simple,~colour,
#--|--|----
"liberal","red",
"conservative","blue"
) %>%
rename(name = party_simple)
gender_colour_mappings = tribble(
~Person.Gender,~colour,
#--|--|----,
"F","red",
"M","blue"
) %>%
rename(name = Person.Gender)
generic_colour_mappings = rbind(
party_colour_mappings,
gender_colour_mappings
)
generic_colour_mappings_v <- generic_colour_mappings$colour
names(generic_colour_mappings_v) <- generic_colour_mappings$name
## note that we need to return a `list`
## ref: https://stackoverflow.com/questions/58072649/using-geoms-inside-a-function
colour_block_by_party <- function(party_bg_alpha = 0.1) {
list(
geom_rect(
data = ministries,
inherit.aes = FALSE,
alpha = party_bg_alpha,
mapping = aes(
xmin = start_date,
xmax = end_date,
ymin = -Inf,
ymax = Inf,
fill = party_simple
)
),
scale_fill_manual(
values = generic_colour_mappings_v
)
)
}
## for calculating number of days a position occupied
seq_date_vectorized <- Vectorize(seq.Date, vectorize.args = c("from", "to"))
| /scripts/helpers/analyse.R | no_license | lchski/parliamentarians-analysis | R | false | false | 1,951 | r |
| /scripts/helpers/analyse.R | permissive | lchski/parliamentarians-analysis | R | false | false | 1,951 | r |
agesamplesize<- function(survey,year,country,countrynames,
samplesized,agepopdata,agel,ageh)
{
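# NB: besides its arguments, this function relies on globals defined elsewhere
# in the project: N (number of data rows), sex, and p (the template vector
# used for countsn<-p*0 below)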
i<-1
j<-1
pop<-c(NA)
countsn<-p*0
while(i <= N)
{
studyid<-survey[i]
yearid<-year[i]
countryn<-countrynames[country[i]]
samplesize<-samplesized$SampleSize[which(samplesized$UID == studyid)]
j<-1
totalp<-0
pop<-c(NA)
oldi<-i
totalinds<-c(NA)
otherinds<-c(NA)
totalmenp<-0
totalwomenp<-0
# iterate through all the surveys
while(i <=N & survey[i] == studyid)
{
sexid<-sex[i]
if(agel[i] < 5)
{
if(agel[i] <=1)
{
agestart<-4
}
else
{
agestart<-5
}
} else
{
agestart<-floor(agel[i]/5)+5
}
if(ageh[i] < 5)
{
if(ageh[i] <= 1)
{
ageend<-4
}
else
{
ageend<-5
}
} else
{
ageend<-floor(ageh[i]/5)+5
}
# choose last entry if exceeds end
agestart=min(c(agestart,length(agepopdata[1,])))
ageend=min(c(ageend,length(agepopdata[1,])))
# find correct row in age matrix
if(sexid == 1)
{
rowind<-which(agepopdata$gbd_country == countryn & agepopdata$year == yearid
& agepopdata$sex == "Female")
otherinds<-c(otherinds,i)
}
if(sexid == 2)
{
rowind<-which(agepopdata$gbd_country == countryn & agepopdata$year == yearid
& agepopdata$sex == "Male")
otherinds<-c(otherinds,i)
}
if(sexid == 3)
{
rowind<-which(agepopdata$gbd_country == countryn & agepopdata$year == yearid
& agepopdata$sex == "Total")
totalinds<-c(totalinds,i)
}
# then try to find correct index
# for now include all of range that age falls into
# this is wrong because may double count in wrong
# way when something falls only partially into interval
pop[j]<- sum(agepopdata[rowind,agestart:ageend])
if(sexid == 3)
{
totalp<-totalp+pop[j]
}
if(sexid ==1)
{
totalwomenp<-totalwomenp+pop[j]
}
if(sexid ==2)
{
totalmenp<-totalmenp+pop[j]
}
i<-i+1
j<-j+1
}
if(totalp == 0) # no "both sexes" data for this survey
{
totalp<-sum(pop)
}
if(totalmenp > 0 & totalwomenp > 0)
{
popsexratio<- totalwomenp/(totalwomenp+totalmenp)
}
# then need to average across all in survey
pop<-pop/totalp
# then need to re-weigh by actual survey sample size
# and round to make sure count data
# but have to make sure still sums to correct total: handle later
pops<-round(pop*samplesize)
# if any set to 0, must be wrong because have prevalence
# for that group. so set to 1?
pops[which(pops == 0)]<-1
countsn[oldi:(oldi+(i-oldi-1))] = pops
}
return(countsn)
}
| /Gretchen R modeling/ZOld/agesamplesize.R | no_license | flaxter/nims | R | false | false | 2,628 | r |
\name{fitFunc}
\alias{fitFunc}
\title{
A function to fit a parametric distribution to binned data.
}
\description{
This function fits a parametric distribution to binned data. The data are
subdivided using ID.
}
\usage{
fitFunc(ID, hb, bin_min, bin_max, obs_mean, ID_name,
distribution = "LOGNO", distName = "LNO", links = c(muLink =
"identity", sigmaLink = "log", nuLink = NULL, tauLink = NULL),
qFunc = qLOGNO, quantiles = seq(0.006, 0.996, length.out =
1000), linksq = c(identity, exp, NULL, NULL), con =
gamlss.control(c.crit=0.1,n.cyc=200, trace=FALSE),
saveQuants = FALSE, muStart = NULL, sigmaStart = NULL,
nuStart = NULL, tauStart = NULL, muFix = FALSE,
sigmaFix = FALSE, nuFix = FALSE, tauFix = FALSE,
freeParams = c(TRUE, TRUE, FALSE, FALSE),
smartStart = FALSE, tstamp = as.numeric(Sys.time()))
}
\arguments{
\item{ID}{
a (non-empty) object containing the group ID for each row. Importantly, ID, hb, bin_min, bin_max, and obs_mean MUST be the same length and be in the SAME order.
}
\item{hb}{
a (non-empty) object containing the number of observations in each bin. Importantly, ID, hb, bin_min, bin_max, and obs_mean MUST be the same length and be in the SAME order.
}
\item{bin_min}{
a (non-empty) object containing the lower bound of each bin. Currently, this package cannot handle data with open lower bounds. Importantly, ID, hb, bin_min, bin_max, and obs_mean MUST be the same length and be in the SAME order.
}
\item{bin_max}{
a (non-empty) object containing the upper bound of each bin. Currently, this package can only handle the upper-most bin being open ended. Importantly, ID, hb, bin_min, bin_max, and obs_mean MUST be the same length and be in the SAME order.
}
\item{obs_mean}{
a (non-empty) object containing the mean for each group. Importantly, ID, hb, bin_min, bin_max, and obs_mean MUST be the same length and be in the SAME order.
}
\item{ID_name}{
a (non-empty) object containing column name for the ID column.
}
\item{distribution}{
a (non-empty) character naming a gamlss family.
}
\item{distName}{
a (non-empty) character object with the name of the distribution.
}
\item{links}{
a (non-empty) vector of link characters naming functions with the following items: muLink, sigmaLink, nuLink, and tauLink.
}
\item{qFunc}{
a (non-empty) gamlss function for calculating quantiles; this should match the distribution in distribution.
}
\item{quantiles}{
a (non-empty) numeric vectors of the desired quantiles, these are used in calculating metrics.
}
\item{linksq}{
a (non-empty) vector of functions, which undo the link functions. For example, if muLink = log, then the first entry in linksq should be exp. If you are using an identity link function in links, then the corresponding entry in linksq should be identity.
}
\item{con}{
an optional list modifying gamlss.control.
}
\item{saveQuants}{
an optional logical value indicating whether to save the quantiles.
}
\item{muStart}{
an optional numerical value for the starting value of mu.
}
\item{sigmaStart}{
an optional numerical value for the starting value of sigma.
}
\item{nuStart}{
an optional numerical value for the starting value of nu.
}
\item{tauStart}{
an optional numerical value for the starting value of tau.
}
\item{muFix}{
a logical value indicating whether mu is fixed or is free to vary during the fitting process.
}
\item{sigmaFix}{
a logical value indicating whether sigma is fixed or is free to vary during the fitting process.
}
\item{nuFix}{
a logical value indicating whether nu is fixed or is free to vary during the fitting process.
}
\item{tauFix}{
a logical value indicating whether tau is fixed or is free to vary during the fitting process.
}
\item{freeParams}{
a vector of logical values indicating whether each of the four parameters is free == TRUE or fixed == FALSE.
}
\item{smartStart}{
a logical indicating whether a smart starting place should be chosen; this applies only when fitting the GB2 distribution.
}
\item{tstamp}{
a time stamp.
}
}
\details{
Fits a GAMLSS and estimates a number of metrics; see Value.
}
\value{
returns a list with 'datOut' a data.frame with the IDs, observed mean, distribution, estimated mean, variance, coefficient of variation, cv squared, gini, theil, MLD, aic, bic, the results of a convergence test, log likelihood, number of parameters, median, and std. deviation; 'timeStamp' a time stamp; 'parameters' the estimated parameters; and 'quantiles' the quantile estimates (if saveQuants == TRUE)
}
\references{
FIXME - references
}
\seealso{
\code{\link[gamlss:gamlss]{gamlss}}
}
\examples{
data(state_bins)
use_states <- which(state_bins[,'State'] == 'Texas' | state_bins[,'State'] == 'California')
ID <- state_bins[use_states,'State']
hb <- state_bins[use_states,'hb']
bmin <- state_bins[use_states,'bin_min']
bmax <- state_bins[use_states,'bin_max']
omu <- rep(NA, length(use_states))
fitFunc(ID = ID, hb = hb, bin_min = bmin, bin_max = bmax, obs_mean = omu, ID_name = 'State')
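## hypothetical variant (not run): also keep the estimated quantiles
## fit <- fitFunc(ID = ID, hb = hb, bin_min = bmin, bin_max = bmax,
##                obs_mean = omu, ID_name = 'State', saveQuants = TRUE)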
}
| /man/fitFunc.Rd | no_license | scarpino/binequality | R | false | false | 5,030 | rd |
ifelse(!require('pacman'), install.packages('pacman'),library('pacman'))
pacman::p_load(dplyr,quantmod,plotly,frenchdata,moments,qqplotr)
options(scipen=999)
options(warn=-1) #eliminate warnings messages on the console window
data_list = c("NVDA","PG","PFE","PEP","BAC","^GSPC") #market and stock tickers
#setting date
end_date = Sys.Date()
start_date = (end_date - 365*20) %>% as.Date()
#render empty dataframe
df = data.frame()
for (ticker in data_list) {
data = getSymbols(
Symbols = ticker,
from = start_date,
to = end_date,
src = "yahoo",
auto.assign = F,
verbose = F
) %>% .[,6] #Extract adj.price
df = cbind(data,df)
}
#calculate returns
returns = apply(df, 2, function(x){
ret = x/lag(x) -1
return(ret)
}) %>% na.omit() %>% as.data.frame()
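## NB: x/lag(x) above works because dplyr::lag() (attached via pacman above)
## shifts a plain vector; with stats::lag() this line would silently return
## all-zero returns.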
#compute portfolio returns
#Equal weights split portfolio
returns$'Portfolio' = NA
x = 1:nrow(returns)
returns[x,7] = x %>% sapply(.,function(x){
sum(returns[x,2:6])/5
})
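## equivalent vectorised form (a sketch): returns$Portfolio <- rowMeans(returns[, 2:6])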
summary(returns)
#Fama-French 5 factors model, data start from "start_date"
# you can see the list of factors model via "get_french_data_list()"
ff5 = download_french_data("Fama/French 5 Factors (2x3) [Daily]")$subsets$data[[1]] %>%
as.data.frame() %>%
.[which(.[,1]==format(start_date,"%Y%m%d")):nrow(.),] %>%
  {.[1:7] = data.frame(.[1],.[2:7]/100)} # keep the date column, scale the factor columns from percent to decimal
normaltest_plot_factors = function(asset){
'%+%' = paste0
p1 = asset %>%
plot_ly(
x= 1:length(asset),
y = .,
type = 'scatter',
mode = 'lines')
#Boxplot
p2= plot_ly(
x= scale(asset)[,1],
type = "box")
#Density
df1 = data.frame(scale(asset),"Asset") %>%
`colnames<-` (c("x","Group"))
df2 = data.frame(rnorm(length(asset)),"Normal")%>%
`colnames<-` (c("x","Group"))
df = rbind(df1,df2) %>%
`colnames<-` (c("x","Group"))
gg = ggplot(data = df ) +
geom_histogram(aes(x=x, y = ..density.., fill=Group),bins = 29, alpha = 0.7) +
geom_density(aes(x=x, color=Group)) + geom_rug(aes(x=x, color=Group))+
ylab("") +
xlab("")
p3 = ggplotly(gg)%>%
layout(plot_bgcolor='#e5ecf6',
xaxis = list(
zerolinecolor = '#ffff',
zerolinewidth = 2,
gridcolor = 'ffff'),
yaxis = list(
zerolinecolor = '#ffff',
zerolinewidth = 2,
gridcolor = 'ffff'))
#prob
p4 = scale(asset) %>%
{
ggplot(mapping = aes(sample = .)) +
stat_qq_point(size = 1.5,color = "blue") +
stat_qq_line(color="red") +
xlab("Theoretical quantiles") + ylab("Ordered Values")
} %>% ggplotly()
fig = subplot(p3, p2, p1, p4, nrows = 2, titleY = TRUE, titleX = TRUE, margin = 0.1 )
fig = fig %>% layout(plot_bgcolor='#e5ecf6',
xaxis = list(
zerolinecolor = '#ffff',
zerolinewidth = 2,
gridcolor = 'ffff'),
yaxis = list(
zerolinecolor = '#ffff',
zerolinewidth = 2,
gridcolor = 'ffff'))
# Update title
annotations = list(
list(
x = 0.2,
y = 1.0,
text = "Density",
xref = "paper",
yref = "paper",
xanchor = "center",
yanchor = "bottom",
showarrow = FALSE
),
list(
x = 0.8,
y = 1,
text = "Box plot",
xref = "paper",
yref = "paper",
xanchor = "center",
yanchor = "bottom",
showarrow = FALSE
),
list(
x = 0.2,
y = 0.4,
text = "Simple Returns",
xref = "paper",
yref = "paper",
xanchor = "center",
yanchor = "bottom",
showarrow = FALSE
),
list(
x = 0.8,
y = 0.4,
text = "Probability Plot",
xref = "paper",
yref = "paper",
xanchor = "center",
yanchor = "bottom",
showarrow = FALSE
))
fig = fig %>% layout(annotations = annotations)
fig %>% print()
cat(" ##### Skewness & Kurtosis #####","\n",
"Kurtosis is: " %+% round(kurtosis(asset),digits = 4) %+% ifelse(kurtosis(asset) >0, " [Leptokurtic]", " [Platykurtic]"),"\n",
"Skewness is :" %+% round(skewness(asset),digits = 4) %+% ifelse(skewness(asset) >0, " [Right-Skewness]"," [Left-Skewness]")
)
}
normaltest_plot_factors(returns$PG.Adjusted)
normaltest_plot_factors(returns$BAC.Adjusted)
normaltest_plot_factors(returns$NVDA.Adjusted)
normaltest_plot_factors(ff5$Mkt.RF)
normaltest_plot_factors(ff5$SMB)
normaltest_plot_factors(ff5$HML)
| /data_analysis from leonhack.R | no_license | parkminhyung/R-code-for-finance | R | false | false | 4,662 | r |
df <- read.table("frankesteinPMFhard.csv", sep=",", head=F)
df2 = df[29:nrow(df),1:10]
# replace each row's NAs with that row's maximum value before averaging
for(i in 1:nrow(df2)){
df2[i,which(is.na(df2[i,]))]= max(df2[i,], na.rm=T)
}
df2$V11 = rowMeans(df2)
res = c(df$V11[1:28], df2$V11)
time = df$V12
hard = data.frame(res, time)
df <- read.table("frankesteinPMFsoft.csv", sep=",", head=F)
df2 = df[22:nrow(df),1:10]
for(i in 1:nrow(df2)){
df2[i,which(is.na(df2[i,]))]= max(df2[i,], na.rm=T)
}
df2$V11 = rowMeans(df2)
res = c(df$V11[1:21], df2$V11)
time = df$V12
resAdd = hard[66:75,]$res+runif(10) # pad the soft series with a jittered tail of the hard series
timeAdd = rep(NA,10)
res = c(res,resAdd)
time = c(time, timeAdd)
soft = data.frame(res, time)
plot(1:nrow(hard), hard$res,ylab ="RMSE", xlab ="Percentage of missing values"
, main="Hard sensors temp IBRL data"#, ylim = c(4.003,4.027)
, col="#00994C", pch = 19, xaxt="n"#, ylim = c(min(df4$RMSE, df$RMSE), max(df4$RMSE, df$RMSE))
)
axis(1, at = seq(1,85,3))
with(hard, lines(loess.smooth(1:nrow(hard), res),col = "#006600", lwd=2.5))
points(1:nrow(soft),soft$res , col="red", pch = 18, xaxt="n")
with(soft, lines(loess.smooth(1:nrow(soft), res),col = "red", lwd=2.5))
legend("topleft", legend=c("PMF (#cl = 10)", "BME (#hs= 6)"),
col=c("red", "#00994C"), lty = 1, cex=1, lwd=2)
dev.off() # close the device opened by the plot above
svg(filename="figxx.svg", width = 12, height = 8, pointsize = 12)
# NOTE: this block expects data frames `df` and `df4` with V1/RMSE columns
# built elsewhere; the `df` read above has no RMSE column.
plot(df$V1, df$RMSE,ylab ="RMSE", xlab ="Percentage of missing values"
, main="Hard sensors temp IBRL data"#, ylim = c(4.003,4.027)
, col="#00994C", pch = 19, cex = 1.5, xaxt="n", ylim = c(min(df4$RMSE, df$RMSE), max(df4$RMSE, df$RMSE))
)
axis(1, at = seq(1,85,3))
with(df, lines(loess.smooth(V1, RMSE),col = "#006600", lwd=2.5))
points(df4$V1,df4$RMSE , col="red", pch = 18, cex = 1.5, xaxt="n")
with(df4, lines(loess.smooth(V1, RMSE),col = "red", lwd=2.5))
legend("topleft", legend=c("PMF (#cl = 10)", "BME (#hs= 6)"),
col=c("red", "#00994C"), lty = 1, cex=1.5, lwd=2)
dev.off()
| /Melbourne/previo/BMECode/Results_IBRL/figs/figxx.R | no_license | auroragonzalez/BEATS | R | false | false | 1,924 | r |
library(shiny)
library(leaflet)
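# UI definition only: the companion server.R is expected to render
# output$durm (the leaflet map), output$durm.crime and output$neighb.crime
# (the two trend plots) referenced below.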
shinyUI(fluidPage(
  # Application title.
  titlePanel("Durham Crime"),
  # Sidebar layout style
  sidebarLayout(
    sidebarPanel(width = 4,
                 leafletOutput('durm', height = '700px')
    ),
    mainPanel(
      tabsetPanel(
        tabPanel('Overall Crime Trends by Neighborhood',
                 selectInput('xaxis', 'Choose X axis',
                             choices = c('TRACT', 'BLKGRP', 'id'), selected = 'id'),
                 plotOutput('durm.crime')),
        tabPanel('Crime Trends Within a Neighborhood',
                 plotOutput('neighb.crime'))
      )
    )
  )
)) | /Lesson3_ShinyR/Example3/ui.r | no_license | dl281/ENV859_GIS_R | R | false | false | 616 | r |
\name{model.matrix.rma}
\alias{model.matrix.rma}
\title{Model Matrix for 'rma' Objects}
\description{
The function extracts the model matrix for objects of class \code{"rma"}.
}
\usage{
\method{model.matrix}{rma}(object, \dots)
}
\arguments{
\item{object}{an object of class \code{"rma"}.}
\item{\dots}{other arguments.}
}
\value{
The model matrix.
}
\author{
Wolfgang Viechtbauer \email{wvb@metafor-project.org} \cr
package website: \url{http://www.metafor-project.org/} \cr
author homepage: \url{http://www.wvbauer.com/}
}
\references{
Viechtbauer, W. (2010). Conducting meta-analyses in R with the metafor package. \emph{Journal of Statistical Software}, \bold{36}(3), 1--48. \url{http://www.jstatsoft.org/v36/i03/}.
}
\seealso{
\code{\link{fitted.rma}}
}
\examples{
### load BCG vaccine data
data(dat.bcg)
### meta-analysis of the log relative risks using a mixed-effects meta-regression model
### with multiple moderators (absolute latitude, publication year, and allocation method)
res <- rma(measure="RR", ai=tpos, bi=tneg, ci=cpos, di=cneg,
mods = ~ ablat + year + alloc, data=dat.bcg)
model.matrix(res)
}
\keyword{models}
| /metafor/man/model.matrix.rma.Rd | no_license | skoval/meta-analysis | R | false | false | 1,208 | rd |
#' @importFrom methods is
#' @importFrom R6 R6Class
#' @importFrom utils read.delim
Predictor <- R6::R6Class(
classname = "lgb.Predictor",
cloneable = FALSE,
public = list(
# Finalize will free up the handles
finalize = function() {
# Check the need for freeing handle
if (private$need_free_handle && !lgb.is.null.handle(x = private$handle)) {
# Freeing up handle
lgb.call(
fun_name = "LGBM_BoosterFree_R"
, ret = NULL
, private$handle
)
private$handle <- NULL
}
},
# Initialize will create a starter model
initialize = function(modelfile, ...) {
params <- list(...)
private$params <- lgb.params2str(params = params)
# Create new lgb handle
handle <- lgb.null.handle()
# Check if handle is a character
if (is.character(modelfile)) {
# Create handle on it
handle <- lgb.call(
fun_name = "LGBM_BoosterCreateFromModelfile_R"
, ret = handle
, lgb.c_str(x = modelfile)
)
private$need_free_handle <- TRUE
} else if (methods::is(modelfile, "lgb.Booster.handle")) {
# Check if model file is a booster handle already
handle <- modelfile
private$need_free_handle <- FALSE
} else {
stop("lgb.Predictor: modelfile must be either a character filename or an lgb.Booster.handle")
}
# Override class and store it
class(handle) <- "lgb.Booster.handle"
private$handle <- handle
},
# Get current iteration
current_iter = function() {
cur_iter <- 0L
lgb.call(
fun_name = "LGBM_BoosterGetCurrentIteration_R"
, ret = cur_iter
, private$handle
)
},
# Predict from data
predict = function(data,
start_iteration = NULL,
num_iteration = NULL,
rawscore = FALSE,
predleaf = FALSE,
predcontrib = FALSE,
header = FALSE,
reshape = FALSE) {
# Check if number of iterations is existing - if not, then set it to -1 (use all)
if (is.null(num_iteration)) {
num_iteration <- -1L
}
# Check if start iterations is existing - if not, then set it to 0 (start from the first iteration)
if (is.null(start_iteration)) {
start_iteration <- 0L
}
num_row <- 0L
# Check if data is a file name and not a matrix
if (identical(class(data), "character") && length(data) == 1L) {
# Data is a filename, create a temporary file with a "lightgbm_" pattern in it
tmp_filename <- tempfile(pattern = "lightgbm_")
on.exit(unlink(tmp_filename), add = TRUE)
# Predict from temporary file
lgb.call(
fun_name = "LGBM_BoosterPredictForFile_R"
, ret = NULL
, private$handle
, data
, as.integer(header)
, as.integer(rawscore)
, as.integer(predleaf)
, as.integer(predcontrib)
, as.integer(start_iteration)
, as.integer(num_iteration)
, private$params
, lgb.c_str(x = tmp_filename)
)
# Get predictions from file
preds <- utils::read.delim(tmp_filename, header = FALSE, sep = "\t")
num_row <- nrow(preds)
preds <- as.vector(t(preds))
} else {
# Not a file, we need to predict from R object
num_row <- nrow(data)
npred <- 0L
# Check number of predictions to do
npred <- lgb.call(
fun_name = "LGBM_BoosterCalcNumPredict_R"
, ret = npred
, private$handle
, as.integer(num_row)
, as.integer(rawscore)
, as.integer(predleaf)
, as.integer(predcontrib)
, as.integer(start_iteration)
, as.integer(num_iteration)
)
# Pre-allocate empty vector
preds <- numeric(npred)
# Check if data is a matrix
if (is.matrix(data)) {
# this if() prevents the memory and computational costs
# of converting something that is already "double" to "double"
if (storage.mode(data) != "double") {
storage.mode(data) <- "double"
}
preds <- lgb.call(
fun_name = "LGBM_BoosterPredictForMat_R"
, ret = preds
, private$handle
, data
, as.integer(nrow(data))
, as.integer(ncol(data))
, as.integer(rawscore)
, as.integer(predleaf)
, as.integer(predcontrib)
, as.integer(start_iteration)
, as.integer(num_iteration)
, private$params
)
} else if (methods::is(data, "dgCMatrix")) {
if (length(data@p) > 2147483647L) {
stop("Cannot support large CSC matrix")
}
# Check if data is a dgCMatrix (sparse matrix, column compressed format)
preds <- lgb.call(
fun_name = "LGBM_BoosterPredictForCSC_R"
, ret = preds
, private$handle
, data@p
, data@i
, data@x
, length(data@p)
, length(data@x)
, nrow(data)
, as.integer(rawscore)
, as.integer(predleaf)
, as.integer(predcontrib)
, as.integer(start_iteration)
, as.integer(num_iteration)
, private$params
)
} else {
stop("predict: cannot predict on data of class ", sQuote(class(data)))
}
}
# Check if number of rows is strange (not a multiple of the dataset rows)
if (length(preds) %% num_row != 0L) {
stop(
"predict: prediction length "
, sQuote(length(preds))
, " is not a multiple of nrows(data): "
, sQuote(num_row)
)
}
# Get number of cases per row
npred_per_case <- length(preds) / num_row
# Data reshaping
      if (predleaf || predcontrib) {
# Predict leaves only, reshaping is mandatory
preds <- matrix(preds, ncol = npred_per_case, byrow = TRUE)
} else if (reshape && npred_per_case > 1L) {
# Predict with data reshaping
preds <- matrix(preds, ncol = npred_per_case, byrow = TRUE)
}
return(preds)
}
),
private = list(
handle = NULL
, need_free_handle = FALSE
, params = ""
)
)
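# Usage sketch (not part of the original file; Predictor is internal and is
# normally reached through predict() on an lgb.Booster, so "model.txt" and
# the feature matrix X below are placeholders):
#   predictor <- Predictor$new("model.txt")
#   X <- matrix(rnorm(20L), nrow = 5L)
#   scores <- predictor$predict(X, rawscore = FALSE, reshape = TRUE)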
| /R-package/R/lgb.Predictor.R | permissive | austinpagan/LightGBM | R | false | false | 6,571 | r |
#!/usr/bin/env Rscript
source("utils.R")
dtypes_base_parms <- list(
n_d = 16,
min_mu = 0.5,
max_mu = 2.0,
beta = 2.0,
c_beta = 1.0,
c_mu = 1.1,
k = 15.0
)
dtypes_base_parms$mu_d <- with(
dtypes_base_parms,
seq(min_mu, max_mu, length = n_d)
)
# pack the list into a vector
dtypes_pack <- function(lst) {
stopifnot(setequal(names(lst), c("X_i", "S_id", "R_id")))
stopifnot(all(dim(lst$S_id) == dim(lst$R_id)))
stopifnot(dim(lst$S_id)[1] == length(lst$X_i))
c(lst$X_i, as.vector(lst$S_id), as.vector(lst$R_id))
}
# unpack the vector into a list
dtypes_unpack <- function(x, n_pop, n_d) {
stopifnot(length(x) == n_pop + 2 * (n_d * n_pop))
x_end <- n_pop
s_start <- x_end + 1
s_end <- x_end + n_d * n_pop
r_start <- s_end + 1
r_end <- s_end + n_d * n_pop
stopifnot(r_end == length(x))
list(
X_i = x[1:x_end],
S_id = matrix(x[s_start:s_end], nrow = n_pop, ncol = n_d),
R_id = matrix(x[r_start:r_end], nrow = n_pop, ncol = n_d)
)
}
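# Round-trip sanity check (sketch with illustrative values, not part of the
# original script): unpacking a packed state must recover X_i / S_id / R_id.
if (FALSE) {
  demo_state <- list(
    X_i = c(0.5, 0.4),
    S_id = matrix(1:6 / 100, nrow = 2, ncol = 3),
    R_id = matrix(7:12 / 100, nrow = 2, ncol = 3)
  )
  stopifnot(identical(
    dtypes_unpack(dtypes_pack(demo_state), n_pop = 2, n_d = 3),
    demo_state
  ))
}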
# for initially packing the data frame into the list
dtypes_df_to_list <- function(df) {
# check we have the right column names
stopifnot(setequal(names(df), c("population", "phenotype", "dtype", "value")))
# check we have the right phenotypes
stopifnot(setequal(df$phenotype, c("S", "R", "X")))
# extract the no. populations and D-types
n_pop <- length(unique(df$population))
n_d <- length(unique(df$dtype))
# there should be S and R for each pop/D-type combo
stopifnot(nrow(filter(df, phenotype == "S")) == n_pop * n_d)
stopifnot(nrow(filter(df, phenotype == "R")) == n_pop * n_d)
# but X for only each pop
stopifnot(nrow(filter(df, phenotype == "X")) == n_pop)
list(
X_i = df %>% filter(phenotype == "X") %>% pull(value),
S_id = df %>% filter(phenotype == "S") %>% pull(value) %>% matrix(nrow = n_pop, ncol = n_d),
R_id = df %>% filter(phenotype == "R") %>% pull(value) %>% matrix(nrow = n_pop, ncol = n_d)
)
}
# for finally unpacking the list into a data frame
dtypes_list_to_df <- function(lst) {
# check for names
stopifnot(setequal(names(lst), c("X_i", "S_id", "R_id")))
# get dimensions
n_pop <- length(lst$X_i)
n_d <- dim(lst$S_id)[2]
# check shapes
stopifnot(all(dim(lst$S_id) == c(n_pop, n_d)))
stopifnot(all(dim(lst$R_id) == c(n_pop, n_d)))
X_rows <- tibble(pop = 1:n_pop, phenotype = "X", dtype = NA, value = lst$X_i)
S_rows <- crossing(dtype = 1:n_d, pop = 1:n_pop) %>%
mutate(phenotype = "S", value = as.vector(lst$S_id))
R_rows <- crossing(dtype = 1:n_d, pop = 1:n_pop) %>%
mutate(phenotype = "R", value = as.vector(lst$R_id))
bind_rows(X_rows, S_rows, R_rows) %>%
arrange(pop, phenotype, dtype)
}
dtypes_ode_func <- function(t, state_vector, parms) {
state <- dtypes_unpack(state_vector, n_pop = parms$n_pop, n_d = parms$n_d)
with(c(state, parms), {
stopifnot(length(X_i) == n_pop)
stopifnot(dim(S_id) == c(n_pop, n_d))
stopifnot(dim(R_id) == c(n_pop, n_d))
# rowSums are over D-types (second index)
    v_id <- (1.0 - ((S_id + R_id) / rowSums(S_id + R_id) - 1.0 / n_d))^k
stopifnot(dim(v_id) == c(n_pop, n_d))
N_i <- X_i + rowSums(S_id + R_id)
stopifnot(length(N_i) == n_pop)
# "tau_i * S_id" does sum_i { tau_i S_id }, which is length P vector
# to multiply by rows, need to do some fancy footwork: "mat %*% diag(row)"
dS_id <- v_id * (beta_ij %*% (S_id / N_i)) * X_i - tau_i * S_id - S_id %*% diag(mu_d)
dR_id <- v_id * (beta_ij %*% (R_id / N_i)) * X_i - c_mu * R_id %*% diag(mu_d)
dX_i <- -rowSums(dS_id + dR_id)
list(dtypes_pack(list(X_i = dX_i, S_id = dS_id, R_id = dR_id)))
})
}
dtypes_sim_nomemo <- function(parms) {
stopifnot(
setequal(
names(parms),
c(names(dtypes_base_parms), "tau_i", "transmission_matrix")
)
)
n_pop <- length(parms$tau_i)
check_transmission_matrix(parms$transmission_matrix, n_pop)
parms$beta_ij <- with(parms, { beta * transmission_matrix })
parms$n_pop <- n_pop
n_d <- parms$n_d
state <- list(
X_i = rep(0.9 / n_pop, n_pop),
S_id = matrix(0.05 / (n_pop * n_d), nrow = n_pop, ncol = n_d),
R_id = matrix(0.05 / (n_pop * n_d), nrow = n_pop, ncol = n_d)
)
state_vector <- dtypes_pack(state)
stopifnot(all.equal(sum(state_vector), 1))
result <- rootSolve::runsteady(
state_vector,
func = dtypes_ode_func,
parms = parms,
stol = 1e-8 / n_pop,
rtol = 1e-6 / n_pop,
atol = 1e-6 / n_pop
)
result$y %>%
dtypes_unpack(n_pop = n_pop, n_d = n_d) %>%
dtypes_list_to_df() %>%
left_join(tibble(pop = 1:n_pop, tau = parms$tau_i), by = "pop")
}
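# my_memoise() (assumed to come from utils.R) caches results keyed on parms,
# so repeated calls with identical parameters reuse the solved steady state.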
dtypes_sim <- my_memoise(dtypes_sim_nomemo)
dtypes_epsilon_values <- c(0, 1e-4, 0.001, 0.005, 0.01, 0.0175, 0.025, 0.05, 0.075, 0.1)
dtypes_simplify_results <- function(df) {
df %>%
group_by(pop, phenotype) %>%
summarize(
value = sum(value),
tau = unique(tau)
) %>%
ungroup() %>%
spread(phenotype, value) %>%
mutate(rho = R / (R + S))
}
dtypes_2pop_sim <- function(tau1, tau2, epsilon) {
parms <- dtypes_base_parms %>%
`$<-`("transmission_matrix", epsilon_matrix(epsilon)) %>%
`$<-`("tau_i", c(tau1, tau2))
dtypes_sim(parms) %>%
dtypes_simplify_results
}
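# One-off sanity run (sketch; the tau and epsilon values are illustrative,
# and epsilon_matrix() is assumed from utils.R as elsewhere in this file):
if (FALSE) {
  dtypes_2pop_sim(tau1 = 0.10, tau2 = 0.15, epsilon = 0.01)
}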
# Run the simulations -------------------------------------------------
dtypes_2pop <- tibble(
base_tau = 0.125,
delta_tau = c(0.05, 0.10),
tau1 = base_tau - delta_tau / 2,
tau2 = base_tau + delta_tau / 2
) %>%
crossing(epsilon = dtypes_epsilon_values) %>%
mutate(
simulation_id = 1:n(),
results = pmap(list(tau1, tau2, epsilon), dtypes_2pop_sim)
) %>%
select(simulation_id, everything())
write_rds(dtypes_2pop, "cache/dtypes_2pop.rds")
| /dtypes_sims.R | no_license | swo/amr_geography | R | false | false | 5,697 | r |
source("utils.R")
dtypes_base_parms <- list(
n_d = 16,
min_mu = 0.5,
max_mu = 2.0,
beta = 2.0,
c_beta = 1.0,
c_mu = 1.1,
k = 15.0
)
dtypes_base_parms$mu_d <- with(
dtypes_base_parms,
seq(min_mu, max_mu, length = n_d)
)
# pack the list into a vector
dtypes_pack <- function(lst) {
stopifnot(setequal(names(lst), c("X_i", "S_id", "R_id")))
stopifnot(all(dim(lst$S_id) == dim(lst$R_id)))
stopifnot(dim(lst$S_id)[1] == length(lst$X_i))
c(lst$X_i, as.vector(lst$S_id), as.vector(lst$R_id))
}
# unpack the vector into a list
dtypes_unpack <- function(x, n_pop, n_d) {
stopifnot(length(x) == n_pop + 2 * (n_d * n_pop))
x_end <- n_pop
s_start <- x_end + 1
s_end <- x_end + n_d * n_pop
r_start <- s_end + 1
r_end <- s_end + n_d * n_pop
stopifnot(r_end == length(x))
list(
X_i = x[1:x_end],
S_id = matrix(x[s_start:s_end], nrow = n_pop, ncol = n_d),
R_id = matrix(x[r_start:r_end], nrow = n_pop, ncol = n_d)
)
}
# for initially packing the data frame into the list
dtypes_df_to_list <- function(df) {
# check we have the right column names
stopifnot(setequal(names(df), c("population", "phenotype", "dtype", "value")))
# check we have the right phenotypes
stopifnot(setequal(df$phenotype, c("S", "R", "X")))
# extract the no. populations and D-types
n_pop <- length(unique(df$population))
n_d <- length(unique(df$dtype))
# there should be S and R for each pop/D-type combo
stopifnot(nrow(filter(df, phenotype == "S")) == n_pop * n_d)
stopifnot(nrow(filter(df, phenotype == "R")) == n_pop * n_d)
# but X for only each pop
stopifnot(nrow(filter(df, phenotype == "X")) == n_pop)
list(
X_i = df %>% filter(phenotype == "X") %>% pull(value),
S_id = df %>% filter(phenotype == "S") %>% pull(value) %>% matrix(nrow = n_pop, ncol = n_d),
R_id = df %>% filter(phenotype == "R") %>% pull(value) %>% matrix(nrow = n_pop, ncol = n_d)
)
}
# for finally unpacking the list into a data frame
dtypes_list_to_df <- function(lst) {
# check for names
stopifnot(setequal(names(lst), c("X_i", "S_id", "R_id")))
# get dimensions
n_pop <- length(lst$X_i)
n_d <- dim(lst$S_id)[2]
# check shapes
stopifnot(all(dim(lst$S_id) == c(n_pop, n_d)))
stopifnot(all(dim(lst$R_id) == c(n_pop, n_d)))
X_rows <- tibble(pop = 1:n_pop, phenotype = "X", dtype = NA, value = lst$X_i)
S_rows <- crossing(dtype = 1:n_d, pop = 1:n_pop) %>%
mutate(phenotype = "S", value = as.vector(lst$S_id))
R_rows <- crossing(dtype = 1:n_d, pop = 1:n_pop) %>%
mutate(phenotype = "R", value = as.vector(lst$R_id))
bind_rows(X_rows, S_rows, R_rows) %>%
arrange(pop, phenotype, dtype)
}
dtypes_ode_func <- function(t, state_vector, parms) {
state <- dtypes_unpack(state_vector, n_pop = parms$n_pop, n_d = parms$n_d)
with(c(state, parms), {
stopifnot(length(X_i) == n_pop)
stopifnot(dim(S_id) == c(n_pop, n_d))
stopifnot(dim(R_id) == c(n_pop, n_d))
# rowSums are over D-types (second index)
v_id <- (1.0 - ((S_id + R_id) / rowSums(S_id + R_id) - 1.0 / n_d)) ** k
stopifnot(dim(v_id) == c(n_pop, n_d))
N_i <- X_i + rowSums(S_id + R_id)
stopifnot(length(N_i) == n_pop)
# "tau_i * S_id" does sum_i { tau_i S_id }, which is length P vector
# to multiply by rows, need to do some fancy footwork: "mat %*% diag(row)"
dS_id <- v_id * (beta_ij %*% (S_id / N_i)) * X_i - tau_i * S_id - S_id %*% diag(mu_d)
dR_id <- v_id * (beta_ij %*% (R_id / N_i)) * X_i - c_mu * R_id %*% diag(mu_d)
dX_i <- -rowSums(dS_id + dR_id)
list(dtypes_pack(list(X_i = dX_i, S_id = dS_id, R_id = dR_id)))
})
}
dtypes_sim_nomemo <- function(parms) {
stopifnot(
setequal(
names(parms),
c(names(dtypes_base_parms), "tau_i", "transmission_matrix")
)
)
n_pop <- length(parms$tau_i)
check_transmission_matrix(parms$transmission_matrix, n_pop)
parms$beta_ij <- with(parms, { beta * transmission_matrix })
parms$n_pop <- n_pop
n_d <- parms$n_d
state <- list(
X_i = rep(0.9 / n_pop, n_pop),
S_id = matrix(0.05 / (n_pop * n_d), nrow = n_pop, ncol = n_d),
R_id = matrix(0.05 / (n_pop * n_d), nrow = n_pop, ncol = n_d)
)
state_vector <- dtypes_pack(state)
stopifnot(all.equal(sum(state_vector), 1))
result <- rootSolve::runsteady(
state_vector,
func = dtypes_ode_func,
parms = parms,
stol = 1e-8 / n_pop,
rtol = 1e-6 / n_pop,
atol = 1e-6 / n_pop
)
result$y %>%
dtypes_unpack(n_pop = n_pop, n_d = n_d) %>%
dtypes_list_to_df() %>%
left_join(tibble(pop = 1:n_pop, tau = parms$tau_i), by = "pop")
}
dtypes_sim <- my_memoise(dtypes_sim_nomemo)
dtypes_epsilon_values <- c(0, 1e-4, 0.001, 0.005, 0.01, 0.0175, 0.025, 0.05, 0.075, 0.1)
dtypes_simplify_results <- function(df) {
df %>%
group_by(pop, phenotype) %>%
summarize(
value = sum(value),
tau = unique(tau)
) %>%
ungroup() %>%
spread(phenotype, value) %>%
mutate(rho = R / (R + S))
}
dtypes_2pop_sim <- function(tau1, tau2, epsilon) {
parms <- dtypes_base_parms %>%
`$<-`("transmission_matrix", epsilon_matrix(epsilon)) %>%
`$<-`("tau_i", c(tau1, tau2))
dtypes_sim(parms) %>%
dtypes_simplify_results
}
# Run the simulations -------------------------------------------------
dtypes_2pop <- tibble(
base_tau = 0.125,
delta_tau = c(0.05, 0.10),
tau1 = base_tau - delta_tau / 2,
tau2 = base_tau + delta_tau / 2
) %>%
crossing(epsilon = dtypes_epsilon_values) %>%
mutate(
simulation_id = 1:n(),
results = pmap(list(tau1, tau2, epsilon), dtypes_2pop_sim)
) %>%
select(simulation_id, everything())
write_rds(dtypes_2pop, "cache/dtypes_2pop.rds")
|
url <- "/home/wut/Desktop/Link to Data/FYP Program/Data/alldata.csv"
#url <- "E:/WUT FYP DATA/FYP Program/FYP Program/Data/alldata.csv"
source("data_spliting.R")
# Train 70%
train_per <- 0.7
data_set <- data_spliting(url,train_per)
train_dataset <- list()
validate_dataset <- list()
predictor_order <- seq(3,10,1)
validate_date <- list()
non_normalize <- list()
result_usd <- list()
actual_value <- list()
learning_rate <- seq(0.1,1,0.1)
activation_func <- c("logistic", "tanh")
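# Layout of `data_set` as indexed below (inferred from this script, not from
# data_spliting.R): data_set[[currency]][[po_index]] with currency 1=USD,
# 2=GBP, 3=EUR, 4=CHF, 5=AUD, 6=CAD, 7=SGD; each element holds [[1]] the
# training set, [[2]] the validation set, [[4]] the validation dates and
# [[6]] the non-normalised series, e.g. data_set[[1]][[1]][[1]] is the USD
# training set at predictor order 3.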
#************************************************************ HOMOGENEOUS *************************************************************************#
source("HOMO.R")
#************************************* USD **************************************************#
Result_USD_HOMO_LIST_70 <- list()
count <- 1
count2 <- 1
### Changes in Neurons and Learning Functions and Learning Rate
# build the results table once, before the loop: re-creating it inside the
# loop while count2 keeps growing would discard every predictor order but the last
result_HOMO_USD <- data.frame("Predictor_Order"=numeric(),"Neurons"=numeric(),"RMSE"=numeric(),
                              "MAE"=numeric(),"Activation_Function"=character(),
                              "Learning_Rate"=numeric(),"Fusion_Fuc"=character(),
                              stringsAsFactors=FALSE)
for (i in 1:length(predictor_order)) {
non_normalize_PO <- data.frame()
validate_date_PO <- data.frame()
actual_value_PO <- data.frame()
result__value_PO <- list()
train_dataset[[i]] <- data_set[[1]][[i]][[1]]
validate_dataset[[i]] <- data_set[[1]][[i]][[2]]
validate_date[[i]] <- data_set[[1]][[i]][[4]]
non_normalize[[i]]<- data_set[[1]][[i]][[6]]
actual_value[[i]] <- validate_dataset[[i]][,i+3]
 # neuron grid: the lower bound is floor(predictor_order/2) + 1 (2 at PO 3 up to 6 at PO 10)
 neurons <- seq(floor(predictor_order[i]/2) + 1, 20, 1)
for (l in 1:length(learning_rate) ){
for (k in 1:length(activation_func)) {
for (j in 1:length(neurons)) {
train_dataset_PO <- train_dataset[[i]]
validate_dataset_PO <- validate_dataset[[i]]
validate_date_PO <- validate_date[[i]]
non_normalize_PO <- non_normalize[[i]]
actual_value_PO <- actual_value[[i]]
result__value_PO[[j]] <- HOMO(train_dataset_PO,validate_dataset_PO, non_normalize_PO,
neurons = neurons[j], predictor_order[i],
activation_func[k],learning_rate[l])
result_HOMO_USD[count2,] <-c(predictor_order[i],neurons[j],
result__value_PO[[j]][4],result__value_PO[[j]][5],
activation_func[k],learning_rate[l],result__value_PO[[j]][3])
# Result_USD_HOMO_LIST_70[[count]] <- list(predictor_order[i],neurons[j],learning_rate[l],
# activation_func[k], result__value_PO[[j]])
count <- count +1
count2 <- count2 +1
}
}
}
}
# Writing result to xlsx file
library(xlsx)
write.xlsx(result_HOMO_USD,"result_HOMO_USD_Train_70.xlsx")
#************************************* GBP **************************************************#
Result_GBP_HOMO_LIST_70 <- list()
count <- 1
count2 <- 1
result_HOMO_GBP <- data.frame("Predictor_Order"=numeric(),"Neurons"=numeric(),"RMSE"=numeric(),
                              "MAE"=numeric(),"Activation_Function"=character(),"Learning_Rate"=numeric(),
                              "Fusion_Fuc"=character(),stringsAsFactors=FALSE)
for (i in 1:length(predictor_order)) {
non_normalize_PO <- data.frame()
validate_date_PO <- data.frame()
actual_value_PO <- data.frame()
result__value_PO <- list()
train_dataset[[i]] <- data_set[[2]][[i]][[1]]
validate_dataset[[i]] <- data_set[[2]][[i]][[2]]
validate_date[[i]] <- data_set[[2]][[i]][[4]]
non_normalize[[i]]<- data_set[[2]][[i]][[6]]
actual_value[[i]] <- validate_dataset[[i]][,i+3]
 neurons <- seq(floor(predictor_order[i]/2) + 1, 20, 1)
for (l in 1:length(learning_rate) ){
for (k in 1:length(activation_func)) {
for (j in 1:length(neurons)) {
train_dataset_PO <- train_dataset[[i]]
validate_dataset_PO <- validate_dataset[[i]]
validate_date_PO <- validate_date[[i]]
non_normalize_PO <- non_normalize[[i]]
actual_value_PO <- actual_value[[i]]
result__value_PO[[j]] <- HOMO(train_dataset_PO,validate_dataset_PO, non_normalize_PO,
neurons = neurons[j], predictor_order[i],
activation_func[k],learning_rate[l])
result_HOMO_GBP[count2,] <-c(predictor_order[i],neurons[j],
result__value_PO[[j]][4],result__value_PO[[j]][5],
activation_func[k],learning_rate[l],result__value_PO[[j]][3])
# Result_GBP_HOMO_LIST_70[[count]] <- list(predictor_order[i],neurons[j],learning_rate[l],
# activation_func[k], result__value_PO[[j]])
count <- count +1
count2 <- count2 +1
}
}
}
}
# Writing result to xlsx file
write.xlsx(result_HOMO_GBP,"result_HOMO_GBP_Train_70.xlsx")
#************************************* EUR **************************************************#
Result_EUR_HOMO_LIST_70 <- list()
count <- 1
count2 <- 1
result_HOMO_EUR <- data.frame("Predictor_Order"=numeric(),"Neurons"=numeric(),"RMSE"=numeric(),
                              "MAE"=numeric(),"Activation_Function"=character(),"Learning_Rate"=numeric(),
                              "Fusion_Fuc"=character(),stringsAsFactors=FALSE)
for (i in 1:length(predictor_order)) {
non_normalize_PO <- data.frame()
validate_date_PO <- data.frame()
actual_value_PO <- data.frame()
result__value_PO <- list()
train_dataset[[i]] <- data_set[[3]][[i]][[1]]
validate_dataset[[i]] <- data_set[[3]][[i]][[2]]
validate_date[[i]] <- data_set[[3]][[i]][[4]]
non_normalize[[i]]<- data_set[[3]][[i]][[6]]
actual_value[[i]] <- validate_dataset[[i]][,i+3]
 neurons <- seq(floor(predictor_order[i]/2) + 1, 20, 1)
for (l in 1:length(learning_rate) ){
for (k in 1:length(activation_func)) {
for (j in 1:length(neurons)) {
train_dataset_PO <- train_dataset[[i]]
validate_dataset_PO <- validate_dataset[[i]]
validate_date_PO <- validate_date[[i]]
non_normalize_PO <- non_normalize[[i]]
actual_value_PO <- actual_value[[i]]
result__value_PO[[j]] <- HOMO(train_dataset_PO,validate_dataset_PO, non_normalize_PO,
neurons = neurons[j], predictor_order[i], activation_func[k],learning_rate[l])
result_HOMO_EUR[count2,] <-c(predictor_order[i],neurons[j],
result__value_PO[[j]][4],result__value_PO[[j]][5],
activation_func[k],learning_rate[l],result__value_PO[[j]][3])
# Result_EUR_HOMO_LIST_70[[count]] <- list(predictor_order[i],neurons[j],learning_rate[l],
# activation_func[k], result__value_PO[[j]])
count <- count +1
count2 <- count2 +1
}
}
}
}
# Writing result to xlsx file
write.xlsx(result_HOMO_EUR,"result_HOMO_EUR_Train_70.xlsx")
#****************************************** CHF ***********************************************#
Result_CHF_HOMO_LIST_70 <- list()
count <- 1
count2 <- 1
result_HOMO_CHF <- data.frame("Predictor_Order"=numeric(),"Neurons"=numeric(),"RMSE"=numeric(),
                              "MAE"=numeric(),"Activation_Function"=character(),"Learning_Rate"=numeric(),
                              "Fusion_Fuc"=character(),stringsAsFactors=FALSE)
for (i in 1:length(predictor_order)) {
non_normalize_PO <- data.frame()
validate_date_PO <- data.frame()
actual_value_PO <- data.frame()
result__value_PO <- list()
train_dataset[[i]] <- data_set[[4]][[i]][[1]]
validate_dataset[[i]] <- data_set[[4]][[i]][[2]]
validate_date[[i]] <- data_set[[4]][[i]][[4]]
non_normalize[[i]]<- data_set[[4]][[i]][[6]]
actual_value[[i]] <- validate_dataset[[i]][,i+3]
 neurons <- seq(floor(predictor_order[i]/2) + 1, 20, 1)
for (l in 1:length(learning_rate) ){
for (k in 1:length(activation_func)) {
for (j in 1:length(neurons)) {
train_dataset_PO <- train_dataset[[i]]
validate_dataset_PO <- validate_dataset[[i]]
validate_date_PO <- validate_date[[i]]
non_normalize_PO <- non_normalize[[i]]
actual_value_PO <- actual_value[[i]]
result__value_PO[[j]] <- HOMO(train_dataset_PO,validate_dataset_PO, non_normalize_PO,
neurons = neurons[j], predictor_order[i], activation_func[k],learning_rate[l])
     result_HOMO_CHF[count2,] <- c(predictor_order[i],neurons[j],
result__value_PO[[j]][4],result__value_PO[[j]][5],
activation_func[k],learning_rate[l],result__value_PO[[j]][3])
#Result_EUR_HOMO_LIST_70[[count]] <- list(predictor_order[i],neurons[j],learning_rate[l],
# activation_func[k], result__value_PO[[j]])
count <- count +1
count2 <- count2 +1
}
}
}
}
# Writing result to xlsx file
write.xlsx(result_HOMO_CHF,"result_HOMO_CHF_Train_70.xlsx")
#****************************************** AUD ***********************************************#
Result_AUD_HOMO_LIST_70 <- list()
count <- 1
count2 <- 1
result_HOMO_AUD <- data.frame("Predictor_Order"=numeric(),"Neurons"=numeric(),"RMSE"=numeric(),
                              "MAE"=numeric(),"Activation_Function"=character(),"Learning_Rate"=numeric(),
                              "Fusion_Fuc"=character(),stringsAsFactors=FALSE)
for (i in 1:length(predictor_order)) {
non_normalize_PO <- data.frame()
validate_date_PO <- data.frame()
actual_value_PO <- data.frame()
result__value_PO <- list()
train_dataset[[i]] <- data_set[[5]][[i]][[1]]
validate_dataset[[i]] <- data_set[[5]][[i]][[2]]
validate_date[[i]] <- data_set[[5]][[i]][[4]]
non_normalize[[i]]<- data_set[[5]][[i]][[6]]
actual_value[[i]] <- validate_dataset[[i]][,i+3]
 neurons <- seq(floor(predictor_order[i]/2) + 1, 20, 1)
for (l in 1:length(learning_rate) ){
for (k in 1:length(activation_func)) {
for (j in 1:length(neurons)) {
train_dataset_PO <- train_dataset[[i]]
validate_dataset_PO <- validate_dataset[[i]]
validate_date_PO <- validate_date[[i]]
non_normalize_PO <- non_normalize[[i]]
actual_value_PO <- actual_value[[i]]
result__value_PO[[j]] <- HOMO(train_dataset_PO,validate_dataset_PO, non_normalize_PO,
neurons = neurons[j], predictor_order[i], activation_func[k],learning_rate[l])
result_HOMO_AUD[count2,] <-c(predictor_order[i],neurons[j],
result__value_PO[[j]][4],result__value_PO[[j]][5],
activation_func[k],learning_rate[l],result__value_PO[[j]][3])
# Result_AUD_HOMO_LIST_70[[count]] <- list(predictor_order[i],neurons[j],learning_rate[l],
# activation_func[k], result__value_PO[[j]])
count <- count +1
count2 <- count2 +1
}
# Writing to xlsx file
#write <- paste0("result_HOMO_usd_PO_",i,"_LF_",activation_func[k],"_LR_",learning_rate[l],".xlsx")
#write.xlsx(result_HOMO_usd_PO3, write)
}
}
}
# Writing result to xlsx file
write.xlsx(result_HOMO_AUD,"result_HOMO_AUD_Train_70.xlsx")
#****************************************** CAD ***********************************************#
Result_CAD_HOMO_LIST_70 <- list()
count <- 1
count2 <- 1
result_HOMO_CAD <- data.frame("Predictor_Order"=numeric(),"Neurons"=numeric(),"RMSE"=numeric(),
                              "MAE"=numeric(),"Activation_Function"=character(),"Learning_Rate"=numeric(),
                              "Fusion_Fuc"=character(),stringsAsFactors=FALSE)
for (i in 1:length(predictor_order)) {
non_normalize_PO <- data.frame()
validate_date_PO <- data.frame()
actual_value_PO <- data.frame()
result__value_PO <- list()
train_dataset[[i]] <- data_set[[6]][[i]][[1]]
validate_dataset[[i]] <- data_set[[6]][[i]][[2]]
validate_date[[i]] <- data_set[[6]][[i]][[4]]
non_normalize[[i]]<- data_set[[6]][[i]][[6]]
actual_value[[i]] <- validate_dataset[[i]][,i+3]
 neurons <- seq(floor(predictor_order[i]/2) + 1, 20, 1)
for (l in 1:length(learning_rate) ){
for (k in 1:length(activation_func)) {
for (j in 1:length(neurons)) {
train_dataset_PO <- train_dataset[[i]]
validate_dataset_PO <- validate_dataset[[i]]
validate_date_PO <- validate_date[[i]]
non_normalize_PO <- non_normalize[[i]]
actual_value_PO <- actual_value[[i]]
result__value_PO[[j]] <- HOMO(train_dataset_PO,validate_dataset_PO, non_normalize_PO,
neurons = neurons[j], predictor_order[i], activation_func[k],learning_rate[l])
result_HOMO_CAD[count2,] <-c(predictor_order[i],neurons[j],
result__value_PO[[j]][4],result__value_PO[[j]][5],
activation_func[k],learning_rate[l],result__value_PO[[j]][3])
# Result_CAD_HOMO_LIST_70[[count]] <- list(predictor_order[i],neurons[j],learning_rate[l],
# activation_func[k], result__value_PO[[j]])
count <- count +1
count2 <- count2 +1
}
# Writing to xlsx file
#write <- paste0("result_HOMO_usd_PO_",i,"_LF_",activation_func[k],"_LR_",learning_rate[l],".xlsx")
#write.xlsx(result_HOMO_usd_PO3, write)
}
}
}
# Writing result to xlsx file
write.xlsx(result_HOMO_CAD,"result_HOMO_CAD_Train_70.xlsx")
#****************************************** SGD ********************************************#
Result_SGD_HOMO_LIST_70 <- list()
count <- 1
count2 <- 1
result_HOMO_SGD <- data.frame("Predictor_Order"=numeric(),"Neurons"=numeric(),"RMSE"=numeric(),
                              "MAE"=numeric(),"Activation_Function"=character(),"Learning_Rate"=numeric(),
                              "Fusion_Fuc"=character(),stringsAsFactors=FALSE)
for (i in 1:length(predictor_order)) {
non_normalize_PO <- data.frame()
validate_date_PO <- data.frame()
actual_value_PO <- data.frame()
result__value_PO <- list()
train_dataset[[i]] <- data_set[[7]][[i]][[1]]
validate_dataset[[i]] <- data_set[[7]][[i]][[2]]
validate_date[[i]] <- data_set[[7]][[i]][[4]]
non_normalize[[i]]<- data_set[[7]][[i]][[6]]
actual_value[[i]] <- validate_dataset[[i]][,i+3]
 neurons <- seq(floor(predictor_order[i]/2) + 1, 20, 1)
for (l in 1:length(learning_rate) ){
for (k in 1:length(activation_func)) {
for (j in 1:length(neurons)) {
train_dataset_PO <- train_dataset[[i]]
validate_dataset_PO <- validate_dataset[[i]]
validate_date_PO <- validate_date[[i]]
non_normalize_PO <- non_normalize[[i]]
actual_value_PO <- actual_value[[i]]
result__value_PO[[j]] <- HOMO(train_dataset_PO,validate_dataset_PO, non_normalize_PO,
neurons = neurons[j], predictor_order[i], activation_func[k],learning_rate[l])
result_HOMO_SGD[count2,] <-c(predictor_order[i],neurons[j],
result__value_PO[[j]][4],result__value_PO[[j]][5],
activation_func[k],learning_rate[l],result__value_PO[[j]][3])
# Result_SGD_HOMO_LIST_70[[count]] <- list(predictor_order[i],neurons[j],learning_rate[l],
# activation_func[k], result__value_PO[[j]])
count <- count +1
count2 <- count2 +1
}
}
}
}
# Writing result to xlsx file
write.xlsx(result_HOMO_SGD,"result_HOMO_SGD_Train_70.xlsx")
#************************************************************ HETROGENEOUS *************************************************************************#
source("HETRO.R")
#************************************* USD *****************************************************#
Result_USD_HETRO_LIST_70 <- list()
count <- 1
count2 <- 1
result_HETRO_USD <- data.frame("Predictor Order"= numeric(),"Neurons"= numeric(),"RMSE"=numeric(),
                               "MAE"=numeric(),"Activation Func"= character(),"Learning Rate"=numeric(),
                               "Fusion_Fuc"=character(),stringsAsFactors=F)
for (i in 1:length(predictor_order)) {
non_normalize_PO <- data.frame()
validate_date_PO <- data.frame()
actual_value_PO <- data.frame()
result__value_PO <- list()
train_dataset[[i]] <- data_set[[1]][[i]][[1]]
validate_dataset[[i]] <- data_set[[1]][[i]][[2]]
validate_date[[i]] <- data_set[[1]][[i]][[4]]
non_normalize[[i]]<- data_set[[1]][[i]][[6]]
actual_value[[i]] <- validate_dataset[[i]][,i+3]
 neurons <- seq(floor(predictor_order[i]/2) + 1, 20, 1)
for (l in 1:length(learning_rate) ){
for (k in 1:length(activation_func)) {
for (j in 1:length(neurons)) {
train_dataset_PO <- train_dataset[[i]]
validate_dataset_PO <- validate_dataset[[i]]
validate_date_PO <- validate_date[[i]]
non_normalize_PO <- non_normalize[[i]]
actual_value_PO <- actual_value[[i]]
result__value_PO[[j]] <- HETRO(train_dataset_PO,validate_dataset_PO, non_normalize_PO,
neurons = neurons[j], predictor_order[i], activation_func[k],learning_rate[l])
result_HETRO_USD[count2,] <-c(predictor_order[i],neurons[j],
result__value_PO[[j]][4],result__value_PO[[j]][5],
activation_func[k],learning_rate[l],result__value_PO[[j]][3])
# Result_USD_HETRO_LIST_70[[count]] <- list(predictor_order[i],neurons[j],learning_rate[l],
# activation_func[k], result__value_PO[[j]])
count <- count +1
count2 <- count2 + 1
}
}
}
}
# Writing result to xlsx file
write.xlsx(result_HETRO_USD,"result_HETRO_USD_Train_70.xlsx")
#************************************* GBP ****************************************************#
Result_GBP_HETRO_LIST_70 <- list()
count <- 1
count2 <- 1
result_HETRO_GBP <- data.frame("Predictor Order"= numeric(),"Neurons"= numeric(),"RMSE"=numeric(),
                               "MAE"=numeric(),"Activation Func"= character(),"Learning Rate"=numeric(),
                               "Fusion_Fuc"=character(),stringsAsFactors=F)
for (i in 1:length(predictor_order)) {
non_normalize_PO <- data.frame()
validate_date_PO <- data.frame()
actual_value_PO <- data.frame()
result__value_PO <- list()
train_dataset[[i]] <- data_set[[2]][[i]][[1]]
validate_dataset[[i]] <- data_set[[2]][[i]][[2]]
validate_date[[i]] <- data_set[[2]][[i]][[4]]
non_normalize[[i]]<- data_set[[2]][[i]][[6]]
actual_value[[i]] <- validate_dataset[[i]][,i+3]
 neurons <- seq(floor(predictor_order[i]/2) + 1, 20, 1)
for (l in 1:length(learning_rate) ){
for (k in 1:length(activation_func)) {
for (j in 1:length(neurons)) {
train_dataset_PO <- train_dataset[[i]]
validate_dataset_PO <- validate_dataset[[i]]
validate_date_PO <- validate_date[[i]]
non_normalize_PO <- non_normalize[[i]]
actual_value_PO <- actual_value[[i]]
result__value_PO[[j]] <- HETRO(train_dataset_PO,validate_dataset_PO, non_normalize_PO,
neurons = neurons[j], predictor_order[i], activation_func[k],learning_rate[l])
result_HETRO_GBP[count2,] <-c(predictor_order[i],neurons[j],
result__value_PO[[j]][4],result__value_PO[[j]][5],
activation_func[k],learning_rate[l],result__value_PO[[j]][3])
# Result_GBP_HETRO_LIST_70[[count]] <- list(predictor_order[i],neurons[j],learning_rate[l],
# activation_func[k], result__value_PO[[j]])
count <- count +1
count2 <- count2 + 1
}
}
}
}
# Writing result to xlsx file
write.xlsx(result_HETRO_GBP,"result_HETRO_GBP_Train_70.xlsx")
#************************************* EUR ***************************************************#
Result_EUR_HETRO_LIST_70 <- list()
count <- 1
count2 <- 1
result_HETRO_EUR <- data.frame("Predictor Order"= numeric(),"Neurons"= numeric(),"RMSE"=numeric(),
                               "MAE"=numeric(),"Activation Func"= character(),"Learning Rate"=numeric(),
                               "Fusion_Fuc"=character(),stringsAsFactors=F)
for (i in 1:length(predictor_order)) {
non_normalize_PO <- data.frame()
validate_date_PO <- data.frame()
actual_value_PO <- data.frame()
result__value_PO <- list()
train_dataset[[i]] <- data_set[[3]][[i]][[1]]
validate_dataset[[i]] <- data_set[[3]][[i]][[2]]
validate_date[[i]] <- data_set[[3]][[i]][[4]]
non_normalize[[i]]<- data_set[[3]][[i]][[6]]
actual_value[[i]] <- validate_dataset[[i]][,i+3]
 neurons <- seq(floor(predictor_order[i]/2) + 1, 20, 1)
for (l in 1:length(learning_rate) ){
for (k in 1:length(activation_func)) {
for (j in 1:length(neurons)) {
train_dataset_PO <- train_dataset[[i]]
validate_dataset_PO <- validate_dataset[[i]]
validate_date_PO <- validate_date[[i]]
non_normalize_PO <- non_normalize[[i]]
actual_value_PO <- actual_value[[i]]
result__value_PO[[j]] <- HETRO(train_dataset_PO,validate_dataset_PO, non_normalize_PO,
neurons = neurons[j], predictor_order[i], activation_func[k],learning_rate[l])
result_HETRO_EUR[count2,] <-c(predictor_order[i],neurons[j],
result__value_PO[[j]][4],result__value_PO[[j]][5],
activation_func[k],learning_rate[l],result__value_PO[[j]][3])
# Result_EUR_HETRO_LIST_70[[count]] <- list(predictor_order[i],neurons[j],learning_rate[l],
# activation_func[k], result__value_PO[[j]])
count <- count +1
count2 <- count2 + 1
}
}
}
}
# Writing result to xlsx file
write.xlsx(result_HETRO_EUR,"result_HETRO_EUR_Train_70.xlsx")
#****************************************** CHF ***********************************************#
Result_CHF_HETRO_LIST_70 <- list()
count <- 1
count2 <- 1
result_HETRO_CHF <- data.frame("Predictor Order"= numeric(),"Neurons"= numeric(),"RMSE"=numeric(),
                               "MAE"=numeric(),"Activation Func"= character(),"Learning Rate"=numeric(),
                               "Fusion_Fuc"=character(),stringsAsFactors=F)
for (i in 1:length(predictor_order)) {
non_normalize_PO <- data.frame()
validate_date_PO <- data.frame()
actual_value_PO <- data.frame()
result__value_PO <- list()
train_dataset[[i]] <- data_set[[4]][[i]][[1]]
validate_dataset[[i]] <- data_set[[4]][[i]][[2]]
validate_date[[i]] <- data_set[[4]][[i]][[4]]
non_normalize[[i]]<- data_set[[4]][[i]][[6]]
actual_value[[i]] <- validate_dataset[[i]][,i+3]
 neurons <- seq(floor(predictor_order[i]/2) + 1, 20, 1)
for (l in 1:length(learning_rate) ){
for (k in 1:length(activation_func)) {
for (j in 1:length(neurons)) {
train_dataset_PO <- train_dataset[[i]]
validate_dataset_PO <- validate_dataset[[i]]
validate_date_PO <- validate_date[[i]]
non_normalize_PO <- non_normalize[[i]]
actual_value_PO <- actual_value[[i]]
result__value_PO[[j]] <- HETRO(train_dataset_PO,validate_dataset_PO, non_normalize_PO,
neurons = neurons[j], predictor_order[i], activation_func[k],learning_rate[l])
result_HETRO_CHF[count2,] <-c(predictor_order[i],neurons[j],
result__value_PO[[j]][4],result__value_PO[[j]][5],
activation_func[k],learning_rate[l],result__value_PO[[j]][3])
#Result_CHF_HETRO_LIST_70[[count]] <- list(predictor_order[i],neurons[j],learning_rate[l],
# activation_func[k], result__value_PO[[j]])
count <- count +1
count2 <- count2 + 1
}
}
}
}
# Writing result to xlsx file
write.xlsx(result_HETRO_CHF,"result_HETRO_CHF_Train_70.xlsx")
#****************************************** AUD **********************************************#
Result_AUD_HETRO_LIST_70 <- list()
count <- 1
count2 <- 1
result_HETRO_AUD <- data.frame("Predictor Order"= numeric(),"Neurons"= numeric(),"RMSE"=numeric(),
                               "MAE"=numeric(),"Activation Func"= character(),"Learning Rate"=numeric(),
                               "Fusion_Fuc"=character(),stringsAsFactors=F)
for (i in 1:length(predictor_order)) {
non_normalize_PO <- data.frame()
validate_date_PO <- data.frame()
actual_value_PO <- data.frame()
result__value_PO <- list()
train_dataset[[i]] <- data_set[[5]][[i]][[1]]
validate_dataset[[i]] <- data_set[[5]][[i]][[2]]
validate_date[[i]] <- data_set[[5]][[i]][[4]]
non_normalize[[i]]<- data_set[[5]][[i]][[6]]
actual_value[[i]] <- validate_dataset[[i]][,i+3]
 neurons <- seq(floor(predictor_order[i]/2) + 1, 20, 1)
for (l in 1:length(learning_rate) ){
for (k in 1:length(activation_func)) {
for (j in 1:length(neurons)) {
train_dataset_PO <- train_dataset[[i]]
validate_dataset_PO <- validate_dataset[[i]]
validate_date_PO <- validate_date[[i]]
non_normalize_PO <- non_normalize[[i]]
actual_value_PO <- actual_value[[i]]
result__value_PO[[j]] <- HETRO(train_dataset_PO,validate_dataset_PO, non_normalize_PO,
neurons = neurons[j], predictor_order[i], activation_func[k],learning_rate[l])
result_HETRO_AUD[count2,] <-c(predictor_order[i],neurons[j],
result__value_PO[[j]][4],result__value_PO[[j]][5],
activation_func[k],learning_rate[l],result__value_PO[[j]][3])
# Result_AUD_HETRO_LIST_70[[count]] <- list(predictor_order[i],neurons[j],learning_rate[l],
# activation_func[k], result__value_PO[[j]])
count <- count +1
count2 <- count2 + 1
}
}
}
}
# Writing result to xlsx file
write.xlsx(result_HETRO_AUD,"result_HETRO_AUD_Train_70.xlsx")
#****************************************** CAD **********************************************#
Result_CAD_HETRO_LIST_70 <- list()
count <- 1
count2 <- 1
result_HETRO_CAD <- data.frame("Predictor Order"= numeric(),"Neurons"= numeric(),"RMSE"=numeric(),
                               "MAE"=numeric(),"Activation Func"= character(),"Learning Rate"=numeric(),
                               "Fusion_Fuc"=character(),stringsAsFactors=F)
for (i in 1:length(predictor_order)) {
non_normalize_PO <- data.frame()
validate_date_PO <- data.frame()
actual_value_PO <- data.frame()
result__value_PO <- list()
train_dataset[[i]] <- data_set[[6]][[i]][[1]]
validate_dataset[[i]] <- data_set[[6]][[i]][[2]]
validate_date[[i]] <- data_set[[6]][[i]][[4]]
non_normalize[[i]]<- data_set[[6]][[i]][[6]]
actual_value[[i]] <- validate_dataset[[i]][,i+3]
if(predictor_order[i]==3){ neurons<-seq(2,20,1)}
if(predictor_order[i]==4){ neurons<-seq(3,20,1)}
if(predictor_order[i]==5){ neurons<-seq(3,20,1)}
if(predictor_order[i]==6){ neurons<-seq(4,20,1)}
if(predictor_order[i]==7){ neurons<-seq(4,20,1)}
if(predictor_order[i]==8){ neurons<-seq(5,20,1)}
if(predictor_order[i]==9){ neurons<-seq(5,20,1)}
if(predictor_order[i]==10){ neurons<-seq(6,20,1)}
for (l in 1:length(learning_rate) ){
for (k in 1:length(activation_func)) {
for (j in 1:length(neurons)) {
train_dataset_PO <- train_dataset[[i]]
validate_dataset_PO <- validate_dataset[[i]]
validate_date_PO <- validate_date[[i]]
non_normalize_PO <- non_normalize[[i]]
actual_value_PO <- actual_value[[i]]
result__value_PO[[j]] <- HETRO(train_dataset_PO,validate_dataset_PO, non_normalize_PO,
neurons = neurons[j], predictor_order[i], activation_func[k],learning_rate[l])
result_HETRO_CAD[count2,] <-c(predictor_order[i],neurons[j],
result__value_PO[[j]][4],result__value_PO[[j]][5],
activation_func[k],learning_rate[l],result__value_PO[[j]][3])
# Result_CAD_HETRO_LIST_70[[count]] <- list(predictor_order[i],neurons[j],learning_rate[l],
# activation_func[k], result__value_PO[[j]])
count <- count +1
count2 <- count2 + 1
}
}
}
}
# Writing result to xlsx file
write.xlsx(result_HETRO_CAD,"result_HETRO_CAD_Train_70.xlsx")
#****************************************** SGD ********************************************#
Result_SGD_HETRO_LIST_70 <- list()
count <- 1
count2 <- 1
for (i in 1:length(predictor_order)) {
result_HETRO_SGD <- data.frame("Predictor Order"= numeric(),"Neurons"= numeric(),"RMSE"=numeric(),
"MAE"=numeric(),"Activation Func"= character(),"Learning Rate"=numeric(),
"Fusion_Fuc"=character(),stringsAsFactors=F)
non_normalize_PO <- data.frame()
validate_date_PO <- data.frame()
actual_value_PO <- data.frame()
result__value_PO <- list()
train_dataset[[i]] <- data_set[[7]][[i]][[1]]
validate_dataset[[i]] <- data_set[[7]][[i]][[2]]
validate_date[[i]] <- data_set[[7]][[i]][[4]]
non_normalize[[i]]<- data_set[[7]][[i]][[6]]
actual_value[[i]] <- validate_dataset[[i]][,i+3]
if(predictor_order[i]==3){ neurons<-seq(2,20,1)}
if(predictor_order[i]==4){ neurons<-seq(3,20,1)}
if(predictor_order[i]==5){ neurons<-seq(3,20,1)}
if(predictor_order[i]==6){ neurons<-seq(4,20,1)}
if(predictor_order[i]==7){ neurons<-seq(4,20,1)}
if(predictor_order[i]==8){ neurons<-seq(5,20,1)}
if(predictor_order[i]==9){ neurons<-seq(5,20,1)}
if(predictor_order[i]==10){ neurons<-seq(6,20,1)}
for (l in 1:length(learning_rate) ){
for (k in 1:length(activation_func)) {
for (j in 1:length(neurons)) {
train_dataset_PO <- train_dataset[[i]]
validate_dataset_PO <- validate_dataset[[i]]
validate_date_PO <- validate_date[[i]]
non_normalize_PO <- non_normalize[[i]]
actual_value_PO <- actual_value[[i]]
result__value_PO[[j]] <- HETRO(train_dataset_PO,validate_dataset_PO, non_normalize_PO,
neurons = neurons[j], predictor_order[i], activation_func[k],learning_rate[l])
result_HETRO_SGD[count2,] <-c(predictor_order[i],neurons[j],
result__value_PO[[j]][4],result__value_PO[[j]][5],
activation_func[k],learning_rate[l],result__value_PO[[j]][3])
# Result_SGD_HETRO_LIST_70[[count]] <- list(predictor_order[i],neurons[j],learning_rate[l],
# activation_func[k], result__value_PO[[j]])
count <- count +1
count2 <- count2 + 1
}
}
}
}
# Writing result to xlsx file
write.xlsx(result_HETRO_SGD,"result_HETRO_SGD_Train_70.xlsx")
#################################################################################################################################################
\name{lhsmaximin}
\alias{lhsmaximin}
\title{
Initialization of cluster prototypes using Maximin LHS
}
\description{
Initializes the cluster prototypes matrix using the Maximin version of Latin Hypercube Sampling (LHS). A square grid containing possible sample points is a Latin Square (LS) if there is only one sample in each row and each column. LHS is a generalized version of LS, which has been developed to generate a distribution of collections of parameter values from a multidimensional distribution. LHS generates more efficient estimates of desired parameters than simple Monte Carlo sampling (Carnell, 2016).
}
\usage{
lhsmaximin(x, k, ncp)
}
\arguments{
\item{x}{a numeric vector, data frame or matrix.}
\item{k}{an integer specifying the number of clusters.}
\item{ncp}{an integer determining the number of candidate points used in the search by the maximin LHS algorithm.}
}
\details{
LHS aims at initial cluster centers whose coordinates are well spread out in the individual dimensions (Borgelt, 2005). It is the generalization of the Latin Square for an arbitrary number of dimensions (features). When sampling a function of \var{p} features, the range of each feature is divided into \var{k} equally probable intervals. \var{k} samples are then drawn such that a Latin Hypercube is created.
The current version of the function \code{lhsmaximin} in this package uses the results from the \code{\link[lhs]{maximinLHS}} function of the \sQuote{\pkg{lhs}} library created by Carnell (2016). Once the uniform samples are created by \code{\link[lhs]{maximinLHS}}, they are transformed into normally distributed samples by using the quantile function. However, the features in a data set may not all be normally distributed; instead, they may follow different distributions. In such cases, the transformation for a feature should be specific to its distribution. Determination of the distribution types of the features is planned for future versions of the function \sQuote{\code{lhsmaximin}}.
}
\value{an object of class \sQuote{inaparc}, which is a list consisting of the following items:
\item{v}{a numeric matrix containing the initial cluster prototypes.}
\item{ctype}{a string for the type of used centroid to determine the cluster prototypes. It is \sQuote{obj} with this function.}
\item{call}{a string containing the matched function call that generates this \sQuote{inaparc} object.}
}
\author{
Zeynel Cebeci, Cagatay Cebeci
}
\references{
Borgelt, C., (2005). \emph{Prototype-based classification and clustering}. Habilitationsschrift zur Erlangung der Venia legendi fuer Informatik, vorgelegt der Fakultaet fuer Informatik der Otto-von-Guericke-Universitaet Magdeburg, Magdeburg, 22 June 2005. url:\url{https://borgelt.net/habil/pbcc.pdf}
Carnell, R., (2016). lhs: Latin Hypercube Samples. R package version 0.14. \url{https://CRAN.R-project.org/package=lhs}
}
\seealso{
\code{\link{aldaoud}},
\code{\link{ballhall}},
\code{\link{crsamp}},
\code{\link{firstk}},
\code{\link{forgy}},
\code{\link{hartiganwong}},
\code{\link{inofrep}},
\code{\link{inscsf}},
\code{\link{insdev}},
\code{\link{kkz}},
\code{\link{kmpp}},
\code{\link{ksegments}},
\code{\link{ksteps}},
\code{\link{lastk}},
\code{\link{lhsrandom}},
\code{\link{maximin}},
\code{\link{mscseek}},
\code{\link{rsamp}},
\code{\link{rsegment}},
\code{\link{scseek}},
\code{\link{scseek2}},
\code{\link{spaeth}},
\code{\link{ssamp}},
\code{\link{topbottom}},
\code{\link{uniquek}},
\code{\link{ursamp}}
}
\examples{
data(iris)
res <- lhsmaximin(iris[,1:4], k=5)
v <- res$v
print(v)
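# A hypothetical variant (not run here): enlarge the candidate pool searched
# res2 <- lhsmaximin(iris[,1:4], k=5, ncp=10)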
}
\concept{latin hypercube sampling}
\concept{initialization of cluster prototypes}
\concept{sampling for prototype selection}
\concept{prototype-based clustering}
\concept{partitioning clustering}
\concept{cluster analysis}
\concept{unsupervised learning}
\keyword{cluster}
context("Filters function")
library(vcfR)
test_that("number of distorted markers",{
check_dist <- function(example_data, table.h0){
eval(bquote(data(.(example_data))))
segre <- eval(bquote(test_segregation(get(.(example_data)), simulate.p.value = T)))
segre_tab <- print(segre)
eval(bquote(expect_equal(as.vector(table(segre_tab$H0)), .(table.h0))))
expect_equal(length(select_segreg(segre, distorted = T, numbers = T)), sum(segre_tab$`p-value` < 0.05/length(segre_tab$Marker)))
expect_equal(length(select_segreg(segre, distorted = T, threshold = 0.01, numbers = T)), sum(segre_tab$`p-value` < 0.01/length(segre_tab$Marker)))
}
check_dist("onemap_example_out", c(12,8,8,2))
check_dist("onemap_example_f2", c(36,30))
check_dist("onemap_example_bc", c(67))
check_dist("onemap_example_riself", c(68))
})
test_that("number of bins",{
check_bins <- function(example_data, n.mar){
eval(bquote(data(.(example_data))))
bins <- eval(bquote(find_bins(get(.(example_data)))))
onemap_bins <- eval(bquote(create_data_bins(input.obj = get(.(example_data)), bins)))
eval(bquote(expect_equal(check_data(onemap_bins),0)))
eval(bquote(expect_equal(onemap_bins$n.mar, .(n.mar))))
}
check_bins("vcf_example_f2", 24)
check_bins("vcf_example_out", 23)
check_bins("vcf_example_bc", 25)
check_bins("vcf_example_riself",25)
data("vcf_example_out")
bins <- find_bins(vcf_example_out)
onemap_bins <- create_data_bins(vcf_example_out, bins)
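# Push the binned data through the standard mapping pipeline:
# two-point rf estimation -> grouping -> ordering -> map construction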
twopts <- rf_2pts(onemap_bins)
lgs <- group(make_seq(twopts, "all"))
lg1 <- make_seq(lgs,1)
# Test edit_order_onemap - interactive
# input.obj <- edit_order_onemap(input.seq = lg1)
# seq_edit <- make_seq(input.obj)
map1 <- map(lg1)
map2 <- map(make_seq(lgs,4))
# Test save sequences
maps.list <- list(map1, map2)
save_onemap_sequences(sequences.list = maps.list, filename = "test.RData")
save(maps.list, file = "test2.RData")
# Test load sequences
maps.list.load <- load_onemap_sequences(filename = "test.RData")
# Test plot_genome_vs_cm
p <- plot_genome_vs_cm(map.list = map1, group.names = "LG2")
# Test summary_maps_onemap
df <- summary_maps_onemap(map.list = list(map1, map2))
expect_equal(df$map_length[3], 159.7943, 0.1)
ord1 <- ord_by_geno(make_seq(twopts, "all"))
ord2 <- ord_by_geno(map2)
expect_equal(ord1$seq.num, 1:23)
expect_equal(ord2$seq.num, 15:23)
# Test add_redundants
map_red <- add_redundants(sequence = map1,
onemap.obj = vcf_example_out, bins)
expect_equal(length(map_red$seq.num) - length(map1$seq.num), 1)
})
test_that("number of missing data",{
check_missing <- function(example_data, n.mar,n.ind){
eval(bquote(data(.(example_data))))
onemap_mis <- eval(bquote(filter_missing(get(.(example_data)), 0.5)))
eval(bquote(expect_equal(check_data(onemap_mis), 0)))
eval(bquote(expect_equal(onemap_mis$n.mar, .(n.mar))))
onemap_mis <- eval(bquote(filter_missing(get(.(example_data)), 0.5, by = "individuals")))
eval(bquote(expect_equal(check_data(onemap_mis), 0)))
eval(bquote(expect_equal(onemap_mis$n.ind, .(n.ind))))
}
check_missing(example_data = "vcf_example_f2", n.mar = 25, n.ind = 191)
check_missing("onemap_example_riself", 64, 100)
check_missing("onemap_example_out", 30, 100)
check_missing("onemap_example_bc", 67,150)
})
test_that("number of repeated ID markers",{
check_dupli <- function(example_data, n.mar){
eval(bquote(data(.(example_data))))
onemap_dupli <- eval(bquote(rm_dupli_mks(get(.(example_data)))))
eval(bquote(expect_equal(check_data(onemap_dupli), 0)))
eval(bquote(expect_equal(onemap_dupli$n.mar, .(n.mar))))
}
check_dupli("vcf_example_f2", 25)
check_dupli("onemap_example_riself", 68)
check_dupli("onemap_example_out", 30)
check_dupli("onemap_example_bc", 67)
})
test_that("filter probs",{
onemap.obj <- onemap_read_vcfR(system.file("extdata/vcf_example_out.vcf.gz", package = "onemap"),
parent1 = "P1", parent2 = "P2", cross = "outcross")
vcfR.object <- read.vcfR(system.file("extdata/vcf_example_out.vcf.gz", package = "onemap"))
gq <- extract_depth(vcfR.object = vcfR.object,
onemap.object = onemap.obj,
vcf.par = "GQ",
parent1 = "P1",
parent2 = "P2")
onemap.prob <- create_probs(onemap.obj, genotypes_errors = gq)
onemap.filt <- filter_prob(onemap.prob, threshold = 0.999999999)
onemap.mis <- filter_missing(onemap.filt, threshold = 0.10)
expect_equal(onemap.mis$n.mar, 22)
pl <- extract_depth(vcfR.object = vcfR.object,
onemap.object = onemap.obj,
vcf.par = "PL",
parent1 = "P1",
parent2 = "P2")
onemap.prob <- create_probs(onemap.obj, genotypes_probs = pl)
onemap.filt <- filter_prob(onemap.prob, threshold = 0.9)
onemap.mis <- filter_missing(onemap.filt, threshold = 0.10)
expect_equal(onemap.mis$n.mar, 22)
})
library(rpart)
library(rpart.plot)
library(caret)
library(e1071)
library(caret)
library(Rcpp)
library(mice)
train_orig <- read.csv("competition_second_train.csv", header = FALSE)
test_orig <- read.csv("competition_second_test.csv", header = FALSE)
# `train` and `test` are referenced below but never created; alias the loaded data.
train <- train_orig
test <- test_orig
set.seed(123)
newf <- subset(train, select = c(V24,V25,V27,V28,V37,V50,V54,V59,V60,V68))
set.seed(125)
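# NOTE: `full` and `var` are assumed to be created elsewhere (e.g.,
# full <- rbind(train, test) and var a vector of imputation columns);
# neither is defined in this script.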
imputed_newf = complete(mice(full[var]))
full$V39[is.na(full$V39)] = "SBrkr"
lin_reg <- lm(V76 ~ ., data = train)
pred <- predict(lin_reg, newdata = test)
new <- subset(train, select = -c(V20,V21,V22,V14,V30,V35,V43,V36,V37,V38,V39,V55,V71,V72,V73))
new <- subset(new, select = -c(V1))
#Using Decision Tree With Cross Validation
fitControl = trainControl(method = "CV", number = 10)
CartGrid = expand.grid(.cp = (1:50)*0.01)
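# 10-fold cross-validation over the pruning parameter cp (0.01 to 0.50)
# to choose the tree complexity before refitting below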
train(V76~.,data = new,method = "rpart",trControl = fitControl, tuneGrid = CartGrid)
treeCV = rpart(V76~.,method = "anova", data = new,control = rpart.control(cp = 0.01))
#Decision Tree
prp(treeCV)
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/ma_wrappers.R
\name{ma_wrapper}
\alias{ma_wrapper}
\title{Wrapper function to compute meta-analytic results for all analyses.}
\usage{
ma_wrapper(es_data, es_type = "r", ma_type = "bb", ma_fun,
moderator_matrix = NULL, moderator_type = "all",
cat_moderators = TRUE, construct_x = NULL, construct_y = NULL,
ma_arg_list, ...)
}
\arguments{
\item{es_data}{Matrix of effect-size data.}
\item{es_type}{Effect-size type (e.g., "r" or "d")}
\item{ma_type}{The meta-analysis type: "bb" or "individual_correction".}
\item{ma_fun}{Meta-analysis function to be used in computing meta-analytic results.}
\item{moderator_matrix}{Matrix (or vector) of moderator variables.}
\item{moderator_type}{Type of moderator analysis: "none" means that no moderators are to be used, "simple" means that moderators are to be examined one at a time,
"hierarchical" means that all possible combinations and subsets of moderators are to be examined, and "all" means that simple and hierarchical moderator analyses are to be performed.}
\item{cat_moderators}{Logical vector identifying whether each variable in the moderator_matrix is a categorical variable (TRUE) or a continuous variable (FALSE).}
\item{construct_x}{Vector of construct names for construct X.}
\item{construct_y}{Vector of construct names for construct Y.}
\item{ma_arg_list}{List of arguments to be passed to the meta-analysis function.}
\item{...}{Further arguments.}
}
\value{
A list of meta-analytic results.
}
\description{
Wrapper function to compute meta-analytic results for all analyses.
}
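\examples{
\dontrun{
## Hypothetical sketch only: ma_fun and the effect-size data are supplied by the
## calling meta-analysis routine; none of the objects below are defined here.
# ma_wrapper(es_data = dat, es_type = "r", ma_type = "bb", ma_fun = my_ma_fun,
#            moderator_matrix = NULL, ma_arg_list = list())
}
}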
\keyword{internal}
library(tidyverse)
library(stringr)
#read the EddyPro output, skipping the first row and treating blanks and the -9999 sentinel codes as missing
eddy=read_csv("eddypro.csv", skip=1, na=c(" ","NA","-9999","-9999.0"), comment="[")
eddy=eddy[-1,] #drop the residual units row
eddy=select(eddy, -roll) #the roll column is not needed
eddy=eddy %>% mutate_if(is.character, factor) #recode character columns as factors
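#quick check (sketch, not in the original script): confirm the -9999
#sentinels became NA and former character columns are now factors
#summary(eddy)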
| /work 2.R | no_license | Chalwe-jeff/cobby | R | false | false | 219 | r |
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/DataDocumentation.R
\docType{data}
\name{Summary_Use_2013_PRO_AfterRedef}
\alias{Summary_Use_2013_PRO_AfterRedef}
\title{Summary 2013 Use Producer's Value After Redefinition (2012 schema)}
\format{
A dataframe with 79 obs. and 94 variables
}
\source{
\url{https://edap-ord-data-commons.s3.amazonaws.com/useeior/AllTablesIO.zip}
}
\usage{
Summary_Use_2013_PRO_AfterRedef
}
\description{
Summary 2013 Use Producer's Value After Redefinition (2012 schema)
}
\keyword{datasets}
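% Quick check (illustrative, added here), consistent with the \format block above:
% dim(Summary_Use_2013_PRO_AfterRedef)   # 79 rows, 94 columns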
| /man/Summary_Use_2013_PRO_AfterRedef.Rd | permissive | USEPA/useeior | R | false | true | 552 | rd |
#' \code{prepCR} preps the concentration-response matrix for a single chemical
#'
#' @param dat data.table
#' @param chem value of \code{code} column to specify the row
#' @param conclist numeric vector
#' @param nassay integer length 1
#' @param modl_ga_cols character vector
#' @param modl_tp_cols character vector
#' @param modl_gw_cols character vector
#'
#' @details ac50,top,w are vectors of length 18 with values for one instance of one chemical
#'
#' expect top in range [0,100]
#' ATG = log foldchange top * 25
#'
#' @return cr.mat matrix This returns the concentration-response matrix
prepCR <- function(dat, chem, conclist, nassay, modl_ga_cols, modl_tp_cols, modl_gw_cols) {
  # per-assay Hill parameters for the requested chemical
  ac50 <- as.numeric(dat[code == chem, modl_ga_cols, with = FALSE])
  top <- as.numeric(dat[code == chem, modl_tp_cols, with = FALSE])
  w <- as.numeric(dat[code == chem, modl_gw_cols, with = FALSE])
  cr.mat <- matrix(data = 0, nrow = length(conclist), ncol = nassay)
  for(i in seq_along(conclist)) {
    conc <- conclist[i]
    for(j in seq_len(nassay)) {
      ac50j <- as.numeric(ac50[j])
      tj <- as.numeric(top[j])
      wj <- as.numeric(w[j])
      # Hill model: response = top * conc^w / (conc^w + AC50^w)
      cr.mat[i,j] <- tj*(conc**wj/(conc**wj+ac50j**wj))
    }
  }
  return(cr.mat)
}
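# A minimal sketch (not from the source repo) of exercising prepCR(); the
# column names below are invented for illustration.
library(data.table)
demo_dat <- data.table(code = "CHEM1",
                       ga_A1 = 1,  ga_A2 = 10,   # AC50s
                       tp_A1 = 80, tp_A2 = 50,   # tops
                       gw_A1 = 1,  gw_A2 = 2)    # Hill coefficients
demo_cr <- prepCR(demo_dat, chem = "CHEM1",
                  conclist = 10^seq(-2, 2), nassay = 2,
                  modl_ga_cols = c("ga_A1", "ga_A2"),
                  modl_tp_cols = c("tp_A1", "tp_A2"),
                  modl_gw_cols = c("gw_A1", "gw_A2"))
# demo_cr is a 5 x 2 matrix of Hill-model responses (rows = concentrations)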
| /R/prepCR.R | no_license | rnaimehaom/eapath | R | false | false | 1,223 | r |
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/multiplot.R
\name{multiplot}
\alias{multiplot}
\title{Multiple plot function}
\usage{
multiplot(..., plotlist = NULL, file, cols = 1, layout = NULL)
}
\arguments{
\item{plotlist}{A list of ggplot objects, as an alternative to passing the plots individually. Defaults to \code{NULL}.}
}
\description{
ggplot objects can be passed in ..., or to plotlist (as a list of ggplot objects)
- cols: Number of columns in layout
- layout: A matrix specifying the layout. If present, 'cols' is ignored.
If the layout is something like matrix(c(1,2,3,3), nrow=2, byrow=TRUE),
then plot 1 will go in the upper left, 2 will go in the upper right, and
3 will go all the way across the bottom.
}
\examples{
multiplot()
}
\keyword{multiplot}
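% Illustrative call (assumed from the layout description above; p1-p3 stand
% for ggplot objects):
% multiplot(p1, p2, p3, layout = matrix(c(1, 2, 3, 3), nrow = 2, byrow = TRUE))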
| /man/multiplot.Rd | no_license | ishspsy/sake | R | false | true | 706 | rd |
#install all the packages required
install.packages("stringr")
install.packages("readr")
install.packages("dplyr")
install.packages("tidyverse")
install.packages("ggplot")
install.packages("reshape")
install.packages("devtools")
install_github('jMotif/jmotif-R')
#once all are installed, load the libraries
library(stringr)
library(readr)
library(dplyr)
library(tidyverse)
library(ggplot2)
library(reshape)
library(devtools)
library(jmotif)
##################DATA CLEANING########################
#insert all the stocks text file
stock1 <- read_delim("stock_20190303.txt","|", escape_double = FALSE, col_names = FALSE, trim_ws = TRUE)
stock2 <- read_delim("stock_20190304.txt","|", escape_double = FALSE, col_names = FALSE, trim_ws = TRUE)
stock3 <- read_delim("stock_20190305.txt","|", escape_double = FALSE, col_names = FALSE, trim_ws = TRUE)
stock4 <- read_delim("stock_20190306.txt","|", escape_double = FALSE, col_names = FALSE, trim_ws = TRUE)
stock5 <- read_delim("stock_20190307.txt","|", escape_double = FALSE, col_names = FALSE, trim_ws = TRUE)
stock6 <- read_delim("stock_20190308.txt","|", escape_double = FALSE, col_names = FALSE, trim_ws = TRUE)
stock7 <- read_delim("stock_20190311.txt","|", escape_double = FALSE, col_names = FALSE, trim_ws = TRUE)
stock8 <- read_delim("stock_20190312.txt","|", escape_double = FALSE, col_names = FALSE, trim_ws = TRUE)
stock9 <- read_delim("stock_20190313.txt","|", escape_double = FALSE, col_names = FALSE, trim_ws = TRUE)
stock10 <- read_delim("stock_20190314.txt","|", escape_double = FALSE, col_names = FALSE, trim_ws = TRUE)
stock11 <- read_delim("stock_20190315.txt","|", escape_double = FALSE, col_names = FALSE, trim_ws = TRUE)
stock12 <- read_delim("stock_20190318.txt","|", escape_double = FALSE, col_names = FALSE, trim_ws = TRUE)
stock13 <- read_delim("stock_20190319.txt","|", escape_double = FALSE, col_names = FALSE, trim_ws = TRUE)
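#the thirteen near-identical calls above could equivalently be written as a
#loop (sketch, same arguments assumed):
#files <- paste0("stock_", c("20190303","20190304","20190305","20190306",
#                            "20190307","20190308","20190311","20190312",
#                            "20190313","20190314","20190315","20190318",
#                            "20190319"), ".txt")
#stock_list <- lapply(files, read_delim, delim = "|", escape_double = FALSE,
#                     col_names = FALSE, trim_ws = TRUE)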
#rename the column 5 header
names(stock1)[names(stock1) == 'X5'] <- 1
names(stock2)[names(stock2) == 'X5'] <- 2
names(stock3)[names(stock3) == 'X5'] <- 3
names(stock4)[names(stock4) == 'X5'] <- 4
names(stock5)[names(stock5) == 'X5'] <- 5
names(stock6)[names(stock6) == 'X5'] <- 6
names(stock7)[names(stock7) == 'X5'] <- 7
names(stock8)[names(stock8) == 'X5'] <- 8
names(stock9)[names(stock9) == 'X5'] <- 9
names(stock10)[names(stock10) == 'X5'] <- 10
names(stock11)[names(stock11) == 'X5'] <- 11
names(stock12)[names(stock12) == 'X5'] <- 12
names(stock13)[names(stock13) == 'X5'] <- 13
#keep only the stock name (X2) and value columns
#by replacing each table with the subset of selected columns
stock1 <- subset(stock1, select = c("X2",1))
stock2 <- subset(stock2, select = c("X2",2))
stock3 <- subset(stock3, select = c("X2",3))
stock4 <- subset(stock4, select = c("X2",4))
stock5 <- subset(stock5, select = c("X2",5))
stock6 <- subset(stock6, select = c("X2",6))
stock7 <- subset(stock7, select = c("X2",7))
stock8 <- subset(stock8, select = c("X2",8))
stock9 <- subset(stock9, select = c("X2",9))
stock10 <- subset(stock10, select = c("X2",10))
stock11 <- subset(stock11, select = c("X2",11))
stock12 <- subset(stock12, select = c("X2",12))
stock13 <- subset(stock13, select = c("X2",13))
#join the stock files by the stock name, X2
stocks<-list(stock1, stock2, stock3,stock4,stock5, stock6, stock7,stock8,stock9, stock10, stock11,stock12,stock13) %>% reduce(full_join, by = "X2")
#replace the column name X2 as Days
names(stocks)[names(stocks) == 'X2'] <- 'Days'
#make sure all the values are numeric
stocks$`1`<-as.numeric(stocks$`1`)
stocks$`2`<-as.numeric(stocks$`2`)
stocks$`3`<-as.numeric(stocks$`3`)
stocks$`4`<-as.numeric(stocks$`4`)
stocks$`5`<-as.numeric(stocks$`5`)
stocks$`6`<-as.numeric(stocks$`6`)
stocks$`7`<-as.numeric(stocks$`7`)
stocks$`8`<-as.numeric(stocks$`8`)
stocks$`9`<-as.numeric(stocks$`9`)
stocks$`10`<-as.numeric(stocks$`10`)
stocks$`11`<-as.numeric(stocks$`11`)
stocks$`12`<-as.numeric(stocks$`12`)
stocks$`13`<-as.numeric(stocks$`13`)
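#equivalent one-liner for the thirteen conversions above (sketch):
#stocks[-1] <- lapply(stocks[-1], as.numeric)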
#make sure all the classes required are numeric
str(stocks)
#check whether there are any NAs in the data frame
sum(is.na(stocks))
#replace all the NA with 0
stocks[is.na(stocks)] <- 0
#check again, make sure it is 0
sum(is.na(stocks))
#calculate the sum value of day 1 to day 13 by row
stocks$Total<-rowSums(stocks[, -1])
#remove the stocks if the total sum is 0
#by replacing the data frame so that only rows with a total above 0 are kept
stocks <- stocks %>% filter(Total!=0)
#create a csv file of the combined stocks
write.csv(stocks,file="stocks.csv")
##################Euclidean Distance########################
#import the stock file (optional)
stocks <- read_csv("stocks.csv")
#duplicate the stock files
stock_test2 <- stocks
#remove the first column X1 and second column stock name
stocker2 <- stock_test2[-c(1:2)]
#replace the row name with the stock name (column title is Days)
row.names(stocker2) <- stock_test2$Days
#again, make sure all the columns are numeric before computing euclidean distances
str(stocker2)
#start to calculate the euclidean
x<-dist(stocker2, method = "euclidean")
#convert the output as a matrix then save into csv files
y<-as.matrix(x)
write.csv(y,file="Euclidean.csv")
#stocks_transpose<-data.frame(t(stock_test))
#names(stocks_transpose) <- as.matrix(stocks_transpose[1, ])
#stocks_transpose <- stocks_transpose[-1, ]
#stocks_transpose<-stocks_transpose[-14,]
#stocks_transpose$Days<-seq(1,13)
##################Z-Norm, PAA & SAX########################
#duplicate the stock again but only the first 11 stocks
stock_test <- stocks[1:11,]
#remove the stock name column and rename the rows by stock name
stocker <- stock_test[-c(1:2)]
row.names(stocker) <- stock_test$Days
#transpose the file and save as data frame
#removed the sum row as well
stocker_transpose<-data.frame(t(stocker))
stocker_transpose <- stocker_transpose[-14,]
#Z-normalization function
znorm <- function(ts){
ts.mean <- mean(ts)
ts.dev <- sd(ts)
(ts - ts.mean)/ts.dev
}
#PAA value function
paa <- function(ts, paa_size){
len = length(ts)
if (len == paa_size) {
ts
}
else {
if (len %% paa_size == 0) {
colMeans(matrix(ts, nrow=len %/% paa_size, byrow=F))
}
else {
res = rep.int(0, paa_size)
for (i in c(0:(len * paa_size - 1))) {
idx = i %/% len + 1# the spot
pos = i %/% paa_size + 1 # the col spot
res[idx] = res[idx] + ts[pos]
}
for (i in c(1:paa_size)) {
res[i] = res[i] / len
}
res
}
}
}
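#quick sanity check (illustrative, not part of the original analysis):
#a z-normalised series has mean ~0 and sd ~1, and paa() then averages it
#down to the requested number of segments
#demo <- znorm(c(2, 4, 6, 8, 10, 12))
#round(c(mean(demo), sd(demo)), 3)   # ~0 and 1
#paa(demo, 3)                        # three segment means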
#create four empty vector
result <- vector("list")
result2 <-vector("list")
result3 <- vector("list")
result4 <-vector("list")
#loop across the column names, applying Z-normalization and PAA to each stock
#the Z-normalization output is saved in result
#the PAA output is saved in result2
for(i in names(stocker_transpose)){
ts1_znorm=znorm(stocker_transpose[[i]])
result[[i]] <- ts1_znorm
paa_size=3
y_paa3 = paa(ts1_znorm,paa_size)
result2[[i]]<-y_paa3
}
#save both outputs as data frames
result<-as.data.frame(result)
result2<-as.data.frame(result2)
#transpose both results and save as data frames
result_transpose<-as.data.frame(t(result))
result2_transpose<-as.data.frame(t(result2))
#convert data frame into csv file
write.csv(result_transpose,file="result.csv")
write.csv(result2_transpose,file="result2.csv")
#check the maximum value and minimum value in result
# optional: only used to choose a sensible axis range for the plots below
max_value<-max(result)
min_value<-min(result)
##################Plot PAA & SAX########################
#Create an empty plot graph first
plot("Value","Days",xlim = c(1,13),ylim=c(-3.5,2.5),type = "n",main="PAA transform")
#create a for loop to plot each across the column name
for (i in names(result)){
#plot the Z-norm line of each stock
lines(result[[i]],col = "blue",type = 'o')
#this is to plot the SAX segment, the grey vertical dotted line
points(result[[i]], pch=16, lwd=5, col="blue")
abline(v=c(1,5,9,13), lty=3, lwd=2, col="gray50")
#plot the paa value of each stock
for (j in names(result2)){
#plot the paa value segment 1
segments(1,result2[[j]][1],5,result2[[j]][1],lwd=1,col="red")
points(x=(1+5)/2,y=result2[[j]][1],col="red",pch=23,lwd=5)
#plot the paa value segment 2
segments(5,result2[[j]][2],9,result2[[j]][2],lwd=1,col="red")
points(x=(5+9)/2,y=result2[[j]][2],col="red",pch=23,lwd=5)
#plot the paa value segment 3
segments(9,result2[[j]][3],13,result2[[j]][3],lwd=1,col="red")
points(x=(9+13)/2,y=result2[[j]][3],col="red",pch=23,lwd=5)
#plot the alphabet letter
y <- seq(-3.5,2.5, length=100)
x <- dnorm(y, mean=0, sd=1)
lines(x,y, type="l", lwd=5, col="magenta")
abline(h = alphabet_to_cuts(5)[1:9], lty=2, lwd=2, col="magenta")
text(0.7,-2,"e",cex=2,col="magenta")
text(0.7,-0.5,"d",cex=2,col="magenta")
text(0.7, 0,"c",cex=2,col="magenta")
text(0.7, 0.5,"b",cex=2,col="magenta")
text(0.7, 2,"a",cex=2,col="magenta")
#save the SAX string and character representations for this stock
result3[[j]]<-series_to_string(result2[[j]],3)
result4[[j]]<-series_to_chars(result2[[j]],3)
}
}
#convert the list to data frame
result3<-as.data.frame(result3)
result4<-as.data.frame(result4)
#transpose the data and save as data frame
result3_transpose<-as.data.frame(t(result3))
result4_transpose<-as.data.frame(t(result4))
#convert data frame into csv
write.csv(result3_transpose,file="result3.csv")
write.csv(result4_transpose,file="result4.csv") | /Data mining Assignment 3 2/coding/SAX PAA Choy- Partial.R | no_license | olivazhu/milestone- | R | false | false | 9,778 | r |
tar_test("tar_pipeline() works with loose targets", {
a <- tar_target(a, "a")
b <- tar_target(b, c(a, "b"))
expect_warning(
pipeline <- tar_pipeline(a, b),
class = "tar_condition_deprecate"
)
expect_silent(pipeline_validate(pipeline))
local_init(pipeline = pipeline)$run()
expect_equal(target_read_value(b)$object, c("a", "b"))
})
tar_test("tar_pipeline() works with target lists", {
expect_warning(
pipeline <- tar_pipeline(
list(
tar_target(a, "a"),
tar_target(b, c(a, "b"))
)
),
class = "tar_condition_deprecate"
)
expect_silent(pipeline_validate(pipeline))
local_init(pipeline = pipeline)$run()
b <- pipeline_get_target(pipeline, "b")
expect_equal(target_read_value(b)$object, c("a", "b"))
})
tar_test("tar_pipeline() works with weird lists", {
expect_warning(
pipeline <- tar_pipeline(
list(
tar_target(ct, c(b, "c")),
tar_target(d, c(ct, "d"))
),
tar_target(e, c(d, "e")),
list(
tar_target(a, "a"),
tar_target(b, c(a, "b"))
)
),
class = "tar_condition_deprecate"
)
expect_silent(pipeline_validate(pipeline))
local_init(pipeline = pipeline)$run()
e <- pipeline_get_target(pipeline, "e")
expect_equal(target_read_value(e)$object, c("a", "b", "c", "d", "e"))
})
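# Context note (not part of the test file): these tests expect the
# tar_condition_deprecate warning because tar_pipeline() is deprecated;
# in current {targets} a pipeline is simply the (possibly nested) list of
# targets returned at the end of _targets.R, e.g.
# list(
#   tar_target(a, "a"),
#   tar_target(b, c(a, "b"))
# )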
| /tests/testthat/test-tar_pipeline.R | permissive | ropensci/targets | R | false | false | 1,327 | r |
require(tidyverse)
require(robustbase)
source("hill_diversity.R")
#####################
# calculate hill evenness (and other metrics) for every SAD obtained with the script "SAD_sugihara_abundances_model"
#####################
# d1 <- readr::read_delim(file = "./results/abundances_niche_model_trophic_guild_DD.csv",delim = ";")
# d2 <- readr::read_delim(file = "./results/abundances_niche_model_trophic_guild_DP.csv",delim = ";")
# d3 <- readr::read_delim(file = "./results/abundances_niche_model_trophic_guild_RF.csv",delim = ";")
#
# model.data <- bind_rows(d1,d2,d3)
model.data <- readr::read_delim(file = "./results/abundances_niche_model_trophic_guild_complete.csv",delim = ";")
resource.dist.levels <- unique(model.data$resource.distribution)
richness.levels <- unique(model.data$richness)
connectance.levels <- unique(model.data$connectance)
apportionment.levels <- unique(model.data$niche.apport)
trophic.guilds <- unique(model.data$trophic.guild)
replicates <- max(model.data$replicate)
# guild.abundances <- model.data %>% group_by(richness,connectance,trophic.guild,replicate) %>% summarise(abund = n())
# mean.abundances <- guild.abundances %>% group_by(richness,connectance,trophic.guild) %>% summarise(mean.abund = mean(abund))
# ggplot(guild.abundances) +
# geom_boxplot(aes(x = trophic.guild, y = abund)) +
# facet_grid(richness~connectance, scales="free") +
# NULL
#############
metrics.results <- NULL
for(i.res.dist in 1:length(resource.dist.levels)){
for(i.richness in 1:length(richness.levels)){
for(i.connectance in 1:length(connectance.levels)){
for(i.apport in 1:length(apportionment.levels)){
for(i.tl in 1:length(trophic.guilds)){
for(i.rep in 1:replicates){
temp.result <- data.frame(richness.level = richness.levels[i.richness],
resource.distribution.level = resource.dist.levels[i.res.dist],
connectance.level = connectance.levels[i.connectance],
apportionment.level = apportionment.levels[i.apport],
replicate = i.rep,
trophic.guild = trophic.guilds[i.tl],
guild.richness = 0,
#mad = 0,
hill.evenness = 0,
skewness = 0,
#log.mad = 0,
log.hill.evenness = 0,
log.skewness = 0)
my.abundances <- model.data$N_predicted[model.data$richness == richness.levels[i.richness] &
model.data$resource.distribution == resource.dist.levels[i.res.dist] &
model.data$connectance == connectance.levels[i.connectance] &
model.data$niche.apport == apportionment.levels[i.apport] &
model.data$trophic.guild == trophic.guilds[i.tl] &
model.data$replicate == i.rep]
# transform data for consistency
# abundances < 0.5 -> 0
# abundances 0.5 < x <= 1 -> 1.001
# this way log.data can be computed and hill.diversity function does not raise errors
my.abundances[my.abundances < 0.5] <- 0
my.abundances[my.abundances >= 0.5 & my.abundances <= 1] <- 1.001
if(sum(my.abundances)>0){
temp.result$guild.richness <- sum(my.abundances)
#temp.result$mad <- stats::mad(my.abundances,constant = 1)
temp.result$skewness <- robustbase::mc(my.abundances)
my.hill.diversity <- hill.diversity(my.abundances)
temp.result$hill.evenness <- my.hill.diversity/length(my.abundances)
# metrics for log data:
log.data <- log(my.abundances[my.abundances != 0],2)
# in case abundance exactly 1, add a small increment so it is not taken as 0
# it shouldn't happen anyway with the above transformation
log.data[log.data == 0] <- 0.001
if(sum(is.na(log.data))>0){
cat(i.richness,"-",i.connectance,"-",trophic.guilds[i.tl],"-",i.rep,"\n","log.data:",log.data,"\n","abundances:",my.abundances)
stop()
}
#temp.result$log.mad <- stats::mad(log.data,constant = 1)
temp.result$log.skewness <- robustbase::mc(log.data)
log.hill.diversity <- hill.diversity(log.data)
temp.result$log.hill.evenness <- log.hill.diversity/length(my.abundances)
metrics.results <- rbind(metrics.results,temp.result)
}# if any abundance
}# for i.rep
}# for trophic.guilds[i.tl]
}# for i.apport
}# for i.connectance
}# for i.richness
}# for i.res.dist
readr::write_delim(x = metrics.results, path = "./results/sugihara_model_metrics_complete.csv",delim = ";")
#readr::write_delim(x = metrics.results,path = "./results/sugihara_model_metrics_DD.csv",delim = ";")
# readr::write_delim(x = metrics.results,path = "./results/sugihara_model_metrics_DP.csv",delim = ";")
# readr::write_delim(x = metrics.results,path = "./results/sugihara_model_metrics_RF.csv",delim = ";")
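# hill_diversity.R is not included in this dump; a common definition of the
# quantity used above (assumed here, not taken from the repo) is the Hill
# number of order 1, i.e. the exponential of Shannon entropy:
# hill.diversity <- function(abundances) {
#   p <- abundances[abundances > 0] / sum(abundances)
#   exp(-sum(p * log(p)))
# }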
| /SAD_sugihara_model_results.R | no_license | garciacallejas/SAD_evenness | R | false | false | 5,620 | r |
#
# This is the user-interface definition of a Shiny web application. You can
# run the application by clicking 'Run App' above.
#
# Find out more about building applications with Shiny here:
#
# http://shiny.rstudio.com/
#
library(shiny)
# Define UI for application that draws a histogram
shinyUI(fluidPage(
# Application title
titlePanel("Next word predictor"),
textInput("usertext", "Enter text here:", width="100%"),
hr(),
fluidRow(column(12,
column(4,
h5("Prediction 1:"),
textOutput("pred1")
),
column(4,
h5("Prediction 2:"),
textOutput("pred2")
),
column(4,
h5("Prediction 3:"),
textOutput("pred3")
)), align="center"),
hr(),
h4("About this app"),
p("This app takes text input supplied by you and aims to predict the most likely next word.
Three predictions are made in decreasing order of likelihood. This app forms part of my
submission to the capstone project from the Coursera Data Science Specialisation"),
p("The algorithm used is the 'Stupid Backoff' method which can be read about at the below link.
The algorithm was trained on data provided by swiftkey scraped from twitter, blogs and news
sources. Code used to clean the data and prepare the model can be viewed at the linked github
repository."),
a(href="http://www.aclweb.org/anthology/D07-1090.pdf", "Link to description of algorithm."),
br(),
a(href="https://github.com/tcbegley/swiftkey_capstone", "Link to code on github.")
))
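# The matching server logic lives in a separate file; a minimal skeleton
# (assumed, for illustration only) that would feed the three outputs above:
# server <- function(input, output) {
#   preds <- reactive(predict_next(input$usertext))  # predict_next is hypothetical
#   output$pred1 <- renderText(preds()[1])
#   output$pred2 <- renderText(preds()[2])
#   output$pred3 <- renderText(preds()[3])
# }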
| /predictr/ui.R | no_license | tcbegley/swiftkey_capstone | R | false | false | 1,592 | r |
### Utilities
matchlen=function(x,ref) length(ref[ref %in% x]) ### for weeding out models with unwanted combos
map = function(x) ### for scrambling membership in cv groups
{
v=unique(x)
key=sample(v,size=length(v))
y=x
for (a in 1:length(v)) y[x==v[a]]=key[a]
return(y)
}
##############################################################
#### The calling function, managing the Cross-Validated constrained-subset search #######
##############################################################
# Credits: Laina Mercer, Amanda Gassett, Michael Young, Assaf Oron, 2009-2013
# Modified version of a routine developed at MESA-Air Project, University of Washington
cvConSubsets<-function(covar.dataframe, outcome, cov.list, cv.group=10,max.covariates=length(cov.list),
forced=NULL,avoid=NULL,randomize=FALSE,rand.length=100,transform.list=NA,kriging=FALSE,...)
# covar.dataframe: the dataset
# outcome: character, the name of the outcome ('y') variable
# cov.list: character vector, names (or algebraic formulae) for the covariates to be considered.
# cv.group: either the # of CV groups to randomize, or a vector of length n with CV group assignments
# max.covariates: the maximum model size to consider
# forced: if not NULL, then a vector of indices indicating the covariates in 'cov.list' that should be in *all* models
# avoid: if not NULL, then a list of integer vectors, indicating groups of indices, in each of which at most 1 should be in any model
# randomize, rand.length: To avoid combinatoral explosion, the search can run in a mode that randomizes 'rand.length' combinations,
### -> each of length 'max.covariates'. This can serve as a pre-filtering step.
# transform.list: use if y is transformed and you want metrics on the original scale.
# kriging: logical. Should the regression function be 'kriging.cv'?
# parlel,nclusters: for use with 'kriging.cv', for parallel computing
{
startime=date()
cat(startime)
nsamp=dim(covar.dataframe)[1]
# If no CV group specified, do random K-fold CV
if (length(cv.group)==1) {
cat ("No CV group specified, doing plain ",cv.group,"-fold CV...\n")
cv.group=sample(1:cv.group,size=nsamp,replace=TRUE)
}
ticktime=ifelse(kriging,100,500)
nstats=ifelse(is.na(transform.list[[1]][1]),7,11)
names(covar.dataframe)<-tolower(names(covar.dataframe))
outcome<-tolower(outcome)
cov.list<-tolower(cov.list)
if(!any(grepl("date",names(covar.dataframe)))) covar.dataframe$date=rep(1,nsamp)
covar.dataframe<-covar.dataframe[!(is.na(covar.dataframe[,names(covar.dataframe)==outcome])),]
max.R2<-0
min.RMSE<-10000
all.results<-as.data.frame(matrix(NA,nrow=1,ncol=nstats))
### Setting search boundaries and constraints
max.covariates<-min(length(cov.list), max.covariates)
num.covars=max(2,length(forced))
overlaps=length(unlist(avoid))-length(avoid)
# cat("overlaps ",overlaps)
if (randomize) { num.covars=max.covariates }
while (num.covars<=min(length(cov.list)-overlaps,max.covariates)){
if (randomize) {
model.list=matrix(NA,ncol=rand.length,nrow=num.covars)
for (b in 1:rand.length) model.list[,b]=sample(cov.list,size=num.covars)
model.list=unique(model.list,MARGIN=2)
} else {
### Generating all term combinations for a given model size
model.list<-combn(x=cov.list,m=num.covars)
}
cat("\n Size and initial combinations: ",dim(model.list),'...')
if (length(forced)>0 && num.covars<length(cov.list))
{
for (a in cov.list[forced]) model.list=model.list[,apply(model.list,2,function(x,b) b%in%x,b=a)]
if (num.covars==length(forced)) model.list=matrix(model.list,ncol=1)
}
if(length(avoid)>0 && num.covars>length(forced)+1)
{
for (a in 1:length(avoid))
{
multvec=apply(model.list,2,matchlen,ref=cov.list[avoid[[a]]])
model.list=model.list[,multvec<2]
}
}
nmodels=dim(model.list)[2]
cat("now ",nmodels)
presult<-as.data.frame(matrix(NA,nrow=nmodels,ncol=nstats))
for (i in 1:nmodels ){
if (i %% 10 ==0) cat ('.')
if (i %% ticktime ==0) cat(date(),'\n')
### Calling the individual regression run
tmp=cvEngine(covar.dataframe, model.list[,i], outcome,cv.group=cv.group,transform.list=transform.list,kriging=kriging,...)
presult[i,-c(1,nstats)]=tmp$stats
presult[i,nstats]=tmp$model
presult[i,1]=num.covars
# print(result[i,])
}
all.results<-rbind(all.results, presult)
num.covars=num.covars+1
}
cat("\nStarted: ",startime," Ended: ",date(),'\n')
cat("Run Completed. It is advisable to perform statistics on top/bottom performing models, rather than choose the single best one.\n")
# all.results$V1<-as.numeric(all.results$V1)
# all.results$V2<-as.numeric(all.results$V2)
# all.results$V3<-as.numeric(all.results$V3)
# all.results$V4<-as.numeric(all.results$V4)
# names(all.results)<-c("LUR.R2","LUR.RMSE","CV.LUR.R2","CV.LUR.RMSE","model")
names(all.results)[1:7]<-c("nVars","Insample.R2","Insample.RMSE","CV.R2","CV.RMSE","CV.QAE","model")
if(!is.na(transform.list[[1]][1])) names(all.results)=c(names(all.results)[1:6],c("orig.cv.R2","orig.cv.RMSE","cv.rank.R2","orig.cv.QAE","model"))
all.results=all.results[-1,]
cat(date(),'\n')
if(!is.na(transform.list[[1]][1])) {
return(all.results[order(as.numeric(all.results$orig.cv.RMSE)),])
}
all.results[order(all.results$CV.RMSE),]
}
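# Illustrative call (sketch; 'mydata' and the covariate names are invented):
# res <- cvConSubsets(covar.dataframe = mydata, outcome = "pm25",
#                     cov.list = c("elev", "pop_dens", "dist_road"),
#                     cv.group = 10, max.covariates = 2)
# head(res)   # candidate models, ordered by cross-validated RMSE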
| /201/Week07/cvConstrainedSubsets.r | no_license | doerodney/UW-R-Cert | R | false | false | 5,423 | r |
#' @rdname read_credentials
#' @title Use Credentials from .aws/credentials File
#' @description Use a profile from a \samp{.aws/credentials} file
#' @param profile A character string specifing which profile to use from the file. By default, the \dQuote{default} profile is used.
#' @param file A character string containing a path to a \samp{.aws/credentials} file. By default, the standard/centralized file is used. For \code{use_credentials}, this can also be an object of class \dQuote{aws_credentials} (as returned by \code{use_credentials}).
#' @details \code{read_credentials} reads and parses a \samp{.aws/credentials} file into an object of class \dQuote{aws_credentials}.
#'
#' \code{use_credentials} uses credentials from a profile stored in a credentials file to set the environment variables used by this package.
#' @author Thomas J. Leeper <thosjleeper@gmail.com>
#' @references
#' \href{https://blogs.aws.amazon.com/security/post/Tx3D6U6WSFGOK2H/A-New-and-Standardized-Way-to-Manage-Credentials-in-the-AWS-SDKs}{Amazon blog post describing the format}
#' @seealso \code{\link{signature_v2_auth}}
#' @examples
#' \dontrun{
#' # set environment variables from a profile
#' use_credentials()
#'
#' # read and parse a file
#' read_credentials()
#' }
#' @export
read_credentials <- function(file = default_credentials_file()) {
file <- path.expand(file)
if (!file.exists(file)) {
stop(paste0("File ", shQuote(file), " does not exist."))
}
char <- rawToChar(readBin(file, "raw", n = 1e5L))
parse_credentials(char)
}
#' @rdname read_credentials
#' @export
use_credentials <- function(profile = "default", file = default_credentials_file()) {
if (inherits(file, "aws_credentials")) {
x <- file
} else {
x <- read_credentials(file)
}
if ("AWS_ACCESS_KEY_ID" %in% names(x[[profile]])) {
Sys.setenv("AWS_ACCESS_KEY_ID" = x[[profile]][["AWS_ACCESS_KEY_ID"]])
}
if ("AWS_SECRET_ACCESS_KEY" %in% names(x[[profile]])) {
Sys.setenv("AWS_SECRET_ACCESS_KEY" = x[[profile]][["AWS_SECRET_ACCESS_KEY"]])
}
if ("AWS_SESSION_TOKEN" %in% names(x[[profile]])) {
Sys.setenv("AWS_SESSION_TOKEN" = x[[profile]][["AWS_SESSION_TOKEN"]])
}
if ("AWS_DEFAULT_REGION" %in% names(x[[profile]])) {
Sys.setenv("AWS_DEFAULT_REGION" = x[[profile]][["AWS_DEFAULT_REGION"]])
}
invisible(x)
}
#' @rdname read_credentials
#' @export
default_credentials_file <- function() {
if (.Platform[["OS.type"]] == "windows") {
home <- Sys.getenv("USERPROFILE")
} else {
home <- "~"
}
suppressWarnings(normalizePath(file.path(home, '.aws', 'credentials')))
}
parse_credentials <- function(char) {
s <- c(gregexpr("\\[", char)[[1]], nchar(char))
make_named_vec <- function(x) {
elem <- strsplit(x, "[ ]?=[ ]?")
out <- lapply(elem, `[`, 2)
names(out) <- toupper(sapply(elem, `[`, 1))
out
}
creds <- list()
for (i in seq_along(s)[-1]) {
tmp <- strsplit(substr(char, s[i-1], s[i]-1), "[\n\r]+")[[1]]
creds[[i-1]] <- make_named_vec(tmp[-1])
names(creds)[[i-1]] <- gsub("\\[", "", gsub("\\]", "", tmp[1]))
}
structure(creds, class = "aws_credentials")
}
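# For reference, a dummy example of the file format parse_credentials()
# expects (the standard AWS shared-credentials layout; the keys below are the
# well-known AWS documentation examples, not real credentials):
#
# [default]
# aws_access_key_id = AKIDEXAMPLE
# aws_secret_access_key = wJalrXUtnFEMI/K7MDENG+bPxRfiCYEXAMPLEKEY
# [myprofile]
# aws_access_key_id = ...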
| /R/read_credentials.R | no_license | turqoisehat/aws.signature | R | false | false | 3,259 | r |
\name{blup.mixmeta}
\alias{blup.mixmeta}
\title{ Best Linear Unbiased Predictions from mixmeta Models }
\description{
This method function computes (empirical) best linear unbiased predictions from fitted random-effects meta-analytical models represented in objects of class \code{"mixmeta"}. Quantities can represent prediction of outcomes given both fixed and random effects, or just random-effects residuals from the fixed-effects estimates. Predictions are optionally accompanied by standard errors, prediction intervals or the entire (co)variance matrix of the predicted outcomes.
}
\usage{
\method{blup}{mixmeta}(object, se=FALSE, pi=FALSE, vcov=FALSE, pi.level=0.95, type="outcome",
level, format, aggregate="stat", \dots)
}
\arguments{
\item{object }{ an object of class \code{"mixmeta"}.}
\item{se }{ logical switch indicating if standard errors must be included.}
\item{pi }{ logical switch indicating if prediction intervals must be included.}
\item{vcov }{ logical switch indicating if the (co)variance matrix must be included.}
\item{pi.level }{ a numerical value between 0 and 1, specifying the confidence level for the computation of prediction intervals.}
\item{type }{ the type of prediction. This can be either \code{outcome} (default) or \code{residual}. See Details.}
\item{level }{ level of random-effects grouping for which predictions are to be computed. Default to the highest (inner) level, with 0 corresponding to fixed-effects predictions obtained through \code{\link[=predict.mixmeta]{predict}}.}
\item{format }{ the format for the returned results. See Value.}
\item{aggregate }{ when \code{format="matrix"} and \code{se} or \code{ci} are required, the results may be aggregated by statistic or by outcome. See Value.}
\item{\dots }{ further arguments passed to or from other methods.}
}
\details{
The method function \code{blup} produces (empirical) best linear unbiased predictions from \code{mixmeta} objects. These can represent outcomes, given by the sum of fixed and random parts, or just random-effects residuals representing deviations from the fixed-effects estimated outcomes. In non-standard models with multiple hierarchies of random effects, the argument \code{level} can be used to determine the level of grouping for which predictions are to be computed.

These predictions are a shrunk version of unit-specific realizations, where unit-specific estimates borrow strength from the assumption of an underlying (potentially multivariate) distribution of outcomes or residuals in a (usually hypothetical) population. The amount of shrinkage depends on the relative size of the within and between-unit covariance matrices reported as components \code{S} and \code{Psi} in \code{mixmeta} objects (see \code{\link{mixmetaObject}}).

Fixed-effects models do not assume random effects, and the results of \code{blup} for these models are identical to \code{\link[=predict.mixmeta]{predict}} (for \code{type="outcome"}) or just 0's (for \code{type="residual"}).

How to handle predictions for units removed from estimation due to an invalid missing pattern is determined by the \code{na.action} argument used in \code{\link{mixmeta}} to produce \code{object}. If \code{na.action=na.omit}, units excluded from estimation will not appear, whereas if \code{na.action=na.exclude} they will appear, with values set to \code{NA} for all the outcomes. This step is performed by \code{\link{napredict}}. See Note below.

In the presence of missing values in the outcomes \code{y} of the fitted model, corresponding values of point estimates and covariance terms are set to 0, while the variance terms are set to \code{1e+10}. In this case, in practice, the unit-specific estimates do not provide any information (their weight is virtually 0), and the prediction tends to the value returned by \code{\link[=predict.mixmeta]{predict}} with \code{interval="prediction"}, when applied to a new but identical set of predictors. See also Note below.
}
\value{
(Empirical) best linear unbiased predictions of outcomes or random-effects residuals. The results may be aggregated in matrices (the default), or returned as lists, depending on the argument \code{format}. For multivariate models, the aggregation is ruled by the argument \code{aggregate}, and the results may be grouped by statistic or by outcome. If \code{vcov=TRUE}, lists are always returned.
}
\references{
Sera F, Armstrong B, Blangiardo M, Gasparrini A (2019). An extended mixed-effects framework for meta-analysis. \emph{Statistics in Medicine}. 2019;38(29):5429-5444. [Freely available \href{http://www.ag-myresearch.com/2019_sera_statmed.html}{\bold{here}}].
Verbeke G, Molenberghs G. \emph{Linear Mixed Models for Longitudinal Data}. Springer; 1997.
}
\author{Antonio Gasparrini <\email{antonio.gasparrini@lshtm.ac.uk}> and Francesco Sera <\email{francesco.sera@lshtm.ac.uk}>}
\note{
The definition of missing in model frames used for estimation in \code{\link{mixmeta}} is different from that commonly adopted in other regression models such as \code{\link{lm}} or \code{\link{glm}}. See info on \code{\link[=na.omit.data.frame.mixmeta]{missing values}} in \code{\link{mixmeta}}.

Differently from \code{\link[=predict.mixmeta]{predict}}, this method function computes the predicted values in the presence of partially missing outcomes. Interestingly, BLUPs for missing outcomes may be slightly different from predictions returned by \code{\link[=predict.mixmeta]{predict}} on a new but identical set of predictors, as the BLUP also depends on the random part of the model. Specifically, the function uses information from the random-effects (co)variance to predict missing outcomes given the observed ones.
}
\seealso{
See \code{\link[=predict.mixmeta]{predict}} for standard predictions. See \code{\link{mixmeta-package}} for an overview of the package and modelling framework.
}
\examples{
# RUN THE MODEL
model <- mixmeta(cbind(PD,AL) ~ 1, S=berkey98[5:7], data=berkey98)
# ONLY BLUP
blup(model)
# BLUP AND SE
blup(model, se=TRUE)
# SAME AS ABOVE, AGGREGATED BY OUTCOME, WITH PREDICTION INTERVALS
blup(model, se=TRUE, pi=TRUE, aggregate="outcome")
# WITH VCOV, FORCED TO A LIST
blup(model, se=TRUE, pi=TRUE, vcov=TRUE, aggregate="outcome")
# PREDICTING ONLY THE RANDOM-EFFECT RESIDUALS
blup(model, type="residual")
}
\keyword{models}
\keyword{regression}
\keyword{multivariate}
\keyword{methods}
| /man/blup.mixmeta.Rd | no_license | mbexhrs3/mixmeta | R | false | false | 6,516 | rd |
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/create.r
\name{create}
\alias{create}
\alias{create_group}
\alias{create_dataset}
\alias{create_attribute}
\title{Create HDF Object}
\usage{
create(
what = c("FILE", "GROUP", "DATASET", "ATTRIBUTE"),
path,
data.type,
size,
overwrite = FALSE,
parallel = FALSE
)
create_group(group, overwrite = FALSE)
create_dataset(
dataset,
data.type,
size = NULL,
overwrite = FALSE,
parallel = FALSE
)
create_attribute(
attribute,
data.type,
size = NULL,
overwrite = FALSE,
parallel = FALSE
)
}
\arguments{
\item{what}{The type of object to create.}
\item{path}{The target location of the object.}
\item{data.type}{The HDF data type of the dataset or attribute.}
\item{size}{The size (dimensions) of the dataset or attribute.
For \code{CHAR} datasets or attributes, the last element of \code{size}
is the string length.}
\item{overwrite}{If \code{TRUE}, overwrite existing file, group,
attribute, or dataset.}
\item{parallel}{If \code{TRUE}, use parallel capabilities.}
\item{group}{The group to create.}
\item{dataset}{The dataset to create.}
\item{attribute}{The attribute to create.}
}
\description{
Generic helper for creating HDF objects.
}
\section{Functions}{
\itemize{
\item \code{create_group}: Create HDF group.
\item \code{create_dataset}: Create HDF dataset.
\item \code{create_attribute}: Create HDF attribute.
}}
\keyword{internal}
| /man/create.Rd | no_license | cran/hdfqlr | R | false | true | 1,526 | rd |
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/resampling_functions.R
\name{resample_df}
\alias{resample_df}
\title{resampling}
\usage{
resample_df(df, key_cols, strat_cols = NULL, n = NULL,
key_col_name = "KEY", replace = TRUE)
}
\arguments{
\item{df}{data frame}
\item{key_cols}{key columns to resample on}
\item{strat_cols}{columns to maintain proportion for stratification}
\item{n}{number of unique sampled keys, defaults to match dataset}
\item{key_col_name}{name of outputted key column. Default to "KEY"}
\item{replace}{whether to stratify with replacement}
}
\description{
resampling
}
\details{
This function is valuable when generating a large simulated population
where your goal is to create resampled sub-populations while also being able to
maintain certain stratifications of factors, such as covariate distributions.

A new keyed column will be created (defaults to name 'KEY') that contains the uniquely
created new samples. This allows one to easily compare against the keyed columns. E.g.,
if you would like to see how many times a particular individual was resampled, you can
check the original ID column against the number of keys associated with that ID number.
}
\examples{
\dontrun{
library(PKPDdatasets)
resample_df(sd_oral_richpk, key_cols = "ID", strat_cols = "Gender", 10)
# make 'simulated' data with 5 replicates
rep_dat <- rbind_all(lapply(1:5, function(x) sd_oral_richpk \%>\%
filter(ID < 20) \%>\%
mutate(REP = x)))
resample_df(rep_dat, key_cols = c("ID", "REP"))
# check to see that stratification is maintained
rep_dat \%>\% group_by(Gender) \%>\% summarize(n = n())
resample_df(rep_dat, key_cols=c("ID", "REP"), strat_cols="Gender") \%>\%
group_by(Gender) \%>\% summarize(n = n())
rep_dat \%>\% group_by(Gender, Race) \%>\% summarize(n = n())
resample_df(rep_dat, key_cols=c("ID", "REP"), strat_cols=c("Gender", "Race")) \%>\%
group_by(Gender, Race) \%>\% summarize(n = n())
}
}
| /man/resample_df.Rd | no_license | DuyTran16/PKPDmisc | R | false | true | 1,964 | rd |
\name{G2518AV001UNIPROT}
\alias{G2518AV001UNIPROT}
\alias{G2518AV001UNIPROT2PROBE}
\title{Map Uniprot accession numbers with Entrez Gene identifiers}
\description{
G2518AV001UNIPROT is an R object that contains mappings between
the manufacturer identifiers and Uniprot accession numbers.
}
\details{
This object is a simple mapping of manufacturer identifiers to Uniprot
Accessions.
Mappings were based on data provided by NCBI (link above) with an
exception for fly, which required retrieving the data from ensembl
\url{http://www.ensembl.org/biomart/martview/}
}
\examples{
x <- G2518AV001UNIPROT
# Get the entrez gene IDs that are mapped to an Uniprot ID
mapped_genes <- mappedkeys(x)
# Convert to a list
xx <- as.list(x[mapped_genes])
if(length(xx) > 0) {
# Get the Uniprot IDs for the first five genes
xx[1:5]
# Get the first one
xx[[1]]
}
}
\keyword{datasets}
| /G2518AV001.db/man/G2518AV001UNIPROT.Rd | no_license | demis001/G2518AV001 | R | false | false | 946 | rd |
# Plot 2
#Read data from file and subset data
data.full <- read.csv("household_power_consumption.txt", header=T, sep=';', na.strings="?",
nrows=2075259, check.names=F, stringsAsFactors=F, comment.char="", quote='\"')
subset.data <- data.full[data.full$Date %in% c("1/2/2007","2/2/2007") ,]
datetime <- strptime(paste(subset.data$Date, subset.data$Time, sep=" "), "%d/%m/%Y %H:%M:%S")
global.active.power <- as.numeric(subset.data$Global_active_power)
#Create graph of date/time vs Global Active Power data
png("plot2.png", width=480, height=480)
plot(datetime, global.active.power, type="l", xlab="", ylab="Global Active Power (kilowatts)")
dev.off() | /plot2.R | no_license | SoVa29/Coursera_ExploratoryDataAnalysis_CourseProjectWeek1 | R | false | false | 677 | r | # Plot 2
#Read data from file and subset data
data.full <- read.csv("household_power_consumption.txt", header=T, sep=';', na.strings="?",
nrows=2075259, check.names=F, stringsAsFactors=F, comment.char="", quote='\"')
subset.data <- data.full[data.full$Date %in% c("1/2/2007","2/2/2007") ,]
datetime <- strptime(paste(subset.data$Date, subset.data$Time, sep=" "), "%d/%m/%Y %H:%M:%S")
global.active.power <- as.numeric(subset.data$Global_active_power)
#Create graph of date/time vs Global Active Power data
png("plot2.png", width=480, height=480)
plot(datetime, global.active.power, type="l", xlab="", ylab="Global Active Power (kilowatts)")
dev.off() |
#' Pivot data from wide to long
#'
#' @description
#' \Sexpr[results=rd, stage=render]{lifecycle::badge("maturing")}
#'
#' `pivot_longer()` "lengthens" data, increasing the number of rows and
#' decreasing the number of columns. The inverse transformation is
#' [pivot_wider()]
#'
#' Learn more in `vignette("pivot")`.
#'
#' @details
#' `pivot_longer()` is an updated approach to [gather()], designed to be both
#' simpler to use and to handle more use cases. We recommend you use
#' `pivot_longer()` for new code; `gather()` isn't going away but is no longer
#' under active development.
#'
#' @param data A data frame to pivot.
#' @param cols Columns to pivot into longer format.
#'
#' This takes a tidyselect specification, e.g. use `a:c` to select all
#' columns from `a` to `c`, `starts_with("prefix")` to select all columns
#' starting with "prefix", or `everything()` to select all columns.
#' @param names_to A string specifying the name of the column to create
#' from the data stored in the column names of `data`.
#'
#' Can be a character vector, creating multiple columns, if `names_sep`
#' or `names_pattern` is provided.
#' @param names_prefix A regular expression used to remove matching text
#' from the start of each variable name.
#' @param names_sep,names_pattern If `names_to` contains multiple values,
#' these arguments control how the column name is broken up.
#'
#' `names_sep` takes the same specification as [separate()], and can either
#' be a numeric vector (specifying positions to break on), or a single string
#' (specifying a regular expression to split on).
#'
#' `names_pattern` takes the same specification as [extract()], a regular
#' expression containing matching groups (`()`).
#'
#' If these arguments do not give you enough control, use
#' `pivot_longer_spec()` to create a spec object and process manually as
#' needed.
#' @param names_repair What happens if the output has invalid column names?
#' The default, `"check_unique"` is to error if the columns are duplicated.
#' Use `"minimal"` to allow duplicates in the output, or `"unique"` to
#' de-duplicated by adding numeric suffixes. See [vctrs::vec_as_names()]
#' for more options.
#' @param values_to A string specifying the name of the column to create
#' from the data stored in cell values. If `names_to` is a character
#' containing the special `.value` sentinel, this value will be ignored,
#' and the name of the value column will be derived from part of the
#' existing column names.
#' @param values_drop_na If `TRUE`, will drop rows that contain only `NA`s
#'   in the `values_to` column. This effectively converts explicit missing values
#' to implicit missing values, and should generally be used only when missing
#' values in `data` were created by its structure.
#' @param names_ptypes,values_ptypes A list of column name-prototype pairs.
#' A prototype (or ptype for short) is a zero-length vector (like `integer()`
#' or `numeric()`) that defines the type, class, and attributes of a vector.
#'
#' If not specified, the type of the columns generated from `names_to` will
#' be character, and the type of the variables generated from `values_to`
#' will be the common type of the input columns used to generate them.
#' @export
#' @examples
#' # See vignette("pivot") for examples and explanation
#'
#' # Simplest case where column names are character data
#' relig_income
#' relig_income %>%
#' pivot_longer(-religion, names_to = "income", values_to = "count")
#'
#' # Slightly more complex case where columns have a common prefix,
#' # and missing values are structural so should be dropped.
#' billboard
#' billboard %>%
#' pivot_longer(
#' cols = starts_with("wk"),
#' names_to = "week",
#' names_prefix = "wk",
#' values_to = "rank",
#' values_drop_na = TRUE
#' )
#'
#' # Multiple variables stored in column names
#' who %>% pivot_longer(
#' cols = new_sp_m014:newrel_f65,
#' names_to = c("diagnosis", "gender", "age"),
#' names_pattern = "new_?(.*)_(.)(.*)",
#' values_to = "count"
#' )
#'
#' # Multiple observations per row
#' anscombe
#' anscombe %>%
#' pivot_longer(everything(),
#' names_to = c(".value", "set"),
#' names_pattern = "(.)(.)"
#' )
pivot_longer <- function(data,
cols,
names_to = "name",
names_prefix = NULL,
names_sep = NULL,
names_pattern = NULL,
names_ptypes = list(),
names_repair = "check_unique",
values_to = "value",
values_drop_na = FALSE,
values_ptypes = list()
) {
cols <- enquo(cols)
spec <- build_longer_spec(data, !!cols,
names_to = names_to,
values_to = values_to,
names_prefix = names_prefix,
names_sep = names_sep,
names_pattern = names_pattern,
names_ptypes = names_ptypes
)
pivot_longer_spec(data, spec,
names_repair = names_repair,
values_drop_na = values_drop_na,
values_ptypes = values_ptypes
)
}
#' Pivot data from wide to long using a spec
#'
#' This is a low-level interface to pivoting, inspired by the cdata package,
#' that allows you to describe pivoting with a data frame.
#'
#' @keywords internal
#' @export
#' @inheritParams pivot_longer
#' @param spec A specification data frame. This is useful for more complex
#' pivots because it gives you greater control on how metadata stored in the
#' column names turns into columns in the result.
#'
#' Must be a data frame containing character `.name` and `.value` columns.
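#'
#' @examples
#' # A minimal sketch: build a spec by hand, then pivot with it
#' # (same `relig_income` data as in the `pivot_longer()` examples above)
#' spec <- build_longer_spec(relig_income, -religion,
#'   names_to = "income", values_to = "count")
#' spec
#' pivot_longer_spec(relig_income, spec)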
pivot_longer_spec <- function(data,
spec,
names_repair = "check_unique",
values_drop_na = FALSE,
values_ptypes = list()
) {
spec <- check_spec(spec)
spec <- deduplicate_names(spec, data)
# Quick hack to ensure that split() preserves order
v_fct <- factor(spec$.value, levels = unique(spec$.value))
values <- split(spec$.name, v_fct)
value_keys <- split(spec[-(1:2)], v_fct)
keys <- vec_unique(spec[-(1:2)])
vals <- set_names(vec_init(list(), length(values)), names(values))
for (value in names(values)) {
cols <- values[[value]]
col_id <- vec_match(value_keys[[value]], keys)
val_cols <- vec_init(list(), nrow(keys))
val_cols[col_id] <- unname(as.list(data[cols]))
val_cols[-col_id] <- list(rep(NA, nrow(data)))
val_type <- vec_ptype_common(!!!set_names(val_cols[col_id], cols), .ptype = values_ptypes[[value]])
out <- vec_c(!!!val_cols, .ptype = val_type)
# Interleave into correct order
idx <- (matrix(seq_len(nrow(data) * length(val_cols)), ncol = nrow(data), byrow = TRUE))
vals[[value]] <- vec_slice(out, as.integer(idx))
}
vals <- as_tibble(vals)
# Line up output rows by combining spec and existing data frame
rows <- expand_grid(
df_id = vec_seq_along(data),
    key_id = vec_seq_along(keys)
)
rows$val_id <- vec_seq_along(rows)
if (values_drop_na) {
rows <- vec_slice(rows, !vec_equal_na(vals))
}
# Join together df, spec, and val to produce final tibble
df_out <- drop_cols(data, spec$.name)
out <- wrap_error_names(vec_cbind(
vec_slice(df_out, rows$df_id),
vec_slice(keys, rows$key_id),
vec_slice(vals, rows$val_id),
.name_repair = names_repair
))
reconstruct_tibble(data, out)
}
#' @rdname pivot_longer_spec
#' @export
build_longer_spec <- function(data, cols,
names_to = "name",
values_to = "value",
names_prefix = NULL,
names_sep = NULL,
names_pattern = NULL,
names_ptypes = NULL) {
cols <- tidyselect::vars_select(unique(names(data)), !!enquo(cols))
if (length(cols) == 0) {
abort(glue::glue("`cols` must select at least one column."))
}
if (is.null(names_prefix)) {
names <- cols
} else {
names <- stringi::stri_replace_all_regex(cols, paste0("^", names_prefix), "")
}
if (length(names_to) > 1) {
if (!xor(is.null(names_sep), is.null(names_pattern))) {
abort(glue::glue(
"If you supply multiple names in `names_to` you must also supply one",
" of `names_sep` or `names_pattern`."
))
}
if (!is.null(names_sep)) {
names <- str_separate(names, names_to, sep = names_sep)
} else {
names <- str_extract(names, names_to, regex = names_pattern)
}
if (".value" %in% names_to) {
values_to <- NULL
}
} else {
if (!is.null(names_sep)) {
abort("`names_sep` can not be used with length-1 `names_to`")
}
if (!is.null(names_pattern)) {
names <- str_extract(names, names_to, regex = names_pattern)[[1]]
}
names <- tibble(!!names_to := names)
}
# optionally, cast variables generated from columns
cast_cols <- intersect(names(names), names(names_ptypes))
for (col in cast_cols) {
names[[col]] <- vec_cast(names[[col]], names_ptypes[[col]])
}
out <- tibble(.name = cols)
out[[".value"]] <- values_to
out <- vec_cbind(out, names)
out
}
drop_cols <- function(df, cols) {
if (is.character(cols)) {
df[setdiff(names(df), cols)]
} else if (is.integer(cols)) {
df[-cols]
} else {
abort("Invalid input")
}
}
# Match spec to data, handling duplicated column names
deduplicate_names <- function(spec, df) {
col_id <- vec_match(names(df), spec$.name)
has_match <- !is.na(col_id)
if (!vec_duplicate_any(col_id[has_match])) {
return(spec)
}
warn("Duplicate column names detected, adding .copy variable")
spec <- vec_slice(spec, col_id[has_match])
# Need to use numeric indices because names only match first
spec$.name <- seq_along(df)[has_match]
pieces <- vec_split(seq_len(nrow(spec)), col_id[has_match])
copy <- integer(nrow(spec))
for (i in seq_along(pieces$val)) {
idx <- pieces$val[[i]]
copy[idx] <- seq_along(idx)
}
spec$.copy <- copy
spec
}
| /R/pivot-long.R | permissive | jeffreypullin/tidyr | R | false | false | 10,224 | r |
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/recenter.R
\name{recenter}
\alias{recenter}
\title{Rotate Dimensional Anchors around the Unit Circle}
\usage{
recenter(springs, newc)
}
\arguments{
\item{springs}{a spring object as created by \code{\link{make.S}}}
\item{newc}{a string specifying which dimensional anchor should be placed on top of the unit circle}
}
\value{
a spring object with rotated labels
}
\description{
recenter rotates the order of the dimensional anchors around the circle, putting a given
channel of reference at the top of the display.
}
\examples{
data(iris)
das <- c('Sepal.Length','Sepal.Width','Petal.Length','Petal.Width')
iris.S <- make.S(das)
iris.S
recenter(iris.S,'Petal.Length')
}
\author{
Yann Abraham
}
| /man/recenter.Rd | no_license | ivan-marroquin/Radviz | R | false | true | 772 | rd |
#!/usr/bin/env Rscript
chr <- c(1:20, 22, 23)
qcdir <- '/home/tl483/rds/rds-rd109-durbin-group/projects/cichlid/cichlid_g.vcf/main_set_2019-07-10/fAstCal1.2/GenotypeCorrected/depth_filter/malawi_variants/qc/'
outplot <- '/home/tl483/rds/rds-rd109-durbin-group/projects/cichlid/cichlid_g.vcf/main_set_2019-07-10/fAstCal1.2/GenotypeCorrected/depth_filter/malawi_variants/qc/malawi_callset_DP_plot.pdf'
pdf(outplot)
x <- data.frame(CHROM=NULL, POS=NULL, DP=NULL)
par(mfrow=c(4,4))
for (i in chr) {
  file <- paste0(qcdir,"malawi_cichlid_variants_chr",i,"_DP.txt")
  y <- read.table(file, head=TRUE, na.string=".")
  y[,3][which(is.infinite(y[,3]))] <- NA # mask inf and -inf values before pooling, so genome-wide stats are not skewed
  x <- rbind(x,y)
  cat(paste0("plotting chr",i,"\n"))
  hist(y$DP, main=paste0("chr",i), ylab="Number sites", xlab="DP", xlim=c(0,quantile(y$DP,0.99,na.rm=TRUE)+sqrt(var(y$DP,na.rm=TRUE))),breaks=100000)
}
par(mfrow=c(1,1))
# Genome-wide stats and plot
cat("Calculating genome-wide DP stats and plotting\n")
dpmed <- median(x$DP, na.rm=TRUE)
dpa <- round(dpmed - (0.25*dpmed))
dpb <- round(dpmed + (0.25*dpmed))
cat("Genome median DP: ",dpmed,"\n")
cat("Genome DP lower cutoff: ",dpa,"\n")
cat("Genome DP upper cutoff: ",dpb,"\n")
hist(x$DP, main='Genome', ylab="Number sites", xlab="DP", xlim=c(0,quantile(x$DP,0.99,na.rm=TRUE)+sqrt(var(x$DP,na.rm=TRUE))),breaks=100000)
abline(v=dpa, col="red")
abline(v=dpb, col="red")
legend('topright', c(paste0("lower: ",dpa), paste0("upper: ",dpb)), bty='n',cex=0.75)
invisible(dev.off())
| /callset/production_pipeline/scripts_v1.2/plot_dp.R | no_license | tplinderoth/cichlids | R | false | false | 1,514 | r |
# rm(list = ls()) # Clear the workspace!
# ls() # No objects left in the workspace
# raster::getData("ISO3")
countAndCap <- function(inCountry, admLevel){
datdir <- 'data'
dir.create(datdir, showWarnings = F)
adm <- raster::getData("GADM", country = inCountry,
level = admLevel, path = datdir) ## level 2 indicates that we want the counties
## to show, not just state
## level 0 = entire country, lvl 1 = states
  checkCap <- adm$TYPE_1
  # the capital's admin type (e.g. a federal district) occurs only once
  resultCap <- names(table(checkCap))[table(checkCap)<2]
par(mfrow = c(1, 2))
mainc <- adm[adm$NAME_0 == inCountry,]
cap <- adm[adm$TYPE_1 == resultCap,]
plot(mainc, bg = "bisque", axes=T, main = "Entire Country", cex.axis= 0.5, cex.main = 0.7, cex.axis= 0.5)
plot(mainc, lwd = 5, border = "darkorange", add=T)
plot(mainc, col = "peru", add = T)
grid()
box()
mtext(side = 1, "Longitude", line = 2.5, cex=0.5)
mtext(side = 2, "Latitude", line = 2.5, cex=0.5)
plot(cap, bg = 'bisque', axes = T, main = "Location of Capital", cex.axis= 0.5, cex.main = 0.7)
plot(cap, lwd = 5, border = "darkorange", add=T)
plot(cap, col = "peru", add = T)
grid()
box()
invisible(text(getSpPPolygonsLabptSlots(cap),
labels = as.character(cap$TYPE_1), cex = 1,
col = "royalblue4", font = 1))
title(cat("Map of", inCountry, "and\n Area where Capital is Located", outer = T, cex = 2))
mtext(side = 1, "Longitude", line = 2.5, cex=0.5)
mtext(side = 2, "Latitude", line = 2.5, cex=0.5)
text("Projection: Geographic\n
Coordinate System: WGS 1984\n
Data Source: GADM.org", adj = c(0, 0), cex = 0.2, col = "grey20")
}
countAndCap('Mexico', 1)
| /Lesson 1/exercise1.R | no_license | martabc/Geoscripting | R | false | false | 1,704 | r |
##==================================
## Lesson 2 - The Tidyverse Environment
##==================================
# command to clear the workspace
rm(list = ls())
# install packages
# install.packages("tidyverse")
# install.packages("haven")
install.packages(c("tidyverse", "haven", "janitor",
                   "formattable"))
library(haven)       # package for importing data
library(tidyverse)   # package for manipulating data
library(janitor)     # package for summarizing data
library(formattable) # format values as percentages
## Load the World Values Survey (2014) data ========================
# download at http://www.worldvaluessurvey.org/WVSDocumentationWV6.jsp,
# get the SPSS file at the bottom of the site.
# check the current working directory
getwd()
# change directory to find the data file
setwd("C:/Users/thiagomoreira/Documents") # first way
# ctrl (also on mac) + shift + H
# dir() # check how the file name appears in the directory
dir()
wvs_2014 <- read_spss("wvs2014.sav")
names(wvs_2014) # list the variable names
## Analyze the data =========================================
# variable selection
# Variables of interest: V203 (homosexuality),
# V240 (sex)
base_nova <- wvs_2014 %>%
select(V203, V240)
# Rename the variables
base_nova <- base_nova %>%
  rename(homo = V203,
         sexo = V240)
# Tabulate homosexuality
base_nova %>%
  tabyl(homo)
# Sort by the largest number of cases
base_nova %>%
  tabyl(homo) %>%
  arrange(-n)
# Compute means and standard deviation
base_nova %>%
  summarise(media = mean(homo))
base_nova %>%
  summarise(media = mean(homo, na.rm = TRUE))
base_nova %>%
  summarise(desvio_padrao = sd(homo, na.rm = TRUE))
# Tabulate the number of women and men
base_nova %>%
  tabyl(sexo)
base_nova %>%
  tabyl(sexo)*100 # what is the problem with this?
# Check whether women and men differ
base_nova %>%
  group_by(sexo) %>%
  summarise(media = mean(homo, na.rm = T))
# Keep only the women
base_mulheres <- base_nova %>%
  filter(sexo == 2) # MIND THE DOUBLE EQUALS
# Filter out the missings (NA)
base_sem_missing <- base_nova %>%
  filter(!is.na(homo))
# recode the homo variable
base_sem_missing <- base_sem_missing %>%
mutate(homo = case_when(homo <= 4 ~ "direita",
homo == 5 | homo == 6 ~ "centro",
homo >= 7 ~ "esquerda"))
base_sem_missing %>%
tabyl(homo)
# base_nova %>%
# mutate(homo = case_when(homo %in% c(1,2,3,4) ~ "direita",
# homo %in% c(5,6) ~ "centro",
# homo %in% c(7,8,9,10) ~ "esquerda")) %>%
# tabyl(homo)
# recode the sex variable (binary)
# easier with the "ifelse" command
base_sem_missing <- base_sem_missing %>%
mutate(sexo = ifelse(sexo == 1, "homem", "mulher"))
base_sem_missing %>%
tabyl(sexo)
# Better approach: use the "percent" command from the
# "formattable" package
base_sem_missing %>%
tabyl(sexo) %>%
mutate(percent = percent(percent))
# what if I wanted to rename these variables?
# compare men and women with respect to homosexuality
base_sem_missing %>%
crosstab(sexo, homo)
# better way:
base_sem_missing %>%
crosstab(sexo, homo) %>%
adorn_crosstab()
# save the analysis as csv
base_csv <- base_sem_missing %>%
crosstab(sexo, homo) %>%
adorn_crosstab()
write_csv(base_csv, "predilecoes_homossexualidade.csv")
| /aula_2.R | no_license | thiago-ms-cp/programacao_iesp | R | false | false | 3,442 | r |
library(qtl)
### Name: nullmarkers
### Title: Identify markers without any genotype data
### Aliases: nullmarkers
### Keywords: utilities
### ** Examples
# one marker with no data
data(hyper)
nullmarkers(hyper)
# nothing in listeria
data(listeria)
nullmarkers(listeria)
| /data/genthat_extracted_code/qtl/examples/nullmarkers.Rd.R | no_license | surayaaramli/typeRrh | R | false | false | 278 | r |
### LANDFIRE Project
### APEX RMS - Shreeram Senthivasan
### November 2020
### This is a header file that defines memory-safe raster functions that are optimized
### for large data files that cannot easily be held in memory.
# A memory-safe function to convert a raster with multiple values (map zones)
# into a mask for a single value (map zone)
# - Requires an output filename, slower than raster::mask for small rasters
maskByMapzone <- function(inputRaster, maskValue, filename){
# Integer mask values save memory
maskValue <- as.integer(maskValue)
# Let raster::blockSize() decide appropriate blocks to break the raster into
blockInfo <- blockSize(inputRaster)
# Generate empty raster with appropriate dimensions
outputRaster <- raster(inputRaster)
# Calculate mask and write to output block-by-block
outputRaster <- writeStart(outputRaster, filename, overwrite=TRUE)
for(i in seq(blockInfo$n)) {
blockMask <-
getValuesBlock(inputRaster, row = blockInfo$row[i], nrows = blockInfo$nrows[i]) %>%
`==`(maskValue) %>%
if_else(true = maskValue, false = NA_integer_)
outputRaster <- writeValues(outputRaster, blockMask, blockInfo$row[i])
}
outputRaster <- writeStop(outputRaster)
return(outputRaster)
}
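
# A hedged usage sketch (the input file, zone id, and output path are placeholders):
# zoneMask <- maskByMapzone(raster("mapzones.tif"), maskValue = 16,
#                           filename = "zone16_mask.tif")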
# A memory-safe implementation of `raster::unique()` optimized for large rasters
# - ignore are the levels to exclude from the output
uniqueInRaster <- function(inputRaster, ignore = NA) {
# Choose number of blocks to split rasters into when processing to limit memory
blockInfo <- blockSize(inputRaster)
# Calculate unique values in each block
map(
seq(blockInfo$n),
~ unique(getValuesBlock(inputRaster,
row = blockInfo$row[.x],
nrows = blockInfo$nrows[.x]))) %>%
# Consolidate values from each block
flatten_dbl %>%
unique() %>%
# Exclude values to ignore
`[`(!. %in% ignore) %>%
return
}
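
# Sketch (placeholder input): list the map zones present in a raster,
# ignoring NA and an assumed 0 background value
# zones <- uniqueInRaster(raster("mapzones.tif"), ignore = c(NA, 0))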
# A memory-safe implementation of raster::crop() optimized for large rasters
# - Requires an extent object, unlike raster::crop()
# - Requires an output filename, slower than raster::crop for small rasters
cropRaster <- function(inputRaster, filename, outputExtent) {
# Create empty raster to hold output
outputRaster <- raster(inputRaster) %>%
crop(outputExtent)
# Calculate offset from original
offsetAbove <- round((ymax(extent(inputRaster)) - ymax(outputExtent)) / res(inputRaster)[2])
offsetLeft <- round((xmin(outputExtent) - xmin(extent(inputRaster))) / res(inputRaster)[1])
if(offsetAbove < 0 | offsetLeft < 0)
stop("Attempting to crop a smaller raster to a larger extent. This should not happen and could cause unexpected behaviour.")
## Split output into manageable chunks and fill with data from input
blockInfo <- blockSize(outputRaster)
outputRaster <- writeStart(outputRaster, filename, overwrite=TRUE)
for(i in seq(blockInfo$n))
outputRaster <-
writeValues(outputRaster,
getValuesBlock(inputRaster,
row = blockInfo$row[i] + offsetAbove,
nrows = blockInfo$nrows[i],
col = offsetLeft + 1,
ncols = ncol(outputRaster)),
blockInfo$row[i])
outputRaster <- writeStop(outputRaster)
return(outputRaster)
}
# Function to convert a vector, etc. of number into a multiplicative mask
# - Replaces all values with 1, NA remains as NA
# - Assumes no negative numbers (specifically, no -1)
maskify <- function(x) {
x <- x + 1L # Used to avoid divide by zero errors, this is why -1 is not acceptable
return(x / x)
}
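# For example, maskify(c(0, 7, NA)) returns c(1, 1, NA): the +1L shift avoids
# 0/0 on zero-valued cells, and NA propagates through the division.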
# A memory safe implementation of raster::mask() optimized for large rasters
# - Requires an output filename, slower than raster::mask for small rasters
# - Input and mask rasters must have same extent (try cropRaster() if not)
maskRaster <- function(inputRaster, filename, maskingRaster){
# Create an empty raster to hold the output
outputRaster <- raster(inputRaster)
## Split output into manageable chunks and fill with data from input
blockInfo <- blockSize(outputRaster)
## Calculate mask and write to output block-by-block
outputRaster <- writeStart(outputRaster, filename, overwrite=TRUE)
# Each block of the mask raster is converted into a multiplicative mask
# and multiplied with the corresponding block of the input raster
for(i in seq(blockInfo$n)) {
maskedBlock <-
getValuesBlock(maskingRaster, row = blockInfo$row[i], nrows = blockInfo$nrows[i]) %>%
maskify %>%
`*`(getValuesBlock(inputRaster, row = blockInfo$row[i], nrows = blockInfo$nrows[i]))
outputRaster <- writeValues(outputRaster, maskedBlock, blockInfo$row[i])
}
outputRaster <- writeStop(outputRaster)
return(outputRaster)
}
# A function to crop rasters down to remove borders filled with only NA's
# - Uses binary search to quickly process large rasters with large empty borders
# - Requires an output filename, slower than raster::trim for small rasters
# - maxBlockSizePower is an integer that will be used to calculate the max number
# of rows / cols to load into memory. Specifically 2^maxBlockSizePower rows and
# cols will be loaded at most at a time
trimRaster <- function(inputRaster, filename, maxBlockSizePower = 11){
# Reading portions of large rasters that can't be held in memory is slow, so
# we want to minimize the number of reads as we identify how much we can trim
# off each of the four sides of the raster
# One simplistic approach is to check large blocks at a time until a block
# with non-NA data is found. Next halve the size of the search block and
# continue searching. Keep halving the width of the search block until you
# find the single first column with data. This is effectively a binary search
# once the first (largest) block with non-NA data is found.
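# For example, with maxBlockSizePower = 11 the search reads 2048-row blocks
# until it hits data, then probes widths 1024, 512, ..., 1, so each side of
# the raster costs on the order of a dozen reads rather than thousands of
# single-row reads.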
# Setup --------------------------------------------------------------------
# Decide how to split input into manageable blocks
maxBlockSize <- 2^(maxBlockSizePower)
# Make sure max block size is smaller than the number of columns and rows
while(ncol(inputRaster) < maxBlockSize | nrow(inputRaster) < maxBlockSize){
maxBlockSize = maxBlockSize / 2
maxBlockSizePower = maxBlockSizePower - 1
}
descendingBlockSizes <- 2^((maxBlockSizePower-1):0)
# Initialize counters
trimAbove <- 1
trimBelow <- nrow(inputRaster) + 1
trimLeft <- 1
trimRight <- ncol(inputRaster) + 1
# Top ----------------------------------------------------------------------
# Search for the first block from the top with data
while(
getValuesBlock(inputRaster,
row = trimAbove,
nrows = maxBlockSize) %>%
is.na %>%
all
)
trimAbove <- trimAbove + maxBlockSize
# Now do a binary search for the first row with data
for(i in descendingBlockSizes)
if(getValuesBlock(inputRaster, row = trimAbove, nrows = i) %>% is.na %>% all)
trimAbove <- trimAbove + i
# Pad if possible
trimAbove <- max(trimAbove - 2, 0)
# Bottom -------------------------------------------------------------------
# Repeat from the bottom up, first finding a block that is not all NA
while(
getValuesBlock(inputRaster,
row = trimBelow - maxBlockSize,
nrows = maxBlockSize) %>%
is.na %>%
all
)
trimBelow <- trimBelow - maxBlockSize
# Binary search for last row with data
for(i in descendingBlockSizes)
if(getValuesBlock(inputRaster, row = trimBelow - i, nrows = i) %>% is.na %>% all)
trimBelow <- trimBelow - i
# Calculate height of the trimmed raster
outputRows <- trimBelow - trimAbove
# Left --------------------------------------------------------------------
# Search for the first block from the left with data
while(
getValuesBlock(inputRaster,
col = trimLeft,
ncols = maxBlockSize,
row = trimAbove,
nrows = outputRows) %>%
is.na %>%
all
)
trimLeft <- trimLeft + maxBlockSize
# Now do a binary search for the first row with data
for(i in descendingBlockSizes)
if(getValuesBlock(inputRaster,
col = trimLeft,
ncols = i,
row = trimAbove,
nrows = outputRows) %>%
is.na %>%
all)
trimLeft <- trimLeft + i
# Pad if possible
trimLeft <- max(trimLeft - 2, 0)
# Right --------------------------------------------------------------------
# Repeat for the first block from the right with data
while(
getValuesBlock(inputRaster,
col = trimRight - maxBlockSize,
ncols = maxBlockSize,
row = trimAbove,
nrows = outputRows) %>%
is.na %>%
all
)
trimRight <- trimRight - maxBlockSize
# Now do a binary search for the first row with data
for(i in descendingBlockSizes)
if(getValuesBlock(inputRaster,
col = trimRight - i,
ncols = i,
row = trimAbove,
nrows = outputRows) %>%
is.na %>%
all)
trimRight <- trimRight - i
# Calculate width of the trimmed raster
outputCols <- trimRight - trimLeft
# Crop ---------------------------------------------------------------------
# Don't crop if there is nothing to crop
if(trimAbove == 1 & trimLeft == 1 & trimBelow == nrow(inputRaster) + 1 & trimRight == ncol(inputRaster) + 1)
return(writeRaster(inputRaster, filename = filename, overwrite = T))
# Convert trim variables to x,y min,max
outXmin <- xmin(extent(inputRaster)) + trimLeft * res(inputRaster)[1]
outXmax <- outXmin + outputCols * res(inputRaster)[1]
outYmax <- ymax(extent(inputRaster)) - trimAbove * res(inputRaster)[2]
outYmin <- outYmax - outputRows * res(inputRaster)[2]
return(
cropRaster(
inputRaster,
filename,
extent(c(xmin = outXmin,
xmax = outXmax,
ymin = outYmin,
ymax = outYmax))
)
)
}
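# Illustrative call (paths are assumptions, not part of the original script):
# trimmed <- trimRaster(raster("padded.tif"), filename = "trimmed.tif")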
# Function to create and save a binary raster given a non-binary raster and the
# value to keep
# - Used to binarize disturbance maps for use as spatial multipliers in SyncroSim
saveDistLayer <- function(distValue, distName, fullRaster, transitionMultiplierDirectory) {
writeRaster(
layerize(fullRaster, classes = distValue),
paste0(transitionMultiplierDirectory, distName, ".tif"),
overwrite = TRUE
)
}
# Function to generate a tiling mask given template
# - To avoid holding the entire raster in memory, the raster is written directly
# to file row-by-row. This way only one row of the tiling needs to be held in
# memory at a time. This is also why the number of rows (and implicitly the size
# of a given row) cannot be chosen manually
# - tileSize sets the target number of cells per tile; tiles smaller than
#   this threshold are consolidated into neighboring groups (see
#   consolidateGroups())
tilize <- function(templateRaster, filename, tempfilename, tileSize) {
# Calculate recommended block size of template
blockInfo <- blockSize(templateRaster)
# Check that the blockSize is meaningful
# - This should only matter for very small rasters, such as in test mode
if(max(blockInfo$nrows) == 1)
blockInfo <- list(row = 1, nrows = nrow(templateRaster), n = 1)
# Calculate dimensions of each tile
tileHeight <- blockInfo$nrows[1]
tileWidth <- floor(tileSize / tileHeight)
# Calculate number of rows and columns
ny <- blockInfo$n
nx <- ceiling(ncol(templateRaster) / tileWidth)
  # Generate a vector of zeros the width of one tile
  oneTileWidth <- rep(0, tileWidth)
  # Generate one line of the tiling, then repeat it to the height of one tile row
oneRow <-
as.vector(vapply(seq(nx), function(i) oneTileWidth + i, FUN.VALUE = numeric(tileWidth))) %>%
`[`(1:ncol(templateRaster)) %>% # Trim the length of one row to fit in template
rep(tileHeight)
  # Create an empty raster with the correct metadata
tileRaster <- raster(templateRaster)
# Write tiling to file row-by-row
tileRaster <- writeStart(tileRaster, filename, overwrite=TRUE)
for(i in seq(blockInfo$n)) {
if(blockInfo$nrows[i] < tileHeight)
oneRow <- oneRow[1:(ncol(tileRaster) * blockInfo$nrows[i])]
tileRaster <- writeValues(tileRaster, oneRow, blockInfo$row[i])
oneRow <- oneRow + nx
}
tileRaster <- writeStop(tileRaster)
# Mask raster by template
tileRaster <-
maskRaster(tileRaster, tempfilename, maskingRaster = templateRaster)
# Consolidate small tiles into larger groups
# - We want a map from the original tile IDs to new consolidated tile IDs
reclassification <-
# Find the number of cells in each tile ID
tabulateRaster(tileRaster) %>%
# Sort by ID to approximately group tiles by proximity
arrange(value) %>%
# Consolidate into groups up to size tileSize
transmute(
from = value,
to = consolidateGroups(freq, tileSize)) %>%
as.matrix()
# Reclassify the tiling raster to the new consolidated IDs
tileRaster <-
reclassify(
tileRaster,
reclassification,
filename = filename,
overwrite = T)
return(tileRaster)
}
# Generate a table of values present in a raster and their frequency
# - values are assumed to be integers
tabulateRaster <- function(inputRaster) {
# Calculate recommended block size of template
blockInfo <- blockSize(inputRaster)
# Calculate frequency table in each block and consolidate
tables <- map(
seq(blockInfo$n),
~ table(getValuesBlock(inputRaster,
row = blockInfo$row[.x],
nrows = blockInfo$nrows[.x]))) %>%
map(as.data.frame) %>%
do.call(rbind, .) %>% # do.call is used to convert the list of tables to a sequence of arguments for `rbind`
rename(value = 1) %>%
group_by(value) %>%
summarize(freq = sum(Freq)) %>%
ungroup() %>%
mutate(value = value %>% as.character %>% as.numeric) %>% # Convert from factor to numeric
return
}
# Takes a vector of sizes (input) and a maximum size per group (threshold) and
# returns a vector of integers assigning the inputs to groups up to size threshold
# - Used to consolidate tiling groups into more even groups
consolidateGroups <- function(input, threshold) {
# Initialized counters and output
counter <- 1
cumulator <- 0
output <- integer(length(input))
# For each input, decide whether or not to start a new group
# Store that decision in output
for(i in seq_along(input)) {
cumulator <- cumulator + input[i]
if(cumulator > threshold) {
cumulator<- input[i]
counter <- counter + 1
}
output[i] <- counter
}
return(output)
}
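# Worked example (added for illustration):
# consolidateGroups(c(3, 4, 2, 5), threshold = 6) returns c(1, 2, 2, 3);
# the running total resets to the current element whenever adding it would
# push the current group past the threshold.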
separateStateClass <- function(stateClassRaster, evcRasterPath, evhRasterPath) {
# Create empty rasters to hold EVC and EVH data
evcRaster <- raster(stateClassRaster)
evhRaster <- raster(stateClassRaster)
## Split state class raster into manageable chunks
blockInfo <- blockSize(stateClassRaster)
## Calculate EVC and EVH from State Class block-by-block and write the results to their respective files
evcRaster <- writeStart(evcRaster, evcRasterPath, overwrite=TRUE)
evhRaster <- writeStart(evhRaster, evhRasterPath, overwrite=TRUE)
for(i in seq(blockInfo$n)) {
stateClassValues <-
getValuesBlock(stateClassRaster, row = blockInfo$row[i], nrows = blockInfo$nrows[i])
evcValues <- as.integer(stateClassValues / 1000) # EVC is the first three digits of the six digit state class code
evhValues <- stateClassValues %% 1000 # EVH is the last three digits, `%%` is the modulo, or remainder function
evcRaster <- writeValues(evcRaster, evcValues, blockInfo$row[i])
evhRaster <- writeValues(evhRaster, evhValues, blockInfo$row[i])
}
# End writing to rasters
evcRaster <- writeStop(evcRaster)
evhRaster <- writeStop(evhRaster)
# Silent return
invisible()
} | /scripts/rasterFunctions.R | permissive | ApexRMS/landfireupdate | R | false | false | 16,217 | r |
# =======================================================
# Clustering Methods : K-Means and Hierarchical
# =======================================================
# Load and explore the dataset
attach(iris)
help(iris)
str(iris)
# Subset the features to get 2-D dataset
irisData <- as.data.frame(iris[,c(1,3)])
plot(irisData, pch = 19)
# =======================================================
# Clustering Methods : Part 1 : K-Means Clustering
# =======================================================
# Set an interesting color palette for visualization
# install.packages("RColorBrewer")
library(RColorBrewer)
# display.brewer.all()
myPal <- brewer.pal(n = 9, name = "Set1")
# K-Means Clustering
# Set the value of K
K <- 6
kMeansFit <- kmeans(irisData, centers = K)
kMeansFit
# Index the colour vector directly: palette(myPal) would set the palette and
# return the *previous* one, colouring the first plot with the wrong palette
plot(irisData, pch = 19, col = myPal[as.numeric(kMeansFit$cluster)])
# K-Means Clustering with multiple random starts
# Set the value of K
K <- 4
kMeansFit <- kmeans(irisData, centers = K, nstart = 20)
kMeansFit
plot(irisData, pch = 19, col = myPal[as.numeric(kMeansFit$cluster)])
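# Choosing K by eye is arbitrary; a common heuristic (added here as an
# illustration, not part of the original script) is the elbow plot of the
# total within-cluster sum of squares across candidate K:
wss <- sapply(1:8, function(k) kmeans(irisData, centers = k, nstart = 20)$tot.withinss)
plot(1:8, wss, type = "b", pch = 19, xlab = "K", ylab = "Total within-cluster SS")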
# =======================================================
# Clustering Methods : Part 2 : Hierarchical Clustering
# =======================================================
# Hierarchical Clustering : Complete Linkage
hiercFit <- hclust(dist(irisData), method="complete")
hiercFit
plot(hiercFit, main="Complete Linkage", xlab="", ylab="", sub="", cex =.5)
K <- 3
plot(hiercFit, main="Complete Linkage", xlab="", ylab="", sub="", cex =.5)
rect.hclust(hiercFit, k = K)
plot(irisData, pch = 19, col = myPal[as.numeric(cutree(hiercFit, k = K))])
# Hierarchical clustering : Average Linkage
hiercFit <- hclust(dist(irisData), method="average")
hiercFit
plot(hiercFit, main="Average Linkage", xlab="", ylab="", sub="", cex =.5)
K <- 3
plot(hiercFit, main="Average Linkage", xlab="", ylab="", sub="", cex =.5)
rect.hclust(hiercFit, k = K)
plot(irisData, pch = 19, col = myPal[as.numeric(cutree(hiercFit, k = K))])
# Hierarchical clustering : Single Linkage
hiercFit <- hclust(dist(irisData), method="single")
hiercFit
plot(hiercFit, main="Single Linkage", xlab="", ylab="", sub="", cex =.5)
K <- 3
plot(hiercFit, main="Single Linkage", xlab="", ylab="", sub="", cex =.5)
rect.hclust(hiercFit, k = K)
plot(irisData, pch = 19, col = myPal[as.numeric(cutree(hiercFit, k = K))])
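# The three linkages can disagree substantially (single linkage in particular
# tends to "chain"); a quick cross-tabulation of the complete- vs
# single-linkage assignments makes this visible (illustrative addition):
table(cutree(hclust(dist(irisData), method = "complete"), k = 3),
      cutree(hclust(dist(irisData), method = "single"), k = 3))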
# =======================================================
# Clustering Methods : Part 3 : Graph Clustering
# =======================================================
# install.packages("igraph")
library(igraph)
# Example Graph : Zachary's Karate Club
graphZKC <- make_graph("Zachary")
plot(graphZKC,
layout = layout.kamada.kawai,
vertex.size = 7,
vertex.color = "darkgray",
edge.width = 1,
edge.color = "darkgray",
vertex.label = NA)
# Fast Greedy Clustering on the Graph
fgcFit <- cluster_fast_greedy(graphZKC)
fgcFit
membership(fgcFit)
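# Modularity of the detected partition (standard igraph, though not in the
# original script); values closer to 1 indicate stronger community structure:
modularity(fgcFit)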
plot(graphZKC,
layout = layout.kamada.kawai,
vertex.size = 7,
     vertex.color = myPal[as.numeric(membership(fgcFit))],
edge.width = 1,
edge.color = "darkgray",
vertex.label = NA)
| /W09_ClusteringMethods.R | no_license | sgsourav/crashsl | R | false | false | 3,183 | r |
library(DT)
library(plotly)
navbarPage(
"Nasze dane pomocy studenta",
tabPanel(
"Porównanie ogólne",
sidebarLayout(
sidebarPanel(
selectInput(
"domain_select",
label = "Wybierz domenę lub kliknij na słupek poniżej:",
choices = c("stackoverflow.com",
"wikipedia.org",
"github.com",
"pw.edu.pl",
"youtube.com",
"google.com",
"facebook.com",
"instagram.com"),
selected = "stackoverflow.com"),
plotOutput("plot_comp", click = "plot_comp_click", height = "300px"),
h2("Cześć!"),
p("Skoro już do nas zajrzałeś, to zechcemy opowiedzieć Ci o naszym projekcie, który przedwsięwzieliśmy z zajęć TWD.
Trzech śmiałków: Kacper, Jakub oraz Janek dobrodusznie udostępnili swoje dane przeglądarkowe by móc poddać je analizie.
Sprawdzone zostało w jakim stopniu korzystali ze ston znanych każdemu szanującemu się studentowi IT, porkoju stackoverflow czy github.\n
Nasze przestawiliśmy na trzech stonach, na każdej z nich ukazana została inna idea naszej analizy. Zapraszamy do użytkowania !!!
"),
# p("Oto link do repozytorium zawierające cały projekt:", tags$a(href="https://github.com/niegrzybkowski/studia_twd_p3","github.com/niegrzybkowski/studia_twd_p3")),
hr(),
print("Projekt przygotowany przez Kacpra Grzymkowskiego, Jakuba Fołtyna, Jana Gąske")
),
mainPanel(plotOutput("plot_1", height = "500px"),
p("Powyższy wykres przedstawia powównanie dynamiki zmian ilości średniej liczby wejść, zestawionej dla każdego z nas,
dla wybranej przez Ciebie stony. Dynamika zestawiona została dla całego okresu pobioru danych, tudzież od stycznia 2020 do
początku stycznia 2021 roku.")
)
)
),
tabPanel("Średnia aktywność w tygodniu",
sidebarPanel(
selectInput("user_select1",
label = "Wybierz użytkownika:",
choices = c("Kacper" = "kacper",
"Jakub" = "jakub",
"Janek" = "jan"),
multiple = FALSE,
selected = "Kacper"),
selectInput("day_select1",
label = "Wybierz dzień tygodnia:",
choices = c("poniedziałek" = "pon",
"wtorek" = "wt",
"środa" = "sr",
"czwartek" = "czw",
"piątek" = "pia",
"sobota" = "sob",
"niedziela" = "niedz"),
multiple = FALSE,
selected = "Poniedziałek"),
selectInput("domain_select1",
label = "Wybierz domenę:",
choices = c("stackoverflow.com",
"wikipedia.org",
"github.com",
"pw.edu.pl",
"youtube.com",
"google.com",
"facebook.com",
"instagram.com"),
selected = "stackoverflow.com"),
h3("Kiedy i jak często odwiedzamy dane domeny?"),
p("Jakie są nasze nawyki, kiedy najbardziej lubimy odwiedzać daną stronę, kiedy potrzebujemy jej pomocy, w jaki dzień tygodnia
, w jaką godzinę i kto jest najbardziej skłonny do ich odwiedzania?"),
p("Przedstawione po prawej wykresy informują Cię właśnie a propos powyższych pytań, zebrane średnie odzwierciedlają nasze dane średniej ilości
wejść na dany dzień tygodnia, oraz na poszczególne godziny w wybranym dniu tygodnia. Dociekając w danych możesz łatwo dopatrzeć się ciekawych wyników,
na przykład, zmniejszonej ilości wejść na strony programistyczne w weekendy, oraz dowiedzieć się kto jest nocnym markiem, a kto rannym ptaszkiem")
),
mainPanel(
plotOutput("plot_weekdays", click = "plot_click"),
plotOutput("plot_weekhours")
)),
tabPanel(
"Balans rozrywka/edukacja",
br(),
br(),
sidebarPanel(
h4("Nie samą pracą człowiek żyje."),
p("Ciężka praca się opłaca, jednakże rozrywka i relaks również są ważne, w tej części przygotowaliśmy animację porównującą wejścia
dwóch użytkowników na strony o charakterze edukacyjnym i na strony o charakterze rozrywkowym, w całym okresie analizy danych."),
p("Położenie na osi X informuje o ilości wejść na stony rozrywkowe, a oś Y ilość wejść na strony edukacyjne.")
),
mainPanel(
plotlyOutput("plotly_scatter")
)
))
| /dashboard/ui.R | no_license | niegrzybkowski/projekt_ja_twd_studia | R | false | false | 5,195 | r |
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/sparse_matrix_kernels.R
\name{SparseQrAllocator}
\alias{SparseQrAllocator}
\title{Allocates and initializes input to the QR factorization sparse matrix kernel
microbenchmarks}
\usage{
SparseQrAllocator(benchmarkParameters, index)
}
\arguments{
\item{benchmarkParameters}{an object of type
\code{\link{SparseMatrixMicrobenchmark}} specifying various parameters
needed to generate input for the sparse matrix kernel.}
\item{index}{an integer index indicating the dimensions of the matrix or
vector data to be generated as input for the sparse matrix kernel.}
}
\description{
\code{SparseQrAllocator} allocates and initializes the sparse matrix that is
input to the sparse matrix kernel for the purposes of conducting a single
performance trial with the \code{SparseQrMicrobenchmark} function. The
matrix is populated and returned in the \code{kernelParameters} list.
}
| /man/SparseQrAllocator.Rd | permissive | cran/RHPCBenchmark | R | false | true | 947 | rd |
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/Preprocessing.R
\name{wrapLfcFilter}
\alias{wrapLfcFilter}
\title{Filter on Log Fold Change}
\usage{
wrapLfcFilter(A, aT, qT)
}
\arguments{
\item{A}{Chromosome level data}
\item{aT}{Absolute threshold}
\item{qT}{Quantile threshold}
}
\description{
Filter on Log Fold Change
}
| /man/wrapLfcFilter.Rd | no_license | hjanime/DCS | R | false | true | 356 | rd |
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/deprecated.R
\name{stats_default}
\alias{stats_default}
\alias{stats_normal}
\alias{stats_nonnormal}
\title{Define a list of default statistics}
\usage{
stats_default(data)
stats_normal(data)
stats_nonnormal(data)
}
\arguments{
\item{data}{A dataframe}
}
\value{
A list of statistical functions
}
\description{
Define a list of default statistics
}
\keyword{deprecated}
| /man/stats_default.Rd | no_license | cran/desctable | R | false | true | 450 | rd |
predict.penSVM<-function(object, newdata, newdata.labels=NULL,labels.universe=NULL, ...){
# calculate test error, confusion table, sensitivity and specificity for test data
# input:
# newdata: matrix of ratios, rows - samples, columns - clones
# newdata.labels - vector of classes for samples
# object - fit of trained data (produced by svm.fs)
# labels.universe: important for models produced by loocv: all possible labels in the particular data set
pred.class<- NULL
tab.classes<- NULL
error.classes<- NA
sensitivity.classes<- NA
specificity.classes<- NA
# fit model
f<-object$model
	# if we have a model, calculate the test error...
if (length(f)<=1 ) {
stop("Model is empty!")
}else {
# 2 different data possible: for GENES and for CLASSES (here only for classes)
# separating line: x*w+b=f(x)
sep = as.matrix(newdata)[,f$xind, drop=FALSE] %*% f$w + f$b
# for classes -1 and 1 : class = 2*(sep > 0) - 1
pred.class = factor(2*as.numeric (sep > 0) -1)
if (!is.null(newdata.labels)){
# missclassification table for CLASSES
tab.classes<-table(pred.class, newdata.labels)
# 3. sensitivity and specificity for CLASSES
if (!is.null(tab.classes)){
# if in test only one sample --> extend tab.classes to complete labels to labels.universe
# example :
#tab.classes
# newdata.labels
#pred.class 1
# 1 1
# labels.universe= c("-1","1")
if (nrow(tab.classes)!= ncol(tab.classes) | nrow(tab.classes)== 1 ) tab.classes<-.extend.to.quad.matrix (tab.classes, labels.universe=labels.universe)
# sensitivity = TP/ all P = TP /(TP + FN)
sensitivity.classes<- tab.classes[2,2]/sum(tab.classes[,2])
# specificity = TN/ all N = TN /(TN + FP)
specificity.classes <- tab.classes[1,1]/sum(tab.classes[,1])
# secondary diagonal
sec.diag<-c(); for (j in 1:ncol( tab.classes)) sec.diag<- c(sec.diag, tab.classes[ncol( tab.classes)-j+1,j] )
error.classes<- ( sum(sec.diag) ) / sum( tab.classes)
}
}# end of if (!is.null(newdata.labels))
return(list(pred.class = pred.class,
fitted=sep,
tab=tab.classes,
error=error.classes,
sensitivity=sensitivity.classes,
specificity=specificity.classes ))
} # end of ifelse (is.null(f))
}
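# Illustrative check of the sensitivity/specificity bookkeeping above, using
# synthetic labels (not penalizedSVM output):
# pred  <- factor(c(-1, -1, 1, 1, 1))
# truth <- factor(c(-1,  1, 1, 1, -1))
# tab <- table(pred, truth)    # rows: predicted, cols: true
# tab[2, 2] / sum(tab[, 2])    # sensitivity = TP / (TP + FN) = 2/3
# tab[1, 1] / sum(tab[, 1])    # specificity = TN / (TN + FP) = 1/2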
| /penalizedSVM/R/predict.R | no_license | ingted/R-Examples | R | false | false | 2,353 | r |
library(censusapi)
censuskey <- "20f2773e863a33466e0fbace2116919a1c67e4a5"
library(devtools)
devtools::install_github("hrecht/censusapi")
vars2015 <- listCensusMetadata(name= "acs/acs5", vintage=2015, "v")
head(vars2015)
write.csv( vars2015, "2015ACS5DataDictionary.csv", row.names=F)
vars.2015 <-read.csv( file="C:/Users/aehende1/Desktop/Capstone/DataProfile_2015edited.csv", header=TRUE, sep=",")
vars.2015.list <- vars.2015$name # downstream code indexes vars.2015.list
dat.2015 <- getCensus(name= "acs/acs5", vintage= 2015, key=censuskey, vars=vars.2015.list,
region="tract:*", regionin="state:04+county:013")
# TODO: Add back in GEO_ID to all variable lists
write.csv( dat.2015, "2015ACS5Data.csv", row.names=F)
library(tidycensus) # get_acs() comes from tidycensus, which the script never loaded
test2015 <- get_acs(geography= "tract", variables= vars.2015.list,
year=2015, state="04", geometry=TRUE, key=censuskey)
# Placeholder comparisons - neither the 2000 tract data nor dat.2010 is created
# in this script, so these calls are commented out with the placeholders intact:
# setdiff( PUT 2000 DATA HERE, dat.2010$tract)
# setdiff( dat.2010$tract, PUT 2000 DATA HERE)
dir.create("test_vars")
setwd("test_vars")
for( i in 1:length( vars.2015.list))
{
var.name <- vars.2015.list[i]
var.i <- getCensus( key=censuskey, name="acs/acs5", vintage=2015, vars=var.name,
region="tract:*", regionin="state:04+county:013")
  write.csv(x=var.i, file=paste0(var.name, ".csv"), row.names=FALSE) # row.names belongs to write.csv, not paste0
}
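# The per-variable files written above can be reassembled later, e.g.
# (illustrative, assumes the geography columns are shared across files):
# files <- list.files("test_vars", pattern = "\\.csv$", full.names = TRUE)
# combined <- Reduce(function(a, b) merge(a, b, by = c("state", "county", "tract")),
#                    lapply(files, read.csv))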
var.i <- getCensus( key=censuskey, name="acs/acs5",
vintage=2015, vars="B01001_001E",
region="tract:*",
regionin="state:04+county:013" )
var.i
| /data/archive/2015CensusData.R | no_license | lecy/neighborhood_change_phx | R | false | false | 1,480 | r |
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/mod_recruitplot.R
\name{mod_recruitplot_UI}
\alias{mod_recruitplot_UI}
\title{Shiny module UI function for recruitment plot display}
\usage{
mod_recruitplot_UI(id, label)
}
\arguments{
\item{id}{string containing a namespace identifier}
\item{label}{string to be used as sidebar tab label}
}
\value{
shiny.tag list object containing the tab item content
}
\description{
This function represents a shiny dashboard UI module that allows users to
view a secuTrialR recruitment plot.
}
\seealso{
\code{\link{mod_recruitplot_srv}}
}
| /man/mod_recruitplot_UI.Rd | permissive | SwissClinicalTrialOrganisation/secuTrialRshiny | R | false | true | 607 | rd |
#GAMs
#Models for lat and SST
library(mgcv)
library(broom)
library(ggplot2)
#+s(Storms, k = 4)
#+s(Year) +s(Max_SST_temp)
#+s(Storms, k = 4) +s(Max_K_index, k=4) +s(Organisations)
#From the modelling folder
Model_data_wlat <- read.csv("Model_data_wlat.csv")
Model_data_wlat$X <- NULL
Model_data_wlat$X.1 <- NULL
#What would my offset be?
All_lat <- gam(Maximum_latitude ~ s(Year, Species, bs="fs"), data = Model_data_wlat,
method = "REML", family=gaussian())
#+s(Max_SST) +s(Storms, k = 4)
#+s(Max_K_index, k= 4)
summary(All_lat)
plot(All_lat)
par(mfrow=c(2,2))
gam.check(All_lat)
#AIC (for comparison against alternative model specifications)
AIC(All_lat)
#Visualisation
vis.gam(All_lat)
vis.gam(All_lat, n.grid = 50, theta = 35, phi = 32, zlab = "additional",
        ticktype = "detailed", color = "topo")
#Add an factor smooth interaction to these GAMs
#Here - add a northsouth column
#Adding new columns to the above with NA at first
Model_data_wlat["Northsouth"] <- "NA"
#Make the data in that column
Model_data_wlat$Northsouth <- Model_data_wlat$Maximum_latitude
#Changing the data in the model from max lat to north south
Model_data_wlat$Northsouth[Model_data_wlat$Northsouth > 55.5] <- "North"
Model_data_wlat$Northsouth[Model_data_wlat$Northsouth < 55.5] <- "South"
Model_data_wlat$Northsouth[Model_data_wlat$Northsouth == 55.5] <- "South"
#Need to make the character a factor (as before) - was getting error messages
Model_data_wlat$Northsouth <- as.factor(Model_data_wlat$Northsouth)
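#Quick sanity check of the split (counts on each side of 55.5)
table(Model_data_wlat$Northsouth)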
#Run this as a factor smooth in the GAM
All_lat <- gam(Maximum_latitude ~ +s(NAO_index, Northsouth, bs="fs") +s(Year), data = Model_data_wlat,
method = "REML", family=gaussian())
#+s(Max_SST) +s(Storms, k = 4)
#+s(Max_K_index, k= 4)
summary(All_lat)
plot(All_lat)
par(mfrow=c(2,2))
gam.check(All_lat)
#AIC (for comparison against alternative model specifications)
AIC(All_lat)
#Species specifics ============================================================================
#Strip out species from Model_data_wlat
#Striped dolphins first
S_coeruleoalba_wlat <- Model_data_wlat %>%
filter(Species == "Stenella coeruleoalba")
#Model the above
SC_lat <- gam(Maximum_latitude ~ +s(Year, Northsouth, bs="fs"), data = S_coeruleoalba_wlat,
method = "REML", family=gaussian())
summary(SC_lat)
plot(SC_lat)
par(mfrow=c(2,2))
gam.check(SC_lat)
#Plot of max Striped latitude
ggplot() +
geom_point(data = S_coeruleoalba_wlat, aes(x = Year, y = Maximum_latitude)) +
theme_bw()
#White beaked dolphins
L_albirostris_wlat <- Model_data_wlat %>%
filter(Species == "Lagenorhynchus albirostris")
#Model the above
L_Alb_lat <- gam(Maximum_latitude ~ +s(Year, Northsouth, bs="fs"), data = L_albirostris_wlat,
method = "REML", family=gaussian())
#Playing around with north/south plots - visual interpretation
L_albirostris <- UK_IRL_stranding_events %>%
filter(Name.Current.Sci == "Lagenorhynchus albirostris")
L_albirostris_north <- L_albirostris %>%
filter(Latitude > 55.5)
L_albirostris_south <- L_albirostris %>%
filter(Latitude < 55.5)
ggplot() +
  geom_point(data = L_albirostris_north, aes(x = Year, y = Latitude), colour = "blue") +
  geom_point(data = L_albirostris_south, aes(x = Year, y = Latitude), colour = "red") +
theme_bw()
summary(L_Alb_lat)
plot(L_Alb_lat)
par(mfrow=c(2,2))
gam.check(L_Alb_lat)
#Plot of max White beaked dolphin latitude
ggplot() +
geom_point(data = L_albirostris_wlat , aes(x = Year, y = Maximum_latitude)) +
theme_bw()
#Fin whale
B_physalus_wlat <- Model_data_wlat %>%
filter(Species == "Balaenoptera physalus")
#Model the above
BP_lat <- gam(Maximum_latitude ~ +s(Year, Northsouth, bs="fs"), data = B_physalus_wlat,
method = "REML", family=gaussian())
summary(BP_lat)
plot(BP_lat)
par(mfrow=c(2,2))
gam.check(BP_lat)
#Plot of max Fin latitude
ggplot() +
geom_point(data = B_physalus_wlat, aes(x = Year, y = Maximum_latitude)) +
theme_bw()
#Common dolphins
D_delphis_wlat <- Model_data_wlat %>%
filter(Species == "Delphinus delphis")
#Model the above
DD_lat <- gam(Maximum_latitude ~ +s(Year, Northsouth, bs="fs"), data = D_delphis_wlat,
method = "REML", family=gaussian())
summary(DD_lat)
plot(DD_lat)
par(mfrow=c(2,2))
gam.check(DD_lat)
#Plot of max Common dolphin latitude
ggplot() +
geom_point(data = D_delphis_wlat, aes(x = Year, y = Maximum_latitude)) +
theme_bw()
#Harbour porpoise
P_phocoena_wlat <- Model_data_wlat %>%
filter(Species == "Phocoena phocoena")
#Model the above
PP_lat <- gam(Maximum_latitude ~ +s(Year, Northsouth, bs="fs"), data = P_phocoena_wlat,
method = "REML", family=gaussian())
summary(PP_lat)
plot(PP_lat)
par(mfrow=c(2,2))
gam.check(PP_lat)
#Plot of max Harbour porpoise latitude
ggplot() +
geom_point(data = P_phocoena_wlat, aes(x = Year, y = Maximum_latitude)) +
theme_bw()
 | /modelling/practice/Lat_SST_GAMs.R | permissive | EllenJCoombs/cetacean-strandings-project | R | false | false | 4,858 | r |
#Helpers
library(data.table)
library(magrittr)
library(odbc)
library(RODBC)
library(tidyverse)
con<- odbcConnect("DB")
FundQuery <- stringr::str_c(
'select
f.fundName
,fe.wsj_symbol as abbreviation
,r.profile_date as PROFILE_DATE
,r.return_code
,r.1mReturn
,r.1yReturn
,r.3yReturn
,r.5yReturn
,r.7yReturn
,r.10yReturn
,r.15yReturn
from fund f
left join monthly_returns r on f.fund_id = r.fund_id
left join fund_ext fe on f.fund_id = fe.fund_id
where f.fund_id in
(fund numbers
)
and return_code in (0, 8, 24)
') %>%
sqlQuery(con, ., stringsAsFactors = F) %>%
data.table::data.table(.) %>%
data.table::melt(., id.vars = 1:4, variable.name = "TIME_PERIOD",
value.name = "RETURN", variable.factor = F) %>%
.[!is.na(RETURN)] %>%
.[, PROFILE_DATE := as.Date(PROFILE_DATE )] %>%
.[, return_code := as.character(return_code)]
odbcCloseAll()
##############
#############
con<- odbcConnect("db")
FundStack <- stringr::str_c(
'select
f.fundName
,fe.wsj_symbol as abbreviation
,r.profile_date as PROFILE_DATE
,r.return_code
,r.1mReturn
,r.1yReturn
,r.3yReturn
,r.5yReturn
,r.7yReturn
,r.10yReturn
from fund f
left join monthly_returns r on f.fund_id = r.fund_id
left join fund_ext fe on f.fund_id = fe.fund_id
where f.fund_id = fund number
and return_code in (0, 8, 24)
') %>%
sqlQuery(con, ., stringsAsFactors = F) %>%
data.table::data.table(.) %>%
data.table::melt(., id.vars = 1:4, variable.name = "TIME_PERIOD",
value.name = "RETURN", variable.factor = F) %>%
.[!is.na(RETURN)] %>%
.[, PROFILE_DATE := as.Date(PROFILE_DATE )] %>%
.[, return_code := as.character(return_code)] %>%
mutate(abbreviation = 'Fund') %>%
data.table::data.table(.)
odbcCloseAll()
#
FundQuery <- FundStack %>%
mutate(return_code = as.character(return_code)) %>%
bind_rows(FundQuery) %>%
data.table::data.table(.)
#
returns <- FundQuery %>%
.[ , lapply(.SD, function(x) gsub("NULL", NA, x))] %>%
  .[complete.cases(fundName)] %>%
setnames(names(.), tolower(names(.))) %>%
.[ , time_period := gsub("cleaning up text", "", time_period)] %>%
.[complete.cases(return)] %>%
.[ , return := as.numeric(return)] %>%
mutate(return_type = ifelse(return_code == "0", "TR", ifelse(return_code == "8", "Pre-Liq", "Post_Liq"))) %>%
setnames("profile_date", "date") %>%
mutate(date = as.Date(date)) %>%
data.table::data.table(.) %>%
unique()
monthly_returns <- returns[time_period == "1m" & return_code == 0]
make_blend <- function (DT, ids, weights, blend_name = "Blendy McBlenderson",
id_col = "Ticker", return_col = "Return", date_col = "Date",
other_group = NULL) {
assertthat::assert_that(data.table::is.data.table(DT) | is.data.frame(DT),
msg = "DT must be a data.table or data.frame")
assertthat::assert_that(length(setdiff(c(id_col, return_col,
date_col, other_group), colnames(DT))) == 0, msg = paste0("These columns:\n\t",
paste0(setdiff(c(id_col, return_col, date_col, other_group),
colnames(DT)), collapse = "\n\t"), "\n", "aren't found in the data.table"))
assertthat::assert_that(length(setdiff(ids, DT[[id_col]])) ==
0, msg = "Some ids are not in underlying data.")
assertthat::assert_that(sum(weights) == 1, msg = "Weights do not sum to 100%")
DT <- data.table::as.data.table(DT)
weight_frame <- data.table::data.table(ids, weights)
data.table::setnames(weight_frame, "ids", id_col)
blend_frame <- merge(DT, weight_frame, by = id_col)
blend_frame[, `:=`(min_date, min(get(date_col), na.rm = T)),
by = id_col]
blend_frame <- blend_frame[!is.na(get(date_col)) & get(date_col) >=
max(min_date)]
blend_frame <- blend_frame[!is.na(get(return_col)), .(Name = blend_name,
Return = sum(get(return_col) * (weights), na.rm = F)),
by = c(date_col, other_group)]
}
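# Example use (hypothetical tickers and weights, for illustration only):
# make_blend(monthly_returns, ids = c("FUNDA", "FUNDB"), weights = c(0.6, 0.4),
#            blend_name = "60/40 Blend", id_col = "abbreviation",
#            return_col = "return", date_col = "date")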
ann_vol <- function(rets) {
sd(rets) * sqrt(12)
}
max_dd <- function (x, geometric = T)
{
if(length(x) <= 12) {
return(as.numeric(NA))
}
if (geometric) {
cumulative_return <- cumprod(1 + x)
}
else {
cumulative_return <- 1 + cumsum(x)
}
max_return <- cummax(c(1, cumulative_return))[-1]
min(cumulative_return/max_return - 1, 1)
}
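# e.g. max_dd(monthly_returns[abbreviation == "Fund"]$return) gives the worst
# peak-to-trough drawdown; series of 12 or fewer observations return NA by design.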
up_capture <- function (fund_return, index_return)
{
captureFrame <- data.table(index_return, fund_return)[index_return >=
0]
if (length(captureFrame$fund_return) == 0) {
return(NA)
}
fund_linked <- Reduce("*", captureFrame$fund_return + 1)^(1/length(captureFrame$fund_return)) -
1
index_linked <- Reduce("*", captureFrame$index_return + 1)^(1/length(captureFrame$index_return)) -
1
fund_linked/index_linked
}
down_capture <- function (fund_return, index_return)
{
captureFrame <- data.table(index_return, fund_return)[index_return <
0]
if (length(captureFrame$fund_return) == 0)
return(NA)
fund_linked <- prod(captureFrame$fund_return + 1)^(1/length(captureFrame$fund_return)) -
1
index_linked <- prod(captureFrame$index_return + 1)^(1/length(captureFrame$index_return)) -
1
fund_linked/index_linked
}
flexible_sub <- function(flex_fund, eq_sub, fi_sub, eq_weighting = .6, fi_weighting = .4, dynamic = T) {
test_names <- flex %>%
names() %>%
grep(flex_fund, .)
assertthat::are_equal(x = eq_weighting + fi_weighting, y = 1) %>%
assertthat::assert_that(., msg = "Weightings must add to 1")
if(dynamic == T) {
    assertthat::assert_that(length(test_names) > 0, msg = "Name not found in flexible fund data.")
fund_weights <- paste0(flex_fund, "_", "eq_weight")
calc_fund <- flex %>%
setnames("Date", "date") %>%
.[ , c("date", paste0(flex_fund, "_EQ")), with = F] %>%
.[ , date := as.Date(date, "%m/%d/%Y")] %>%
merge(.,
dcast(returns[time_period == "1m"
& abbreviation %in% c(flex_fund, eq_sub, fi_sub)
& return_type == "TR"],
date ~ abbreviation, value.var = "return"), by = "date") %>%
merge(., dynamic_weights[ , .(date = Date, get(fund_weights))], by = "date") %>%
setnames("V2", "eq_weight") %>%
.[ , eq_weight := eq_weight/100] %>%
.[ , paste0(flex_fund, "_FI") := (get(flex_fund) - (get(paste0(flex_fund, "_EQ")) * eq_weight))/(1 - eq_weight)] %>%
# .[ , Static := paste0(eq_weighting * 100, "/", fi_weighting * 100, " ",
# eq_sub, "/", fi_sub)] %>%
.[ , paste0(eq_weighting * 100, "/", fi_weighting * 100, "_Mixed") := eq_weighting * get(eq_sub) + fi_weighting * get(fi_sub)] %>%
.[ , Dynamic_Mixed := eq_weight * get(eq_sub) + (1 - eq_weight) * get(fi_sub)] %>%
setnames(c(eq_sub, fi_sub), c(paste0(c(eq_sub, fi_sub), c("_EQ", "_FI")))) %>%
# .[ , eq_component := eq_sub] %>%
# .[ , fi_component := fi_sub] %>%
# setnames(c(paste0(flex_fund, c("_EQ", "_FI"))), c("Equity", "FixedIncome")) %>%
.[ , detail := paste0(flex_fund, " vs ", "static and dynamic ", eq_sub, "/", fi_sub)] %>%
.[ , substituting := flex_fund] %>%
melt(., id.var = c("date", "eq_weight", "substituting", "detail"), variable.factor = F, value.name = "return") %>%
.[ , type := str_extract(variable, "_EQ|_FI|_Mixed")] %>%
.[ , comp := c("_EQ" = paste0(flex_fund , "_EQ"), "_FI" = paste0(flex_fund , "_FI"), "_Mixed" = flex_fund)[type]] %>%
merge(., .[ , .(comp = variable, date, comparison = return)]) %>%
.[!grep("IFA", variable)] %>%
.[ , type := NULL] %>%
setkey(variable, comp, date, return) %>%
.[ , variable := gsub("_EQ|_FI|_Mixed", "", variable)]
}
}
#Applies a rolling function to returns. Since returns are monthly, rolling annual statistics are computed over windows of different lengths (in months).
apply_rolling <- function (DT, func, value, window, group = "", ..., colname = NULL)
{
values <- expand.grid(func = func, value = value, window = window)
func <- as.character(values$func)
value <- as.character(values$value)
window <- as.numeric(values$window)
for (i in 1:length(func)) {
if (is.null(colname) & length(unique(value)) == 1) {
this_col <- paste(func[i], window[i], sep = "_")
}
else if (is.null(colname)) {
this_col <- paste(value[i], func[i], window[i], sep = "_")
}
else {
this_col <- paste(colname, window[i], sep = "_")
}
method <- "my_roll_apply"
DT[, `:=`((this_col), do.call(method, args = list(vector = get(value[i]),
width = window[i], func = eval(parse(text = func[i])),
...))), by = group]
}
DT[]
}
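# Example: trailing 12- and 36-month annualised volatility per fund, using the
# column names assumed elsewhere in this script:
# apply_rolling(monthly_returns, func = "ann_vol", value = "return",
#               window = c(12, 36), group = "abbreviation")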
ann_return <- function (x) {
if(length(x) <= 12) {
return(prod(1 + x) - 1)
}
as.numeric(prod(1 + x)^(12/length(x)) - 1)
}
my_roll_apply <- function (vector, width, func, ...)
{
data <- as.numeric(rep(NA, length(vector)))
if (length(vector) < width) {
return(data)
}
for (i in width:(length(vector))) {
data[[i]] <- func(vector[(i - width + 1):i], ...)
}
return(data)
}
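# e.g. my_roll_apply(rnorm(36), width = 12, func = ann_vol) returns NA for the
# first 11 positions and the trailing 12-month annualised volatility thereafter.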
fund_index_roll_apply <- function (fundReturn, indexReturn, width, func, ...)
{
data <- as.numeric(rep(NA, length(fundReturn)))
if (min(length(fundReturn), length(indexReturn)) < width) {
return(data)
}
else if (width > length(fundReturn)) {
return(data)
}
for (i in width:(length(fundReturn))) {
data[[i]] <- func(fundReturn[(i - width + 1):i], indexReturn[(i -
width + 1):i], ...)
}
data
}
tr_stats <- function(flex_data, idvar = "abbreviation", funcs = c("max_dd", "ann_vol")) {
stats_roll <- copy(flex_data) %>%
apply_rolling(DT = ., func = funcs,
value = c("return"), window = c(1, 3, 5,7, 10) * 12, group = idvar) %>%
.[ , return := NULL] %>%
melt(., id = c(idvar, "date"),
variable.factor = F, variable.name = "metric") %>%
.[ , time_period := str_extract(metric, "[0-9]+")] %>%
.[ , time_period := as.numeric(time_period)/12] %>%
.[ , time_period := as.character(time_period)] %>%
.[ , metric := gsub(paste0("_", c(1,3,5,7,10) * 12, collapse = "|"), "", metric)] %>%
.[complete.cases(value)]
financial_crisis <- flex_data[year(date) %in% c(2008, 2009)] %>%
.[ , c("ann_return", "ann_vol", "max_dd") := list(ann_return(return),
ann_vol(return),
                                                      max_dd(return)), by = idvar] %>%
.[ , c("date", "return") := NULL] %>%
unique() %>%
    melt(., id.var = idvar, variable.factor = F, variable.name = "metric", value.name = "2008-2009")
captures <- copy(flex_data) %>%
merge(., sp500[ , .(date, sp500_return)], by = "date") %>%
merge(., us_agg[ , .(date, agg_return)], by = "date") %>%
setkeyv(c(idvar, "date"))
for(i in c(1, 3, 5, 7, 10)) {
lab_d <- paste0("dc_sp_", i)
lab_u <- paste0("uc_sp_", i)
lab_cor_sp <- paste0("cor_sp_", i)
lab_cor_agg <- paste0("cor_us_agg_", i)
captures[ , (lab_d) := fund_index_roll_apply(return, sp500_return, i * 12, down_capture), by = idvar] %>%
.[ , (lab_u) := fund_index_roll_apply(return, sp500_return, i * 12, up_capture), by = idvar] %>%
.[ , (lab_cor_sp) := fund_index_roll_apply(return, sp500_return, i * 12, cor), by = idvar] %>%
.[ , (lab_cor_agg) := fund_index_roll_apply(return, agg_return, i * 12, cor), by = idvar]
}
format_caps <- melt(captures[ , !c("return", "sp500_return", "agg_return")], id.var = c(idvar, "date"),
variable.factor = F, variable.name = "metric") %>%
.[ , time_period := str_extract(metric, "[0-9]+")] %>%
.[ , time_period := factor(time_period, levels = c("1", "3", "5", "7", "10"))] %>%
.[ , metric := gsub(paste0("_", c(1,3,5, 7,10), collapse = "|"), "", metric)] %>%
.[complete.cases(value)]
rbind(stats_roll, format_caps, use.names = T) %>%
    merge(., financial_crisis, by = c("metric", idvar), all.x = T)
}
 | /Helper.R | no_license | elee-stone/ProjectShowcase | R | false | false | 13,083 | r |
# Copyright 2018 Observational Health Data Sciences and Informatics
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#' Create the exposure and outcome cohorts
#'
#' @details
#' This function will create the exposure and outcome cohorts following the definitions included in
#' this package.
#'
#' @param connectionDetails An object of type \code{connectionDetails} as created using the
#' \code{\link[DatabaseConnector]{createConnectionDetails}} function in the
#' DatabaseConnector package.
#' @param cdmDatabaseSchema Schema name where your patient-level data in OMOP CDM format resides.
#' Note that for SQL Server, this should include both the database and
#' schema name, for example 'cdm_data.dbo'.
#' @param cohortDatabaseSchema Schema name where intermediate data can be stored. You will need to have
#'                              write privileges in this schema. Note that for SQL Server, this should
#' include both the database and schema name, for example 'cdm_data.dbo'.
#' @param cohortTable The name of the table that will be created in the work database schema.
#' This table will hold the exposure and outcome cohorts used in this
#' study.
#' @param oracleTempSchema Should be used in Oracle to specify a schema where the user has write
#'                           privileges for storing temporary tables.
#' @param outputFolder Name of local folder to place results; make sure to use forward slashes
#' (/)
#'
#' @export
createCohorts <- function(connectionDetails,
cdmDatabaseSchema,
cohortDatabaseSchema,
cohortTable = "cohort",
oracleTempSchema,
outputFolder) {
if (!file.exists(outputFolder))
dir.create(outputFolder, recursive = TRUE)
conn <- DatabaseConnector::connect(connectionDetails)
.createCohorts(connection = conn,
cdmDatabaseSchema = cdmDatabaseSchema,
cohortDatabaseSchema = cohortDatabaseSchema,
cohortTable = cohortTable,
oracleTempSchema = oracleTempSchema,
outputFolder = outputFolder)
pathToCsv <- system.file("settings", "NegativeControls.csv", package = "AHAsAcutePancreatitis")
negativeControls <- read.csv(pathToCsv)
OhdsiRTools::logInfo("Creating negative control outcome cohorts")
negativeControlOutcomes <- negativeControls[negativeControls$type == "Outcome", ]
sql <- SqlRender::loadRenderTranslateSql("NegativeControlOutcomes.sql",
"AHAsAcutePancreatitis",
dbms = connectionDetails$dbms,
oracleTempSchema = oracleTempSchema,
cdm_database_schema = cdmDatabaseSchema,
target_database_schema = cohortDatabaseSchema,
target_cohort_table = cohortTable,
outcome_ids = negativeControlOutcomes$outcomeId)
DatabaseConnector::executeSql(conn, sql)
# Check number of subjects per cohort:
OhdsiRTools::logInfo("Counting cohorts")
countCohorts(connection = conn,
cdmDatabaseSchema = cdmDatabaseSchema,
cohortDatabaseSchema = cohortDatabaseSchema,
cohortTable = cohortTable,
oracleTempSchema = oracleTempSchema,
outputFolder = outputFolder)
DatabaseConnector::disconnect(conn)
}
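# Example call (connection settings below are placeholders, not real values):
# connectionDetails <- DatabaseConnector::createConnectionDetails(dbms = "postgresql",
#                                                                 server = "myserver/ohdsi",
#                                                                 user = "user",
#                                                                 password = "secret")
# createCohorts(connectionDetails = connectionDetails,
#               cdmDatabaseSchema = "cdm",
#               cohortDatabaseSchema = "scratch",
#               cohortTable = "cohort",
#               oracleTempSchema = NULL,
#               outputFolder = "output")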
addCohortNames <- function(data, IdColumnName = "cohortDefinitionId", nameColumnName = "cohortName") {
pathToCsv <- system.file("settings", "CohortsToCreate.csv", package = "AHAsAcutePancreatitis")
cohortsToCreate <- read.csv(pathToCsv)
pathToCsv <- system.file("settings", "NegativeControls.csv", package = "AHAsAcutePancreatitis")
negativeControls <- read.csv(pathToCsv)
idToName <- data.frame(cohortId = c(cohortsToCreate$cohortId,
negativeControls$targetId,
negativeControls$comparatorId,
negativeControls$outcomeId),
cohortName = c(as.character(cohortsToCreate$name),
as.character(negativeControls$targetName),
as.character(negativeControls$comparatorName),
as.character(negativeControls$outcomeName)))
idToName <- idToName[order(idToName$cohortId), ]
idToName <- idToName[!duplicated(idToName$cohortId), ]
names(idToName)[1] <- IdColumnName
names(idToName)[2] <- nameColumnName
data <- merge(data, idToName, all.x = TRUE)
# Change order of columns:
idCol <- which(colnames(data) == IdColumnName)
if (idCol < ncol(data) - 1) {
data <- data[, c(1:idCol, ncol(data) , (idCol+1):(ncol(data)-1))]
}
return(data)
}
 | /Sglt2iAcutePancreatitis/R/CreateAllCohorts.R | permissive | OHDSI/StudyProtocols | R | false | false | 5,717 | r |
library(networkD3)
library(readxl)
data <- read.csv('~/R数据.csv', sep = ",")
#Edge data set
#The edge data set MisLinks has three columns: source, target, and edge width (size/weight).
MisLinks=data.frame(matrix(0,length(data[,2]),3))
colnames(MisLinks) <- c('qi','zhong','lift')
names=data.frame(data[,1],data[,2])# position, name ###lookup table
names=names[!duplicated(names),]
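#networkD3 hands these data to D3, which expects zero-based node indices,
#hence the "-1" in the loop below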
for (i in 1:length(data[,2])) {
MisLinks[i,1]=as.numeric( which(as.character( data[i,1])==names[,1] &
as.character( data[i,2])==names[,2]))-1
MisLinks[i,2]=as.numeric( which(as.character( data[i,3])==names[,1] &
as.character( data[i,4])==names[,2]))-1
}
MisLinks[,3]=data[,5]
#Node data set
#The node data set MisNodes has three columns: node name, node group, and node size (importance/centrality)
MisNodes=data.frame(matrix(NA,length(names[,1]),3))
MisNodes[,1]=names[,2]
MisNodes[,2]=names[,1]
#for (i in 1:length(names[,1])){
# if (names[i,1]=='上'){t=1}
# if (names[i,1]=='野'){t=2}
# if (names[i,1]=='中'){t=3}
# if (names[i,1]=='下'){t=4}
# if (names[i,1]=='辅'){t=5}
# MisNodes[i,2]=t
#}
t= as.data.frame(table(data[,1:2]))
count=list()
for (i in 1:length(names[,2])) {
count[i]=t[which(as.character(names[i,2])==t[,2] &as.character(names[i,1])==t[,1]),3]*2
}
count=data.frame(unlist(count))
MisNodes[,3]=count
colnames(MisNodes) <- c('dian','weizhi','cishu')
#MyClickScript <- 'alert("You clicked " + d.name + " which is in row " +
#(d.index + 1) + " of your original R data frame");'
html=forceNetwork(
  # edge data set
  Links = MisLinks,
  # node data set
  Nodes = MisNodes,
  # column in the edge data giving the source
  Source = "qi",
  # column in the edge data giving the target
  Target = "zhong",
  # column in the edge data giving the edge width
  Value = "lift",
  # column in the node data giving the node name
  NodeID = "dian",
  # column in the node data giving the node group
  Group = "weizhi",
  # plot width
  width = 1200,
  # plot height
  height = 1000,
  # whether zooming is allowed
  zoom = T,
  # whether the layout is bounded
  bounded=T,
  # whether to show the legend
  legend=T,
  # opacity of other node labels when the mouse is not hovering
  opacityNoHover = 1,
  # initial opacity of all nodes
  opacity = 1,
  # node repulsion (more negative = stronger repulsion)
  charge=-100,
  # node colours can be mapped one-to-one from the groups
  Nodesize = "cishu" ,# node size column, from the node data frame
  # absolute node radius
  radiusCalculation = JS(" d.nodesize"),
  # font family for node labels (SimSun)
  fontFamily = "宋体",
  # font size for node labels
  fontSize = 16,
  # whether edges show arrows
  arrows = F,
  # edge colour; Cols could be a preset list
  linkColour = "gray",
  # mouse click handler
#clickAction = MyClickScript
)
saveNetwork(html,"NetWorkLOL赛事.html",selfcontained=TRUE)#save HTML
 | /码/NetWorkLOL赛事.R | no_license | kkkyimp/LOL-Association-Analysis | R | false | false | 2,871 | r |
context("Global VSURF test for classification iris data")
set.seed(2219, kind = "Mersenne-Twister")
data(iris)
iris.vsurf <- VSURF(iris[,1:4], iris[,5], ntree = 100, nfor.thres = 20,
nfor.interp = 10, nfor.pred = 10)
test_that("Selected variables for the 3 steps", {
expect_identical(iris.vsurf$varselect.thres, c(4L, 3L, 1L, 2L))
expect_identical(iris.vsurf$varselect.interp, c(4L, 3L))
expect_identical(iris.vsurf$varselect.pred, c(4L, 3L))
})
test_that("Variable importance",{
expect_equal(iris.vsurf$imp.mean.dec,
c(0.26633637, 0.25610509, 0.09020064, 0.03915156),
tolerance = 1e-7)
expect_equal(iris.vsurf$imp.sd.dec,
c(0.021659115, 0.015990696, 0.012599931, 0.007075411),
tolerance = 1e-7)
expect_identical(iris.vsurf$imp.mean.dec.ind, c(4L, 3L, 1L, 2L))
})
test_that("OOB erros of nested models", {
expect_equal(iris.vsurf$err.interp,
c(0.04666667, 0.03600000, 0.05000000, 0.04533333),
tolerance = 1e-7)
expect_equal(iris.vsurf$err.pred,
c(0.04666667, 0.03466667),
tolerance = 1e-7)
})
test_that("Thresholds for the 3 steps", {
expect_equal(min(iris.vsurf$pred.pruned.tree), 0.007075411,
tolerance = 1e-7)
expect_equal(iris.vsurf$sd.min, 0.003442652,
tolerance = 1e-7)
expect_equal(iris.vsurf$mean.jump, 0.009333333, tolerance = 1e-7)
}) | /tests/testthat/test_iris.R | no_license | slarge/vsurfRanger | R | false | false | 1,441 | r |
\name{xval.HGpath}
\alias{xval.HGpath}
\title{ Solution path cross validation}
\description{
Cross validates the solution paths produced by HGpath
}
\usage{
xval.HGpath(x, y, method = HGgaussian, pathk = seq(1, 0, -0.01), pathb = c(0.1, 1e-04, 50), fold = 10, trace = T,
control = HGcontrol(tolerance = 1e-04, tolc = 0.01),weights=rep(1,nrow(x)), ...)
}
\arguments{
\item{x}{an n by p data matrix }
\item{y}{a n by 1 response vector }
\item{method}{one of HGgaussian or HGmultc }
\item{pathk}{a decreasing sequence of kbess values starting from one and going to zero }
\item{pathb}{a vector of three components: the first is the largest value of bbess on the path,
the second the smallest value of bbess, and the third the total number of bbess values which will be
equally spaced on a log scale }
\item{fold}{number of folds in the cross validation }
\item{trace}{ if TRUE report progress over path values }
\item{control}{control parameters for HGgaussian and HGmultc }
\item{weights}{a vector of observation weights}
\item{\dots}{ any other relevant arguments for HGmultc or HGgaussian}
}
\details{
Computes cross validated fitted values and cross validated error rates and associated standard deviations
for the grid or path specified by pathb and pathk
}
\value{
A list with components
\item{xvfv }{a n by (length(pathb)*length(pathk)) matrix of cross validated fitted values}
\item{par.vals }{a (length(pathb)*length(pathk)) by 3 matrix of parameter values defining the path or grid.
The first column gives the bbess values, the second column the kbess values and the third column the
delta=(2/bbess)^0.5 values. For L1 regression delta is n*lambda.}
\item{grp}{a n by 1 vector identifying the folds used in the cross validation}
\item{xve}{a length(pathb)*length(pathk) vector of cross validated error rates}
\item{xvsd}{a length(pathb)*length(pathk) vector of cross validated standard deviations}
\item{jmin}{a vector identifying the rows of par.vals above which have cross validated error rates less
than or equal to the minimum cross validated error rate plus one standard deviation}
}
\author{Harri Kiiveri }
\note{ There is a plot method for the object produced by this function
}
\seealso{ HGpath, HGgaussian and HGmultc }
\examples{
# Cross validate L1 regression solution path (kbess fixed at 1)
x<-matrix(rnorm(5000),nrow=50,ncol=100)
lp<-x[,1]+2*x[,3]+6*x[,4]
y<-lp+rnorm(50)*.1
res<-xval.HGpath(x,y,trace=FALSE,method=HGgaussian,pathk=1, pathb=c(1,1e-6,25))
plot(res)
# plot cross validated fitted values versus observed for one model
k<-res$jmin[1]
plot(res[[1]][,k],y)
}
\keyword{models }
\keyword{multivariate}
 | /man/xval.HGpath.Rd | no_license | parsifal9/ptools | R | false | false | 2,758 | rd |
library("rpart")
library("dplyr")
library(rpart.plot)
# partition of data: 100 rows for training, the remaining 50 for testing
indexes = sample(150, 100)
iris_train = iris[indexes, ]
iris_test = iris[-indexes, ]
target = Species ~ Sepal.Length + Sepal.Width + Petal.Length + Petal.Width
##decision tree with rpart
tree = rpart(target, data = iris_train, method = "class")
rpart.plot(tree)
library(tidyverse)
library(caret)
library(rpart)
model <- rpart(Species ~., data = iris)
par(xpd = NA)
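# par(xpd = NA) normally precedes a base-graphics tree plot; the plotting call
# itself appears to be missing. The assumed intent would be something like:
# plot(model); text(model, digits = 3)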
# prediction from the model for a particular observation
newdata <- data.frame(Sepal.Length = 6.5, Sepal.Width = 3.0,Petal.Length = 5.2, Petal.Width = 2.0)
model %>% predict(newdata, "class")
 | /TASK6.R | no_license | Bhuvash/Spark-foundation-TASK- | R | false | false | 638 | r |
##Libraries
library(data.table)
library(plyr)
library(dplyr)
library(INLA)
library(parallel)
library(qvalue)
library(magrittr)
##############################
##Get command line arguments##
##############################
Arguments = (commandArgs(TRUE))
#########################
##Define test functions##
#########################
##################
##Parental model##
##################
Parental_model <- function(locus_ID, Parental_data){
##ADJUST WHEN WE HAVE FULL DATA
chrom <- Parental_data[Parental_data$Paste_locus == locus_ID,]$chrom
pos_start <- Parental_data[Parental_data$Paste_locus == locus_ID,]$start
pos_end <- Parental_data[Parental_data$Paste_locus == locus_ID,]$end
Paste_locus <- Parental_data$Paste_locus[Parental_data$Paste_locus == locus_ID]
##Reformat data for modelling
reformed_matrix <- matrix(ncol = 2, nrow = 6) %>% as.data.frame()
reformed_matrix[1,] <- c(Parental_data[Parental_data$Paste_locus == locus_ID, c(5, 11)])
reformed_matrix[2,] <- c(Parental_data[Parental_data$Paste_locus == locus_ID, c(6, 12)])
reformed_matrix[3,] <- c(Parental_data[Parental_data$Paste_locus == locus_ID, c(7, 13)])
reformed_matrix[4,] <- c(Parental_data[Parental_data$Paste_locus == locus_ID, c(8, 14)])
reformed_matrix[5,] <- c(Parental_data[Parental_data$Paste_locus == locus_ID, c(9, 15)])
reformed_matrix[6,] <- c(Parental_data[Parental_data$Paste_locus == locus_ID, c(10, 16)])
names(reformed_matrix)[1] <- "p1_reads"
names(reformed_matrix)[2] <- "p2_reads"
reformed_matrix$Total_reads <- reformed_matrix$p1_reads + reformed_matrix$p2_reads
P_mod <- inla(p1_reads ~ 1, data = reformed_matrix , family = "binomial", Ntrials = Total_reads)
coef <- P_mod$summary.fixed
fixed_effect_posterior <- P_mod$marginals.fixed[[1]]
lower_p <- inla.pmarginal(0, fixed_effect_posterior)
upper_p <- 1 - inla.pmarginal(0, fixed_effect_posterior)
post_pred_p <- 2 * (min(lower_p, upper_p))
P_mod_output <- data.table(chrom = chrom, pos_start = pos_start, pos_end = pos_end, Paste_locus = Paste_locus, P_est = coef[1], P_p_value = post_pred_p)
return(P_mod_output)
}
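##Note: post_pred_p above is a two-sided posterior tail probability: twice the
##smaller of P(beta < 0) and P(beta > 0) for the intercept (the log-odds of
##sampling a P1 read), so small values indicate consistent allelic imbalance.
##The two functions below use the same construction.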
################
##Hybrid model##
################
Hybrid_model <- function(locus_ID, Hybrid_data){
##ADJUST WHEN WE HAVE FULL DATA
chrom <- Hybrid_data[Hybrid_data$Paste_locus == locus_ID,]$chrom
pos_start <- Hybrid_data[Hybrid_data$Paste_locus == locus_ID,]$start
pos_end <- Hybrid_data[Hybrid_data$Paste_locus == locus_ID,]$end
Paste_locus <- Hybrid_data$Paste_locus[Hybrid_data$Paste_locus == locus_ID]
##ADJUST WHEN WE HAVE FULL DATA
##Reformat data for modelling
reformed_matrix <- matrix(ncol = 2, nrow = 6) %>% as.data.frame()
reformed_matrix[1,] <- c(Hybrid_data[Hybrid_data$Paste_locus == locus_ID, c(5, 11)])
reformed_matrix[2,] <- c(Hybrid_data[Hybrid_data$Paste_locus == locus_ID, c(6, 12)])
reformed_matrix[3,] <- c(Hybrid_data[Hybrid_data$Paste_locus == locus_ID, c(7, 13)])
reformed_matrix[4,] <- c(Hybrid_data[Hybrid_data$Paste_locus == locus_ID, c(8, 14)])
reformed_matrix[5,] <- c(Hybrid_data[Hybrid_data$Paste_locus == locus_ID, c(9, 15)])
reformed_matrix[6,] <- c(Hybrid_data[Hybrid_data$Paste_locus == locus_ID, c(10, 16)])
names(reformed_matrix)[1] <- "p1_reads"
names(reformed_matrix)[2] <- "p2_reads"
reformed_matrix$Total_reads <- reformed_matrix$p1_reads + reformed_matrix$p2_reads
H_mod <- inla(p1_reads ~ 1, data = reformed_matrix , family = "binomial", Ntrials = Total_reads)
coef <- H_mod$summary.fixed
fixed_effect_posterior <- H_mod$marginals.fixed[[1]]
lower_p <- inla.pmarginal(0, fixed_effect_posterior)
upper_p <- 1 - inla.pmarginal(0, fixed_effect_posterior)
post_pred_p <- 2 * (min(lower_p, upper_p))
H_mod_output <- data.table(chrom = chrom, pos_start = pos_start, pos_end = pos_end, Paste_locus = Paste_locus, H_est = coef[1], H_p_value = post_pred_p)
return(H_mod_output)
}
###########################
##Parental - Hybrid model##
###########################
Parental_hybrid_model <- function(locus_ID, Parental_hybrid_data){
##ADJUST WHEN WE HAVE FULL DATA
chrom <- Parental_hybrid_data[Parental_hybrid_data$Paste_locus == locus_ID,]$chrom
pos_start <- Parental_hybrid_data[Parental_hybrid_data$Paste_locus == locus_ID,]$start
pos_end <- Parental_hybrid_data[Parental_hybrid_data$Paste_locus == locus_ID,]$end
Paste_locus <- Parental_hybrid_data$Paste_locus[Parental_hybrid_data$Paste_locus == locus_ID]
##ADJUST WHEN WE HAVE FULL DATA
##Reformat data for modelling
reformed_matrix <- matrix(ncol = 2, nrow = 12) %>% as.data.frame()
  reformed_matrix[1,] <- c(Parental_hybrid_data[Parental_hybrid_data$Paste_locus == locus_ID, c(5, 11)]) #parental rep 1
  reformed_matrix[2,] <- c(Parental_hybrid_data[Parental_hybrid_data$Paste_locus == locus_ID, c(6, 12)]) #parental rep 2
  reformed_matrix[3,] <- c(Parental_hybrid_data[Parental_hybrid_data$Paste_locus == locus_ID, c(7, 13)]) #parental rep 3
  reformed_matrix[4,] <- c(Parental_hybrid_data[Parental_hybrid_data$Paste_locus == locus_ID, c(8, 14)]) #parental rep 4
  reformed_matrix[5,] <- c(Parental_hybrid_data[Parental_hybrid_data$Paste_locus == locus_ID, c(9, 15)]) #parental rep 5
  reformed_matrix[6,] <- c(Parental_hybrid_data[Parental_hybrid_data$Paste_locus == locus_ID, c(10, 16)]) #parental rep 6
  reformed_matrix[7,] <- c(Parental_hybrid_data[Parental_hybrid_data$Paste_locus == locus_ID, c(17, 23)]) #hybrid rep 1
  reformed_matrix[8,] <- c(Parental_hybrid_data[Parental_hybrid_data$Paste_locus == locus_ID, c(18, 24)]) #hybrid rep 2
  reformed_matrix[9,] <- c(Parental_hybrid_data[Parental_hybrid_data$Paste_locus == locus_ID, c(19, 25)]) #hybrid rep 3
  reformed_matrix[10,] <- c(Parental_hybrid_data[Parental_hybrid_data$Paste_locus == locus_ID, c(20, 26)]) #hybrid rep 4
  reformed_matrix[11,] <- c(Parental_hybrid_data[Parental_hybrid_data$Paste_locus == locus_ID, c(21, 27)]) #hybrid rep 5
  reformed_matrix[12,] <- c(Parental_hybrid_data[Parental_hybrid_data$Paste_locus == locus_ID, c(22, 28)]) #hybrid rep 6
names(reformed_matrix)[1] <- "p1_reads"
names(reformed_matrix)[2] <- "p2_reads"
reformed_matrix$Total_reads <- reformed_matrix$p1_reads + reformed_matrix$p2_reads
reformed_matrix$Env <- c("Parental", "Parental", "Parental", "Parental", "Parental", "Parental", "Hybrid", "Hybrid", "Hybrid", "Hybrid", "Hybrid", "Hybrid")
H_P_mod <- inla(p1_reads ~ Env, data = reformed_matrix , family = "binomial", Ntrials = Total_reads)
coef <- H_P_mod$summary.fixed
  fixed_effect_posterior <- H_P_mod$marginals.fixed[[2]] ##[[2]] is the Env (parental vs hybrid) coefficient; check this... and run some tests
lower_p <- inla.pmarginal(0, fixed_effect_posterior)
upper_p <- 1 - inla.pmarginal(0, fixed_effect_posterior)
post_pred_p <- 2 * (min(lower_p, upper_p))
H_P_mod_output <- data.table(chrom = chrom, pos_start = pos_start, pos_end = pos_end, Paste_locus = Paste_locus, H_P_est = coef[2,1], H_P_p_value = post_pred_p)
return(H_P_mod_output)
}
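##Note: the Env coefficient tests whether the P1:P2 log-odds differs between
##parental and hybrid samples. In the usual allele-specific framework the
##parental ratio reflects cis + trans effects while the hybrid ratio reflects
##cis only, so a non-zero Env term suggests a trans-acting difference.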
######################
##Read primary data ##
######################
full_dataset <- read.delim("/Users/henryertl/Documents/Wittkopp_lab/AS_ATAC_RNA_2020_10_1/RNA_seq/Data_tables/ZHR_Z30_genic_counts_CPM_final_dm6_20min_1000max.txt", header = T)
full_dataset$chrom <- "0"
full_dataset$start <- "0"
full_dataset$end <- "0"
full_dataset$Paste_locus <- paste(full_dataset$chrom, full_dataset$start, full_dataset$end, full_dataset$gene, sep = "_")
## Get sums
All_reads_p1_r1 <- sum(full_dataset$P1_1)
All_reads_p1_r2 <- sum(full_dataset$P1_2)
All_reads_p1_r3 <- sum(full_dataset$P1_3)
All_reads_p1_r4 <- sum(full_dataset$P1_4)
All_reads_p1_r5 <- sum(full_dataset$P1_5)
All_reads_p1_r6 <- sum(full_dataset$P1_6)
All_reads_p2_r1 <- sum(full_dataset$P2_1)
All_reads_p2_r2 <- sum(full_dataset$P2_2)
All_reads_p2_r3 <- sum(full_dataset$P2_3)
All_reads_p2_r4 <- sum(full_dataset$P2_4)
All_reads_p2_r5 <- sum(full_dataset$P2_5)
All_reads_p2_r6 <- sum(full_dataset$P2_6)
All_reads_p1_hyb_r1 <- sum(full_dataset$HYB_1_P1)
All_reads_p1_hyb_r2 <- sum(full_dataset$HYB_2_P1)
All_reads_p1_hyb_r3 <- sum(full_dataset$HYB_3_P1)
All_reads_p1_hyb_r4 <- sum(full_dataset$HYB_4_P1)
All_reads_p1_hyb_r5 <- sum(full_dataset$HYB_5_P1)
All_reads_p1_hyb_r6 <- sum(full_dataset$HYB_6_P1)
All_reads_p2_hyb_r1 <- sum(full_dataset$HYB_1_P2)
All_reads_p2_hyb_r2 <- sum(full_dataset$HYB_2_P2)
All_reads_p2_hyb_r3 <- sum(full_dataset$HYB_3_P2)
All_reads_p2_hyb_r4 <- sum(full_dataset$HYB_4_P2)
All_reads_p2_hyb_r5 <- sum(full_dataset$HYB_5_P2)
All_reads_p2_hyb_r6 <- sum(full_dataset$HYB_6_P2)
##Get columns for parental, hybrid, and parental-hybrid data
Parental_data <- full_dataset[, c("chrom", "start", "end", "Paste_locus", "P1_1", "P1_2", "P1_3", "P1_4", "P1_5", "P1_6", "P2_1", "P2_2", "P2_3", "P2_4", "P2_5", "P2_6")]
Hybrid_data <- full_dataset[, c("chrom", "start", "end", "Paste_locus", "HYB_1_P1", "HYB_2_P1", "HYB_3_P1", "HYB_4_P1", "HYB_5_P1", "HYB_6_P1", "HYB_1_P2", "HYB_2_P2", "HYB_3_P2", "HYB_4_P2", "HYB_5_P2", "HYB_6_P2")]
Parental_hybrid_data <- full_dataset[, c("chrom", "start", "end", "Paste_locus", "P1_1", "P1_2", "P1_3", "P1_4", "P1_5", "P1_6", "P2_1", "P2_2", "P2_3", "P2_4", "P2_5", "P2_6",
"HYB_1_P1", "HYB_2_P1", "HYB_3_P1", "HYB_4_P1", "HYB_5_P1", "HYB_6_P1", "HYB_1_P2", "HYB_2_P2", "HYB_3_P2", "HYB_4_P2", "HYB_5_P2", "HYB_6_P2")]
#############################################
##run appropriate test based on argument 1##
#############################################
if (Arguments[1] == "Parents") {
Parental_results <- do.call(rbind, mclapply(Parental_data$Paste_locus, function(x) Parental_model(x, Parental_data), mc.cores = 4))
write.table(Parental_results, file = "/Users/henryertl/Documents/Wittkopp_lab/AS_ATAC_RNA_2020_10_1/RNA_seq/Bayes_outputs/Parental_test_output_RNA_full_CPM_20_1000.txt", row.names = F, quote = F)
} else if (Arguments[1] == "Hybrids"){
Hybrid_results <- do.call(rbind, mclapply(Hybrid_data$Paste_locus, function(x) Hybrid_model(x, Hybrid_data), mc.cores = 4))
write.table(Hybrid_results, file = "/Users/henryertl/Documents/Wittkopp_lab/AS_ATAC_RNA_2020_10_1/RNA_seq/Bayes_outputs/Hybrid_test_output_RNA_full_CPM_20_1000.txt", row.names = F, quote = F)
} else if(Arguments[1] == "Parent-Hybrid") {
Parental_hybrid_results <- do.call(rbind, mclapply(Parental_hybrid_data$Paste_locus, function(x) Parental_hybrid_model(x, Parental_hybrid_data), mc.cores = 4))
write.table(Parental_hybrid_results, file = "/Users/henryertl/Documents/Wittkopp_lab/AS_ATAC_RNA_2020_10_1/RNA_seq/Bayes_outputs/Parental_Hybrid_test_output_RNA_full_CPM_20_1000.txt", row.names = F, quote = F)
}
| /Cis_trans_Bayes_analyses/Misc_models/cis_trans_model_command_line_RNA_6_reps_ZHR_Z30.R | no_license | henryertl/Integrative_AS_genomics | R | false | false | 10,357 | r |
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/date-json.R
\name{number_2_date}
\alias{number_2_date}
\title{Convert a numeric value to a date}
\usage{
number_2_date(num)
}
\arguments{
\item{num}{numeric}
}
\description{
Convert a numeric value to a date.
}
| /man/number_2_date.Rd | permissive | BruceZhaoR/mypkg | R | false | true | 232 | rd |
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/plot_pairs.R
\name{plot_pairs}
\alias{plot_pairs}
\title{Function to plot pairs of values}
\usage{
plot_pairs(var_pp = "spp_clust_catch", save_pp = TRUE)
}
\arguments{
\item{var_pp}{Variable of interest; could be, e.g., "spp_clust_catch"}

\item{save_pp}{Save plot as png or not}
}
\description{
Function to plot pairs of values
}
| /man/plot_pairs.Rd | no_license | peterkuriyama/ch4 | R | false | true | 466 | rd |
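# build the Encyclopedia of Life (EOL) API URL for a given endpoint, pinned to API v1.0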
eol_url <- function(x) sprintf('https://eol.org/api/%s/1.0', x)
| /R/eol_utiils.R | permissive | ropensci/taxize | R | false | false | 64 | r |
#############################################################
#script to predict proportion of trees damaged using volume##
#############################################################
#first load packages
library(ggplot2)
library(MuMIn)
library(lme4)
#clear objects
rm(list=ls())
#import data
Dam<-read.csv("Data/prop_damage.csv")
head(Dam)
colnames(Dam)<-c("Row","ID","Study","Site","Method","Vol","Tree_Ex","Prop_dam","Region","Plot","Replicates")
Dam<-Dam[complete.cases(Dam[,c(6,8)]),]
#######################################################
#now predict proportion of trees damaged using volume##
#######################################################
#create column for Volume squared and log volume
Dam$Vol_sq<-Dam$Vol^2
Dam$Vol_log<-log(Dam$Vol)
Dam$Vol_log2<-(log(Dam$Vol))^2
head(Dam)
Dam$Replicates<-ifelse(is.na(Dam$Replicates),1,Dam$Replicates)
Dam$Plot<-ifelse(is.na(Dam$Plot),median(Dam$Plot,na.rm = T),Dam$Plot)
#test for the effects of log volume (M1-M3), raw volume (M5-M7), and harvest method
#(alone in M4), plus a null model (M0)
M1<-lmer(qlogis(Prop_dam)~Vol_log+(Vol_log|Study),Dam,na.action="na.fail")
M2<-lmer(qlogis(Prop_dam)~Vol_log*Method+(Vol_log|Study),Dam,na.action="na.fail")
M3<-lmer(qlogis(Prop_dam)~Vol_log+Method+(Vol_log|Study),Dam,na.action="na.fail")
M4<-lmer(qlogis(Prop_dam)~Method+(Vol|Study),Dam,na.action="na.fail")
M5<-lmer(qlogis(Prop_dam)~Vol*Method+(Vol|Study),Dam,na.action="na.fail")
M6<-lmer(qlogis(Prop_dam)~Vol+Method+(Vol|Study),Dam,na.action="na.fail")
M7<-lmer(qlogis(Prop_dam)~Vol+(Vol|Study),Dam,na.action="na.fail")
M0<-lmer(qlogis(Prop_dam)~1+(1|Study),Dam,na.action="na.fail")
AICc(M1,M2,M3,M4,M5,M6,M7,M0)
All_mods<-list(M1,M2,M3,M4,M5,M6,M7,M0)
#diagnostic plots
#model averaging
ms1<-model.sel(object=All_mods,rank="AICc",fit=T,trace=T) #no squared terms in the candidate set, so no subset constraint is needed
ms1$r2 <- sapply(get.models(ms1, subset = TRUE), function(x) r.squaredGLMM(x)[1]) #marginal R2, in the table's AICc order
ms2<-subset(ms1,delta<=7)
#write this table to csv
write.csv(ms1,"Tables/Tree_damage_models.csv")
# extract coefficients
Model.av<-model.avg(ms2)
summary(Model.av) # take values from conditional average
#assumption: coefficients come from the conditional ("subset") average referenced above
coefs <- as.data.frame(summary(Model.av)$coefmat.subset)
# use normal distribution to approximate p-value from the z statistic
coefs$p.z <- 2 * (1 - pnorm(abs(coefs[["z value"]])))
coefs
write.csv(coefs,"Tables/Tree_damage_coefs.csv")
#predict values for plotting
#now create plots of this
newdat<-data.frame(Vol=c(seq(3,164.9,length.out = 500),seq(5,107,length.out = 500)),Method=c(rep("Conventional",500),rep("RIL",500)))
newdat$Vol_log<-log(newdat$Vol)
newdat$Prop_dam<-0
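#Prop_dam placeholder above lets model.matrix build the fixed-effects design matrix;
#pvar1 is the prediction variance from fixed-effect uncertainty only (confidence interval),
#tvar1 below adds the among-study random-effect variance (prediction interval)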
mm <- model.matrix(terms(M2),newdat)
newdat$Prop_dam <- predict(Model.av,newdat,re.form=NA)
pvar1 <- diag(mm %*% tcrossprod(vcov(M2),mm))
tvar1 <- pvar1+VarCorr(M2)$Study[1] ## must be adapted for more complex models
newdat <- data.frame(
newdat
, plo = newdat$Prop_dam-2*sqrt(pvar1)
, phi = newdat$Prop_dam+2*sqrt(pvar1)
, tlo = newdat$Prop_dam-2*sqrt(tvar1)
, thi = newdat$Prop_dam+2*sqrt(tvar1)
)
#plot these results
theme_set(theme_bw(base_size=12))
Dam_1<-ggplot(Dam,aes(x=Vol,y=Prop_dam,colour=Method))+geom_point(shape=1)+geom_line(data=newdat,aes(x=Vol,y=plogis(Prop_dam),colour=Method),size=2)
Dam_2<-Dam_1+geom_ribbon(data=newdat,aes(ymax=plogis(phi),ymin=plogis(plo),fill=Method,size=NULL),colour=NA,alpha=0.2)+ylim(0,0.7)
Dam_3<-Dam_2+theme(panel.grid.major = element_blank(),panel.grid.minor = element_blank(),panel.border = element_rect(size=1.5,colour="black",fill=NA))
Dam_3+xlab(expression(paste("Volume of wood logged (",m^3,ha^-1,")")))+ylab("Proportion of residual tree stems damaged")+scale_colour_brewer(palette = "Set1")+scale_fill_brewer(palette = "Set1")+theme(legend.position="none")
ggsave("Figures/Prop_damaged_vol.png",height=6,width=8,dpi=600)
| /R_scripts/Tree_damage.R | no_license | phil-martin-research/LogFor | R | false | false | 3,843 | r |
library(pdftools)
library(stringr)
library(qdapRegex)
library(dplyr)
library(data.table)
### TODOs
# 1. dblchk that everything in caps is a genera or chem_class
# 2. many chem_classes are hierarchical, ie. ALKALOIDS and *specific sub-class* ALKALOID;
# We could just lump everything into overall chem_class
# 3. ALso, need to handle singular (ALKALOID) vs plural (ALKALOIDS)
# read in data from pdf
# each page is one element in the list "text"
text <- pdf_text("data/dictionary_plant_metabolites.pdf")
text <- text[8:1290] # drop preface and index of book
# All genera names and chemical classes are CAPITALIZED in the pdf. We can use this pattern to
# extract them
# pulling out caps like this includes a lot more than just chem class genera
# using ex_caps also splits up phrases like "AMINO ACID" into "AMINO", "ACID"
caps <- ex_caps(text[[4]])
caps
# Use ex_caps_phrase to preserve phrases like "AMINO ACID"
caps <- ex_caps_phrase(text[[4]])
caps
### Clean up cap phrase data
# ------
# get all cap phrases
caps <- unlist(sapply(1:length(text), function(x) ex_caps_phrase(text[[x]])))
# however, there are still lots of elements that are not genera or chem_class
caps_length <- nchar(caps)
caps[head(order(caps_length),500)] # most short words are nonsense
# there are also NAs
caps[which(is.na(caps))]
# check in book to see where NAs are introduced
caps[which(is.na(caps))-1]
caps[which(is.na(caps))+1] # NAs are introduced whenever there is a blank page
# so we can safely drop NAs from caps_vec
caps <- caps[-c(which(is.na(caps)))]
caps_length <- nchar(caps)
# look at short cap words
which(caps_length < 4) # there are lot. Most are not genera or chem_class
unique(caps[which(caps_length == 2)]) # no valid words are nchar(caps)==2
# drop nchar(caps) < 3
caps <- caps[which(caps_length > 2 )]
caps_length <- nchar(caps)
unique(caps[which(caps_length == 2)]) # all removed
unique(caps[which(caps_length == 3)]) # only ASA, IVA, ZEA are valid (someone else dbl check)
word3 <- unique(caps[which(caps_length == 3)])
word3 <- word3[which(!(word3 %in% c("ASA","IVA","ZEA")))]
caps <- caps[-which((caps %in% word3))]
caps_length <- nchar(caps)
caps[which(caps_length==3)] # confirm nchar(caps)==3 are correct
unique(caps[which(caps_length == 4)]) #
word4 <- rm_non_words(unique(caps[which(caps_length == 4)])) # removes hyphen
word4 <- unlist(rm_nchar_words(word4,n=4)) # get all words less than 4 char
word4 <- append(word4[which(word4 != "")],c("USSR","SSSR","IRCS","ATCC","XVII", "XLII", "XLIV", "XLVI",
"LIII","VIII","XIII","IIIB")) # get all unwanted nchar(caps)==4
word4
caps <- caps[-which((caps %in% word4))]
caps_length <- nchar(caps)
caps[which(caps_length==4)] # still have non words
word4 <- caps[which(caps_length==4)][which(grepl("-",caps[which(caps_length==4)]))]
caps <- caps[-which((caps %in% word4))]
caps_length <- nchar(caps)
unique(caps[which(caps_length==4)]) # all nchar(caps)==4 are fixed
unique(caps[which(caps_length == 5)]) # all unwanted words have hyphens
word5 <- caps[which(caps_length==5)][which(grepl("-",caps[which(caps_length==5)]))]
word5 # everything with hyphens
caps <- caps[-which((caps %in% word5))]
caps_length <- nchar(caps)
unique(caps[which(caps_length == 5)]) # should dblcheck all are genus or chem
unique(caps[which(caps_length == 6)]) # still something with hyphens
word6 <- caps[which(caps_length==6)][which(grepl("-",caps[which(caps_length==6)]))]
word6
caps <- caps[-which((caps %in% word6))]
caps_length <- nchar(caps)
unique(caps[which(caps_length == 6)]) # should dblcheck
# haven't seen unwanted words after nchar(caps) >= 7 (I've checked by eye for words up to 10)
unique(caps[which(caps_length == 7)]) ### someone could do more checking here
# (tentatively) caps ONLY contains genera and chem_class info
# get caps, chemical classes, and genera from every page in entire book
caps <- setDT(list(caps))
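# chemical classes appear in the book as headers ending with ":", so pull the
# capitalized phrases between a newline and a colon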
chem_class <- setDT(list(unlist(lapply(1:length(text),
function(x) ex_caps_phrase(ex_between(text[[x]],
"\r\n",":"))))))
chem_class <- chem_class[-which(is.na(chem_class))] # rm NAs caused by empty pages in book
genera <- caps[!caps$V1 %in% chem_class$V1]
# get genera using plant genera reference
read.csv("data/plantGenera.csv", header = TRUE, stringsAsFactors = FALSE) %>%
transmute(Genus = toupper(genus)) -> plantGenera
genera2 <- (caps[(caps$V1 %in% plantGenera$Genus)]) # some genera missing in plantGenera
# 935 genera not in plantGenera; all appear to be valid genera names
genDiff <- setdiff(genera[[1]],genera2[[1]])
# Some differences are typos, e.g., ABELOM*O*SCHUS instead of ABELMOSCHUS
# make df with all genera from the dictionary plus a "different" flag marking those absent from plantGenera
genera %>% mutate(different = ifelse(genera$V1 %in% plantGenera$Genus, "no","yes")) -> generaDiff
names(generaDiff)[1] <- "genus"
write.csv(generaDiff, "data/generaDiff.csv", row.names = FALSE)
### after cleaning up caps, chem_class, and genera
# create dataframe with chemical classes as columns and genera as rows
defense <- data.frame(matrix(0, nrow = nrow(genera), ncol = nrow(unique(chem_class))+1))
names(defense) <- c("Genus", unique(chem_class$V1))
defense[,1] <- genera$V1
defense <- data.table(defense)
#This is clunky, but not too slow. It is a loop that categorizes each capitalized word in the book as a genus or a chemical family and fills out the matrix accordingly
i = 0
for(n in 1:length(caps$V1)){
if(is.element(caps$V1[n], genera$V1)){
i = i +1}
if(is.element(caps$V1[n], genera$V1) == FALSE){
defense[i, caps$V1[n]:=1]}
}
defense<-as.data.frame(defense)
defense[,2:ncol(defense)]<-sapply(defense[,2:ncol(defense)], as.numeric)
write.csv(defense, "data/PlantChems.csv", row.names = FALSE)
| /getPlantChems.R | no_license | lsw5077/leps | R | false | false | 6,024 | r |
#Script that gives the growth of the species with the climate of the FERLD for the sensitivity analysis of MaxPotGrowth:
rm(list=ls())
setwd("~/Temperate_species_colonisation_in_mixedwood_boreal_forest_with_climate_change")
library(tidyverse)
theme_set(theme_bw())
library(brms)
library(ggpubr)
ferld_growth <- readRDS(file="growth/ferld_growth_rcp_period.rds") %>%
filter(rcp=="rcp45") %>%
select(-low_growth_no_biais, -high_growth_no_biais)
parameter_df <- readRDS(file="../result_simulations/sensitivity/sensitivity_analysis_design_percent_maxptogrowth.rds") %>%
rename(species2=species)
res <- NULL
for(i in 1:nrow(parameter_df)){ #i=1
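  # repeat this parameter row to match the 36 rows of ferld_growth (presumably one per species x period combination)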
sub <- do.call("rbind", replicate(36, parameter_df[i,], simplify = FALSE))
sub2 <- cbind(sub, ferld_growth)
sp <- unique(sub2$species2)
if(sub2$crossing[1] == "no"){
if(sp=="ERS"){
sub2 <- mutate(sub2, growth_no_biais=case_when(
species=="Sugar_Maple" ~ growth_no_biais * as.numeric(sens_value),
TRUE ~ growth_no_biais
))
}
if(sp=="ERR"){
sub2 <- mutate(sub2, growth_no_biais=case_when(
species=="Red_Maple" ~ growth_no_biais * as.numeric(sens_value),
TRUE ~ growth_no_biais
))
}
if(sp=="BOJ"){
sub2 <- mutate(sub2, growth_no_biais=case_when(
species=="Yellow_Birch" ~ growth_no_biais * as.numeric(sens_value),
TRUE ~ growth_no_biais
))
}
  }else{
    # all six crossing combinations do the same operation: overwrite the focal
    # species' growth column (column 8) with the crossing species' values
    sp_names <- c(ERS = "Sugar_Maple", ERR = "Red_Maple", BOJ = "Yellow_Birch")
    sp2 <- sub2$crossing[1]
    sub3 <- NULL
    for(j in unique(sub2$period)){ #j=1991
      sub3_period <- filter(sub2, period==j)
      sub3_period[sub3_period$species==sp_names[[sp]], 8] <- sub3_period[sub3_period$species==sp_names[[sp2]], 8]
      sub3 <- rbind(sub3, sub3_period)
    }
    sub2 <- sub3
  }
res <- rbind(res, sub2)
}
saveRDS(res, file="sensitivity/ferld_growth_rcp_period_sens.rds")
| /sensitivity_analysis/growth_FERLD_rcp_bayes_maxpotgrowth_sensitivity.R | no_license | MaxenceSoubeyrand/Temperate_species_colonisation_in_mixedwood_boreal_forest_with_climate_change | R | false | false | 3,426 | r |
#this script uses the models generated by Statistical_analysis
#and generates hypothetical subjects and predicts ECog values for them
#Then it plots these predicted values
#define the range of values of memory or executive function for which to predict ECog
#In some cases I use specific ranges to dismiss parts at which the data is sparse (e.g., at exec function > 1.5 for age = 85)
predrange<-c(-2.5,-1,0,0.5,1,2,2.5)
FigureList<-list()
#below I'm generating hypothetical data for different exposures
#(memory, executive function (with spline), and exec without spline (coming))
########################################################################
#base model:
xba<- rep(predrange,3)
newDFage <- data.frame(
memory=xba,
ex_function=xba,
AGE_75_d=rep(c(-1,0,1),each=7))
########################################################################
#race model
xre<- rep(predrange,4)
newDFrace <- data.frame(
memory=xre,
ex_function=xre,
AGE_75_d=rep(c(0),each=28),
race=rep(c("Non-Latino-White","Black","Latino","Asian"),each=7))
########################################################################
##gender model
xge<- rep(predrange,2)
newDFgender <- data.frame(
memory=xge,
ex_function=xge,
AGE_75_d=rep(c(0),each=14),
GENDER=rep(c("Man","Woman"),each=7))
########################################################################
##education
xed<- rep(predrange,3)
newDFedu <- data.frame(
memory=xed,
ex_function=xed,
AGE_75_d=rep(c(0),each=21),
yrEDU=rep(c(-4,0,4),each=7),
race=rep("Non-Latino-White",21))
########################################################################
##familial dementia
xfh<- rep(predrange,2)
newDFfhist <- data.frame(
memory=xfh,
ex_function=xfh,
AGE_75_d=rep(c(0),each=14),
F_Hist=rep(c(0,1),each=7))
########################################################################
#depression
xdpr<- rep(predrange,3)
newDFdepr <- data.frame(
memory=xdpr,
ex_function=xdpr,
AGE_75_d=rep(c(0),each=21),
depression_01=rep(c(-1,0,1),each=7),
GENDER=rep(c("Woman"),each=21))
predggba_mem <- predict.lm(memfit[[1]],
newDFage, interval = "conf")
predggge_mem <- predict.lm(memfit[[2]],
newDFgender, interval = "conf")
predggra_mem <- predict.lm(memfit[[3]],
newDFrace, interval = "conf")
predgged_mem <- predict.lm(memfit[[4]],
newDFedu, interval = "conf")
predggfh_mem <- predict.lm(memfit[[5]],
newDFfhist, interval = "conf")
predggdpr_mem <- predict.lm(memfit[[6]],
newDFdepr, interval = "conf")
########################################################################
predggba <- predict.lm(results_execfun[[1]],
newDFage, interval = "conf")
predggge <- predict.lm(results_execfun[[2]],
newDFgender, interval = "conf")
predggra <- predict.lm(results_execfun[[3]],
newDFrace, interval = "conf")
predgged <- predict.lm(results_execfun[[4]],
newDFedu, interval = "conf")
predggfh <- predict.lm(results_execfun[[5]],
newDFfhist, interval = "conf")
predggdpr <- predict.lm(results_execfun[[6]],
newDFdepr, interval = "conf")
#########################################################################
#########################################################################
#GRAPHICS
#########################################################################
#########################################################################
#setting ploting parameters
plots_list<-list()
BW<- 0 #Black&Withe version for printing (set to 0 for color)
textSize <- 11
PM<- margin(.2, 0.2, .8, .5, "cm")
limin <- 0
limax <- 0.6
xlim<-c(-2,2)
xlab="Episodic memory"
xmarks <- scale_x_continuous(breaks=seq(-2,2,0.5))
thick <- 0.6
#plots for episodic memory
#base model with age only
#setting range to accommodate data sparsity
newDFageM<-cbind(newDFage,predggba_mem)
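#[1:20,] drops row 21 (the sparse upper end of the range for the 85-year-old group)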
Ba_mem <- ggplot(data = newDFageM[1:20,])+
geom_ribbon(aes(memory ,ymin=lwr,ymax=upr,
group=factor(AGE_75_d,levels = c(1,0,-1)),
fill=factor(AGE_75_d,levels = c(1,0,-1))),
alpha=0.15)+
geom_line(aes(memory ,fit,
color=factor(AGE_75_d,
levels = c(1,0,-1)),
linetype=factor(AGE_75_d,
levels = c(1,0,-1))),
size=thick)+
#theme_()+
ylab("Predicted log(ECog)")+
xlab(xlab)+
theme_classic2()+
theme(legend.justification=c(1,0),
legend.position="top",#c(0.95,0.65),
legend.direction = "horizontal",
legend.text=element_text(size=textSize),
legend.text.align=1,
text = element_text(size = textSize),
plot.margin = PM)+
scale_color_discrete(name="Age (years)",
labels = c("85","75","65"))+
scale_linetype_discrete(name="Age (years)",
labels = c("85","75","65"))+
coord_cartesian(xlim = xlim, ylim = c(limin,limax), expand = FALSE,
default = FALSE, clip = "on")+
xmarks+
guides(fill = FALSE,
linetype=FALSE,
color=FALSE)
if (BW==1) {
Ba_mem <- Ba_mem + scale_color_grey(start = 0, end = .8) + scale_fill_grey(start = 0, end = .8)
}
#plot.margin = margin(.5, .2, .8, .5, "cm")
FigureList[[1]]<-Ba_mem
#race/ethnicity
Ra_mem<-ggplot(data = newDFrace)+
geom_ribbon(aes(xre,ymin=predggra_mem[,2],ymax=predggra_mem[,3],
group=race,fill=race),
alpha=0.15)+
geom_line(aes(xre,predggra_mem[,1],
color=race,
linetype=race),
size=thick)+
theme_classic2()+
ylab("Predicted log(ECog)")+
xlab(xlab)+
theme(legend.justification=c(1,0),
legend.position=c(.95,0.62),
legend.title = element_blank(),
legend.spacing = unit(.5, 'cm'),
legend.direction = "vertical",
legend.text=element_text(size=textSize),
legend.text.align=0,
legend.title.align=1,
text = element_text(size = textSize),
plot.margin = PM)+
coord_cartesian(xlim = xlim, ylim = c(limin,limax), expand = FALSE,
default = FALSE, clip = "on")+xmarks+
guides(fill = FALSE,
linetype=FALSE,
color=FALSE)
if (BW==1) {
Ra_mem <- Ra_mem + scale_color_grey(start = 0, end = .8) + scale_fill_grey(start = 0, end = .9)
}
FigureList[[2]]<- Ra_mem
#gender
Ge_mem<-ggplot(data = newDFgender)+
geom_ribbon(
aes(xge,
ymin=predggge_mem[,2],
ymax=predggge_mem[,3],
group=factor(GENDER,levels = c("Woman","Man")),
fill=factor(GENDER,levels = c("Woman","Man"))),
alpha=0.15)+
geom_line(
aes(xge,
predggge_mem[,1],
color=factor(GENDER,levels = c("Woman","Man")),
linetype=factor(GENDER,levels = c("Woman","Man"))),
size=thick)+
theme_classic2()+xmarks+
ylab("Predicted log(ECog)")+
xlab(xlab)+
theme(legend.justification=c(1,0),
legend.position=c(0.95,0.70),
legend.spacing = unit(.1,"cm"),
legend.direction = "vertical",
legend.text=element_text(size=textSize),
legend.title.align=0,
text = element_text(size = textSize),
plot.margin = PM)+
scale_linetype_discrete(name= "Gender",labels=c("Women","Men"))+
scale_color_discrete(name= "Gender", labels=c("Women","Men"))+
coord_cartesian(xlim = xlim, ylim = c(limin,limax), expand = FALSE,
default = FALSE, clip = "on")+
guides(fill = FALSE,
linetype=FALSE,
color=FALSE)
if (BW==1) {
Ge_mem <- Ge_mem + scale_color_grey(start = 0, end = .8) + scale_fill_grey(start = 0, end = .8)
}
FigureList[[3]]<-Ge_mem
#education duration
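#rows 6-7 (the highest cognition values for the lowest-education group) are dropped where data are sparse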
range<-c(1:5,8:21)
newDFeduM<-cbind(newDFedu,predgged_mem)
Ed_mem <- ggplot(data = newDFeduM[range,])+
geom_ribbon(aes(xed[range],ymin=lwr,ymax=upr,
group=factor(yrEDU),
fill=factor(yrEDU)),
alpha=0.15) +
geom_line(aes(xed[range],fit,
color=factor(yrEDU),
linetype=factor(yrEDU)),
size=thick) +
theme_classic2()+xmarks+
ylab("Predicted log(ECog)") +
xlab(xlab)+
theme(legend.justification=c(1,0),
legend.position=c(.95,0.70),
legend.direction = "vertical",
legend.text=element_text(size=textSize),
legend.title = element_text(size=textSize),
legend.text.align=0,
legend.title.align=1,
text = element_text(size = textSize),
plot.margin = PM)+
scale_color_discrete(labels = c("8 ","12 ","16 "))+
scale_linetype_discrete(labels = c("8 ","12 ","16 "))+
coord_cartesian(xlim = xlim, ylim = c(limin,limax), expand = FALSE,
default = FALSE, clip = "on")+
guides(fill = FALSE,
linetype=FALSE,
color=FALSE)
if (BW==1) {
Ed_mem <- Ed_mem + scale_color_grey(start = 0, end = .8) + scale_fill_grey(start = 0, end = .8)
}
FigureList[[4]]<-Ed_mem
#Family history of dementia
Fh_mem <- ggplot(data = newDFfhist)+
geom_ribbon(aes(xfh,ymin=predggfh_mem[,2],ymax=predggfh_mem[,3],
group=factor(F_Hist,levels = c(1,0)),
fill=factor(F_Hist,levels= c(1,0))),
alpha=0.15) +
geom_line(aes(xfh,predggfh_mem[,1],
color=factor(F_Hist,
levels = c(1,0)),
linetype=factor(F_Hist,
levels = c(1,0))),
size=thick) +
theme_classic2()+xmarks+
ylab("Predicted log(ECog)") +
xlab(xlab)+
theme(legend.justification=c(1,0),
legend.position=c(0.95,.67),
legend.spacing = unit(0.2,"cm"),
legend.text=element_text(size=textSize),
legend.title = element_blank(),
legend.title.align=0,
text = element_text(size = textSize),
plot.margin = PM)+
scale_color_discrete(labels = c("yes","no"))+
scale_linetype_discrete(labels = c("yes","no"))+
coord_cartesian(xlim = xlim, ylim = c(limin,limax), expand = FALSE,
default = FALSE, clip = "on")+
guides(fill = FALSE,
linetype=FALSE,
color=FALSE)
if (BW==1) {
Fh_mem <- Fh_mem + scale_color_grey(start = 0, end = .8) + scale_fill_grey(start = 0, end = .8)
}
FigureList[[5]]<-Fh_mem
#depressive symptoms
Dpr_mem <- ggplot(data = newDFdepr)+
geom_ribbon(aes(xdpr,ymin=predggdpr_mem[,2],ymax=predggdpr_mem[,3],
group=factor(depression_01,levels = c(1,0,-1)),
fill=factor(depression_01,levels = c(1,0,-1))),
alpha=0.15) +
geom_line(aes(xdpr,predggdpr_mem[,1],
color=factor(depression_01,levels = c(1,0,-1)),
linetype=factor(depression_01,levels = c(1,0,-1))),
size=thick) +
theme_classic2()+xmarks+
ylab("Predicted log(ECog)") +
xlab(xlab)+
theme(legend.justification=c(1,0),
legend.position=c(0.95,0.6),
legend.direction = "vertical",
legend.text=element_text(size=textSize),
legend.title = element_text(size=textSize),
legend.text.align=0,
legend.title.align=0,
text = element_text(size = textSize),
plot.margin = PM)+
scale_linetype_discrete(name="Depressive symptoms")+
scale_color_discrete(name="Depressive symptoms")+
coord_cartesian(xlim = xlim, ylim = c(limin,limax), expand = FALSE,
default = FALSE, clip = "on")+
guides(fill = FALSE,
linetype=FALSE,
color=FALSE)
if (BW==1) {
Dpr_mem <- Dpr_mem + scale_color_grey(start = 0, end = .8) + scale_fill_grey(start = 0, end = .8)
}
FigureList[[6]]<-Dpr_mem
###############################################################################
###############################################################################
###############################################################################
#plots for executive function
###############################################################################
xlab<-"Executive function"
#base model with age only
newDFage<-cbind(newDFage,predggba)
range<-c(1:20)
Ba <- ggplot(data = newDFage[range,])+
geom_ribbon(aes(ex_function,ymin=lwr,ymax=upr,
group=factor(AGE_75_d,levels = c(1,0,-1)),
fill=factor(AGE_75_d,levels = c(1,0,-1))),
alpha=0.15)+
geom_line(aes(ex_function,fit,
color=factor(AGE_75_d,
levels = c(1,0,-1)),
linetype=factor(AGE_75_d,
levels = c(1,0,-1))),
size=thick)+
theme_classic2()+xmarks+
ylab("Predicted log(ECog)")+
xlab("Executive function")+
theme(legend.justification=c(0,0),
legend.position="top",#c(0.95,0.6),
legend.direction = "horizontal",
legend.text=element_text(size=textSize),
legend.text.align=1,
legend.title = element_text(size = textSize,face = "bold"),
text = element_text(size = textSize),
plot.margin = PM)+
scale_color_discrete(name="Modifier: Age",
labels = c("85","75","65"))+
scale_linetype_discrete(name="Modifier: Age",
labels = c("85","75","65"))+
coord_cartesian(xlim = xlim, ylim = c(limin,limax), expand = FALSE,
default = FALSE, clip = "on")+
guides(fill = FALSE)
if (BW==1) {
Ba <- Ba + scale_color_grey(name="Modifier: Age",
labels = c("85","75","65"),start = 0, end = .9) + scale_fill_grey(name="Modifier: Age",start = 0, end = .9)+coord_cartesian(xlim = xlim, ylim = c(limin,limax), expand = FALSE,
default = FALSE, clip = "on")
}
FigureList[[7]]<-Ba
#race/ethnicity
newDFrace<-cbind(newDFrace,predggra)
Ra<-ggplot(data = newDFrace[-c(14,21,28),])+
geom_ribbon(aes(ex_function,ymin=lwr,ymax=upr,
group=race,fill=race),
alpha=0.15)+
geom_line(aes(ex_function,fit,
color=race,
linetype=race),
size=thick)+
theme_classic2()+xmarks+
ylab("Predicted log(ECog)")+
xlab("Executive function")+
theme(legend.justification=c(0,0),
legend.position="top",#c(.95,0.55),
legend.title = element_text(size = 11,face = "bold"),
legend.spacing = unit(.1, 'cm'),
legend.direction = "horizontal",
legend.text=element_text(size=textSize),
legend.text.align=0,
legend.title.align=0,
text = element_text(size = textSize),
plot.margin = PM,legend.key.width = unit(.35,'cm'))+
scale_color_discrete(name="Modifier: Race/Ethnicity")+
scale_linetype_discrete(name="Modifier: Race/Ethnicity")+
coord_cartesian(xlim = xlim, ylim = c(limin,limax), expand = FALSE,
default = FALSE, clip = "on")+
guides(fill = FALSE)
if (BW==1) {
Ra <- Ra + scale_color_grey(name="Modifier: Race/Ethnicity",start = 0, end = .90) + scale_fill_grey(start = 0, end = .90) +
scale_linetype_discrete(name="Modifier: Race/Ethnicity")+ guides(fill = FALSE)
}
FigureList[[8]]<- Ra
#gender
Ge<-ggplot(data = newDFgender)+
geom_ribbon(
aes(xge,
ymin=predggge[,2],
ymax=predggge[,3],
group=factor(GENDER,levels = c("Woman","Man")),
fill=factor(GENDER,levels = c("Woman","Man"))),
alpha=0.15)+
geom_line(
aes(xge,
predggge[,1],
color=factor(GENDER,levels = c("Woman","Man")),
linetype=factor(GENDER,levels = c("Woman","Man"))),
size=thick)+
theme_classic2()+xmarks+
ylab("Predicted log(ECog)")+
xlab("Executive function")+
theme(legend.justification=c(0,0),
legend.position="top",#c(0.95,0.60),
legend.spacing = unit(.1,"cm"),
legend.direction = "horizontal",
legend.text=element_text(size=textSize),
legend.title.align=0,
legend.title = element_text(size = textSize,face = "bold"),
text = element_text(size = textSize),
plot.margin = PM)+
scale_linetype_discrete(name="Modifier: Gender",
labels=c("Women","Men"))+
scale_color_discrete(name="Modifier: Gender",
labels=c("Women","Men"))+
coord_cartesian(xlim = xlim, ylim = c(limin,limax), expand = FALSE,
default = FALSE, clip = "on")+
guides(fill = FALSE)
if (BW==1) {
Ge <- Ge + scale_color_grey(name="Modifier: Gender",
labels=c("Women","Men"),start = 0, end = .7) + scale_fill_grey(name="Modifier: Gender",start = 0, end = .7)
}
FigureList[[9]]<-Ge
#education duration
newDFedu<-cbind(newDFedu,predgged)
range<-c(1:5,8:21)
Ed <- ggplot(data = newDFedu[range,])+
geom_ribbon(aes(ex_function,ymin=lwr,ymax=upr,
group=factor(yrEDU),
fill=factor(yrEDU)),
alpha=0.15) +
geom_line(aes(ex_function,fit,
color=factor(yrEDU),
linetype=factor(yrEDU)),
size=thick) +
theme_classic2()+xmarks+
ylab("Predicted log(ECog)") +
xlab("Executive function")+
theme(legend.justification=c(0,0),
legend.position="top",#c(.95,0.6),
legend.direction = "horizontal",
legend.text=element_text(size=textSize),
legend.text.align=0,
legend.title.align=1,
legend.title = element_text(size = textSize,face = "bold"),
text = element_text(size = textSize),
plot.margin = PM)+
scale_color_discrete(name="Modifier: Educational attainment (in years)",
labels = c("8 ","12 ","16 "))+
scale_linetype_discrete(name="Modifier: Educational attainment (in years)",
labels = c("8 ","12 ","16 "))+
coord_cartesian(xlim = xlim, ylim = c(limin,limax), expand = FALSE,
default = FALSE, clip = "on")+
guides(fill = FALSE)
if (BW==1) {
Ed <- Ed + scale_color_grey(name="Modifier: Educational attainment (in years)",
labels = c("8 ","12 ","16 "),start = 0, end = .8) + scale_fill_grey(name="Modifier: Educational attainment (in years)",
labels = c("8 ","12 ","16 "),start = 0, end = .8)
}
FigureList[[10]]<-Ed
#Family history of dementia
Fh <- ggplot(data = newDFfhist)+
geom_ribbon(aes(xfh,ymin=predggfh[,2],ymax=predggfh[,3],
group=factor(F_Hist,levels = c(1,0)),
fill=factor(F_Hist,levels= c(1,0))),
alpha=0.15) +
geom_line(aes(xfh,predggfh[,1],
color=factor(F_Hist,
levels = c(1,0)),
linetype=factor(F_Hist,
levels = c(1,0))),
size=thick) +
theme_classic2()+xmarks+
ylab("Predicted log(ECog)") +
xlab(xlab)+
theme(legend.justification=c(0,0),
legend.position="top",#c(0.95,.6),
legend.direction = "horizontal",
legend.spacing = unit(0.2,"cm"),
legend.text=element_text(size=textSize),
legend.title.align=0,
legend.title = element_text(size = textSize,face = "bold"),
text = element_text(size = textSize),
plot.margin = PM)+
scale_color_discrete(name="Modifier: Familly history of dementia",labels = c("yes","no"))+
scale_linetype_discrete(name="Modifier: Familly history of dementia",
labels = c("yes","no"))+
coord_cartesian(xlim = xlim, ylim = c(limin,limax), expand = FALSE,
default = FALSE, clip = "on")+
guides(fill = FALSE)
if (BW==1) {
Fh <- Fh + scale_color_grey(name="Modifier: Familly history of dementia",labels = c("yes","no"),start = 0, end = .7) + scale_fill_grey(name="Modifier: Familly history of dementia",labels = c("yes","no"),start = 0, end = .7)
}
FigureList[[11]]<-Fh
#depressive symptoms
newDFdepr<-cbind(newDFdepr,predggdpr)
Dpr <- ggplot(data = newDFdepr[1:20,])+
geom_ribbon(aes(ex_function,ymin=lwr,ymax=upr,
group=factor(depression_01,levels = c(1,0,-1)),
fill=factor(depression_01,levels = c(1,0,-1))),
alpha=0.15) +
geom_line(aes(ex_function,fit,
color=factor(depression_01,levels = c(1,0,-1)),
linetype=factor(depression_01,levels = c(1,0,-1))),
size=thick) +
theme_classic2()+xmarks+
ylab("Predicted log(ECog)") +
xlab(xlab)+
theme(legend.justification=c(0,0),
legend.position="top",#c(0.95,0.6),
legend.direction = "horizontal",
legend.text=element_text(size=textSize),
legend.text.align=0,
legend.title.align=0,
legend.title = element_text(size = textSize,face = "bold"),
text = element_text(size = textSize),
plot.margin = PM)+
scale_linetype_discrete(name="Modifier: Depressive symptoms")+
scale_color_discrete(name="Modifier: Depressive symptoms")+
coord_cartesian(xlim = xlim, ylim = c(limin,limax), expand = FALSE,
default = FALSE, clip = "on")+
guides(fill = FALSE)
# legend.title = element_text(size = textSize,face = "bold"),
if (BW==1) {
Dpr <- Dpr + scale_color_grey(name="Modifier: Depressive symptoms",start = 0, end = .8) + scale_fill_grey(name="Modifier: Depressive symptoms",start = 0, end = .8)
}
FigureList[[12]]<-Dpr
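# Hedged sketch (not part of the original script): the 12 plots collected in
# FigureList could be arranged into a single panel for export. theme_classic2()
# above comes from ggpubr, so ggarrange() is likely available; the file name
# and the 3x4 layout below are illustrative assumptions only.
# combined <- ggpubr::ggarrange(plotlist = FigureList, ncol = 3, nrow = 4)
# ggpubr::ggexport(combined, filename = "predicted_ecog_panels.pdf",
#                  width = 12, height = 14)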
| /plotting_predicted_values.R | no_license | Mayeda-Research-Group/KHANDLE_ECOG_R | R | false | false | 21,442 | r |
context("Proper column names")
SHR_dataset_name <- system.file("extdata", "example_data.zip",
package = "asciiSetupReader")
weimar_dataset_name <- system.file("testdata", "weimar.txt",
package = "asciiSetupReader")
SHR_sps_name <- system.file("extdata", "example_setup.sps",
package = "asciiSetupReader")
SHR_sas_name <- system.file("extdata", "example_setup.sas",
package = "asciiSetupReader")
UCR_dataset_name <- system.file("testdata", "ucr1960.zip",
package = "asciiSetupReader")
UCR_sps_name <- system.file("testdata", "ucr1960.sps",
package = "asciiSetupReader")
UCR_sas_name <- system.file("testdata", "ucr1960.sas",
package = "asciiSetupReader")
NIBRS_dataset_name <- system.file("testdata", "nibrs_2000_batch_header1.zip",
package = "asciiSetupReader")
NIBRS_sps_name <- system.file("testdata", "nibrs_2000_batch_header1.sps",
package = "asciiSetupReader")
NIBRS_sas_name <- system.file("testdata", "nibrs_2000_batch_header1.sas",
package = "asciiSetupReader")
weimar_sps_name <- system.file("testdata", "weimar.sps",
package = "asciiSetupReader")
weimar_sas_name <- system.file("testdata", "weimar.sas",
package = "asciiSetupReader")
SHR <- spss_ascii_reader(dataset_name = SHR_dataset_name,
sps_name = SHR_sps_name,
keep_columns = c(1, 33, 45, 72, 100, 152))
SHR2 <- spss_ascii_reader(dataset_name = SHR_dataset_name,
sps_name = SHR_sps_name,
real_names = FALSE,
keep_columns = c(1, 33, 45, 72, 100, 152))
UCR <- spss_ascii_reader(dataset_name = UCR_dataset_name,
sps_name = UCR_sps_name,
keep_columns = c(1, 33, 345, 572, 1000, 1400))
UCR2 <- spss_ascii_reader(dataset_name = UCR_dataset_name,
sps_name = UCR_sps_name,
keep_columns = c(1, 33, 345, 572, 1000, 1400),
real_names = FALSE)
NIBRS <- spss_ascii_reader(dataset_name = NIBRS_dataset_name,
sps_name = NIBRS_sps_name,
keep_columns = c(1, 3, 5, 7, 10, 15))
NIBRS2 <- spss_ascii_reader(dataset_name = NIBRS_dataset_name,
sps_name = NIBRS_sps_name,
real_names = FALSE,
keep_columns = c(1, 3, 5, 7, 10, 15))
weimar <- spss_ascii_reader(dataset_name = weimar_dataset_name,
sps_name = weimar_sps_name,
keep_columns = c(1:7, 23))
weimar2 <- spss_ascii_reader(dataset_name = weimar_dataset_name,
sps_name = weimar_sps_name,
real_names = FALSE)
# Read SAS ===============================================================
SHR_sas <- sas_ascii_reader(dataset_name = SHR_dataset_name,
sas_name = SHR_sas_name,
keep_columns = c(1, 33, 45, 72, 100, 152))
SHR2_sas <- sas_ascii_reader(dataset_name = SHR_dataset_name,
sas_name = SHR_sas_name,
real_names = FALSE,
keep_columns = c(1, 33, 45, 72, 100, 152))
UCR_sas <- sas_ascii_reader(dataset_name = UCR_dataset_name,
sas_name = UCR_sas_name,
keep_columns = c(1, 33, 345, 572, 1000, 1400))
UCR2_sas <- sas_ascii_reader(dataset_name = UCR_dataset_name,
sas_name = UCR_sas_name,
keep_columns = c(1, 33, 345, 572, 1000, 1400),
real_names = FALSE)
NIBRS_sas <- sas_ascii_reader(dataset_name = NIBRS_dataset_name,
sas_name = NIBRS_sas_name,
keep_columns = c(1, 3, 5, 7, 10, 15))
NIBRS2_sas <- sas_ascii_reader(dataset_name = NIBRS_dataset_name,
sas_name = NIBRS_sas_name,
real_names = FALSE,
keep_columns = c(1, 3, 5, 7, 10, 15))
weimar_sas <- sas_ascii_reader(dataset_name = weimar_dataset_name,
sas_name = weimar_sas_name,
keep_columns = c(1:7, 23))
weimar2_sas <- sas_ascii_reader(dataset_name = weimar_dataset_name,
sas_name = weimar_sas_name,
real_names = FALSE)
test_that("Fixed (real names) columns are correct - SPSS", {
expect_named(SHR, c("IDENTIFIER_CODE", "VICTIM_2_AGE",
"VICTIM_5_AGE",
"VICTIM_11_ETHNIC_ORIGIN",
"OFFENDER_5_ETHNIC_ORIGIN",
"OFFENDER_11_SUB_CIRCUMSTANCE"))
expect_named(NIBRS, c("SEGMENT_LEVEL", "ORIGINATING_AGENCY_IDENTIFIER",
"DATE_ORI_WAS_ADDED",
"CITY_NAME",
"COUNTRY_DIVISION",
"FBI_FIELD_OFFICE"))
expect_named(UCR, c("ID_CODE", "JAN_MONTH_INCLUDED_IN",
"MAR_TOT_CLR_OTH_WPN_ASLT",
"MAY_TOT_CLR_ATMPTD_RAPE",
"SEP_UNFOUND_KNIFE_ASSL",
"DEC_TOT_CLR_GUN_ROBBER"))
expect_named(weimar[1:8], c("WAHLKREISCODE", "LAND_REGIERUNGSBEZ_CODE",
"DATA_TYPE_CODE", "UNIT_OF_ANALYSIS_NAME",
"X1919_RT_NR_ELIGIBLE_VTRS",
"X1919_RT_NR_VOTES_CAST",
"X1919_RT_VOTES_CAST",
"X1919_RT_OTHER_PARTIES"))
})
test_that("Not fixed column names are correct - SPSS", {
expect_named(SHR2, c("V1", "V33", "V45", "V72", "V100", "V152"))
expect_named(NIBRS2, c("B1001", "B1003", "B1005",
"B1007", "B1010", "B1015"))
expect_named(UCR2, c("V1", "V33", "V345", "V572", "V1000", "V1400"))
expect_named(weimar2, c("V1", "V2", "V3", "V4", "V5", "V6", "V7", "V8",
"V9", "V10", "V11", "V12", "V13", "V14", "V15",
"V16", "V17", "V18", "V19", "V20", "V21", "V22",
"V23"))
})
# Test SAS ============================================================
test_that("Fixed (real names) columns are correct - SAS", {
expect_named(SHR_sas, c("IDENTIFIER_CODE", "VICTIM_2_AGE",
"VICTIM_5_AGE",
"VICTIM_11_ETHNIC_ORIGIN",
"OFFENDER_5_ETHNIC_ORIGIN",
"OFFENDER_11_SUB_CIRCUMSTANCE"))
expect_named(NIBRS_sas, c("SEGMENT_LEVEL", "ORIGINATING_AGENCY_IDENTIFIER",
"DATE_ORI_WAS_ADDED",
"CITY_NAME",
"COUNTRY_DIVISION",
"FBI_FIELD_OFFICE"))
expect_named(UCR_sas, c("ID_CODE", "JAN_MONTH_INCLUDED_IN",
"MAR_TOT_CLR_OTH_WPN_ASLT",
"MAY_TOT_CLR_ATMPTD_RAPE",
"SEP_UNFOUND_KNIFE_ASSL",
"DEC_TOT_CLR_GUN_ROBBER"))
expect_named(weimar_sas, c("WAHLKREISCODE", "LAND_REGIERUNGSBEZ_CODE",
"DATA_TYPE_CODE", "UNIT_OF_ANALYSIS_NAME",
"X1919_RT_NR_ELIGIBLE_VTRS",
"X1919_RT_NR_VOTES_CAST",
"X1919_RT_VOTES_CAST",
"X1919_RT_OTHER_PARTIES"))
})
test_that("Not fixed column names are correct - SAS", {
expect_named(SHR2_sas, c("V1", "V33", "V45", "V72", "V100", "V152"))
expect_named(NIBRS2_sas, c("B1001", "B1003", "B1005",
"B1007", "B1010", "B1015"))
expect_named(UCR2_sas, c("V1", "V33", "V345", "V572", "V1000", "V1400"))
expect_named(weimar2_sas, c("V1", "V2", "V3", "V4", "V5", "V6", "V7", "V8",
"V9", "V10", "V11", "V12", "V13", "V14", "V15",
"V16", "V17", "V18", "V19", "V20", "V21", "V22",
"V23"))
})
| /tests/testthat/test-column-names.R | no_license | kashenfelter/asciiSetupReader | R | false | false | 8,388 | r |
library("zoo")
library("xts")
library("ggplot2")
attacks <- read.csv("../../out/snort/distinct/part-r-00000", header=F)
# zoo indices should be POSIXct; POSIXlt is not a supported index class
z_attacks <- zoo(attacks$V6, as.POSIXct(attacks$V1, origin="1970-01-01", tz="GMT"))
plot(z_attacks, xlab="Time", ylab="Attacks")
points(z_attacks, col='blue', pch=20, cex=1)
qplot(x=attacks$V6, geom='density', xlab="No of Attacks per bin period", ylab="Density")
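# Hedged sketch (not in the original script): xts is loaded above but never
# used; assuming z_attacks holds per-event attack counts, one natural use is
# an hourly aggregate. The "hours" period is an illustrative choice.
# x_attacks <- as.xts(z_attacks)
# hourly <- period.apply(x_attacks, endpoints(x_attacks, on = "hours"), sum)
# plot(hourly, main = "Attacks per hour")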
| /r/examples/snort.r | no_license | RIPE-NCC/packetpig | R | false | false | 396 | r |
rm(list =ls())
library(tidyr)
library(dplyr)
library(purrr)
library(repurrrsive)
library(jsonlite)
library(tidyjson)
library(stringr)
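# Hedged sketch (not in the original file): spread_single_choice_values() and
# get_multi_choice_Qs() are called below but defined elsewhere, presumably in
# a sourced helper script. Under that assumption, a minimal stand-in for the
# single-choice helper (signature inferred from the calls below) might be:
# spread_single_choice_values <- function(.x, cols_out, specs) {
#   names(specs) <- cols_out                    # one output column per question
#   do.call(spread_values, c(list(.x), specs))  # spread each jstring() spec
# }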
jdata<-read.csv("Data/clean.2.3.2020.1500.csv", sep = ",", stringsAsFactors = F)
myanns<-read.csv("Data/test_JSON_annotations.csv",
sep = ",", stringsAsFactors = F)
flat_t_t_anns<-myanns %>%
select(., user_name, annotations) %>%
as.tbl_json(json.column = "annotations") %>%
gather_array(column.name = "task_index") %>%
spread_values(task = jstring("task"),
task_label = jstring("task_label"),
value = jstring("value"))
#limit to subset of columns
flat_to_task <- jdata %>%
select(., subject_ids, user_name, classification_id,
workflow_version, annotations) %>%
as.tbl_json(json.column = "annotations") %>%
gather_array(column.name = "task_index") %>%
spread_values(task = jstring("task"),
task_label = jstring("task_label"),
value = jstring("value")
flat_to_task <- jdata %>%
select(., subject_ids, user_name, classification_id,
workflow_version, annotations) %>%
as.tbl_json(json.column = "annotations")
flat_to_task1 <- flat_to_task %>%
gather_array(column.name = "task_index") %>%
spread_values(task = jstring("task"),
task_label = jstring("task_label"))
flat_to_task2 <- flat_to_task1 %>%
enter_object("value") %>%
spread_values
choices_only<-flat_to_task1 %>%
enter_object("value") %>%
json_lengths(column.name = "total_submissions") %>%
gather_array(column.name = "submission_index") %>%
spread_values(choice = jstring("choice"))
single_choice_answers<- choices_only %>%
enter_object("answers") %>%
spread_single_choice_values(single_choice_colnames, lapply(single_choice_Qs, jstring))
multi_choice_answers<-get_multi_choice_Qs(choices_only,multi_choice_Qs, multi_choice_colnames)
#this works thus far; need to join to one table which is what the zooniverse script does. But let's see if we can get the subject data, first.
#limit to subset of columns
subj_data <- jdata %>%
select(., subject_ids, user_name, classification_id,
workflow_version, subject_data) %>%
as.tbl_json(json.column = "subject_data")
gather_array(column.name = "task_index") %>%
spread_values(task = jstring("task"), task_label = jstring("task_label"),
value = jstring("value"))
choices_only<-flat_to_task %>%
enter_object("value") %>%
json_lengths(column.name = "total_submissions") %>%
gather_array(column.name = "submission_index") %>%
spread_values(choice = jstring("choice"))
single_choice_answers<- choices_only %>%
enter_object("answers") %>%
spread_single_choice_values(single_choice_colnames, lapply(single_choice_Qs, jstring))
multi_choice_answers<-get_multi_choice_Qs(choices_only,multi_choice_Qs, multi_choice_colnames)
rm(list = ls())
mydata<-read.csv("Data/test_JSON_annotations.csv",
sep = ",", stringsAsFactors = F)
glimpse(mydata)
library(jsonlite)
library(tidyjson)
library(purrr)
survey_id <- c("T0") #determine from prettify
single_choice_Qs <- c("choice","HOWMANY", "YOUNGPRESENT",
"ANTLERSPRESENT", "ESTIMATEOFSNOWDEPTHSEETUTORIAL",
"CHILDRENPRESENT",
"ISITACTIVELYRAININGORSNOWINGINTHEPICTURE")
#determine from prettify call
single_choice_colnames <- c("Species", "Number", "Young","Antlers",
"SnowDepth","Children","Precipitation")
#determine from View_json call
multi_choice_Qs <- c("WHATBEHAVIORSDOYOUSEE")#determine from View_json call
multi_choice_colnames <- c("behavior")#determine from View_json call
cols_in = single_choice_Qs
cols_out = single_choice_colnames
x <- cols_out
names<-lapply(cols_in, jstring)
#convert annotations from json
flat_to_task<-mydata %>%
select(., user_name, annotations) %>%
as.tbl_json(json.column = "annotations") %>%
gather_array(column.name = "task_index") %>%
spread_values(task = jstring ("task"), task_label = jstring("task_label"), value = jstring("value"))
choices_only<-flat_to_task %>%
enter_object("value") %>%
json_lengths(column.name = "total_submissions") %>%
gather_array(column.name = "submission_index") %>%
spread_values(choice = jstring("choice"))
single_choice_answers<- choices_only %>%
enter_object("answers") %>%
spread_single_choice_values(cols_out, lapply(cols_in, jstring))
df<-read.csv("Data/clean.2.3.2020.1500.csv", sep = ",", stringsAsFactors = F)
subj_id_string<-as.character(df$subject_ids)
df$new_sub_data<-df$subject_data %>% str_replace(subj_id_string, "subject")
flat_to_task <- df %>%
select(., subject_ids, user_name, classification_id,
workflow_version, subject_ids ,new_sub_data) %>%
as.tbl_json(json.column = "new_sub_data") %>%
#enter_object("subject") %>%
spread_all
#works!
flat_to_task1 <-df %>%
select(., subject_ids, user_name, classification_id,
workflow_version, subject_ids, new_sub_data) %>%
as.tbl_json(json.column = "new_sub_data") %>%
spread_values(
id = jstring(subject,retired,id),
class.count = jnumber(subject, retired, classifications_count),
batch = jstring("subject", "!Batch"),
round = jstring("subject", "!Round"),
Imj1 = jstring(subject, Image1),
Imj2 = jstring(subject,Image2),
Img3 = jstring(subject, Image3),
CamModel = jstring(subject, CamModel),
CamNum = jstring("subject", "#CamNumber"),
SD_card_num = jstring("subject", "#SDCardNum"),
ForestType = jstring("subject", "!ForestType"),
ForestName = jstring("subject", "#ForestName")
)
#works
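# Hedged sketch (not in the original file): an earlier comment notes the
# flattened pieces still need to be joined into one table. One plausible join,
# assuming the key columns created by the shared pipeline steps survive in
# both halves:
# flat_all <- single_choice_answers %>%
#   dplyr::left_join(multi_choice_answers,
#                    by = c("user_name", "task_index", "submission_index"))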
| /Script files/put_em_together.R | no_license | erethizon/JSON_primer | R | false | false | 5,635 | r |
## The cachematrix.R file provides the functionality to store the inverse of a
## matrix in memory for quick and easy retrieval rather than calculating the
## inverse on the fly when needed
## makeCacheMatrix: This function takes a matrix as an arguement and stores
## the matrix in memory. In addtion it provides getters and setters for the
## matrix and it's inverse.
makeCacheMatrix <- function(x = matrix()) {
inv <- NULL
set <- function(y) {
x <<- y
inv <<- NULL
}
get <- function () x
setInverse <- function(inverse) inv <<- inverse
getInverse <- function() inv
list(set = set, get = get,
setInverse = setInverse,
getInverse = getInverse)
}
## cacheSolve: This function takes a matrix and returns it's inverse. If the
## inverse of the matrix already exists in memory, the inverse is returned.
## If the inverse has not been previously cached, then the inverse is
## calculated and saved prior to returning the inverse.
cacheSolve <- function(x, ...) {
## Return a matrix that is the inverse of 'x'
m <- x$getInverse()
if(!is.null(m)) {
message("getting cached data")
return(m)
}
data <- x$get()
m <- solve(data)
x$setInverse(m)
m
}
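## Hedged usage sketch (not part of the original file); the matrix below is
## illustrative only:
## m <- makeCacheMatrix(matrix(c(2, 0, 0, 2), nrow = 2))
## cacheSolve(m)   # computes, caches, and returns the inverse
## cacheSolve(m)   # prints "getting cached data" and returns the cached copy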
| /cachematrix.R | no_license | ASantini/ProgrammingAssignment2 | R | false | false | 1,212 | r |
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/fup_completeness.R
\name{fup_completeness}
\alias{fup_completeness}
\title{Calculate C index of follow up completeness}
\usage{
fup_completeness(time = NULL, status = NULL, cutoff = seq(1, max(time),
length = 10), strata = NULL)
}
\arguments{
\item{time}{Follow up (in days?)}
\item{status}{event indicator}
\item{cutoff}{(in days?)}
\item{strata}{group}
}
\value{
A data.frame with a global C and a C for each group
}
\description{
Calculate C index of follow up completeness.
}
\examples{
time <- c(180, 12, 240, 250 )
status <- c( 0, 1, 0, 1 )
group <- c("A","A", "B", "B" )
## example:
## quantify fup completeness to 200 days (e.g. minimum potential
## follow up in a hypothetical prospective trial)
fup_completeness(time = time, status = status,
cutoff = seq(150, 250, 10),
strata = group)
}
\references{
Clark T., Altman D., De Stavola B. (2002),
Quantification of the completeness of follow-up. Lancet 2002;
359: 1309-10
}
| /man/fup_completeness.Rd | no_license | strategist922/lbsurv | R | false | true | 1,064 | rd |
testlist <- list(G = numeric(0), Rn = numeric(0), atmp = numeric(0), ra = numeric(0), relh = c(6.14105502101992e+238, -9.41831726741248e+144, 4.9768274128466e+236, 8.94210540636618e-313, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0), rs = numeric(0), temp = c(NaN, 7.5180760628122e-304, -1.0099548945943e-211, NaN, -7.29111871014492e-304, 1.75512488375807e+50, 2.64939413087107e-158, -16173318.4735499, -5.24414796837718e-148, NaN, -1.10339037428038e-87, 5.62067438631181e-104, -5.83380844035165e+196, 8.84662638470377e-160, Inf, 1.25561609525069e+163, -2.48524917209761e+175, 7.69395743255115e+35, -8.77362046735381e+261, -9.41828154183551e+144, 0))
result <- do.call(meteor:::ET0_PenmanMonteith,testlist)
str(result) | /meteor/inst/testfiles/ET0_PenmanMonteith/AFL_ET0_PenmanMonteith/ET0_PenmanMonteith_valgrind_files/1615841546-test.R | no_license | akhikolla/updatedatatype-list3 | R | false | false | 957 | r |
#' Elasticsearch alias APIs
#'
#' @param index An index name
#' @param alias An alias name
#' @param ignore_unavailable (logical) What to do if a specified index name doesn't exist. If set
#' to TRUE then those indices are ignored.
#' @param routing Ignored for now
#' @param filter Ignored for now
#' @param ... Curl args passed on to \code{\link[httr]{POST}}
#' @examples \dontrun{
#' # Retrieve a specified alias
#' alias_get(index="plos")
#' alias_get(alias="tables")
#' aliases_get()
#'
#' # Create/update an alias
#' alias_create(index = "plos", alias = "tables")
#'
#' # Check for alias existence
#' alias_exists(index = "plos")
#' alias_exists(alias = "tables")
#' alias_exists(alias = "adsfasdf")
#'
#' # Delete an alias
#' alias_delete(index = "plos", alias = "tables")
#' alias_exists(alias = "tables")
#'
#' # Curl options
#' library("httr")
#' aliases_get(config=verbose())
#' }
#' @references
#' \url{http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/indices-aliases.html}
#' @author Scott Chamberlain <myrmecocystus@@gmail.com>
#' @name alias
NULL
#' @export
#' @rdname alias
alias_get <- function(index=NULL, alias=NULL, ignore_unavailable=FALSE, ...) {
alias_GET(index, alias, ignore_unavailable, ...)
}
#' @export
#' @rdname alias
aliases_get <- function(index=NULL, alias=NULL, ignore_unavailable=FALSE, ...) {
alias_GET(index, alias, ignore_unavailable, ...)
}
#' @export
#' @rdname alias
alias_exists <- function(index=NULL, alias=NULL, ...) {
res <- HEAD(alias_url(index, alias), ...)
if (res$status_code == 200) TRUE else FALSE
}
#' @export
#' @rdname alias
alias_create <- function(index=NULL, alias, routing=NULL, filter=NULL, ...) {
out <- PUT(alias_url(index, alias), c(make_up(), ...))
stop_for_status(out)
jsonlite::fromJSON(content(out, "text"), FALSE)
}
#' @export
#' @rdname alias
alias_delete <- function(index=NULL, alias, ...) {
out <- DELETE(alias_url(index, alias), c(make_up(), ...))
stop_for_status(out)
jsonlite::fromJSON(content(out, "text"), FALSE)
}
alias_GET <- function(index, alias, ignore, ...) {
checkconn()
tt <- GET( alias_url(index, alias), query = ec(list(ignore_unavailable = as_log(ignore))), make_up(), ...)
if (tt$status_code > 202) geterror(tt)
jsonlite::fromJSON(content(tt, as = "text"), FALSE)
}
alias_url <- function(index, alias) {
url <- make_url(es_get_auth())
if (!is.null(index)) {
if (!is.null(alias))
sprintf("%s/%s/_alias/%s", url, cl(index), alias)
else
sprintf("%s/%s/_alias", url, cl(index))
} else {
if (!is.null(alias))
sprintf("%s/_alias/%s", url, alias)
else
sprintf("%s/_alias", url)
}
}
| /R/alias.R | permissive | adeandrade/elastic | R | false | false | 2,669 | r |
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/1_ctmcProbabilisticSAI.R
\name{transition2Generator}
\alias{transition2Generator}
\title{Return the generator matrix for a corresponding transition matrix}
\usage{
transition2Generator(P, t = 1, method = "logarithm")
}
\arguments{
\item{P}{transition matrix between time 0 and t}
\item{t}{time of observation}
\item{method}{"logarithm" returns the Matrix logarithm of the transition matrix}
}
\value{
A matrix that represent the generator of P
}
\description{
Calculate the generator matrix for a
corresponding transition matrix
}
\examples{
mymatr <- matrix(c(.4, .6, .1, .9), nrow = 2, byrow = TRUE)
Q <- transition2Generator(P = mymatr)
expm::expm(Q)
}
\seealso{
\code{\link{rctmc}}
}
| /man/transition2Generator.Rd | no_license | ehsan66/markovchain | R | false | true | 785 | rd |
# DLM size methods
matsizlim<-function(x,DLM){
return(1/(1+exp((DLM@AM[x]-(1:DLM@MaxAge))/(DLM@AM[x]*DLM@AM[x]*0.05))))
}
class(matsizlim)<-"DLM size"
| /R/DLM_size.R | no_license | FisheriesIIM/DLMtool | R | false | false | 160 | r |